/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018 The FreeBSD Foundation
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
36 #include <sys/mutex.h>
39 #include <sys/sched.h>
40 #include <sys/sysctl.h>
41 #include <sys/systm.h>
43 #include <vm/vm_param.h>
44 #include <vm/vm_extern.h>
46 #include <vm/vm_map.h>
47 #include <vm/vm_page.h>
/*
 * Assembly fast-path primitives for user<->kernel data movement, paired with
 * function pointers holding the same entry points relocated by setidt_disp
 * (see copyout_init_tramp).  Each routine takes the kernel %cr3 value
 * (u_int) as its final argument; the _tramp pointers must be called instead
 * of the raw symbols so execution goes through the relocated trampoline
 * mapping.  NOTE(review): the extra kcr3 argument is presumably for the
 * PTI page-table switch inside the fast path — confirm against the .s file.
 */
49 int copyin_fast(const void *udaddr, void *kaddr, size_t len, u_int);
50 static int (*copyin_fast_tramp)(const void *, void *, size_t, u_int);
51 int copyout_fast(const void *kaddr, void *udaddr, size_t len, u_int);
52 static int (*copyout_fast_tramp)(const void *, void *, size_t, u_int);
53 int fubyte_fast(volatile const void *base, u_int kcr3);
54 static int (*fubyte_fast_tramp)(volatile const void *, u_int);
55 int fuword16_fast(volatile const void *base, u_int kcr3);
56 static int (*fuword16_fast_tramp)(volatile const void *, u_int);
57 int fueword_fast(volatile const void *base, long *val, u_int kcr3);
58 static int (*fueword_fast_tramp)(volatile const void *, long *, u_int);
59 int subyte_fast(volatile void *base, int val, u_int kcr3);
60 static int (*subyte_fast_tramp)(volatile void *, int, u_int);
61 int suword16_fast(volatile void *base, int val, u_int kcr3);
62 static int (*suword16_fast_tramp)(volatile void *, int, u_int);
63 int suword_fast(volatile void *base, long val, u_int kcr3);
64 static int (*suword_fast_tramp)(volatile void *, long, u_int);
/*
 * machdep.fast_copyout tunable/sysctl: when non-zero (default), the *_fast
 * trampoline path is tried before falling back to the page-hold slow path.
 */
66 static int fast_copyout = 1;
67 SYSCTL_INT(_machdep, OID_AUTO, fast_copyout, CTLFLAG_RWTUN,
/*
 * Initialize the *_fast_tramp function pointers.  Each pointer is set to the
 * corresponding assembly routine displaced by setidt_disp, so that callers
 * reach the fast path through the relocated (trampoline) text mapping.
 */
72 copyout_init_tramp(void)
75 copyin_fast_tramp = (int (*)(const void *, void *, size_t, u_int))(
76 (uintptr_t)copyin_fast + setidt_disp);
77 copyout_fast_tramp = (int (*)(const void *, void *, size_t, u_int))(
78 (uintptr_t)copyout_fast + setidt_disp);
79 fubyte_fast_tramp = (int (*)(volatile const void *, u_int))(
80 (uintptr_t)fubyte_fast + setidt_disp);
81 fuword16_fast_tramp = (int (*)(volatile const void *, u_int))(
82 (uintptr_t)fuword16_fast + setidt_disp);
83 fueword_fast_tramp = (int (*)(volatile const void *, long *, u_int))(
84 (uintptr_t)fueword_fast + setidt_disp);
85 subyte_fast_tramp = (int (*)(volatile void *, int, u_int))(
86 (uintptr_t)subyte_fast + setidt_disp);
87 suword16_fast_tramp = (int (*)(volatile void *, int, u_int))(
88 (uintptr_t)suword16_fast + setidt_disp);
89 suword_fast_tramp = (int (*)(volatile void *, long, u_int))(
90 (uintptr_t)suword_fast + setidt_disp);
/*
 * Slow-path user access: fault in and wire the user pages covering
 * [uva, uva + len), map them at a per-CPU kernel address, then invoke
 * f(kaddr, arg) on that kernel alias.  Used when the fast trampoline path
 * is disabled or fails.  NOTE(review): the error-return and unmap paths
 * are not visible in this excerpt — presumably returns EFAULT when
 * vm_fault_quick_hold_pages() fails; confirm against the full source.
 */
94 cp_slow0(vm_offset_t uva, size_t len, bool write,
95 void (*f)(vm_offset_t, void *), void *arg)
/* Number of pages spanned by the (possibly unaligned) user range. */
103 plen = howmany(uva - trunc_page(uva) + len, PAGE_SIZE);
104 MPASS(plen <= nitems(m));
106 i = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map, uva, len,
107 (write ? VM_PROT_WRITE : VM_PROT_READ) | VM_PROT_QUICK_NOFAULT,
/*
 * Pick the per-CPU mapping window.  If the thread cannot sleep (or holds
 * vslock'ed memory, or is in a no-faulting section) use the mutex-protected
 * window; otherwise use the sx-protected one (may sleep).
 */
113 if (!THREAD_CAN_SLEEP() || curthread->td_vslock_sz > 0 ||
114 (curthread->td_pflags & TDP_NOFAULTING) != 0) {
116 mtx_lock(&pc->pc_copyout_mlock);
117 kaddr = pc->pc_copyout_maddr;
120 sx_xlock(&pc->pc_copyout_slock);
121 kaddr = pc->pc_copyout_saddr;
/* Enter the held pages into the per-CPU window and apply the page offset. */
123 pmap_cp_slow0_map(kaddr, plen, m);
124 kaddr += uva - trunc_page(uva);
128 sx_xunlock(&pc->pc_copyout_slock);
130 mtx_unlock(&pc->pc_copyout_mlock);
131 vm_page_unhold_pages(m, plen);
/* Per-chunk state shared between copyinstr() and copyinstr_slow0(). */
135 struct copyinstr_arg0 {
/*
 * cp_slow0() callback for copyinstr(): copy bytes from the mapped user
 * chunk at kva into the kernel buffer, advancing ca->alen, stopping at
 * either ca->len bytes or a NUL terminator (which sets ca->end).
 * NOTE(review): the store into the kernel buffer is on lines not visible
 * in this excerpt.
 */
143 copyinstr_slow0(vm_offset_t kva, void *arg)
145 struct copyinstr_arg0 *ca;
149 MPASS(ca->alen == 0 && ca->len > 0 && !ca->end);
150 while (ca->alen < ca->len && !ca->end) {
151 c = *(char *)(kva + ca->alen);
/*
 * Copy a NUL-terminated string from user space, at most maxlen bytes,
 * processing it one page-bounded chunk at a time via cp_slow0().
 * Returns ENAMETOOLONG if maxlen bytes were consumed without seeing the
 * terminator; stores the copied length (including NUL) via lencopied.
 */
161 copyinstr(const void *udaddr, void *kaddr, size_t maxlen, size_t *lencopied)
163 struct copyinstr_arg0 ca;
/* Walk the user string chunk by chunk, never crossing a page boundary. */
170 for (plen = 0, uc = (vm_offset_t)udaddr, ca.kc = (vm_offset_t)kaddr;
171 plen < maxlen && !ca.end; uc += ca.alen, plen += ca.alen) {
172 ca.len = round_page(uc) - uc;
175 if (plen + ca.len > maxlen)
176 ca.len = maxlen - plen;
178 if (cp_slow0(uc, ca.len, false, copyinstr_slow0, &ca) != 0) {
/* Ran to maxlen without finding the NUL terminator. */
183 if (!ca.end && plen == maxlen && error == 0)
184 error = ENAMETOOLONG;
185 if (lencopied != NULL)
/*
 * cp_slow0() callback for copyin(): copy ca->len bytes from the mapped
 * user chunk at kva to the kernel destination ca->kc.
 */
196 copyin_slow0(vm_offset_t kva, void *arg)
198 struct copyin_arg0 *ca;
201 bcopy((void *)kva, (void *)ca->kc, ca->len);
/*
 * Copy len bytes from user address udaddr to kernel address kaddr.
 * Rejects ranges that wrap or extend past VM_MAXUSER_ADDRESS, tries the
 * fast trampoline for small copies, and otherwise falls back to per-page
 * chunks through cp_slow0().  Returns 0 or EFAULT.
 */
205 copyin(const void *udaddr, void *kaddr, size_t len)
207 struct copyin_arg0 ca;
/* Overflow/bounds check on the user range. */
211 if ((uintptr_t)udaddr + len < (uintptr_t)udaddr ||
212 (uintptr_t)udaddr + len > VM_MAXUSER_ADDRESS)
/* Fast path: small copy through the relocated trampoline. */
214 if (len == 0 || (fast_copyout && len <= TRAMP_COPYOUT_SZ &&
215 copyin_fast_tramp(udaddr, kaddr, len, pmap_get_kcr3()) == 0))
/* Slow path: page-bounded chunks. */
217 for (plen = 0, uc = (vm_offset_t)udaddr, ca.kc = (vm_offset_t)kaddr;
218 plen < len; uc += ca.len, ca.kc += ca.len, plen += ca.len) {
219 ca.len = round_page(uc) - uc;
222 if (plen + ca.len > len)
224 if (cp_slow0(uc, ca.len, false, copyin_slow0, &ca) != 0)
/*
 * cp_slow0() callback for copyout(): copy ca->len bytes from the kernel
 * source ca->kc to the mapped user chunk at kva.
 */
231 copyout_slow0(vm_offset_t kva, void *arg)
233 struct copyin_arg0 *ca;
236 bcopy((void *)ca->kc, (void *)kva, ca->len);
/*
 * Copy len bytes from kernel address kaddr to user address udaddr.
 * Mirror image of copyin(): bounds check, fast trampoline attempt, then
 * page-bounded chunks through cp_slow0() with write access.  Returns 0
 * or EFAULT.
 */
240 copyout(const void *kaddr, void *udaddr, size_t len)
242 struct copyin_arg0 ca;
/* Overflow/bounds check on the user range. */
246 if ((uintptr_t)udaddr + len < (uintptr_t)udaddr ||
247 (uintptr_t)udaddr + len > VM_MAXUSER_ADDRESS)
/* Fast path: small copy through the relocated trampoline. */
249 if (len == 0 || (fast_copyout && len <= TRAMP_COPYOUT_SZ &&
250 copyout_fast_tramp(kaddr, udaddr, len, pmap_get_kcr3()) == 0))
/* Slow path: page-bounded chunks. */
252 for (plen = 0, uc = (vm_offset_t)udaddr, ca.kc = (vm_offset_t)kaddr;
253 plen < len; uc += ca.len, ca.kc += ca.len, plen += ca.len) {
254 ca.len = round_page(uc) - uc;
257 if (plen + ca.len > len)
259 if (cp_slow0(uc, ca.len, true, copyout_slow0, &ca) != 0)
/*
 * Fetch (load) a 32-bit word, a 16-bit word, or an 8-bit byte from user
 * memory.
 */
/*
 * cp_slow0() callback for fubyte(): load one byte from the mapped user
 * address, zero-extended into the int result pointed to by arg.
 */
271 fubyte_slow0(vm_offset_t kva, void *arg)
274 *(int *)arg = *(u_char *)kva;
/*
 * Fetch one byte from user memory.  Returns the byte (0..255) on success,
 * -1 on fault or out-of-range address.
 */
278 fubyte(volatile const void *base)
/* Reject addresses that wrap or lie outside the user address space. */
282 if ((uintptr_t)base + sizeof(uint8_t) < (uintptr_t)base ||
283 (uintptr_t)base + sizeof(uint8_t) > VM_MAXUSER_ADDRESS)
286 res = fubyte_fast_tramp(base, pmap_get_kcr3());
/* Fast path failed; fall back to the page-hold slow path. */
290 if (cp_slow0((vm_offset_t)base, sizeof(char), false, fubyte_slow0,
/*
 * cp_slow0() callback for fuword16(): load a 16-bit word from the mapped
 * user address, zero-extended into the int result pointed to by arg.
 */
297 fuword16_slow0(vm_offset_t kva, void *arg)
300 *(int *)arg = *(uint16_t *)kva;
/*
 * Fetch a 16-bit word from user memory.  Returns the word (0..65535) on
 * success, -1 on fault or out-of-range address.
 */
304 fuword16(volatile const void *base)
/* Reject addresses that wrap or lie outside the user address space. */
308 if ((uintptr_t)base + sizeof(uint16_t) < (uintptr_t)base ||
309 (uintptr_t)base + sizeof(uint16_t) > VM_MAXUSER_ADDRESS)
312 res = fuword16_fast_tramp(base, pmap_get_kcr3());
/* Fast path failed; fall back to the page-hold slow path. */
316 if (cp_slow0((vm_offset_t)base, sizeof(uint16_t), false,
317 fuword16_slow0, &res) != 0)
/*
 * cp_slow0() callback for fueword(): load a 32-bit word from the mapped
 * user address into *val.  The uint32_t store through the long * argument
 * is exact on i386, where long is 32 bits.
 */
323 fueword_slow0(vm_offset_t kva, void *arg)
326 *(uint32_t *)arg = *(uint32_t *)kva;
/*
 * Fetch a word from user memory into *val, returning 0 on success or -1
 * on fault — unlike fuword(), the value and the error indication are kept
 * separate, so any bit pattern can be fetched.
 */
330 fueword(volatile const void *base, long *val)
/* Reject addresses that wrap or lie outside the user address space. */
334 if ((uintptr_t)base + sizeof(*val) < (uintptr_t)base ||
335 (uintptr_t)base + sizeof(*val) > VM_MAXUSER_ADDRESS)
338 if (fueword_fast_tramp(base, val, pmap_get_kcr3()) == 0)
/* Fast path failed; fall back to the page-hold slow path. */
341 if (cp_slow0((vm_offset_t)base, sizeof(long), false, fueword_slow0,
/*
 * 32-bit variant of fueword().  On i386 long is 32 bits, so the long *
 * cast of the int32_t result pointer is exact.
 */
349 fueword32(volatile const void *base, int32_t *val)
352 return (fueword(base, (long *)val));
/*
 * Store a 32-bit word, a 16-bit word, or an 8-bit byte to user memory.
 */
/*
 * cp_slow0() callback for subyte(): store the low byte of the int pointed
 * to by arg at the mapped user address.
 */
360 subyte_slow0(vm_offset_t kva, void *arg)
363 *(u_char *)kva = *(int *)arg;
/*
 * Store one byte to user memory.  Returns 0 on success, -1 on fault or
 * out-of-range address.
 */
367 subyte(volatile void *base, int byte)
/* Reject addresses that wrap or lie outside the user address space. */
370 if ((uintptr_t)base + sizeof(uint8_t) < (uintptr_t)base ||
371 (uintptr_t)base + sizeof(uint8_t) > VM_MAXUSER_ADDRESS)
373 if (fast_copyout && subyte_fast_tramp(base, byte, pmap_get_kcr3()) == 0)
/* Fast path failed; fall back to the page-hold slow path. */
375 return (cp_slow0((vm_offset_t)base, sizeof(u_char), true, subyte_slow0,
376 &byte) != 0 ? -1 : 0);
380 suword16_slow0(vm_offset_t kva, void *arg)
383 *(int *)kva = *(uint16_t *)arg;
/*
 * Store a 16-bit word to user memory.  Returns 0 on success, -1 on fault
 * or out-of-range address.
 */
387 suword16(volatile void *base, int word)
/* Reject addresses that wrap or lie outside the user address space. */
390 if ((uintptr_t)base + sizeof(uint16_t) < (uintptr_t)base ||
391 (uintptr_t)base + sizeof(uint16_t) > VM_MAXUSER_ADDRESS)
393 if (fast_copyout && suword16_fast_tramp(base, word, pmap_get_kcr3())
/* Fast path failed; fall back to the page-hold slow path. */
396 return (cp_slow0((vm_offset_t)base, sizeof(int16_t), true,
397 suword16_slow0, &word) != 0 ? -1 : 0);
/*
 * cp_slow0() callback for suword(): store a word at the mapped user
 * address.  The int/uint32_t punning of the long-typed argument is exact
 * on i386, where both int and long are 32 bits.
 */
401 suword_slow0(vm_offset_t kva, void *arg)
404 *(int *)kva = *(uint32_t *)arg;
/*
 * Store a word (long) to user memory.  Returns 0 on success, -1 on fault
 * or out-of-range address.
 */
408 suword(volatile void *base, long word)
/* Reject addresses that wrap or lie outside the user address space. */
411 if ((uintptr_t)base + sizeof(word) < (uintptr_t)base ||
412 (uintptr_t)base + sizeof(word) > VM_MAXUSER_ADDRESS)
414 if (fast_copyout && suword_fast_tramp(base, word, pmap_get_kcr3()) == 0)
/* Fast path failed; fall back to the page-hold slow path. */
416 return (cp_slow0((vm_offset_t)base, sizeof(long), true,
417 suword_slow0, &word) != 0 ? -1 : 0);
/*
 * 32-bit variant of suword().  On i386 long is 32 bits, so delegating to
 * suword() stores exactly 32 bits.
 */
421 suword32(volatile void *base, int32_t word)
424 return (suword(base, word));
/* State shared between casueword()/casueword32() and casueword_slow0(). */
427 struct casueword_arg0 {
/*
 * cp_slow0() callback for casueword*(): attempt an atomic compare-and-set
 * on the mapped user word.  atomic_fcmpset_int() returns non-zero on
 * success, so ca->res is 0 on success and 1 on comparison failure
 * (ca->oldval is updated with the value actually found).
 */
434 casueword_slow0(vm_offset_t kva, void *arg)
436 struct casueword_arg0 *ca;
439 ca->res = 1 - atomic_fcmpset_int((u_int *)kva, &ca->oldval,
/*
 * Compare-and-set a 32-bit word in user memory: if *base == oldval,
 * replace it with newval; the value previously found is stored via
 * oldvalp.  NOTE(review): the fast-path attempt and the bounds check are
 * on lines not visible in this excerpt; the slow path below returns -1 on
 * fault, otherwise the result of the atomic comparison.
 */
444 casueword32(volatile uint32_t *base, uint32_t oldval, uint32_t *oldvalp,
447 struct casueword_arg0 ca;
452 res = cp_slow0((vm_offset_t)base, sizeof(int32_t), true,
453 casueword_slow0, &ca);
455 *oldvalp = ca.oldval;
/*
 * Long-sized variant of casueword32().  On i386 u_long is 32 bits, so
 * the sizeof(int32_t) length passed to cp_slow0() and the int-sized
 * atomic in casueword_slow0() cover the whole word.
 */
462 casueword(volatile u_long *base, u_long oldval, u_long *oldvalp, u_long newval)
464 struct casueword_arg0 ca;
469 res = cp_slow0((vm_offset_t)base, sizeof(int32_t), true,
470 casueword_slow0, &ca);
472 *oldvalp = ca.oldval;