2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2018 The FreeBSD Foundation
6 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
7 * under sponsorship from the FreeBSD Foundation.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
31 #include <sys/param.h>
33 #include <sys/mutex.h>
36 #include <sys/sched.h>
37 #include <sys/sysctl.h>
38 #include <sys/systm.h>
40 #include <vm/vm_param.h>
41 #include <vm/vm_extern.h>
43 #include <vm/vm_map.h>
44 #include <vm/vm_page.h>
/*
 * Prototypes for the fast user-copy primitives (implemented elsewhere,
 * presumably in assembler -- confirm), each paired with a static function
 * pointer holding the routine's address relocated by setidt_disp.  The
 * pointers are filled in by copyout_init_tramp() below; the 'kcr3'
 * argument is obtained from pmap_get_kcr3() at the call sites.
 */
46 int copyin_fast(const void *udaddr, void *kaddr, size_t len, u_int);
47 static int (*copyin_fast_tramp)(const void *, void *, size_t, u_int);
48 int copyout_fast(const void *kaddr, void *udaddr, size_t len, u_int);
49 static int (*copyout_fast_tramp)(const void *, void *, size_t, u_int);
50 int fubyte_fast(volatile const void *base, u_int kcr3);
51 static int (*fubyte_fast_tramp)(volatile const void *, u_int);
52 int fuword16_fast(volatile const void *base, u_int kcr3);
53 static int (*fuword16_fast_tramp)(volatile const void *, u_int);
54 int fueword_fast(volatile const void *base, long *val, u_int kcr3);
55 static int (*fueword_fast_tramp)(volatile const void *, long *, u_int);
56 int subyte_fast(volatile void *base, int val, u_int kcr3);
57 static int (*subyte_fast_tramp)(volatile void *, int, u_int);
58 int suword16_fast(volatile void *base, int val, u_int kcr3);
59 static int (*suword16_fast_tramp)(volatile void *, int, u_int);
60 int suword_fast(volatile void *base, long val, u_int kcr3);
61 static int (*suword_fast_tramp)(volatile void *, long, u_int);
/*
 * Run-time knob gating the fast (trampoline) copy path; checked before
 * calling the *_fast_tramp pointers in copyin()/copyout() and friends.
 */
63 static int fast_copyout = 1;
/* NOTE(review): the SYSCTL_INT invocation appears truncated in this excerpt. */
64 SYSCTL_INT(_machdep, OID_AUTO, fast_copyout, CTLFLAG_RWTUN,
/*
 * Initialize the *_fast_tramp function pointers: each is the linked
 * address of the corresponding fast routine offset by setidt_disp.
 * NOTE(review): setidt_disp is presumably the displacement of the
 * trampoline mapping -- confirm against the machdep sources.  The
 * function header and braces are missing from this excerpt.
 */
69 copyout_init_tramp(void)
72 copyin_fast_tramp = (int (*)(const void *, void *, size_t, u_int))(
73 (uintptr_t)copyin_fast + setidt_disp);
74 copyout_fast_tramp = (int (*)(const void *, void *, size_t, u_int))(
75 (uintptr_t)copyout_fast + setidt_disp);
76 fubyte_fast_tramp = (int (*)(volatile const void *, u_int))(
77 (uintptr_t)fubyte_fast + setidt_disp);
78 fuword16_fast_tramp = (int (*)(volatile const void *, u_int))(
79 (uintptr_t)fuword16_fast + setidt_disp);
80 fueword_fast_tramp = (int (*)(volatile const void *, long *, u_int))(
81 (uintptr_t)fueword_fast + setidt_disp);
82 subyte_fast_tramp = (int (*)(volatile void *, int, u_int))(
83 (uintptr_t)subyte_fast + setidt_disp);
84 suword16_fast_tramp = (int (*)(volatile void *, int, u_int))(
85 (uintptr_t)suword16_fast + setidt_disp);
86 suword_fast_tramp = (int (*)(volatile void *, long, u_int))(
87 (uintptr_t)suword_fast + setidt_disp);
/*
 * Generic slow path for accessing user memory: hold the user pages
 * covering [uva, uva + len) via vm_fault_quick_hold_pages(), map them
 * at a per-CPU kernel address, run the callback 'f' on that mapping,
 * then unlock and release the held pages.  Two per-CPU addresses exist:
 * the mutex-protected pc_copyout_maddr for contexts that may not sleep
 * (no THREAD_CAN_SLEEP, vslocked, or TDP_NOFAULTING), and the
 * sx-protected pc_copyout_saddr otherwise.
 * NOTE(review): several lines of this function (declarations, error
 * checks, braces) are missing from this excerpt.
 */
91 cp_slow0(vm_offset_t uva, size_t len, bool write,
92 void (*f)(vm_offset_t, void *), void *arg)
/* Page count includes the offset of uva within its first page. */
100 plen = howmany(uva - trunc_page(uva) + len, PAGE_SIZE);
101 MPASS(plen <= nitems(m));
103 i = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map, uva, len,
104 (write ? VM_PROT_WRITE : VM_PROT_READ) | VM_PROT_QUICK_NOFAULT,
/* Non-sleepable context: use the mutex-protected per-CPU window. */
110 if (!THREAD_CAN_SLEEP() || curthread->td_vslock_sz > 0 ||
111 (curthread->td_pflags & TDP_NOFAULTING) != 0) {
113 mtx_lock(&pc->pc_copyout_mlock);
114 kaddr = pc->pc_copyout_maddr;
117 sx_xlock(&pc->pc_copyout_slock);
118 kaddr = pc->pc_copyout_saddr;
120 pmap_cp_slow0_map(kaddr, plen, m);
/* Re-apply the sub-page offset so kaddr mirrors uva exactly. */
121 kaddr += uva - trunc_page(uva);
125 sx_xunlock(&pc->pc_copyout_slock);
127 mtx_unlock(&pc->pc_copyout_mlock);
128 vm_page_unhold_pages(m, plen);
/*
 * Argument block for copyinstr_slow0(); fields (kc destination cursor,
 * len, alen, end flag, error) are not all visible in this excerpt.
 */
132 struct copyinstr_arg0 {
/*
 * Per-chunk worker for copyinstr(): copy bytes from the mapped user
 * chunk at 'kva' until either ca->len bytes are consumed or a NUL is
 * seen (which sets ca->end).  Tail of the loop body is missing from
 * this excerpt.
 */
140 copyinstr_slow0(vm_offset_t kva, void *arg)
142 struct copyinstr_arg0 *ca;
146 MPASS(ca->alen == 0 && ca->len > 0 && !ca->end);
147 while (ca->alen < ca->len && !ca->end) {
148 c = *(char *)(kva + ca->alen);
/*
 * Copy a NUL-terminated string from user space, at most maxlen bytes,
 * chunked page-by-page through cp_slow0()/copyinstr_slow0().  Returns
 * ENAMETOOLONG when maxlen is exhausted before the NUL; stores the
 * copied length (including NUL) through lencopied when non-NULL.
 * NOTE(review): some lines are missing from this excerpt.
 */
158 copyinstr(const void *udaddr, void *kaddr, size_t maxlen, size_t *lencopied)
160 struct copyinstr_arg0 ca;
167 for (plen = 0, uc = (vm_offset_t)udaddr, ca.kc = (vm_offset_t)kaddr;
168 plen < maxlen && !ca.end; uc += ca.alen, plen += ca.alen) {
/* Process at most up to the next page boundary per iteration. */
169 ca.len = round_page(uc) - uc;
172 if (plen + ca.len > maxlen)
173 ca.len = maxlen - plen;
175 if (cp_slow0(uc, ca.len, false, copyinstr_slow0, &ca) != 0) {
180 if (!ca.end && plen == maxlen && error == 0)
181 error = ENAMETOOLONG;
182 if (lencopied != NULL)
/*
 * Per-chunk worker for copyin(): copy ca->len bytes from the mapped
 * user chunk at 'kva' to the kernel cursor ca->kc.
 */
193 copyin_slow0(vm_offset_t kva, void *arg)
195 struct copyin_arg0 *ca;
198 bcopy((void *)kva, (void *)ca->kc, ca->len);
/*
 * Copy len bytes from user address udaddr to kernel address kaddr.
 * Rejects ranges that wrap or extend past VM_MAXUSER_ADDRESS; tries the
 * fast trampoline path first (when enabled and len <= TRAMP_COPYOUT_SZ),
 * then falls back to page-at-a-time cp_slow0()/copyin_slow0().
 * NOTE(review): some lines (returns, braces) are missing from this
 * excerpt.
 */
202 copyin(const void *udaddr, void *kaddr, size_t len)
204 struct copyin_arg0 ca;
/* Overflow/range check on the user address range. */
208 if ((uintptr_t)udaddr + len < (uintptr_t)udaddr ||
209 (uintptr_t)udaddr + len > VM_MAXUSER_ADDRESS)
211 if (len == 0 || (fast_copyout && len <= TRAMP_COPYOUT_SZ &&
212 copyin_fast_tramp(udaddr, kaddr, len, pmap_get_kcr3()) == 0))
214 for (plen = 0, uc = (vm_offset_t)udaddr, ca.kc = (vm_offset_t)kaddr;
215 plen < len; uc += ca.len, ca.kc += ca.len, plen += ca.len) {
216 ca.len = round_page(uc) - uc;
219 if (plen + ca.len > len)
221 if (cp_slow0(uc, ca.len, false, copyin_slow0, &ca) != 0)
/*
 * Per-chunk worker for copyout(): copy ca->len bytes from the kernel
 * cursor ca->kc into the mapped user chunk at 'kva'.
 */
228 copyout_slow0(vm_offset_t kva, void *arg)
230 struct copyin_arg0 *ca;
233 bcopy((void *)ca->kc, (void *)kva, ca->len);
/*
 * Copy len bytes from kernel address kaddr to user address udaddr.
 * Mirror image of copyin(): range check, fast trampoline attempt, then
 * page-at-a-time slow path with write access (cp_slow0(..., true, ...)).
 * NOTE(review): some lines (returns, braces) are missing from this
 * excerpt.
 */
237 copyout(const void *kaddr, void *udaddr, size_t len)
239 struct copyin_arg0 ca;
243 if ((uintptr_t)udaddr + len < (uintptr_t)udaddr ||
244 (uintptr_t)udaddr + len > VM_MAXUSER_ADDRESS)
246 if (len == 0 || (fast_copyout && len <= TRAMP_COPYOUT_SZ &&
247 copyout_fast_tramp(kaddr, udaddr, len, pmap_get_kcr3()) == 0))
249 for (plen = 0, uc = (vm_offset_t)udaddr, ca.kc = (vm_offset_t)kaddr;
250 plen < len; uc += ca.len, ca.kc += ca.len, plen += ca.len) {
251 ca.len = round_page(uc) - uc;
254 if (plen + ca.len > len)
256 if (cp_slow0(uc, ca.len, true, copyout_slow0, &ca) != 0)
263 * Fetch (load) a 32-bit word, a 16-bit word, or an 8-bit byte from user
/* Slow-path fetch for fubyte(): load one byte, widen into *(int *)arg. */
268 fubyte_slow0(vm_offset_t kva, void *arg)
271 *(int *)arg = *(u_char *)kva;
/*
 * Fetch one byte from user space; returns the byte value or -1 on
 * fault/out-of-range.  Fast trampoline attempt first, then the
 * cp_slow0() path.  NOTE(review): the fast_copyout gate and returns
 * are missing from this excerpt.
 */
275 fubyte(volatile const void *base)
279 if ((uintptr_t)base + sizeof(uint8_t) < (uintptr_t)base ||
280 (uintptr_t)base + sizeof(uint8_t) > VM_MAXUSER_ADDRESS)
283 res = fubyte_fast_tramp(base, pmap_get_kcr3());
287 if (cp_slow0((vm_offset_t)base, sizeof(char), false, fubyte_slow0,
/* Slow-path fetch for fuword16(): load 16 bits, widen into *(int *)arg. */
294 fuword16_slow0(vm_offset_t kva, void *arg)
297 *(int *)arg = *(uint16_t *)kva;
/*
 * Fetch a 16-bit word from user space; returns the value or -1 on
 * fault/out-of-range.  Same fast/slow structure as fubyte().
 * NOTE(review): the fast_copyout gate and returns are missing from this
 * excerpt.
 */
301 fuword16(volatile const void *base)
305 if ((uintptr_t)base + sizeof(uint16_t) < (uintptr_t)base ||
306 (uintptr_t)base + sizeof(uint16_t) > VM_MAXUSER_ADDRESS)
309 res = fuword16_fast_tramp(base, pmap_get_kcr3());
313 if (cp_slow0((vm_offset_t)base, sizeof(uint16_t), false,
314 fuword16_slow0, &res) != 0)
/*
 * Slow-path fetch for fueword(): 32-bit load/store through 'arg'.
 * 'arg' is the caller's long *; a 32-bit store fills it completely only
 * because long is 32 bits on i386 (ILP32) -- not portable as written.
 */
320 fueword_slow0(vm_offset_t kva, void *arg)
323 *(uint32_t *)arg = *(uint32_t *)kva;
/*
 * Fetch a long (32-bit on i386) from user space into *val; returns 0 on
 * success, -1 on fault/out-of-range.  Unlike fubyte()/fuword16(), the
 * fetched value is returned through *val, so -1 stays unambiguous.
 * NOTE(review): returns/braces are missing from this excerpt.
 */
327 fueword(volatile const void *base, long *val)
331 if ((uintptr_t)base + sizeof(*val) < (uintptr_t)base ||
332 (uintptr_t)base + sizeof(*val) > VM_MAXUSER_ADDRESS)
335 if (fueword_fast_tramp(base, val, pmap_get_kcr3()) == 0)
338 if (cp_slow0((vm_offset_t)base, sizeof(long), false, fueword_slow0,
/*
 * 32-bit variant of fueword(); the cast is valid only because long and
 * int32_t have the same size and representation on i386.
 */
346 fueword32(volatile const void *base, int32_t *val)
349 return (fueword(base, (long *)val));
353 * Store a 32-bit word, a 16-bit word, or an 8-bit byte to user memory.
/* Slow-path store for subyte(): narrow *(int *)arg to one byte at kva. */
357 subyte_slow0(vm_offset_t kva, void *arg)
360 *(u_char *)kva = *(int *)arg;
/*
 * Store one byte to user space; returns 0 on success, -1 on
 * fault/out-of-range.  Fast trampoline attempt first (gated by
 * fast_copyout), then the write-mode cp_slow0() path.
 * NOTE(review): returns/braces are missing from this excerpt.
 */
364 subyte(volatile void *base, int byte)
367 if ((uintptr_t)base + sizeof(uint8_t) < (uintptr_t)base ||
368 (uintptr_t)base + sizeof(uint8_t) > VM_MAXUSER_ADDRESS)
370 if (fast_copyout && subyte_fast_tramp(base, byte, pmap_get_kcr3()) == 0)
372 return (cp_slow0((vm_offset_t)base, sizeof(u_char), true, subyte_slow0,
373 &byte) != 0 ? -1 : 0);
377 suword16_slow0(vm_offset_t kva, void *arg)
380 *(int *)kva = *(uint16_t *)arg;
/*
 * Store a 16-bit word to user space; returns 0 on success, -1 on
 * fault/out-of-range.  Same fast/slow structure as subyte().
 * NOTE(review): returns/braces are missing from this excerpt.
 */
384 suword16(volatile void *base, int word)
387 if ((uintptr_t)base + sizeof(uint16_t) < (uintptr_t)base ||
388 (uintptr_t)base + sizeof(uint16_t) > VM_MAXUSER_ADDRESS)
390 if (fast_copyout && suword16_fast_tramp(base, word, pmap_get_kcr3())
393 return (cp_slow0((vm_offset_t)base, sizeof(int16_t), true,
394 suword16_slow0, &word) != 0 ? -1 : 0);
/*
 * Slow-path store for suword(): int-sized store of the caller's long.
 * Correct only because int == long == 32 bits on i386 (ILP32).
 */
398 suword_slow0(vm_offset_t kva, void *arg)
401 *(int *)kva = *(uint32_t *)arg;
/*
 * Store a long (32-bit on i386) to user space; returns 0 on success,
 * -1 on fault/out-of-range.  Fast trampoline attempt first, then the
 * write-mode cp_slow0() path.
 * NOTE(review): returns/braces are missing from this excerpt.
 */
405 suword(volatile void *base, long word)
408 if ((uintptr_t)base + sizeof(word) < (uintptr_t)base ||
409 (uintptr_t)base + sizeof(word) > VM_MAXUSER_ADDRESS)
411 if (fast_copyout && suword_fast_tramp(base, word, pmap_get_kcr3()) == 0)
413 return (cp_slow0((vm_offset_t)base, sizeof(long), true,
414 suword_slow0, &word) != 0 ? -1 : 0);
/* 32-bit variant of suword(); int32_t promotes losslessly to long here. */
418 suword32(volatile void *base, int32_t word)
421 return (suword(base, word));
/*
 * Argument block for casueword_slow0(); fields (oldval, newval, res)
 * are not all visible in this excerpt.
 */
424 struct casueword_arg0 {
/*
 * Slow-path compare-and-set for casueword()/casueword32(): one
 * atomic_fcmpset_int attempt on the mapped user word; ca->res becomes 0
 * on success, 1 on comparison failure, and ca->oldval is updated with
 * the value read.
 */
431 casueword_slow0(vm_offset_t kva, void *arg)
433 struct casueword_arg0 *ca;
436 ca->res = 1 - atomic_fcmpset_int((u_int *)kva, &ca->oldval,
/*
 * Compare-and-set a 32-bit word in user space via the cp_slow0() path;
 * the previous value is returned through *oldvalp.
 * NOTE(review): setup of 'ca' and the return are missing from this
 * excerpt.
 */
441 casueword32(volatile uint32_t *base, uint32_t oldval, uint32_t *oldvalp,
444 struct casueword_arg0 ca;
449 res = cp_slow0((vm_offset_t)base, sizeof(int32_t), true,
450 casueword_slow0, &ca);
452 *oldvalp = ca.oldval;
/*
 * Long-sized compare-and-set in user space.  Uses sizeof(int32_t) and
 * the same 32-bit casueword_slow0() worker, which is valid only because
 * u_long is 32 bits on i386.
 * NOTE(review): this function is cut off at the end of the excerpt.
 */
459 casueword(volatile u_long *base, u_long oldval, u_long *oldvalp, u_long newval)
461 struct casueword_arg0 ca;
466 res = cp_slow0((vm_offset_t)base, sizeof(int32_t), true,
467 casueword_slow0, &ca);
469 *oldvalp = ca.oldval;