2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2018 The FreeBSD Foundation
6 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
7 * under sponsorship from the FreeBSD Foundation.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 #include <sys/cdefs.h>
32 #include <sys/param.h>
34 #include <sys/mutex.h>
37 #include <sys/sched.h>
38 #include <sys/sysctl.h>
39 #include <sys/systm.h>
41 #include <vm/vm_param.h>
42 #include <vm/vm_extern.h>
44 #include <vm/vm_map.h>
45 #include <vm/vm_page.h>
/*
 * Assembly fast-path entry points (the *_fast routines, defined
 * elsewhere) and matching trampoline function pointers.  Each pointer is
 * set in copyout_init_tramp() to the assembly symbol biased by
 * setidt_disp, and every fast routine takes the kernel %cr3 value
 * (from pmap_get_kcr3() at the call sites) as its last argument.
 * A fast routine returns 0 on success; non-zero falls back to the
 * page-hold slow path below.
 */
47 int copyin_fast(const void *udaddr, void *kaddr, size_t len, u_int);
48 static int (*copyin_fast_tramp)(const void *, void *, size_t, u_int);
49 int copyout_fast(const void *kaddr, void *udaddr, size_t len, u_int);
50 static int (*copyout_fast_tramp)(const void *, void *, size_t, u_int);
51 int fubyte_fast(volatile const void *base, u_int kcr3);
52 static int (*fubyte_fast_tramp)(volatile const void *, u_int);
53 int fuword16_fast(volatile const void *base, u_int kcr3);
54 static int (*fuword16_fast_tramp)(volatile const void *, u_int);
55 int fueword_fast(volatile const void *base, long *val, u_int kcr3);
56 static int (*fueword_fast_tramp)(volatile const void *, long *, u_int);
57 int subyte_fast(volatile void *base, int val, u_int kcr3);
58 static int (*subyte_fast_tramp)(volatile void *, int, u_int);
59 int suword16_fast(volatile void *base, int val, u_int kcr3);
60 static int (*suword16_fast_tramp)(volatile void *, int, u_int);
61 int suword_fast(volatile void *base, long val, u_int kcr3);
62 static int (*suword_fast_tramp)(volatile void *, long, u_int);
/* Tunable/sysctl: non-zero enables the fast trampoline copy paths. */
64 static int fast_copyout = 1;
65 SYSCTL_INT(_machdep, OID_AUTO, fast_copyout, CTLFLAG_RWTUN,
/*
 * Initialize every *_fast_tramp pointer to the corresponding assembly
 * *_fast entry relocated by setidt_disp, so the fast routines are
 * invoked through their trampoline mapping.  NOTE(review): presumably
 * run once early in boot (registration, e.g. SYSINIT, not visible in
 * this chunk) — confirm against the full file.
 */
70 copyout_init_tramp(void)
73 	copyin_fast_tramp = (int (*)(const void *, void *, size_t, u_int))(
74 	    (uintptr_t)copyin_fast + setidt_disp);
75 	copyout_fast_tramp = (int (*)(const void *, void *, size_t, u_int))(
76 	    (uintptr_t)copyout_fast + setidt_disp);
77 	fubyte_fast_tramp = (int (*)(volatile const void *, u_int))(
78 	    (uintptr_t)fubyte_fast + setidt_disp);
79 	fuword16_fast_tramp = (int (*)(volatile const void *, u_int))(
80 	    (uintptr_t)fuword16_fast + setidt_disp);
81 	fueword_fast_tramp = (int (*)(volatile const void *, long *, u_int))(
82 	    (uintptr_t)fueword_fast + setidt_disp);
83 	subyte_fast_tramp = (int (*)(volatile void *, int, u_int))(
84 	    (uintptr_t)subyte_fast + setidt_disp);
85 	suword16_fast_tramp = (int (*)(volatile void *, int, u_int))(
86 	    (uintptr_t)suword16_fast + setidt_disp);
87 	suword_fast_tramp = (int (*)(volatile void *, long, u_int))(
88 	    (uintptr_t)suword_fast + setidt_disp);
/*
 * Generic slow-path engine: wire the user pages backing [uva, uva+len),
 * map them at a per-CPU kernel address, and run callback f(kva, arg)
 * on the mapping.  'write' selects the required user page protection.
 * Returns non-zero (EFAULT at the callers) when the pages cannot be
 * held.
 */
92 cp_slow0(vm_offset_t uva, size_t len, bool write,
93     void (*f)(vm_offset_t, void *), void *arg)
/* Number of user pages spanned, accounting for the sub-page offset. */
101 	plen = howmany(uva - trunc_page(uva) + len, PAGE_SIZE);
102 	MPASS(plen <= nitems(m));
/* Fault in and hold the user pages; i is the count actually held. */
104 	i = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map, uva, len,
105 	    (write ? VM_PROT_WRITE : VM_PROT_READ) | VM_PROT_QUICK_NOFAULT,
/*
 * Choose the per-CPU mapping window: a mutex-protected one when this
 * thread must not sleep (no-sleep context, vslocked pages, or
 * TDP_NOFAULTING), otherwise the sx-protected (sleepable) window.
 */
111 	if (!THREAD_CAN_SLEEP() || curthread->td_vslock_sz > 0 ||
112 	    (curthread->td_pflags & TDP_NOFAULTING) != 0) {
114 		mtx_lock(&pc->pc_copyout_mlock);
115 		kaddr = pc->pc_copyout_maddr;
118 		sx_xlock(&pc->pc_copyout_slock);
119 		kaddr = pc->pc_copyout_saddr;
/* Enter the held pages into the chosen window, then re-add the
 * sub-page offset so kaddr aliases uva exactly. */
121 	pmap_cp_slow0_map(kaddr, plen, m);
122 	kaddr += uva - trunc_page(uva);
/* Release whichever window lock was taken, then unhold the pages. */
126 	sx_xunlock(&pc->pc_copyout_slock);
128 	mtx_unlock(&pc->pc_copyout_mlock);
129 	vm_page_unhold_pages(m, plen);
/*
 * Per-chunk state for copyinstr(): kc is the kernel destination cursor,
 * len the chunk length, alen the bytes consumed so far, end the
 * NUL-terminator-seen flag (field list partially outside this chunk).
 */
133 struct copyinstr_arg0 {
/*
 * cp_slow0() callback: scan the mapped chunk byte by byte until the
 * chunk is exhausted or a NUL is found.  NOTE(review): the store to the
 * destination and the setting of ca->end are on lines not visible in
 * this chunk — verify against the full file.
 */
141 copyinstr_slow0(vm_offset_t kva, void *arg)
143 	struct copyinstr_arg0 *ca;
147 	MPASS(ca->alen == 0 && ca->len > 0 && !ca->end);
148 	while (ca->alen < ca->len && !ca->end) {
149 		c = *(char *)(kva + ca->alen);
/*
 * Copy a NUL-terminated string from user space, at most maxlen bytes,
 * walking the user address page by page (each chunk ends at the next
 * page boundary) through the cp_slow0() machinery.  On success the
 * copied length (including the NUL) is stored via lencopied when it is
 * non-NULL.  Returns ENAMETOOLONG if maxlen is exhausted before a NUL,
 * EFAULT (set on the invisible lines) on a bad user address.
 */
159 copyinstr(const void *udaddr, void *kaddr, size_t maxlen, size_t *lencopied)
161 	struct copyinstr_arg0 ca;
/* Advance by ca.alen — the bytes the callback actually consumed. */
168 	for (plen = 0, uc = (vm_offset_t)udaddr, ca.kc = (vm_offset_t)kaddr;
169 	    plen < maxlen && !ca.end; uc += ca.alen, plen += ca.alen) {
/* Chunk runs to the next page boundary, clamped to maxlen. */
170 		ca.len = round_page(uc) - uc;
173 		if (plen + ca.len > maxlen)
174 			ca.len = maxlen - plen;
176 		if (cp_slow0(uc, ca.len, false, copyinstr_slow0, &ca) != 0) {
/* Buffer filled without seeing the terminator. */
181 	if (!ca.end && plen == maxlen && error == 0)
182 		error = ENAMETOOLONG;
183 	if (lencopied != NULL)
/*
 * cp_slow0() callback for copyin(): copy ca->len bytes from the mapped
 * user chunk at kva to the kernel destination cursor ca->kc.
 */
194 copyin_slow0(vm_offset_t kva, void *arg)
196 	struct copyin_arg0 *ca;
199 	bcopy((void *)kva, (void *)ca->kc, ca->len);
/*
 * Copy len bytes from user address udaddr to kernel address kaddr.
 * Rejects ranges that wrap or extend past VM_MAXUSER_ADDRESS, tries the
 * fast trampoline for small (<= TRAMP_COPYOUT_SZ) copies, and otherwise
 * falls back to the page-by-page cp_slow0() path.  Returns 0 or EFAULT
 * (EFAULT return on lines not visible in this chunk).
 */
203 copyin(const void *udaddr, void *kaddr, size_t len)
205 	struct copyin_arg0 ca;
/* Overflow/range check on the user address span. */
209 	if ((uintptr_t)udaddr + len < (uintptr_t)udaddr ||
210 	    (uintptr_t)udaddr + len > VM_MAXUSER_ADDRESS)
/* Trivial or fast-path success short-circuits the slow loop. */
212 	if (len == 0 || (fast_copyout && len <= TRAMP_COPYOUT_SZ &&
213 	    copyin_fast_tramp(udaddr, kaddr, len, pmap_get_kcr3()) == 0))
215 	for (plen = 0, uc = (vm_offset_t)udaddr, ca.kc = (vm_offset_t)kaddr;
216 	    plen < len; uc += ca.len, ca.kc += ca.len, plen += ca.len) {
/* Chunk runs to the next page boundary, clamped to len. */
217 		ca.len = round_page(uc) - uc;
220 		if (plen + ca.len > len)
222 		if (cp_slow0(uc, ca.len, false, copyin_slow0, &ca) != 0)
/*
 * cp_slow0() callback for copyout(): copy ca->len bytes from the kernel
 * source cursor ca->kc into the mapped user chunk at kva.  Reuses
 * struct copyin_arg0 with the roles of the cursors swapped.
 */
229 copyout_slow0(vm_offset_t kva, void *arg)
231 	struct copyin_arg0 *ca;
234 	bcopy((void *)ca->kc, (void *)kva, ca->len);
/*
 * Copy len bytes from kernel address kaddr to user address udaddr.
 * Mirrors copyin(): range check, fast trampoline attempt for small
 * copies, then the page-by-page slow path with write access to the
 * held user pages.  Returns 0 or EFAULT (returns on invisible lines).
 */
238 copyout(const void *kaddr, void *udaddr, size_t len)
240 	struct copyin_arg0 ca;
/* Overflow/range check on the user destination span. */
244 	if ((uintptr_t)udaddr + len < (uintptr_t)udaddr ||
245 	    (uintptr_t)udaddr + len > VM_MAXUSER_ADDRESS)
247 	if (len == 0 || (fast_copyout && len <= TRAMP_COPYOUT_SZ &&
248 	    copyout_fast_tramp(kaddr, udaddr, len, pmap_get_kcr3()) == 0))
250 	for (plen = 0, uc = (vm_offset_t)udaddr, ca.kc = (vm_offset_t)kaddr;
251 	    plen < len; uc += ca.len, ca.kc += ca.len, plen += ca.len) {
252 		ca.len = round_page(uc) - uc;
255 		if (plen + ca.len > len)
/* 'true': the user pages must be writable. */
257 		if (cp_slow0(uc, ca.len, true, copyout_slow0, &ca) != 0)
264 * Fetch (load) a 32-bit word, a 16-bit word, or an 8-bit byte from user memory.
/*
 * cp_slow0() callback for fubyte(): zero-extend the byte at the mapped
 * user address into the int result slot (*arg).
 */
269 fubyte_slow0(vm_offset_t kva, void *arg)
272 	*(int *)arg = *(u_char *)kva;
/*
 * Fetch one byte from user space; returns the byte value or -1 on
 * fault.  Range-checks the address, tries the fast trampoline, then
 * falls back to the page-hold slow path.
 */
276 fubyte(volatile const void *base)
/* Reject wrap-around and addresses beyond the user VA range. */
280 	if ((uintptr_t)base + sizeof(uint8_t) < (uintptr_t)base ||
281 	    (uintptr_t)base + sizeof(uint8_t) > VM_MAXUSER_ADDRESS)
284 		res = fubyte_fast_tramp(base, pmap_get_kcr3());
288 	if (cp_slow0((vm_offset_t)base, sizeof(char), false, fubyte_slow0,
/*
 * cp_slow0() callback for fuword16(): zero-extend the 16-bit word at
 * the mapped user address into the int result slot (*arg).
 */
295 fuword16_slow0(vm_offset_t kva, void *arg)
298 	*(int *)arg = *(uint16_t *)kva;
/*
 * Fetch a 16-bit word from user space; returns the value or -1 on
 * fault.  Same structure as fubyte(): range check, fast trampoline,
 * slow-path fallback.
 */
302 fuword16(volatile const void *base)
306 	if ((uintptr_t)base + sizeof(uint16_t) < (uintptr_t)base ||
307 	    (uintptr_t)base + sizeof(uint16_t) > VM_MAXUSER_ADDRESS)
310 		res = fuword16_fast_tramp(base, pmap_get_kcr3());
314 	if (cp_slow0((vm_offset_t)base, sizeof(uint16_t), false,
315 	    fuword16_slow0, &res) != 0)
/*
 * cp_slow0() callback for fueword(): copy the 32-bit word at the mapped
 * user address into the result slot.  The uint32_t access of the long *
 * destination is valid because i386 long is 32 bits.
 */
321 fueword_slow0(vm_offset_t kva, void *arg)
324 	*(uint32_t *)arg = *(uint32_t *)kva;
/*
 * Fetch a word from user space into *val, returning 0 on success or -1
 * on fault — unlike fuword(), the value and the error indication are
 * separate, so any bit pattern can be fetched.
 */
328 fueword(volatile const void *base, long *val)
332 	if ((uintptr_t)base + sizeof(*val) < (uintptr_t)base ||
333 	    (uintptr_t)base + sizeof(*val) > VM_MAXUSER_ADDRESS)
336 	if (fueword_fast_tramp(base, val, pmap_get_kcr3()) == 0)
339 	if (cp_slow0((vm_offset_t)base, sizeof(long), false, fueword_slow0,
/*
 * 32-bit variant of fueword().  On i386 long is 32 bits, so the long *
 * cast of the int32_t pointer is exact, not a truncation.
 */
347 fueword32(volatile const void *base, int32_t *val)
350 	return (fueword(base, (long *)val));
354 * Store a 32-bit word, a 16-bit word, or an 8-bit byte to user memory.
/*
 * cp_slow0() callback for subyte(): store the low byte of the int value
 * (*arg) at the mapped user address.
 */
358 subyte_slow0(vm_offset_t kva, void *arg)
361 	*(u_char *)kva = *(int *)arg;
/*
 * Store one byte to user space; returns 0 on success, -1 on fault.
 * Range check, fast trampoline when enabled, then the write-mode
 * page-hold slow path.
 */
365 subyte(volatile void *base, int byte)
368 	if ((uintptr_t)base + sizeof(uint8_t) < (uintptr_t)base ||
369 	    (uintptr_t)base + sizeof(uint8_t) > VM_MAXUSER_ADDRESS)
371 	if (fast_copyout && subyte_fast_tramp(base, byte, pmap_get_kcr3()) == 0)
373 	return (cp_slow0((vm_offset_t)base, sizeof(u_char), true, subyte_slow0,
374 	    &byte) != 0 ? -1 : 0);
378 suword16_slow0(vm_offset_t kva, void *arg)
381 *(int *)kva = *(uint16_t *)arg;
/*
 * Store a 16-bit word to user space; returns 0 on success, -1 on
 * fault.  Range check, fast trampoline when enabled, then the
 * write-mode slow path.  The int argument is narrowed to 16 bits by
 * the slow-path callback.
 */
385 suword16(volatile void *base, int word)
388 	if ((uintptr_t)base + sizeof(uint16_t) < (uintptr_t)base ||
389 	    (uintptr_t)base + sizeof(uint16_t) > VM_MAXUSER_ADDRESS)
391 	if (fast_copyout && suword16_fast_tramp(base, word, pmap_get_kcr3())
394 	return (cp_slow0((vm_offset_t)base, sizeof(int16_t), true,
395 	    suword16_slow0, &word) != 0 ? -1 : 0);
/*
 * cp_slow0() callback for suword(): store the word value at the mapped
 * user address.  The int-wide store and the uint32_t read of the
 * long * argument are both exact only because i386 int and long are
 * 32 bits.
 */
399 suword_slow0(vm_offset_t kva, void *arg)
402 	*(int *)kva = *(uint32_t *)arg;
/*
 * Store a machine word (long) to user space; returns 0 on success, -1
 * on fault.  Range check, fast trampoline when enabled, then the
 * write-mode slow path.
 */
406 suword(volatile void *base, long word)
409 	if ((uintptr_t)base + sizeof(word) < (uintptr_t)base ||
410 	    (uintptr_t)base + sizeof(word) > VM_MAXUSER_ADDRESS)
412 	if (fast_copyout && suword_fast_tramp(base, word, pmap_get_kcr3()) == 0)
414 	return (cp_slow0((vm_offset_t)base, sizeof(long), true,
415 	    suword_slow0, &word) != 0 ? -1 : 0);
/*
 * 32-bit variant of suword().  Exact on i386 where long is 32 bits.
 */
419 suword32(volatile void *base, int32_t word)
422 	return (suword(base, word));
/*
 * State for the casueword*() slow path: oldval is the expected value
 * (updated in place with the current value on mismatch), res the
 * comparison outcome (field list partially outside this chunk).
 */
425 struct casueword_arg0 {
/*
 * cp_slow0() callback: perform the compare-and-set on the mapped user
 * word.  atomic_fcmpset_int() returns non-zero on success, so res ends
 * up 0 on success and 1 on compare failure; on failure ca->oldval is
 * refreshed with the value found.
 */
432 casueword_slow0(vm_offset_t kva, void *arg)
434 	struct casueword_arg0 *ca;
437 	ca->res = 1 - atomic_fcmpset_int((u_int *)kva, &ca->oldval,
/*
 * Compare-and-set a 32-bit word in user space: if *base == oldval,
 * store the new value; the value found is reported via *oldvalp.
 * Uses the write-mode page-hold path with an atomic op on the kernel
 * alias mapping.
 */
442 casueword32(volatile uint32_t *base, uint32_t oldval, uint32_t *oldvalp,
445 	struct casueword_arg0 ca;
450 	res = cp_slow0((vm_offset_t)base, sizeof(int32_t), true,
451 	    casueword_slow0, &ca);
453 	*oldvalp = ca.oldval;
/*
 * Word-sized (u_long) compare-and-set in user space.  Shares the
 * 32-bit slow-path helper: sizeof(int32_t) and the u_int atomic are
 * exact only because i386 u_long is 32 bits.
 */
460 casueword(volatile u_long *base, u_long oldval, u_long *oldvalp, u_long newval)
462 	struct casueword_arg0 ca;
467 	res = cp_slow0((vm_offset_t)base, sizeof(int32_t), true,
468 	    casueword_slow0, &ca);
470 	*oldvalp = ca.oldval;