/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Copyright (c) 2014 The FreeBSD Foundation
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_map.h>

#include <machine/bus.h>

SYSCTL_INT(_kern, KERN_IOV_MAX, iov_max, CTLFLAG_RD, SYSCTL_NULL_INT_PTR, UIO_MAXIOV,
    "Maximum number of elements in an I/O vector; sysconf(_SC_IOV_MAX)");

static int uiomove_faultflag(void *cp, int n, struct uio *uio, int nofault);

int
copyin_nofault(const void *udaddr, void *kaddr, size_t len)
{
	int error, save;

	save = vm_fault_disable_pagefaults();
	error = copyin(udaddr, kaddr, len);
	vm_fault_enable_pagefaults(save);
	return (error);
}

int
copyout_nofault(const void *kaddr, void *udaddr, size_t len)
{
	int error, save;

	save = vm_fault_disable_pagefaults();
	error = copyout(kaddr, udaddr, len);
	vm_fault_enable_pagefaults(save);
	return (error);
}

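/*
 * Usage sketch (illustrative, not part of the original file): the _nofault
 * variants are for contexts where sleeping to service a page fault is not
 * allowed, e.g. while a non-sleepable lock is held.  They fail (typically
 * with EFAULT) instead of faulting, so callers usually retry with the
 * ordinary copyin()/copyout() once it is safe to sleep.  "sc", its mutex
 * and "struct foo_stats" are hypothetical names.
 *
 *	mtx_lock(&sc->sc_mtx);
 *	error = copyout_nofault(&sc->sc_stats, uaddr, sizeof(sc->sc_stats));
 *	mtx_unlock(&sc->sc_mtx);
 *	if (error != 0) {
 *		struct foo_stats tmp;
 *
 *		mtx_lock(&sc->sc_mtx);
 *		tmp = sc->sc_stats;
 *		mtx_unlock(&sc->sc_mtx);
 *		error = copyout(&tmp, uaddr, sizeof(tmp));
 *	}
 */
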
#define	PHYS_PAGE_COUNT(len)	(howmany(len, PAGE_SIZE) + 1)

int
physcopyin(void *src, vm_paddr_t dst, size_t len)
{
	vm_page_t m[PHYS_PAGE_COUNT(len)];
	struct iovec iov[1];
	struct uio uio;
	int i;

	iov[0].iov_base = src;
	iov[0].iov_len = len;
	uio.uio_iov = iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = len;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	for (i = 0; i < PHYS_PAGE_COUNT(len); i++, dst += PAGE_SIZE)
		m[i] = PHYS_TO_VM_PAGE(dst);
	return (uiomove_fromphys(m, dst & PAGE_MASK, len, &uio));
}

int
physcopyout(vm_paddr_t src, void *dst, size_t len)
{
	vm_page_t m[PHYS_PAGE_COUNT(len)];
	struct iovec iov[1];
	struct uio uio;
	int i;

	iov[0].iov_base = dst;
	iov[0].iov_len = len;
	uio.uio_iov = iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = len;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	for (i = 0; i < PHYS_PAGE_COUNT(len); i++, src += PAGE_SIZE)
		m[i] = PHYS_TO_VM_PAGE(src);
	return (uiomove_fromphys(m, src & PAGE_MASK, len, &uio));
}

#undef PHYS_PAGE_COUNT

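/*
 * Usage sketch (illustrative, not part of the original file): these helpers
 * copy between a kernel virtual buffer and a physical address that need not
 * be mapped into KVA, by way of uiomove_fromphys() on the backing vm_page
 * structures.  "desc", "paddr" and RD_OWNED below are hypothetical.
 *
 *	struct ring_desc desc;
 *	int error;
 *
 *	error = physcopyout(paddr, &desc, sizeof(desc));
 *	if (error == 0) {
 *		desc.flags |= RD_OWNED;
 *		error = physcopyin(&desc, paddr, sizeof(desc));
 *	}
 */
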
int
physcopyin_vlist(bus_dma_segment_t *src, off_t offset, vm_paddr_t dst,
    size_t len)
{
	size_t seg_len;
	int error;

	error = 0;
	while (offset >= src->ds_len) {
		offset -= src->ds_len;
		src++;
	}

	while (len > 0 && error == 0) {
		seg_len = MIN(src->ds_len - offset, len);
		error = physcopyin((void *)(uintptr_t)(src->ds_addr + offset),
		    dst, seg_len);
		offset = 0;
		src++;
		len -= seg_len;
		dst += seg_len;
	}

	return (error);
}

int
physcopyout_vlist(vm_paddr_t src, bus_dma_segment_t *dst, off_t offset,
    size_t len)
{
	size_t seg_len;
	int error;

	error = 0;
	while (offset >= dst->ds_len) {
		offset -= dst->ds_len;
		dst++;
	}

	while (len > 0 && error == 0) {
		seg_len = MIN(dst->ds_len - offset, len);
		error = physcopyout(src, (void *)(uintptr_t)(dst->ds_addr +
		    offset), seg_len);
		offset = 0;
		dst++;
		len -= seg_len;
		src += seg_len;
	}

	return (error);
}

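/*
 * Note (editorial): both _vlist helpers first walk the segment array to
 * consume "offset" bytes, then copy MIN(bytes left in the segment, bytes
 * left to copy) per iteration, so a single request may span several
 * segments.  A caller such as a storage target emulator might use them as
 * below; "segs", "skip", "payload_pa" and "xferlen" are hypothetical.
 *
 *	error = physcopyout_vlist(payload_pa, segs, skip, xferlen);
 */
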
int
uiomove(void *cp, int n, struct uio *uio)
{

	return (uiomove_faultflag(cp, n, uio, 0));
}

int
uiomove_nofault(void *cp, int n, struct uio *uio)
{

	return (uiomove_faultflag(cp, n, uio, 1));
}

static int
uiomove_faultflag(void *cp, int n, struct uio *uio, int nofault)
{
	struct iovec *iov;
	size_t cnt;
	int error, newflags, save;

	save = error = 0;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomove: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("uiomove proc"));
	KASSERT(uio->uio_resid >= 0,
	    ("%s: uio %p resid underflow", __func__, uio));

	if (uio->uio_segflg == UIO_USERSPACE) {
		newflags = TDP_DEADLKTREAT;
		if (nofault) {
			/*
			 * Fail if a non-spurious page fault occurs.
			 */
			newflags |= TDP_NOFAULTING | TDP_RESETSPUR;
		} else {
			WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
			    "Calling uiomove()");
		}
		save = curthread_pflags_set(newflags);
	} else {
		KASSERT(nofault == 0, ("uiomove: nofault"));
	}

	while (n > 0 && uio->uio_resid) {
		KASSERT(uio->uio_iovcnt > 0,
		    ("%s: uio %p iovcnt underflow", __func__, uio));

		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		switch (uio->uio_segflg) {
		case UIO_USERSPACE:
			maybe_yield();
			if (uio->uio_rw == UIO_READ)
				error = copyout(cp, iov->iov_base, cnt);
			else
				error = copyin(iov->iov_base, cp, cnt);
			if (error)
				goto out;
			break;

		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy(cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, cp, cnt);
			break;

		case UIO_NOCOPY:
			break;
		}
		iov->iov_base = (char *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp = (char *)cp + cnt;
		n -= cnt;
	}
out:
	if (uio->uio_segflg == UIO_USERSPACE)
		curthread_pflags_restore(save);
	return (error);
}

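/*
 * Usage sketch (illustrative, not part of the original file): a typical
 * character-device read routine hands uiomove() the kernel address and
 * length that correspond to the caller's current uio_offset and lets it
 * advance the iovec, resid and offset bookkeeping.  "struct foo_softc",
 * "sc_buf" and "sc_len" are hypothetical names.
 *
 *	static int
 *	foo_read(struct cdev *dev, struct uio *uio, int ioflag)
 *	{
 *		struct foo_softc *sc = dev->si_drv1;
 *		size_t off, todo;
 *
 *		if (uio->uio_offset < 0 || uio->uio_offset >= sc->sc_len)
 *			return (0);
 *		off = uio->uio_offset;
 *		todo = MIN(uio->uio_resid, sc->sc_len - off);
 *		return (uiomove(sc->sc_buf + off, todo, uio));
 *	}
 */
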
/*
 * Wrapper for uiomove() that validates the arguments against a known-good
 * kernel buffer.  Currently, uiomove accepts a signed (n) argument, which
 * is almost definitely a bad thing, so we catch that here as well.  We
 * return a runtime failure, but it might be desirable to generate a runtime
 * assertion failure instead.
 */
int
uiomove_frombuf(void *buf, int buflen, struct uio *uio)
{
	size_t offset, n;

	if (uio->uio_offset < 0 || uio->uio_resid < 0 ||
	    (offset = uio->uio_offset) != uio->uio_offset)
		return (EINVAL);
	if (buflen <= 0 || offset >= buflen)
		return (0);
	if ((n = buflen - offset) > IOSIZE_MAX)
		return (EINVAL);
	return (uiomove((char *)buf + offset, n, uio));
}

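/*
 * Usage sketch (illustrative): uiomove_frombuf() subsumes the offset and
 * length clamping that read routines for fixed-size kernel buffers otherwise
 * do by hand, so the hypothetical handler sketched above reduces to a single
 * call; "sc", "sc_buf" and "sc_len" remain hypothetical.
 *
 *	static int
 *	foo_read(struct cdev *dev, struct uio *uio, int ioflag)
 *	{
 *		struct foo_softc *sc = dev->si_drv1;
 *
 *		return (uiomove_frombuf(sc->sc_buf, sc->sc_len, uio));
 *	}
 */
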
/*
 * Give next character to user as result of read.
 */
int
ureadc(int c, struct uio *uio)
{
	struct iovec *iov;
	char *iov_base;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "Calling ureadc()");

again:
	if (uio->uio_iovcnt == 0 || uio->uio_resid == 0)
		panic("ureadc");
	iov = uio->uio_iov;
	if (iov->iov_len == 0) {
		uio->uio_iovcnt--;
		uio->uio_iov++;
		goto again;
	}
	switch (uio->uio_segflg) {
	case UIO_USERSPACE:
		if (subyte(iov->iov_base, c) < 0)
			return (EFAULT);
		break;

	case UIO_SYSSPACE:
		iov_base = iov->iov_base;
		*iov_base = c;
		break;

	case UIO_NOCOPY:
		break;
	}
	iov->iov_base = (char *)iov->iov_base + 1;
	iov->iov_len--;
	uio->uio_resid--;
	uio->uio_offset++;
	return (0);
}

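/*
 * Usage sketch (illustrative): ureadc() is convenient when data is produced
 * one byte at a time rather than from a contiguous buffer, e.g. a
 * pseudo-device emitting a computed stream; "next_byte()" and "sc" are
 * hypothetical.
 *
 *	while (uio->uio_resid > 0) {
 *		error = ureadc(next_byte(sc), uio);
 *		if (error != 0)
 *			break;
 *	}
 */
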
int
copyiniov(const struct iovec *iovp, u_int iovcnt, struct iovec **iov, int error)
{
	u_int iovlen;

	*iov = NULL;
	if (iovcnt > UIO_MAXIOV)
		return (error);
	iovlen = iovcnt * sizeof (struct iovec);
	*iov = malloc(iovlen, M_IOV, M_WAITOK);
	error = copyin(iovp, *iov, iovlen);
	if (error) {
		free(*iov, M_IOV);
		*iov = NULL;
	}
	return (error);
}

int
copyinuio(const struct iovec *iovp, u_int iovcnt, struct uio **uiop)
{
	struct iovec *iov;
	struct uio *uio;
	u_int iovlen;
	int error, i;

	*uiop = NULL;
	if (iovcnt > UIO_MAXIOV)
		return (EINVAL);
	iovlen = iovcnt * sizeof (struct iovec);
	uio = malloc(iovlen + sizeof *uio, M_IOV, M_WAITOK);
	iov = (struct iovec *)(uio + 1);
	error = copyin(iovp, iov, iovlen);
	if (error) {
		free(uio, M_IOV);
		return (error);
	}
	uio->uio_iov = iov;
	uio->uio_iovcnt = iovcnt;
	uio->uio_segflg = UIO_USERSPACE;
	uio->uio_offset = -1;
	uio->uio_resid = 0;
	for (i = 0; i < iovcnt; i++) {
		if (iov->iov_len > IOSIZE_MAX - uio->uio_resid) {
			free(uio, M_IOV);
			return (EINVAL);
		}
		uio->uio_resid += iov->iov_len;
		iov++;
	}
	*uiop = uio;
	return (0);
}

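/*
 * Usage sketch (illustrative, not part of the original file): a readv(2)-like
 * system call builds its uio with copyinuio(), fills in the fields left to
 * the caller (at least uio_rw and uio_td), and frees the combined
 * uio-plus-iovec allocation with free(..., M_IOV) when done.
 *
 *	struct uio *auio;
 *	int error;
 *
 *	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
 *	if (error != 0)
 *		return (error);
 *	auio->uio_rw = UIO_READ;
 *	auio->uio_td = td;
 *	error = kern_readv(td, uap->fd, auio);
 *	free(auio, M_IOV);
 *	return (error);
 */
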
struct uio *
cloneuio(struct uio *uiop)
{
	struct uio *uio;
	int iovlen;

	iovlen = uiop->uio_iovcnt * sizeof (struct iovec);
	uio = malloc(iovlen + sizeof *uio, M_IOV, M_WAITOK);
	*uio = *uiop;
	uio->uio_iov = (struct iovec *)(uio + 1);
	bcopy(uiop->uio_iov, uio->uio_iov, iovlen);
	return (uio);
}

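/*
 * Usage sketch (illustrative): because uiomove() consumes the iovec array in
 * place, code that may need to retry an operation clones the uio first and
 * frees the clone with M_IOV; "first_attempt"/"second_attempt" and "sc" are
 * hypothetical consumers.
 *
 *	struct uio *tmp;
 *
 *	tmp = cloneuio(uio);
 *	error = first_attempt(sc, tmp);
 *	free(tmp, M_IOV);
 *	if (error == EAGAIN)
 *		error = second_attempt(sc, uio);
 */
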
/*
 * Map some anonymous memory in user space of size sz, rounded up to the page
 * boundary.
 */
int
copyout_map(struct thread *td, vm_offset_t *addr, size_t sz)
{
	struct vmspace *vms;
	int error;
	vm_size_t size;

	vms = td->td_proc->p_vmspace;

	/*
	 * Map somewhere after heap in process memory.
	 */
	*addr = round_page((vm_offset_t)vms->vm_daddr +
	    lim_max(td, RLIMIT_DATA));

	/* round size up to page boundary */
	size = (vm_size_t)round_page(sz);

	error = vm_mmap_object(&vms->vm_map, addr, size, VM_PROT_READ |
	    VM_PROT_WRITE, VM_PROT_ALL, MAP_PRIVATE | MAP_ANON, NULL, 0,
	    FALSE, td);

	return (error);
}

/*
 * Unmap memory in user space.
 */
int
copyout_unmap(struct thread *td, vm_offset_t addr, size_t sz)
{
	vm_map_t map;
	vm_size_t size;

	if (sz == 0)
		return (0);

	map = &td->td_proc->p_vmspace->vm_map;
	size = (vm_size_t)round_page(sz);

	if (vm_map_remove(map, addr, addr + size) != KERN_SUCCESS)
		return (EINVAL);

	return (0);
}

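/*
 * Usage sketch (illustrative): copyout_map()/copyout_unmap() stage a
 * temporary anonymous mapping in the target process, e.g. to hold data that
 * must live in user address space for the duration of a call; "kbuf" and
 * "len" are hypothetical.
 *
 *	vm_offset_t uaddr;
 *
 *	error = copyout_map(td, &uaddr, len);
 *	if (error == 0) {
 *		error = copyout(kbuf, (void *)uaddr, len);
 *		if (error != 0)
 *			(void)copyout_unmap(td, uaddr, len);
 *	}
 */
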
int32_t
fuword32(volatile const void *addr)
{
	int rv;
	int32_t val;

	rv = fueword32(addr, &val);
	return (rv == -1 ? -1 : val);
}

#ifdef _LP64
int64_t
fuword64(volatile const void *addr)
{
	int rv;
	int64_t val;

	rv = fueword64(addr, &val);
	return (rv == -1 ? -1 : val);
}
#endif /* _LP64 */

long
fuword(volatile const void *addr)
{
	long val;
	int rv;

	rv = fueword(addr, &val);
	return (rv == -1 ? -1 : val);
}

uint32_t
casuword32(volatile uint32_t *addr, uint32_t old, uint32_t new)
{
	int rv;
	uint32_t val;

	rv = casueword32(addr, old, &val, new);
	return (rv == -1 ? -1 : val);
}

u_long
casuword(volatile u_long *addr, u_long old, u_long new)
{
	int rv;
	u_long val;

	rv = casueword(addr, old, &val, new);
	return (rv == -1 ? -1 : val);
}
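
/*
 * Usage note (editorial): the fuword*() and casuword*() wrappers collapse
 * "fault" and "the user word happened to be (u_long)-1" into the same -1
 * return value, so callers that care about the difference should use the
 * fueword*()/casueword*() interfaces, which report the fault separately
 * from the fetched word.
 *
 *	long val;
 *
 *	if (fueword(uaddr, &val) == -1)
 *		return (EFAULT);	(fault; val is not valid)
 *	(otherwise val holds the fetched word, which may itself be -1)
 */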