/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Copyright (c) 2014 The FreeBSD Foundation
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_subr.c	8.3 (Berkeley) 1/21/94
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_map.h>

#include <machine/bus.h>
SYSCTL_INT(_kern, KERN_IOV_MAX, iov_max, CTLFLAG_RD, SYSCTL_NULL_INT_PTR,
    UIO_MAXIOV,
    "Maximum number of elements in an I/O vector; sysconf(_SC_IOV_MAX)");

static int uiomove_faultflag(void *cp, int n, struct uio *uio, int nofault);
/*
 * Variants of copyin()/copyout() that return EFAULT rather than sleeping
 * to service a page fault.
 */
int
copyin_nofault(const void *udaddr, void *kaddr, size_t len)
{
	int error, save;

	save = vm_fault_disable_pagefaults();
	error = copyin(udaddr, kaddr, len);
	vm_fault_enable_pagefaults(save);
	return (error);
}

int
copyout_nofault(const void *kaddr, void *udaddr, size_t len)
{
	int error, save;

	save = vm_fault_disable_pagefaults();
	error = copyout(kaddr, udaddr, len);
	vm_fault_enable_pagefaults(save);
	return (error);
}
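/*
 * Usage sketch (not part of this file): the *_nofault variants are intended
 * for contexts that must not sleep on a page fault, e.g. while a
 * non-sleepable lock is held.  A common pattern is to attempt the copy under
 * the lock and retry with the ordinary, faulting copy after dropping it.
 * The names sc_mtx, sc_cmd and uaddr below are hypothetical.
 */
#if 0
	mtx_lock(&sc_mtx);
	error = copyin_nofault(uaddr, &sc_cmd, sizeof(sc_cmd));
	mtx_unlock(&sc_mtx);
	if (error != 0)
		error = copyin(uaddr, &sc_cmd, sizeof(sc_cmd));	/* may sleep */
#endif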
#define	PHYS_PAGE_COUNT(len)	(howmany(len, PAGE_SIZE) + 1)

/*
 * Copy a kernel virtual buffer to a physical address range.
 */
int
physcopyin(void *src, vm_paddr_t dst, size_t len)
{
	vm_page_t m[PHYS_PAGE_COUNT(len)];
	struct iovec iov[1];
	struct uio uio;
	int i;

	iov[0].iov_base = src;
	iov[0].iov_len = len;
	uio.uio_iov = iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = len;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	for (i = 0; i < PHYS_PAGE_COUNT(len); i++, dst += PAGE_SIZE)
		m[i] = PHYS_TO_VM_PAGE(dst);
	return (uiomove_fromphys(m, dst & PAGE_MASK, len, &uio));
}
/*
 * Copy a physical address range to a kernel virtual buffer.
 */
int
physcopyout(vm_paddr_t src, void *dst, size_t len)
{
	vm_page_t m[PHYS_PAGE_COUNT(len)];
	struct iovec iov[1];
	struct uio uio;
	int i;

	iov[0].iov_base = dst;
	iov[0].iov_len = len;
	uio.uio_iov = iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = len;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	for (i = 0; i < PHYS_PAGE_COUNT(len); i++, src += PAGE_SIZE)
		m[i] = PHYS_TO_VM_PAGE(src);
	return (uiomove_fromphys(m, src & PAGE_MASK, len, &uio));
}

#undef PHYS_PAGE_COUNT
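/*
 * Usage sketch (not part of this file): physcopyin()/physcopyout() copy
 * between a kernel virtual buffer and a physical address range that need not
 * have a kernel mapping, e.g. a descriptor page shared with a device or
 * guest.  The names req, resp and desc_paddr are hypothetical.
 */
#if 0
	struct some_request req;
	struct some_response resp;

	error = physcopyin(&req, desc_paddr, sizeof(req));
	if (error == 0)
		error = physcopyout(desc_paddr + sizeof(req), &resp,
		    sizeof(resp));
#endif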
/*
 * Copy len bytes, starting at offset, from a virtually addressed segment
 * list into a physical address range.
 */
int
physcopyin_vlist(bus_dma_segment_t *src, off_t offset, vm_paddr_t dst,
    size_t len)
{
	size_t seg_len;
	int error;

	error = 0;

	/* Skip fully consumed leading segments. */
	while (offset >= src->ds_len) {
		offset -= src->ds_len;
		src++;
	}

	while (len > 0 && error == 0) {
		seg_len = MIN(src->ds_len - offset, len);
		error = physcopyin((void *)(uintptr_t)(src->ds_addr + offset),
		    dst, seg_len);
		offset = 0;
		src++;
		len -= seg_len;
		dst += seg_len;
	}

	return (error);
}
/*
 * Copy len bytes from a physical address range into a virtually addressed
 * segment list, starting at offset.
 */
int
physcopyout_vlist(vm_paddr_t src, bus_dma_segment_t *dst, off_t offset,
    size_t len)
{
	size_t seg_len;
	int error;

	error = 0;

	/* Skip fully consumed leading segments. */
	while (offset >= dst->ds_len) {
		offset -= dst->ds_len;
		dst++;
	}

	while (len > 0 && error == 0) {
		seg_len = MIN(dst->ds_len - offset, len);
		error = physcopyout(src, (void *)(uintptr_t)(dst->ds_addr +
		    offset), seg_len);
		offset = 0;
		dst++;
		len -= seg_len;
		src += seg_len;
	}

	return (error);
}
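/*
 * Usage sketch (not part of this file): the _vlist variants walk a
 * bus_dma_segment_t array whose ds_addr members hold kernel virtual
 * addresses, as the casts above show.  The buffers, lengths and src_paddr
 * below are hypothetical.
 */
#if 0
	bus_dma_segment_t segs[2];

	segs[0].ds_addr = (bus_addr_t)(uintptr_t)buf0;	/* kernel VA */
	segs[0].ds_len = len0;
	segs[1].ds_addr = (bus_addr_t)(uintptr_t)buf1;
	segs[1].ds_len = len1;
	error = physcopyout_vlist(src_paddr, segs, 0, len0 + len1);
#endif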
int
uiomove(void *cp, int n, struct uio *uio)
{

	return (uiomove_faultflag(cp, n, uio, 0));
}

int
uiomove_nofault(void *cp, int n, struct uio *uio)
{

	return (uiomove_faultflag(cp, n, uio, 1));
}
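/*
 * Usage sketch (not part of this file): a typical d_read routine copies out
 * of a kernel buffer and lets uiomove() honor uio_offset and uio_resid while
 * advancing the uio.  The softc, its buffer and the cdevsw entry are
 * hypothetical.
 */
#if 0
static int
foo_read(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct foo_softc *sc = dev->si_drv1;
	size_t len;

	if (uio->uio_offset < 0 || uio->uio_offset >= sc->buf_len)
		return (0);
	len = MIN(uio->uio_resid, sc->buf_len - uio->uio_offset);
	return (uiomove(sc->buf + uio->uio_offset, len, uio));
}
#endif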
static int
uiomove_faultflag(void *cp, int n, struct uio *uio, int nofault)
{
	struct iovec *iov;
	size_t cnt;
	int error, newflags, save;

	error = 0;
	save = 0;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomove: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("uiomove proc"));
	KASSERT(uio->uio_resid >= 0,
	    ("%s: uio %p resid underflow", __func__, uio));

	if (uio->uio_segflg == UIO_USERSPACE) {
		newflags = TDP_DEADLKTREAT;
		if (nofault) {
			/*
			 * Fail if a non-spurious page fault occurs.
			 */
			newflags |= TDP_NOFAULTING | TDP_RESETSPUR;
		} else {
			WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
			    "Calling uiomove()");
		}
		save = curthread_pflags_set(newflags);
	} else {
		KASSERT(nofault == 0, ("uiomove: nofault"));
	}

	while (n > 0 && uio->uio_resid) {
		KASSERT(uio->uio_iovcnt > 0,
		    ("%s: uio %p iovcnt underflow", __func__, uio));

		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		switch (uio->uio_segflg) {
		case UIO_USERSPACE:
			maybe_yield();
			if (uio->uio_rw == UIO_READ)
				error = copyout(cp, iov->iov_base, cnt);
			else
				error = copyin(iov->iov_base, cp, cnt);
			if (error != 0)
				goto out;
			break;
		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy(cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, cp, cnt);
			break;
		case UIO_NOCOPY:
			break;
		}
		iov->iov_base = (char *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp = (char *)cp + cnt;
		n -= cnt;
	}
out:
	if (save != 0)
		curthread_pflags_restore(save);
	return (error);
}
/*
 * Wrapper for uiomove() that validates the arguments against a known-good
 * kernel buffer.  Currently, uiomove accepts a signed (n) argument, which
 * is almost definitely a bad thing, so we catch that here as well.  We
 * return a runtime failure, but it might be desirable to generate a runtime
 * assertion failure instead.
 */
int
uiomove_frombuf(void *buf, int buflen, struct uio *uio)
{
	size_t offset, n;

	if (uio->uio_offset < 0 || uio->uio_resid < 0 ||
	    (offset = uio->uio_offset) != uio->uio_offset)
		return (EINVAL);
	if (buflen <= 0 || offset >= buflen)
		return (0);
	if ((n = buflen - offset) > IOSIZE_MAX)
		return (EINVAL);
	return (uiomove((char *)buf + offset, n, uio));
}
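/*
 * Usage sketch (not part of this file): with uiomove_frombuf() the
 * offset/length validation from the earlier sketch collapses into a single
 * call against a known-good kernel buffer.  status_buf is hypothetical.
 */
#if 0
static int
foo_status_read(struct cdev *dev, struct uio *uio, int ioflag)
{

	return (uiomove_frombuf(status_buf, sizeof(status_buf), uio));
}
#endif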
/*
 * Give next character to user as result of read.
 */
int
ureadc(int c, struct uio *uio)
{
	struct iovec *iov;
	char *iov_base;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "Calling ureadc()");

again:
	if (uio->uio_iovcnt == 0 || uio->uio_resid == 0)
		panic("ureadc");
	iov = uio->uio_iov;
	if (iov->iov_len == 0) {
		uio->uio_iovcnt--;
		uio->uio_iov++;
		goto again;
	}
	switch (uio->uio_segflg) {
	case UIO_USERSPACE:
		if (subyte(iov->iov_base, c) < 0)
			return (EFAULT);
		break;
	case UIO_SYSSPACE:
		iov_base = iov->iov_base;
		*iov_base = c;
		break;
	case UIO_NOCOPY:
		break;
	}
	iov->iov_base = (char *)iov->iov_base + 1;
	iov->iov_len--;
	uio->uio_resid--;
	uio->uio_offset++;
	return (0);
}
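/*
 * Usage sketch (not part of this file): ureadc() hands characters to a
 * reader one at a time, which suits drivers that produce output
 * incrementally.  next_char() and sc are hypothetical; note that ureadc()
 * must not be called with an exhausted uio.
 */
#if 0
	while (uio->uio_resid > 0 && error == 0 &&
	    (c = next_char(sc)) != -1)
		error = ureadc(c, uio);
#endif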
int
copyiniov(const struct iovec *iovp, u_int iovcnt, struct iovec **iov, int error)
{
	u_int iovlen;

	*iov = NULL;
	if (iovcnt > UIO_MAXIOV)
		return (error);
	iovlen = iovcnt * sizeof(struct iovec);
	*iov = malloc(iovlen, M_IOV, M_WAITOK);
	error = copyin(iovp, *iov, iovlen);
	if (error != 0) {
		free(*iov, M_IOV);
		*iov = NULL;
	}
	return (error);
}
int
copyinuio(const struct iovec *iovp, u_int iovcnt, struct uio **uiop)
{
	struct iovec *iov;
	struct uio *uio;
	u_int iovlen;
	int error, i;

	*uiop = NULL;
	if (iovcnt > UIO_MAXIOV)
		return (EINVAL);
	iovlen = iovcnt * sizeof(struct iovec);
	uio = malloc(iovlen + sizeof(*uio), M_IOV, M_WAITOK);
	iov = (struct iovec *)(uio + 1);
	error = copyin(iovp, iov, iovlen);
	if (error != 0) {
		free(uio, M_IOV);
		return (error);
	}
	uio->uio_iov = iov;
	uio->uio_iovcnt = iovcnt;
	uio->uio_segflg = UIO_USERSPACE;
	uio->uio_offset = -1;
	uio->uio_resid = 0;
	for (i = 0; i < iovcnt; i++) {
		if (iov->iov_len > IOSIZE_MAX - uio->uio_resid) {
			free(uio, M_IOV);
			return (EINVAL);
		}
		uio->uio_resid += iov->iov_len;
		iov++;
	}
	*uiop = uio;
	return (0);
}
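/*
 * Usage sketch (not part of this file): readv()/writev()-style system calls
 * use copyinuio() to pull the user iovec array into a single allocation and
 * release it with free(..., M_IOV) when the I/O is done.  The uap fields and
 * the consumer of the uio below are illustrative, not prescribed by this
 * file.
 */
#if 0
	struct uio *auio;
	int error;

	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error != 0)
		return (error);
	error = kern_readv(td, uap->fd, auio);	/* or another uio consumer */
	free(auio, M_IOV);
#endif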
struct uio *
cloneuio(struct uio *uiop)
{
	struct uio *uio;
	int iovlen;

	iovlen = uiop->uio_iovcnt * sizeof(struct iovec);
	uio = malloc(iovlen + sizeof(*uio), M_IOV, M_WAITOK);
	*uio = *uiop;
	uio->uio_iov = (struct iovec *)(uio + 1);
	bcopy(uiop->uio_iov, uio->uio_iov, iovlen);
	return (uio);
}
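/*
 * Usage sketch (not part of this file): cloneuio() is useful when an I/O
 * path may consume the uio and then need to retry from the original state;
 * the clone is released with free(..., M_IOV).  try_fast_path() and
 * slow_path() are hypothetical.
 */
#if 0
	struct uio *uio_clone;

	uio_clone = cloneuio(uio);
	error = try_fast_path(uio);		/* may consume uio */
	if (error == EAGAIN)
		error = slow_path(uio_clone);	/* retry from original state */
	free(uio_clone, M_IOV);
#endif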
/*
 * Map some anonymous memory in user space of size sz, rounded up to the page
 * boundary.
 */
int
copyout_map(struct thread *td, vm_offset_t *addr, size_t sz)
{
	struct vmspace *vms;
	int error;
	vm_size_t size;

	vms = td->td_proc->p_vmspace;

	/*
	 * Map somewhere after heap in process memory.
	 */
	*addr = round_page((vm_offset_t)vms->vm_daddr +
	    lim_max(td, RLIMIT_DATA));

	/* round size up to page boundary */
	size = (vm_size_t)round_page(sz);

	error = vm_mmap_object(&vms->vm_map, addr, size, VM_PROT_READ |
	    VM_PROT_WRITE, VM_PROT_ALL, MAP_PRIVATE | MAP_ANON, NULL, 0,
	    FALSE, td);

	return (error);
}
/*
 * Unmap memory in user space.
 */
int
copyout_unmap(struct thread *td, vm_offset_t addr, size_t sz)
{
	vm_map_t map;
	vm_size_t size;

	if (sz == 0)
		return (0);

	map = &td->td_proc->p_vmspace->vm_map;
	size = (vm_size_t)round_page(sz);

	if (vm_map_remove(map, addr, addr + size) != KERN_SUCCESS)
		return (EINVAL);

	return (0);
}
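/*
 * Usage sketch (not part of this file): copyout_map()/copyout_unmap() let
 * kernel code hand a freshly mapped anonymous region to the current process,
 * fill it, and tear it down again on failure.  kbuf and len are
 * hypothetical.
 */
#if 0
	vm_offset_t uaddr;

	error = copyout_map(td, &uaddr, len);
	if (error == 0) {
		error = copyout(kbuf, (void *)uaddr, len);
		if (error != 0)
			(void)copyout_unmap(td, uaddr, len);
	}
#endif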
/*
 * Compatibility wrappers around the fueword*()/casueword*() primitives.
 * These return -1 both on fault and when the fetched word happens to be -1,
 * so new code should prefer the fueword*()/casueword*() forms.
 */
int32_t
fuword32(volatile const void *addr)
{
	int rv;
	int32_t val;

	rv = fueword32(addr, &val);
	return (rv == -1 ? -1 : val);
}

int64_t
fuword64(volatile const void *addr)
{
	int rv;
	int64_t val;

	rv = fueword64(addr, &val);
	return (rv == -1 ? -1 : val);
}

long
fuword(volatile const void *addr)
{
	long val;
	int rv;

	rv = fueword(addr, &val);
	return (rv == -1 ? -1 : val);
}

uint32_t
casuword32(volatile uint32_t *addr, uint32_t old, uint32_t new)
{
	int rv;
	uint32_t val;

	rv = casueword32(addr, old, &val, new);
	return (rv == -1 ? -1 : val);
}

u_long
casuword(volatile u_long *addr, u_long old, u_long new)
{
	int rv;
	u_long val;

	rv = casueword(addr, old, &val, new);
	return (rv == -1 ? -1 : val);
}
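/*
 * Usage sketch (not part of this file): the fueword*()/casueword*() forms
 * report a fault through the return value and store the fetched word
 * separately, so a legitimately stored -1 is not mistaken for an error.
 * uaddr is hypothetical.
 */
#if 0
	long val;

	if (fueword(uaddr, &val) == -1)
		return (EFAULT);	/* fault, distinct from a stored -1 */
	/* use val ... */
#endif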