/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)sys_generic.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_capsicum.h"
#include "opt_compat.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/capsicum.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/socketvar.h>
#include <sys/uio.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/resourcevar.h>
#include <sys/selinfo.h>
#include <sys/sleepqueue.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/condvar.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <security/audit/audit.h>

#include <vm/uma.h>

/*
 * The following macros define how many bytes will be allocated on the
 * stack instead of with malloc(9) when passing ioctl data structures
 * between userspace and the kernel.  Some ioctls with small data
 * structures are issued very frequently, and this small on-stack
 * buffer gives a significant speedup for those requests.  The value
 * of this define should be greater than or equal to 64 bytes and
 * should also be a power of two.  The data structure is currently
 * hard-aligned to an 8-byte boundary on the stack, which should be
 * sufficient for all supported platforms.
 */
#define	SYS_IOCTL_SMALL_SIZE	128	/* bytes */
#define	SYS_IOCTL_SMALL_ALIGN	8	/* bytes */
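
/*
 * Illustration: FIONREAD carries a single int, far below
 * SYS_IOCTL_SMALL_SIZE, so sys_ioctl() below can serve it from the
 * on-stack "smalldata" buffer without ever calling malloc(9).
 */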

#ifdef __LP64__
static int iosize_max_clamp = 0;
SYSCTL_INT(_debug, OID_AUTO, iosize_max_clamp, CTLFLAG_RW,
    &iosize_max_clamp, 0, "Clamp max i/o size to INT_MAX");
static int devfs_iosize_max_clamp = 1;
SYSCTL_INT(_debug, OID_AUTO, devfs_iosize_max_clamp, CTLFLAG_RW,
    &devfs_iosize_max_clamp, 0, "Clamp max i/o size to INT_MAX for devices");
#endif

/*
 * Assert that the return value of read(2) and write(2) syscalls fits
 * into a register.  If not, an architecture will need to provide the
 * usermode wrappers to reconstruct the result.
 */
CTASSERT(sizeof(register_t) >= sizeof(size_t));

static MALLOC_DEFINE(M_IOCTLOPS, "ioctlops", "ioctl data buffer");
static MALLOC_DEFINE(M_SELECT, "select", "select() buffer");
MALLOC_DEFINE(M_IOV, "iov", "large iov's");

static int	pollout(struct thread *, struct pollfd *, struct pollfd *,
		    u_int);
static int	pollscan(struct thread *, struct pollfd *, u_int);
static int	pollrescan(struct thread *);
static int	selscan(struct thread *, fd_mask **, fd_mask **, int);
static int	selrescan(struct thread *, fd_mask **, fd_mask **);
static void	selfdalloc(struct thread *, void *);
static void	selfdfree(struct seltd *, struct selfd *);
static int	dofileread(struct thread *, int, struct file *, struct uio *,
		    off_t, int);
static int	dofilewrite(struct thread *, int, struct file *, struct uio *,
		    off_t, int);
static void	doselwakeup(struct selinfo *, int);
static void	seltdinit(struct thread *);
static int	seltdwait(struct thread *, sbintime_t, sbintime_t);
static void	seltdclear(struct thread *);

/*
 * One seltd per-thread allocated on demand as needed.
 *
 *	t - protected by st_mtx
 *	k - Only accessed by curthread or read-only
 */
struct seltd {
	STAILQ_HEAD(, selfd)	st_selq;	/* (k) List of selfds. */
	struct selfd		*st_free1;	/* (k) free fd for read set. */
	struct selfd		*st_free2;	/* (k) free fd for write set. */
	struct mtx		st_mtx;		/* Protects struct seltd */
	struct cv		st_wait;	/* (t) Wait channel. */
	int			st_flags;	/* (t) SELTD_ flags. */
};

#define	SELTD_PENDING	0x0001			/* We have pending events. */
#define	SELTD_RESCAN	0x0002			/* Doing a rescan. */

/*
 * One selfd allocated per-thread per-file-descriptor.
 *	f - protected by sf_mtx
 */
struct selfd {
	STAILQ_ENTRY(selfd)	sf_link;	/* (k) fds owned by this td. */
	TAILQ_ENTRY(selfd)	sf_threads;	/* (f) fds on this selinfo. */
	struct selinfo		*sf_si;		/* (f) selinfo when linked. */
	struct mtx		*sf_mtx;	/* Pointer to selinfo mtx. */
	struct seltd		*sf_td;		/* (k) owning seltd. */
	void			*sf_cookie;	/* (k) fd or pollfd. */
	u_int			sf_refs;	/* Reference count. */
};

static uma_zone_t selfd_zone;
static struct mtx_pool *mtxpool_select;

#ifdef __LP64__
static int
devfs_iosize_max(void)
{

	return (devfs_iosize_max_clamp || SV_CURPROC_FLAG(SV_ILP32) ?
	    INT_MAX : SSIZE_MAX);
}

static int
iosize_max(void)
{

	return (iosize_max_clamp || SV_CURPROC_FLAG(SV_ILP32) ?
	    INT_MAX : SSIZE_MAX);
}
#endif
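
/*
 * Illustration: with iosize_max_clamp left at 0 on LP64, IOSIZE_MAX is
 * SSIZE_MAX, so a single read(2) or write(2) may transfer more than
 * 2 GB in one call; 32-bit processes (SV_ILP32) are always clamped to
 * INT_MAX so the result fits their registers.
 */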

#ifndef _SYS_SYSPROTO_H_
struct read_args {
	int	fd;
	void	*buf;
	size_t	nbyte;
};
#endif
int
sys_read(struct thread *td, struct read_args *uap)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if (uap->nbyte > IOSIZE_MAX)
		return (EINVAL);
	aiov.iov_base = uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = uap->nbyte;
	auio.uio_segflg = UIO_USERSPACE;
	error = kern_readv(td, uap->fd, &auio);
	return (error);
}

/*
 * Positioned read system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct pread_args {
	int	fd;
	void	*buf;
	size_t	nbyte;
	off_t	offset;
};
#endif
int
sys_pread(struct thread *td, struct pread_args *uap)
{

	return (kern_pread(td, uap->fd, uap->buf, uap->nbyte, uap->offset));
}

int
kern_pread(struct thread *td, int fd, void *buf, size_t nbyte, off_t offset)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if (nbyte > IOSIZE_MAX)
		return (EINVAL);
	aiov.iov_base = buf;
	aiov.iov_len = nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = nbyte;
	auio.uio_segflg = UIO_USERSPACE;
	error = kern_preadv(td, fd, &auio, offset);
	return (error);
}

#if defined(COMPAT_FREEBSD6)
int
freebsd6_pread(struct thread *td, struct freebsd6_pread_args *uap)
{

	return (kern_pread(td, uap->fd, uap->buf, uap->nbyte, uap->offset));
}
#endif

/*
 * Scatter read system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct readv_args {
	int	fd;
	struct iovec *iovp;
	u_int	iovcnt;
};
#endif
int
sys_readv(struct thread *td, struct readv_args *uap)
{
	struct uio *auio;
	int error;

	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error)
		return (error);
	error = kern_readv(td, uap->fd, auio);
	free(auio, M_IOV);
	return (error);
}

int
kern_readv(struct thread *td, int fd, struct uio *auio)
{
	struct file *fp;
	cap_rights_t rights;
	int error;

	error = fget_read(td, fd, cap_rights_init(&rights, CAP_READ), &fp);
	if (error)
		return (error);
	error = dofileread(td, fd, fp, auio, (off_t)-1, 0);
	fdrop(fp, td);
	return (error);
}

/*
 * Scatter positioned read system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct preadv_args {
	int	fd;
	struct iovec *iovp;
	u_int	iovcnt;
	off_t	offset;
};
#endif
int
sys_preadv(struct thread *td, struct preadv_args *uap)
{
	struct uio *auio;
	int error;

	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error)
		return (error);
	error = kern_preadv(td, uap->fd, auio, uap->offset);
	free(auio, M_IOV);
	return (error);
}

int
kern_preadv(struct thread *td, int fd, struct uio *auio, off_t offset)
{
	struct file *fp;
	cap_rights_t rights;
	int error;

	error = fget_read(td, fd, cap_rights_init(&rights, CAP_PREAD), &fp);
	if (error)
		return (error);
	if (!(fp->f_ops->fo_flags & DFLAG_SEEKABLE))
		error = ESPIPE;
	else if (offset < 0 &&
	    (fp->f_vnode == NULL || fp->f_vnode->v_type != VCHR))
		error = EINVAL;
	else
		error = dofileread(td, fd, fp, auio, offset, FOF_OFFSET);
	fdrop(fp, td);
	return (error);
}

/*
 * Common code for readv and preadv that reads data in
 * from a file using the passed in uio, offset, and flags.
 */
static int
dofileread(struct thread *td, int fd, struct file *fp, struct uio *auio,
    off_t offset, int flags)
{
	ssize_t cnt;
	int error;
#ifdef KTRACE
	struct uio *ktruio = NULL;
#endif

	/* Finish zero length reads right here */
	if (auio->uio_resid == 0) {
		td->td_retval[0] = 0;
		return (0);
	}
	auio->uio_rw = UIO_READ;
	auio->uio_offset = offset;
	auio->uio_td = td;
#ifdef KTRACE
	if (KTRPOINT(td, KTR_GENIO))
		ktruio = cloneuio(auio);
#endif
	cnt = auio->uio_resid;
	if ((error = fo_read(fp, auio, td->td_ucred, flags, td))) {
		if (auio->uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
	}
	cnt -= auio->uio_resid;
#ifdef KTRACE
	if (ktruio != NULL) {
		ktruio->uio_resid = cnt;
		ktrgenio(fd, UIO_READ, ktruio, error);
	}
#endif
	td->td_retval[0] = cnt;
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct write_args {
	int	fd;
	const void *buf;
	size_t	nbyte;
};
#endif
int
sys_write(struct thread *td, struct write_args *uap)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if (uap->nbyte > IOSIZE_MAX)
		return (EINVAL);
	aiov.iov_base = (void *)(uintptr_t)uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = uap->nbyte;
	auio.uio_segflg = UIO_USERSPACE;
	error = kern_writev(td, uap->fd, &auio);
	return (error);
}

/*
 * Positioned write system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct pwrite_args {
	int	fd;
	const void *buf;
	size_t	nbyte;
	off_t	offset;
};
#endif
int
sys_pwrite(struct thread *td, struct pwrite_args *uap)
{

	return (kern_pwrite(td, uap->fd, uap->buf, uap->nbyte, uap->offset));
}

int
kern_pwrite(struct thread *td, int fd, const void *buf, size_t nbyte,
    off_t offset)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if (nbyte > IOSIZE_MAX)
		return (EINVAL);
	aiov.iov_base = (void *)(uintptr_t)buf;
	aiov.iov_len = nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = nbyte;
	auio.uio_segflg = UIO_USERSPACE;
	error = kern_pwritev(td, fd, &auio, offset);
	return (error);
}

#if defined(COMPAT_FREEBSD6)
int
freebsd6_pwrite(struct thread *td, struct freebsd6_pwrite_args *uap)
{

	return (kern_pwrite(td, uap->fd, uap->buf, uap->nbyte, uap->offset));
}
#endif

/*
 * Gather write system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct writev_args {
	int	fd;
	struct iovec *iovp;
	u_int	iovcnt;
};
#endif
int
sys_writev(struct thread *td, struct writev_args *uap)
{
	struct uio *auio;
	int error;

	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error)
		return (error);
	error = kern_writev(td, uap->fd, auio);
	free(auio, M_IOV);
	return (error);
}

int
kern_writev(struct thread *td, int fd, struct uio *auio)
{
	struct file *fp;
	cap_rights_t rights;
	int error;

	error = fget_write(td, fd, cap_rights_init(&rights, CAP_WRITE), &fp);
	if (error)
		return (error);
	error = dofilewrite(td, fd, fp, auio, (off_t)-1, 0);
	fdrop(fp, td);
	return (error);
}

/*
 * Gather positioned write system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct pwritev_args {
	int	fd;
	struct iovec *iovp;
	u_int	iovcnt;
	off_t	offset;
};
#endif
int
sys_pwritev(struct thread *td, struct pwritev_args *uap)
{
	struct uio *auio;
	int error;

	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error)
		return (error);
	error = kern_pwritev(td, uap->fd, auio, uap->offset);
	free(auio, M_IOV);
	return (error);
}

int
kern_pwritev(struct thread *td, int fd, struct uio *auio, off_t offset)
{
	struct file *fp;
	cap_rights_t rights;
	int error;

	error = fget_write(td, fd, cap_rights_init(&rights, CAP_PWRITE), &fp);
	if (error)
		return (error);
	if (!(fp->f_ops->fo_flags & DFLAG_SEEKABLE))
		error = ESPIPE;
	else if (offset < 0 &&
	    (fp->f_vnode == NULL || fp->f_vnode->v_type != VCHR))
		error = EINVAL;
	else
		error = dofilewrite(td, fd, fp, auio, offset, FOF_OFFSET);
	fdrop(fp, td);
	return (error);
}

/*
 * Common code for writev and pwritev that writes data to
 * a file using the passed in uio, offset, and flags.
 */
static int
dofilewrite(struct thread *td, int fd, struct file *fp, struct uio *auio,
    off_t offset, int flags)
{
	ssize_t cnt;
	int error;
#ifdef KTRACE
	struct uio *ktruio = NULL;
#endif

	auio->uio_rw = UIO_WRITE;
	auio->uio_td = td;
	auio->uio_offset = offset;
#ifdef KTRACE
	if (KTRPOINT(td, KTR_GENIO))
		ktruio = cloneuio(auio);
#endif
	cnt = auio->uio_resid;
	if (fp->f_type == DTYPE_VNODE &&
	    (fp->f_vnread_flags & FDEVFS_VNODE) == 0)
		bwillwrite();
	if ((error = fo_write(fp, auio, td->td_ucred, flags, td))) {
		if (auio->uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
		/* Socket layer is responsible for issuing SIGPIPE. */
		if (fp->f_type != DTYPE_SOCKET && error == EPIPE) {
			PROC_LOCK(td->td_proc);
			tdsignal(td, SIGPIPE);
			PROC_UNLOCK(td->td_proc);
		}
	}
	cnt -= auio->uio_resid;
#ifdef KTRACE
	if (ktruio != NULL) {
		ktruio->uio_resid = cnt;
		ktrgenio(fd, UIO_WRITE, ktruio, error);
	}
#endif
	td->td_retval[0] = cnt;
	return (error);
}

/*
 * Truncate a file given a file descriptor.
 *
 * Can't use fget_write() here, since we must return EINVAL and not EBADF
 * if the descriptor isn't writable.
 */
int
kern_ftruncate(struct thread *td, int fd, off_t length)
{
	struct file *fp;
	cap_rights_t rights;
	int error;

	AUDIT_ARG_FD(fd);
	if (length < 0)
		return (EINVAL);
	error = fget(td, fd, cap_rights_init(&rights, CAP_FTRUNCATE), &fp);
	if (error)
		return (error);
	AUDIT_ARG_FILE(td->td_proc, fp);
	if (!(fp->f_flag & FWRITE)) {
		fdrop(fp, td);
		return (EINVAL);
	}
	error = fo_truncate(fp, length, td->td_ucred, td);
	fdrop(fp, td);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ftruncate_args {
	int	fd;
	int	pad;
	off_t	length;
};
#endif
int
sys_ftruncate(struct thread *td, struct ftruncate_args *uap)
{

	return (kern_ftruncate(td, uap->fd, uap->length));
}

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct oftruncate_args {
	int	fd;
	long	length;
};
#endif
int
oftruncate(struct thread *td, struct oftruncate_args *uap)
{

	return (kern_ftruncate(td, uap->fd, uap->length));
}
#endif /* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct ioctl_args {
	int	fd;
	u_long	com;
	caddr_t	data;
};
#endif
/* ARGSUSED */
int
sys_ioctl(struct thread *td, struct ioctl_args *uap)
{
	u_char smalldata[SYS_IOCTL_SMALL_SIZE] __aligned(SYS_IOCTL_SMALL_ALIGN);
	u_long com;
	int arg, error;
	u_int size;
	caddr_t data;

	if (uap->com > 0xffffffff) {
		printf(
		    "WARNING pid %d (%s): ioctl sign-extension ioctl %lx\n",
		    td->td_proc->p_pid, td->td_name, uap->com);
		uap->com &= 0xffffffff;
	}
	com = uap->com;

	/*
	 * Interpret high order word to find amount of data to be
	 * copied to/from the user's address space.
	 */
	size = IOCPARM_LEN(com);
	if ((size > IOCPARM_MAX) ||
	    ((com & (IOC_VOID | IOC_IN | IOC_OUT)) == 0) ||
#if defined(COMPAT_FREEBSD5) || defined(COMPAT_FREEBSD4) || defined(COMPAT_43)
	    ((com & IOC_OUT) && size == 0) ||
#else
	    ((com & (IOC_IN | IOC_OUT)) && size == 0) ||
#endif
	    ((com & IOC_VOID) && size > 0 && size != sizeof(int)))
		return (ENOTTY);
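
	/*
	 * For reference: IOCPARM_LEN() pulls the argument size out of
	 * the high bits of the command word, where _IOR()/_IOW()/_IOWR()
	 * encoded it.  For example, FIONREAD is defined via
	 * _IOR('f', 127, int), so IOCPARM_LEN(FIONREAD) == sizeof(int)
	 * and the command passes the checks above.
	 */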

	if (size > 0) {
		if (com & IOC_VOID) {
			/* Integer argument. */
			arg = (intptr_t)uap->data;
			data = (void *)&arg;
			size = 0;
		} else {
			if (size > SYS_IOCTL_SMALL_SIZE)
				data = malloc((u_long)size, M_IOCTLOPS,
				    M_WAITOK);
			else
				data = smalldata;
		}
	} else
		data = (void *)&uap->data;
	if (com & IOC_IN) {
		error = copyin(uap->data, data, (u_int)size);
		if (error != 0)
			goto out;
	} else if (com & IOC_OUT) {
		/*
		 * Zero the buffer so the user always
		 * gets back something deterministic.
		 */
		bzero(data, size);
	}

	error = kern_ioctl(td, uap->fd, com, data);

	if (error == 0 && (com & IOC_OUT))
		error = copyout(data, uap->data, (u_int)size);

out:
	if (size > SYS_IOCTL_SMALL_SIZE)
		free(data, M_IOCTLOPS);
	return (error);
}

int
kern_ioctl(struct thread *td, int fd, u_long com, caddr_t data)
{
	struct file *fp;
	struct filedesc *fdp;
#ifndef CAPABILITIES
	cap_rights_t rights;
#endif
	int error, tmp, locked;

	AUDIT_ARG_FD(fd);
	AUDIT_ARG_CMD(com);

	fdp = td->td_proc->p_fd;

	switch (com) {
	case FIONCLEX:
	case FIOCLEX:
		FILEDESC_XLOCK(fdp);
		locked = LA_XLOCKED;
		break;
	default:
#ifdef CAPABILITIES
		FILEDESC_SLOCK(fdp);
		locked = LA_SLOCKED;
#else
		locked = LA_UNLOCKED;
#endif
		break;
	}

#ifdef CAPABILITIES
	if ((fp = fget_locked(fdp, fd)) == NULL) {
		error = EBADF;
		goto out;
	}
	if ((error = cap_ioctl_check(fdp, fd, com)) != 0) {
		fp = NULL;	/* fhold() was not called yet */
		goto out;
	}
	fhold(fp);
	if (locked == LA_SLOCKED) {
		FILEDESC_SUNLOCK(fdp);
		locked = LA_UNLOCKED;
	}
#else
	error = fget(td, fd, cap_rights_init(&rights, CAP_IOCTL), &fp);
	if (error != 0) {
		fp = NULL;
		goto out;
	}
#endif
	if ((fp->f_flag & (FREAD | FWRITE)) == 0) {
		error = EBADF;
		goto out;
	}

	switch (com) {
	case FIONCLEX:
		fdp->fd_ofiles[fd].fde_flags &= ~UF_EXCLOSE;
		goto out;
	case FIOCLEX:
		fdp->fd_ofiles[fd].fde_flags |= UF_EXCLOSE;
		goto out;
	case FIONBIO:
		if ((tmp = *(int *)data))
			atomic_set_int(&fp->f_flag, FNONBLOCK);
		else
			atomic_clear_int(&fp->f_flag, FNONBLOCK);
		data = (void *)&tmp;
		break;
	case FIOASYNC:
		if ((tmp = *(int *)data))
			atomic_set_int(&fp->f_flag, FASYNC);
		else
			atomic_clear_int(&fp->f_flag, FASYNC);
		data = (void *)&tmp;
		break;
	}

	error = fo_ioctl(fp, com, data, td->td_ucred, td);
out:
	switch (locked) {
	case LA_XLOCKED:
		FILEDESC_XUNLOCK(fdp);
		break;
#ifdef CAPABILITIES
	case LA_SLOCKED:
		FILEDESC_SUNLOCK(fdp);
		break;
#endif
	default:
		FILEDESC_UNLOCK_ASSERT(fdp);
		break;
	}
	if (fp != NULL)
		fdrop(fp, td);
	return (error);
}

int
poll_no_poll(int events)
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 */
	if (events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}
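
/*
 * Illustration: a caller asking only for POLLIN|POLLOUT gets both bits
 * back (the descriptor claims to be always ready), while a request
 * containing a non-POLLSTANDARD bit such as POLLATTRIB is answered
 * with POLLNVAL instead.
 */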

int
sys_pselect(struct thread *td, struct pselect_args *uap)
{
	struct timespec ts;
	struct timeval tv, *tvp;
	sigset_t set, *uset;
	int error;

	if (uap->ts != NULL) {
		error = copyin(uap->ts, &ts, sizeof(ts));
		if (error != 0)
			return (error);
		TIMESPEC_TO_TIMEVAL(&tv, &ts);
		tvp = &tv;
	} else
		tvp = NULL;
	if (uap->sm != NULL) {
		error = copyin(uap->sm, &set, sizeof(set));
		if (error != 0)
			return (error);
		uset = &set;
	} else
		uset = NULL;
	return (kern_pselect(td, uap->nd, uap->in, uap->ou, uap->ex, tvp,
	    uset, NFDBITS));
}

int
kern_pselect(struct thread *td, int nd, fd_set *in, fd_set *ou, fd_set *ex,
    struct timeval *tvp, sigset_t *uset, int abi_nfdbits)
{
	int error;

	if (uset != NULL) {
		error = kern_sigprocmask(td, SIG_SETMASK, uset,
		    &td->td_oldsigmask, 0);
		if (error != 0)
			return (error);
		td->td_pflags |= TDP_OLDMASK;
		/*
		 * Make sure that ast() is called on return to
		 * usermode and TDP_OLDMASK is cleared, restoring the
		 * old sigmask.
		 */
		thread_lock(td);
		td->td_flags |= TDF_ASTPENDING;
		thread_unlock(td);
	}
	error = kern_select(td, nd, in, ou, ex, tvp, abi_nfdbits);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct select_args {
	int	nd;
	fd_set	*in, *ou, *ex;
	struct	timeval *tv;
};
#endif
int
sys_select(struct thread *td, struct select_args *uap)
{
	struct timeval tv, *tvp;
	int error;

	if (uap->tv != NULL) {
		error = copyin(uap->tv, &tv, sizeof(tv));
		if (error)
			return (error);
		tvp = &tv;
	} else
		tvp = NULL;

	return (kern_select(td, uap->nd, uap->in, uap->ou, uap->ex, tvp,
	    NFDBITS));
}

/*
 * In the unlikely case when the user specified n greater than the last
 * open file descriptor, check that no bits are set after the last
 * valid fd.  We must return EBADF if any are set.
 *
 * There are applications that rely on this behaviour.
 *
 * nd is fd_lastfile + 1.
 */
static int
select_check_badfd(fd_set *fd_in, int nd, int ndu, int abi_nfdbits)
{
	char *addr, *oaddr;
	int b, i, res;
	uint8_t bits;

	if (nd >= ndu || fd_in == NULL)
		return (0);

	oaddr = NULL;
	bits = 0; /* silence gcc */
	for (i = nd; i < ndu; i++) {
		b = i / NBBY;
#if BYTE_ORDER == LITTLE_ENDIAN
		addr = (char *)fd_in + b;
#else
		addr = (char *)fd_in;
		if (abi_nfdbits == NFDBITS) {
			addr += rounddown(b, sizeof(fd_mask)) +
			    sizeof(fd_mask) - 1 - b % sizeof(fd_mask);
		} else {
			addr += rounddown(b, sizeof(uint32_t)) +
			    sizeof(uint32_t) - 1 - b % sizeof(uint32_t);
		}
#endif
		if (addr != oaddr) {
			res = fubyte(addr);
			if (res == -1)
				return (EFAULT);
			oaddr = addr;
			bits = res;
		}
		if ((bits & (1 << (i % NBBY))) != 0)
			return (EBADF);
	}
	return (0);
}

int
kern_select(struct thread *td, int nd, fd_set *fd_in, fd_set *fd_ou,
    fd_set *fd_ex, struct timeval *tvp, int abi_nfdbits)
{
	struct filedesc *fdp;
	/*
	 * The magic 2048 here is chosen to be just enough for FD_SETSIZE
	 * infds with the new FD_SETSIZE of 1024, and more than enough for
	 * FD_SETSIZE infds, outfds and exceptfds with the old FD_SETSIZE
	 * of 256.
	 */
	fd_mask s_selbits[howmany(2048, NFDBITS)];
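	/*
	 * Arithmetic check on the 2048 above: a single input set of
	 * FD_SETSIZE (1024) descriptors needs 1024 bits for its input
	 * copy plus 1024 bits for its output copy, i.e. 2048 bits
	 * (256 bytes), so one full-sized set fits in s_selbits without
	 * falling back to malloc(9) below.
	 */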
	fd_mask *ibits[3], *obits[3], *selbits, *sbp;
	struct timeval rtv;
	sbintime_t asbt, precision, rsbt;
	u_int nbufbytes, ncpbytes, ncpubytes, nfdbits;
	int error, lf, ndu;

	if (nd < 0)
		return (EINVAL);
	fdp = td->td_proc->p_fd;
	ndu = nd;
	lf = fdp->fd_lastfile;
	if (nd > lf + 1)
		nd = lf + 1;

	error = select_check_badfd(fd_in, nd, ndu, abi_nfdbits);
	if (error != 0)
		return (error);
	error = select_check_badfd(fd_ou, nd, ndu, abi_nfdbits);
	if (error != 0)
		return (error);
	error = select_check_badfd(fd_ex, nd, ndu, abi_nfdbits);
	if (error != 0)
		return (error);

	/*
	 * Allocate just enough bits for the non-null fd_sets.  Use the
	 * preallocated auto buffer if possible.
	 */
	nfdbits = roundup(nd, NFDBITS);
	ncpbytes = nfdbits / NBBY;
	ncpubytes = roundup(nd, abi_nfdbits) / NBBY;
	nbufbytes = 0;
	if (fd_in != NULL)
		nbufbytes += 2 * ncpbytes;
	if (fd_ou != NULL)
		nbufbytes += 2 * ncpbytes;
	if (fd_ex != NULL)
		nbufbytes += 2 * ncpbytes;
	if (nbufbytes <= sizeof s_selbits)
		selbits = &s_selbits[0];
	else
		selbits = malloc(nbufbytes, M_SELECT, M_WAITOK);

	/*
	 * Assign pointers into the bit buffers and fetch the input bits.
	 * Put the output buffers together so that they can be bzeroed
	 * together.
	 */
	sbp = selbits;
#define	getbits(name, x) \
	do {								\
		if (name == NULL) {					\
			ibits[x] = NULL;				\
			obits[x] = NULL;				\
		} else {						\
			ibits[x] = sbp + nbufbytes / 2 / sizeof *sbp;	\
			obits[x] = sbp;					\
			sbp += ncpbytes / sizeof *sbp;			\
			error = copyin(name, ibits[x], ncpubytes);	\
			if (error != 0)					\
				goto done;				\
			bzero((char *)ibits[x] + ncpubytes,		\
			    ncpbytes - ncpubytes);			\
		}							\
	} while (0)
	getbits(fd_in, 0);
	getbits(fd_ou, 1);
	getbits(fd_ex, 2);
#undef	getbits

#if BYTE_ORDER == BIG_ENDIAN && defined(__LP64__)
	/*
	 * XXX: swizzle_fdset assumes that if abi_nfdbits != NFDBITS,
	 * we are running under 32-bit emulation. This should be more
	 * generic.
	 */
#define	swizzle_fdset(bits)						\
	if (abi_nfdbits != NFDBITS && bits != NULL) {			\
		int i;							\
		for (i = 0; i < ncpbytes / sizeof *sbp; i++)		\
			bits[i] = (bits[i] >> 32) | (bits[i] << 32);	\
	}
#else
#define	swizzle_fdset(bits)
#endif
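
/*
 * Example of the swizzle: a 32-bit ABI keeps fds 0-31 in its first
 * 32-bit word.  Copied into a 64-bit big-endian fd_mask, those bits
 * land in the upper half of the word, so swapping the two 32-bit
 * halves of each fd_mask restores the native 64-bit bit order.
 */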

	/* Make sure the bit order makes it through an ABI transition */
	swizzle_fdset(ibits[0]);
	swizzle_fdset(ibits[1]);
	swizzle_fdset(ibits[2]);

	if (nbufbytes != 0)
		bzero(selbits, nbufbytes / 2);

	precision = 0;
	if (tvp != NULL) {
		rtv = *tvp;
		if (rtv.tv_sec < 0 || rtv.tv_usec < 0 ||
		    rtv.tv_usec >= 1000000) {
			error = EINVAL;
			goto done;
		}
		if (!timevalisset(&rtv))
			asbt = 0;
		else if (rtv.tv_sec <= INT32_MAX) {
			rsbt = tvtosbt(rtv);
			precision = rsbt;
			precision >>= tc_precexp;
			if (TIMESEL(&asbt, rsbt))
				asbt += tc_tick_sbt;
			if (asbt <= SBT_MAX - rsbt)
				asbt += rsbt;
			else
				asbt = -1;
		} else
			asbt = -1;
	} else
		asbt = -1;
	seltdinit(td);
	/* Iterate until the timeout expires or descriptors become ready. */
	for (;;) {
		error = selscan(td, ibits, obits, nd);
		if (error || td->td_retval[0] != 0)
			break;
		error = seltdwait(td, asbt, precision);
		if (error)
			break;
		error = selrescan(td, ibits, obits);
		if (error || td->td_retval[0] != 0)
			break;
	}
	seltdclear(td);

done:
	/* select is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;

	/* swizzle bit order back, if necessary */
	swizzle_fdset(obits[0]);
	swizzle_fdset(obits[1]);
	swizzle_fdset(obits[2]);
#undef swizzle_fdset

#define	putbits(name, x)						\
	if (name && (error2 = copyout(obits[x], name, ncpubytes)))	\
		error = error2;
	if (error == 0) {
		int error2;

		putbits(fd_in, 0);
		putbits(fd_ou, 1);
		putbits(fd_ex, 2);
#undef putbits
	}
	if (selbits != &s_selbits[0])
		free(selbits, M_SELECT);

	return (error);
}

/*
 * Convert a select bit set to poll flags.
 *
 * The backend always returns POLLHUP/POLLERR if appropriate and we
 * return this as a set bit in any set.
 */
static int select_flags[3] = {
    POLLRDNORM | POLLHUP | POLLERR,
    POLLWRNORM | POLLHUP | POLLERR,
    POLLRDBAND | POLLERR
};
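
/*
 * Consequence of the mapping above: an fd present only in the read set
 * is polled with POLLRDNORM|POLLHUP|POLLERR, so a hangup or error on
 * the file still wakes the select()er and is reported as "readable".
 */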

/*
 * Compute the fo_poll flags required for a fd given by the index and
 * bit position in the fd_mask array.
 */
static __inline int
selflags(fd_mask **ibits, int idx, fd_mask bit)
{
	int flags;
	int msk;

	flags = 0;
	for (msk = 0; msk < 3; msk++) {
		if (ibits[msk] == NULL)
			continue;
		if ((ibits[msk][idx] & bit) == 0)
			continue;
		flags |= select_flags[msk];
	}
	return (flags);
}

/*
 * Set the appropriate output bits given a mask of fired events and the
 * input bits originally requested.
 */
static __inline int
selsetbits(fd_mask **ibits, fd_mask **obits, int idx, fd_mask bit, int events)
{
	int msk;
	int n;

	n = 0;
	for (msk = 0; msk < 3; msk++) {
		if ((events & select_flags[msk]) == 0)
			continue;
		if (ibits[msk] == NULL)
			continue;
		if ((ibits[msk][idx] & bit) == 0)
			continue;
		/*
		 * XXX Check for a duplicate set.  This can occur because a
		 * socket calls selrecord() twice for each poll() call
		 * resulting in two selfds per real fd.  selrescan() will
		 * call selsetbits twice as a result.
		 */
		if ((obits[msk][idx] & bit) != 0)
			continue;
		obits[msk][idx] |= bit;
		n++;
	}

	return (n);
}

static __inline int
getselfd_cap(struct filedesc *fdp, int fd, struct file **fpp)
{
	cap_rights_t rights;

	cap_rights_init(&rights, CAP_EVENT);

	return (fget_unlocked(fdp, fd, &rights, fpp, NULL));
}

/*
 * Traverse the list of fds attached to this thread's seltd and check for
 * completion.
 */
static int
selrescan(struct thread *td, fd_mask **ibits, fd_mask **obits)
{
	struct filedesc *fdp;
	struct selinfo *si;
	struct seltd *stp;
	struct selfd *sfp;
	struct selfd *sfn;
	struct file *fp;
	fd_mask bit;
	int fd, ev, n, idx;
	int error;

	fdp = td->td_proc->p_fd;
	stp = td->td_sel;
	n = 0;
	STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn) {
		fd = (int)(uintptr_t)sfp->sf_cookie;
		si = sfp->sf_si;
		selfdfree(stp, sfp);
		/* If the selinfo wasn't cleared the event didn't fire. */
		if (si != NULL)
			continue;
		error = getselfd_cap(fdp, fd, &fp);
		if (error)
			return (error);
		idx = fd / NFDBITS;
		bit = (fd_mask)1 << (fd % NFDBITS);
		ev = fo_poll(fp, selflags(ibits, idx, bit), td->td_ucred, td);
		fdrop(fp, td);
		if (ev != 0)
			n += selsetbits(ibits, obits, idx, bit, ev);
	}
	stp->st_flags = 0;
	td->td_retval[0] = n;
	return (0);
}

/*
 * Perform the initial filedescriptor scan and register ourselves with
 * each selinfo.
 */
static int
selscan(struct thread *td, fd_mask **ibits, fd_mask **obits, int nfd)
{
	struct filedesc *fdp;
	struct file *fp;
	fd_mask bit;
	int ev, flags, end, fd;
	int n, idx;
	int error;

	fdp = td->td_proc->p_fd;
	n = 0;
	for (idx = 0, fd = 0; fd < nfd; idx++) {
		end = imin(fd + NFDBITS, nfd);
		for (bit = 1; fd < end; bit <<= 1, fd++) {
			/* Compute the list of events we're interested in. */
			flags = selflags(ibits, idx, bit);
			if (flags == 0)
				continue;
			error = getselfd_cap(fdp, fd, &fp);
			if (error)
				return (error);
			selfdalloc(td, (void *)(uintptr_t)fd);
			ev = fo_poll(fp, flags, td->td_ucred, td);
			fdrop(fp, td);
			if (ev != 0)
				n += selsetbits(ibits, obits, idx, bit, ev);
		}
	}

	td->td_retval[0] = n;
	return (0);
}

int
sys_poll(struct thread *td, struct poll_args *uap)
{
	struct timespec ts, *tsp;

	if (uap->timeout != INFTIM) {
		if (uap->timeout < 0)
			return (EINVAL);
		ts.tv_sec = uap->timeout / 1000;
		ts.tv_nsec = (uap->timeout % 1000) * 1000000;
		tsp = &ts;
	} else
		tsp = NULL;

	return (kern_poll(td, uap->fds, uap->nfds, tsp, NULL));
}
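
/*
 * Worked example for the millisecond conversion above: a timeout of
 * 2500 ms yields ts.tv_sec = 2 and ts.tv_nsec = 500 * 1000000,
 * i.e. 2.5 seconds.
 */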

int
kern_poll(struct thread *td, struct pollfd *fds, u_int nfds,
    struct timespec *tsp, sigset_t *uset)
{
	struct pollfd *bits;
	struct pollfd smallbits[32];
	sbintime_t sbt, precision, tmp;
	time_t over;
	struct timespec ts;
	int error;
	size_t ni;

	precision = 0;
	if (tsp != NULL) {
		if (tsp->tv_sec < 0)
			return (EINVAL);
		if (tsp->tv_nsec < 0 || tsp->tv_nsec >= 1000000000)
			return (EINVAL);
		if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
			sbt = 0;
		else {
			ts = *tsp;
			if (ts.tv_sec > INT32_MAX / 2) {
				over = ts.tv_sec - INT32_MAX / 2;
				ts.tv_sec -= over;
			} else
				over = 0;
			tmp = tstosbt(ts);
			precision = tmp;
			precision >>= tc_precexp;
			if (TIMESEL(&sbt, tmp))
				sbt += tc_tick_sbt;
			sbt += tmp;
		}
	} else
		sbt = -1;

	if (nfds > maxfilesperproc && nfds > FD_SETSIZE)
		return (EINVAL);
	ni = nfds * sizeof(struct pollfd);
	if (ni > sizeof(smallbits))
		bits = malloc(ni, M_TEMP, M_WAITOK);
	else
		bits = smallbits;
	error = copyin(fds, bits, ni);
	if (error)
		goto done;

	if (uset != NULL) {
		error = kern_sigprocmask(td, SIG_SETMASK, uset,
		    &td->td_oldsigmask, 0);
		if (error)
			goto done;
		td->td_pflags |= TDP_OLDMASK;
		/*
		 * Make sure that ast() is called on return to
		 * usermode and TDP_OLDMASK is cleared, restoring the
		 * old sigmask.
		 */
		thread_lock(td);
		td->td_flags |= TDF_ASTPENDING;
		thread_unlock(td);
	}

	seltdinit(td);
	/* Iterate until the timeout expires or descriptors become ready. */
	for (;;) {
		error = pollscan(td, bits, nfds);
		if (error || td->td_retval[0] != 0)
			break;
		error = seltdwait(td, sbt, precision);
		if (error)
			break;
		error = pollrescan(td);
		if (error || td->td_retval[0] != 0)
			break;
	}
	seltdclear(td);

done:
	/* poll is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;
	if (error == 0)
		error = pollout(td, bits, fds, nfds);
	if (ni > sizeof(smallbits))
		free(bits, M_TEMP);
	return (error);
}

int
sys_ppoll(struct thread *td, struct ppoll_args *uap)
{
	struct timespec ts, *tsp;
	sigset_t set, *ssp;
	int error;

	if (uap->ts != NULL) {
		error = copyin(uap->ts, &ts, sizeof(ts));
		if (error)
			return (error);
		tsp = &ts;
	} else
		tsp = NULL;
	if (uap->set != NULL) {
		error = copyin(uap->set, &set, sizeof(set));
		if (error)
			return (error);
		ssp = &set;
	} else
		ssp = NULL;
	/*
	 * fds is still a pointer to user space. kern_poll() will
	 * take care of copying that array into kernel space.
	 */

	return (kern_poll(td, uap->fds, uap->nfds, tsp, ssp));
}

static int
pollrescan(struct thread *td)
{
	struct seltd *stp;
	struct selfd *sfp;
	struct selfd *sfn;
	struct selinfo *si;
	struct filedesc *fdp;
	struct file *fp;
	struct pollfd *fd;
#ifdef CAPABILITIES
	cap_rights_t rights;
#endif
	int n;

	n = 0;
	fdp = td->td_proc->p_fd;
	stp = td->td_sel;
	FILEDESC_SLOCK(fdp);
	STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn) {
		fd = (struct pollfd *)sfp->sf_cookie;
		si = sfp->sf_si;
		selfdfree(stp, sfp);
		/* If the selinfo wasn't cleared the event didn't fire. */
		if (si != NULL)
			continue;
		fp = fdp->fd_ofiles[fd->fd].fde_file;
#ifdef CAPABILITIES
		if (fp == NULL ||
		    cap_check(cap_rights(fdp, fd->fd),
		    cap_rights_init(&rights, CAP_EVENT)) != 0)
#else
		if (fp == NULL)
#endif
		{
			fd->revents = POLLNVAL;
			n++;
			continue;
		}

		/*
		 * Note: backend also returns POLLHUP and
		 * POLLERR if appropriate.
		 */
		fd->revents = fo_poll(fp, fd->events, td->td_ucred, td);
		if (fd->revents != 0)
			n++;
	}
	FILEDESC_SUNLOCK(fdp);
	stp->st_flags = 0;
	td->td_retval[0] = n;
	return (0);
}

static int
pollout(struct thread *td, struct pollfd *fds, struct pollfd *ufds, u_int nfd)
{
	int error = 0;
	u_int i = 0;
	u_int n = 0;

	for (i = 0; i < nfd; i++) {
		error = copyout(&fds->revents, &ufds->revents,
		    sizeof(ufds->revents));
		if (error)
			return (error);
		if (fds->revents != 0)
			n++;
		fds++;
		ufds++;
	}
	td->td_retval[0] = n;
	return (0);
}

static int
pollscan(struct thread *td, struct pollfd *fds, u_int nfd)
{
	struct filedesc *fdp = td->td_proc->p_fd;
	struct file *fp;
#ifdef CAPABILITIES
	cap_rights_t rights;
#endif
	int i, n = 0;

	FILEDESC_SLOCK(fdp);
	for (i = 0; i < nfd; i++, fds++) {
		if (fds->fd > fdp->fd_lastfile) {
			fds->revents = POLLNVAL;
			n++;
		} else if (fds->fd < 0) {
			fds->revents = 0;
		} else {
			fp = fdp->fd_ofiles[fds->fd].fde_file;
#ifdef CAPABILITIES
			if (fp == NULL ||
			    cap_check(cap_rights(fdp, fds->fd),
			    cap_rights_init(&rights, CAP_EVENT)) != 0)
#else
			if (fp == NULL)
#endif
			{
				fds->revents = POLLNVAL;
				n++;
			} else {
				/*
				 * Note: backend also returns POLLHUP and
				 * POLLERR if appropriate.
				 */
				selfdalloc(td, fds);
				fds->revents = fo_poll(fp, fds->events,
				    td->td_ucred, td);
				/*
				 * POSIX requires that POLLOUT never be
				 * set simultaneously with POLLHUP.
				 */
				if ((fds->revents & POLLHUP) != 0)
					fds->revents &= ~POLLOUT;

				if (fds->revents != 0)
					n++;
			}
		}
	}
	FILEDESC_SUNLOCK(fdp);
	td->td_retval[0] = n;
	return (0);
}

/*
 * XXX This was created specifically to support netncp and netsmb.  This
 * allows the caller to specify a socket to wait for events on.  It returns
 * 0 if any events matched and an error otherwise.  There is no way to
 * determine which events fired.
 */
int
selsocket(struct socket *so, int events, struct timeval *tvp, struct thread *td)
{
	struct timeval rtv;
	sbintime_t asbt, precision, rsbt;
	int error;

	precision = 0;	/* stupid gcc! */
	if (tvp != NULL) {
		rtv = *tvp;
		if (rtv.tv_sec < 0 || rtv.tv_usec < 0 ||
		    rtv.tv_usec >= 1000000)
			return (EINVAL);
		if (!timevalisset(&rtv))
			asbt = 0;
		else if (rtv.tv_sec <= INT32_MAX) {
			rsbt = tvtosbt(rtv);
			precision = rsbt;
			precision >>= tc_precexp;
			if (TIMESEL(&asbt, rsbt))
				asbt += tc_tick_sbt;
			if (asbt <= SBT_MAX - rsbt)
				asbt += rsbt;
			else
				asbt = -1;
		} else
			asbt = -1;
	} else
		asbt = -1;
	seltdinit(td);
	/*
	 * Iterate until the timeout expires or the socket becomes ready.
	 */
	for (;;) {
		selfdalloc(td, NULL);
		error = sopoll(so, events, NULL, td);
		/* error here is actually the ready events. */
		if (error)
			return (0);
		error = seltdwait(td, asbt, precision);
		if (error)
			break;
	}
	seltdclear(td);
	/* XXX Duplicates ncp/smb behavior. */
	if (error == ERESTART)
		error = 0;
	return (error);
}

/*
 * Preallocate two selfds associated with 'cookie'.  Some fo_poll routines
 * have two select sets, one for read and another for write.
 */
static void
selfdalloc(struct thread *td, void *cookie)
{
	struct seltd *stp;

	stp = td->td_sel;
	if (stp->st_free1 == NULL)
		stp->st_free1 = uma_zalloc(selfd_zone, M_WAITOK|M_ZERO);
	stp->st_free1->sf_td = stp;
	stp->st_free1->sf_cookie = cookie;
	if (stp->st_free2 == NULL)
		stp->st_free2 = uma_zalloc(selfd_zone, M_WAITOK|M_ZERO);
	stp->st_free2->sf_td = stp;
	stp->st_free2->sf_cookie = cookie;
}

static void
selfdfree(struct seltd *stp, struct selfd *sfp)
{
	STAILQ_REMOVE(&stp->st_selq, sfp, selfd, sf_link);
	if (sfp->sf_si != NULL) {
		mtx_lock(sfp->sf_mtx);
		if (sfp->sf_si != NULL) {
			TAILQ_REMOVE(&sfp->sf_si->si_tdlist, sfp, sf_threads);
			refcount_release(&sfp->sf_refs);
		}
		mtx_unlock(sfp->sf_mtx);
	}
	if (refcount_release(&sfp->sf_refs))
		uma_zfree(selfd_zone, sfp);
}
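
/*
 * Reference protocol (see selrecord() below): each linked selfd holds
 * two references, one for the owning thread's st_selq list and one for
 * the selinfo's si_tdlist.  Whichever of selfdfree() and doselwakeup()
 * drops the last reference returns the selfd to its UMA zone.
 */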

/* Drain the waiters tied to all the selfd belonging to the specified selinfo. */
void
seldrain(struct selinfo *sip)
{

	/*
	 * This feature is already provided by doselwakeup(), thus it is
	 * enough to go for it.
	 * Eventually, the context should take care to avoid races
	 * between a thread calling select()/poll() and file descriptor
	 * detaching, but, again, the races are just the same as for
	 * selwakeup().
	 */
	doselwakeup(sip, -1);
}

/*
 * Record a select request.
 */
void
selrecord(struct thread *selector, struct selinfo *sip)
{
	struct selfd *sfp;
	struct seltd *stp;
	struct mtx *mtxp;

	stp = selector->td_sel;
	/*
	 * Don't record when doing a rescan.
	 */
	if (stp->st_flags & SELTD_RESCAN)
		return;
	/*
	 * Grab one of the preallocated descriptors.
	 */
	sfp = NULL;
	if ((sfp = stp->st_free1) != NULL)
		stp->st_free1 = NULL;
	else if ((sfp = stp->st_free2) != NULL)
		stp->st_free2 = NULL;
	else
		panic("selrecord: No free selfd on selq");
	mtxp = sip->si_mtx;
	if (mtxp == NULL)
		mtxp = mtx_pool_find(mtxpool_select, sip);
	/*
	 * Initialize the sfp and queue it in the thread.
	 */
	sfp->sf_si = sip;
	sfp->sf_mtx = mtxp;
	refcount_init(&sfp->sf_refs, 2);
	STAILQ_INSERT_TAIL(&stp->st_selq, sfp, sf_link);
	/*
	 * Now that we've locked the sip, check for initialization.
	 */
	mtx_lock(mtxp);
	if (sip->si_mtx == NULL) {
		sip->si_mtx = mtxp;
		TAILQ_INIT(&sip->si_tdlist);
	}
	/*
	 * Add this thread to the list of selfds listening on this selinfo.
	 */
	TAILQ_INSERT_TAIL(&sip->si_tdlist, sfp, sf_threads);
	mtx_unlock(sip->si_mtx);
}

/* Wake up a selecting thread. */
void
selwakeup(struct selinfo *sip)
{

	doselwakeup(sip, -1);
}

/* Wake up a selecting thread, and set its priority. */
void
selwakeuppri(struct selinfo *sip, int pri)
{

	doselwakeup(sip, pri);
}

/*
 * Do a wakeup when a selectable event occurs.
 */
static void
doselwakeup(struct selinfo *sip, int pri)
{
	struct selfd *sfp;
	struct selfd *sfn;
	struct seltd *stp;

	/* If it's not initialized there can't be any waiters. */
	if (sip->si_mtx == NULL)
		return;
	/*
	 * Locking the selinfo locks all selfds associated with it.
	 */
	mtx_lock(sip->si_mtx);
	TAILQ_FOREACH_SAFE(sfp, &sip->si_tdlist, sf_threads, sfn) {
		/*
		 * Once we remove this sfp from the list and clear the
		 * sf_si, seltdclear will know to ignore this si.
		 */
		TAILQ_REMOVE(&sip->si_tdlist, sfp, sf_threads);
		sfp->sf_si = NULL;
		stp = sfp->sf_td;
		mtx_lock(&stp->st_mtx);
		stp->st_flags |= SELTD_PENDING;
		cv_broadcastpri(&stp->st_wait, pri);
		mtx_unlock(&stp->st_mtx);
		if (refcount_release(&sfp->sf_refs))
			uma_zfree(selfd_zone, sfp);
	}
	mtx_unlock(sip->si_mtx);
}

static void
seltdinit(struct thread *td)
{
	struct seltd *stp;

	if ((stp = td->td_sel) != NULL)
		goto out;
	td->td_sel = stp = malloc(sizeof(*stp), M_SELECT, M_WAITOK|M_ZERO);
	mtx_init(&stp->st_mtx, "sellck", NULL, MTX_DEF);
	cv_init(&stp->st_wait, "select");
out:
	stp->st_flags = 0;
	STAILQ_INIT(&stp->st_selq);
}

static int
seltdwait(struct thread *td, sbintime_t sbt, sbintime_t precision)
{
	struct seltd *stp;
	int error;

	stp = td->td_sel;
	/*
	 * An event of interest may occur while we do not hold the seltd
	 * locked so check the pending flag before we sleep.
	 */
	mtx_lock(&stp->st_mtx);
	/*
	 * Any further calls to selrecord will be a rescan.
	 */
	stp->st_flags |= SELTD_RESCAN;
	if (stp->st_flags & SELTD_PENDING) {
		mtx_unlock(&stp->st_mtx);
		return (0);
	}
	if (sbt == 0)
		error = EWOULDBLOCK;
	else if (sbt != -1)
		error = cv_timedwait_sig_sbt(&stp->st_wait, &stp->st_mtx,
		    sbt, precision, C_ABSOLUTE);
	else
		error = cv_wait_sig(&stp->st_wait, &stp->st_mtx);
	mtx_unlock(&stp->st_mtx);

	return (error);
}

void
seltdfini(struct thread *td)
{
	struct seltd *stp;

	stp = td->td_sel;
	if (stp == NULL)
		return;
	if (stp->st_free1)
		uma_zfree(selfd_zone, stp->st_free1);
	if (stp->st_free2)
		uma_zfree(selfd_zone, stp->st_free2);
	td->td_sel = NULL;
	cv_destroy(&stp->st_wait);
	mtx_destroy(&stp->st_mtx);
	free(stp, M_SELECT);
}

/*
 * Remove the references to the thread from all of the objects we were
 * polling.
 */
static void
seltdclear(struct thread *td)
{
	struct seltd *stp;
	struct selfd *sfp;
	struct selfd *sfn;

	stp = td->td_sel;
	STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn)
		selfdfree(stp, sfp);
	stp->st_flags = 0;
}

static void selectinit(void *);
SYSINIT(select, SI_SUB_SYSCALLS, SI_ORDER_ANY, selectinit, NULL);

/* ARGSUSED*/
static void
selectinit(void *dummy __unused)
{

	selfd_zone = uma_zcreate("selfd", sizeof(struct selfd), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	mtxpool_select = mtx_pool_create("select mtxpool", 128, MTX_DEF);
}
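
/*
 * Design note: selinfo locks are handed out of this shared pool by
 * address (mtx_pool_find() in selrecord()), so unrelated selinfos may
 * end up sharing a mutex; that is harmless beyond occasional
 * contention and avoids embedding a mutex in every selinfo.
 */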

/*
 * Set up a syscall return value that follows the convention specified for
 * posix_* functions.
 */
int
kern_posix_error(struct thread *td, int error)
{

	if (error <= 0)
		return (error);
	td->td_errno = error;
	td->td_pflags |= TDP_NERRNO;
	td->td_retval[0] = error;
	return (0);
}
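
/*
 * Usage note: handlers for calls such as posix_fallocate(2) end with
 * "return (kern_posix_error(td, error));".  The syscall itself then
 * completes without setting errno, and the positive error number is
 * delivered to userspace as the function's return value, as POSIX
 * specifies for the posix_* family.
 */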