/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)sys_generic.c	8.5 (Berkeley) 1/21/94
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_capsicum.h"
#include "opt_ktrace.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/capsicum.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/socketvar.h>
#include <sys/uio.h>
#include <sys/eventfd.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/resourcevar.h>
#include <sys/selinfo.h>
#include <sys/sleepqueue.h>
#include <sys/specialfd.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vnode.h>
#include <sys/unistd.h>
#include <sys/bio.h>
#include <sys/condvar.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <security/audit/audit.h>
/*
 * The following macro defines how many bytes will be allocated from
 * the stack instead of memory allocated when passing the IOCTL data
 * structures from userspace to the kernel.  Some IOCTLs having small
 * data structures are used very frequently and this small buffer on
 * the stack gives a significant speedup for those requests.  The value
 * of this define should be greater than or equal to 64 bytes and
 * should also be a power of two.  The data structure is currently
 * hard-aligned to an 8-byte boundary on the stack.  This should
 * currently be sufficient for all supported platforms.
 */
#define	SYS_IOCTL_SMALL_SIZE	128	/* bytes */
#define	SYS_IOCTL_SMALL_ALIGN	8	/* bytes */
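
/*
 * Informal sizing note: with the values above, an ioctl whose argument
 * structure fits in 128 bytes is staged in an on-stack buffer in
 * sys_ioctl(), while larger requests fall back to malloc(M_IOCTLOPS);
 * the 8-byte alignment keeps any 64-bit members of the staged structure
 * naturally aligned.
 */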

static int iosize_max_clamp = 0;
SYSCTL_INT(_debug, OID_AUTO, iosize_max_clamp, CTLFLAG_RW,
    &iosize_max_clamp, 0, "Clamp max i/o size to INT_MAX");
static int devfs_iosize_max_clamp = 1;
SYSCTL_INT(_debug, OID_AUTO, devfs_iosize_max_clamp, CTLFLAG_RW,
    &devfs_iosize_max_clamp, 0, "Clamp max i/o size to INT_MAX for devices");

/*
 * Assert that the return value of read(2) and write(2) syscalls fits
 * into a register.  If not, an architecture will need to provide the
 * usermode wrappers to reconstruct the result.
 */
CTASSERT(sizeof(register_t) >= sizeof(size_t));

static MALLOC_DEFINE(M_IOCTLOPS, "ioctlops", "ioctl data buffer");
static MALLOC_DEFINE(M_SELECT, "select", "select() buffer");
MALLOC_DEFINE(M_IOV, "iov", "large iov's");

static int	pollout(struct thread *, struct pollfd *, struct pollfd *,
		    u_int);
static int	pollscan(struct thread *, struct pollfd *, u_int);
static int	pollrescan(struct thread *);
static int	selscan(struct thread *, fd_mask **, fd_mask **, int);
static int	selrescan(struct thread *, fd_mask **, fd_mask **);
static void	selfdalloc(struct thread *, void *);
static void	selfdfree(struct seltd *, struct selfd *);
static int	dofileread(struct thread *, int, struct file *, struct uio *,
		    off_t, int);
static int	dofilewrite(struct thread *, int, struct file *, struct uio *,
		    off_t, int);
static void	doselwakeup(struct selinfo *, int);
static void	seltdinit(struct thread *);
static int	seltdwait(struct thread *, sbintime_t, sbintime_t);
static void	seltdclear(struct thread *);

/*
 * One seltd per-thread allocated on demand as needed.
 *
 *	t - protected by st_mtx
 * 	k - Only accessed by curthread or read-only
 */
struct seltd {
	STAILQ_HEAD(, selfd)	st_selq;	/* (k) List of selfds. */
	struct selfd		*st_free1;	/* (k) free fd for read set. */
	struct selfd		*st_free2;	/* (k) free fd for write set. */
	struct mtx		st_mtx;		/* Protects struct seltd */
	struct cv		st_wait;	/* (t) Wait channel. */
	int			st_flags;	/* (t) SELTD_ flags. */
};

#define	SELTD_PENDING	0x0001			/* We have pending events. */
#define	SELTD_RESCAN	0x0002			/* Doing a rescan. */

/*
 * One selfd allocated per-thread per-file-descriptor.
 *	f - protected by sf_mtx
 */
struct selfd {
	STAILQ_ENTRY(selfd)	sf_link;	/* (k) fds owned by this td. */
	TAILQ_ENTRY(selfd)	sf_threads;	/* (f) fds on this selinfo. */
	struct selinfo		*sf_si;		/* (f) selinfo when linked. */
	struct mtx		*sf_mtx;	/* Pointer to selinfo mtx. */
	struct seltd		*sf_td;		/* (k) owning seltd. */
	void			*sf_cookie;	/* (k) fd or pollfd. */
};

MALLOC_DEFINE(M_SELFD, "selfd", "selfd");
static struct mtx_pool *mtxpool_select;
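
/*
 * Informal sketch of the relationships between the structures above:
 * each selecting thread owns one seltd; for every descriptor it polls,
 * a selfd is taken from the seltd's free slots and linked onto the
 * selinfo advertised by the backing object via selrecord().
 * doselwakeup() walks a selinfo's selfd list to find sleeping threads,
 * while seltdclear() walks the thread's own list to unlink them again.
 */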

static ssize_t
devfs_iosize_max(void)
{

	return (devfs_iosize_max_clamp || SV_CURPROC_FLAG(SV_ILP32) ?
	    INT_MAX : SSIZE_MAX);
}

static ssize_t
iosize_max(void)
{

	return (iosize_max_clamp || SV_CURPROC_FLAG(SV_ILP32) ?
	    INT_MAX : SSIZE_MAX);
}

#ifndef _SYS_SYSPROTO_H_
struct read_args {
	int	fd;
	void	*buf;
	size_t	nbyte;
};
#endif
int
sys_read(struct thread *td, struct read_args *uap)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if (uap->nbyte > IOSIZE_MAX)
		return (EINVAL);
	aiov.iov_base = uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = uap->nbyte;
	auio.uio_segflg = UIO_USERSPACE;
	error = kern_readv(td, uap->fd, &auio);
	return (error);
}

/*
 * Positioned read system call
 */
#ifndef _SYS_SYSPROTO_H_
struct pread_args {
	int	fd;
	void	*buf;
	size_t	nbyte;
	off_t	offset;
};
#endif
int
sys_pread(struct thread *td, struct pread_args *uap)
{

	return (kern_pread(td, uap->fd, uap->buf, uap->nbyte, uap->offset));
}

int
kern_pread(struct thread *td, int fd, void *buf, size_t nbyte, off_t offset)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if (nbyte > IOSIZE_MAX)
		return (EINVAL);
	aiov.iov_base = buf;
	aiov.iov_len = nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = nbyte;
	auio.uio_segflg = UIO_USERSPACE;
	error = kern_preadv(td, fd, &auio, offset);
	return (error);
}

#if defined(COMPAT_FREEBSD6)
int
freebsd6_pread(struct thread *td, struct freebsd6_pread_args *uap)
{

	return (kern_pread(td, uap->fd, uap->buf, uap->nbyte, uap->offset));
}
#endif

/*
 * Scatter read system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct readv_args {
	int	fd;
	struct iovec *iovp;
	u_int	iovcnt;
};
#endif
int
sys_readv(struct thread *td, struct readv_args *uap)
{
	struct uio *auio;
	int error;

	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error)
		return (error);
	error = kern_readv(td, uap->fd, auio);
	free(auio, M_IOV);
	return (error);
}

int
kern_readv(struct thread *td, int fd, struct uio *auio)
{
	struct file *fp;
	int error;

	error = fget_read(td, fd, &cap_read_rights, &fp);
	if (error)
		return (error);
	error = dofileread(td, fd, fp, auio, (off_t)-1, 0);
	fdrop(fp, td);
	return (error);
}

/*
 * Scatter positioned read system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct preadv_args {
	int	fd;
	struct iovec *iovp;
	u_int	iovcnt;
	off_t	offset;
};
#endif
int
sys_preadv(struct thread *td, struct preadv_args *uap)
{
	struct uio *auio;
	int error;

	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error != 0)
		return (error);
	error = kern_preadv(td, uap->fd, auio, uap->offset);
	free(auio, M_IOV);
	return (error);
}

int
kern_preadv(struct thread *td, int fd, struct uio *auio, off_t offset)
{
	struct file *fp;
	int error;

	error = fget_read(td, fd, &cap_pread_rights, &fp);
	if (error)
		return (error);
	if (!(fp->f_ops->fo_flags & DFLAG_SEEKABLE))
		error = ESPIPE;
	else if (offset < 0 &&
	    (fp->f_vnode == NULL || fp->f_vnode->v_type != VCHR))
		error = EINVAL;
	else
		error = dofileread(td, fd, fp, auio, offset, FOF_OFFSET);
	fdrop(fp, td);
	return (error);
}

/*
 * Common code for readv and preadv that reads data in
 * from a file using the passed in uio, offset, and flags.
 */
static int
dofileread(struct thread *td, int fd, struct file *fp, struct uio *auio,
    off_t offset, int flags)
{
	ssize_t cnt;
	int error;
#ifdef KTRACE
	struct uio *ktruio = NULL;
#endif

	AUDIT_ARG_FD(fd);

	/* Finish zero length reads right here */
	if (auio->uio_resid == 0) {
		td->td_retval[0] = 0;
		return (0);
	}
	auio->uio_rw = UIO_READ;
	auio->uio_offset = offset;
	auio->uio_td = td;
#ifdef KTRACE
	if (KTRPOINT(td, KTR_GENIO))
		ktruio = cloneuio(auio);
#endif
	cnt = auio->uio_resid;
	if ((error = fo_read(fp, auio, td->td_ucred, flags, td))) {
		if (auio->uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
	}
	cnt -= auio->uio_resid;
#ifdef KTRACE
	if (ktruio != NULL) {
		ktruio->uio_resid = cnt;
		ktrgenio(fd, UIO_READ, ktruio, error);
	}
#endif
	td->td_retval[0] = cnt;
	return (error);
}
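
/*
 * Note on the error handling above: if the read is interrupted or would
 * block after some bytes have already transferred, the error is dropped
 * and the short count is returned instead, which is the usual POSIX
 * short-transfer behaviour.
 */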

#ifndef _SYS_SYSPROTO_H_
struct write_args {
	int	fd;
	const void *buf;
	size_t	nbyte;
};
#endif
int
sys_write(struct thread *td, struct write_args *uap)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if (uap->nbyte > IOSIZE_MAX)
		return (EINVAL);
	aiov.iov_base = (void *)(uintptr_t)uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = uap->nbyte;
	auio.uio_segflg = UIO_USERSPACE;
	error = kern_writev(td, uap->fd, &auio);
	return (error);
}

/*
 * Positioned write system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct pwrite_args {
	int	fd;
	const void *buf;
	size_t	nbyte;
	off_t	offset;
};
#endif
int
sys_pwrite(struct thread *td, struct pwrite_args *uap)
{

	return (kern_pwrite(td, uap->fd, uap->buf, uap->nbyte, uap->offset));
}

int
kern_pwrite(struct thread *td, int fd, const void *buf, size_t nbyte,
    off_t offset)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if (nbyte > IOSIZE_MAX)
		return (EINVAL);
	aiov.iov_base = (void *)(uintptr_t)buf;
	aiov.iov_len = nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = nbyte;
	auio.uio_segflg = UIO_USERSPACE;
	error = kern_pwritev(td, fd, &auio, offset);
	return (error);
}

#if defined(COMPAT_FREEBSD6)
int
freebsd6_pwrite(struct thread *td, struct freebsd6_pwrite_args *uap)
{

	return (kern_pwrite(td, uap->fd, uap->buf, uap->nbyte, uap->offset));
}
#endif

/*
 * Gather write system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct writev_args {
	int	fd;
	struct iovec *iovp;
	u_int	iovcnt;
};
#endif
int
sys_writev(struct thread *td, struct writev_args *uap)
{
	struct uio *auio;
	int error;

	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error)
		return (error);
	error = kern_writev(td, uap->fd, auio);
	free(auio, M_IOV);
	return (error);
}

int
kern_writev(struct thread *td, int fd, struct uio *auio)
{
	struct file *fp;
	int error;

	error = fget_write(td, fd, &cap_write_rights, &fp);
	if (error)
		return (error);
	error = dofilewrite(td, fd, fp, auio, (off_t)-1, 0);
	fdrop(fp, td);
	return (error);
}

/*
 * Gather positioned write system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct pwritev_args {
	int	fd;
	struct iovec *iovp;
	u_int	iovcnt;
	off_t	offset;
};
#endif
int
sys_pwritev(struct thread *td, struct pwritev_args *uap)
{
	struct uio *auio;
	int error;

	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error)
		return (error);
	error = kern_pwritev(td, uap->fd, auio, uap->offset);
	free(auio, M_IOV);
	return (error);
}

int
kern_pwritev(struct thread *td, int fd, struct uio *auio, off_t offset)
{
	struct file *fp;
	int error;

	error = fget_write(td, fd, &cap_pwrite_rights, &fp);
	if (error)
		return (error);
	if (!(fp->f_ops->fo_flags & DFLAG_SEEKABLE))
		error = ESPIPE;
	else if (offset < 0 &&
	    (fp->f_vnode == NULL || fp->f_vnode->v_type != VCHR))
		error = EINVAL;
	else
		error = dofilewrite(td, fd, fp, auio, offset, FOF_OFFSET);
	fdrop(fp, td);
	return (error);
}

/*
 * Common code for writev and pwritev that writes data to
 * a file using the passed in uio, offset, and flags.
 */
static int
dofilewrite(struct thread *td, int fd, struct file *fp, struct uio *auio,
    off_t offset, int flags)
{
	ssize_t cnt;
	int error;
#ifdef KTRACE
	struct uio *ktruio = NULL;
#endif

	AUDIT_ARG_FD(fd);
	auio->uio_rw = UIO_WRITE;
	auio->uio_td = td;
	auio->uio_offset = offset;
#ifdef KTRACE
	if (KTRPOINT(td, KTR_GENIO))
		ktruio = cloneuio(auio);
#endif
	cnt = auio->uio_resid;
	if ((error = fo_write(fp, auio, td->td_ucred, flags, td))) {
		if (auio->uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
		/* Socket layer is responsible for issuing SIGPIPE. */
		if (fp->f_type != DTYPE_SOCKET && error == EPIPE) {
			PROC_LOCK(td->td_proc);
			tdsignal(td, SIGPIPE);
			PROC_UNLOCK(td->td_proc);
		}
	}
	cnt -= auio->uio_resid;
#ifdef KTRACE
	if (ktruio != NULL) {
		ktruio->uio_resid = cnt;
		ktrgenio(fd, UIO_WRITE, ktruio, error);
	}
#endif
	td->td_retval[0] = cnt;
	return (error);
}

/*
 * Truncate a file given a file descriptor.
 *
 * Can't use fget_write() here, since must return EINVAL and not EBADF if the
 * descriptor isn't writable.
 */
int
kern_ftruncate(struct thread *td, int fd, off_t length)
{
	struct file *fp;
	int error;

	AUDIT_ARG_FD(fd);
	if (length < 0)
		return (EINVAL);
	error = fget(td, fd, &cap_ftruncate_rights, &fp);
	if (error)
		return (error);
	AUDIT_ARG_FILE(td->td_proc, fp);
	if (!(fp->f_flag & FWRITE)) {
		fdrop(fp, td);
		return (EINVAL);
	}
	error = fo_truncate(fp, length, td->td_ucred, td);
	fdrop(fp, td);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ftruncate_args {
	int	fd;
	off_t	length;
};
#endif
int
sys_ftruncate(struct thread *td, struct ftruncate_args *uap)
{

	return (kern_ftruncate(td, uap->fd, uap->length));
}

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct oftruncate_args {
	int	fd;
	long	length;
};
#endif
int
oftruncate(struct thread *td, struct oftruncate_args *uap)
{

	return (kern_ftruncate(td, uap->fd, uap->length));
}
#endif /* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct ioctl_args {
	int	fd;
	u_long	com;
	caddr_t	data;
};
#endif
/* ARGSUSED */
int
sys_ioctl(struct thread *td, struct ioctl_args *uap)
{
	u_char smalldata[SYS_IOCTL_SMALL_SIZE] __aligned(SYS_IOCTL_SMALL_ALIGN);
	uint32_t com;
	int arg, error;
	u_int size;
	caddr_t data;

	if (uap->com > 0xffffffff) {
		printf(
		    "WARNING pid %d (%s): ioctl sign-extension ioctl %lx\n",
		    td->td_proc->p_pid, td->td_name, uap->com);
		uap->com &= 0xffffffff;
	}
	com = (uint32_t)uap->com;

	/*
	 * Interpret high order word to find amount of data to be
	 * copied to/from the user's address space.
	 */
	size = IOCPARM_LEN(com);
	if ((size > IOCPARM_MAX) ||
	    ((com & (IOC_VOID | IOC_IN | IOC_OUT)) == 0) ||
#if defined(COMPAT_FREEBSD5) || defined(COMPAT_FREEBSD4) || defined(COMPAT_43)
	    ((com & IOC_OUT) && size == 0) ||
#else
	    ((com & (IOC_IN | IOC_OUT)) && size == 0) ||
#endif
	    ((com & IOC_VOID) && size > 0 && size != sizeof(int)))
		return (ENOTTY);

	if (size > 0) {
		if (com & IOC_VOID) {
			/* Integer argument. */
			arg = (intptr_t)uap->data;
			data = (void *)&arg;
			size = 0;
		} else {
			if (size > SYS_IOCTL_SMALL_SIZE)
				data = malloc((u_long)size, M_IOCTLOPS, M_WAITOK);
			else
				data = smalldata;
		}
	} else
		data = (void *)&uap->data;
	if (com & IOC_IN) {
		error = copyin(uap->data, data, (u_int)size);
		if (error != 0)
			goto out;
	} else if (com & IOC_OUT) {
		/*
		 * Zero the buffer so the user always
		 * gets back something deterministic.
		 */
		bzero(data, size);
	}

	error = kern_ioctl(td, uap->fd, com, data);

	if (error == 0 && (com & IOC_OUT))
		error = copyout(data, uap->data, (u_int)size);

out:
	if (size > SYS_IOCTL_SMALL_SIZE)
		free(data, M_IOCTLOPS);
	return (error);
}
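
/*
 * Worked example of the command-word decoding above: FIONREAD is
 * defined as _IOR('f', 127, int), so IOCPARM_LEN() yields sizeof(int)
 * and IOC_OUT is set; the argument is staged in the small on-stack
 * buffer, zeroed for determinism, and copied back out to uap->data
 * after the handler runs.
 */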

int
kern_ioctl(struct thread *td, int fd, u_long com, caddr_t data)
{
	struct file *fp;
	struct filedesc *fdp;
	int error, tmp, locked;

	AUDIT_ARG_FD(fd);
	AUDIT_ARG_CMD(com);

	fdp = td->td_proc->p_fd;

	switch (com) {
	case FIONCLEX:
	case FIOCLEX:
		FILEDESC_XLOCK(fdp);
		locked = LA_XLOCKED;
		break;
	default:
#ifdef CAPABILITIES
		FILEDESC_SLOCK(fdp);
		locked = LA_SLOCKED;
#else
		locked = LA_UNLOCKED;
#endif
		break;
	}

#ifdef CAPABILITIES
	if ((fp = fget_noref(fdp, fd)) == NULL) {
		error = EBADF;
		goto out;
	}
	if ((error = cap_ioctl_check(fdp, fd, com)) != 0) {
		fp = NULL;	/* fhold() was not called yet */
		goto out;
	}
	if (!fhold(fp)) {
		error = EBADF;
		fp = NULL;
		goto out;
	}
	if (locked == LA_SLOCKED) {
		FILEDESC_SUNLOCK(fdp);
		locked = LA_UNLOCKED;
	}
#else
	error = fget(td, fd, &cap_ioctl_rights, &fp);
	if (error != 0) {
		fp = NULL;
		goto out;
	}
#endif

	if ((fp->f_flag & (FREAD | FWRITE)) == 0) {
		error = EBADF;
		goto out;
	}

	switch (com) {
	case FIONCLEX:
		fdp->fd_ofiles[fd].fde_flags &= ~UF_EXCLOSE;
		goto out;
	case FIOCLEX:
		fdp->fd_ofiles[fd].fde_flags |= UF_EXCLOSE;
		goto out;
	case FIONBIO:
		if ((tmp = *(int *)data))
			atomic_set_int(&fp->f_flag, FNONBLOCK);
		else
			atomic_clear_int(&fp->f_flag, FNONBLOCK);
		data = (void *)&tmp;
		break;
	case FIOASYNC:
		if ((tmp = *(int *)data))
			atomic_set_int(&fp->f_flag, FASYNC);
		else
			atomic_clear_int(&fp->f_flag, FASYNC);
		data = (void *)&tmp;
		break;
	}

	error = fo_ioctl(fp, com, data, td->td_ucred, td);
out:
	switch (locked) {
	case LA_XLOCKED:
		FILEDESC_XUNLOCK(fdp);
		break;
#ifdef CAPABILITIES
	case LA_SLOCKED:
		FILEDESC_SUNLOCK(fdp);
		break;
#endif
	default:
		FILEDESC_UNLOCK_ASSERT(fdp);
		break;
	}
	if (fp != NULL)
		fdrop(fp, td);
	return (error);
}

int
sys_posix_fallocate(struct thread *td, struct posix_fallocate_args *uap)
{
	int error;

	error = kern_posix_fallocate(td, uap->fd, uap->offset, uap->len);
	return (kern_posix_error(td, error));
}

int
kern_posix_fallocate(struct thread *td, int fd, off_t offset, off_t len)
{
	struct file *fp;
	int error;

	AUDIT_ARG_FD(fd);
	if (offset < 0 || len <= 0)
		return (EINVAL);
	/* Check for wrap. */
	if (offset > OFF_MAX - len)
		return (EFBIG);
	error = fget(td, fd, &cap_pwrite_rights, &fp);
	if (error != 0)
		return (error);
	AUDIT_ARG_FILE(td->td_proc, fp);
	if ((fp->f_ops->fo_flags & DFLAG_SEEKABLE) == 0) {
		error = ESPIPE;
		goto out;
	}
	if ((fp->f_flag & FWRITE) == 0) {
		error = EBADF;
		goto out;
	}

	error = fo_fallocate(fp, offset, len, td);
out:
	fdrop(fp, td);
	return (error);
}

int
sys_fspacectl(struct thread *td, struct fspacectl_args *uap)
{
	struct spacectl_range rqsr, rmsr;
	int error, cerror;

	error = copyin(uap->rqsr, &rqsr, sizeof(rqsr));
	if (error != 0)
		return (error);

	error = kern_fspacectl(td, uap->fd, uap->cmd, &rqsr, uap->flags,
	    &rmsr);
	if (uap->rmsr != NULL) {
		cerror = copyout(&rmsr, uap->rmsr, sizeof(rmsr));
		if (error == 0)
			error = cerror;
	}
	return (error);
}

int
kern_fspacectl(struct thread *td, int fd, int cmd,
    const struct spacectl_range *rqsr, int flags, struct spacectl_range *rmsrp)
{
	struct file *fp;
	struct spacectl_range rmsr;
	int error;

	AUDIT_ARG_FD(fd);
	AUDIT_ARG_CMD(cmd);
	AUDIT_ARG_FFLAGS(flags);

	if (rqsr == NULL)
		return (EINVAL);
	rmsr = *rqsr;
	if (rmsrp != NULL)
		*rmsrp = rmsr;

	if (cmd != SPACECTL_DEALLOC ||
	    rqsr->r_offset < 0 || rqsr->r_len <= 0 ||
	    rqsr->r_offset > OFF_MAX - rqsr->r_len ||
	    (flags & ~SPACECTL_F_SUPPORTED) != 0)
		return (EINVAL);

	error = fget_write(td, fd, &cap_pwrite_rights, &fp);
	if (error != 0)
		return (error);
	AUDIT_ARG_FILE(td->td_proc, fp);
	if ((fp->f_ops->fo_flags & DFLAG_SEEKABLE) == 0) {
		error = ESPIPE;
		goto out;
	}
	if ((fp->f_flag & FWRITE) == 0) {
		error = EBADF;
		goto out;
	}

	error = fo_fspacectl(fp, cmd, &rmsr.r_offset, &rmsr.r_len, flags,
	    td->td_ucred, td);
	/* fspacectl is not restarted after signals if the file is modified. */
	if (rmsr.r_len != rqsr->r_len && (error == ERESTART ||
	    error == EINTR || error == EWOULDBLOCK))
		error = 0;
	if (rmsrp != NULL)
		*rmsrp = rmsr;
out:
	fdrop(fp, td);
	return (error);
}

int
kern_specialfd(struct thread *td, int type, void *arg)
{
	struct file *fp;
	struct specialfd_eventfd *ae;
	int error, fd, fflags;

	fflags = 0;
	error = falloc_noinstall(td, &fp);
	if (error != 0)
		return (error);

	switch (type) {
	case SPECIALFD_EVENTFD:
		ae = arg;
		if ((ae->flags & EFD_CLOEXEC) != 0)
			fflags |= O_CLOEXEC;
		error = eventfd_create_file(td, fp, ae->initval, ae->flags);
		break;
	default:
		error = EINVAL;
		break;
	}

	if (error == 0)
		error = finstall(td, fp, &fd, fflags, NULL);
	fdrop(fp, td);
	if (error == 0)
		td->td_retval[0] = fd;
	return (error);
}

int
sys___specialfd(struct thread *td, struct __specialfd_args *args)
{
	struct specialfd_eventfd ae;
	int error;

	switch (args->type) {
	case SPECIALFD_EVENTFD:
		if (args->len != sizeof(struct specialfd_eventfd)) {
			error = EINVAL;
			break;
		}
		error = copyin(args->req, &ae, sizeof(ae));
		if (error != 0)
			break;
		if ((ae.flags & ~(EFD_CLOEXEC | EFD_NONBLOCK |
		    EFD_SEMAPHORE)) != 0) {
			error = EINVAL;
			break;
		}
		error = kern_specialfd(td, args->type, &ae);
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

int
poll_no_poll(int events)
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 */
	if (events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}
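
/*
 * Informal example: a backend with no poll method reports
 * POLLIN | POLLOUT as always ready, but a request for an extended
 * event such as POLLATTRIB falls outside POLLSTANDARD and yields
 * POLLNVAL, letting callers probe for the extended functionality.
 */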

int
sys_pselect(struct thread *td, struct pselect_args *uap)
{
	struct timespec ts;
	struct timeval tv, *tvp;
	sigset_t set, *uset;
	int error;

	if (uap->ts != NULL) {
		error = copyin(uap->ts, &ts, sizeof(ts));
		if (error != 0)
			return (error);
		TIMESPEC_TO_TIMEVAL(&tv, &ts);
		tvp = &tv;
	} else
		tvp = NULL;
	if (uap->sm != NULL) {
		error = copyin(uap->sm, &set, sizeof(set));
		if (error != 0)
			return (error);
		uset = &set;
	} else
		uset = NULL;
	return (kern_pselect(td, uap->nd, uap->in, uap->ou, uap->ex, tvp,
	    uset, NFDBITS));
}

int
kern_pselect(struct thread *td, int nd, fd_set *in, fd_set *ou, fd_set *ex,
    struct timeval *tvp, sigset_t *uset, int abi_nfdbits)
{
	int error;

	if (uset != NULL) {
		error = kern_sigprocmask(td, SIG_SETMASK, uset,
		    &td->td_oldsigmask, 0);
		if (error != 0)
			return (error);
		td->td_pflags |= TDP_OLDMASK;
		/*
		 * Make sure that ast() is called on return to
		 * usermode and TDP_OLDMASK is cleared, restoring old
		 * sigmask.
		 */
		ast_sched(td, TDA_SIGSUSPEND);
	}
	error = kern_select(td, nd, in, ou, ex, tvp, abi_nfdbits);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct select_args {
	int	nd;
	fd_set	*in, *ou, *ex;
	struct	timeval *tv;
};
#endif
int
sys_select(struct thread *td, struct select_args *uap)
{
	struct timeval tv, *tvp;
	int error;

	if (uap->tv != NULL) {
		error = copyin(uap->tv, &tv, sizeof(tv));
		if (error)
			return (error);
		tvp = &tv;
	} else
		tvp = NULL;

	return (kern_select(td, uap->nd, uap->in, uap->ou, uap->ex, tvp,
	    NFDBITS));
}

/*
 * In the unlikely case when user specified n greater than the last
 * open file descriptor, check that no bits are set after the last
 * valid fd.  We must return EBADF if any is set.
 *
 * There are applications that rely on the behaviour.
 */
static int
select_check_badfd(fd_set *fd_in, int nd, int ndu, int abi_nfdbits)
{
	char *addr, *oaddr;
	int b, i, res;
	uint8_t bits;

	if (nd >= ndu || fd_in == NULL)
		return (0);

	oaddr = NULL;
	bits = 0; /* silence gcc */
	for (i = nd; i < ndu; i++) {
		b = i / NBBY;
#if BYTE_ORDER == LITTLE_ENDIAN
		addr = (char *)fd_in + b;
#else
		addr = (char *)fd_in;
		if (abi_nfdbits == NFDBITS) {
			addr += rounddown(b, sizeof(fd_mask)) +
			    sizeof(fd_mask) - 1 - b % sizeof(fd_mask);
		} else {
			addr += rounddown(b, sizeof(uint32_t)) +
			    sizeof(uint32_t) - 1 - b % sizeof(uint32_t);
		}
#endif
		if (addr != oaddr) {
			res = fubyte(addr);
			if (res == -1)
				return (EFAULT);
			oaddr = addr;
			bits = res;
		}
		if ((bits & (1 << (i % NBBY))) != 0)
			return (EBADF);
	}
	return (0);
}
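
/*
 * Worked example of the big-endian address math above (assuming 64-bit
 * fd_mask and abi_nfdbits == NFDBITS): for fd 9, b = 1, so addr points
 * at offset rounddown(1, 8) + 8 - 1 - 1 % 8 = 6, i.e. the byte that
 * holds bits 8-15 of the first mask word when stored big-endian.
 */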

int
kern_select(struct thread *td, int nd, fd_set *fd_in, fd_set *fd_ou,
    fd_set *fd_ex, struct timeval *tvp, int abi_nfdbits)
{
	struct filedesc *fdp;
	/*
	 * The magic 2048 here is chosen to be just enough for FD_SETSIZE
	 * infds with the new FD_SETSIZE of 1024, and more than enough for
	 * FD_SETSIZE infds, outfds and exceptfds with the old FD_SETSIZE
	 * of 256.
	 */
	fd_mask s_selbits[howmany(2048, NFDBITS)];
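	/*
	 * Informal arithmetic for the bound above: each non-NULL set
	 * needs an input and an output copy, so one 1024-bit set costs
	 * 2 * 1024 = 2048 bits, and three 256-bit sets cost
	 * 3 * 2 * 256 = 1536 bits; both fit the 2048-bit buffer.
	 */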
	fd_mask *ibits[3], *obits[3], *selbits, *sbp;
	struct timeval rtv;
	sbintime_t asbt, precision, rsbt;
	u_int nbufbytes, ncpbytes, ncpubytes, nfdbits;
	int error, lf, ndu;

	if (nd < 0)
		return (EINVAL);
	fdp = td->td_proc->p_fd;
	ndu = nd;
	lf = fdp->fd_nfiles;
	if (nd > lf)
		nd = lf;

	error = select_check_badfd(fd_in, nd, ndu, abi_nfdbits);
	if (error != 0)
		return (error);
	error = select_check_badfd(fd_ou, nd, ndu, abi_nfdbits);
	if (error != 0)
		return (error);
	error = select_check_badfd(fd_ex, nd, ndu, abi_nfdbits);
	if (error != 0)
		return (error);

	/*
	 * Allocate just enough bits for the non-null fd_sets.  Use the
	 * preallocated auto buffer if possible.
	 */
	nfdbits = roundup(nd, NFDBITS);
	ncpbytes = nfdbits / NBBY;
	ncpubytes = roundup(nd, abi_nfdbits) / NBBY;
	nbufbytes = 0;
	if (fd_in != NULL)
		nbufbytes += 2 * ncpbytes;
	if (fd_ou != NULL)
		nbufbytes += 2 * ncpbytes;
	if (fd_ex != NULL)
		nbufbytes += 2 * ncpbytes;
	if (nbufbytes <= sizeof s_selbits)
		selbits = &s_selbits[0];
	else
		selbits = malloc(nbufbytes, M_SELECT, M_WAITOK);

	/*
	 * Assign pointers into the bit buffers and fetch the input bits.
	 * Put the output buffers together so that they can be bzeroed
	 * together.
	 */
	sbp = selbits;
#define	getbits(name, x) \
	do {								\
		if (name == NULL) {					\
			ibits[x] = NULL;				\
			obits[x] = NULL;				\
		} else {						\
			ibits[x] = sbp + nbufbytes / 2 / sizeof *sbp;	\
			obits[x] = sbp;					\
			sbp += ncpbytes / sizeof *sbp;			\
			error = copyin(name, ibits[x], ncpubytes);	\
			if (error != 0)					\
				goto done;				\
			if (ncpbytes != ncpubytes)			\
				bzero((char *)ibits[x] + ncpubytes,	\
				    ncpbytes - ncpubytes);		\
		}							\
	} while (0)
	getbits(fd_in, 0);
	getbits(fd_ou, 1);
	getbits(fd_ex, 2);
#undef	getbits

#if BYTE_ORDER == BIG_ENDIAN && defined(__LP64__)
	/*
	 * XXX: swizzle_fdset assumes that if abi_nfdbits != NFDBITS,
	 * we are running under 32-bit emulation.  This should be more
	 * generic.
	 */
#define	swizzle_fdset(bits)						\
	if (abi_nfdbits != NFDBITS && bits != NULL) {			\
		int i;							\
		for (i = 0; i < ncpbytes / sizeof *sbp; i++)		\
			bits[i] = (bits[i] >> 32) | (bits[i] << 32);	\
	}
#else
#define	swizzle_fdset(bits)
#endif

	/* Make sure the bit order makes it through an ABI transition */
	swizzle_fdset(ibits[0]);
	swizzle_fdset(ibits[1]);
	swizzle_fdset(ibits[2]);

	if (nbufbytes != 0)
		bzero(selbits, nbufbytes / 2);

	precision = 0;
	if (tvp != NULL) {
		rtv = *tvp;
		if (rtv.tv_sec < 0 || rtv.tv_usec < 0 ||
		    rtv.tv_usec >= 1000000) {
			error = EINVAL;
			goto done;
		}
		if (!timevalisset(&rtv))
			asbt = 0;
		else if (rtv.tv_sec <= INT32_MAX) {
			rsbt = tvtosbt(rtv);
			precision = rsbt;
			precision >>= tc_precexp;
			if (TIMESEL(&asbt, rsbt))
				asbt += tc_tick_sbt;
			if (asbt <= SBT_MAX - rsbt)
				asbt += rsbt;
			else
				asbt = -1;
		} else
			asbt = -1;
	} else
		asbt = -1;
	seltdinit(td);
	/* Iterate until the timeout expires or descriptors become ready. */
	for (;;) {
		error = selscan(td, ibits, obits, nd);
		if (error || td->td_retval[0] != 0)
			break;
		error = seltdwait(td, asbt, precision);
		if (error)
			break;
		error = selrescan(td, ibits, obits);
		if (error || td->td_retval[0] != 0)
			break;
	}
	seltdclear(td);

done:
	/* select is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;

	/* swizzle bit order back, if necessary */
	swizzle_fdset(obits[0]);
	swizzle_fdset(obits[1]);
	swizzle_fdset(obits[2]);
#undef swizzle_fdset

#define	putbits(name, x) \
	if (name && (error2 = copyout(obits[x], name, ncpubytes))) \
		error = error2;
	if (error == 0) {
		int error2;

		putbits(fd_in, 0);
		putbits(fd_ou, 1);
		putbits(fd_ex, 2);
#undef putbits
	}
	if (selbits != &s_selbits[0])
		free(selbits, M_SELECT);

	return (error);
}

/*
 * Convert a select bit set to poll flags.
 *
 * The backend always returns POLLHUP/POLLERR if appropriate and we
 * return this as a set bit in any set.
 */
static const int select_flags[3] = {
    POLLRDNORM | POLLHUP | POLLERR,
    POLLWRNORM | POLLHUP | POLLERR,
    POLLRDBAND | POLLERR
};

/*
 * Compute the fo_poll flags required for a fd given by the index and
 * bit position in the fd_mask array.
 */
static __inline int
selflags(fd_mask **ibits, int idx, fd_mask bit)
{
	int flags;
	int msk;

	flags = 0;
	for (msk = 0; msk < 3; msk++) {
		if (ibits[msk] == NULL)
			continue;
		if ((ibits[msk][idx] & bit) == 0)
			continue;
		flags |= select_flags[msk];
	}
	return (flags);
}

/*
 * Set the appropriate output bits given a mask of fired events and the
 * input bits originally requested.
 */
static __inline int
selsetbits(fd_mask **ibits, fd_mask **obits, int idx, fd_mask bit, int events)
{
	int msk;
	int n;

	n = 0;
	for (msk = 0; msk < 3; msk++) {
		if ((events & select_flags[msk]) == 0)
			continue;
		if (ibits[msk] == NULL)
			continue;
		if ((ibits[msk][idx] & bit) == 0)
			continue;
		/*
		 * XXX Check for a duplicate set.  This can occur because a
		 * socket calls selrecord() twice for each poll() call
		 * resulting in two selfds per real fd.  selrescan() will
		 * call selsetbits twice as a result.
		 */
		if ((obits[msk][idx] & bit) != 0)
			continue;
		obits[msk][idx] |= bit;
		n++;
	}

	return (n);
}
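
/*
 * Informal example: if fd 5 appears only in the read set, selflags()
 * returns POLLRDNORM | POLLHUP | POLLERR; should the descriptor hang
 * up, the backend reports POLLHUP and selsetbits() still marks fd 5
 * readable, matching select(2)'s convention of signaling EOF as
 * "ready to read".
 */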

/*
 * Traverse the list of fds attached to this thread's seltd and check for
 * completion.
 */
static int
selrescan(struct thread *td, fd_mask **ibits, fd_mask **obits)
{
	struct filedesc *fdp;
	struct selinfo *si;
	struct seltd *stp;
	struct selfd *sfp;
	struct selfd *sfn;
	struct file *fp;
	fd_mask bit;
	int fd, ev, n, idx;
	int error;
	bool only_user;

	fdp = td->td_proc->p_fd;
	stp = td->td_sel;
	n = 0;
	only_user = FILEDESC_IS_ONLY_USER(fdp);
	STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn) {
		fd = (int)(uintptr_t)sfp->sf_cookie;
		si = sfp->sf_si;
		selfdfree(stp, sfp);
		/* If the selinfo wasn't cleared the event didn't fire. */
		if (si != NULL)
			continue;
		if (only_user)
			error = fget_only_user(fdp, fd, &cap_event_rights, &fp);
		else
			error = fget_unlocked(td, fd, &cap_event_rights, &fp);
		if (__predict_false(error != 0))
			return (error);
		idx = fd / NFDBITS;
		bit = (fd_mask)1 << (fd % NFDBITS);
		ev = fo_poll(fp, selflags(ibits, idx, bit), td->td_ucred, td);
		if (only_user)
			fput_only_user(fdp, fp);
		else
			fdrop(fp, td);
		if (ev != 0)
			n += selsetbits(ibits, obits, idx, bit, ev);
	}
	stp->st_flags = 0;
	td->td_retval[0] = n;
	return (0);
}

/*
 * Perform the initial filedescriptor scan and register ourselves with
 * each selinfo.
 */
static int
selscan(struct thread *td, fd_mask **ibits, fd_mask **obits, int nfd)
{
	struct filedesc *fdp;
	struct file *fp;
	fd_mask bit;
	int ev, flags, end, fd;
	int n, idx;
	int error;
	bool only_user;

	fdp = td->td_proc->p_fd;
	n = 0;
	only_user = FILEDESC_IS_ONLY_USER(fdp);
	for (idx = 0, fd = 0; fd < nfd; idx++) {
		end = imin(fd + NFDBITS, nfd);
		for (bit = 1; fd < end; bit <<= 1, fd++) {
			/* Compute the list of events we're interested in. */
			flags = selflags(ibits, idx, bit);
			if (flags == 0)
				continue;
			if (only_user)
				error = fget_only_user(fdp, fd,
				    &cap_event_rights, &fp);
			else
				error = fget_unlocked(td, fd,
				    &cap_event_rights, &fp);
			if (__predict_false(error != 0))
				return (error);
			selfdalloc(td, (void *)(uintptr_t)fd);
			ev = fo_poll(fp, flags, td->td_ucred, td);
			if (only_user)
				fput_only_user(fdp, fp);
			else
				fdrop(fp, td);
			if (ev != 0)
				n += selsetbits(ibits, obits, idx, bit, ev);
		}
	}

	td->td_retval[0] = n;
	return (0);
}

int
sys_poll(struct thread *td, struct poll_args *uap)
{
	struct timespec ts, *tsp;

	if (uap->timeout != INFTIM) {
		if (uap->timeout < 0)
			return (EINVAL);
		ts.tv_sec = uap->timeout / 1000;
		ts.tv_nsec = (uap->timeout % 1000) * 1000000;
		tsp = &ts;
	} else
		tsp = NULL;

	return (kern_poll(td, uap->fds, uap->nfds, tsp, NULL));
}

/*
 * kfds points to an array in the kernel.
 */
int
kern_poll_kfds(struct thread *td, struct pollfd *kfds, u_int nfds,
    struct timespec *tsp, sigset_t *uset)
{
	sbintime_t sbt, precision, tmp;
	time_t over;
	struct timespec ts;
	int error;

	precision = 0;
	if (tsp != NULL) {
		if (!timespecvalid_interval(tsp))
			return (EINVAL);
		if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
			sbt = 0;
		else {
			ts = *tsp;
			if (ts.tv_sec > INT32_MAX / 2) {
				over = ts.tv_sec - INT32_MAX / 2;
				ts.tv_sec -= over;
			} else
				over = 0;
			tmp = tstosbt(ts);
			precision = tmp;
			precision >>= tc_precexp;
			if (TIMESEL(&sbt, tmp))
				sbt += tc_tick_sbt;
			sbt += tmp;
		}
	} else
		sbt = -1;

	if (uset != NULL) {
		error = kern_sigprocmask(td, SIG_SETMASK, uset,
		    &td->td_oldsigmask, 0);
		if (error)
			return (error);
		td->td_pflags |= TDP_OLDMASK;
		/*
		 * Make sure that ast() is called on return to
		 * usermode and TDP_OLDMASK is cleared, restoring old
		 * sigmask.
		 */
		ast_sched(td, TDA_SIGSUSPEND);
	}

	seltdinit(td);
	/* Iterate until the timeout expires or descriptors become ready. */
	for (;;) {
		error = pollscan(td, kfds, nfds);
		if (error || td->td_retval[0] != 0)
			break;
		error = seltdwait(td, sbt, precision);
		if (error)
			break;
		error = pollrescan(td);
		if (error || td->td_retval[0] != 0)
			break;
	}
	seltdclear(td);

	/* poll is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;
	return (error);
}

int
sys_ppoll(struct thread *td, struct ppoll_args *uap)
{
	struct timespec ts, *tsp;
	sigset_t set, *ssp;
	int error;

	if (uap->ts != NULL) {
		error = copyin(uap->ts, &ts, sizeof(ts));
		if (error)
			return (error);
		tsp = &ts;
	} else
		tsp = NULL;
	if (uap->set != NULL) {
		error = copyin(uap->set, &set, sizeof(set));
		if (error)
			return (error);
		ssp = &set;
	} else
		ssp = NULL;
	return (kern_poll(td, uap->fds, uap->nfds, tsp, ssp));
}

/*
 * ufds points to an array in user space.
 */
int
kern_poll(struct thread *td, struct pollfd *ufds, u_int nfds,
    struct timespec *tsp, sigset_t *set)
{
	struct pollfd *kfds;
	struct pollfd stackfds[32];
	int error;

	if (kern_poll_maxfds(nfds))
		return (EINVAL);
	if (nfds > nitems(stackfds))
		kfds = mallocarray(nfds, sizeof(*kfds), M_TEMP, M_WAITOK);
	else
		kfds = stackfds;
	error = copyin(ufds, kfds, nfds * sizeof(*kfds));
	if (error != 0)
		goto out;

	error = kern_poll_kfds(td, kfds, nfds, tsp, set);
	if (error == 0)
		error = pollout(td, kfds, ufds, nfds);

out:
	if (nfds > nitems(stackfds))
		free(kfds, M_TEMP);
	return (error);
}

bool
kern_poll_maxfds(u_int nfds)
{

	/*
	 * This is kinda bogus.  We have fd limits, but that is not
	 * really related to the size of the pollfd array.  Make sure
	 * we let the process use at least FD_SETSIZE entries and at
	 * least enough for the system-wide limits.  We want to be reasonably
	 * safe, but not overly restrictive.
	 */
	return (nfds > maxfilesperproc && nfds > FD_SETSIZE);
}
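
/*
 * Informal example: a request is rejected only when it exceeds both
 * bounds, so nfds == 1500 passes despite exceeding FD_SETSIZE (1024)
 * because it is normally well under maxfilesperproc, while a process
 * on a system with a small maxfilesperproc can still poll up to
 * FD_SETSIZE entries.
 */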

static int
pollrescan(struct thread *td)
{
	struct seltd *stp;
	struct selfd *sfp;
	struct selfd *sfn;
	struct selinfo *si;
	struct filedesc *fdp;
	struct file *fp;
	struct pollfd *fd;
	int n, error;
	bool only_user;

	n = 0;
	fdp = td->td_proc->p_fd;
	stp = td->td_sel;
	only_user = FILEDESC_IS_ONLY_USER(fdp);
	STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn) {
		fd = (struct pollfd *)sfp->sf_cookie;
		si = sfp->sf_si;
		selfdfree(stp, sfp);
		/* If the selinfo wasn't cleared the event didn't fire. */
		if (si != NULL)
			continue;
		if (only_user)
			error = fget_only_user(fdp, fd->fd,
			    &cap_event_rights, &fp);
		else
			error = fget_unlocked(td, fd->fd,
			    &cap_event_rights, &fp);
		if (__predict_false(error != 0)) {
			fd->revents = POLLNVAL;
			n++;
			continue;
		}
		/*
		 * Note: backend also returns POLLHUP and
		 * POLLERR if appropriate.
		 */
		fd->revents = fo_poll(fp, fd->events, td->td_ucred, td);
		if (only_user)
			fput_only_user(fdp, fp);
		else
			fdrop(fp, td);
		if (fd->revents != 0)
			n++;
	}
	stp->st_flags = 0;
	td->td_retval[0] = n;
	return (0);
}

static int
pollout(struct thread *td, struct pollfd *fds, struct pollfd *ufds, u_int nfd)
{
	int error = 0;
	u_int i = 0;
	u_int n = 0;

	for (i = 0; i < nfd; i++) {
		error = copyout(&fds->revents, &ufds->revents,
		    sizeof(ufds->revents));
		if (error)
			return (error);
		if (fds->revents != 0)
			n++;
		fds++;
		ufds++;
	}
	td->td_retval[0] = n;
	return (0);
}

static int
pollscan(struct thread *td, struct pollfd *fds, u_int nfd)
{
	struct filedesc *fdp;
	struct file *fp;
	int i, n, error;
	bool only_user;

	n = 0;
	fdp = td->td_proc->p_fd;
	only_user = FILEDESC_IS_ONLY_USER(fdp);
	for (i = 0; i < nfd; i++, fds++) {
		if (fds->fd < 0) {
			fds->revents = 0;
			continue;
		}
		if (only_user)
			error = fget_only_user(fdp, fds->fd,
			    &cap_event_rights, &fp);
		else
			error = fget_unlocked(td, fds->fd,
			    &cap_event_rights, &fp);
		if (__predict_false(error != 0)) {
			fds->revents = POLLNVAL;
			n++;
		} else {
			/*
			 * Note: backend also returns POLLHUP and
			 * POLLERR if appropriate.
			 */
			selfdalloc(td, fds);
			fds->revents = fo_poll(fp, fds->events,
			    td->td_ucred, td);
			if (only_user)
				fput_only_user(fdp, fp);
			else
				fdrop(fp, td);
			/*
			 * POSIX requires POLLOUT to be never
			 * set simultaneously with POLLHUP.
			 */
			if ((fds->revents & POLLHUP) != 0)
				fds->revents &= ~POLLOUT;

			if (fds->revents != 0)
				n++;
		}
	}
	td->td_retval[0] = n;
	return (0);
}

/*
 * XXX This was created specifically to support netncp and netsmb.  This
 * allows the caller to specify a socket to wait for events on.  It returns
 * 0 if any events matched and an error otherwise.  There is no way to
 * determine which events fired.
 */
int
selsocket(struct socket *so, int events, struct timeval *tvp, struct thread *td)
{
	struct timeval rtv;
	sbintime_t asbt, precision, rsbt;
	int error;

	precision = 0;	/* stupid gcc! */
	if (tvp != NULL) {
		rtv = *tvp;
		if (rtv.tv_sec < 0 || rtv.tv_usec < 0 ||
		    rtv.tv_usec >= 1000000)
			return (EINVAL);
		if (!timevalisset(&rtv))
			asbt = 0;
		else if (rtv.tv_sec <= INT32_MAX) {
			rsbt = tvtosbt(rtv);
			precision = rsbt;
			precision >>= tc_precexp;
			if (TIMESEL(&asbt, rsbt))
				asbt += tc_tick_sbt;
			if (asbt <= SBT_MAX - rsbt)
				asbt += rsbt;
			else
				asbt = -1;
		} else
			asbt = -1;
	} else
		asbt = -1;
	seltdinit(td);
	/*
	 * Iterate until the timeout expires or the socket becomes ready.
	 */
	for (;;) {
		selfdalloc(td, NULL);
		if (sopoll(so, events, NULL, td) != 0) {
			error = 0;
			break;
		}
		error = seltdwait(td, asbt, precision);
		if (error)
			break;
	}
	seltdclear(td);
	/* XXX Duplicates ncp/smb behavior. */
	if (error == ERESTART)
		error = 0;
	return (error);
}

/*
 * Preallocate two selfds associated with 'cookie'.  Some fo_poll routines
 * have two select sets, one for read and another for write.
 */
static void
selfdalloc(struct thread *td, void *cookie)
{
	struct seltd *stp;

	stp = td->td_sel;
	if (stp->st_free1 == NULL)
		stp->st_free1 = malloc(sizeof(*stp->st_free1), M_SELFD,
		    M_WAITOK | M_ZERO);
	stp->st_free1->sf_td = stp;
	stp->st_free1->sf_cookie = cookie;
	if (stp->st_free2 == NULL)
		stp->st_free2 = malloc(sizeof(*stp->st_free2), M_SELFD,
		    M_WAITOK | M_ZERO);
	stp->st_free2->sf_td = stp;
	stp->st_free2->sf_cookie = cookie;
}

static void
selfdfree(struct seltd *stp, struct selfd *sfp)
{
	STAILQ_REMOVE(&stp->st_selq, sfp, selfd, sf_link);
	/*
	 * Paired with doselwakeup.
	 */
	if (atomic_load_acq_ptr((uintptr_t *)&sfp->sf_si) != (uintptr_t)NULL) {
		mtx_lock(sfp->sf_mtx);
		if (sfp->sf_si != NULL) {
			TAILQ_REMOVE(&sfp->sf_si->si_tdlist, sfp, sf_threads);
		}
		mtx_unlock(sfp->sf_mtx);
	}
	free(sfp, M_SELFD);
}

/* Drain the waiters tied to all the selfd belonging to the specified selinfo. */
void
seldrain(struct selinfo *sip)
{

	/*
	 * This feature is already provided by doselwakeup(), thus it is
	 * enough to go for it.
	 * Eventually, the context should take care to avoid races
	 * between threads calling select()/poll() and file descriptor
	 * detaching, but, again, the races are just the same as for
	 * selwakeup().
	 */
	doselwakeup(sip, -1);
}

/*
 * Record a select request.
 */
void
selrecord(struct thread *selector, struct selinfo *sip)
{
	struct selfd *sfp;
	struct seltd *stp;
	struct mtx *mtxp;

	stp = selector->td_sel;
	/*
	 * Don't record when doing a rescan.
	 */
	if (stp->st_flags & SELTD_RESCAN)
		return;
	/*
	 * Grab one of the preallocated descriptors.
	 */
	if ((sfp = stp->st_free1) != NULL)
		stp->st_free1 = NULL;
	else if ((sfp = stp->st_free2) != NULL)
		stp->st_free2 = NULL;
	else
		panic("selrecord: No free selfd on selq");
	mtxp = sip->si_mtx;
	if (mtxp == NULL)
		mtxp = mtx_pool_find(mtxpool_select, sip);
	/*
	 * Initialize the sfp and queue it in the thread.
	 */
	sfp->sf_si = sip;
	sfp->sf_mtx = mtxp;
	STAILQ_INSERT_TAIL(&stp->st_selq, sfp, sf_link);
	/*
	 * Now that we've locked the sip, check for initialization.
	 */
	mtx_lock(mtxp);
	if (sip->si_mtx == NULL) {
		sip->si_mtx = mtxp;
		TAILQ_INIT(&sip->si_tdlist);
	}
	/*
	 * Add this thread to the list of selfds listening on this selinfo.
	 */
	TAILQ_INSERT_TAIL(&sip->si_tdlist, sfp, sf_threads);
	mtx_unlock(sip->si_mtx);
}

/* Wake up a selecting thread. */
void
selwakeup(struct selinfo *sip)
{
	doselwakeup(sip, -1);
}

/* Wake up a selecting thread, and set its priority. */
void
selwakeuppri(struct selinfo *sip, int pri)
{
	doselwakeup(sip, pri);
}

/*
 * Do a wakeup when a selectable event occurs.
 */
static void
doselwakeup(struct selinfo *sip, int pri)
{
	struct selfd *sfp;
	struct selfd *sfn;
	struct seltd *stp;

	/* If it's not initialized there can't be any waiters. */
	if (sip->si_mtx == NULL)
		return;
	/*
	 * Locking the selinfo locks all selfds associated with it.
	 */
	mtx_lock(sip->si_mtx);
	TAILQ_FOREACH_SAFE(sfp, &sip->si_tdlist, sf_threads, sfn) {
		/*
		 * Once we remove this sfp from the list and clear the
		 * sf_si seltdclear will know to ignore this si.
		 */
		TAILQ_REMOVE(&sip->si_tdlist, sfp, sf_threads);
		stp = sfp->sf_td;
		mtx_lock(&stp->st_mtx);
		stp->st_flags |= SELTD_PENDING;
		cv_broadcastpri(&stp->st_wait, pri);
		mtx_unlock(&stp->st_mtx);
		/*
		 * Paired with selfdfree.
		 *
		 * Storing this only after the wakeup provides an invariant
		 * that stp is not used after selfdfree returns.
		 */
		atomic_store_rel_ptr((uintptr_t *)&sfp->sf_si,
		    (uintptr_t)NULL);
	}
	mtx_unlock(sip->si_mtx);
}

static void
seltdinit(struct thread *td)
{
	struct seltd *stp;

	stp = td->td_sel;
	if (stp != NULL) {
		MPASS(stp->st_flags == 0);
		MPASS(STAILQ_EMPTY(&stp->st_selq));
		return;
	}
	stp = malloc(sizeof(*stp), M_SELECT, M_WAITOK | M_ZERO);
	mtx_init(&stp->st_mtx, "sellck", NULL, MTX_DEF);
	cv_init(&stp->st_wait, "select");
	stp->st_flags = 0;
	STAILQ_INIT(&stp->st_selq);
	td->td_sel = stp;
}

static int
seltdwait(struct thread *td, sbintime_t sbt, sbintime_t precision)
{
	struct seltd *stp;
	int error;

	stp = td->td_sel;
	/*
	 * An event of interest may occur while we do not hold the seltd
	 * locked so check the pending flag before we sleep.
	 */
	mtx_lock(&stp->st_mtx);
	/*
	 * Any further calls to selrecord will be a rescan.
	 */
	stp->st_flags |= SELTD_RESCAN;
	if (stp->st_flags & SELTD_PENDING) {
		mtx_unlock(&stp->st_mtx);
		return (0);
	}
	if (sbt == 0)
		error = EWOULDBLOCK;
	else if (sbt != -1)
		error = cv_timedwait_sig_sbt(&stp->st_wait, &stp->st_mtx,
		    sbt, precision, C_ABSOLUTE);
	else
		error = cv_wait_sig(&stp->st_wait, &stp->st_mtx);
	mtx_unlock(&stp->st_mtx);

	return (error);
}

void
seltdfini(struct thread *td)
{
	struct seltd *stp;

	stp = td->td_sel;
	if (stp == NULL)
		return;
	MPASS(stp->st_flags == 0);
	MPASS(STAILQ_EMPTY(&stp->st_selq));
	if (stp->st_free1)
		free(stp->st_free1, M_SELFD);
	if (stp->st_free2)
		free(stp->st_free2, M_SELFD);
	td->td_sel = NULL;
	cv_destroy(&stp->st_wait);
	mtx_destroy(&stp->st_mtx);
	free(stp, M_SELECT);
}

/*
 * Remove the references to the thread from all of the objects we were
 * polling.
 */
static void
seltdclear(struct thread *td)
{
	struct seltd *stp;
	struct selfd *sfp;
	struct selfd *sfn;

	stp = td->td_sel;
	STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn)
		selfdfree(stp, sfp);
	stp->st_flags = 0;
}

static void selectinit(void *);
SYSINIT(select, SI_SUB_SYSCALLS, SI_ORDER_ANY, selectinit, NULL);

/* Setup select data structures */
static void
selectinit(void *dummy __unused)
{

	mtxpool_select = mtx_pool_create("select mtxpool", 128, MTX_DEF);
}

/*
 * Set up a syscall return value that follows the convention specified for
 * posix_* functions.
 */
int
kern_posix_error(struct thread *td, int error)
{

	if (error <= 0)
		return (error);
	td->td_errno = error;
	td->td_pflags |= TDP_NERRNO;
	td->td_retval[0] = error;
	return (0);
}
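
/*
 * Informal usage note: posix_fallocate(2) and friends are specified to
 * return an error number directly rather than -1 with errno, so a
 * positive error lands in td_retval[0] while the syscall itself
 * "succeeds"; as I read it, TDP_NERRNO records that td_errno already
 * holds the real error for consumers such as ktrace.
 */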