/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)sys_generic.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_capsicum.h"
#include "opt_compat.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/capability.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/socketvar.h>
#include <sys/uio.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/resourcevar.h>
#include <sys/selinfo.h>
#include <sys/sleepqueue.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/condvar.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <security/audit/audit.h>

static MALLOC_DEFINE(M_IOCTLOPS, "ioctlops", "ioctl data buffer");
static MALLOC_DEFINE(M_SELECT, "select", "select() buffer");
MALLOC_DEFINE(M_IOV, "iov", "large iov's");

static int	pollout(struct thread *, struct pollfd *, struct pollfd *,
		    u_int);
static int	pollscan(struct thread *, struct pollfd *, u_int);
static int	pollrescan(struct thread *);
static int	selscan(struct thread *, fd_mask **, fd_mask **, int);
static int	selrescan(struct thread *, fd_mask **, fd_mask **);
static void	selfdalloc(struct thread *, void *);
static void	selfdfree(struct seltd *, struct selfd *);
static int	dofileread(struct thread *, int, struct file *, struct uio *,
		    off_t, int);
static int	dofilewrite(struct thread *, int, struct file *, struct uio *,
		    off_t, int);
static void	doselwakeup(struct selinfo *, int);
static void	seltdinit(struct thread *);
static int	seltdwait(struct thread *, int);
static void	seltdclear(struct thread *);

/*
 * One seltd per-thread allocated on demand as needed.
 *
 *	t - protected by st_mtx
 *	k - Only accessed by curthread or read-only
 */
struct seltd {
	STAILQ_HEAD(, selfd)	st_selq;	/* (k) List of selfds. */
	struct selfd		*st_free1;	/* (k) free fd for read set. */
	struct selfd		*st_free2;	/* (k) free fd for write set. */
	struct mtx		st_mtx;		/* Protects struct seltd */
	struct cv		st_wait;	/* (t) Wait channel. */
	int			st_flags;	/* (t) SELTD_ flags. */
};

#define	SELTD_PENDING	0x0001			/* We have pending events. */
#define	SELTD_RESCAN	0x0002			/* Doing a rescan. */

/*
 * One selfd allocated per-thread per-file-descriptor.
 *	f - protected by sf_mtx
 */
struct selfd {
	STAILQ_ENTRY(selfd)	sf_link;	/* (k) fds owned by this td. */
	TAILQ_ENTRY(selfd)	sf_threads;	/* (f) fds on this selinfo. */
	struct selinfo		*sf_si;		/* (f) selinfo when linked. */
	struct mtx		*sf_mtx;	/* Pointer to selinfo mtx. */
	struct seltd		*sf_td;		/* (k) owning seltd. */
	void			*sf_cookie;	/* (k) fd or pollfd. */
};

static uma_zone_t selfd_zone;
static struct mtx_pool *mtxpool_select;

#ifndef _SYS_SYSPROTO_H_
struct read_args {
	int	fd;
	void	*buf;
	size_t	nbyte;
};
#endif
int
sys_read(struct thread *td, struct read_args *uap)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if (uap->nbyte > INT_MAX)
		return (EINVAL);
	aiov.iov_base = uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = uap->nbyte;
	auio.uio_segflg = UIO_USERSPACE;
	error = kern_readv(td, uap->fd, &auio);
	return (error);
}

/*
 * Positioned read system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct pread_args {
	int	fd;
	void	*buf;
	size_t	nbyte;
	off_t	offset;
};
#endif
int
sys_pread(struct thread *td, struct pread_args *uap)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if (uap->nbyte > INT_MAX)
		return (EINVAL);
	aiov.iov_base = uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = uap->nbyte;
	auio.uio_segflg = UIO_USERSPACE;
	error = kern_preadv(td, uap->fd, &auio, uap->offset);
	return (error);
}

int
freebsd6_pread(struct thread *td, struct freebsd6_pread_args *uap)
{
	struct pread_args oargs;

	oargs.fd = uap->fd;
	oargs.buf = uap->buf;
	oargs.nbyte = uap->nbyte;
	oargs.offset = uap->offset;
	return (sys_pread(td, &oargs));
}

/*
 * Scatter read system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct readv_args {
	int	fd;
	struct	iovec *iovp;
	u_int	iovcnt;
};
#endif
int
sys_readv(struct thread *td, struct readv_args *uap)
{
	struct uio *auio;
	int error;

	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error)
		return (error);
	error = kern_readv(td, uap->fd, auio);
	free(auio, M_IOV);
	return (error);
}

int
kern_readv(struct thread *td, int fd, struct uio *auio)
{
	struct file *fp;
	int error;

	error = fget_read(td, fd, CAP_READ | CAP_SEEK, &fp);
	if (error)
		return (error);
	error = dofileread(td, fd, fp, auio, (off_t)-1, 0);
	fdrop(fp, td);
	return (error);
}

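/*
 * Illustrative sketch (shown in a comment, not compiled here): the scatter
 * path above is what a userland readv(2) call lands in.  A hypothetical
 * caller fills an iovec array, and copyinuio() copies the array in and
 * builds the struct uio:
 *
 *	struct iovec iov[2];
 *	char hdr[16], body[4096];
 *
 *	iov[0].iov_base = hdr;  iov[0].iov_len = sizeof(hdr);
 *	iov[1].iov_base = body; iov[1].iov_len = sizeof(body);
 *	nread = readv(fd, iov, 2);
 *
 * copyinuio() allocates the struct uio from M_IOV, which is why
 * sys_readv() must release it with free(auio, M_IOV).
 */
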
/*
 * Scatter positioned read system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct preadv_args {
	int	fd;
	struct	iovec *iovp;
	u_int	iovcnt;
	off_t	offset;
};
#endif
int
sys_preadv(struct thread *td, struct preadv_args *uap)
{
	struct uio *auio;
	int error;

	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error)
		return (error);
	error = kern_preadv(td, uap->fd, auio, uap->offset);
	free(auio, M_IOV);
	return (error);
}

int
kern_preadv(struct thread *td, int fd, struct uio *auio, off_t offset)
{
	struct file *fp;
	int error;

	error = fget_read(td, fd, CAP_READ, &fp);
	if (error)
		return (error);
	if (!(fp->f_ops->fo_flags & DFLAG_SEEKABLE))
		error = ESPIPE;
	else if (offset < 0 && fp->f_vnode->v_type != VCHR)
		error = EINVAL;
	else
		error = dofileread(td, fd, fp, auio, offset, FOF_OFFSET);
	fdrop(fp, td);
	return (error);
}

/*
 * Common code for readv and preadv that reads data in
 * from a file using the passed in uio, offset, and flags.
 */
static int
dofileread(struct thread *td, int fd, struct file *fp, struct uio *auio,
    off_t offset, int flags)
{
	ssize_t cnt;
	int error;
#ifdef KTRACE
	struct uio *ktruio = NULL;
#endif

	/* Finish zero length reads right here. */
	if (auio->uio_resid == 0) {
		td->td_retval[0] = 0;
		return (0);
	}
	auio->uio_rw = UIO_READ;
	auio->uio_offset = offset;
	auio->uio_td = td;
#ifdef KTRACE
	if (KTRPOINT(td, KTR_GENIO))
		ktruio = cloneuio(auio);
#endif
	cnt = auio->uio_resid;
	if ((error = fo_read(fp, auio, td->td_ucred, flags, td))) {
		if (auio->uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
	}
	cnt -= auio->uio_resid;
#ifdef KTRACE
	if (ktruio != NULL) {
		ktruio->uio_resid = cnt;
		ktrgenio(fd, UIO_READ, ktruio, error);
	}
#endif
	td->td_retval[0] = cnt;
	return (error);
}

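/*
 * Note the error rewrite above: if some bytes were transferred before the
 * call was interrupted (ERESTART/EINTR/EWOULDBLOCK), the error is dropped
 * and the partial count is returned instead.  A minimal userland sketch of
 * the behaviour this produces; retrying the remainder is the caller's job:
 *
 *	size_t done = 0;
 *	while (done < len) {
 *		ssize_t n = read(fd, (char *)buf + done, len - done);
 *		if (n < 0 && errno == EINTR)
 *			continue;
 *		if (n <= 0)
 *			break;
 *		done += n;
 *	}
 */
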
#ifndef _SYS_SYSPROTO_H_
struct write_args {
	int	fd;
	const void *buf;
	size_t	nbyte;
};
#endif
int
sys_write(struct thread *td, struct write_args *uap)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if (uap->nbyte > INT_MAX)
		return (EINVAL);
	aiov.iov_base = (void *)(uintptr_t)uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = uap->nbyte;
	auio.uio_segflg = UIO_USERSPACE;
	error = kern_writev(td, uap->fd, &auio);
	return (error);
}

/*
 * Positioned write system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct pwrite_args {
	int	fd;
	const void *buf;
	size_t	nbyte;
	off_t	offset;
};
#endif
int
sys_pwrite(struct thread *td, struct pwrite_args *uap)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if (uap->nbyte > INT_MAX)
		return (EINVAL);
	aiov.iov_base = (void *)(uintptr_t)uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = uap->nbyte;
	auio.uio_segflg = UIO_USERSPACE;
	error = kern_pwritev(td, uap->fd, &auio, uap->offset);
	return (error);
}

int
freebsd6_pwrite(struct thread *td, struct freebsd6_pwrite_args *uap)
{
	struct pwrite_args oargs;

	oargs.fd = uap->fd;
	oargs.buf = uap->buf;
	oargs.nbyte = uap->nbyte;
	oargs.offset = uap->offset;
	return (sys_pwrite(td, &oargs));
}

/*
 * Gather write system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct writev_args {
	int	fd;
	struct	iovec *iovp;
	u_int	iovcnt;
};
#endif
int
sys_writev(struct thread *td, struct writev_args *uap)
{
	struct uio *auio;
	int error;

	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error)
		return (error);
	error = kern_writev(td, uap->fd, auio);
	free(auio, M_IOV);
	return (error);
}

int
kern_writev(struct thread *td, int fd, struct uio *auio)
{
	struct file *fp;
	int error;

	error = fget_write(td, fd, CAP_WRITE | CAP_SEEK, &fp);
	if (error)
		return (error);
	error = dofilewrite(td, fd, fp, auio, (off_t)-1, 0);
	fdrop(fp, td);
	return (error);
}

/*
 * Gather positioned write system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct pwritev_args {
	int	fd;
	struct	iovec *iovp;
	u_int	iovcnt;
	off_t	offset;
};
#endif
int
sys_pwritev(struct thread *td, struct pwritev_args *uap)
{
	struct uio *auio;
	int error;

	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error)
		return (error);
	error = kern_pwritev(td, uap->fd, auio, uap->offset);
	free(auio, M_IOV);
	return (error);
}

int
kern_pwritev(struct thread *td, int fd, struct uio *auio, off_t offset)
{
	struct file *fp;
	int error;

	error = fget_write(td, fd, CAP_WRITE, &fp);
	if (error)
		return (error);
	if (!(fp->f_ops->fo_flags & DFLAG_SEEKABLE))
		error = ESPIPE;
	else if (offset < 0 && fp->f_vnode->v_type != VCHR)
		error = EINVAL;
	else
		error = dofilewrite(td, fd, fp, auio, offset, FOF_OFFSET);
	fdrop(fp, td);
	return (error);
}

/*
 * Common code for writev and pwritev that writes data to
 * a file using the passed in uio, offset, and flags.
 */
static int
dofilewrite(struct thread *td, int fd, struct file *fp, struct uio *auio,
    off_t offset, int flags)
{
	ssize_t cnt;
	int error;
#ifdef KTRACE
	struct uio *ktruio = NULL;
#endif

	auio->uio_rw = UIO_WRITE;
	auio->uio_td = td;
	auio->uio_offset = offset;
#ifdef KTRACE
	if (KTRPOINT(td, KTR_GENIO))
		ktruio = cloneuio(auio);
#endif
	cnt = auio->uio_resid;
	if (fp->f_type == DTYPE_VNODE)
		bwillwrite();
	if ((error = fo_write(fp, auio, td->td_ucred, flags, td))) {
		if (auio->uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
		/* Socket layer is responsible for issuing SIGPIPE. */
		if (fp->f_type != DTYPE_SOCKET && error == EPIPE) {
			PROC_LOCK(td->td_proc);
			tdsignal(td, SIGPIPE);
			PROC_UNLOCK(td->td_proc);
		}
	}
	cnt -= auio->uio_resid;
#ifdef KTRACE
	if (ktruio != NULL) {
		ktruio->uio_resid = cnt;
		ktrgenio(fd, UIO_WRITE, ktruio, error);
	}
#endif
	td->td_retval[0] = cnt;
	return (error);
}

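/*
 * The SIGPIPE delivery above matches POSIX: a write on a broken pipe
 * raises the signal in-kernel, while sockets raise it from the socket
 * layer instead.  A userland sketch of the common idiom for turning the
 * signal into an error return (handle_peer_gone() is a placeholder):
 *
 *	signal(SIGPIPE, SIG_IGN);
 *	if (write(pfd, buf, len) == -1 && errno == EPIPE)
 *		handle_peer_gone();
 */
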
/*
 * Truncate a file given a file descriptor.
 *
 * We can't use fget_write() here, since we must return EINVAL and not
 * EBADF if the descriptor isn't writable.
 */
int
kern_ftruncate(struct thread *td, int fd, off_t length)
{
	struct file *fp;
	int error;

	AUDIT_ARG_FD(fd);
	if (length < 0)
		return (EINVAL);
	error = fget(td, fd, CAP_FTRUNCATE, &fp);
	if (error)
		return (error);
	AUDIT_ARG_FILE(td->td_proc, fp);
	if (!(fp->f_flag & FWRITE)) {
		fdrop(fp, td);
		return (EINVAL);
	}
	error = fo_truncate(fp, length, td->td_ucred, td);
	fdrop(fp, td);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ftruncate_args {
	int	fd;
	off_t	length;
};
#endif
int
sys_ftruncate(struct thread *td, struct ftruncate_args *uap)
{

	return (kern_ftruncate(td, uap->fd, uap->length));
}

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct oftruncate_args {
	int	fd;
	long	length;
};
#endif
int
oftruncate(struct thread *td, struct oftruncate_args *uap)
{

	return (kern_ftruncate(td, uap->fd, uap->length));
}
#endif /* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct ioctl_args {
	int	fd;
	u_long	com;
	caddr_t	data;
};
#endif
/* ARGSUSED */
int
sys_ioctl(struct thread *td, struct ioctl_args *uap)
{
	u_long com;
	int arg, error;
	u_int size;
	caddr_t data;

	if (uap->com > 0xffffffff) {
		printf(
		    "WARNING pid %d (%s): ioctl sign-extension ioctl %lx\n",
		    td->td_proc->p_pid, td->td_name, uap->com);
		uap->com &= 0xffffffff;
	}
	com = uap->com;

	/*
	 * Interpret high order word to find amount of data to be
	 * copied to/from the user's address space.
	 */
	size = IOCPARM_LEN(com);
	if ((size > IOCPARM_MAX) ||
	    ((com & (IOC_VOID | IOC_IN | IOC_OUT)) == 0) ||
#if defined(COMPAT_FREEBSD5) || defined(COMPAT_FREEBSD4) || defined(COMPAT_43)
	    ((com & IOC_OUT) && size == 0) ||
#else
	    ((com & (IOC_IN | IOC_OUT)) && size == 0) ||
#endif
	    ((com & IOC_VOID) && size > 0 && size != sizeof(int)))
		return (ENOTTY);

	if (size > 0) {
		if (com & IOC_VOID) {
			/* Integer argument. */
			arg = (intptr_t)uap->data;
			data = (void *)&arg;
			size = 0;
		} else
			data = malloc((u_long)size, M_IOCTLOPS, M_WAITOK);
	} else
		data = (void *)&uap->data;
	if (com & IOC_IN) {
		error = copyin(uap->data, data, (u_int)size);
		if (error) {
			if (size > 0)
				free(data, M_IOCTLOPS);
			return (error);
		}
	} else if (com & IOC_OUT) {
		/*
		 * Zero the buffer so the user always
		 * gets back something deterministic.
		 */
		bzero(data, size);
	}

	error = kern_ioctl(td, uap->fd, com, data);

	if (error == 0 && (com & IOC_OUT))
		error = copyout(data, uap->data, (u_int)size);

	if (size > 0)
		free(data, M_IOCTLOPS);
	return (error);
}

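/*
 * The size/direction checks above decode the command word built by the
 * _IO/_IOR/_IOW/_IOWR macros from <sys/ioccom.h>: the high bits carry
 * IOC_VOID/IOC_IN/IOC_OUT and IOCPARM_LEN() extracts the argument size.
 * An illustrative sketch of the layout; MYDEV_GETCNT is hypothetical,
 * FIONREAD is a real command with the same shape:
 *
 *	#define MYDEV_GETCNT _IOR('M', 1, int)
 *
 *	int cnt;
 *	ioctl(fd, MYDEV_GETCNT, &cnt);
 *
 * Here IOC_OUT is set and IOCPARM_LEN(com) == sizeof(int), so the kernel
 * allocates a bounce buffer, zeroes it, and copies it back out on success.
 */
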
int
kern_ioctl(struct thread *td, int fd, u_long com, caddr_t data)
{
	struct file *fp;
	struct filedesc *fdp;
	int error;
	int tmp;

	AUDIT_ARG_FD(fd);
	AUDIT_ARG_CMD(com);
	if ((error = fget(td, fd, CAP_IOCTL, &fp)) != 0)
		return (error);
	if ((fp->f_flag & (FREAD | FWRITE)) == 0) {
		fdrop(fp, td);
		return (EBADF);
	}
	fdp = td->td_proc->p_fd;
	switch (com) {
	case FIONCLEX:
		FILEDESC_XLOCK(fdp);
		fdp->fd_ofileflags[fd] &= ~UF_EXCLOSE;
		FILEDESC_XUNLOCK(fdp);
		goto out;
	case FIOCLEX:
		FILEDESC_XLOCK(fdp);
		fdp->fd_ofileflags[fd] |= UF_EXCLOSE;
		FILEDESC_XUNLOCK(fdp);
		goto out;
	case FIONBIO:
		if ((tmp = *(int *)data))
			atomic_set_int(&fp->f_flag, FNONBLOCK);
		else
			atomic_clear_int(&fp->f_flag, FNONBLOCK);
		data = (void *)&tmp;
		break;
	case FIOASYNC:
		if ((tmp = *(int *)data))
			atomic_set_int(&fp->f_flag, FASYNC);
		else
			atomic_clear_int(&fp->f_flag, FASYNC);
		data = (void *)&tmp;
		break;
	}

	error = fo_ioctl(fp, com, data, td->td_ucred, td);
out:
	fdrop(fp, td);
	return (error);
}

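/*
 * FIONBIO and FIOASYNC are intercepted here because they toggle flags in
 * the generic struct file before being handed down to fo_ioctl().  A
 * userland sketch of the classic non-blocking toggle this implements,
 * equivalent in effect to fcntl(fd, F_SETFL, ... | O_NONBLOCK):
 *
 *	int on = 1;
 *	ioctl(fd, FIONBIO, &on);
 */
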
int
poll_no_poll(int events)
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 */
	if (events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

int
sys_pselect(struct thread *td, struct pselect_args *uap)
{
	struct timespec ts;
	struct timeval tv, *tvp;
	sigset_t set, *uset;
	int error;

	if (uap->ts != NULL) {
		error = copyin(uap->ts, &ts, sizeof(ts));
		if (error != 0)
			return (error);
		TIMESPEC_TO_TIMEVAL(&tv, &ts);
		tvp = &tv;
	} else
		tvp = NULL;
	if (uap->sm != NULL) {
		error = copyin(uap->sm, &set, sizeof(set));
		if (error != 0)
			return (error);
		uset = &set;
	} else
		uset = NULL;
	return (kern_pselect(td, uap->nd, uap->in, uap->ou, uap->ex, tvp,
	    uset, NFDBITS));
}

int
kern_pselect(struct thread *td, int nd, fd_set *in, fd_set *ou, fd_set *ex,
    struct timeval *tvp, sigset_t *uset, int abi_nfdbits)
{
	int error;

	if (uset != NULL) {
		error = kern_sigprocmask(td, SIG_SETMASK, uset,
		    &td->td_oldsigmask, 0);
		if (error != 0)
			return (error);
		td->td_pflags |= TDP_OLDMASK;
		/*
		 * Make sure that ast() is called on return to
		 * usermode and TDP_OLDMASK is cleared, restoring old
		 * sigmask.
		 */
		thread_lock(td);
		td->td_flags |= TDF_ASTPENDING;
		thread_unlock(td);
	}
	error = kern_select(td, nd, in, ou, ex, tvp, abi_nfdbits);
	return (error);
}

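/*
 * The mask juggling above is what gives pselect(2) its atomicity: the old
 * mask is saved in td_oldsigmask and restored via an AST on return to
 * usermode, so a signal unblocked only for the duration of the wait cannot
 * slip in between the mask change and the sleep.  A userland sketch of the
 * intended use (SIGUSR1 is illustrative):
 *
 *	sigset_t block, waitmask;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &waitmask);
 *	sigdelset(&waitmask, SIGUSR1);
 *	(check flags set by the SIGUSR1 handler here)
 *	pselect(nfds, &rfds, NULL, NULL, NULL, &waitmask);
 */
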
#ifndef _SYS_SYSPROTO_H_
struct select_args {
	int	nd;
	fd_set	*in, *ou, *ex;
	struct	timeval *tv;
};
#endif
int
sys_select(struct thread *td, struct select_args *uap)
{
	struct timeval tv, *tvp;
	int error;

	if (uap->tv != NULL) {
		error = copyin(uap->tv, &tv, sizeof(tv));
		if (error)
			return (error);
		tvp = &tv;
	} else
		tvp = NULL;

	return (kern_select(td, uap->nd, uap->in, uap->ou, uap->ex, tvp,
	    NFDBITS));
}

/*
 * In the unlikely case when the user specified n greater than the last
 * open file descriptor, check that no bits are set after the last
 * valid fd.  We must return EBADF if any is set.
 *
 * There are applications that rely on the behaviour.
 *
 * nd is fd_lastfile + 1.
 */
static int
select_check_badfd(fd_set *fd_in, int nd, int ndu, int abi_nfdbits)
{
	char *addr, *oaddr;
	int b, i, res;
	uint8_t bits;

	if (nd >= ndu || fd_in == NULL)
		return (0);

	oaddr = NULL;
	bits = 0; /* silence gcc */
	for (i = nd; i < ndu; i++) {
		b = i / NBBY;
#if BYTE_ORDER == LITTLE_ENDIAN
		addr = (char *)fd_in + b;
#else
		addr = (char *)fd_in;
		if (abi_nfdbits == NFDBITS) {
			addr += rounddown(b, sizeof(fd_mask)) +
			    sizeof(fd_mask) - 1 - b % sizeof(fd_mask);
		} else {
			addr += rounddown(b, sizeof(uint32_t)) +
			    sizeof(uint32_t) - 1 - b % sizeof(uint32_t);
		}
#endif
		if (addr != oaddr) {
			res = fubyte(addr);
			if (res == -1)
				return (EFAULT);
			oaddr = addr;
			bits = res;
		}
		if ((bits & (1 << (i % NBBY))) != 0)
			return (EBADF);
	}
	return (0);
}

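/*
 * Illustrative consequence of the check above: if an application passes an
 * nfds larger than its highest open descriptor but leaves a stray bit set
 * in that tail region, select(2) fails up front rather than silently
 * ignoring the bogus fd:
 *
 *	FD_ZERO(&rfds);
 *	FD_SET(100, &rfds);
 *	select(200, &rfds, NULL, NULL, NULL);
 *
 * With fd 100 not open, the call returns -1 with errno set to EBADF.
 */
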
int
kern_select(struct thread *td, int nd, fd_set *fd_in, fd_set *fd_ou,
    fd_set *fd_ex, struct timeval *tvp, int abi_nfdbits)
{
	struct filedesc *fdp;
	/*
	 * The magic 2048 here is chosen to be just enough for FD_SETSIZE
	 * infds with the new FD_SETSIZE of 1024, and more than enough for
	 * FD_SETSIZE infds, outfds and exceptfds with the old FD_SETSIZE
	 * of 256.
	 */
	fd_mask s_selbits[howmany(2048, NFDBITS)];
	fd_mask *ibits[3], *obits[3], *selbits, *sbp;
	struct timeval atv, rtv, ttv;
	int error, lf, ndu, timo;
	u_int nbufbytes, ncpbytes, ncpubytes, nfdbits;

	if (nd < 0)
		return (EINVAL);
	fdp = td->td_proc->p_fd;
	ndu = nd;
	lf = fdp->fd_lastfile;
	if (nd > lf + 1)
		nd = lf + 1;

	error = select_check_badfd(fd_in, nd, ndu, abi_nfdbits);
	if (error != 0)
		return (error);
	error = select_check_badfd(fd_ou, nd, ndu, abi_nfdbits);
	if (error != 0)
		return (error);
	error = select_check_badfd(fd_ex, nd, ndu, abi_nfdbits);
	if (error != 0)
		return (error);

	/*
	 * Allocate just enough bits for the non-null fd_sets.  Use the
	 * preallocated auto buffer if possible.
	 */
	nfdbits = roundup(nd, NFDBITS);
	ncpbytes = nfdbits / NBBY;
	ncpubytes = roundup(nd, abi_nfdbits) / NBBY;
	nbufbytes = 0;
	if (fd_in != NULL)
		nbufbytes += 2 * ncpbytes;
	if (fd_ou != NULL)
		nbufbytes += 2 * ncpbytes;
	if (fd_ex != NULL)
		nbufbytes += 2 * ncpbytes;
	if (nbufbytes <= sizeof s_selbits)
		selbits = &s_selbits[0];
	else
		selbits = malloc(nbufbytes, M_SELECT, M_WAITOK);

	/*
	 * Assign pointers into the bit buffers and fetch the input bits.
	 * Put the output buffers together so that they can be bzeroed
	 * together.
	 */
	sbp = selbits;
#define	getbits(name, x) \
	do {								\
		if (name == NULL) {					\
			ibits[x] = NULL;				\
			obits[x] = NULL;				\
		} else {						\
			ibits[x] = sbp + nbufbytes / 2 / sizeof *sbp;	\
			obits[x] = sbp;					\
			sbp += ncpbytes / sizeof *sbp;			\
			error = copyin(name, ibits[x], ncpubytes);	\
			if (error != 0)					\
				goto done;				\
			bzero((char *)ibits[x] + ncpubytes,		\
			    ncpbytes - ncpubytes);			\
		}							\
	} while (0)
	getbits(fd_in, 0);
	getbits(fd_ou, 1);
	getbits(fd_ex, 2);
#undef	getbits

#if BYTE_ORDER == BIG_ENDIAN && defined(__LP64__)
	/*
	 * XXX: swizzle_fdset assumes that if abi_nfdbits != NFDBITS,
	 * we are running under 32-bit emulation. This should be more
	 * generic.
	 */
#define swizzle_fdset(bits)						\
	if (abi_nfdbits != NFDBITS && bits != NULL) {			\
		int i;							\
		for (i = 0; i < ncpbytes / sizeof *sbp; i++)		\
			bits[i] = (bits[i] >> 32) | (bits[i] << 32);	\
	}
#else
#define swizzle_fdset(bits)
#endif

	/* Make sure the bit order makes it through an ABI transition */
	swizzle_fdset(ibits[0]);
	swizzle_fdset(ibits[1]);
	swizzle_fdset(ibits[2]);

	if (nbufbytes != 0)
		bzero(selbits, nbufbytes / 2);

	if (tvp != NULL) {
		atv = *tvp;
		if (itimerfix(&atv)) {
			error = EINVAL;
			goto done;
		}
		getmicrouptime(&rtv);
		timevaladd(&atv, &rtv);
	} else {
		atv.tv_sec = 0;
		atv.tv_usec = 0;
	}
	timo = 0;
	seltdinit(td);
	/* Iterate until the timeout expires or descriptors become ready. */
	for (;;) {
		error = selscan(td, ibits, obits, nd);
		if (error || td->td_retval[0] != 0)
			break;
		if (atv.tv_sec || atv.tv_usec) {
			getmicrouptime(&rtv);
			if (timevalcmp(&rtv, &atv, >=))
				break;
			ttv = atv;
			timevalsub(&ttv, &rtv);
			timo = ttv.tv_sec > 24 * 60 * 60 ?
			    24 * 60 * 60 * hz : tvtohz(&ttv);
		}
		error = seltdwait(td, timo);
		if (error)
			break;
		error = selrescan(td, ibits, obits);
		if (error || td->td_retval[0] != 0)
			break;
	}
	seltdclear(td);

done:
	/* select is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;

	/* swizzle bit order back, if necessary */
	swizzle_fdset(obits[0]);
	swizzle_fdset(obits[1]);
	swizzle_fdset(obits[2]);
#undef swizzle_fdset

#define	putbits(name, x) \
	if (name && (error2 = copyout(obits[x], name, ncpubytes))) \
		error = error2;
	if (error == 0) {
		int error2;

		putbits(fd_in, 0);
		putbits(fd_ou, 1);
		putbits(fd_ex, 2);
#undef putbits
	}
	if (selbits != &s_selbits[0])
		free(selbits, M_SELECT);

	return (error);
}

/*
 * Convert a select bit set to poll flags.
 *
 * The backend always returns POLLHUP/POLLERR if appropriate and we
 * return this as a set bit in any set.
 */
static int select_flags[3] = {
    POLLRDNORM | POLLHUP | POLLERR,
    POLLWRNORM | POLLHUP | POLLERR,
    POLLRDBAND | POLLERR
};

/*
 * Compute the fo_poll flags required for a fd given by the index and
 * bit position in the fd_mask array.
 */
static __inline int
selflags(fd_mask **ibits, int idx, fd_mask bit)
{
	int flags;
	int msk;

	flags = 0;
	for (msk = 0; msk < 3; msk++) {
		if (ibits[msk] == NULL)
			continue;
		if ((ibits[msk][idx] & bit) == 0)
			continue;
		flags |= select_flags[msk];
	}
	return (flags);
}

/*
 * Set the appropriate output bits given a mask of fired events and the
 * input bits originally requested.
 */
static __inline int
selsetbits(fd_mask **ibits, fd_mask **obits, int idx, fd_mask bit, int events)
{
	int msk;
	int n;

	n = 0;
	for (msk = 0; msk < 3; msk++) {
		if ((events & select_flags[msk]) == 0)
			continue;
		if (ibits[msk] == NULL)
			continue;
		if ((ibits[msk][idx] & bit) == 0)
			continue;
		/*
		 * XXX Check for a duplicate set.  This can occur because a
		 * socket calls selrecord() twice for each poll() call
		 * resulting in two selfds per real fd.  selrescan() will
		 * call selsetbits twice as a result.
		 */
		if ((obits[msk][idx] & bit) != 0)
			continue;
		obits[msk][idx] |= bit;
		n++;
	}

	return (n);
}

static __inline int
getselfd_cap(struct filedesc *fdp, int fd, struct file **fpp)
{
	struct file *fp;
#ifdef CAPABILITIES
	struct file *fp_fromcap;
	int error;
#endif

	if ((fp = fget_unlocked(fdp, fd)) == NULL)
		return (EBADF);
#ifdef CAPABILITIES
	/*
	 * If the file descriptor is for a capability, test rights and use
	 * the file descriptor referenced by the capability.
	 */
	error = cap_funwrap(fp, CAP_POLL_EVENT, &fp_fromcap);
	if (error) {
		fdrop(fp, curthread);
		return (error);
	}
	if (fp != fp_fromcap) {
		fhold(fp_fromcap);
		fdrop(fp, curthread);
		fp = fp_fromcap;
	}
#endif /* CAPABILITIES */
	*fpp = fp;
	return (0);
}

/*
 * Traverse the list of fds attached to this thread's seltd and check for
 * completion.
 */
static int
selrescan(struct thread *td, fd_mask **ibits, fd_mask **obits)
{
	struct filedesc *fdp;
	struct selinfo *si;
	struct seltd *stp;
	struct selfd *sfp;
	struct selfd *sfn;
	struct file *fp;
	fd_mask bit;
	int fd, ev, n, idx;
	int error;

	fdp = td->td_proc->p_fd;
	stp = td->td_sel;
	n = 0;
	STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn) {
		fd = (int)(uintptr_t)sfp->sf_cookie;
		si = sfp->sf_si;
		selfdfree(stp, sfp);
		/* If the selinfo wasn't cleared the event didn't fire. */
		if (si != NULL)
			continue;
		error = getselfd_cap(fdp, fd, &fp);
		if (error)
			return (error);
		idx = fd / NFDBITS;
		bit = (fd_mask)1 << (fd % NFDBITS);
		ev = fo_poll(fp, selflags(ibits, idx, bit), td->td_ucred, td);
		fdrop(fp, td);
		if (ev != 0)
			n += selsetbits(ibits, obits, idx, bit, ev);
	}
	stp->st_flags = 0;
	td->td_retval[0] = n;
	return (0);
}

/*
 * Perform the initial filedescriptor scan and register ourselves with
 * each selinfo.
 */
static int
selscan(struct thread *td, fd_mask **ibits, fd_mask **obits, int nfd)
{
	struct filedesc *fdp;
	struct file *fp;
	fd_mask bit;
	int ev, flags, end, fd;
	int n, idx;
	int error;

	fdp = td->td_proc->p_fd;
	n = 0;
	for (idx = 0, fd = 0; fd < nfd; idx++) {
		end = imin(fd + NFDBITS, nfd);
		for (bit = 1; fd < end; bit <<= 1, fd++) {
			/* Compute the list of events we're interested in. */
			flags = selflags(ibits, idx, bit);
			if (flags == 0)
				continue;
			error = getselfd_cap(fdp, fd, &fp);
			if (error)
				return (error);
			selfdalloc(td, (void *)(uintptr_t)fd);
			ev = fo_poll(fp, flags, td->td_ucred, td);
			fdrop(fp, td);
			if (ev != 0)
				n += selsetbits(ibits, obits, idx, bit, ev);
		}
	}

	td->td_retval[0] = n;
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct poll_args {
	struct pollfd *fds;
	u_int	nfds;
	int	timeout;
};
#endif
int
sys_poll(struct thread *td, struct poll_args *uap)
{
	struct pollfd *bits;
	struct pollfd smallbits[32];
	struct timeval atv, rtv, ttv;
	int error = 0, timo;
	u_int nfds;
	size_t ni;

	nfds = uap->nfds;
	if (nfds > maxfilesperproc && nfds > FD_SETSIZE)
		return (EINVAL);
	ni = nfds * sizeof(struct pollfd);
	if (ni > sizeof(smallbits))
		bits = malloc(ni, M_TEMP, M_WAITOK);
	else
		bits = smallbits;
	error = copyin(uap->fds, bits, ni);
	if (error)
		goto done;
	if (uap->timeout != INFTIM) {
		atv.tv_sec = uap->timeout / 1000;
		atv.tv_usec = (uap->timeout % 1000) * 1000;
		if (itimerfix(&atv)) {
			error = EINVAL;
			goto done;
		}
		getmicrouptime(&rtv);
		timevaladd(&atv, &rtv);
	} else {
		atv.tv_sec = 0;
		atv.tv_usec = 0;
	}
	timo = 0;
	seltdinit(td);
	/* Iterate until the timeout expires or descriptors become ready. */
	for (;;) {
		error = pollscan(td, bits, nfds);
		if (error || td->td_retval[0] != 0)
			break;
		if (atv.tv_sec || atv.tv_usec) {
			getmicrouptime(&rtv);
			if (timevalcmp(&rtv, &atv, >=))
				break;
			ttv = atv;
			timevalsub(&ttv, &rtv);
			timo = ttv.tv_sec > 24 * 60 * 60 ?
			    24 * 60 * 60 * hz : tvtohz(&ttv);
		}
		error = seltdwait(td, timo);
		if (error)
			break;
		error = pollrescan(td);
		if (error || td->td_retval[0] != 0)
			break;
	}
	seltdclear(td);

done:
	/* poll is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;
	if (error == 0) {
		error = pollout(td, bits, uap->fds, nfds);
		if (error)
			goto out;
	}
out:
	if (ni > sizeof(smallbits))
		free(bits, M_TEMP);
	return (error);
}

static int
pollrescan(struct thread *td)
{
	struct seltd *stp;
	struct selfd *sfp;
	struct selfd *sfn;
	struct selinfo *si;
	struct filedesc *fdp;
	struct file *fp;
	struct pollfd *fd;
	int n;

	n = 0;
	fdp = td->td_proc->p_fd;
	stp = td->td_sel;
	FILEDESC_SLOCK(fdp);
	STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn) {
		fd = (struct pollfd *)sfp->sf_cookie;
		si = sfp->sf_si;
		selfdfree(stp, sfp);
		/* If the selinfo wasn't cleared the event didn't fire. */
		if (si != NULL)
			continue;
		fp = fdp->fd_ofiles[fd->fd];
#ifdef CAPABILITIES
		if ((fp == NULL)
		    || (cap_funwrap(fp, CAP_POLL_EVENT, &fp) != 0)) {
#else
		if (fp == NULL) {
#endif
			fd->revents = POLLNVAL;
			n++;
			continue;
		}

		/*
		 * Note: backend also returns POLLHUP and
		 * POLLERR if appropriate.
		 */
		fd->revents = fo_poll(fp, fd->events, td->td_ucred, td);
		if (fd->revents != 0)
			n++;
	}
	FILEDESC_SUNLOCK(fdp);
	stp->st_flags = 0;
	td->td_retval[0] = n;
	return (0);
}

static int
pollout(struct thread *td, struct pollfd *fds, struct pollfd *ufds, u_int nfd)
{
	int error = 0;
	u_int i = 0;
	u_int n = 0;

	for (i = 0; i < nfd; i++) {
		error = copyout(&fds->revents, &ufds->revents,
		    sizeof(ufds->revents));
		if (error)
			return (error);
		if (fds->revents != 0)
			n++;
		fds++;
		ufds++;
	}
	td->td_retval[0] = n;
	return (0);
}

static int
pollscan(struct thread *td, struct pollfd *fds, u_int nfd)
{
	struct filedesc *fdp = td->td_proc->p_fd;
	struct file *fp;
	u_int i;
	int n = 0;

	FILEDESC_SLOCK(fdp);
	for (i = 0; i < nfd; i++, fds++) {
		if (fds->fd >= fdp->fd_nfiles) {
			fds->revents = POLLNVAL;
			n++;
		} else if (fds->fd < 0) {
			fds->revents = 0;
		} else {
			fp = fdp->fd_ofiles[fds->fd];
#ifdef CAPABILITIES
			if ((fp == NULL)
			    || (cap_funwrap(fp, CAP_POLL_EVENT, &fp) != 0)) {
#else
			if (fp == NULL) {
#endif
				fds->revents = POLLNVAL;
				n++;
			} else {
				/*
				 * Note: backend also returns POLLHUP and
				 * POLLERR if appropriate.
				 */
				selfdalloc(td, fds);
				fds->revents = fo_poll(fp, fds->events,
				    td->td_ucred, td);
				/*
				 * POSIX requires that POLLOUT never be set
				 * simultaneously with POLLHUP.
				 */
				if ((fds->revents & POLLHUP) != 0)
					fds->revents &= ~POLLOUT;

				if (fds->revents != 0)
					n++;
			}
		}
	}
	FILEDESC_SUNLOCK(fdp);
	td->td_retval[0] = n;
	return (0);
}

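/*
 * The POLLHUP masking above is visible to applications: a writer polling a
 * pipe whose read end has closed sees POLLHUP (possibly with POLLERR), but
 * never POLLOUT and POLLHUP together.  A sketch of the usual check order;
 * handle_error() and do_write() are placeholders:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLOUT };
 *	if (poll(&pfd, 1, -1) > 0) {
 *		if (pfd.revents & (POLLHUP | POLLERR | POLLNVAL))
 *			handle_error();
 *		else if (pfd.revents & POLLOUT)
 *			do_write();
 *	}
 */
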
/*
 * OpenBSD poll system call.
 *
 * XXX this isn't quite a true representation..  OpenBSD uses select ops.
 */
#ifndef _SYS_SYSPROTO_H_
struct openbsd_poll_args {
	struct pollfd *fds;
	u_int	nfds;
	int	timeout;
};
#endif
int
sys_openbsd_poll(struct thread *td, struct openbsd_poll_args *uap)
{

	return (sys_poll(td, (struct poll_args *)uap));
}

/*
 * XXX This was created specifically to support netncp and netsmb.  This
 * allows the caller to specify a socket to wait for events on.  It returns
 * 0 if any events matched and an error otherwise.  There is no way to
 * determine which events fired.
 */
int
selsocket(struct socket *so, int events, struct timeval *tvp, struct thread *td)
{
	struct timeval atv, rtv, ttv;
	int error, timo;

	if (tvp != NULL) {
		atv = *tvp;
		if (itimerfix(&atv))
			return (EINVAL);
		getmicrouptime(&rtv);
		timevaladd(&atv, &rtv);
	} else {
		atv.tv_sec = 0;
		atv.tv_usec = 0;
	}

	timo = 0;
	seltdinit(td);
	/*
	 * Iterate until the timeout expires or the socket becomes ready.
	 */
	for (;;) {
		selfdalloc(td, NULL);
		error = sopoll(so, events, NULL, td);
		/* error here is actually the ready events. */
		if (error)
			return (0);
		if (atv.tv_sec || atv.tv_usec) {
			getmicrouptime(&rtv);
			if (timevalcmp(&rtv, &atv, >=)) {
				seltdclear(td);
				return (EWOULDBLOCK);
			}
			ttv = atv;
			timevalsub(&ttv, &rtv);
			timo = ttv.tv_sec > 24 * 60 * 60 ?
			    24 * 60 * 60 * hz : tvtohz(&ttv);
		}
		error = seltdwait(td, timo);
		seltdclear(td);
		if (error)
			break;
	}
	/* XXX Duplicates ncp/smb behavior. */
	if (error == ERESTART)
		error = 0;
	return (error);
}

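/*
 * A sketch of the in-kernel usage selsocket() was built for (simplified
 * and not lifted from netncp/netsmb verbatim): a protocol waits for its
 * reply to become readable before receiving it, mapping a timeout to its
 * own error code:
 *
 *	error = selsocket(so, POLLIN, &tv, td);
 *	if (error == EWOULDBLOCK)
 *		return (ETIMEDOUT);
 *	if (error == 0)
 *		error = soreceive(so, NULL, &auio, NULL, NULL, &flags);
 */
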
/*
 * Preallocate two selfds associated with 'cookie'.  Some fo_poll routines
 * have two select sets, one for read and another for write.
 */
static void
selfdalloc(struct thread *td, void *cookie)
{
	struct seltd *stp;

	stp = td->td_sel;
	if (stp->st_free1 == NULL)
		stp->st_free1 = uma_zalloc(selfd_zone, M_WAITOK|M_ZERO);
	stp->st_free1->sf_td = stp;
	stp->st_free1->sf_cookie = cookie;
	if (stp->st_free2 == NULL)
		stp->st_free2 = uma_zalloc(selfd_zone, M_WAITOK|M_ZERO);
	stp->st_free2->sf_td = stp;
	stp->st_free2->sf_cookie = cookie;
}

static void
selfdfree(struct seltd *stp, struct selfd *sfp)
{
	STAILQ_REMOVE(&stp->st_selq, sfp, selfd, sf_link);
	mtx_lock(sfp->sf_mtx);
	if (sfp->sf_si)
		TAILQ_REMOVE(&sfp->sf_si->si_tdlist, sfp, sf_threads);
	mtx_unlock(sfp->sf_mtx);
	uma_zfree(selfd_zone, sfp);
}

/* Drain the waiters tied to all the selfd belonging to the specified selinfo. */
void
seldrain(struct selinfo *sip)
{

	/*
	 * This functionality is already provided by doselwakeup(), so
	 * simply reuse it.  Ultimately, the calling context should take
	 * care to avoid races between a thread calling select()/poll()
	 * and the file descriptor being detached, but those races are
	 * the same ones selwakeup() already has.
	 */
	doselwakeup(sip, -1);
}

/*
 * Record a select request.
 */
void
selrecord(struct thread *selector, struct selinfo *sip)
{
	struct selfd *sfp;
	struct seltd *stp;
	struct mtx *mtxp;

	stp = selector->td_sel;
	/*
	 * Don't record when doing a rescan.
	 */
	if (stp->st_flags & SELTD_RESCAN)
		return;
	/*
	 * Grab one of the preallocated descriptors.
	 */
	sfp = NULL;
	if ((sfp = stp->st_free1) != NULL)
		stp->st_free1 = NULL;
	else if ((sfp = stp->st_free2) != NULL)
		stp->st_free2 = NULL;
	else
		panic("selrecord: No free selfd on selq");
	mtxp = sip->si_mtx;
	if (mtxp == NULL)
		mtxp = mtx_pool_find(mtxpool_select, sip);
	/*
	 * Initialize the sfp and queue it in the thread.
	 */
	sfp->sf_si = sip;
	sfp->sf_mtx = mtxp;
	STAILQ_INSERT_TAIL(&stp->st_selq, sfp, sf_link);
	/*
	 * Now that we've locked the sip, check for initialization.
	 */
	mtx_lock(mtxp);
	if (sip->si_mtx == NULL) {
		sip->si_mtx = mtxp;
		TAILQ_INIT(&sip->si_tdlist);
	}
	/*
	 * Add this thread to the list of selfds listening on this selinfo.
	 */
	TAILQ_INSERT_TAIL(&sip->si_tdlist, sfp, sf_threads);
	mtx_unlock(sip->si_mtx);
}

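/*
 * A sketch of the contract selrecord() implements for backends (driver
 * names and softc fields hypothetical): an fo_poll/d_poll method returns
 * whatever events are ready, and otherwise records the thread so that a
 * later selwakeup() on the same selinfo ends the sleep in seltdwait().
 *
 *	static int
 *	foo_poll(struct cdev *dev, int events, struct thread *td)
 *	{
 *		struct foo_softc *sc = dev->si_drv1;
 *		int revents = 0;
 *
 *		mtx_lock(&sc->sc_mtx);
 *		if ((events & (POLLIN | POLLRDNORM)) && sc->sc_avail > 0)
 *			revents = events & (POLLIN | POLLRDNORM);
 *		else
 *			selrecord(td, &sc->sc_rsel);
 *		mtx_unlock(&sc->sc_mtx);
 *		return (revents);
 *	}
 *
 * The interrupt path then calls selwakeup(&sc->sc_rsel) when data arrives,
 * and the detach path calls seldrain(&sc->sc_rsel) before freeing.
 */
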
/* Wake up a selecting thread. */
void
selwakeup(struct selinfo *sip)
{

	doselwakeup(sip, -1);
}

/* Wake up a selecting thread, and set its priority. */
void
selwakeuppri(struct selinfo *sip, int pri)
{

	doselwakeup(sip, pri);
}

/*
 * Do a wakeup when a selectable event occurs.
 */
static void
doselwakeup(struct selinfo *sip, int pri)
{
	struct selfd *sfp;
	struct selfd *sfn;
	struct seltd *stp;

	/* If it's not initialized there can't be any waiters. */
	if (sip->si_mtx == NULL)
		return;
	/*
	 * Locking the selinfo locks all selfds associated with it.
	 */
	mtx_lock(sip->si_mtx);
	TAILQ_FOREACH_SAFE(sfp, &sip->si_tdlist, sf_threads, sfn) {
		/*
		 * Once we remove this sfp from the list and clear
		 * sf_si, seltdclear() will know to ignore this si.
		 */
		TAILQ_REMOVE(&sip->si_tdlist, sfp, sf_threads);
		sfp->sf_si = NULL;
		stp = sfp->sf_td;
		mtx_lock(&stp->st_mtx);
		stp->st_flags |= SELTD_PENDING;
		cv_broadcastpri(&stp->st_wait, pri);
		mtx_unlock(&stp->st_mtx);
	}
	mtx_unlock(sip->si_mtx);
}

static void
seltdinit(struct thread *td)
{
	struct seltd *stp;

	if ((stp = td->td_sel) != NULL)
		goto out;
	td->td_sel = stp = malloc(sizeof(*stp), M_SELECT, M_WAITOK|M_ZERO);
	mtx_init(&stp->st_mtx, "sellck", NULL, MTX_DEF);
	cv_init(&stp->st_wait, "select");
out:
	stp->st_flags = 0;
	STAILQ_INIT(&stp->st_selq);
}

static int
seltdwait(struct thread *td, int timo)
{
	struct seltd *stp;
	int error;

	stp = td->td_sel;
	/*
	 * An event of interest may occur while we do not hold the seltd
	 * locked so check the pending flag before we sleep.
	 */
	mtx_lock(&stp->st_mtx);
	/*
	 * Any further calls to selrecord will be a rescan.
	 */
	stp->st_flags |= SELTD_RESCAN;
	if (stp->st_flags & SELTD_PENDING) {
		mtx_unlock(&stp->st_mtx);
		return (0);
	}
	if (timo > 0)
		error = cv_timedwait_sig(&stp->st_wait, &stp->st_mtx, timo);
	else
		error = cv_wait_sig(&stp->st_wait, &stp->st_mtx);
	mtx_unlock(&stp->st_mtx);

	return (error);
}

void
seltdfini(struct thread *td)
{
	struct seltd *stp;

	stp = td->td_sel;
	if (stp == NULL)
		return;
	if (stp->st_free1)
		uma_zfree(selfd_zone, stp->st_free1);
	if (stp->st_free2)
		uma_zfree(selfd_zone, stp->st_free2);
	td->td_sel = NULL;
	free(stp, M_SELECT);
}

/*
 * Remove the references to the thread from all of the objects we were
 * polling.
 */
static void
seltdclear(struct thread *td)
{
	struct seltd *stp;
	struct selfd *sfp;
	struct selfd *sfn;

	stp = td->td_sel;
	STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn)
		selfdfree(stp, sfp);
	stp->st_flags = 0;
}

static void selectinit(void *);
SYSINIT(select, SI_SUB_SYSCALLS, SI_ORDER_ANY, selectinit, NULL);

static void
selectinit(void *dummy __unused)
{

	selfd_zone = uma_zcreate("selfd", sizeof(struct selfd), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	mtxpool_select = mtx_pool_create("select mtxpool", 128, MTX_DEF);
}