/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * Copyright 2004 John-Mark Gurney <jmg@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/kthread.h>
#include <sys/selinfo.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/poll.h>
#include <sys/protosw.h>
#include <sys/sigio.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/taskqueue.h>
#include <sys/uio.h>

#include <vm/uma.h>
static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");
/*
 * This lock is used if multiple kq locks are required.  This possibly
 * should be made into a per proc lock.
 */
static struct mtx	kq_global;
MTX_SYSINIT(kq_global, &kq_global, "kqueue order", MTX_DEF);
#define KQ_GLOBAL_LOCK(lck, haslck)	do {	\
	if (!haslck)				\
		mtx_lock(lck);			\
	haslck = 1;				\
} while (0)
#define KQ_GLOBAL_UNLOCK(lck, haslck)	do {	\
	if (haslck)				\
		mtx_unlock(lck);		\
	haslck = 0;				\
} while (0)
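/*
 * The haslck flag lets a caller that may reach the lock on several code
 * paths (e.g. kqueue_register() below) take kq_global at most once and
 * release it unconditionally on the way out.
 */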
TASKQUEUE_DEFINE_THREAD(kqueue);

static int	kqueue_acquire(struct file *fp, struct kqueue **kqp);
static void	kqueue_release(struct kqueue *kq, int locked);
static int	kqueue_expand(struct kqueue *kq, struct filterops *fops,
		    uintptr_t ident, int waitok);
static void	kqueue_task(void *arg, int pending);
static int	kqueue_scan(struct kqueue *kq, int maxevents,
		    struct kevent *ulistp, const struct timespec *timeout,
		    struct kevent *keva, struct thread *td);
static void	kqueue_wakeup(struct kqueue *kq);
static struct filterops *kqueue_fo_find(int filt);
static void	kqueue_fo_release(int filt);
static fo_rdwr_t	kqueue_read;
static fo_rdwr_t	kqueue_write;
static fo_ioctl_t	kqueue_ioctl;
static fo_poll_t	kqueue_poll;
static fo_kqfilter_t	kqueue_kqfilter;
static fo_stat_t	kqueue_stat;
static fo_close_t	kqueue_close;

static struct fileops kqueueops = {
	.fo_read = kqueue_read,
	.fo_write = kqueue_write,
	.fo_ioctl = kqueue_ioctl,
	.fo_poll = kqueue_poll,
	.fo_kqfilter = kqueue_kqfilter,
	.fo_stat = kqueue_stat,
	.fo_close = kqueue_close,
};
static int 	knote_attach(struct knote *kn, struct kqueue *kq);
static void 	knote_drop(struct knote *kn, struct thread *td);
static void 	knote_enqueue(struct knote *kn);
static void 	knote_dequeue(struct knote *kn);
static void 	knote_init(void);
static struct 	knote *knote_alloc(int waitok);
static void 	knote_free(struct knote *kn);

static void	filt_kqdetach(struct knote *kn);
static int	filt_kqueue(struct knote *kn, long hint);
static int	filt_procattach(struct knote *kn);
static void	filt_procdetach(struct knote *kn);
static int	filt_proc(struct knote *kn, long hint);
static int	filt_fileattach(struct knote *kn);
static void	filt_timerexpire(void *knx);
static int	filt_timerattach(struct knote *kn);
static void	filt_timerdetach(struct knote *kn);
static int	filt_timer(struct knote *kn, long hint);
static struct filterops file_filtops =
	{ 1, filt_fileattach, NULL, NULL };
static struct filterops kqread_filtops =
	{ 1, NULL, filt_kqdetach, filt_kqueue };
/* XXX - move to kern_proc.c? */
static struct filterops proc_filtops =
	{ 0, filt_procattach, filt_procdetach, filt_proc };
static struct filterops timer_filtops =
	{ 0, filt_timerattach, filt_timerdetach, filt_timer };
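/*
 * The initializers above fill struct filterops in declaration order:
 * { f_isfd, f_attach, f_detach, f_event }.  f_isfd == 1 means kn_id is a
 * file descriptor; those filters use filt_fileattach() as their attach
 * routine, which simply forwards to the file's fo_kqfilter method.
 */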
static uma_zone_t	knote_zone;
static int 		kq_ncallouts = 0;
static int 		kq_calloutmax = (4 * 1024);
SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
/* XXX - ensure not KN_INFLUX?? */
#define KNOTE_ACTIVATE(kn, islock) do { 				\
	if ((islock))							\
		mtx_assert(&(kn)->kn_kq->kq_lock, MA_OWNED);		\
	else								\
		KQ_LOCK((kn)->kn_kq);					\
	(kn)->kn_status |= KN_ACTIVE;					\
	if (((kn)->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)	\
		knote_enqueue((kn));					\
	if (!(islock))							\
		KQ_UNLOCK((kn)->kn_kq);					\
} while(0)
#define KQ_LOCK(kq) do {						\
	mtx_lock(&(kq)->kq_lock);					\
} while (0)
#define KQ_FLUX_WAKEUP(kq) do {						\
	if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) {		\
		(kq)->kq_state &= ~KQ_FLUXWAIT;				\
		wakeup((kq));						\
	}								\
} while (0)
#define KQ_UNLOCK_FLUX(kq) do {						\
	KQ_FLUX_WAKEUP(kq);						\
	mtx_unlock(&(kq)->kq_lock);					\
} while (0)
#define KQ_UNLOCK(kq) do {						\
	mtx_unlock(&(kq)->kq_lock);					\
} while (0)
#define KQ_OWNED(kq) do {						\
	mtx_assert(&(kq)->kq_lock, MA_OWNED);				\
} while (0)
#define KQ_NOTOWNED(kq) do {						\
	mtx_assert(&(kq)->kq_lock, MA_NOTOWNED);			\
} while (0)
#define KN_LIST_LOCK(kn) do {						\
	if (kn->kn_knlist != NULL)					\
		mtx_lock(kn->kn_knlist->kl_lock);			\
} while (0)
#define KN_LIST_UNLOCK(kn) do {						\
	if (kn->kn_knlist != NULL)					\
		mtx_unlock(kn->kn_knlist->kl_lock);			\
} while (0)
#define	KN_HASHSIZE		64		/* XXX should be tunable */
#define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))
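/*
 * Worked example: with the default table size the mask is 63, so
 * KN_HASH(0x1234, 63) = (0x1234 ^ 0x12) & 0x3f = 0x26, i.e. chain 38.
 * Folding the second byte into the first keeps nearby idents from
 * clustering in the same chain.
 */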
static int
filt_nullattach(struct knote *kn)
{

	return (ENXIO);
}

struct filterops null_filtops =
	{ 0, filt_nullattach, NULL, NULL };
/* XXX - make SYSINIT to add these, and move into respective modules. */
extern struct filterops sig_filtops;
extern struct filterops fs_filtops;
/*
 * Table for all system-defined filters.
 */
static struct mtx	filterops_lock;
MTX_SYSINIT(kqueue_filterops, &filterops_lock, "protect sysfilt_ops",
	MTX_DEF);
static struct {
	struct filterops *for_fop;
	int for_refcnt;
} sysfilt_ops[EVFILT_SYSCOUNT] = {
	{ &file_filtops },			/* EVFILT_READ */
	{ &file_filtops },			/* EVFILT_WRITE */
	{ &null_filtops },			/* EVFILT_AIO */
	{ &file_filtops },			/* EVFILT_VNODE */
	{ &proc_filtops },			/* EVFILT_PROC */
	{ &sig_filtops },			/* EVFILT_SIGNAL */
	{ &timer_filtops },			/* EVFILT_TIMER */
	{ &file_filtops },			/* EVFILT_NETDEV */
	{ &fs_filtops },			/* EVFILT_FS */
};
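/*
 * Filter constants are small negative numbers (EVFILT_READ is -1,
 * EVFILT_WRITE is -2, and so on), so ~filt maps them onto the range
 * 0..EVFILT_SYSCOUNT-1; that is why all of the sysfilt_ops lookups
 * below index with sysfilt_ops[~filt].
 */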
/*
 * Simple redirection for all cdevsw style objects to call their fo_kqfilter
 * method.
 */
static int
filt_fileattach(struct knote *kn)
{

	return (fo_kqfilter(kn->kn_fp, kn));
}
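/*
 * A typical driver-side fo_kqfilter looks roughly like the sketch below
 * (hypothetical "mydev" driver, assuming a struct selinfo mydev_sel whose
 * si_note was set up with knlist_init()):
 *
 *	static struct filterops mydev_rfiltops =
 *		{ 1, NULL, filt_mydevdetach, filt_mydevread };
 *
 *	static int
 *	mydev_kqfilter(struct file *fp, struct knote *kn)
 *	{
 *		if (kn->kn_filter != EVFILT_READ)
 *			return (EINVAL);
 *		kn->kn_fop = &mydev_rfiltops;
 *		knlist_add(&mydev_sel.si_note, kn, 0);
 *		return (0);
 *	}
 */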
static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	if (kn->kn_filter != EVFILT_READ)
		return (EINVAL);

	kn->kn_status |= KN_KQUEUE;
	kn->kn_fop = &kqread_filtops;
	knlist_add(&kq->kq_sel.si_note, kn, 0);

	return (0);
}
static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	knlist_remove(&kq->kq_sel.si_note, kn, 0);
}

/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}
/* XXX - move to kern_proc.c? */
static int
filt_procattach(struct knote *kn)
{
	struct proc *p;
	int immediate;
	int error;

	immediate = 0;
	p = pfind(kn->kn_id);
	if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
		p = zpfind(kn->kn_id);
		immediate = 1;
	} else if (p != NULL && (p->p_flag & P_WEXIT)) {
		immediate = 1;
	}

	if (p == NULL)
		return (ESRCH);
	if ((error = p_cansee(curthread, p)))
		return (error);

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/*
	 * internal flag indicating registration done by kernel
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;	/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}

	if (immediate == 0)
		knlist_add(&p->p_klist, kn, 1);

	/*
	 * Immediately activate any exit notes if the target process is a
	 * zombie.  This is necessary to handle the case where the target
	 * process, e.g. a child, dies before the kevent is registered.
	 */
	if (immediate && filt_proc(kn, NOTE_EXIT))
		KNOTE_ACTIVATE(kn, 0);

	PROC_UNLOCK(p);

	return (0);
}
/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
/* XXX - move to kern_proc.c? */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p;

	p = kn->kn_ptr.p_proc;
	knlist_remove(&p->p_klist, kn, 0);
	kn->kn_ptr.p_proc = NULL;
}
/* XXX - move to kern_proc.c? */
static int
filt_proc(struct knote *kn, long hint)
{
	struct proc *p = kn->kn_ptr.p_proc;
	u_int event;

	/*
	 * mask off extra data
	 */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/*
	 * if the user is interested in this event, record it.
	 */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/*
	 * process is gone, so flag the event as finished.
	 */
	if (event == NOTE_EXIT) {
		if (!(kn->kn_status & KN_DETACHED))
			knlist_remove_inevent(&p->p_klist, kn);
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		kn->kn_ptr.p_proc = NULL;
		return (1);
	}

	/*
	 * process forked, and user wants to track the new process,
	 * so attach a new knote to it, and immediately report an
	 * event with the parent's pid.
	 */
	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
		struct kevent kev;
		int error;

		/*
		 * register knote with new process.
		 */
		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;			/* parent */
		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
		error = kqueue_register(kn->kn_kq, &kev, NULL, 0);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
	}

	return (kn->kn_fflags != 0);
}
static int
timertoticks(intptr_t data)
{
	struct timeval tv;
	int tticks;

	tv.tv_sec = data / 1000;
	tv.tv_usec = (data % 1000) * 1000;
	tticks = tvtohz(&tv);

	return tticks;
}
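/*
 * For example, kn_sdata = 1500 (ms) becomes tv = { 1, 500000 }, which
 * tvtohz() converts to the equivalent number of clock ticks (roughly
 * 1.5 * hz), so the callout armed below fires about 1.5 seconds later.
 */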
/* XXX - move to kern_timeout.c? */
static void
filt_timerexpire(void *knx)
{
	struct knote *kn = knx;
	struct callout *calloutp;

	kn->kn_data++;
	KNOTE_ACTIVATE(kn, 0);	/* XXX - handle locking */

	if ((kn->kn_flags & EV_ONESHOT) != EV_ONESHOT) {
		calloutp = (struct callout *)kn->kn_hook;
		callout_reset(calloutp, timertoticks(kn->kn_sdata),
		    filt_timerexpire, kn);
	}
}
/*
 * data contains amount of time to sleep, in milliseconds
 */
/* XXX - move to kern_timeout.c? */
static int
filt_timerattach(struct knote *kn)
{
	struct callout *calloutp;

	atomic_add_int(&kq_ncallouts, 1);

	if (kq_ncallouts >= kq_calloutmax) {
		atomic_add_int(&kq_ncallouts, -1);
		return (ENOMEM);
	}

	kn->kn_flags |= EV_CLEAR;		/* automatically set */
	kn->kn_status &= ~KN_DETACHED;		/* knlist_add usually sets it */
	MALLOC(calloutp, struct callout *, sizeof(*calloutp),
	    M_KQUEUE, M_WAITOK);
	callout_init(calloutp, CALLOUT_MPSAFE);
	kn->kn_hook = calloutp;
	callout_reset(calloutp, timertoticks(kn->kn_sdata), filt_timerexpire,
	    kn);

	return (0);
}
/* XXX - move to kern_timeout.c? */
static void
filt_timerdetach(struct knote *kn)
{
	struct callout *calloutp;

	calloutp = (struct callout *)kn->kn_hook;
	callout_drain(calloutp);
	FREE(calloutp, M_KQUEUE);
	atomic_add_int(&kq_ncallouts, -1);
	kn->kn_status |= KN_DETACHED;	/* knlist_remove usually clears it */
}

/* XXX - move to kern_timeout.c? */
static int
filt_timer(struct knote *kn, long hint)
{

	return (kn->kn_data != 0);
}
/*
 * MPSAFE
 */
int
kqueue(struct thread *td, struct kqueue_args *uap)
{
	struct filedesc *fdp;
	struct kqueue *kq;
	struct file *fp;
	int fd, error;

	fdp = td->td_proc->p_fd;
	error = falloc(td, &fp, &fd);
	if (error)
		goto done2;

	/* An extra reference on `fp' has been held for us by falloc(). */
	kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO);
	mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF|MTX_DUPOK);
	TAILQ_INIT(&kq->kq_head);
	kq->kq_fdp = fdp;
	knlist_init(&kq->kq_sel.si_note, &kq->kq_lock);
	TASK_INIT(&kq->kq_task, 0, kqueue_task, kq);

	FILEDESC_LOCK_FAST(fdp);
	SLIST_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list);
	FILEDESC_UNLOCK_FAST(fdp);

	FILE_LOCK(fp);
	fp->f_flag = FREAD | FWRITE;
	fp->f_type = DTYPE_KQUEUE;
	fp->f_ops = &kqueueops;
	fp->f_data = kq;
	FILE_UNLOCK(fp);
	fdrop(fp, td);

	td->td_retval[0] = fd;
done2:
	return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct kevent_args {
	int	fd;
	const struct kevent *changelist;
	int	nchanges;
	struct	kevent *eventlist;
	int	nevents;
	const struct timespec *timeout;
};
#endif
/*
 * MPSAFE
 */
int
kevent(struct thread *td, struct kevent_args *uap)
{
	struct kevent keva[KQ_NEVENTS];
	struct kevent *kevp;
	struct kqueue *kq;
	struct file *fp;
	struct timespec ts;
	int i, n, nerrors, error;

	if ((error = fget(td, uap->fd, &fp)) != 0)
		return (error);
	if ((error = kqueue_acquire(fp, &kq)) != 0)
		goto done_norelease;

	if (uap->timeout != NULL) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			goto done;
		uap->timeout = &ts;
	}

	nerrors = 0;

	while (uap->nchanges > 0) {
		n = uap->nchanges > KQ_NEVENTS ? KQ_NEVENTS : uap->nchanges;
		error = copyin(uap->changelist, keva,
		    n * sizeof *keva);
		if (error)
			goto done;
		for (i = 0; i < n; i++) {
			kevp = &keva[i];
			kevp->flags &= ~EV_SYSFLAGS;
			error = kqueue_register(kq, kevp, td, 1);
			if (error) {
				if (uap->nevents != 0) {
					kevp->flags = EV_ERROR;
					kevp->data = error;
					(void) copyout(kevp,
					    uap->eventlist,
					    sizeof(*kevp));
					uap->eventlist++;
					uap->nevents--;
					nerrors++;
				} else {
					goto done;
				}
			}
		}
		uap->nchanges -= n;
		uap->changelist += n;
	}
	if (nerrors) {
		td->td_retval[0] = nerrors;
		error = 0;
		goto done;
	}

	error = kqueue_scan(kq, uap->nevents, uap->eventlist, uap->timeout,
	    keva, td);
done:
	kqueue_release(kq, 0);
done_norelease:
	fdrop(fp, td);
	return (error);
}
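/*
 * Userland drives the two syscalls above roughly like this minimal sketch
 * (error handling trimmed; "sock" is any readable descriptor):
 *
 *	struct kevent change, event;
 *	int kq = kqueue();
 *	EV_SET(&change, sock, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &change, 1, NULL, 0, NULL);		// register only
 *	int n = kevent(kq, NULL, 0, &event, 1, NULL);	// block for event
 *	if (n > 0)
 *		printf("%d bytes readable\n", (int)event.data);
 */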
int
kqueue_add_filteropts(int filt, struct filterops *filtops)
{
	int error = 0;

	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) {
		printf(
"trying to add a filterop that is out of range: %d is beyond %d\n",
		    ~filt, EVFILT_SYSCOUNT);
		return EINVAL;
	}
	mtx_lock(&filterops_lock);
	if (sysfilt_ops[~filt].for_fop != &null_filtops &&
	    sysfilt_ops[~filt].for_fop != NULL)
		error = EEXIST;
	else {
		sysfilt_ops[~filt].for_fop = filtops;
		sysfilt_ops[~filt].for_refcnt = 0;
	}
	mtx_unlock(&filterops_lock);

	return (error);
}
int
kqueue_del_filteropts(int filt)
{
	int error = 0;

	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
		return EINVAL;

	mtx_lock(&filterops_lock);
	if (sysfilt_ops[~filt].for_fop == &null_filtops ||
	    sysfilt_ops[~filt].for_fop == NULL)
		error = EINVAL;
	else if (sysfilt_ops[~filt].for_refcnt != 0)
		error = EBUSY;
	else {
		sysfilt_ops[~filt].for_fop = &null_filtops;
		sysfilt_ops[~filt].for_refcnt = 0;
	}
	mtx_unlock(&filterops_lock);

	return error;
}
static struct filterops *
kqueue_fo_find(int filt)
{

	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
		return NULL;

	mtx_lock(&filterops_lock);
	sysfilt_ops[~filt].for_refcnt++;
	if (sysfilt_ops[~filt].for_fop == NULL)
		sysfilt_ops[~filt].for_fop = &null_filtops;
	mtx_unlock(&filterops_lock);

	return sysfilt_ops[~filt].for_fop;
}

static void
kqueue_fo_release(int filt)
{

	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
		return;

	mtx_lock(&filterops_lock);
	KASSERT(sysfilt_ops[~filt].for_refcnt > 0,
	    ("filter object refcount not valid on release"));
	sysfilt_ops[~filt].for_refcnt--;
	mtx_unlock(&filterops_lock);
}
/*
 * A ref to kq (obtained via kqueue_acquire) should be held.  waitok will
 * influence if memory allocation should wait.  Make sure it is 0 if you
 * hold any mutexes.
 */
int
kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td, int waitok)
{
	struct filedesc *fdp;
	struct filterops *fops;
	struct file *fp;
	struct knote *kn, *tkn;
	int error, filt, event;
	int haskqglobal;
	int fd;

	fp = NULL;
	kn = NULL;
	error = 0;
	haskqglobal = 0;

	filt = kev->filter;
	fops = kqueue_fo_find(filt);
	if (fops == NULL)
		return EINVAL;

	tkn = knote_alloc(waitok);		/* prevent waiting with locks */

findkn:
	if (fops->f_isfd) {
		KASSERT(td != NULL, ("td is NULL"));
		fdp = td->td_proc->p_fd;
		FILEDESC_LOCK(fdp);
		/* validate descriptor */
		fd = kev->ident;
		if (fd < 0 || fd >= fdp->fd_nfiles ||
		    (fp = fdp->fd_ofiles[fd]) == NULL) {
			FILEDESC_UNLOCK(fdp);
			error = EBADF;
			goto done;
		}
		fhold(fp);

		if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops,
		    kev->ident, 0) != 0) {
			/* unlock and try again */
			FILEDESC_UNLOCK(fdp);
			fdrop(fp, td);
			fp = NULL;
			error = kqueue_expand(kq, fops, kev->ident, waitok);
			if (error)
				goto done;
			goto findkn;
		}

		if (fp->f_type == DTYPE_KQUEUE) {
			/*
			 * if we add some intelligence about what we are doing,
			 * we should be able to support events on ourselves.
			 * We need to know when we are doing this to prevent
			 * getting both the knlist lock and the kq lock since
			 * they are the same thing.
			 */
			if (fp->f_data == kq) {
				FILEDESC_UNLOCK(fdp);
				error = EINVAL;
				goto done;
			}

			KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
		}

		FILEDESC_UNLOCK(fdp);
		KQ_LOCK(kq);
		if (kev->ident < kq->kq_knlistsize) {
			SLIST_FOREACH(kn, &kq->kq_knlist[kev->ident], kn_link)
				if (kev->filter == kn->kn_filter)
					break;
		}
	} else {
		if ((kev->flags & EV_ADD) == EV_ADD)
			kqueue_expand(kq, fops, kev->ident, waitok);

		KQ_LOCK(kq);
		if (kq->kq_knhashmask != 0) {
			struct klist *list;

			list = &kq->kq_knhash[
			    KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
			SLIST_FOREACH(kn, list, kn_link)
				if (kev->ident == kn->kn_id &&
				    kev->filter == kn->kn_filter)
					break;
		}
	}

	/* knote is in the process of changing, wait for it to stabilize. */
	if (kn != NULL && (kn->kn_status & KN_INFLUX) == KN_INFLUX) {
		if (fp != NULL) {
			fdrop(fp, td);
			fp = NULL;
		}
		KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
		kq->kq_state |= KQ_FLUXWAIT;
		msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqflxwt", 0);
		goto findkn;
	}

	if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
		KQ_UNLOCK(kq);
		error = ENOENT;
		goto done;
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kev->flags & EV_ADD) {
		if (kn == NULL) {
			kn = tkn;
			tkn = NULL;
			if (kn == NULL) {
				error = ENOMEM;
				goto done;
			}
			kn->kn_fp = fp;
			kn->kn_kq = kq;
			kn->kn_fop = fops;
			/*
			 * apply reference counts to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fops = NULL;
			fp = NULL;

			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;
			kn->kn_status = KN_INFLUX|KN_DETACHED;

			error = knote_attach(kn, kq);
			KQ_UNLOCK(kq);
			if (error != 0) {
				tkn = kn;
				goto done;
			}

			if ((error = kn->kn_fop->f_attach(kn)) != 0) {
				knote_drop(kn, td);
				goto done;
			}
			KN_LIST_LOCK(kn);
		} else {
			/*
			 * The user may change some filter values after the
			 * initial EV_ADD, but doing so will not reset any
			 * filter which has already been triggered.
			 */
			kn->kn_status |= KN_INFLUX;
			KQ_UNLOCK(kq);
			KN_LIST_LOCK(kn);
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kn->kn_kevent.udata = kev->udata;
		}

		/*
		 * We can get here with kn->kn_knlist == NULL.
		 * This can happen when the initial attach event decides that
		 * the event is "completed" already.  i.e. filt_procattach
		 * is called on a zombie process.  It will call filt_proc
		 * which will remove it from the list, and NULL kn_knlist.
		 */
		event = kn->kn_fop->f_event(kn, 0);
		KN_LIST_UNLOCK(kn);
		KQ_LOCK(kq);
		if (event)
			KNOTE_ACTIVATE(kn, 1);
		kn->kn_status &= ~KN_INFLUX;
	} else if (kev->flags & EV_DELETE) {
		kn->kn_status |= KN_INFLUX;
		KQ_UNLOCK(kq);
		if (!(kn->kn_status & KN_DETACHED))
			kn->kn_fop->f_detach(kn);
		knote_drop(kn, td);
		goto done;
	}

	if ((kev->flags & EV_DISABLE) &&
	    ((kn->kn_status & KN_DISABLED) == 0)) {
		kn->kn_status |= KN_DISABLED;
	}

	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
		kn->kn_status &= ~KN_DISABLED;
		if ((kn->kn_status & KN_ACTIVE) &&
		    ((kn->kn_status & KN_QUEUED) == 0))
			knote_enqueue(kn);
	}
	KQ_UNLOCK_FLUX(kq);

done:
	KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
	if (fp != NULL)
		fdrop(fp, td);
	if (tkn != NULL)
		knote_free(tkn);
	if (fops != NULL)
		kqueue_fo_release(filt);
	return (error);
}
static int
kqueue_acquire(struct file *fp, struct kqueue **kqp)
{
	int error;
	struct kqueue *kq;

	error = 0;

	kq = fp->f_data;
	if (fp->f_type != DTYPE_KQUEUE || kq == NULL) {
		return (EBADF);
	}
	*kqp = kq;
	KQ_LOCK(kq);
	if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) {
		KQ_UNLOCK(kq);
		return (EBADF);
	}
	kq->kq_refcnt++;
	KQ_UNLOCK(kq);

	return error;
}

static void
kqueue_release(struct kqueue *kq, int locked)
{
	if (locked)
		KQ_OWNED(kq);
	else
		KQ_LOCK(kq);
	kq->kq_refcnt--;
	if (kq->kq_refcnt == 1)
		wakeup(&kq->kq_refcnt);
	if (!locked)
		KQ_UNLOCK(kq);
}
static void
kqueue_schedtask(struct kqueue *kq)
{

	KQ_OWNED(kq);
	KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN),
	    ("scheduling kqueue task while draining"));

	if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) {
		taskqueue_enqueue(taskqueue_kqueue, &kq->kq_task);
		kq->kq_state |= KQ_TASKSCHED;
	}
}
/*
 * Expand the kq to make sure we have storage for fops/ident pair.
 *
 * Return 0 on success (or no work necessary), return errno on failure.
 *
 * Not calling hashinit w/ waitok (proper malloc flag) should be safe.
 * If kqueue_register is called from a non-fd context, there usually/should
 * be no locks held.
 */
static int
kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
	int waitok)
{
	struct klist *list, *tmp_knhash;
	u_long tmp_knhashmask;
	int size;
	int fd;
	int mflag = waitok ? M_WAITOK : M_NOWAIT;

	KQ_NOTOWNED(kq);

	if (fops->f_isfd) {
		fd = ident;
		if (kq->kq_knlistsize <= fd) {
			size = kq->kq_knlistsize;
			while (size <= fd)
				size += KQEXTENT;
			MALLOC(list, struct klist *,
			    size * sizeof list, M_KQUEUE, mflag);
			if (list == NULL)
				return ENOMEM;
			KQ_LOCK(kq);
			if (kq->kq_knlistsize > fd) {
				FREE(list, M_KQUEUE);
				list = NULL;
			} else {
				if (kq->kq_knlist != NULL) {
					bcopy(kq->kq_knlist, list,
					    kq->kq_knlistsize * sizeof list);
					FREE(kq->kq_knlist, M_KQUEUE);
					kq->kq_knlist = NULL;
				}
				bzero((caddr_t)list +
				    kq->kq_knlistsize * sizeof list,
				    (size - kq->kq_knlistsize) * sizeof list);
				kq->kq_knlistsize = size;
				kq->kq_knlist = list;
			}
			KQ_UNLOCK(kq);
		}
	} else {
		if (kq->kq_knhashmask == 0) {
			tmp_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
			    &tmp_knhashmask);
			if (tmp_knhash == NULL)
				return ENOMEM;
			KQ_LOCK(kq);
			if (kq->kq_knhashmask == 0) {
				kq->kq_knhash = tmp_knhash;
				kq->kq_knhashmask = tmp_knhashmask;
			} else {
				free(tmp_knhash, M_KQUEUE);
			}
			KQ_UNLOCK(kq);
		}
	}

	return 0;
}
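/*
 * Note the allocate-then-recheck pattern above: the allocation happens
 * without the kq lock held (it may sleep when waitok is set), so after
 * reacquiring KQ_LOCK we must recheck whether another thread already
 * grew the table and, if so, throw our copy away.
 */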
static void
kqueue_task(void *arg, int pending)
{
	struct kqueue *kq;
	int haskqglobal;

	haskqglobal = 0;
	kq = arg;

	KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
	KQ_LOCK(kq);

	KNOTE_LOCKED(&kq->kq_sel.si_note, 0);

	kq->kq_state &= ~KQ_TASKSCHED;
	if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) {
		wakeup(&kq->kq_state);
	}
	KQ_UNLOCK(kq);
	KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
}
/*
 * Scan, update kn_data (if not ONESHOT), and copyout triggered events.
 * We treat KN_MARKER knotes as if they are INFLUX.
 */
static int
kqueue_scan(struct kqueue *kq, int maxevents, struct kevent *ulistp,
	const struct timespec *tsp, struct kevent *keva, struct thread *td)
{
	struct kevent *kevp;
	struct timeval atv, rtv, ttv;
	struct knote *kn, *marker;
	int count, timeout, nkev, error;
	int haskqglobal;

	count = maxevents;
	nkev = 0;
	error = 0;
	haskqglobal = 0;

	if (maxevents == 0)
		goto done_nl;

	if (tsp != NULL) {
		TIMESPEC_TO_TIMEVAL(&atv, tsp);
		if (itimerfix(&atv)) {
			error = EINVAL;
			goto done_nl;
		}
		if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
			timeout = -1;
		else
			timeout = atv.tv_sec > 24 * 60 * 60 ?
			    24 * 60 * 60 * hz : tvtohz(&atv);
		getmicrouptime(&rtv);
		timevaladd(&atv, &rtv);
	} else {
		atv.tv_sec = 0;
		atv.tv_usec = 0;
		timeout = 0;
	}
	marker = knote_alloc(1);
	if (marker == NULL) {
		error = ENOMEM;
		goto done_nl;
	}
	marker->kn_status = KN_MARKER;
	KQ_LOCK(kq);
	goto start;

retry:
	if (atv.tv_sec || atv.tv_usec) {
		getmicrouptime(&rtv);
		if (timevalcmp(&rtv, &atv, >=))
			goto done;
		ttv = atv;
		timevalsub(&ttv, &rtv);
		timeout = ttv.tv_sec > 24 * 60 * 60 ?
		    24 * 60 * 60 * hz : tvtohz(&ttv);
	}

start:
	kevp = keva;
	if (kq->kq_count == 0) {
		if (timeout < 0) {
			error = EWOULDBLOCK;
		} else {
			KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
			kq->kq_state |= KQ_SLEEP;
			error = msleep(kq, &kq->kq_lock, PSOCK | PCATCH,
			    "kqread", timeout);
		}
		if (error == 0)
			goto retry;
		/* don't restart after signals... */
		if (error == ERESTART)
			error = EINTR;
		else if (error == EWOULDBLOCK)
			error = 0;
		goto done;
	}

	TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe);
	while (count) {
		KQ_OWNED(kq);
		kn = TAILQ_FIRST(&kq->kq_head);

		if ((kn->kn_status == KN_MARKER && kn != marker) ||
		    (kn->kn_status & KN_INFLUX) == KN_INFLUX) {
			KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
			kq->kq_state |= KQ_FLUXWAIT;
			error = msleep(kq, &kq->kq_lock, PSOCK,
			    "kqflxwt", 0);
			continue;
		}

		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
		if ((kn->kn_status & KN_DISABLED) == KN_DISABLED) {
			kn->kn_status &= ~KN_QUEUED;
			kq->kq_count--;
			continue;
		}
		if (kn == marker) {
			KQ_FLUX_WAKEUP(kq);
			if (count == maxevents)
				goto retry;
			goto done;
		}
		KASSERT((kn->kn_status & KN_INFLUX) == 0,
		    ("KN_INFLUX set when not supposed to be"));

		if ((kn->kn_flags & EV_ONESHOT) == EV_ONESHOT) {
			kn->kn_status &= ~KN_QUEUED;
			kn->kn_status |= KN_INFLUX;
			kq->kq_count--;
			KQ_UNLOCK(kq);
			/*
			 * We don't need to lock the list since we've marked
			 * it _INFLUX.
			 */
			*kevp = kn->kn_kevent;
			if (!(kn->kn_status & KN_DETACHED))
				kn->kn_fop->f_detach(kn);
			knote_drop(kn, td);
			KQ_LOCK(kq);
			kn = NULL;
		} else {
			kn->kn_status |= KN_INFLUX;
			KQ_UNLOCK(kq);
			if ((kn->kn_status & KN_KQUEUE) == KN_KQUEUE)
				KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
			KN_LIST_LOCK(kn);
			if (kn->kn_fop->f_event(kn, 0) == 0) {
				KN_LIST_UNLOCK(kn);
				KQ_LOCK(kq);
				kn->kn_status &=
				    ~(KN_QUEUED | KN_ACTIVE | KN_INFLUX);
				kq->kq_count--;
				continue;
			}
			*kevp = kn->kn_kevent;
			KQ_LOCK(kq);
			if (kn->kn_flags & EV_CLEAR) {
				kn->kn_data = 0;
				kn->kn_fflags = 0;
				kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
				kq->kq_count--;
			} else
				TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);

			kn->kn_status &= ~(KN_INFLUX);
			KN_LIST_UNLOCK(kn);
		}

		/* we are returning a copy to the user */
		kevp++;
		nkev++;
		count--;

		if (nkev == KQ_NEVENTS) {
			KQ_UNLOCK_FLUX(kq);
			error = copyout(keva, ulistp, sizeof *keva * nkev);
			ulistp += nkev;
			nkev = 0;
			kevp = keva;
			KQ_LOCK(kq);
			if (error)
				break;
		}
	}
	TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe);
done:
	KQ_OWNED(kq);
	KQ_UNLOCK_FLUX(kq);
	KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
	knote_free(marker);
done_nl:
	KQ_NOTOWNED(kq);
	if (nkev != 0)
		error = copyout(keva, ulistp, sizeof *keva * nkev);
	td->td_retval[0] = maxevents - count;
	return (error);
}
/*
 * XXX
 * This could be expanded to call kqueue_scan, if desired.
 */
/*ARGSUSED*/
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
	int flags, struct thread *td)
{
	return (ENXIO);
}

/*ARGSUSED*/
static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
	 int flags, struct thread *td)
{
	return (ENXIO);
}
/*ARGSUSED*/
static int
kqueue_ioctl(struct file *fp, u_long cmd, void *data,
	struct ucred *active_cred, struct thread *td)
{
	/*
	 * Enabling sigio causes two major problems:
	 * 1) infinite recursion:
	 * Synopsis: kevent is being used to track signals and have FIOASYNC
	 * set.  On receipt of a signal this will cause a kqueue to recurse
	 * into itself over and over.  Sending the sigio causes the kqueue
	 * to become ready, which in turn posts sigio again, forever.
	 * Solution: this can be solved by setting a flag in the kqueue that
	 * we have a SIGIO in progress.
	 * 2) locking problems:
	 * Synopsis: Kqueue is a leaf subsystem, but adding signalling puts
	 * us above the proc and pgrp locks.
	 * Solution: Post a signal using an async mechanism, being sure to
	 * record a generation count in the delivery so that we do not deliver
	 * a signal to the wrong process.
	 *
	 * Note, these two mechanisms are somewhat mutually exclusive!
	 */
#if 0
	struct kqueue *kq;

	kq = fp->f_data;
	switch (cmd) {
	case FIOASYNC:
		if (*(int *)data) {
			kq->kq_state |= KQ_ASYNC;
		} else {
			kq->kq_state &= ~KQ_ASYNC;
		}
		return (0);

	case FIOSETOWN:
		return (fsetown(*(int *)data, &kq->kq_sigio));

	case FIOGETOWN:
		*(int *)data = fgetown(&kq->kq_sigio);
		return (0);
	}
#endif

	return (ENOTTY);
}
/*ARGSUSED*/
static int
kqueue_poll(struct file *fp, int events, struct ucred *active_cred,
	struct thread *td)
{
	struct kqueue *kq;
	int revents = 0;
	int error;

	if ((error = kqueue_acquire(fp, &kq)))
		return POLLERR;

	KQ_LOCK(kq);
	if (events & (POLLIN | POLLRDNORM)) {
		if (kq->kq_count) {
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			selrecord(td, &kq->kq_sel);
			kq->kq_state |= KQ_SEL;
		}
	}
	kqueue_release(kq, 1);
	KQ_UNLOCK(kq);
	return (revents);
}
/*ARGSUSED*/
static int
kqueue_stat(struct file *fp, struct stat *st, struct ucred *active_cred,
	struct thread *td)
{

	return (ENXIO);
}

/*ARGSUSED*/
static int
kqueue_close(struct file *fp, struct thread *td)
{
	struct kqueue *kq = fp->f_data;
	struct filedesc *fdp;
	struct knote *kn;
	int i;
	int error;

	if ((error = kqueue_acquire(fp, &kq)))
		return error;

	KQ_LOCK(kq);

	KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING,
	    ("kqueue already closing"));
	kq->kq_state |= KQ_CLOSING;
	if (kq->kq_refcnt > 1)
		msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0);

	KASSERT(kq->kq_refcnt == 1, ("other refs are out there!"));
	fdp = kq->kq_fdp;

	KASSERT(knlist_empty(&kq->kq_sel.si_note),
	    ("kqueue's knlist not empty"));

	for (i = 0; i < kq->kq_knlistsize; i++) {
		while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) {
			KASSERT((kn->kn_status & KN_INFLUX) == 0,
			    ("KN_INFLUX set when not supposed to be"));
			kn->kn_status |= KN_INFLUX;
			KQ_UNLOCK(kq);
			if (!(kn->kn_status & KN_DETACHED))
				kn->kn_fop->f_detach(kn);
			knote_drop(kn, td);
			KQ_LOCK(kq);
		}
	}
	if (kq->kq_knhashmask != 0) {
		for (i = 0; i <= kq->kq_knhashmask; i++) {
			while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) {
				KASSERT((kn->kn_status & KN_INFLUX) == 0,
				    ("KN_INFLUX set when not supposed to be"));
				kn->kn_status |= KN_INFLUX;
				KQ_UNLOCK(kq);
				if (!(kn->kn_status & KN_DETACHED))
					kn->kn_fop->f_detach(kn);
				knote_drop(kn, td);
				KQ_LOCK(kq);
			}
		}
	}

	if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) {
		kq->kq_state |= KQ_TASKDRAIN;
		msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0);
	}

	if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
		kq->kq_state &= ~KQ_SEL;
		selwakeuppri(&kq->kq_sel, PSOCK);
	}

	KQ_UNLOCK(kq);

	FILEDESC_LOCK_FAST(fdp);
	SLIST_REMOVE(&fdp->fd_kqlist, kq, kqueue, kq_list);
	FILEDESC_UNLOCK_FAST(fdp);

	knlist_destroy(&kq->kq_sel.si_note);
	mtx_destroy(&kq->kq_lock);
	kq->kq_fdp = NULL;

	if (kq->kq_knhash != NULL)
		free(kq->kq_knhash, M_KQUEUE);
	if (kq->kq_knlist != NULL)
		free(kq->kq_knlist, M_KQUEUE);

	funsetown(&kq->kq_sigio);
	free(kq, M_KQUEUE);
	fp->f_data = NULL;

	return (0);
}
static void
kqueue_wakeup(struct kqueue *kq)
{
	KQ_OWNED(kq);

	if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) {
		kq->kq_state &= ~KQ_SLEEP;
		wakeup(kq);
	}
	if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
		kq->kq_state &= ~KQ_SEL;
		selwakeuppri(&kq->kq_sel, PSOCK);
	}
	if (!knlist_empty(&kq->kq_sel.si_note))
		kqueue_schedtask(kq);
	if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) {
		pgsigio(&kq->kq_sigio, SIGIO, 0);
	}
}
/*
 * Walk down a list of knotes, activating them if their event has triggered.
 *
 * There is a possibility to optimize in the case of one kq watching another.
 * Instead of scheduling a task to wake it up, you could pass enough state
 * down the chain to wake up the parent kqueue.  Make this code functional
 * first.
 */
void
knote(struct knlist *list, long hint, int islocked)
{
	struct kqueue *kq;
	struct knote *kn;

	if (list == NULL)
		return;

	mtx_assert(list->kl_lock, islocked ? MA_OWNED : MA_NOTOWNED);
	if (!islocked)
		mtx_lock(list->kl_lock);
	/*
	 * If we unlock the list lock (and set KN_INFLUX), we can eliminate
	 * the kqueue scheduling, but this will introduce four
	 * lock/unlock's for each knote to test.  If we do, continue to use
	 * SLIST_FOREACH, SLIST_FOREACH_SAFE is not safe in our case, it is
	 * only safe if you want to remove the current item, which we are
	 * not doing.
	 */
	SLIST_FOREACH(kn, &list->kl_list, kn_selnext) {
		kq = kn->kn_kq;
		if ((kn->kn_status & KN_INFLUX) != KN_INFLUX) {
			KQ_LOCK(kq);
			if ((kn->kn_status & KN_INFLUX) != KN_INFLUX) {
				kn->kn_status |= KN_HASKQLOCK;
				if (kn->kn_fop->f_event(kn, hint))
					KNOTE_ACTIVATE(kn, 1);
				kn->kn_status &= ~KN_HASKQLOCK;
			}
			KQ_UNLOCK(kq);
		}
		kq = NULL;
	}
	if (!islocked)
		mtx_unlock(list->kl_lock);
}
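/*
 * Event sources normally reach knote() through the KNOTE_LOCKED()/
 * KNOTE_UNLOCKED() wrappers in sys/event.h.  As a sketch, a driver that
 * just queued data might do KNOTE_UNLOCKED(&sc->sc_sel.si_note, 0), which
 * expands to knote(list, 0, 0) and activates any waiting EVFILT_READ
 * knotes on that list.
 */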
/*
 * add a knote to a knlist
 */
void
knlist_add(struct knlist *knl, struct knote *kn, int islocked)
{
	mtx_assert(knl->kl_lock, islocked ? MA_OWNED : MA_NOTOWNED);
	KQ_NOTOWNED(kn->kn_kq);
	KASSERT((kn->kn_status & (KN_INFLUX|KN_DETACHED)) ==
	    (KN_INFLUX|KN_DETACHED), ("knote not KN_INFLUX and KN_DETACHED"));
	if (!islocked)
		mtx_lock(knl->kl_lock);
	SLIST_INSERT_HEAD(&knl->kl_list, kn, kn_selnext);
	if (!islocked)
		mtx_unlock(knl->kl_lock);
	KQ_LOCK(kn->kn_kq);
	kn->kn_knlist = knl;
	kn->kn_status &= ~KN_DETACHED;
	KQ_UNLOCK(kn->kn_kq);
}
static void
knlist_remove_kq(struct knlist *knl, struct knote *kn, int knlislocked, int kqislocked)
{
	KASSERT(!(!!kqislocked && !knlislocked), ("kq locked w/o knl locked"));
	mtx_assert(knl->kl_lock, knlislocked ? MA_OWNED : MA_NOTOWNED);
	mtx_assert(&kn->kn_kq->kq_lock, kqislocked ? MA_OWNED : MA_NOTOWNED);
	if (!kqislocked)
		KASSERT((kn->kn_status & (KN_INFLUX|KN_DETACHED)) == KN_INFLUX,
		    ("knlist_remove called w/o knote being KN_INFLUX or already removed"));
	if (!knlislocked)
		mtx_lock(knl->kl_lock);
	SLIST_REMOVE(&knl->kl_list, kn, knote, kn_selnext);
	kn->kn_knlist = NULL;
	if (!knlislocked)
		mtx_unlock(knl->kl_lock);
	if (!kqislocked)
		KQ_LOCK(kn->kn_kq);
	kn->kn_status |= KN_DETACHED;
	if (!kqislocked)
		KQ_UNLOCK(kn->kn_kq);
}
/*
 * remove a knote from a specified klist
 */
void
knlist_remove(struct knlist *knl, struct knote *kn, int islocked)
{

	knlist_remove_kq(knl, kn, islocked, 0);
}

/*
 * remove knote from a specified klist while in f_event handler.
 */
void
knlist_remove_inevent(struct knlist *knl, struct knote *kn)
{

	knlist_remove_kq(knl, kn, 1,
	    (kn->kn_status & KN_HASKQLOCK) == KN_HASKQLOCK);
}

int
knlist_empty(struct knlist *knl)
{

	mtx_assert(knl->kl_lock, MA_OWNED);
	return SLIST_EMPTY(&knl->kl_list);
}
static struct mtx	knlist_lock;
MTX_SYSINIT(knlist_lock, &knlist_lock, "knlist lock for lockless objects",
	MTX_DEF);

void
knlist_init(struct knlist *knl, struct mtx *mtx)
{

	if (mtx == NULL)
		knl->kl_lock = &knlist_lock;
	else
		knl->kl_lock = mtx;

	SLIST_INIT(&knl->kl_list);
}
void
knlist_destroy(struct knlist *knl)
{

#ifdef INVARIANTS
	/*
	 * if we run across this error, we need to find the offending
	 * driver and have it call knlist_clear.
	 */
	if (!SLIST_EMPTY(&knl->kl_list))
		printf("WARNING: destroying knlist w/ knotes on it!\n");
#endif

	knl->kl_lock = NULL;
	SLIST_INIT(&knl->kl_list);
}
/*
 * Even if we are locked, we may need to drop the lock to allow any influx
 * knotes time to "settle".
 */
void
knlist_clear(struct knlist *knl, int islocked)
{
	struct knote *kn;
	struct kqueue *kq;

	if (islocked)
		mtx_assert(knl->kl_lock, MA_OWNED);
	else {
		mtx_assert(knl->kl_lock, MA_NOTOWNED);
again:		/* need to reacquire lock since we have dropped it */
		mtx_lock(knl->kl_lock);
	}

	SLIST_FOREACH(kn, &knl->kl_list, kn_selnext) {
		kq = kn->kn_kq;
		KQ_LOCK(kq);
		if ((kn->kn_status & KN_INFLUX) &&
		    (kn->kn_status & KN_DETACHED) != KN_DETACHED) {
			KQ_UNLOCK(kq);
			continue;
		}
		/* Make sure cleared knotes disappear soon */
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		knlist_remove_kq(knl, kn, 1, 1);
		KQ_UNLOCK(kq);
		kq = NULL;
	}

	if (!SLIST_EMPTY(&knl->kl_list)) {
		/* there are still KN_INFLUX remaining */
		kn = SLIST_FIRST(&knl->kl_list);
		kq = kn->kn_kq;
		KQ_LOCK(kq);
		KASSERT(kn->kn_status & KN_INFLUX,
		    ("knote removed w/o list lock"));
		mtx_unlock(knl->kl_lock);
		kq->kq_state |= KQ_FLUXWAIT;
		msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqkclr", 0);
		kq = NULL;
		goto again;
	}

	SLIST_INIT(&knl->kl_list);

	if (islocked)
		mtx_assert(knl->kl_lock, MA_OWNED);
	else {
		mtx_unlock(knl->kl_lock);
		mtx_assert(knl->kl_lock, MA_NOTOWNED);
	}
}
/*
 * remove all knotes referencing a specified fd
 * must be called with FILEDESC lock.  This prevents a race where a new fd
 * comes along and occupies the entry and we attach a knote to the fd.
 */
void
knote_fdclose(struct thread *td, int fd)
{
	struct filedesc *fdp = td->td_proc->p_fd;
	struct kqueue *kq;
	struct knote *kn;
	int influx;

	FILEDESC_LOCK_ASSERT(fdp, MA_OWNED);

	/*
	 * We shouldn't have to worry about new kevents appearing on fd
	 * since filedesc is locked.
	 */
	SLIST_FOREACH(kq, &fdp->fd_kqlist, kq_list) {
		KQ_LOCK(kq);

again:
		influx = 0;
		while (kq->kq_knlistsize > fd &&
		    (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) {
			if (kn->kn_status & KN_INFLUX) {
				/* someone else might be waiting on our knote */
				if (influx)
					wakeup(kq);
				kq->kq_state |= KQ_FLUXWAIT;
				msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0);
				goto again;
			}
			kn->kn_status |= KN_INFLUX;
			KQ_UNLOCK(kq);
			if (!(kn->kn_status & KN_DETACHED))
				kn->kn_fop->f_detach(kn);
			knote_drop(kn, td);
			influx = 1;
			KQ_LOCK(kq);
		}
		KQ_UNLOCK_FLUX(kq);
	}
}
static int
knote_attach(struct knote *kn, struct kqueue *kq)
{
	struct klist *list;

	KASSERT(kn->kn_status & KN_INFLUX, ("knote not marked INFLUX"));
	KQ_OWNED(kq);

	if (kn->kn_fop->f_isfd) {
		if (kn->kn_id >= kq->kq_knlistsize)
			return ENOMEM;
		list = &kq->kq_knlist[kn->kn_id];
	} else {
		if (kq->kq_knhash == NULL)
			return ENOMEM;
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
	}

	SLIST_INSERT_HEAD(list, kn, kn_link);

	return 0;
}
/*
 * knote must already have been detached using the f_detach method.
 * no lock needs to be held, it is assumed that the KN_INFLUX flag is set
 * to prevent other removal.
 */
static void
knote_drop(struct knote *kn, struct thread *td)
{
	struct kqueue *kq;
	struct klist *list;

	kq = kn->kn_kq;

	KQ_NOTOWNED(kq);
	KASSERT((kn->kn_status & KN_INFLUX) == KN_INFLUX,
	    ("knote_drop called without KN_INFLUX set in kn_status"));

	KQ_LOCK(kq);
	if (kn->kn_fop->f_isfd)
		list = &kq->kq_knlist[kn->kn_id];
	else
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];

	SLIST_REMOVE(list, kn, knote, kn_link);
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	KQ_UNLOCK_FLUX(kq);

	if (kn->kn_fop->f_isfd) {
		fdrop(kn->kn_fp, td);
		kn->kn_fp = NULL;
	}

	kqueue_fo_release(kn->kn_kevent.filter);
	kn->kn_fop = NULL;
	knote_free(kn);
}
static void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KQ_OWNED(kn->kn_kq);
	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));

	TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
	kn->kn_status |= KN_QUEUED;
	kq->kq_count++;
	kqueue_wakeup(kq);
}

static void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KQ_OWNED(kn->kn_kq);
	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));

	TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
	kn->kn_status &= ~KN_QUEUED;
	kq->kq_count--;
}
static void
knote_init(void)
{

	knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
}
SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL)

static struct knote *
knote_alloc(int waitok)
{
	return ((struct knote *)uma_zalloc(knote_zone,
	    (waitok ? M_WAITOK : M_NOWAIT) | M_ZERO));
}

static void
knote_free(struct knote *kn)
{
	if (kn != NULL)
		uma_zfree(knote_zone, kn);
}