2 * Copyright (c) 2011 NetApp, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * Micro event library for FreeBSD, designed for a single i/o thread
31 * using kqueue, and having events be persistent by default.
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
46 #include <sys/types.h>
47 #ifndef WITHOUT_CAPSICUM
48 #include <sys/capsicum.h>
50 #include <sys/event.h>
54 #include <pthread_np.h>
/*
 * me_state value: entry is queued on the change list for removal
 * (other state values MEV_ADD/MEV_ENABLE/MEV_DISABLE are elided here).
 */
63 #define MEV_DEL_PENDING 4
/* Identity of the single i/o (dispatch) thread; used to detect cross-thread callers. */
67 static pthread_t mevent_tid;
/* Next kqueue ident handed to a timer event; timers have no fd, so a counter is used. */
68 static int mevent_timid = 43;
/* Self-pipe used to kick the i/o thread out of its blocking kevent() call. */
69 static int mevent_pipefd[2];
/* Serializes all access to global_head/change_head and mevent state. */
70 static pthread_mutex_t mevent_lmutex = PTHREAD_MUTEX_INITIALIZER;
/* (struct mevent members — the struct declaration itself is elided in this view) */
73 void (*me_func)(int, enum ev_type, void *);
/* Timer events reuse the fd field to hold the period in milliseconds. */
74 #define me_msecs me_fd
82 LIST_ENTRY(mevent) me_list;
/* Active events live on global_head; entries with pending updates on change_head. */
85 static LIST_HEAD(listhead, mevent) global_head, change_head;
/* Body fragments of the list-lock helpers (mevent_qlock / mevent_qunlock). */
90 pthread_mutex_lock(&mevent_lmutex);
96 pthread_mutex_unlock(&mevent_lmutex);
/*
 * Handler for the internal wakeup pipe: drain every byte written by
 * mevent_notify() so a later write can wake the kevent() loop again.
 * The read fd is non-blocking, so the loop terminates as soon as a
 * read returns fewer than MEVENT_MAX bytes.
 */
100 mevent_pipe_read(int fd, enum ev_type type, void *param)
102 char buf[MEVENT_MAX];
106 * Drain the pipe read side. The fd is non-blocking so this is
110 status = read(fd, buf, sizeof(buf));
/* A full read may mean more data is queued; loop until a short read. */
111 } while (status == MEVENT_MAX);
120 * If calling from outside the i/o thread, write a byte on the
121 * pipe to force the i/o thread to exit the blocking kevent call.
/* Guard against use before mevent_dispatch() has created the pipe. */
123 if (mevent_pipefd[1] != 0 && pthread_self() != mevent_tid) {
/* Best-effort wakeup: the write() return value is deliberately ignored. */
124 write(mevent_pipefd[1], &c, 1);
/*
 * Map an mevent type (EVF_*) to the corresponding kqueue filter
 * (EVFILT_*) for use in a struct kevent.
 */
129 mevent_kq_filter(struct mevent *mevp)
135 if (mevp->me_type == EVF_READ)
136 retval = EVFILT_READ;
138 if (mevp->me_type == EVF_WRITE)
139 retval = EVFILT_WRITE;
141 if (mevp->me_type == EVF_TIMER)
142 retval = EVFILT_TIMER;
144 if (mevp->me_type == EVF_SIGNAL)
145 retval = EVFILT_SIGNAL;
/*
 * Map an mevent pending-state (MEV_*) to kqueue action flags (EV_ADD,
 * EV_DELETE, ...) for the changelist entry.
 */
151 mevent_kq_flags(struct mevent *mevp)
155 switch (mevp->me_state) {
157 ret = EV_ADD; /* implicitly enabled */
160 case MEV_DEL_PENDING:
/* Filter-specific flags for the kevent entry; currently always empty. */
177 mevent_kq_fflags(struct mevent *mevp)
179 /* XXX nothing yet, perhaps EV_EOF for reads ? */
/*
 * Convert the pending change list into a kevent changelist array.
 * Entries marked MEV_DEL_PENDING are discarded (and their fd closed
 * if requested via me_closefd); all others are moved back onto the
 * global list.  Returns the number of kevent entries filled in
 * (bounded by MEVENT_MAX — asserted below).
 */
184 mevent_build(int mfd, struct kevent *kev)
186 struct mevent *mevp, *tmpp;
/* Safe iteration: entries are unlinked from change_head inside the loop. */
193 LIST_FOREACH_SAFE(mevp, &change_head, me_list, tmpp) {
194 if (mevp->me_closefd) {
196 * A close of the file descriptor will remove the
/* Timers are identified by their synthetic ident, not an fd; data is the period. */
201 if (mevp->me_type == EVF_TIMER) {
202 kev[i].ident = mevp->me_timid;
203 kev[i].data = mevp->me_msecs;
205 kev[i].ident = mevp->me_fd;
208 kev[i].filter = mevent_kq_filter(mevp);
209 kev[i].flags = mevent_kq_flags(mevp);
210 kev[i].fflags = mevent_kq_fflags(mevp);
216 LIST_REMOVE(mevp, me_list);
/* Deleted entries are freed (elided); survivors rejoin the global list. */
218 if (mevp->me_state == MEV_DEL_PENDING) {
221 LIST_INSERT_HEAD(&global_head, mevp, me_list);
/* The change list must never exceed the fixed changelist array. */
224 assert(i < MEVENT_MAX);
/*
 * Dispatch each returned kevent to its registered callback.  The
 * mevent pointer is presumably recovered from kev[i].udata (that
 * assignment is elided in this view) — confirm against full source.
 */
233 mevent_handle(struct kevent *kev, int numev)
238 for (i = 0; i < numev; i++) {
241 /* XXX check for EV_ERROR ? */
243 (*mevp->me_func)(mevp->me_fd, mevp->me_type, mevp->me_param);
/*
 * Register a new event.  For EVF_TIMER, tfd is the period in
 * milliseconds rather than a file descriptor; for all other types it
 * is the fd to monitor.  func/param are the callback and its cookie.
 * Returns the new entry (NULL on bad arguments, duplicate fd/type
 * pair, or allocation failure — some paths elided in this view).
 */
248 mevent_add(int tfd, enum ev_type type,
249 void (*func)(int, enum ev_type, void *), void *param)
251 struct mevent *lp, *mevp;
/* tfd < 0 is invalid both as an fd and as a timer period. */
253 if (tfd < 0 || func == NULL) {
262 * Verify that the fd/type tuple is not present in any list
/* Timers are exempt from the duplicate check: they don't own an fd. */
264 LIST_FOREACH(lp, &global_head, me_list) {
265 if (type != EVF_TIMER && lp->me_fd == tfd &&
266 lp->me_type == type) {
271 LIST_FOREACH(lp, &change_head, me_list) {
272 if (type != EVF_TIMER && lp->me_fd == tfd &&
273 lp->me_type == type) {
279 * Allocate an entry, populate it, and add it to the change list.
281 mevp = calloc(1, sizeof(struct mevent));
/* Timers store the period in me_msecs (aliased to me_fd) and get a unique ident. */
286 if (type == EVF_TIMER) {
287 mevp->me_msecs = tfd;
288 mevp->me_timid = mevent_timid++;
291 mevp->me_type = type;
292 mevp->me_func = func;
293 mevp->me_param = param;
/* Queue for the next mevent_build() pass; kevent() registration happens there. */
295 LIST_INSERT_HEAD(&change_head, mevp, me_list);
297 mevp->me_state = MEV_ADD;
/*
 * Common helper for mevent_enable/mevent_disable: set the new pending
 * state and queue the entry on the change list so the i/o thread picks
 * it up.  No-ops on an entry already pending deletion or already in
 * the requested state.
 */
307 mevent_update(struct mevent *evp, int newstate)
310 * It's not possible to enable/disable a deleted event
312 if (evp->me_state == MEV_DEL_PENDING)
316 * No update needed if state isn't changing
318 if (evp->me_state == newstate)
323 evp->me_state = newstate;
326 * Place the entry onto the changed list if not already there.
/* me_cq presumably flags "already on change_head" — confirm against full source. */
328 if (evp->me_cq == 0) {
330 LIST_REMOVE(evp, me_list);
331 LIST_INSERT_HEAD(&change_head, evp, me_list);
/* Request that an existing event be (re-)enabled. */
341 mevent_enable(struct mevent *evp)
344 return (mevent_update(evp, MEV_ENABLE));
/* Request that an existing event be disabled (without deleting it). */
348 mevent_disable(struct mevent *evp)
351 return (mevent_update(evp, MEV_DISABLE));
/*
 * Common helper for mevent_delete/mevent_delete_close: mark the entry
 * MEV_DEL_PENDING and queue it on the change list; the actual kqueue
 * removal, free, and optional close(2) of the fd happen later in
 * mevent_build() on the i/o thread.
 */
355 mevent_delete_event(struct mevent *evp, int closefd)
360 * Place the entry onto the changed list if not already there, and
361 * mark as to be deleted.
363 if (evp->me_cq == 0) {
365 LIST_REMOVE(evp, me_list);
366 LIST_INSERT_HEAD(&change_head, evp, me_list);
369 evp->me_state = MEV_DEL_PENDING;
/* Delete an event; the caller retains ownership of the fd (not closed). */
380 mevent_delete(struct mevent *evp)
383 return (mevent_delete_event(evp, 0));
/* Delete an event and close its fd once the deletion is processed. */
387 mevent_delete_close(struct mevent *evp)
390 return (mevent_delete_event(evp, 1));
/* Label the i/o thread "mevent" for debuggers/ps (FreeBSD pthread_np). */
394 mevent_set_name(void)
397 pthread_set_name_np(mevent_tid, "mevent");
/*
 * Main event loop, run on the i/o thread: create the kqueue and
 * wakeup pipe, apply Capsicum rights limits (unless built with
 * WITHOUT_CAPSICUM), then loop forever applying pending changes and
 * dispatching events.  Does not return.
 */
401 mevent_dispatch(void)
403 struct kevent changelist[MEVENT_MAX];
404 struct kevent eventlist[MEVENT_MAX];
405 struct mevent *pipev;
409 #ifndef WITHOUT_CAPSICUM
/* Record our identity so mevent_notify() can detect cross-thread callers. */
413 mevent_tid = pthread_self();
419 #ifndef WITHOUT_CAPSICUM
/* Restrict the kqueue fd to kevent operations only. */
420 cap_rights_init(&rights, CAP_KQUEUE);
/* ENOSYS means the kernel lacks Capsicum; tolerate that, fail on anything else. */
421 if (cap_rights_limit(mfd, &rights) == -1 && errno != ENOSYS)
422 errx(EX_OSERR, "Unable to apply rights for sandbox");
426 * Open the pipe that will be used for other threads to force
427 * the blocking kqueue call to exit by writing to it. Set the
428 * descriptor to non-blocking.
430 ret = pipe(mevent_pipefd);
436 #ifndef WITHOUT_CAPSICUM
437 cap_rights_init(&rights, CAP_EVENT, CAP_READ, CAP_WRITE);
438 if (cap_rights_limit(mevent_pipefd[0], &rights) == -1 && errno != ENOSYS)
439 errx(EX_OSERR, "Unable to apply rights for sandbox");
440 if (cap_rights_limit(mevent_pipefd[1], &rights) == -1 && errno != ENOSYS)
441 errx(EX_OSERR, "Unable to apply rights for sandbox");
445 * Add internal read handler for the pipe read fd
447 pipev = mevent_add(mevent_pipefd[0], EVF_READ, mevent_pipe_read, NULL);
448 assert(pipev != NULL);
452 * Build changelist if required.
453 * XXX the changelist can be put into the blocking call
454 * to eliminate the extra syscall. Currently better for
/* First pass: register pending changes with the kernel. */
457 numev = mevent_build(mfd, changelist);
459 ret = kevent(mfd, changelist, numev, NULL, 0, NULL);
461 perror("Error return from kevent change");
466 * Block awaiting events
468 ret = kevent(mfd, NULL, 0, eventlist, MEVENT_MAX, NULL);
/* EINTR is expected (signals); anything else is reported. */
469 if (ret == -1 && errno != EINTR) {
470 perror("Error return from kevent monitor");
474 * Handle reported events
476 mevent_handle(eventlist, ret);