2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2011 NetApp, Inc.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * Micro event library for FreeBSD, designed for a single i/o thread
33 * using kqueue, and having events be persistent by default.
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
48 #include <sys/types.h>
49 #ifndef WITHOUT_CAPSICUM
50 #include <sys/capsicum.h>
52 #include <sys/event.h>
56 #include <pthread_np.h>
/*
 * File-scope state for the event library. Per the header comment, a
 * single i/o thread services all events, so these globals are shared
 * between that thread and callers of the public mevent_* API and are
 * protected by mevent_lmutex (see the lock/unlock helpers below).
 */
/* me_state value: entry is queued for removal on the next rebuild. */
65 #define MEV_DEL_PENDING 4
/* Identity of the i/o thread running mevent_dispatch(); set there. */
69 static pthread_t mevent_tid;
/* Monotonically increasing ident for EVFILT_TIMER events (see mevent_add). */
70 static int mevent_timid = 43;
/* Self-pipe used to kick the i/o thread out of its blocking kevent(). */
71 static int mevent_pipefd[2];
72 static pthread_mutex_t mevent_lmutex = PTHREAD_MUTEX_INITIALIZER;
/*
 * Fragments of struct mevent (the full declaration is elided from this
 * view): the user callback, and an alias so timer entries reuse the fd
 * field to store their period in milliseconds.
 */
75 void (*me_func)(int, enum ev_type, void *);
76 #define me_msecs me_fd
84 LIST_ENTRY(mevent) me_list;
/*
 * global_head: events currently installed in the kqueue.
 * change_head: events with pending add/enable/disable/delete changes,
 * drained by mevent_build().
 */
87 static LIST_HEAD(listhead, mevent) global_head, change_head;
/*
 * Interiors of the list-lock helpers (function headers are elided from
 * this view; presumably mevent_qlock()/mevent_qunlock()). They serialize
 * access to global_head/change_head via mevent_lmutex.
 */
92 pthread_mutex_lock(&mevent_lmutex);
98 pthread_mutex_unlock(&mevent_lmutex);
/*
 * Internal EVF_READ handler for the notify pipe's read end (registered
 * in mevent_dispatch). Drains all bytes written by mevent_notify(): it
 * keeps reading while a full MEVENT_MAX-sized buffer comes back, and
 * stops on a short (or failed) read. The fd is non-blocking, so the
 * final read returns without stalling the i/o thread. type/param are
 * unused here.
 */
102 mevent_pipe_read(int fd, enum ev_type type, void *param)
104 char buf[MEVENT_MAX];
108 * Drain the pipe read side. The fd is non-blocking so this is
112 status = read(fd, buf, sizeof(buf));
113 } while (status == MEVENT_MAX);
/*
 * Notify fragment (enclosing function header elided; presumably
 * mevent_notify()). When invoked from any thread other than the i/o
 * thread, writes one byte to the pipe so the i/o thread's blocking
 * kevent() call returns and pending changes get processed. The
 * mevent_pipefd[1] != 0 guard skips the write before the pipe exists.
 * NOTE(review): write()'s return value is ignored — best-effort wakeup.
 */
122 * If calling from outside the i/o thread, write a byte on the
123 * pipe to force the i/o thread to exit the blocking kevent call.
125 if (mevent_pipefd[1] != 0 && pthread_self() != mevent_tid) {
126 write(mevent_pipefd[1], &c, 1);
/*
 * Map an mevent type to the corresponding kqueue filter constant:
 * EVF_READ -> EVFILT_READ, EVF_WRITE -> EVFILT_WRITE,
 * EVF_TIMER -> EVFILT_TIMER, EVF_SIGNAL -> EVFILT_SIGNAL.
 * (The retval declaration and final return are elided from this view.)
 */
131 mevent_kq_filter(struct mevent *mevp)
137 if (mevp->me_type == EVF_READ)
138 retval = EVFILT_READ;
140 if (mevp->me_type == EVF_WRITE)
141 retval = EVFILT_WRITE;
143 if (mevp->me_type == EVF_TIMER)
144 retval = EVFILT_TIMER;
146 if (mevp->me_type == EVF_SIGNAL)
147 retval = EVFILT_SIGNAL;
/*
 * Translate an mevent state into kevent flags: the visible branch maps
 * an add-type state to EV_ADD (kqueue events are enabled on add by
 * default, hence "implicitly enabled"). Other case labels — including
 * MEV_DEL_PENDING, which presumably maps to EV_DELETE — are elided
 * from this view; TODO confirm against the full file.
 */
153 mevent_kq_flags(struct mevent *mevp)
157 switch (mevp->me_state) {
159 ret = EV_ADD; /* implicitly enabled */
167 case MEV_DEL_PENDING:
/*
 * Filter-specific flags for the kevent. Per the author's XXX note,
 * nothing is set yet; presumably this just returns 0 (body elided).
 */
179 mevent_kq_fflags(struct mevent *mevp)
181 /* XXX nothing yet, perhaps EV_EOF for reads ? */
/*
 * Drain the change list into a kevent array for a single kevent() call.
 *
 * For each pending entry: timers use their private ident (me_timid) and
 * carry the period in kev.data (me_msecs aliases me_fd); everything else
 * uses the file descriptor as the ident. filter/flags/fflags come from
 * the mevent_kq_* translators above. Each entry is removed from the
 * change list; deletes (MEV_DEL_PENDING) are presumably freed (elided),
 * while all others are moved onto global_head. The me_closefd branch
 * handles entries whose fd was closed by the caller — the close itself
 * removes the kqueue event, so no kevent entry is needed (comment body
 * elided). Returns the number of kevents filled in (return elided);
 * asserts the array never exceeds MEVENT_MAX.
 *
 * mfd is the kqueue descriptor; uses of it in this function are elided
 * from this view. Caller is assumed to hold the list lock — TODO confirm.
 */
186 mevent_build(int mfd, struct kevent *kev)
188 struct mevent *mevp, *tmpp;
195 LIST_FOREACH_SAFE(mevp, &change_head, me_list, tmpp) {
196 if (mevp->me_closefd) {
198 * A close of the file descriptor will remove the
203 if (mevp->me_type == EVF_TIMER) {
204 kev[i].ident = mevp->me_timid;
205 kev[i].data = mevp->me_msecs;
207 kev[i].ident = mevp->me_fd;
210 kev[i].filter = mevent_kq_filter(mevp);
211 kev[i].flags = mevent_kq_flags(mevp);
212 kev[i].fflags = mevent_kq_fflags(mevp);
218 LIST_REMOVE(mevp, me_list);
220 if (mevp->me_state == MEV_DEL_PENDING) {
223 LIST_INSERT_HEAD(&global_head, mevp, me_list);
226 assert(i < MEVENT_MAX);
/*
 * Dispatch the events returned by kevent(): for each of the numev
 * entries, invoke the owning mevent's callback with its fd, type, and
 * opaque parameter. The mevp recovery from kev[i] (presumably via
 * kev[i].udata) is elided from this view. Per the XXX, EV_ERROR
 * results are not checked.
 */
235 mevent_handle(struct kevent *kev, int numev)
240 for (i = 0; i < numev; i++) {
243 /* XXX check for EV_ERROR ? */
245 (*mevp->me_func)(mevp->me_fd, mevp->me_type, mevp->me_param);
/*
 * Public API: register a new event.
 *
 * tfd is the file descriptor for fd-backed events, or the period in
 * milliseconds for EVF_TIMER (stored via the me_msecs alias, with a
 * fresh ident taken from mevent_timid). func/param are the callback
 * and its opaque argument, invoked later by mevent_handle().
 *
 * Rejects negative tfd or NULL func (error return elided). For non-timer
 * types, scans both the installed list and the pending-change list to
 * refuse a duplicate (fd, type) pair — timers are exempt since several
 * may share a period. On success the calloc'd entry is placed on the
 * change list in MEV_ADD state and returned to the caller (return and
 * the calloc NULL-check are elided from this view). Locking around the
 * list walks is presumably done via the qlock helpers — elided here.
 */
250 mevent_add(int tfd, enum ev_type type,
251 void (*func)(int, enum ev_type, void *), void *param)
253 struct mevent *lp, *mevp;
255 if (tfd < 0 || func == NULL) {
264 * Verify that the fd/type tuple is not present in any list
266 LIST_FOREACH(lp, &global_head, me_list) {
267 if (type != EVF_TIMER && lp->me_fd == tfd &&
268 lp->me_type == type) {
273 LIST_FOREACH(lp, &change_head, me_list) {
274 if (type != EVF_TIMER && lp->me_fd == tfd &&
275 lp->me_type == type) {
281 * Allocate an entry, populate it, and add it to the change list.
283 mevp = calloc(1, sizeof(struct mevent));
288 if (type == EVF_TIMER) {
289 mevp->me_msecs = tfd;
290 mevp->me_timid = mevent_timid++;
293 mevp->me_type = type;
294 mevp->me_func = func;
295 mevp->me_param = param;
297 LIST_INSERT_HEAD(&change_head, mevp, me_list);
299 mevp->me_state = MEV_ADD;
/*
 * Internal helper backing mevent_enable/mevent_disable: move an event
 * to newstate and queue it for the next kqueue rebuild.
 *
 * Refuses to touch an event already marked MEV_DEL_PENDING, and is a
 * no-op when the state is unchanged (the return values for both early
 * exits are elided). Otherwise records the new state and, if the entry
 * is not already on the change list (me_cq flag, presumably set in the
 * elided lines), migrates it from global_head to change_head so
 * mevent_build() picks it up.
 */
309 mevent_update(struct mevent *evp, int newstate)
312 * It's not possible to enable/disable a deleted event
314 if (evp->me_state == MEV_DEL_PENDING)
318 * No update needed if state isn't changing
320 if (evp->me_state == newstate)
325 evp->me_state = newstate;
328 * Place the entry onto the changed list if not already there.
330 if (evp->me_cq == 0) {
332 LIST_REMOVE(evp, me_list);
333 LIST_INSERT_HEAD(&change_head, evp, me_list);
/* Public API: re-enable a previously disabled event (MEV_ENABLE). */
343 mevent_enable(struct mevent *evp)
346 return (mevent_update(evp, MEV_ENABLE));
/* Public API: disable an event without removing it (MEV_DISABLE). */
350 mevent_disable(struct mevent *evp)
353 return (mevent_update(evp, MEV_DISABLE));
/*
 * Internal helper backing mevent_delete/mevent_delete_close: mark evp
 * MEV_DEL_PENDING and queue it on the change list (if not already
 * there, per me_cq) so mevent_build() removes and frees it. closefd
 * presumably sets me_closefd so the fd is closed during the rebuild —
 * that assignment is elided from this view; confirm against the full
 * file. Locking and the return value are also elided.
 */
357 mevent_delete_event(struct mevent *evp, int closefd)
362 * Place the entry onto the changed list if not already there, and
363 * mark as to be deleted.
365 if (evp->me_cq == 0) {
367 LIST_REMOVE(evp, me_list);
368 LIST_INSERT_HEAD(&change_head, evp, me_list);
371 evp->me_state = MEV_DEL_PENDING;
/* Public API: delete an event, leaving its file descriptor open. */
382 mevent_delete(struct mevent *evp)
385 return (mevent_delete_event(evp, 0));
/* Public API: delete an event and close its file descriptor. */
389 mevent_delete_close(struct mevent *evp)
392 return (mevent_delete_event(evp, 1));
/*
 * Label the i/o thread "mevent" for debuggers/top; uses the
 * FreeBSD-specific pthread_set_name_np() from <pthread_np.h>.
 */
396 mevent_set_name(void)
399 pthread_set_name_np(mevent_tid, "mevent");
/*
 * Main loop of the single i/o thread; never returns in normal operation
 * (the enclosing infinite loop's braces are elided from this view).
 *
 * Setup: records its own thread id in mevent_tid; creates the kqueue
 * (the kqueue() call itself is elided — mfd is its descriptor) and, when
 * Capsicum is enabled, limits mfd to CAP_KQUEUE and the pipe fds to
 * CAP_EVENT/CAP_READ/CAP_WRITE, tolerating ENOSYS on kernels without
 * Capsicum; creates the self-notification pipe (the non-blocking fcntl
 * mentioned in the comment is elided) and registers mevent_pipe_read on
 * its read end so writers can interrupt the blocking kevent() below.
 *
 * Loop body: (1) mevent_build() converts queued changes into a
 * changelist applied via a non-blocking kevent() call — per the XXX,
 * this could be folded into the blocking call; (2) a blocking kevent()
 * waits for up to MEVENT_MAX events, treating EINTR as benign;
 * (3) mevent_handle() dispatches callbacks for the returned events.
 * Both perror() branches presumably exit/abort — elided from this view.
 */
403 mevent_dispatch(void)
405 struct kevent changelist[MEVENT_MAX];
406 struct kevent eventlist[MEVENT_MAX];
407 struct mevent *pipev;
411 #ifndef WITHOUT_CAPSICUM
415 mevent_tid = pthread_self();
421 #ifndef WITHOUT_CAPSICUM
422 cap_rights_init(&rights, CAP_KQUEUE);
423 if (cap_rights_limit(mfd, &rights) == -1 && errno != ENOSYS)
424 errx(EX_OSERR, "Unable to apply rights for sandbox");
428 * Open the pipe that will be used for other threads to force
429 * the blocking kqueue call to exit by writing to it. Set the
430 * descriptor to non-blocking.
432 ret = pipe(mevent_pipefd);
438 #ifndef WITHOUT_CAPSICUM
439 cap_rights_init(&rights, CAP_EVENT, CAP_READ, CAP_WRITE);
440 if (cap_rights_limit(mevent_pipefd[0], &rights) == -1 && errno != ENOSYS)
441 errx(EX_OSERR, "Unable to apply rights for sandbox");
442 if (cap_rights_limit(mevent_pipefd[1], &rights) == -1 && errno != ENOSYS)
443 errx(EX_OSERR, "Unable to apply rights for sandbox");
447 * Add internal event handler for the pipe write fd
449 pipev = mevent_add(mevent_pipefd[0], EVF_READ, mevent_pipe_read, NULL);
450 assert(pipev != NULL);
454 * Build changelist if required.
455 * XXX the changelist can be put into the blocking call
456 * to eliminate the extra syscall. Currently better for
459 numev = mevent_build(mfd, changelist);
461 ret = kevent(mfd, changelist, numev, NULL, 0, NULL);
463 perror("Error return from kevent change");
468 * Block awaiting events
470 ret = kevent(mfd, NULL, 0, eventlist, MEVENT_MAX, NULL);
471 if (ret == -1 && errno != EINTR) {
472 perror("Error return from kevent monitor");
476 * Handle reported events
478 mevent_handle(eventlist, ret);