/*
 * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "event2/event-config.h"
#include "evconfig-private.h"

#ifdef _WIN32
#include <winsock2.h>
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif
#include <sys/types.h>
#if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
#include <sys/time.h>
#endif
#include <sys/queue.h>
#ifdef EVENT__HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#ifdef EVENT__HAVE_UNISTD_H
#include <unistd.h>
#endif

#include "event2/event.h"
#include "event2/event_struct.h"
#include "event2/event_compat.h"
#include "event-internal.h"
#include "defer-internal.h"
#include "evthread-internal.h"
#include "event2/thread.h"
#include "event2/util.h"
#include "log-internal.h"
#include "evmap-internal.h"
#include "iocp-internal.h"
#include "changelist-internal.h"
#define HT_NO_CACHE_HASH_VALUES
#include "ht-internal.h"
#include "util-internal.h"
#ifdef EVENT__HAVE_WORKING_KQUEUE
#include "kqueue-internal.h"
#endif

#ifdef EVENT__HAVE_EVENT_PORTS
extern const struct eventop evportops;
#endif
#ifdef EVENT__HAVE_SELECT
extern const struct eventop selectops;
#endif
#ifdef EVENT__HAVE_POLL
extern const struct eventop pollops;
#endif
#ifdef EVENT__HAVE_EPOLL
extern const struct eventop epollops;
#endif
#ifdef EVENT__HAVE_WORKING_KQUEUE
extern const struct eventop kqops;
#endif
#ifdef EVENT__HAVE_DEVPOLL
extern const struct eventop devpollops;
#endif
#ifdef _WIN32
extern const struct eventop win32ops;
#endif
/* Array of backends in order of preference. */
static const struct eventop *eventops[] = {
#ifdef EVENT__HAVE_EVENT_PORTS
	&evportops,
#endif
#ifdef EVENT__HAVE_WORKING_KQUEUE
	&kqops,
#endif
#ifdef EVENT__HAVE_EPOLL
	&epollops,
#endif
#ifdef EVENT__HAVE_DEVPOLL
	&devpollops,
#endif
#ifdef EVENT__HAVE_POLL
	&pollops,
#endif
#ifdef EVENT__HAVE_SELECT
	&selectops,
#endif
#ifdef _WIN32
	&win32ops,
#endif
	NULL
};
/* Global state; deprecated */
struct event_base *event_global_current_base_ = NULL;
#define current_base event_global_current_base_

static void *event_self_cbarg_ptr_ = NULL;

/* Prototypes */
static void event_queue_insert_active(struct event_base *, struct event_callback *);
static void event_queue_insert_active_later(struct event_base *, struct event_callback *);
static void event_queue_insert_timeout(struct event_base *, struct event *);
static void event_queue_insert_inserted(struct event_base *, struct event *);
static void event_queue_remove_active(struct event_base *, struct event_callback *);
static void event_queue_remove_active_later(struct event_base *, struct event_callback *);
static void event_queue_remove_timeout(struct event_base *, struct event *);
static void event_queue_remove_inserted(struct event_base *, struct event *);
static void event_queue_make_later_events_active(struct event_base *base);

static int evthread_make_base_notifiable_nolock_(struct event_base *base);
static int event_del_(struct event *ev, int blocking);

#ifdef USE_REINSERT_TIMEOUT
/* This code seems buggy; only turn it on if we find out what the trouble is. */
static void event_queue_reinsert_timeout(struct event_base *,struct event *, int was_common, int is_common, int old_timeout_idx);
#endif

static int event_haveevents(struct event_base *);

static int event_process_active(struct event_base *);

static int timeout_next(struct event_base *, struct timeval **);
static void timeout_process(struct event_base *);

static inline void event_signal_closure(struct event_base *, struct event *ev);
static inline void event_persist_closure(struct event_base *, struct event *ev);

static int evthread_notify_base(struct event_base *base);

static void insert_common_timeout_inorder(struct common_timeout_list *ctl,
    struct event *ev);
#ifndef EVENT__DISABLE_DEBUG_MODE
/* These functions implement a hashtable of which 'struct event *' structures
 * have been set up or added.  We don't want to trust the content of the struct
 * event itself, since we're trying to work through cases where an event gets
 * clobbered or freed.  Instead, we keep a hashtable indexed by the pointer.
 */

struct event_debug_entry {
	HT_ENTRY(event_debug_entry) node;
	const struct event *ptr;
	unsigned added : 1;
};

static inline unsigned
hash_debug_entry(const struct event_debug_entry *e)
{
	/* We need to do this silliness to convince compilers that we
	 * honestly mean to cast e->ptr to an integer, and discard any
	 * part of it that doesn't fit in an unsigned.
	 */
	unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
	/* Our hashtable implementation is pretty sensitive to low bits,
	 * and every struct event is over 64 bytes in size, so we can
	 * just take the low bits. */
	return (u >> 6);
}

static inline int
eq_debug_entry(const struct event_debug_entry *a,
    const struct event_debug_entry *b)
{
	return a->ptr == b->ptr;
}

int event_debug_mode_on_ = 0;
#if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
/**
 * @brief debug mode variable which is set for any function/structure that needs
 *        to be shared across threads (if thread support is enabled).
 *
 *        When and if evthreads are initialized, this variable will be evaluated,
 *        and if set to something other than zero, this means the evthread setup
 *        functions were called out of order.
 *
 *        See: "Locks and threading" in the documentation.
 */
int event_debug_created_threadable_ctx_ = 0;
#endif

/* Set if it's too late to enable event_debug_mode. */
static int event_debug_mode_too_late = 0;
#ifndef EVENT__DISABLE_THREAD_SUPPORT
static void *event_debug_map_lock_ = NULL;
#endif
static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
	HT_INITIALIZER();

HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
    eq_debug_entry)
HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
    eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)
/* Macro: record that ev is now setup (that is, ready for an add) */
#define event_debug_note_setup_(ev) do {				\
	if (event_debug_mode_on_) {					\
		struct event_debug_entry *dent,find;			\
		find.ptr = (ev);					\
		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
		if (dent) {						\
			dent->added = 0;				\
		} else {						\
			dent = mm_malloc(sizeof(*dent));		\
			if (!dent)					\
				event_err(1,				\
				    "Out of memory in debugging code");	\
			dent->ptr = (ev);				\
			dent->added = 0;				\
			HT_INSERT(event_debug_map, &global_debug_map, dent); \
		}							\
		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
	}								\
	event_debug_mode_too_late = 1;					\
	} while (0)
/* Macro: record that ev is no longer setup */
#define event_debug_note_teardown_(ev) do {				\
	if (event_debug_mode_on_) {					\
		struct event_debug_entry *dent,find;			\
		find.ptr = (ev);					\
		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
		dent = HT_REMOVE(event_debug_map, &global_debug_map, &find); \
		if (dent)						\
			mm_free(dent);					\
		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
	}								\
	event_debug_mode_too_late = 1;					\
	} while (0)
/* Macro: record that ev is now added */
#define event_debug_note_add_(ev) do {					\
	if (event_debug_mode_on_) {					\
		struct event_debug_entry *dent,find;			\
		find.ptr = (ev);					\
		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
		if (dent) {						\
			dent->added = 1;				\
		} else {						\
			event_errx(EVENT_ERR_ABORT_,			\
			    "%s: noting an add on a non-setup event %p" \
			    " (events: 0x%x, fd: "EV_SOCK_FMT		\
			    ", flags: 0x%x)",				\
			    __func__, (ev), (ev)->ev_events,		\
			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);	\
		}							\
		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
	}								\
	event_debug_mode_too_late = 1;					\
	} while (0)
/* Macro: record that ev is no longer added */
#define event_debug_note_del_(ev) do {					\
	if (event_debug_mode_on_) {					\
		struct event_debug_entry *dent,find;			\
		find.ptr = (ev);					\
		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
		if (dent) {						\
			dent->added = 0;				\
		} else {						\
			event_errx(EVENT_ERR_ABORT_,			\
			    "%s: noting a del on a non-setup event %p"	\
			    " (events: 0x%x, fd: "EV_SOCK_FMT		\
			    ", flags: 0x%x)",				\
			    __func__, (ev), (ev)->ev_events,		\
			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);	\
		}							\
		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
	}								\
	event_debug_mode_too_late = 1;					\
	} while (0)
/* Macro: assert that ev is setup (i.e., okay to add or inspect) */
#define event_debug_assert_is_setup_(ev) do {				\
	if (event_debug_mode_on_) {					\
		struct event_debug_entry *dent,find;			\
		find.ptr = (ev);					\
		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
		if (!dent) {						\
			event_errx(EVENT_ERR_ABORT_,			\
			    "%s called on a non-initialized event %p"	\
			    " (events: 0x%x, fd: "EV_SOCK_FMT		\
			    ", flags: 0x%x)",				\
			    __func__, (ev), (ev)->ev_events,		\
			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);	\
		}							\
		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
	}								\
	} while (0)
/* Macro: assert that ev is not added (i.e., okay to tear down or set
 * up again) */
#define event_debug_assert_not_added_(ev) do {				\
	if (event_debug_mode_on_) {					\
		struct event_debug_entry *dent,find;			\
		find.ptr = (ev);					\
		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
		if (dent && dent->added) {				\
			event_errx(EVENT_ERR_ABORT_,			\
			    "%s called on an already added event %p"	\
			    " (events: 0x%x, fd: "EV_SOCK_FMT", "	\
			    "flags: 0x%x)",				\
			    __func__, (ev), (ev)->ev_events,		\
			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);	\
		}							\
		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
	}								\
	} while (0)
#else
#define event_debug_note_setup_(ev) \
	((void)0)
#define event_debug_note_teardown_(ev) \
	((void)0)
#define event_debug_note_add_(ev) \
	((void)0)
#define event_debug_note_del_(ev) \
	((void)0)
#define event_debug_assert_is_setup_(ev) \
	((void)0)
#define event_debug_assert_not_added_(ev) \
	((void)0)
#endif
#define EVENT_BASE_ASSERT_LOCKED(base)		\
	EVLOCK_ASSERT_LOCKED((base)->th_base_lock)

/* How often (in seconds) do we check for changes in wall clock time relative
 * to monotonic time?  Set this to -1 for 'never.' */
#define CLOCK_SYNC_INTERVAL 5
/** Set 'tp' to the current time according to 'base'.  We must hold the lock
 * on 'base'.  If there is a cached time, return it.  Otherwise, use
 * clock_gettime or gettimeofday as appropriate to find out the right time.
 * Return 0 on success, -1 on failure.
 */
static int
gettime(struct event_base *base, struct timeval *tp)
{
	EVENT_BASE_ASSERT_LOCKED(base);

	if (base->tv_cache.tv_sec) {
		*tp = base->tv_cache;
		return (0);
	}

	if (evutil_gettime_monotonic_(&base->monotonic_timer, tp) == -1) {
		return (-1);
	}

	if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
	    < tp->tv_sec) {
		struct timeval tv;
		evutil_gettimeofday(&tv,NULL);
		evutil_timersub(&tv, tp, &base->tv_clock_diff);
		base->last_updated_clock_diff = tp->tv_sec;
	}

	return (0);
}
int
event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
{
	int r;
	if (!base) {
		base = current_base;
		if (!current_base)
			return evutil_gettimeofday(tv, NULL);
	}

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	if (base->tv_cache.tv_sec == 0) {
		r = evutil_gettimeofday(tv, NULL);
	} else {
		evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
		r = 0;
	}
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return r;
}
/** Make 'base' have no current cached time. */
static inline void
clear_time_cache(struct event_base *base)
{
	base->tv_cache.tv_sec = 0;
}

/** Replace the cached time in 'base' with the current time. */
static inline void
update_time_cache(struct event_base *base)
{
	base->tv_cache.tv_sec = 0;
	if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
		gettime(base, &base->tv_cache);
}

int
event_base_update_cache_time(struct event_base *base)
{
	if (!base) {
		base = current_base;
		if (!current_base)
			return -1;
	}

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	if (base->running_loop)
		update_time_cache(base);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return 0;
}
static inline struct event *
event_callback_to_event(struct event_callback *evcb)
{
	EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_INIT));
	return EVUTIL_UPCAST(evcb, struct event, ev_evcallback);
}

static inline struct event_callback *
event_to_event_callback(struct event *ev)
{
	return &ev->ev_evcallback;
}
struct event_base *
event_init(void)
{
	struct event_base *base = event_base_new_with_config(NULL);

	if (base == NULL) {
		event_errx(1, "%s: Unable to construct event_base", __func__);
		return NULL;
	}

	current_base = base;

	return (base);
}

struct event_base *
event_base_new(void)
{
	struct event_base *base = NULL;
	struct event_config *cfg = event_config_new();
	if (cfg) {
		base = event_base_new_with_config(cfg);
		event_config_free(cfg);
	}
	return base;
}
/** Return true iff 'method' is the name of a method that 'cfg' tells us to
 * avoid. */
static int
event_config_is_avoided_method(const struct event_config *cfg,
    const char *method)
{
	struct event_config_entry *entry;

	TAILQ_FOREACH(entry, &cfg->entries, next) {
		if (entry->avoid_method != NULL &&
		    strcmp(entry->avoid_method, method) == 0)
			return (1);
	}

	return (0);
}
/** Return true iff 'method' is disabled according to the environment. */
static int
event_is_method_disabled(const char *name)
{
	char environment[64];
	int i;

	evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
	for (i = 8; environment[i] != '\0'; ++i)
		environment[i] = EVUTIL_TOUPPER_(environment[i]);
	/* Note that evutil_getenv_() ignores the environment entirely if
	 * we're setuid */
	return (evutil_getenv_(environment) != NULL);
}
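/* Usage sketch (editor's illustration, not part of the library): given the
 * check above, setting the environment variable EVENT_NO<METHOD> disables
 * that backend at event_base creation time, e.g.
 *
 *	EVENT_NOEPOLL=yes ./myprog
 *
 * makes the backend loop skip epoll and fall back to the next entry in
 * eventops[]. */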
int
event_base_get_features(const struct event_base *base)
{
	return base->evsel->features;
}

void
event_enable_debug_mode(void)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
	if (event_debug_mode_on_)
		event_errx(1, "%s was called twice!", __func__);
	if (event_debug_mode_too_late)
		event_errx(1, "%s must be called *before* creating any events "
		    "or event_bases",__func__);

	event_debug_mode_on_ = 1;

	HT_INIT(event_debug_map, &global_debug_map);
#endif
}
void
event_disable_debug_mode(void)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
	struct event_debug_entry **ent, *victim;

	EVLOCK_LOCK(event_debug_map_lock_, 0);
	for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
		victim = *ent;
		ent = HT_NEXT_RMV(event_debug_map, &global_debug_map, ent);
		mm_free(victim);
	}
	HT_CLEAR(event_debug_map, &global_debug_map);
	EVLOCK_UNLOCK(event_debug_map_lock_ , 0);

	event_debug_mode_on_ = 0;
#endif
}
struct event_base *
event_base_new_with_config(const struct event_config *cfg)
{
	int i;
	struct event_base *base;
	int should_check_environment;

#ifndef EVENT__DISABLE_DEBUG_MODE
	event_debug_mode_too_late = 1;
#endif

	if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
		event_warn("%s: calloc", __func__);
		return NULL;
	}

	if (cfg)
		base->flags = cfg->flags;

	should_check_environment =
	    !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));

	{
		struct timeval tmp;
		int precise_time =
		    cfg && (cfg->flags & EVENT_BASE_FLAG_PRECISE_TIMER);
		int flags;
		if (should_check_environment && !precise_time) {
			precise_time = evutil_getenv_("EVENT_PRECISE_TIMER") != NULL;
			base->flags |= EVENT_BASE_FLAG_PRECISE_TIMER;
		}
		flags = precise_time ? EV_MONOT_PRECISE : 0;
		evutil_configure_monotonic_time_(&base->monotonic_timer, flags);

		gettime(base, &tmp);
	}

	min_heap_ctor_(&base->timeheap);

	base->sig.ev_signal_pair[0] = -1;
	base->sig.ev_signal_pair[1] = -1;
	base->th_notify_fd[0] = -1;
	base->th_notify_fd[1] = -1;

	TAILQ_INIT(&base->active_later_queue);

	evmap_io_initmap_(&base->io);
	evmap_signal_initmap_(&base->sigmap);
	event_changelist_init_(&base->changelist);

	base->evbase = NULL;

	if (cfg) {
		memcpy(&base->max_dispatch_time,
		    &cfg->max_dispatch_interval, sizeof(struct timeval));
		base->limit_callbacks_after_prio =
		    cfg->limit_callbacks_after_prio;
	} else {
		base->max_dispatch_time.tv_sec = -1;
		base->limit_callbacks_after_prio = 1;
	}
	if (cfg && cfg->max_dispatch_callbacks >= 0) {
		base->max_dispatch_callbacks = cfg->max_dispatch_callbacks;
	} else {
		base->max_dispatch_callbacks = INT_MAX;
	}
	if (base->max_dispatch_callbacks == INT_MAX &&
	    base->max_dispatch_time.tv_sec == -1)
		base->limit_callbacks_after_prio = INT_MAX;

	for (i = 0; eventops[i] && !base->evbase; i++) {
		if (cfg != NULL) {
			/* determine if this backend should be avoided */
			if (event_config_is_avoided_method(cfg,
				eventops[i]->name))
				continue;
			if ((eventops[i]->features & cfg->require_features)
			    != cfg->require_features)
				continue;
		}

		/* also obey the environment variables */
		if (should_check_environment &&
		    event_is_method_disabled(eventops[i]->name))
			continue;

		base->evsel = eventops[i];

		base->evbase = base->evsel->init(base);
	}

	if (base->evbase == NULL) {
		event_warnx("%s: no event mechanism available",
		    __func__);
		base->evsel = NULL;
		event_base_free(base);
		return NULL;
	}

	if (evutil_getenv_("EVENT_SHOW_METHOD"))
		event_msgx("libevent using: %s", base->evsel->name);

	/* allocate a single active event queue */
	if (event_base_priority_init(base, 1) < 0) {
		event_base_free(base);
		return NULL;
	}

	/* prepare for threading */

#if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
	event_debug_created_threadable_ctx_ = 1;
#endif

#ifndef EVENT__DISABLE_THREAD_SUPPORT
	if (EVTHREAD_LOCKING_ENABLED() &&
	    (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK))) {
		int r;
		EVTHREAD_ALLOC_LOCK(base->th_base_lock, 0);
		EVTHREAD_ALLOC_COND(base->current_event_cond);
		r = evthread_make_base_notifiable(base);
		if (r<0) {
			event_warnx("%s: Unable to make base notifiable.", __func__);
			event_base_free(base);
			return NULL;
		}
	}
#endif

#ifdef _WIN32
	if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
		event_base_start_iocp_(base, cfg->n_cpus_hint);
#endif

	return (base);
}
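/* Usage sketch (editor's illustration using the public API declared in
 * event2/event.h): require a backend with edge-triggered support and report
 * what was chosen:
 *
 *	struct event_config *cfg = event_config_new();
 *	event_config_require_features(cfg, EV_FEATURE_ET);
 *	struct event_base *base = event_base_new_with_config(cfg);
 *	event_config_free(cfg);
 *	if (base)
 *		printf("using %s\n", event_base_get_method(base));
 */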
int
event_base_start_iocp_(struct event_base *base, int n_cpus)
{
#ifdef _WIN32
	if (base->iocp)
		return 0;
	base->iocp = event_iocp_port_launch_(n_cpus);
	if (!base->iocp) {
		event_warnx("%s: Couldn't launch IOCP", __func__);
		return -1;
	}
	return 0;
#else
	return -1;
#endif
}

void
event_base_stop_iocp_(struct event_base *base)
{
#ifdef _WIN32
	int rv;

	if (!base->iocp)
		return;
	rv = event_iocp_shutdown_(base->iocp, -1);
	EVUTIL_ASSERT(rv >= 0);
	base->iocp = NULL;
#endif
}
static int
event_base_cancel_single_callback_(struct event_base *base,
    struct event_callback *evcb,
    int run_finalizers)
{
	int result = 0;

	if (evcb->evcb_flags & EVLIST_INIT) {
		struct event *ev = event_callback_to_event(evcb);
		if (!(ev->ev_flags & EVLIST_INTERNAL)) {
			event_del_(ev, EVENT_DEL_EVEN_IF_FINALIZING);
			result = 1;
		}
	} else {
		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		event_callback_cancel_nolock_(base, evcb, 1);
		EVBASE_RELEASE_LOCK(base, th_base_lock);
		result = 1;
	}

	if (run_finalizers && (evcb->evcb_flags & EVLIST_FINALIZING)) {
		switch (evcb->evcb_closure) {
		case EV_CLOSURE_EVENT_FINALIZE:
		case EV_CLOSURE_EVENT_FINALIZE_FREE: {
			struct event *ev = event_callback_to_event(evcb);
			ev->ev_evcallback.evcb_cb_union.evcb_evfinalize(ev, ev->ev_arg);
			if (evcb->evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
				mm_free(ev);
			break;
		}
		case EV_CLOSURE_CB_FINALIZE:
			evcb->evcb_cb_union.evcb_cbfinalize(evcb, evcb->evcb_arg);
			break;
		default:
			break;
		}
	}
	return result;
}
static int event_base_free_queues_(struct event_base *base, int run_finalizers)
{
	int deleted = 0, i;

	for (i = 0; i < base->nactivequeues; ++i) {
		struct event_callback *evcb, *next;
		for (evcb = TAILQ_FIRST(&base->activequeues[i]); evcb; ) {
			next = TAILQ_NEXT(evcb, evcb_active_next);
			deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
			evcb = next;
		}
	}

	{
		struct event_callback *evcb;
		while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
			deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
		}
	}

	return deleted;
}
static void
event_base_free_(struct event_base *base, int run_finalizers)
{
	int i, n_deleted=0;
	struct event *ev;
	/* XXXX grab the lock? If there is contention when one thread frees
	 * the base, then the contending thread will be very sad soon. */

	/* event_base_free(NULL) is how to free the current_base if we
	 * made it with event_init and forgot to hold a reference to it. */
	if (base == NULL && current_base)
		base = current_base;
	/* Don't actually free NULL. */
	if (base == NULL) {
		event_warnx("%s: no base to free", __func__);
		return;
	}
	/* XXX(niels) - check for internal events first */

#ifdef _WIN32
	event_base_stop_iocp_(base);
#endif

	/* threading fds if we have them */
	if (base->th_notify_fd[0] != -1) {
		event_del(&base->th_notify);
		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
		if (base->th_notify_fd[1] != -1)
			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
		base->th_notify_fd[0] = -1;
		base->th_notify_fd[1] = -1;
		event_debug_unassign(&base->th_notify);
	}

	/* Delete all non-internal events. */
	evmap_delete_all_(base);

	while ((ev = min_heap_top_(&base->timeheap)) != NULL) {
		event_del(ev);
		++n_deleted;
	}
	for (i = 0; i < base->n_common_timeouts; ++i) {
		struct common_timeout_list *ctl =
		    base->common_timeout_queues[i];
		event_del(&ctl->timeout_event); /* Internal; doesn't count */
		event_debug_unassign(&ctl->timeout_event);
		for (ev = TAILQ_FIRST(&ctl->events); ev; ) {
			struct event *next = TAILQ_NEXT(ev,
			    ev_timeout_pos.ev_next_with_common_timeout);
			if (!(ev->ev_flags & EVLIST_INTERNAL)) {
				event_del(ev);
				++n_deleted;
			}
			ev = next;
		}
		mm_free(ctl);
	}
	if (base->common_timeout_queues)
		mm_free(base->common_timeout_queues);

	for (;;) {
		/* A finalizer can register yet another finalizer from within
		 * a finalizer, and if that finalizer lands in
		 * active_later_queue we can end up adding it to activequeues,
		 * leaving events in activequeues after this function returns,
		 * which is not what we want (we even have an assertion for
		 * this).
		 *
		 * A simple case is a bufferevent with an underlying
		 * bufferevent (i.e. filters).
		 */
		int i = event_base_free_queues_(base, run_finalizers);
		if (!i) {
			break;
		}
		n_deleted += i;
	}

	if (n_deleted)
		event_debug(("%s: %d events were still set in base",
			__func__, n_deleted));

	while (LIST_FIRST(&base->once_events)) {
		struct event_once *eonce = LIST_FIRST(&base->once_events);
		LIST_REMOVE(eonce, next_once);
		mm_free(eonce);
	}

	if (base->evsel != NULL && base->evsel->dealloc != NULL)
		base->evsel->dealloc(base);

	for (i = 0; i < base->nactivequeues; ++i)
		EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));

	EVUTIL_ASSERT(min_heap_empty_(&base->timeheap));
	min_heap_dtor_(&base->timeheap);

	mm_free(base->activequeues);

	evmap_io_clear_(&base->io);
	evmap_signal_clear_(&base->sigmap);
	event_changelist_freemem_(&base->changelist);

	EVTHREAD_FREE_LOCK(base->th_base_lock, 0);
	EVTHREAD_FREE_COND(base->current_event_cond);

	/* If we're freeing current_base, there won't be a current_base. */
	if (base == current_base)
		current_base = NULL;
	mm_free(base);
}
void
event_base_free_nofinalize(struct event_base *base)
{
	event_base_free_(base, 0);
}

void
event_base_free(struct event_base *base)
{
	event_base_free_(base, 1);
}
/* Fake eventop; used to disable the backend temporarily inside event_reinit
 * so that we can call event_del() on an event without telling the backend.
 */
static int
nil_backend_del(struct event_base *b, evutil_socket_t fd, short old,
    short events, void *fdinfo)
{
	return 0;
}
const struct eventop nil_eventop = {
	"nil",
	NULL, /* init: unused. */
	NULL, /* add: unused. */
	nil_backend_del, /* del: used, so it needs a harmless stub. */
	NULL, /* dispatch: unused. */
	NULL, /* dealloc: unused. */
	0, 0, 0
};
/* reinitialize the event base after a fork */
int
event_reinit(struct event_base *base)
{
	const struct eventop *evsel;
	int res = 0;
	int was_notifiable = 0;
	int had_signal_added = 0;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	evsel = base->evsel;

	/* check if this event mechanism requires reinit on the backend */
	if (evsel->need_reinit) {
		/* We're going to call event_del() on our notify events (the
		 * ones that tell about signals and wakeup events).  But we
		 * don't actually want to tell the backend to change its
		 * state, since it might still share some resource (a kqueue,
		 * an epoll fd) with the parent process, and we don't want to
		 * delete the fds from _that_ backend, so we temporarily stub
		 * out the evsel with a replacement.
		 */
		base->evsel = &nil_eventop;
	}

	/* We need to re-create a new signal-notification fd and a new
	 * thread-notification fd.  Otherwise, we'll still share those with
	 * the parent process, which would make any notification sent to them
	 * get received by one or both of the event loops, more or less at
	 * random.
	 */
	if (base->sig.ev_signal_added) {
		event_del_nolock_(&base->sig.ev_signal, EVENT_DEL_AUTOBLOCK);
		event_debug_unassign(&base->sig.ev_signal);
		memset(&base->sig.ev_signal, 0, sizeof(base->sig.ev_signal));
		had_signal_added = 1;
		base->sig.ev_signal_added = 0;
	}
	if (base->sig.ev_signal_pair[0] != -1)
		EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
	if (base->sig.ev_signal_pair[1] != -1)
		EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
	if (base->th_notify_fn != NULL) {
		was_notifiable = 1;
		base->th_notify_fn = NULL;
	}
	if (base->th_notify_fd[0] != -1) {
		event_del_nolock_(&base->th_notify, EVENT_DEL_AUTOBLOCK);
		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
		if (base->th_notify_fd[1] != -1)
			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
		base->th_notify_fd[0] = -1;
		base->th_notify_fd[1] = -1;
		event_debug_unassign(&base->th_notify);
	}

	/* Replace the original evsel. */
	base->evsel = evsel;

	if (evsel->need_reinit) {
		/* Reconstruct the backend through brute-force, so that we do
		 * not share any structures with the parent process. For some
		 * backends, this is necessary: epoll and kqueue, for
		 * instance, have events associated with a kernel
		 * structure. If we didn't reinitialize, we'd share that
		 * structure with the parent process, and any changes made by
		 * the parent would affect our backend's behavior (and vice
		 * versa).
		 */
		if (base->evsel->dealloc != NULL)
			base->evsel->dealloc(base);
		base->evbase = evsel->init(base);
		if (base->evbase == NULL) {
			event_errx(1,
			    "%s: could not reinitialize event mechanism",
			    __func__);
			res = -1;
			goto done;
		}

		/* Empty out the changelist (if any): we are starting from a
		 * blank slate. */
		event_changelist_freemem_(&base->changelist);

		/* Tell the event maps to re-inform the backend about all
		 * pending events. This will make the signal notification
		 * event get re-created if necessary. */
		if (evmap_reinit_(base) < 0)
			res = -1;
	} else {
		res = evsig_init_(base);
		if (res == 0 && had_signal_added) {
			res = event_add_nolock_(&base->sig.ev_signal, NULL, 0);
			if (res == 0)
				base->sig.ev_signal_added = 1;
		}
	}

	/* If we were notifiable before, and nothing just exploded, become
	 * notifiable again. */
	if (was_notifiable && res == 0)
		res = evthread_make_base_notifiable_nolock_(base);

done:
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return (res);
}
/* Get the monotonic time for this event_base's timer */
int
event_gettime_monotonic(struct event_base *base, struct timeval *tv)
{
	int rv = -1;

	if (base && tv) {
		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		rv = evutil_gettime_monotonic_(&(base->monotonic_timer), tv);
		EVBASE_RELEASE_LOCK(base, th_base_lock);
	}

	return rv;
}
const char **
event_get_supported_methods(void)
{
	static const char **methods = NULL;
	const struct eventop **method;
	const char **tmp;
	int i = 0, k;

	/* count all methods */
	for (method = &eventops[0]; *method != NULL; ++method) {
		++i;
	}

	/* allocate one more than we need for the NULL pointer */
	tmp = mm_calloc((i + 1), sizeof(char *));
	if (tmp == NULL)
		return (NULL);

	/* populate the array with the supported methods */
	for (k = 0, i = 0; eventops[k] != NULL; ++k) {
		tmp[i++] = eventops[k]->name;
	}
	tmp[i] = NULL;

	if (methods != NULL)
		mm_free((char**)methods);

	methods = tmp;

	return (methods);
}
struct event_config *
event_config_new(void)
{
	struct event_config *cfg = mm_calloc(1, sizeof(*cfg));

	if (cfg == NULL)
		return (NULL);

	TAILQ_INIT(&cfg->entries);
	cfg->max_dispatch_interval.tv_sec = -1;
	cfg->max_dispatch_callbacks = INT_MAX;
	cfg->limit_callbacks_after_prio = 1;

	return (cfg);
}

static void
event_config_entry_free(struct event_config_entry *entry)
{
	if (entry->avoid_method != NULL)
		mm_free((char *)entry->avoid_method);
	mm_free(entry);
}

void
event_config_free(struct event_config *cfg)
{
	struct event_config_entry *entry;

	while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) {
		TAILQ_REMOVE(&cfg->entries, entry, next);
		event_config_entry_free(entry);
	}
	mm_free(cfg);
}
int
event_config_set_flag(struct event_config *cfg, int flag)
{
	if (!cfg)
		return -1;
	cfg->flags |= flag;
	return 0;
}

int
event_config_avoid_method(struct event_config *cfg, const char *method)
{
	struct event_config_entry *entry = mm_malloc(sizeof(*entry));
	if (entry == NULL)
		return (-1);

	if ((entry->avoid_method = mm_strdup(method)) == NULL) {
		mm_free(entry);
		return (-1);
	}

	TAILQ_INSERT_TAIL(&cfg->entries, entry, next);

	return (0);
}

int
event_config_require_features(struct event_config *cfg,
    int features)
{
	if (!cfg)
		return (-1);
	cfg->require_features = features;
	return (0);
}

int
event_config_set_num_cpus_hint(struct event_config *cfg, int cpus)
{
	if (!cfg)
		return (-1);
	cfg->n_cpus_hint = cpus;
	return (0);
}
int
event_config_set_max_dispatch_interval(struct event_config *cfg,
    const struct timeval *max_interval, int max_callbacks, int min_priority)
{
	if (max_interval)
		memcpy(&cfg->max_dispatch_interval, max_interval,
		    sizeof(struct timeval));
	else
		cfg->max_dispatch_interval.tv_sec = -1;
	cfg->max_dispatch_callbacks =
	    max_callbacks >= 0 ? max_callbacks : INT_MAX;
	if (min_priority < 0)
		min_priority = 0;
	cfg->limit_callbacks_after_prio = min_priority;
	return (0);
}
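/* Usage sketch (editor's illustration, hypothetical values): cap each
 * dispatch pass at 10 msec or 16 callbacks, but only for callbacks at
 * priority >= 1, so that priority-0 callbacks are never cut short:
 *
 *	struct timeval msec_10 = { 0, 10*1000 };
 *	event_config_set_max_dispatch_interval(cfg, &msec_10, 16, 1);
 */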
int
event_priority_init(int npriorities)
{
	return event_base_priority_init(current_base, npriorities);
}

int
event_base_priority_init(struct event_base *base, int npriorities)
{
	int i, r;
	r = -1;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (N_ACTIVE_CALLBACKS(base) || npriorities < 1
	    || npriorities >= EVENT_MAX_PRIORITIES)
		goto err;

	if (npriorities == base->nactivequeues)
		goto ok;

	if (base->nactivequeues) {
		mm_free(base->activequeues);
		base->nactivequeues = 0;
	}

	/* Allocate our priority queues */
	base->activequeues = (struct evcallback_list *)
	    mm_calloc(npriorities, sizeof(struct evcallback_list));
	if (base->activequeues == NULL) {
		event_warn("%s: calloc", __func__);
		goto err;
	}
	base->nactivequeues = npriorities;

	for (i = 0; i < base->nactivequeues; ++i) {
		TAILQ_INIT(&base->activequeues[i]);
	}

ok:
	r = 0;
err:
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return (r);
}
int
event_base_get_npriorities(struct event_base *base)
{
	int n;
	if (base == NULL)
		base = current_base;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	n = base->nactivequeues;
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return (n);
}

int
event_base_get_num_events(struct event_base *base, unsigned int type)
{
	int r = 0;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (type & EVENT_BASE_COUNT_ACTIVE)
		r += base->event_count_active;

	if (type & EVENT_BASE_COUNT_VIRTUAL)
		r += base->virtual_event_count;

	if (type & EVENT_BASE_COUNT_ADDED)
		r += base->event_count;

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	return r;
}
int
event_base_get_max_events(struct event_base *base, unsigned int type, int clear)
{
	int r = 0;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (type & EVENT_BASE_COUNT_ACTIVE) {
		r += base->event_count_active_max;
		if (clear)
			base->event_count_active_max = 0;
	}

	if (type & EVENT_BASE_COUNT_VIRTUAL) {
		r += base->virtual_event_count_max;
		if (clear)
			base->virtual_event_count_max = 0;
	}

	if (type & EVENT_BASE_COUNT_ADDED) {
		r += base->event_count_max;
		if (clear)
			base->event_count_max = 0;
	}

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	return r;
}
/* Returns true iff we're currently watching any events. */
static int
event_haveevents(struct event_base *base)
{
	/* Caller must hold th_base_lock */
	return (base->virtual_event_count > 0 || base->event_count > 0);
}

/* "closure" function called when processing active signal events */
static inline void
event_signal_closure(struct event_base *base, struct event *ev)
{
	short ncalls;
	int should_break;

	/* Allows deletes to work */
	ncalls = ev->ev_ncalls;
	if (ncalls != 0)
		ev->ev_pncalls = &ncalls;
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	while (ncalls) {
		ncalls--;
		ev->ev_ncalls = ncalls;
		if (ncalls == 0)
			ev->ev_pncalls = NULL;
		(*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);

		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		should_break = base->event_break;
		EVBASE_RELEASE_LOCK(base, th_base_lock);

		if (should_break) {
			if (ncalls != 0)
				ev->ev_pncalls = NULL;
			return;
		}
	}
}
/* Common timeouts are special timeouts that are handled as queues rather than
 * in the minheap.  This is more efficient than the minheap if we happen to
 * know that we're going to get several thousands of timeout events all with
 * the same timeout value.
 *
 * Since all our timeout handling code assumes timevals can be copied,
 * assigned, etc, we can't use "magic pointer" to encode these common
 * timeouts.  Searching through a list to see if every timeout is common could
 * also get inefficient.  Instead, we take advantage of the fact that tv_usec
 * is 32 bits long, but only uses 20 of those bits (since it can never be over
 * 999999.)  We use the top bits to encode 4 bits of magic number, and 8 bits
 * of index into the event_base's array of common timeouts.
 */
#define MICROSECONDS_MASK       COMMON_TIMEOUT_MICROSECONDS_MASK
#define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
#define COMMON_TIMEOUT_IDX_SHIFT 20
#define COMMON_TIMEOUT_MASK     0xf0000000
#define COMMON_TIMEOUT_MAGIC    0x50000000

#define COMMON_TIMEOUT_IDX(tv) \
	(((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
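/* Worked example (editor's illustration): a 10.5-second common timeout
 * stored at index 3 has tv_sec == 10 and
 *
 *	tv_usec == COMMON_TIMEOUT_MAGIC			(0x50000000)
 *	         | (3 << COMMON_TIMEOUT_IDX_SHIFT)	(0x00300000)
 *	         | 500000				(0x0007a120)
 *	         == 0x5037a120
 *
 * so COMMON_TIMEOUT_IDX() recovers 3, and masking with MICROSECONDS_MASK
 * recovers the real 500000-microsecond part. */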
/** Return true iff 'tv' is a common timeout in 'base' */
static inline int
is_common_timeout(const struct timeval *tv,
    const struct event_base *base)
{
	int idx;
	if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC)
		return 0;
	idx = COMMON_TIMEOUT_IDX(tv);
	return idx < base->n_common_timeouts;
}

/* True iff tv1 and tv2 have the same common-timeout index, or if neither
 * one is a common timeout. */
static inline int
is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2)
{
	return (tv1->tv_usec & ~MICROSECONDS_MASK) ==
	    (tv2->tv_usec & ~MICROSECONDS_MASK);
}

/** Requires that 'tv' is a common timeout.  Return the corresponding
 * common_timeout_list. */
static inline struct common_timeout_list *
get_common_timeout_list(struct event_base *base, const struct timeval *tv)
{
	return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
}
#if 0
static inline int
common_timeout_ok(const struct timeval *tv,
    struct event_base *base)
{
	const struct timeval *expect =
	    &get_common_timeout_list(base, tv)->duration;
	return tv->tv_sec == expect->tv_sec &&
	    tv->tv_usec == expect->tv_usec;
}
#endif

/* Add the timeout for the first event in given common timeout list to the
 * event_base's minheap. */
static void
common_timeout_schedule(struct common_timeout_list *ctl,
    const struct timeval *now, struct event *head)
{
	struct timeval timeout = head->ev_timeout;
	timeout.tv_usec &= MICROSECONDS_MASK;
	event_add_nolock_(&ctl->timeout_event, &timeout, 1);
}
/* Callback: invoked when the timeout for a common timeout queue triggers.
 * This means that (at least) the first event in that queue should be run,
 * and the timeout should be rescheduled if there are more events. */
static void
common_timeout_callback(evutil_socket_t fd, short what, void *arg)
{
	struct timeval now;
	struct common_timeout_list *ctl = arg;
	struct event_base *base = ctl->base;
	struct event *ev = NULL;
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	gettime(base, &now);
	while (1) {
		ev = TAILQ_FIRST(&ctl->events);
		if (!ev || ev->ev_timeout.tv_sec > now.tv_sec ||
		    (ev->ev_timeout.tv_sec == now.tv_sec &&
			(ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec))
			break;
		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
		event_active_nolock_(ev, EV_TIMEOUT, 1);
	}
	if (ev)
		common_timeout_schedule(ctl, &now, ev);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
#define MAX_COMMON_TIMEOUTS 256

const struct timeval *
event_base_init_common_timeout(struct event_base *base,
    const struct timeval *duration)
{
	int i;
	struct timeval tv;
	const struct timeval *result=NULL;
	struct common_timeout_list *new_ctl;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	if (duration->tv_usec > 1000000) {
		memcpy(&tv, duration, sizeof(struct timeval));
		if (is_common_timeout(duration, base))
			tv.tv_usec &= MICROSECONDS_MASK;
		tv.tv_sec += tv.tv_usec / 1000000;
		tv.tv_usec %= 1000000;
		duration = &tv;
	}
	for (i = 0; i < base->n_common_timeouts; ++i) {
		const struct common_timeout_list *ctl =
		    base->common_timeout_queues[i];
		if (duration->tv_sec == ctl->duration.tv_sec &&
		    duration->tv_usec ==
		    (ctl->duration.tv_usec & MICROSECONDS_MASK)) {
			EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base));
			result = &ctl->duration;
			goto done;
		}
	}
	if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) {
		event_warnx("%s: Too many common timeouts already in use; "
		    "we only support %d per event_base", __func__,
		    MAX_COMMON_TIMEOUTS);
		goto done;
	}
	if (base->n_common_timeouts_allocated == base->n_common_timeouts) {
		int n = base->n_common_timeouts < 16 ? 16 :
		    base->n_common_timeouts*2;
		struct common_timeout_list **newqueues =
		    mm_realloc(base->common_timeout_queues,
			n*sizeof(struct common_timeout_queue *));
		if (!newqueues) {
			event_warn("%s: realloc",__func__);
			goto done;
		}
		base->n_common_timeouts_allocated = n;
		base->common_timeout_queues = newqueues;
	}
	new_ctl = mm_calloc(1, sizeof(struct common_timeout_list));
	if (!new_ctl) {
		event_warn("%s: calloc",__func__);
		goto done;
	}
	TAILQ_INIT(&new_ctl->events);
	new_ctl->duration.tv_sec = duration->tv_sec;
	new_ctl->duration.tv_usec =
	    duration->tv_usec | COMMON_TIMEOUT_MAGIC |
	    (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT);
	evtimer_assign(&new_ctl->timeout_event, base,
	    common_timeout_callback, new_ctl);
	new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL;
	event_priority_set(&new_ctl->timeout_event, 0);
	new_ctl->base = base;
	base->common_timeout_queues[base->n_common_timeouts++] = new_ctl;
	result = &new_ctl->duration;

done:
	if (result)
		EVUTIL_ASSERT(is_common_timeout(result, base));

	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return result;
}
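/* Usage sketch (editor's illustration; assumes an initialized 'base' and a
 * callback 'cb'): many timers that share one duration can then be queued in
 * O(1) instead of each going through the minheap:
 *
 *	struct timeval tv = { 10, 0 };
 *	const struct timeval *tv_common =
 *	    event_base_init_common_timeout(base, &tv);
 *	struct event *ev = evtimer_new(base, cb, NULL);
 *	event_add(ev, tv_common);
 */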
/* Closure function invoked when we're activating a persistent event. */
static inline void
event_persist_closure(struct event_base *base, struct event *ev)
{
	void (*evcb_callback)(evutil_socket_t, short, void *);

	// Other fields of *ev that must be stored before executing
	evutil_socket_t evcb_fd;
	short evcb_res;
	void *evcb_arg;

	/* reschedule the persistent event if we have a timeout. */
	if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) {
		/* If there was a timeout, we want it to run at an interval of
		 * ev_io_timeout after the last time it was _scheduled_ for,
		 * not ev_io_timeout after _now_.  If it fired for another
		 * reason, though, the timeout ought to start ticking _now_. */
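		/* Worked example (editor's illustration): with a 5-second
		 * ev_io_timeout, an event scheduled for t=100 that fires as
		 * EV_TIMEOUT at t=100.3 is rescheduled for t=105 (scheduled
		 * time + delay), not t=105.3; had it fired for EV_READ
		 * instead, the next timeout would be t=105.3 (now + delay). */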
		struct timeval run_at, relative_to, delay, now;
		ev_uint32_t usec_mask = 0;
		EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
			&ev->ev_io_timeout));
		gettime(base, &now);
		if (is_common_timeout(&ev->ev_timeout, base)) {
			delay = ev->ev_io_timeout;
			usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
			delay.tv_usec &= MICROSECONDS_MASK;
			if (ev->ev_res & EV_TIMEOUT) {
				relative_to = ev->ev_timeout;
				relative_to.tv_usec &= MICROSECONDS_MASK;
			} else {
				relative_to = now;
			}
		} else {
			delay = ev->ev_io_timeout;
			if (ev->ev_res & EV_TIMEOUT) {
				relative_to = ev->ev_timeout;
			} else {
				relative_to = now;
			}
		}
		evutil_timeradd(&relative_to, &delay, &run_at);
		if (evutil_timercmp(&run_at, &now, <)) {
			/* Looks like we missed at least one invocation due to
			 * a clock jump, not running the event loop for a
			 * while, really slow callbacks, or
			 * something. Reschedule relative to now.
			 */
			evutil_timeradd(&now, &delay, &run_at);
		}
		run_at.tv_usec |= usec_mask;
		event_add_nolock_(ev, &run_at, 1);
	}

	// Save our callback before we release the lock
	evcb_callback = ev->ev_callback;
	evcb_fd = ev->ev_fd;
	evcb_res = ev->ev_res;
	evcb_arg = ev->ev_arg;

	// Release the lock
	EVBASE_RELEASE_LOCK(base, th_base_lock);

	// Execute the callback
	(evcb_callback)(evcb_fd, evcb_res, evcb_arg);
}
/*
  Helper for event_process_active to process all the events in a single queue,
  releasing the lock as we go.  This function requires that the lock be held
  when it's invoked.  Returns -1 if we get a signal or an event_break that
  means we should stop processing any active events now.  Otherwise returns
  the number of non-internal event_callbacks that we processed.
*/
static int
event_process_active_single_queue(struct event_base *base,
    struct evcallback_list *activeq,
    int max_to_process, const struct timeval *endtime)
{
	struct event_callback *evcb;
	int count = 0;

	EVUTIL_ASSERT(activeq != NULL);

	for (evcb = TAILQ_FIRST(activeq); evcb; evcb = TAILQ_FIRST(activeq)) {
		struct event *ev=NULL;
		if (evcb->evcb_flags & EVLIST_INIT) {
			ev = event_callback_to_event(evcb);

			if (ev->ev_events & EV_PERSIST || ev->ev_flags & EVLIST_FINALIZING)
				event_queue_remove_active(base, evcb);
			else
				event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
			event_debug((
			    "event_process_active: event: %p, %s%s%scall %p",
			    ev,
			    ev->ev_res & EV_READ ? "EV_READ " : " ",
			    ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
			    ev->ev_res & EV_CLOSED ? "EV_CLOSED " : " ",
			    ev->ev_callback));
		} else {
			event_queue_remove_active(base, evcb);
			event_debug(("event_process_active: event_callback %p, "
				"closure %d, call %p",
				evcb, evcb->evcb_closure, evcb->evcb_cb_union.evcb_callback));
		}

		if (!(evcb->evcb_flags & EVLIST_INTERNAL))
			++count;

		base->current_event = evcb;
#ifndef EVENT__DISABLE_THREAD_SUPPORT
		base->current_event_waiters = 0;
#endif

		switch (evcb->evcb_closure) {
		case EV_CLOSURE_EVENT_SIGNAL:
			EVUTIL_ASSERT(ev != NULL);
			event_signal_closure(base, ev);
			break;
		case EV_CLOSURE_EVENT_PERSIST:
			EVUTIL_ASSERT(ev != NULL);
			event_persist_closure(base, ev);
			break;
		case EV_CLOSURE_EVENT: {
			void (*evcb_callback)(evutil_socket_t, short, void *);
			EVUTIL_ASSERT(ev != NULL);
			evcb_callback = *ev->ev_callback;
			EVBASE_RELEASE_LOCK(base, th_base_lock);
			evcb_callback(ev->ev_fd, ev->ev_res, ev->ev_arg);
		}
		break;
		case EV_CLOSURE_CB_SELF: {
			void (*evcb_selfcb)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_selfcb;
			EVBASE_RELEASE_LOCK(base, th_base_lock);
			evcb_selfcb(evcb, evcb->evcb_arg);
		}
		break;
		case EV_CLOSURE_EVENT_FINALIZE:
		case EV_CLOSURE_EVENT_FINALIZE_FREE: {
			void (*evcb_evfinalize)(struct event *, void *);
			int evcb_closure = evcb->evcb_closure;
			EVUTIL_ASSERT(ev != NULL);
			base->current_event = NULL;
			evcb_evfinalize = ev->ev_evcallback.evcb_cb_union.evcb_evfinalize;
			EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
			EVBASE_RELEASE_LOCK(base, th_base_lock);
			evcb_evfinalize(ev, ev->ev_arg);
			event_debug_note_teardown_(ev);
			if (evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
				mm_free(ev);
		}
		break;
		case EV_CLOSURE_CB_FINALIZE: {
			void (*evcb_cbfinalize)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_cbfinalize;
			base->current_event = NULL;
			EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
			EVBASE_RELEASE_LOCK(base, th_base_lock);
			evcb_cbfinalize(evcb, evcb->evcb_arg);
		}
		break;
		default:
			EVUTIL_ASSERT(0);
		}

		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		base->current_event = NULL;
#ifndef EVENT__DISABLE_THREAD_SUPPORT
		if (base->current_event_waiters) {
			base->current_event_waiters = 0;
			EVTHREAD_COND_BROADCAST(base->current_event_cond);
		}
#endif

		if (base->event_break)
			return -1;
		if (count >= max_to_process)
			return count;
		if (count && endtime) {
			struct timeval now;
			update_time_cache(base);
			gettime(base, &now);
			if (evutil_timercmp(&now, endtime, >=))
				return count;
		}
		if (base->event_continue)
			break;
	}
	return count;
}
/*
 * Active events are stored in priority queues.  Lower priorities are always
 * processed before higher priorities.  Low priority events can starve high
 * priority events.
 */
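/* For example, with event_base_priority_init(base, 3), a callback queued at
 * priority 0 always runs before one queued at priority 2, and a steady
 * stream of priority-0 events can postpone priority-2 events indefinitely. */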
static int
event_process_active(struct event_base *base)
{
	/* Caller must hold th_base_lock */
	struct evcallback_list *activeq = NULL;
	int i, c = 0;
	const struct timeval *endtime;
	struct timeval tv;
	const int maxcb = base->max_dispatch_callbacks;
	const int limit_after_prio = base->limit_callbacks_after_prio;
	if (base->max_dispatch_time.tv_sec >= 0) {
		update_time_cache(base);
		gettime(base, &tv);
		evutil_timeradd(&base->max_dispatch_time, &tv, &tv);
		endtime = &tv;
	} else {
		endtime = NULL;
	}

	for (i = 0; i < base->nactivequeues; ++i) {
		if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
			base->event_running_priority = i;
			activeq = &base->activequeues[i];
			if (i < limit_after_prio)
				c = event_process_active_single_queue(base, activeq,
				    INT_MAX, NULL);
			else
				c = event_process_active_single_queue(base, activeq,
				    maxcb, endtime);
			if (c < 0) {
				goto done;
			} else if (c > 0)
				break; /* Processed a real event; do not
					* consider lower-priority events */
			/* If we get here, all of the events we processed
			 * were internal.  Continue. */
		}
	}

done:
	base->event_running_priority = -1;
	return c;
}
/*
 * Wait continuously for events.  We exit only if no events are left.
 */

int
event_dispatch(void)
{
	return (event_loop(0));
}

int
event_base_dispatch(struct event_base *event_base)
{
	return (event_base_loop(event_base, 0));
}

const char *
event_base_get_method(const struct event_base *base)
{
	EVUTIL_ASSERT(base);
	return (base->evsel->name);
}
/** Callback: used to implement event_base_loopexit by telling the event_base
 * that it's time to exit its loop. */
static void
event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
{
	struct event_base *base = arg;
	base->event_gotterm = 1;
}

int
event_loopexit(const struct timeval *tv)
{
	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
		    current_base, tv));
}

int
event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
{
	return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
		    event_base, tv));
}
int
event_loopbreak(void)
{
	return (event_base_loopbreak(current_base));
}

int
event_base_loopbreak(struct event_base *event_base)
{
	int r = 0;
	if (event_base == NULL)
		return (-1);

	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
	event_base->event_break = 1;

	if (EVBASE_NEED_NOTIFY(event_base)) {
		r = evthread_notify_base(event_base);
	} else {
		r = (0);
	}
	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
	return r;
}

int
event_base_loopcontinue(struct event_base *event_base)
{
	int r = 0;
	if (event_base == NULL)
		return (-1);

	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
	event_base->event_continue = 1;

	if (EVBASE_NEED_NOTIFY(event_base)) {
		r = evthread_notify_base(event_base);
	} else {
		r = (0);
	}
	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
	return r;
}
int
event_base_got_break(struct event_base *event_base)
{
	int res;
	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
	res = event_base->event_break;
	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
	return res;
}

int
event_base_got_exit(struct event_base *event_base)
{
	int res;
	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
	res = event_base->event_gotterm;
	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
	return res;
}

/* not thread safe */

int
event_loop(int flags)
{
	return event_base_loop(current_base, flags);
}
int
event_base_loop(struct event_base *base, int flags)
{
	const struct eventop *evsel = base->evsel;
	struct timeval tv;
	struct timeval *tv_p;
	int res, done, retval = 0;

	/* Grab the lock.  We will release it inside evsel.dispatch, and again
	 * as we invoke user callbacks. */
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (base->running_loop) {
		event_warnx("%s: reentrant invocation.  Only one event_base_loop"
		    " can run on each event_base at once.", __func__);
		EVBASE_RELEASE_LOCK(base, th_base_lock);
		return -1;
	}

	base->running_loop = 1;

	clear_time_cache(base);

	if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
		evsig_set_base_(base);

	done = 0;

#ifndef EVENT__DISABLE_THREAD_SUPPORT
	base->th_owner_id = EVTHREAD_GET_ID();
#endif

	base->event_gotterm = base->event_break = 0;

	while (!done) {
		base->event_continue = 0;
		base->n_deferreds_queued = 0;

		/* Terminate the loop if we have been asked to */
		if (base->event_gotterm) {
			break;
		}

		if (base->event_break) {
			break;
		}

		tv_p = &tv;
		if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
			timeout_next(base, &tv_p);
		} else {
			/*
			 * if we have active events, we just poll new events
			 * without waiting.
			 */
			evutil_timerclear(&tv);
		}

		/* If we have no events, we just exit */
		if (0==(flags&EVLOOP_NO_EXIT_ON_EMPTY) &&
		    !event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
			event_debug(("%s: no events registered.", __func__));
			retval = 1;
			goto done;
		}

		event_queue_make_later_events_active(base);

		clear_time_cache(base);

		res = evsel->dispatch(base, tv_p);

		if (res == -1) {
			event_debug(("%s: dispatch returned unsuccessfully.",
				__func__));
			retval = -1;
			goto done;
		}

		update_time_cache(base);

		timeout_process(base);

		if (N_ACTIVE_CALLBACKS(base)) {
			int n = event_process_active(base);
			if ((flags & EVLOOP_ONCE)
			    && N_ACTIVE_CALLBACKS(base) == 0
			    && n != 0)
				done = 1;
		} else if (flags & EVLOOP_NONBLOCK)
			done = 1;
	}
	event_debug(("%s: asked to terminate loop.", __func__));

done:
	clear_time_cache(base);
	base->running_loop = 0;

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	return (retval);
}
/* One-time callback to implement event_base_once: invokes the user callback,
 * then deletes the allocated storage */
static void
event_once_cb(evutil_socket_t fd, short events, void *arg)
{
	struct event_once *eonce = arg;

	(*eonce->cb)(fd, events, eonce->arg);
	EVBASE_ACQUIRE_LOCK(eonce->ev.ev_base, th_base_lock);
	LIST_REMOVE(eonce, next_once);
	EVBASE_RELEASE_LOCK(eonce->ev.ev_base, th_base_lock);
	event_debug_unassign(&eonce->ev);
	mm_free(eonce);
}

/* not threadsafe, event scheduled once. */
int
event_once(evutil_socket_t fd, short events,
    void (*callback)(evutil_socket_t, short, void *),
    void *arg, const struct timeval *tv)
{
	return event_base_once(current_base, fd, events, callback, arg, tv);
}
/* Schedules an event once */
int
event_base_once(struct event_base *base, evutil_socket_t fd, short events,
    void (*callback)(evutil_socket_t, short, void *),
    void *arg, const struct timeval *tv)
{
	struct event_once *eonce;
	int res = 0;
	int activate = 0;

	if (!base)
		return (-1);

	/* We cannot support signals that just fire once, or persistent
	 * events. */
	if (events & (EV_SIGNAL|EV_PERSIST))
		return (-1);

	if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL)
		return (-1);

	eonce->cb = callback;
	eonce->arg = arg;

	if ((events & (EV_TIMEOUT|EV_SIGNAL|EV_READ|EV_WRITE|EV_CLOSED)) == EV_TIMEOUT) {
		evtimer_assign(&eonce->ev, base, event_once_cb, eonce);

		if (tv == NULL || ! evutil_timerisset(tv)) {
			/* If the event is going to become active immediately,
			 * don't put it on the timeout queue.  This is one
			 * idiom for scheduling a callback, so let's make
			 * it fast (and order-preserving). */
			activate = 1;
		}
	} else if (events & (EV_READ|EV_WRITE|EV_CLOSED)) {
		events &= EV_READ|EV_WRITE|EV_CLOSED;

		event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce);
	} else {
		/* Bad event combination */
		mm_free(eonce);
		return (-1);
	}

	if (res == 0) {
		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		if (activate)
			event_active_nolock_(&eonce->ev, EV_TIMEOUT, 1);
		else
			res = event_add_nolock_(&eonce->ev, tv, 0);

		if (res != 0) {
			mm_free(eonce);
			return (res);
		} else {
			LIST_INSERT_HEAD(&base->once_events, eonce, next_once);
		}
		EVBASE_RELEASE_LOCK(base, th_base_lock);
	}

	return (0);
}
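/* Usage sketch (editor's illustration, hypothetical callback 'cb'): run 'cb'
 * exactly once, two seconds from now, without managing a struct event by
 * hand:
 *
 *	struct timeval two_sec = { 2, 0 };
 *	event_base_once(base, -1, EV_TIMEOUT, cb, NULL, &two_sec);
 */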
int
event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg)
{
	if (!base)
		base = current_base;
	if (arg == &event_self_cbarg_ptr_)
		arg = ev;

	event_debug_assert_not_added_(ev);

	ev->ev_base = base;

	ev->ev_callback = callback;
	ev->ev_arg = arg;
	ev->ev_fd = fd;
	ev->ev_events = events;
	ev->ev_res = 0;
	ev->ev_flags = EVLIST_INIT;
	ev->ev_ncalls = 0;
	ev->ev_pncalls = NULL;

	if (events & EV_SIGNAL) {
		if ((events & (EV_READ|EV_WRITE|EV_CLOSED)) != 0) {
			event_warnx("%s: EV_SIGNAL is not compatible with "
			    "EV_READ, EV_WRITE or EV_CLOSED", __func__);
			return -1;
		}
		ev->ev_closure = EV_CLOSURE_EVENT_SIGNAL;
	} else {
		if (events & EV_PERSIST) {
			evutil_timerclear(&ev->ev_io_timeout);
			ev->ev_closure = EV_CLOSURE_EVENT_PERSIST;
		} else {
			ev->ev_closure = EV_CLOSURE_EVENT;
		}
	}

	min_heap_elem_init_(ev);

	if (base != NULL) {
		/* by default, we put new events into the middle priority */
		ev->ev_pri = base->nactivequeues / 2;
	}

	event_debug_note_setup_(ev);

	return 0;
}
int
event_base_set(struct event_base *base, struct event *ev)
{
	/* Only innocent events may be assigned to a different base */
	if (ev->ev_flags != EVLIST_INIT)
		return (-1);

	event_debug_assert_is_setup_(ev);

	ev->ev_base = base;
	ev->ev_pri = base->nactivequeues/2;

	return (0);
}

void
event_set(struct event *ev, evutil_socket_t fd, short events,
    void (*callback)(evutil_socket_t, short, void *), void *arg)
{
	int r;
	r = event_assign(ev, current_base, fd, events, callback, arg);
	EVUTIL_ASSERT(r == 0);
}
void *
event_self_cbarg(void)
{
	return &event_self_cbarg_ptr_;
}
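/* Usage sketch (editor's illustration): pass event_self_cbarg() as the
 * callback argument so the callback receives the event itself, e.g. to
 * delete or re-add it from inside the callback:
 *
 *	struct event *ev = event_new(base, fd, EV_READ|EV_PERSIST, cb,
 *	    event_self_cbarg());
 */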
struct event *
event_base_get_running_event(struct event_base *base)
{
	struct event *ev = NULL;
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	if (EVBASE_IN_THREAD(base)) {
		struct event_callback *evcb = base->current_event;
		if (evcb->evcb_flags & EVLIST_INIT)
			ev = event_callback_to_event(evcb);
	}
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return ev;
}

struct event *
event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg)
{
	struct event *ev;
	ev = mm_malloc(sizeof(struct event));
	if (ev == NULL)
		return (NULL);
	if (event_assign(ev, base, fd, events, cb, arg) < 0) {
		mm_free(ev);
		return (NULL);
	}

	return (ev);
}
void
event_free(struct event *ev)
{
	/* This is disabled, so that events which have been finalized remain a
	 * valid target for event_free(). */
	// event_debug_assert_is_setup_(ev);

	/* make sure that this event won't be coming back to haunt us. */
	event_del(ev);
	event_debug_note_teardown_(ev);
	mm_free(ev);
}

void
event_debug_unassign(struct event *ev)
{
	event_debug_assert_not_added_(ev);
	event_debug_note_teardown_(ev);

	ev->ev_flags &= ~EVLIST_INIT;
}
#define EVENT_FINALIZE_FREE_ 0x10000
static int
event_finalize_nolock_(struct event_base *base, unsigned flags, struct event *ev, event_finalize_callback_fn cb)
{
	ev_uint8_t closure = (flags & EVENT_FINALIZE_FREE_) ?
	    EV_CLOSURE_EVENT_FINALIZE_FREE : EV_CLOSURE_EVENT_FINALIZE;

	event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
	ev->ev_closure = closure;
	ev->ev_evcallback.evcb_cb_union.evcb_evfinalize = cb;
	event_active_nolock_(ev, EV_FINALIZE, 1);
	ev->ev_flags |= EVLIST_FINALIZING;
	return 0;
}

static int
event_finalize_impl_(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
{
	int r;
	struct event_base *base = ev->ev_base;
	if (EVUTIL_FAILURE_CHECK(!base)) {
		event_warnx("%s: event has no event_base set.", __func__);
		return -1;
	}

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	r = event_finalize_nolock_(base, flags, ev, cb);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return r;
}

int
event_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
{
	return event_finalize_impl_(flags, ev, cb);
}

int
event_free_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
{
	return event_finalize_impl_(flags|EVENT_FINALIZE_FREE_, ev, cb);
}
void
event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
{
	struct event *ev = NULL;
	if (evcb->evcb_flags & EVLIST_INIT) {
		ev = event_callback_to_event(evcb);
		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
	} else {
		event_callback_cancel_nolock_(base, evcb, 0); /*XXX can this fail?*/
	}

	evcb->evcb_closure = EV_CLOSURE_CB_FINALIZE;
	evcb->evcb_cb_union.evcb_cbfinalize = cb;
	event_callback_activate_nolock_(base, evcb); /* XXX can this really fail?*/
	evcb->evcb_flags |= EVLIST_FINALIZING;
}

void
event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	event_callback_finalize_nolock_(base, flags, evcb, cb);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
/** Internal: Finalize all of the n_cbs callbacks in evcbs.  The provided
 * callback will be invoked on *one of them*, after they have *all* been
 * finalized. */
int
event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcbs, void (*cb)(struct event_callback *, void *))
{
	int n_pending = 0, i;

	if (base == NULL)
		base = current_base;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	event_debug(("%s: %d events finalizing", __func__, n_cbs));

	/* At most one can be currently executing; the rest we just
	 * cancel... But we always make sure that the finalize callback
	 * runs. */
	for (i = 0; i < n_cbs; ++i) {
		struct event_callback *evcb = evcbs[i];
		if (evcb == base->current_event) {
			event_callback_finalize_nolock_(base, 0, evcb, cb);
			++n_pending;
		} else {
			event_callback_cancel_nolock_(base, evcb, 0);
		}
	}

	if (n_pending == 0) {
		/* Just do the first one. */
		event_callback_finalize_nolock_(base, 0, evcbs[0], cb);
	}

	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return 0;
}
/*
 * Sets the priority of an event - if an event is already scheduled
 * changing the priority is going to fail.
 */

int
event_priority_set(struct event *ev, int pri)
{
	event_debug_assert_is_setup_(ev);

	if (ev->ev_flags & EVLIST_ACTIVE)
		return (-1);
	if (pri < 0 || pri >= ev->ev_base->nactivequeues)
		return (-1);

	ev->ev_pri = pri;

	return (0);
}
2321 * Checks if a specific event is pending or scheduled.
2325 event_pending(const struct event *ev, short event, struct timeval *tv)
2329 if (EVUTIL_FAILURE_CHECK(ev->ev_base == NULL)) {
2330 event_warnx("%s: event has no event_base set.", __func__);
2334 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2335 event_debug_assert_is_setup_(ev);
2337 if (ev->ev_flags & EVLIST_INSERTED)
2338 flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL));
2339 if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
2340 flags |= ev->ev_res;
2341 if (ev->ev_flags & EVLIST_TIMEOUT)
2342 flags |= EV_TIMEOUT;
2344 event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL);
2346 /* See if there is a timeout that we should report */
2347 if (tv != NULL && (flags & event & EV_TIMEOUT)) {
2348 struct timeval tmp = ev->ev_timeout;
2349 tmp.tv_usec &= MICROSECONDS_MASK;
2350 /* correctly remap to real time */
2351 evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
2354 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2356 return (flags & event);
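/*
 * Illustrative sketch (not part of the original file): querying an
 * event with event_pending().  The return value is the subset of the
 * requested flags that is pending, and if EV_TIMEOUT is both requested
 * and pending, the expiration time (remapped to the real clock, as
 * done above) is stored in *tv.
 */
#if 0
#include <stdio.h>
#include <sys/time.h>
#include <event2/event.h>

static void report(const struct event *ev)
{
	struct timeval expires;
	int what = event_pending(ev, EV_READ|EV_WRITE|EV_TIMEOUT, &expires);

	if (what & EV_READ)
		printf("waiting for readability\n");
	if (what & EV_TIMEOUT)
		printf("times out at %ld.%06ld\n",
		    (long)expires.tv_sec, (long)expires.tv_usec);
}
#endif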
2360 event_initialized(const struct event *ev)
2362 if (!(ev->ev_flags & EVLIST_INIT))
2369 event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
2371 event_debug_assert_is_setup_(event);
2374 *base_out = event->ev_base;
2376 *fd_out = event->ev_fd;
2378 *events_out = event->ev_events;
2380 *callback_out = event->ev_callback;
2382 *arg_out = event->ev_arg;
2386 event_get_struct_event_size(void)
2388 return sizeof(struct event);
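/*
 * Illustrative sketch (not part of the original file): the pattern
 * event_get_struct_event_size() supports - allocating event storage
 * without baking sizeof(struct event) into the caller's binary, so the
 * caller stays compatible with libevent builds whose struct event
 * layout differs.  'make_event' is a hypothetical helper.
 */
#if 0
#include <stdlib.h>
#include <event2/event.h>

static struct event *
make_event(struct event_base *base, evutil_socket_t fd,
    event_callback_fn cb, void *arg)
{
	struct event *ev = malloc(event_get_struct_event_size());

	if (ev != NULL)
		event_assign(ev, base, fd, EV_READ|EV_PERSIST, cb, arg);
	return ev;
}
#endif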
2392 event_get_fd(const struct event *ev)
2394 event_debug_assert_is_setup_(ev);
2399 event_get_base(const struct event *ev)
2401 event_debug_assert_is_setup_(ev);
2406 event_get_events(const struct event *ev)
2408 event_debug_assert_is_setup_(ev);
2409 return ev->ev_events;
2413 event_get_callback(const struct event *ev)
2415 event_debug_assert_is_setup_(ev);
2416 return ev->ev_callback;
2420 event_get_callback_arg(const struct event *ev)
2422 event_debug_assert_is_setup_(ev);
2427 event_get_priority(const struct event *ev)
2429 event_debug_assert_is_setup_(ev);
2434 event_add(struct event *ev, const struct timeval *tv)
2438 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2439 event_warnx("%s: event has no event_base set.", __func__);
2443 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2445 res = event_add_nolock_(ev, tv, 0);
2447 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
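/*
 * Illustrative sketch (not part of the original file): the two common
 * event_add() call shapes.  A NULL timeout means the event pends until
 * its fd/signal fires; a non-NULL timeout is a relative interval added
 * to the current time (see tv_is_absolute in the internal helper below).
 */
#if 0
#include <sys/time.h>
#include <event2/event.h>

static void add_examples(struct event *ev)
{
	struct timeval five_sec = { 5, 0 };

	event_add(ev, NULL);		/* no timeout */
	event_add(ev, &five_sec);	/* also fire EV_TIMEOUT after ~5s */
}
#endif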
2452 /* Helper callback: wake an event_base from another thread. This version
2453 * works by writing a byte to one end of a socketpair, so that the event_base
2454 * listening on the other end will wake up as the corresponding event triggers. */
2457 evthread_notify_base_default(struct event_base *base)
2463 r = send(base->th_notify_fd[1], buf, 1, 0);
2465 r = write(base->th_notify_fd[1], buf, 1);
2467 return (r < 0 && ! EVUTIL_ERR_IS_EAGAIN(errno)) ? -1 : 0;
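/*
 * Illustrative sketch (not part of the original file): the generic
 * "self-pipe" wakeup idiom that the default notify function above
 * implements.  One end of a pipe/socketpair is watched by the loop;
 * any thread wakes the loop by writing a byte to the other end.
 * Standalone POSIX version with hypothetical names:
 */
#if 0
#include <errno.h>
#include <unistd.h>

static int wakeup_fds[2];	/* from pipe(); [0] is watched by the loop */

static int wake_the_loop(void)
{
	unsigned char buf[1] = { 0 };
	ssize_t r = write(wakeup_fds[1], buf, 1);

	/* A full nonblocking pipe (EAGAIN) means a wakeup is already
	 * pending, so that case is not a failure. */
	return (r < 0 && errno != EAGAIN) ? -1 : 0;
}
#endif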
2470 #ifdef EVENT__HAVE_EVENTFD
2471 /* Helper callback: wake an event_base from another thread. This version
2472 * assumes that you have a working eventfd() implementation. */
2474 evthread_notify_base_eventfd(struct event_base *base)
2476 ev_uint64_t msg = 1;
2479 r = write(base->th_notify_fd[0], (void*) &msg, sizeof(msg));
2480 } while (r < 0 && errno == EAGAIN);
2482 return (r < 0) ? -1 : 0;
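/*
 * Illustrative sketch (not part of the original file): the eventfd(2)
 * semantics the function above relies on.  Writes add to a 64-bit
 * in-kernel counter; a read returns the accumulated value and resets
 * it to zero, so many notifications collapse into a single wakeup.
 * Linux-specific.
 */
#if 0
#include <stdint.h>
#include <unistd.h>
#include <sys/eventfd.h>

static void eventfd_demo(void)
{
	int fd = eventfd(0, EFD_CLOEXEC);
	uint64_t v = 1;

	(void)write(fd, &v, sizeof(v));	/* counter = 1 */
	(void)write(fd, &v, sizeof(v));	/* counter = 2 */
	(void)read(fd, &v, sizeof(v));	/* v == 2, counter reset to 0 */
	close(fd);
}
#endif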
2487 /** Tell the thread currently running the event_loop for base (if any) that it
2488 * needs to stop waiting in its dispatch function (if it is) and process all
2489 * active callbacks. */
2491 evthread_notify_base(struct event_base *base)
2493 EVENT_BASE_ASSERT_LOCKED(base);
2494 if (!base->th_notify_fn)
2496 if (base->is_notify_pending)
2498 base->is_notify_pending = 1;
2499 return base->th_notify_fn(base);
2502 /* Implementation function to remove a timeout on a currently pending event.
2505 event_remove_timer_nolock_(struct event *ev)
2507 struct event_base *base = ev->ev_base;
2509 EVENT_BASE_ASSERT_LOCKED(base);
2510 event_debug_assert_is_setup_(ev);
2512 event_debug(("event_remove_timer_nolock: event: %p", ev));
2514 /* If it's not pending on a timeout, we don't need to do anything. */
2515 if (ev->ev_flags & EVLIST_TIMEOUT) {
2516 event_queue_remove_timeout(base, ev);
2517 evutil_timerclear(&ev->ev_.ev_io.ev_timeout);
2524 event_remove_timer(struct event *ev)
2528 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2529 event_warnx("%s: event has no event_base set.", __func__);
2533 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2535 res = event_remove_timer_nolock_(ev);
2537 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2542 /* Implementation function to add an event. Works just like event_add,
2543 * except: 1) it requires that we have the lock. 2) if tv_is_absolute is set,
2544 * we treat tv as an absolute time, not as an interval to add to the current
2547 event_add_nolock_(struct event *ev, const struct timeval *tv,
2550 struct event_base *base = ev->ev_base;
2554 EVENT_BASE_ASSERT_LOCKED(base);
2555 event_debug_assert_is_setup_(ev);
2558 "event_add: event: %p (fd "EV_SOCK_FMT"), %s%s%s%scall %p",
2560 EV_SOCK_ARG(ev->ev_fd),
2561 ev->ev_events & EV_READ ? "EV_READ " : " ",
2562 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
2563 ev->ev_events & EV_CLOSED ? "EV_CLOSED " : " ",
2564 tv ? "EV_TIMEOUT " : " ",
2567 EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2569 if (ev->ev_flags & EVLIST_FINALIZING) {
2575 * prepare for timeout insertion further below; if any step
2576 * fails, we must not change any state.
2578 if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
2579 if (min_heap_reserve_(&base->timeheap,
2580 1 + min_heap_size_(&base->timeheap)) == -1)
2581 return (-1); /* ENOMEM == errno */
2584 /* If the main thread is currently executing a signal event's
2585 * callback, and we are not the main thread, then we want to wait
2586 * until the callback is done before we mess with the event, or else
2587 * we can race on ev_ncalls and ev_pncalls below. */
2588 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2589 if (base->current_event == event_to_event_callback(ev) &&
2590 (ev->ev_events & EV_SIGNAL)
2591 && !EVBASE_IN_THREAD(base)) {
2592 ++base->current_event_waiters;
2593 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2597 if ((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL)) &&
2598 !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
2599 if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
2600 res = evmap_io_add_(base, ev->ev_fd, ev);
2601 else if (ev->ev_events & EV_SIGNAL)
2602 res = evmap_signal_add_(base, (int)ev->ev_fd, ev);
2604 event_queue_insert_inserted(base, ev);
2606 /* evmap says we need to notify the main thread. */
2613 * we should change the timeout state only if the previous event
2614 * addition succeeded.
2616 if (res != -1 && tv != NULL) {
2619 #ifdef USE_REINSERT_TIMEOUT
2621 int old_timeout_idx;
2625 * for persistent timeout events, we remember the
2626 * timeout value and re-add the event.
2628 * If tv_is_absolute, this was already set.
2630 if (ev->ev_closure == EV_CLOSURE_EVENT_PERSIST && !tv_is_absolute)
2631 ev->ev_io_timeout = *tv;
2633 #ifndef USE_REINSERT_TIMEOUT
2634 if (ev->ev_flags & EVLIST_TIMEOUT) {
2635 event_queue_remove_timeout(base, ev);
2639 /* Check if it is active due to a timeout. Rescheduling
2640 * this timeout before the callback can be executed
2641 * removes it from the active list. */
2642 if ((ev->ev_flags & EVLIST_ACTIVE) &&
2643 (ev->ev_res & EV_TIMEOUT)) {
2644 if (ev->ev_events & EV_SIGNAL) {
2645 /* See if we are just actively executing
2646 * this event in a loop. */
2648 if (ev->ev_ncalls && ev->ev_pncalls) {
2650 *ev->ev_pncalls = 0;
2654 event_queue_remove_active(base, event_to_event_callback(ev));
2657 gettime(base, &now);
2659 common_timeout = is_common_timeout(tv, base);
2660 #ifdef USE_REINSERT_TIMEOUT
2661 was_common = is_common_timeout(&ev->ev_timeout, base);
2662 old_timeout_idx = COMMON_TIMEOUT_IDX(&ev->ev_timeout);
2665 if (tv_is_absolute) {
2666 ev->ev_timeout = *tv;
2667 } else if (common_timeout) {
2668 struct timeval tmp = *tv;
2669 tmp.tv_usec &= MICROSECONDS_MASK;
2670 evutil_timeradd(&now, &tmp, &ev->ev_timeout);
2671 ev->ev_timeout.tv_usec |=
2672 (tv->tv_usec & ~MICROSECONDS_MASK);
2674 evutil_timeradd(&now, tv, &ev->ev_timeout);
2678 "event_add: event %p, timeout in %d seconds %d useconds, call %p",
2679 ev, (int)tv->tv_sec, (int)tv->tv_usec, ev->ev_callback));
2681 #ifdef USE_REINSERT_TIMEOUT
2682 event_queue_reinsert_timeout(base, ev, was_common, common_timeout, old_timeout_idx);
2684 event_queue_insert_timeout(base, ev);
2687 if (common_timeout) {
2688 struct common_timeout_list *ctl =
2689 get_common_timeout_list(base, &ev->ev_timeout);
2690 if (ev == TAILQ_FIRST(&ctl->events)) {
2691 common_timeout_schedule(ctl, &now, ev);
2694 struct event* top = NULL;
2695 /* See if the earliest timeout is now earlier than it
2696 * was before: if so, we will need to tell the main
2697 * thread to wake up earlier than it would otherwise.
2698 * We double check the timeout of the top element to
2699 * handle time distortions due to system suspension.
2701 if (min_heap_elt_is_top_(ev))
2703 else if ((top = min_heap_top_(&base->timeheap)) != NULL &&
2704 evutil_timercmp(&top->ev_timeout, &now, <))
2709 /* if we are not in the right thread, we need to wake up the loop */
2710 if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2711 evthread_notify_base(base);
2713 event_debug_note_add_(ev);
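/*
 * Illustrative sketch (not part of the original file): the "common
 * timeout" optimization special-cased above.  When many events share
 * one duration, event_base_init_common_timeout() returns a magic
 * timeval (flag bits hidden in tv_usec, hence MICROSECONDS_MASK), and
 * adding events with that pointer puts them on an ordered per-duration
 * list instead of the min-heap.
 */
#if 0
#include <sys/time.h>
#include <event2/event.h>

static void add_many(struct event_base *base, struct event **evs, int n)
{
	struct timeval ten_sec = { 10, 0 };
	const struct timeval *common =
	    event_base_init_common_timeout(base, &ten_sec);
	int i;

	for (i = 0; i < n; ++i)
		event_add(evs[i], common != NULL ? common : &ten_sec);
}
#endif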
2719 event_del_(struct event *ev, int blocking)
2723 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2724 event_warnx("%s: event has no event_base set.", __func__);
2728 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2730 res = event_del_nolock_(ev, blocking);
2732 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2738 event_del(struct event *ev)
2740 return event_del_(ev, EVENT_DEL_AUTOBLOCK);
2744 event_del_block(struct event *ev)
2746 return event_del_(ev, EVENT_DEL_BLOCK);
2750 event_del_noblock(struct event *ev)
2752 return event_del_(ev, EVENT_DEL_NOBLOCK);
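/*
 * Illustrative sketch (not part of the original file): choosing among
 * the event_del() variants when deleting from a thread other than the
 * one running the loop.  event_del_block() waits for any in-progress
 * callback, so it is safe to free the callback argument afterwards;
 * event_del_noblock() never waits; plain event_del() decides per event
 * (see the EV_FINALIZE handling in event_del_nolock_ below).
 */
#if 0
#include <stdlib.h>
#include <event2/event.h>

static void teardown(struct event *ev, void *cb_arg)
{
	event_del_block(ev);	/* returns only once no callback is running */
	free(cb_arg);		/* now safe */
}
#endif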
2755 /** Helper for event_del: always called with th_base_lock held.
2757 * "blocking" must be one of the EVENT_DEL_{BLOCK, NOBLOCK, AUTOBLOCK,
2758 * EVEN_IF_FINALIZING} values. See those for more information.
2761 event_del_nolock_(struct event *ev, int blocking)
2763 struct event_base *base;
2764 int res = 0, notify = 0;
2766 event_debug(("event_del: %p (fd "EV_SOCK_FMT"), callback %p",
2767 ev, EV_SOCK_ARG(ev->ev_fd), ev->ev_callback));
2769 /* An event without a base has not been added */
2770 if (ev->ev_base == NULL)
2773 EVENT_BASE_ASSERT_LOCKED(ev->ev_base);
2775 if (blocking != EVENT_DEL_EVEN_IF_FINALIZING) {
2776 if (ev->ev_flags & EVLIST_FINALIZING) {
2782 /* If the main thread is currently executing this event's callback,
2783 * and we are not the main thread, then we want to wait until the
2784 * callback is done before we start removing the event. That way,
2785 * when this function returns, it will be safe to free the
2786 * user-supplied argument. */
2788 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2789 if (blocking != EVENT_DEL_NOBLOCK &&
2790 base->current_event == event_to_event_callback(ev) &&
2791 !EVBASE_IN_THREAD(base) &&
2792 (blocking == EVENT_DEL_BLOCK || !(ev->ev_events & EV_FINALIZE))) {
2793 ++base->current_event_waiters;
2794 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2798 EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2800 /* See if we are just actively executing this event in a loop */
2801 if (ev->ev_events & EV_SIGNAL) {
2802 if (ev->ev_ncalls && ev->ev_pncalls) {
2804 *ev->ev_pncalls = 0;
2808 if (ev->ev_flags & EVLIST_TIMEOUT) {
2809 /* NOTE: We never need to notify the main thread because of a
2810 * deleted timeout event: all that could happen if we don't is
2811 * that the dispatch loop might wake up too early. But the
2812 * point of notifying the main thread _is_ to wake up the
2813 * dispatch loop early anyway, so we wouldn't gain anything by doing it. */
2816 event_queue_remove_timeout(base, ev);
2819 if (ev->ev_flags & EVLIST_ACTIVE)
2820 event_queue_remove_active(base, event_to_event_callback(ev));
2821 else if (ev->ev_flags & EVLIST_ACTIVE_LATER)
2822 event_queue_remove_active_later(base, event_to_event_callback(ev));
2824 if (ev->ev_flags & EVLIST_INSERTED) {
2825 event_queue_remove_inserted(base, ev);
2826 if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
2827 res = evmap_io_del_(base, ev->ev_fd, ev);
2829 res = evmap_signal_del_(base, (int)ev->ev_fd, ev);
2831 /* evmap says we need to notify the main thread. */
2837 /* if we are not in the right thread, we need to wake up the loop */
2838 if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2839 evthread_notify_base(base);
2841 event_debug_note_del_(ev);
2847 event_active(struct event *ev, int res, short ncalls)
2849 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2850 event_warnx("%s: event has no event_base set.", __func__);
2854 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2856 event_debug_assert_is_setup_(ev);
2858 event_active_nolock_(ev, res, ncalls);
2860 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
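/*
 * Illustrative sketch (not part of the original file): manually firing
 * an event from another thread with event_active().  The callback runs
 * from the event loop with the given result flags, whether or not the
 * event was pending; as the code below shows, 'ncalls' only matters
 * for EV_SIGNAL events.
 */
#if 0
#include <event2/event.h>

static void poke(struct event *ev)
{
	event_active(ev, EV_READ, 0);
}
#endif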
2865 event_active_nolock_(struct event *ev, int res, short ncalls)
2867 struct event_base *base;
2869 event_debug(("event_active: %p (fd "EV_SOCK_FMT"), res %d, callback %p",
2870 ev, EV_SOCK_ARG(ev->ev_fd), (int)res, ev->ev_callback));
2873 EVENT_BASE_ASSERT_LOCKED(base);
2875 if (ev->ev_flags & EVLIST_FINALIZING) {
2880 switch ((ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
2882 case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
2886 /* We may get different kinds of events; combine their result flags. */
2889 case EVLIST_ACTIVE_LATER:
2897 if (ev->ev_pri < base->event_running_priority)
2898 base->event_continue = 1;
2900 if (ev->ev_events & EV_SIGNAL) {
2901 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2902 if (base->current_event == event_to_event_callback(ev) &&
2903 !EVBASE_IN_THREAD(base)) {
2904 ++base->current_event_waiters;
2905 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2908 ev->ev_ncalls = ncalls;
2909 ev->ev_pncalls = NULL;
2912 event_callback_activate_nolock_(base, event_to_event_callback(ev));
2916 event_active_later_(struct event *ev, int res)
2918 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2919 event_active_later_nolock_(ev, res);
2920 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2924 event_active_later_nolock_(struct event *ev, int res)
2926 struct event_base *base = ev->ev_base;
2927 EVENT_BASE_ASSERT_LOCKED(base);
2929 if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
2930 /* We may get different kinds of events; combine their result flags. */
2937 event_callback_activate_later_nolock_(base, event_to_event_callback(ev));
2941 event_callback_activate_(struct event_base *base,
2942 struct event_callback *evcb)
2945 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2946 r = event_callback_activate_nolock_(base, evcb);
2947 EVBASE_RELEASE_LOCK(base, th_base_lock);
2952 event_callback_activate_nolock_(struct event_base *base,
2953 struct event_callback *evcb)
2957 if (evcb->evcb_flags & EVLIST_FINALIZING)
2960 switch (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
2963 case EVLIST_ACTIVE_LATER:
2964 event_queue_remove_active_later(base, evcb);
2973 event_queue_insert_active(base, evcb);
2975 if (EVBASE_NEED_NOTIFY(base))
2976 evthread_notify_base(base);
2982 event_callback_activate_later_nolock_(struct event_base *base,
2983 struct event_callback *evcb)
2985 if (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
2988 event_queue_insert_active_later(base, evcb);
2989 if (EVBASE_NEED_NOTIFY(base))
2990 evthread_notify_base(base);
2995 event_callback_init_(struct event_base *base,
2996 struct event_callback *cb)
2998 memset(cb, 0, sizeof(*cb));
2999 cb->evcb_pri = base->nactivequeues - 1;
3003 event_callback_cancel_(struct event_base *base,
3004 struct event_callback *evcb)
3007 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3008 r = event_callback_cancel_nolock_(base, evcb, 0);
3009 EVBASE_RELEASE_LOCK(base, th_base_lock);
3014 event_callback_cancel_nolock_(struct event_base *base,
3015 struct event_callback *evcb, int even_if_finalizing)
3017 if ((evcb->evcb_flags & EVLIST_FINALIZING) && !even_if_finalizing)
3020 if (evcb->evcb_flags & EVLIST_INIT)
3021 return event_del_nolock_(event_callback_to_event(evcb),
3022 even_if_finalizing ? EVENT_DEL_EVEN_IF_FINALIZING : EVENT_DEL_AUTOBLOCK);
3024 switch ((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
3026 case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
3030 /* It's on the active queue: remove it. */
3031 event_queue_remove_active(base, evcb);
3033 case EVLIST_ACTIVE_LATER:
3034 event_queue_remove_active_later(base, evcb);
3044 event_deferred_cb_init_(struct event_callback *cb, ev_uint8_t priority, deferred_cb_fn fn, void *arg)
3046 memset(cb, 0, sizeof(*cb));
3047 cb->evcb_cb_union.evcb_selfcb = fn;
3049 cb->evcb_pri = priority;
3050 cb->evcb_closure = EV_CLOSURE_CB_SELF;
3054 event_deferred_cb_set_priority_(struct event_callback *cb, ev_uint8_t priority)
3056 cb->evcb_pri = priority;
3060 event_deferred_cb_cancel_(struct event_base *base, struct event_callback *cb)
3063 base = current_base;
3064 event_callback_cancel_(base, cb);
3067 #define MAX_DEFERREDS_QUEUED 32
3069 event_deferred_cb_schedule_(struct event_base *base, struct event_callback *cb)
3073 base = current_base;
3074 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3075 if (base->n_deferreds_queued > MAX_DEFERREDS_QUEUED) {
3076 r = event_callback_activate_later_nolock_(base, cb);
3078 r = event_callback_activate_nolock_(base, cb);
3080 ++base->n_deferreds_queued;
3083 EVBASE_RELEASE_LOCK(base, th_base_lock);
3088 timeout_next(struct event_base *base, struct timeval **tv_p)
3090 /* Caller must hold th_base_lock */
3093 struct timeval *tv = *tv_p;
3096 ev = min_heap_top_(&base->timeheap);
3099 /* if no time-based events are active, wait for I/O */
3104 if (gettime(base, &now) == -1) {
3109 if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
3110 evutil_timerclear(tv);
3114 evutil_timersub(&ev->ev_timeout, &now, tv);
3116 EVUTIL_ASSERT(tv->tv_sec >= 0);
3117 EVUTIL_ASSERT(tv->tv_usec >= 0);
3118 event_debug(("timeout_next: event: %p, in %d seconds, %d useconds", ev, (int)tv->tv_sec, (int)tv->tv_usec));
3124 /* Activate every event whose timeout has elapsed. */
3126 timeout_process(struct event_base *base)
3128 /* Caller must hold lock. */
3132 if (min_heap_empty_(&base->timeheap)) {
3136 gettime(base, &now);
3138 while ((ev = min_heap_top_(&base->timeheap))) {
3139 if (evutil_timercmp(&ev->ev_timeout, &now, >))
3142 /* delete this event from the I/O queues */
3143 event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
3145 event_debug(("timeout_process: event: %p, call %p",
3146 ev, ev->ev_callback));
3147 event_active_nolock_(ev, EV_TIMEOUT, 1);
3151 #if (EVLIST_INTERNAL >> 4) != 1
3152 #error "Mismatch for value of EVLIST_INTERNAL"
3156 #define MAX(a,b) (((a)>(b))?(a):(b))
3159 #define MAX_EVENT_COUNT(var, v) var = MAX(var, v)
3161 /* These are a fancy way to spell
3162     if (!(flags & EVLIST_INTERNAL))
3163         base->event_count--/++;
3165 #define DECR_EVENT_COUNT(base,flags) \
3166 ((base)->event_count -= (~((flags) >> 4) & 1))
3167 #define INCR_EVENT_COUNT(base,flags) do { \
3168 ((base)->event_count += (~((flags) >> 4) & 1)); \
3169 MAX_EVENT_COUNT((base)->event_count_max, (base)->event_count); \
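/* Added note (not in the original file): ~((flags) >> 4) & 1 is simply
 * the inverse of bit 4 of 'flags', i.e. of EVLIST_INTERNAL (0x10, as
 * verified by the #if above).  It is 0 when EVLIST_INTERNAL is set and
 * 1 otherwise, so internal events never change event_count while
 * ordinary events adjust it by exactly 1, branch-free.
 */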
3173 event_queue_remove_inserted(struct event_base *base, struct event *ev)
3175 EVENT_BASE_ASSERT_LOCKED(base);
3176 if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_INSERTED))) {
3177 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
3178 ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_INSERTED);
3181 DECR_EVENT_COUNT(base, ev->ev_flags);
3182 ev->ev_flags &= ~EVLIST_INSERTED;
3185 event_queue_remove_active(struct event_base *base, struct event_callback *evcb)
3187 EVENT_BASE_ASSERT_LOCKED(base);
3188 if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE))) {
3189 event_errx(1, "%s: %p not on queue %x", __func__,
3190 evcb, EVLIST_ACTIVE);
3193 DECR_EVENT_COUNT(base, evcb->evcb_flags);
3194 evcb->evcb_flags &= ~EVLIST_ACTIVE;
3195 base->event_count_active--;
3197 TAILQ_REMOVE(&base->activequeues[evcb->evcb_pri],
3198 evcb, evcb_active_next);
3201 event_queue_remove_active_later(struct event_base *base, struct event_callback *evcb)
3203 EVENT_BASE_ASSERT_LOCKED(base);
3204 if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE_LATER))) {
3205 event_errx(1, "%s: %p not on queue %x", __func__,
3206 evcb, EVLIST_ACTIVE_LATER);
3209 DECR_EVENT_COUNT(base, evcb->evcb_flags);
3210 evcb->evcb_flags &= ~EVLIST_ACTIVE_LATER;
3211 base->event_count_active--;
3213 TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
3216 event_queue_remove_timeout(struct event_base *base, struct event *ev)
3218 EVENT_BASE_ASSERT_LOCKED(base);
3219 if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_TIMEOUT))) {
3220 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
3221 ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_TIMEOUT);
3224 DECR_EVENT_COUNT(base, ev->ev_flags);
3225 ev->ev_flags &= ~EVLIST_TIMEOUT;
3227 if (is_common_timeout(&ev->ev_timeout, base)) {
3228 struct common_timeout_list *ctl =
3229 get_common_timeout_list(base, &ev->ev_timeout);
3230 TAILQ_REMOVE(&ctl->events, ev,
3231 ev_timeout_pos.ev_next_with_common_timeout);
3233 min_heap_erase_(&base->timeheap, ev);
3237 #ifdef USE_REINSERT_TIMEOUT
3238 /* Remove and reinsert 'ev' into the timeout queue. */
3240 event_queue_reinsert_timeout(struct event_base *base, struct event *ev,
3241 int was_common, int is_common, int old_timeout_idx)
3243 struct common_timeout_list *ctl;
3244 if (!(ev->ev_flags & EVLIST_TIMEOUT)) {
3245 event_queue_insert_timeout(base, ev);
3249 switch ((was_common<<1) | is_common) {
3250 case 3: /* Changing from one common timeout to another */
3251 ctl = base->common_timeout_queues[old_timeout_idx];
3252 TAILQ_REMOVE(&ctl->events, ev,
3253 ev_timeout_pos.ev_next_with_common_timeout);
3254 ctl = get_common_timeout_list(base, &ev->ev_timeout);
3255 insert_common_timeout_inorder(ctl, ev);
3257 case 2: /* Was common; is no longer common */
3258 ctl = base->common_timeout_queues[old_timeout_idx];
3259 TAILQ_REMOVE(&ctl->events, ev,
3260 ev_timeout_pos.ev_next_with_common_timeout);
3261 min_heap_push_(&base->timeheap, ev);
3263 case 1: /* Wasn't common; has become common. */
3264 min_heap_erase_(&base->timeheap, ev);
3265 ctl = get_common_timeout_list(base, &ev->ev_timeout);
3266 insert_common_timeout_inorder(ctl, ev);
3268 case 0: /* was in heap; is still on heap. */
3269 min_heap_adjust_(&base->timeheap, ev);
3272 EVUTIL_ASSERT(0); /* unreachable */
3278 /* Insert 'ev', in timeout order, into the common timeout list 'ctl'. */
3280 insert_common_timeout_inorder(struct common_timeout_list *ctl,
3284 /* By all logic, we should just be able to append 'ev' to the end of
3285 * ctl->events, since the timeout on each 'ev' is set to {the common
3286 * timeout} + {the time when we add the event}, and so the events
3287 * should arrive in order of their timeouts.  But just in case
3288 * there's some wacky threading issue going on, we do a search from
3289 * the end of 'ctl->events' to find the right insertion point.
3291 TAILQ_FOREACH_REVERSE(e, &ctl->events,
3292 event_list, ev_timeout_pos.ev_next_with_common_timeout) {
3293 /* This timercmp is a little sneaky, since both ev and e have
3294 * magic values in tv_usec. Fortunately, they ought to have
3295 * the _same_ magic values in tv_usec. Let's assert for that.
3298 is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
3299 if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
3300 TAILQ_INSERT_AFTER(&ctl->events, e, ev,
3301 ev_timeout_pos.ev_next_with_common_timeout);
3305 TAILQ_INSERT_HEAD(&ctl->events, ev,
3306 ev_timeout_pos.ev_next_with_common_timeout);
3310 event_queue_insert_inserted(struct event_base *base, struct event *ev)
3312 EVENT_BASE_ASSERT_LOCKED(base);
3314 if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_INSERTED)) {
3315 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already inserted", __func__,
3316 ev, EV_SOCK_ARG(ev->ev_fd));
3320 INCR_EVENT_COUNT(base, ev->ev_flags);
3322 ev->ev_flags |= EVLIST_INSERTED;
3326 event_queue_insert_active(struct event_base *base, struct event_callback *evcb)
3328 EVENT_BASE_ASSERT_LOCKED(base);
3330 if (evcb->evcb_flags & EVLIST_ACTIVE) {
3331 /* Double insertion is possible for active events */
3335 INCR_EVENT_COUNT(base, evcb->evcb_flags);
3337 evcb->evcb_flags |= EVLIST_ACTIVE;
3339 base->event_count_active++;
3340 MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
3341 EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3342 TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri],
3343 evcb, evcb_active_next);
3347 event_queue_insert_active_later(struct event_base *base, struct event_callback *evcb)
3349 EVENT_BASE_ASSERT_LOCKED(base);
3350 if (evcb->evcb_flags & (EVLIST_ACTIVE_LATER|EVLIST_ACTIVE)) {
3351 /* Double insertion is possible */
3355 INCR_EVENT_COUNT(base, evcb->evcb_flags);
3356 evcb->evcb_flags |= EVLIST_ACTIVE_LATER;
3357 base->event_count_active++;
3358 MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
3359 EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3360 TAILQ_INSERT_TAIL(&base->active_later_queue, evcb, evcb_active_next);
3364 event_queue_insert_timeout(struct event_base *base, struct event *ev)
3366 EVENT_BASE_ASSERT_LOCKED(base);
3368 if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_TIMEOUT)) {
3369 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on timeout", __func__,
3370 ev, EV_SOCK_ARG(ev->ev_fd));
3374 INCR_EVENT_COUNT(base, ev->ev_flags);
3376 ev->ev_flags |= EVLIST_TIMEOUT;
3378 if (is_common_timeout(&ev->ev_timeout, base)) {
3379 struct common_timeout_list *ctl =
3380 get_common_timeout_list(base, &ev->ev_timeout);
3381 insert_common_timeout_inorder(ctl, ev);
3383 min_heap_push_(&base->timeheap, ev);
3388 event_queue_make_later_events_active(struct event_base *base)
3390 struct event_callback *evcb;
3391 EVENT_BASE_ASSERT_LOCKED(base);
3393 while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
3394 TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
3395 evcb->evcb_flags = (evcb->evcb_flags & ~EVLIST_ACTIVE_LATER) | EVLIST_ACTIVE;
3396 EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3397 TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri], evcb, evcb_active_next);
3398 base->n_deferreds_queued += (evcb->evcb_closure == EV_CLOSURE_CB_SELF);
3402 /* Functions for debugging */
3405 event_get_version(void)
3407 return (EVENT__VERSION);
3411 event_get_version_number(void)
3413 return (EVENT__NUMERIC_VERSION);
3417 * No thread-safe interface needed - the information should be the same for all threads. */
3422 event_get_method(void)
3424 return (current_base->evsel->name);
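/*
 * Illustrative sketch (not part of the original file): reporting the
 * library version and backend.  Note that event_get_method() above
 * reads the deprecated global current_base; code holding a base should
 * prefer event_base_get_method().
 */
#if 0
#include <stdio.h>
#include <event2/event.h>

static void report_setup(struct event_base *base)
{
	printf("libevent %s (0x%08x), backend: %s\n",
	    event_get_version(),
	    (unsigned)event_get_version_number(),
	    event_base_get_method(base));
}
#endif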
3427 #ifndef EVENT__DISABLE_MM_REPLACEMENT
3428 static void *(*mm_malloc_fn_)(size_t sz) = NULL;
3429 static void *(*mm_realloc_fn_)(void *p, size_t sz) = NULL;
3430 static void (*mm_free_fn_)(void *p) = NULL;
3433 event_mm_malloc_(size_t sz)
3439 return mm_malloc_fn_(sz);
3445 event_mm_calloc_(size_t count, size_t size)
3447 if (count == 0 || size == 0)
3450 if (mm_malloc_fn_) {
3451 size_t sz = count * size;
3453 if (count > EV_SIZE_MAX / size)
3455 p = mm_malloc_fn_(sz);
3457 return memset(p, 0, sz);
3459 void *p = calloc(count, size);
3461 /* Windows calloc doesn't reliably set ENOMEM */
3474 event_mm_strdup_(const char *str)
3481 if (mm_malloc_fn_) {
3482 size_t ln = strlen(str);
3484 if (ln == EV_SIZE_MAX)
3486 p = mm_malloc_fn_(ln+1);
3488 return memcpy(p, str, ln+1);
3491 return _strdup(str);
3502 event_mm_realloc_(void *ptr, size_t sz)
3505 return mm_realloc_fn_(ptr, sz);
3507 return realloc(ptr, sz);
3511 event_mm_free_(void *ptr)
3520 event_set_mem_functions(void *(*malloc_fn)(size_t sz),
3521 void *(*realloc_fn)(void *ptr, size_t sz),
3522 void (*free_fn)(void *ptr))
3524 mm_malloc_fn_ = malloc_fn;
3525 mm_realloc_fn_ = realloc_fn;
3526 mm_free_fn_ = free_fn;
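/*
 * Illustrative sketch (not part of the original file): installing
 * replacement allocators with event_set_mem_functions().  This must
 * happen before any other libevent call, since memory obtained from
 * one allocator must not be released by another.  The leak counter
 * here is hypothetical user code (and only approximate: reallocs from
 * NULL are not tracked).
 */
#if 0
#include <stdlib.h>
#include <event2/event.h>

static size_t live_allocs;

static void *counting_malloc(size_t sz)
{
	++live_allocs;
	return malloc(sz);
}

static void *counting_realloc(void *p, size_t sz)
{
	return realloc(p, sz);
}

static void counting_free(void *p)
{
	if (p != NULL)
		--live_allocs;
	free(p);
}

static void install_counting_allocators(void)
{
	event_set_mem_functions(counting_malloc, counting_realloc,
	    counting_free);
}
#endif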
3530 #ifdef EVENT__HAVE_EVENTFD
3532 evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
3536 struct event_base *base = arg;
3538 r = read(fd, (void*) &msg, sizeof(msg));
3539 if (r<0 && errno != EAGAIN) {
3540 event_sock_warn(fd, "Error reading from eventfd");
3542 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3543 base->is_notify_pending = 0;
3544 EVBASE_RELEASE_LOCK(base, th_base_lock);
3549 evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
3551 unsigned char buf[1024];
3552 struct event_base *base = arg;
3554 while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
3557 while (read(fd, (char*)buf, sizeof(buf)) > 0)
3561 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3562 base->is_notify_pending = 0;
3563 EVBASE_RELEASE_LOCK(base, th_base_lock);
3567 evthread_make_base_notifiable(struct event_base *base)
3573 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3574 r = evthread_make_base_notifiable_nolock_(base);
3575 EVBASE_RELEASE_LOCK(base, th_base_lock);
3580 evthread_make_base_notifiable_nolock_(struct event_base *base)
3582 void (*cb)(evutil_socket_t, short, void *);
3583 int (*notify)(struct event_base *);
3585 if (base->th_notify_fn != NULL) {
3586 /* The base is already notifiable: we're doing fine. */
3590 #if defined(EVENT__HAVE_WORKING_KQUEUE)
3591 if (base->evsel == &kqops && event_kq_add_notify_event_(base) == 0) {
3592 base->th_notify_fn = event_kq_notify_base_;
3593 /* No need to add an event here; the backend can wake
3594 * itself up just fine. */
3599 #ifdef EVENT__HAVE_EVENTFD
3600 base->th_notify_fd[0] = evutil_eventfd_(0,
3601 EVUTIL_EFD_CLOEXEC|EVUTIL_EFD_NONBLOCK);
3602 if (base->th_notify_fd[0] >= 0) {
3603 base->th_notify_fd[1] = -1;
3604 notify = evthread_notify_base_eventfd;
3605 cb = evthread_notify_drain_eventfd;
3608 if (evutil_make_internal_pipe_(base->th_notify_fd) == 0) {
3609 notify = evthread_notify_base_default;
3610 cb = evthread_notify_drain_default;
3615 base->th_notify_fn = notify;
3617 /* prepare an event that we can use for wakeup */
3618 event_assign(&base->th_notify, base, base->th_notify_fd[0],
3619 EV_READ|EV_PERSIST, cb, base);
3621 /* we need to mark this as an internal event */
3622 base->th_notify.ev_flags |= EVLIST_INTERNAL;
3623 event_priority_set(&base->th_notify, 0);
3625 return event_add_nolock_(&base->th_notify, NULL, 0);
3629 event_base_foreach_event_nolock_(struct event_base *base,
3630 event_base_foreach_event_cb fn, void *arg)
3636 /* Start out with all the EVLIST_INSERTED events. */
3637 if ((r = evmap_foreach_event_(base, fn, arg)))
3640 /* Okay, now we deal with those events that have timeouts and are in the min-heap. */
3642 for (u = 0; u < base->timeheap.n; ++u) {
3643 ev = base->timeheap.p[u];
3644 if (ev->ev_flags & EVLIST_INSERTED) {
3645 /* we already processed this one */
3648 if ((r = fn(base, ev, arg)))
3652 /* Now for the events in one of the timeout queues. */
3654 for (i = 0; i < base->n_common_timeouts; ++i) {
3655 struct common_timeout_list *ctl =
3656 base->common_timeout_queues[i];
3657 TAILQ_FOREACH(ev, &ctl->events,
3658 ev_timeout_pos.ev_next_with_common_timeout) {
3659 if (ev->ev_flags & EVLIST_INSERTED) {
3660 /* we already processed this one */
3663 if ((r = fn(base, ev, arg)))
3668 /* Finally, we deal with all the active events that we haven't touched yet. */
3670 for (i = 0; i < base->nactivequeues; ++i) {
3671 struct event_callback *evcb;
3672 TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
3673 if ((evcb->evcb_flags & (EVLIST_INIT|EVLIST_INSERTED|EVLIST_TIMEOUT)) != EVLIST_INIT) {
3674 /* This isn't an event (evlist_init clear), or
3675 * we already processed it (inserted or timeout set). */
3679 ev = event_callback_to_event(evcb);
3680 if ((r = fn(base, ev, arg)))
3688 /* Helper for event_base_dump_events: called on each event in the event base;
3689 * dumps only the inserted events. */
3691 dump_inserted_event_fn(const struct event_base *base, const struct event *e, void *arg)
3694 const char *gloss = (e->ev_events & EV_SIGNAL) ?
3697 if (! (e->ev_flags & (EVLIST_INSERTED|EVLIST_TIMEOUT)))
3700 fprintf(output, " %p [%s "EV_SOCK_FMT"]%s%s%s%s%s%s",
3701 (void*)e, gloss, EV_SOCK_ARG(e->ev_fd),
3702 (e->ev_events&EV_READ)?" Read":"",
3703 (e->ev_events&EV_WRITE)?" Write":"",
3704 (e->ev_events&EV_CLOSED)?" EOF":"",
3705 (e->ev_events&EV_SIGNAL)?" Signal":"",
3706 (e->ev_events&EV_PERSIST)?" Persist":"",
3707 (e->ev_flags&EVLIST_INTERNAL)?" Internal":"");
3708 if (e->ev_flags & EVLIST_TIMEOUT) {
3710 tv.tv_sec = e->ev_timeout.tv_sec;
3711 tv.tv_usec = e->ev_timeout.tv_usec & MICROSECONDS_MASK;
3712 evutil_timeradd(&tv, &base->tv_clock_diff, &tv);
3713 fprintf(output, " Timeout=%ld.%06d",
3714 (long)tv.tv_sec, (int)(tv.tv_usec & MICROSECONDS_MASK));
3716 fputc('\n', output);
3721 /* Helper for event_base_dump_events: called on each event in the event base;
3722 * dumps only the active events. */
3724 dump_active_event_fn(const struct event_base *base, const struct event *e, void *arg)
3727 const char *gloss = (e->ev_events & EV_SIGNAL) ?
3730 if (! (e->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)))
3733 fprintf(output, " %p [%s "EV_SOCK_FMT", priority=%d]%s%s%s%s%s active%s%s\n",
3734 (void*)e, gloss, EV_SOCK_ARG(e->ev_fd), e->ev_pri,
3735 (e->ev_res&EV_READ)?" Read":"",
3736 (e->ev_res&EV_WRITE)?" Write":"",
3737 (e->ev_res&EV_CLOSED)?" EOF":"",
3738 (e->ev_res&EV_SIGNAL)?" Signal":"",
3739 (e->ev_res&EV_TIMEOUT)?" Timeout":"",
3740 (e->ev_flags&EVLIST_INTERNAL)?" [Internal]":"",
3741 (e->ev_flags&EVLIST_ACTIVE_LATER)?" [NextTime]":"");
3747 event_base_foreach_event(struct event_base *base,
3748 event_base_foreach_event_cb fn, void *arg)
3751 if ((!fn) || (!base)) {
3754 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3755 r = event_base_foreach_event_nolock_(base, fn, arg);
3756 EVBASE_RELEASE_LOCK(base, th_base_lock);
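/*
 * Illustrative sketch (not part of the original file): counting events
 * with event_base_foreach_event().  The callback runs with the base
 * lock held, so it must not add, delete, or activate events; returning
 * nonzero stops the iteration early.
 */
#if 0
#include <event2/event.h>

static int count_cb(const struct event_base *base, const struct event *ev,
    void *arg)
{
	int *n = arg;

	++*n;
	return 0;	/* keep iterating */
}

static int count_events(struct event_base *base)
{
	int n = 0;

	event_base_foreach_event(base, count_cb, &n);
	return n;
}
#endif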
3762 event_base_dump_events(struct event_base *base, FILE *output)
3764 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3765 fprintf(output, "Inserted events:\n");
3766 event_base_foreach_event_nolock_(base, dump_inserted_event_fn, output);
3768 fprintf(output, "Active events:\n");
3769 event_base_foreach_event_nolock_(base, dump_active_event_fn, output);
3770 EVBASE_RELEASE_LOCK(base, th_base_lock);
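/* Illustrative usage (not part of the original file):
 *
 *     event_base_dump_events(base, stderr);
 *
 * prints the "Inserted events:" and "Active events:" listings produced
 * by the two helpers above - handy when debugging a stuck loop.
 */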
3774 event_base_active_by_fd(struct event_base *base, evutil_socket_t fd, short events)
3776 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3777 evmap_io_active_(base, fd, events & (EV_READ|EV_WRITE|EV_CLOSED));
3778 EVBASE_RELEASE_LOCK(base, th_base_lock);
3782 event_base_active_by_signal(struct event_base *base, int sig)
3784 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3785 evmap_signal_active_(base, sig, 1);
3786 EVBASE_RELEASE_LOCK(base, th_base_lock);
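/*
 * Illustrative sketch (not part of the original file): using the two
 * helpers above to simulate readiness without real I/O or a real
 * signal, e.g. from tests.
 */
#if 0
#include <signal.h>
#include <event2/event.h>

static void fake_readiness(struct event_base *base, evutil_socket_t fd)
{
	/* Activate all events watching 'fd' for read, as if it were ready. */
	event_base_active_by_fd(base, fd, EV_READ);
	/* Activate all events watching SIGINT, as if it had been delivered. */
	event_base_active_by_signal(base, SIGINT);
}
#endif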
3791 event_base_add_virtual_(struct event_base *base)
3793 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3794 base->virtual_event_count++;
3795 MAX_EVENT_COUNT(base->virtual_event_count_max, base->virtual_event_count);
3796 EVBASE_RELEASE_LOCK(base, th_base_lock);
3800 event_base_del_virtual_(struct event_base *base)
3802 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3803 EVUTIL_ASSERT(base->virtual_event_count > 0);
3804 base->virtual_event_count--;
3805 if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
3806 evthread_notify_base(base);
3807 EVBASE_RELEASE_LOCK(base, th_base_lock);
3811 event_free_debug_globals_locks(void)
3813 #ifndef EVENT__DISABLE_THREAD_SUPPORT
3814 #ifndef EVENT__DISABLE_DEBUG_MODE
3815 if (event_debug_map_lock_ != NULL) {
3816 EVTHREAD_FREE_LOCK(event_debug_map_lock_, 0);
3817 event_debug_map_lock_ = NULL;
3818 evthreadimpl_disable_lock_debugging_();
3820 #endif /* EVENT__DISABLE_DEBUG_MODE */
3821 #endif /* EVENT__DISABLE_THREAD_SUPPORT */
3826 event_free_debug_globals(void)
3828 event_free_debug_globals_locks();
3832 event_free_evsig_globals(void)
3834 evsig_free_globals_();
3838 event_free_evutil_globals(void)
3840 evutil_free_globals_();
3844 event_free_globals(void)
3846 event_free_debug_globals();
3847 event_free_evsig_globals();
3848 event_free_evutil_globals();
3852 libevent_global_shutdown(void)
3854 event_disable_debug_mode();
3855 event_free_globals();
3858 #ifndef EVENT__DISABLE_THREAD_SUPPORT
3860 event_global_setup_locks_(const int enable_locks)
3862 #ifndef EVENT__DISABLE_DEBUG_MODE
3863 EVTHREAD_SETUP_GLOBAL_LOCK(event_debug_map_lock_, 0);
3865 if (evsig_global_setup_locks_(enable_locks) < 0)
3867 if (evutil_global_setup_locks_(enable_locks) < 0)
3869 if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
3876 event_base_assert_ok_(struct event_base *base)
3878 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3879 event_base_assert_ok_nolock_(base);
3880 EVBASE_RELEASE_LOCK(base, th_base_lock);
3884 event_base_assert_ok_nolock_(struct event_base *base)
3889 /* First do checks on the per-fd and per-signal lists */
3890 evmap_check_integrity_(base);
3892 /* Check the heap property */
3893 for (i = 1; i < (int)base->timeheap.n; ++i) {
3894 int parent = (i - 1) / 2;
3895 struct event *ev, *p_ev;
3896 ev = base->timeheap.p[i];
3897 p_ev = base->timeheap.p[parent];
3898 EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
3899 EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
3900 EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == i);
3903 /* Check that the common timeouts are fine */
3904 for (i = 0; i < base->n_common_timeouts; ++i) {
3905 struct common_timeout_list *ctl = base->common_timeout_queues[i];
3906 struct event *last=NULL, *ev;
3908 EVUTIL_ASSERT_TAILQ_OK(&ctl->events, event, ev_timeout_pos.ev_next_with_common_timeout);
3910 TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) {
3912 EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=));
3913 EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
3914 EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout,base));
3915 EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i);
3920 /* Check the active queues. */
3922 for (i = 0; i < base->nactivequeues; ++i) {
3923 struct event_callback *evcb;
3924 EVUTIL_ASSERT_TAILQ_OK(&base->activequeues[i], event_callback, evcb_active_next);
3925 TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
3926 EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE);
3927 EVUTIL_ASSERT(evcb->evcb_pri == i);
3933 struct event_callback *evcb;
3934 TAILQ_FOREACH(evcb, &base->active_later_queue, evcb_active_next) {
3935 EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE_LATER);
3939 EVUTIL_ASSERT(count == base->event_count_active);