/*
 * Event loop based on select() loop
 * Copyright (c) 2002-2009, Jouni Malinen <j@w1.fi>
 *
 * This software may be distributed under the terms of the BSD license.
 * See README for more details.
 */

#include "includes.h"

#include "common.h"
#include "trace.h"
#include "list.h"
#include "eloop.h"

#if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_EPOLL)
#error Do not define both poll and epoll
#endif

#if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_KQUEUE)
#error Do not define both poll and kqueue
#endif

#if defined(CONFIG_ELOOP_EPOLL) && defined(CONFIG_ELOOP_KQUEUE)
#error Do not define both epoll and kqueue
#endif

#if !defined(CONFIG_ELOOP_POLL) && !defined(CONFIG_ELOOP_EPOLL) && \
    !defined(CONFIG_ELOOP_KQUEUE)
#define CONFIG_ELOOP_SELECT
#endif
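
/*
 * Example (illustrative assumption, not part of this file): the backend is
 * chosen at build time with one of the defines above, e.g. from a Makefile
 * or .config fragment; without any of them, the portable select() backend
 * is used by default:
 *
 *   CFLAGS += -DCONFIG_ELOOP_EPOLL    # Linux
 *   CFLAGS += -DCONFIG_ELOOP_KQUEUE   # *BSD / macOS
 *   CFLAGS += -DCONFIG_ELOOP_POLL
 */
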
#ifdef CONFIG_ELOOP_POLL
#include <poll.h>
#endif /* CONFIG_ELOOP_POLL */

#ifdef CONFIG_ELOOP_EPOLL
#include <sys/epoll.h>
#endif /* CONFIG_ELOOP_EPOLL */

#ifdef CONFIG_ELOOP_KQUEUE
#include <sys/event.h>
#endif /* CONFIG_ELOOP_KQUEUE */


struct eloop_sock {
        int sock;
        void *eloop_data;
        void *user_data;
        eloop_sock_handler handler;
        WPA_TRACE_REF(eloop);
        WPA_TRACE_REF(user);
        WPA_TRACE_INFO
};

struct eloop_timeout {
        struct dl_list list;
        struct os_reltime time;
        void *eloop_data;
        void *user_data;
        eloop_timeout_handler handler;
        WPA_TRACE_REF(eloop);
        WPA_TRACE_REF(user);
        WPA_TRACE_INFO
};

struct eloop_signal {
        int sig;
        void *user_data;
        eloop_signal_handler handler;
        int signaled;
};

struct eloop_sock_table {
        int count;
        struct eloop_sock *table;
        eloop_event_type type;
        int changed;
};

struct eloop_data {
        int max_sock;

        int count; /* sum of all table counts */
#ifdef CONFIG_ELOOP_POLL
        int max_pollfd_map; /* number of pollfds_map currently allocated */
        int max_poll_fds; /* number of pollfds currently allocated */
        struct pollfd *pollfds;
        struct pollfd **pollfds_map;
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
        int max_fd;
        struct eloop_sock *fd_table;
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
#ifdef CONFIG_ELOOP_EPOLL
        int epollfd;
        int epoll_max_event_num;
        struct epoll_event *epoll_events;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
        int kqueuefd;
        int kqueue_nevents;
        struct kevent *kqueue_events;
#endif /* CONFIG_ELOOP_KQUEUE */
        struct eloop_sock_table readers;
        struct eloop_sock_table writers;
        struct eloop_sock_table exceptions;

        struct dl_list timeout;

        int signal_count;
        struct eloop_signal *signals;
        int signaled;
        int pending_terminate;

        int terminate;
};

static struct eloop_data eloop;


#ifdef WPA_TRACE

static void eloop_sigsegv_handler(int sig)
{
        wpa_trace_show("eloop SIGSEGV");
        abort();
}


static void eloop_trace_sock_add_ref(struct eloop_sock_table *table)
{
        int i;

        if (table == NULL || table->table == NULL)
                return;
        for (i = 0; i < table->count; i++) {
                wpa_trace_add_ref(&table->table[i], eloop,
                                  table->table[i].eloop_data);
                wpa_trace_add_ref(&table->table[i], user,
                                  table->table[i].user_data);
        }
}


static void eloop_trace_sock_remove_ref(struct eloop_sock_table *table)
{
        int i;

        if (table == NULL || table->table == NULL)
                return;
        for (i = 0; i < table->count; i++) {
                wpa_trace_remove_ref(&table->table[i], eloop,
                                     table->table[i].eloop_data);
                wpa_trace_remove_ref(&table->table[i], user,
                                     table->table[i].user_data);
        }
}

#else /* WPA_TRACE */

#define eloop_trace_sock_add_ref(table) do { } while (0)
#define eloop_trace_sock_remove_ref(table) do { } while (0)

#endif /* WPA_TRACE */


int eloop_init(void)
{
        os_memset(&eloop, 0, sizeof(eloop));
        dl_list_init(&eloop.timeout);
#ifdef CONFIG_ELOOP_EPOLL
        eloop.epollfd = epoll_create1(0);
        if (eloop.epollfd < 0) {
                wpa_printf(MSG_ERROR, "%s: epoll_create1 failed: %s",
                           __func__, strerror(errno));
                return -1;
        }
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
        eloop.kqueuefd = kqueue();
        if (eloop.kqueuefd < 0) {
                wpa_printf(MSG_ERROR, "%s: kqueue failed: %s",
                           __func__, strerror(errno));
                return -1;
        }
#endif /* CONFIG_ELOOP_KQUEUE */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
        eloop.readers.type = EVENT_TYPE_READ;
        eloop.writers.type = EVENT_TYPE_WRITE;
        eloop.exceptions.type = EVENT_TYPE_EXCEPTION;
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
#ifdef WPA_TRACE
        signal(SIGSEGV, eloop_sigsegv_handler);
#endif /* WPA_TRACE */
        return 0;
}


#ifdef CONFIG_ELOOP_EPOLL
static int eloop_sock_queue(int sock, eloop_event_type type)
{
        struct epoll_event ev;

        os_memset(&ev, 0, sizeof(ev));
        switch (type) {
        case EVENT_TYPE_READ:
                ev.events = EPOLLIN;
                break;
        case EVENT_TYPE_WRITE:
                ev.events = EPOLLOUT;
                break;
        /*
         * Exceptions are always checked when using epoll, but I suppose it's
         * possible that someone registered a socket *only* for exception
         * handling.
         */
        case EVENT_TYPE_EXCEPTION:
                ev.events = EPOLLERR | EPOLLHUP;
                break;
        }
        ev.data.fd = sock;
        if (epoll_ctl(eloop.epollfd, EPOLL_CTL_ADD, sock, &ev) < 0) {
                wpa_printf(MSG_ERROR, "%s: epoll_ctl(ADD) for fd=%d failed: %s",
                           __func__, sock, strerror(errno));
                return -1;
        }
        return 0;
}
#endif /* CONFIG_ELOOP_EPOLL */


#ifdef CONFIG_ELOOP_KQUEUE

static short event_type_kevent_filter(eloop_event_type type)
{
        switch (type) {
        case EVENT_TYPE_READ:
                return EVFILT_READ;
        case EVENT_TYPE_WRITE:
                return EVFILT_WRITE;
        default:
                return 0;
        }
}


static int eloop_sock_queue(int sock, eloop_event_type type)
{
        struct kevent ke;

        EV_SET(&ke, sock, event_type_kevent_filter(type), EV_ADD, 0, 0, 0);
        if (kevent(eloop.kqueuefd, &ke, 1, NULL, 0, NULL) == -1) {
                wpa_printf(MSG_ERROR, "%s: kevent(ADD) for fd=%d failed: %s",
                           __func__, sock, strerror(errno));
                return -1;
        }
        return 0;
}

#endif /* CONFIG_ELOOP_KQUEUE */


static int eloop_sock_table_add_sock(struct eloop_sock_table *table,
                                     int sock, eloop_sock_handler handler,
                                     void *eloop_data, void *user_data)
{
#ifdef CONFIG_ELOOP_EPOLL
        struct epoll_event *temp_events;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
        struct kevent *temp_events;
#endif /* CONFIG_ELOOP_KQUEUE */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
        struct eloop_sock *temp_table;
        int next;
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
        struct eloop_sock *tmp;
        int new_max_sock;

        if (sock > eloop.max_sock)
                new_max_sock = sock;
        else
                new_max_sock = eloop.max_sock;

        if (table == NULL)
                return -1;

#ifdef CONFIG_ELOOP_POLL
        if (new_max_sock >= eloop.max_pollfd_map) {
                struct pollfd **nmap;
                nmap = os_realloc_array(eloop.pollfds_map, new_max_sock + 50,
                                        sizeof(struct pollfd *));
                if (nmap == NULL)
                        return -1;

                eloop.max_pollfd_map = new_max_sock + 50;
                eloop.pollfds_map = nmap;
        }

        if (eloop.count + 1 > eloop.max_poll_fds) {
                struct pollfd *n;
                int nmax = eloop.count + 1 + 50;
                n = os_realloc_array(eloop.pollfds, nmax,
                                     sizeof(struct pollfd));
                if (n == NULL)
                        return -1;

                eloop.max_poll_fds = nmax;
                eloop.pollfds = n;
        }
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
        if (new_max_sock >= eloop.max_fd) {
                next = new_max_sock + 16;
                temp_table = os_realloc_array(eloop.fd_table, next,
                                              sizeof(struct eloop_sock));
                if (temp_table == NULL)
                        return -1;

                eloop.max_fd = next;
                eloop.fd_table = temp_table;
        }
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */

#ifdef CONFIG_ELOOP_EPOLL
        if (eloop.count + 1 > eloop.epoll_max_event_num) {
                next = eloop.epoll_max_event_num == 0 ? 8 :
                        eloop.epoll_max_event_num * 2;
                temp_events = os_realloc_array(eloop.epoll_events, next,
                                               sizeof(struct epoll_event));
                if (temp_events == NULL) {
                        wpa_printf(MSG_ERROR,
                                   "%s: realloc for epoll failed: %s",
                                   __func__, strerror(errno));
                        return -1;
                }

                eloop.epoll_max_event_num = next;
                eloop.epoll_events = temp_events;
        }
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
        if (eloop.count + 1 > eloop.kqueue_nevents) {
                next = eloop.kqueue_nevents == 0 ? 8 : eloop.kqueue_nevents * 2;
                temp_events = os_malloc(next * sizeof(*temp_events));
                if (temp_events == NULL) {
                        wpa_printf(MSG_ERROR,
                                   "%s: malloc for kqueue failed: %s",
                                   __func__, strerror(errno));
                        return -1;
                }

                os_free(eloop.kqueue_events);
                eloop.kqueue_events = temp_events;
                eloop.kqueue_nevents = next;
        }
#endif /* CONFIG_ELOOP_KQUEUE */

        eloop_trace_sock_remove_ref(table);
        tmp = os_realloc_array(table->table, table->count + 1,
                               sizeof(struct eloop_sock));
        if (tmp == NULL) {
                eloop_trace_sock_add_ref(table);
                return -1;
        }

        tmp[table->count].sock = sock;
        tmp[table->count].eloop_data = eloop_data;
        tmp[table->count].user_data = user_data;
        tmp[table->count].handler = handler;
        wpa_trace_record(&tmp[table->count]);
        table->count++;
        table->table = tmp;
        eloop.max_sock = new_max_sock;
        eloop.count++;
        table->changed = 1;
        eloop_trace_sock_add_ref(table);

#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
        if (eloop_sock_queue(sock, table->type) < 0)
                return -1;
        os_memcpy(&eloop.fd_table[sock], &table->table[table->count - 1],
                  sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
        return 0;
}


static void eloop_sock_table_remove_sock(struct eloop_sock_table *table,
                                         int sock)
{
#ifdef CONFIG_ELOOP_KQUEUE
        struct kevent ke;
#endif /* CONFIG_ELOOP_KQUEUE */
        int i;

        if (table == NULL || table->table == NULL || table->count == 0)
                return;

        for (i = 0; i < table->count; i++) {
                if (table->table[i].sock == sock)
                        break;
        }
        if (i == table->count)
                return;
        eloop_trace_sock_remove_ref(table);
        if (i != table->count - 1) {
                os_memmove(&table->table[i], &table->table[i + 1],
                           (table->count - i - 1) *
                           sizeof(struct eloop_sock));
        }
        table->count--;
        eloop.count--;
        table->changed = 1;
        eloop_trace_sock_add_ref(table);
#ifdef CONFIG_ELOOP_EPOLL
        if (epoll_ctl(eloop.epollfd, EPOLL_CTL_DEL, sock, NULL) < 0) {
                wpa_printf(MSG_ERROR, "%s: epoll_ctl(DEL) for fd=%d failed: %s",
                           __func__, sock, strerror(errno));
                return;
        }
        os_memset(&eloop.fd_table[sock], 0, sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
        EV_SET(&ke, sock, event_type_kevent_filter(table->type), EV_DELETE, 0,
               0, 0);
        if (kevent(eloop.kqueuefd, &ke, 1, NULL, 0, NULL) < 0) {
                wpa_printf(MSG_ERROR, "%s: kevent(DEL) for fd=%d failed: %s",
                           __func__, sock, strerror(errno));
                return;
        }
        os_memset(&eloop.fd_table[sock], 0, sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_KQUEUE */
}


#ifdef CONFIG_ELOOP_POLL

static struct pollfd * find_pollfd(struct pollfd **pollfds_map, int fd, int mx)
{
        if (fd < mx && fd >= 0)
                return pollfds_map[fd];
        return NULL;
}


static int eloop_sock_table_set_fds(struct eloop_sock_table *readers,
                                    struct eloop_sock_table *writers,
                                    struct eloop_sock_table *exceptions,
                                    struct pollfd *pollfds,
                                    struct pollfd **pollfds_map,
                                    int max_pollfd_map)
{
        int i;
        int nxt = 0;
        int fd;
        struct pollfd *pfd;

        /* Clear pollfd lookup map. It will be re-populated below. */
        os_memset(pollfds_map, 0, sizeof(struct pollfd *) * max_pollfd_map);

        if (readers && readers->table) {
                for (i = 0; i < readers->count; i++) {
                        fd = readers->table[i].sock;
                        assert(fd >= 0 && fd < max_pollfd_map);
                        pollfds[nxt].fd = fd;
                        pollfds[nxt].events = POLLIN;
                        pollfds[nxt].revents = 0;
                        pollfds_map[fd] = &(pollfds[nxt]);
                        nxt++;
                }
        }

        if (writers && writers->table) {
                for (i = 0; i < writers->count; i++) {
                        /*
                         * See if we already added this descriptor, update it
                         * if so.
                         */
                        fd = writers->table[i].sock;
                        assert(fd >= 0 && fd < max_pollfd_map);
                        pfd = pollfds_map[fd];
                        if (!pfd) {
                                pfd = &(pollfds[nxt]);
                                pfd->events = 0;
                                pfd->fd = fd;
                                pfd->revents = 0;
                                pollfds_map[fd] = pfd;
                                nxt++;
                        }
                        pfd->events |= POLLOUT;
                }
        }

        /*
         * Exceptions are always checked when using poll, but I suppose it's
         * possible that someone registered a socket *only* for exception
         * handling. Set the POLLIN bit in this case.
         */
        if (exceptions && exceptions->table) {
                for (i = 0; i < exceptions->count; i++) {
                        /*
                         * See if we already added this descriptor, just use it
                         * if so.
                         */
                        fd = exceptions->table[i].sock;
                        assert(fd >= 0 && fd < max_pollfd_map);
                        pfd = pollfds_map[fd];
                        if (!pfd) {
                                pfd = &(pollfds[nxt]);
                                pfd->events = POLLIN;
                                pfd->fd = fd;
                                pfd->revents = 0;
                                pollfds_map[fd] = pfd;
                                nxt++;
                        }
                }
        }

        return nxt;
}


static int eloop_sock_table_dispatch_table(struct eloop_sock_table *table,
                                           struct pollfd **pollfds_map,
                                           int max_pollfd_map,
                                           short int revents)
{
        int i;
        struct pollfd *pfd;

        if (!table || !table->table)
                return 0;

        table->changed = 0;
        for (i = 0; i < table->count; i++) {
                pfd = find_pollfd(pollfds_map, table->table[i].sock,
                                  max_pollfd_map);
                if (!pfd)
                        continue;

                if (!(pfd->revents & revents))
                        continue;

                table->table[i].handler(table->table[i].sock,
                                        table->table[i].eloop_data,
                                        table->table[i].user_data);
                if (table->changed)
                        return 1;
        }

        return 0;
}


static void eloop_sock_table_dispatch(struct eloop_sock_table *readers,
                                      struct eloop_sock_table *writers,
                                      struct eloop_sock_table *exceptions,
                                      struct pollfd **pollfds_map,
                                      int max_pollfd_map)
{
        if (eloop_sock_table_dispatch_table(readers, pollfds_map,
                                            max_pollfd_map, POLLIN | POLLERR |
                                            POLLHUP))
                return; /* pollfds may be invalid at this point */

        if (eloop_sock_table_dispatch_table(writers, pollfds_map,
                                            max_pollfd_map, POLLOUT))
                return; /* pollfds may be invalid at this point */

        eloop_sock_table_dispatch_table(exceptions, pollfds_map,
                                        max_pollfd_map, POLLERR | POLLHUP);
}

#endif /* CONFIG_ELOOP_POLL */


#ifdef CONFIG_ELOOP_SELECT

static void eloop_sock_table_set_fds(struct eloop_sock_table *table,
                                     fd_set *fds)
{
        int i;

        FD_ZERO(fds);

        if (table->table == NULL)
                return;

        for (i = 0; i < table->count; i++) {
                assert(table->table[i].sock >= 0);
                FD_SET(table->table[i].sock, fds);
        }
}


static void eloop_sock_table_dispatch(struct eloop_sock_table *table,
                                      fd_set *fds)
{
        int i;

        if (table == NULL || table->table == NULL)
                return;

        table->changed = 0;
        for (i = 0; i < table->count; i++) {
                if (FD_ISSET(table->table[i].sock, fds)) {
                        table->table[i].handler(table->table[i].sock,
                                                table->table[i].eloop_data,
                                                table->table[i].user_data);
                        if (table->changed)
                                break;
                }
        }
}

#endif /* CONFIG_ELOOP_SELECT */


#ifdef CONFIG_ELOOP_EPOLL
static void eloop_sock_table_dispatch(struct epoll_event *events, int nfds)
{
        struct eloop_sock *table;
        int i;

        for (i = 0; i < nfds; i++) {
                table = &eloop.fd_table[events[i].data.fd];
                if (table->handler == NULL)
                        continue;
                table->handler(table->sock, table->eloop_data,
                               table->user_data);
                if (eloop.readers.changed ||
                    eloop.writers.changed ||
                    eloop.exceptions.changed)
                        break;
        }
}
#endif /* CONFIG_ELOOP_EPOLL */


#ifdef CONFIG_ELOOP_KQUEUE

static void eloop_sock_table_dispatch(struct kevent *events, int nfds)
{
        struct eloop_sock *table;
        int i;

        for (i = 0; i < nfds; i++) {
                table = &eloop.fd_table[events[i].ident];
                if (table->handler == NULL)
                        continue;
                table->handler(table->sock, table->eloop_data,
                               table->user_data);
                if (eloop.readers.changed ||
                    eloop.writers.changed ||
                    eloop.exceptions.changed)
                        break;
        }
}


static int eloop_sock_table_requeue(struct eloop_sock_table *table)
{
        int i, r;

        r = 0;
        for (i = 0; i < table->count && table->table; i++) {
                if (eloop_sock_queue(table->table[i].sock, table->type) == -1)
                        r = -1;
        }
        return r;
}

#endif /* CONFIG_ELOOP_KQUEUE */


int eloop_sock_requeue(void)
{
        int r = 0;

#ifdef CONFIG_ELOOP_KQUEUE
        close(eloop.kqueuefd);
        eloop.kqueuefd = kqueue();
        if (eloop.kqueuefd < 0) {
                wpa_printf(MSG_ERROR, "%s: kqueue failed: %s",
                           __func__, strerror(errno));
                return -1;
        }

        if (eloop_sock_table_requeue(&eloop.readers) < 0)
                r = -1;
        if (eloop_sock_table_requeue(&eloop.writers) < 0)
                r = -1;
        if (eloop_sock_table_requeue(&eloop.exceptions) < 0)
                r = -1;
#endif /* CONFIG_ELOOP_KQUEUE */

        return r;
}
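
/*
 * Usage sketch (illustrative assumption, not part of this file): kqueue
 * descriptors are not inherited across fork(), so a child process that
 * keeps running the event loop must re-create the queue and re-add all
 * registered sockets:
 *
 *   pid_t pid = fork();
 *   if (pid == 0 && eloop_sock_requeue() < 0) {
 *           wpa_printf(MSG_ERROR, "Failed to re-queue eloop sockets");
 *           exit(1);
 *   }
 */
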
static void eloop_sock_table_destroy(struct eloop_sock_table *table)
{
        if (table) {
                int i;

                for (i = 0; i < table->count && table->table; i++) {
                        wpa_printf(MSG_INFO, "ELOOP: remaining socket: "
                                   "sock=%d eloop_data=%p user_data=%p "
                                   "handler=%p",
                                   table->table[i].sock,
                                   table->table[i].eloop_data,
                                   table->table[i].user_data,
                                   table->table[i].handler);
                        wpa_trace_dump_funcname("eloop unregistered socket "
                                                "handler",
                                                table->table[i].handler);
                        wpa_trace_dump("eloop sock", &table->table[i]);
                }
                os_free(table->table);
        }
}


int eloop_register_read_sock(int sock, eloop_sock_handler handler,
                             void *eloop_data, void *user_data)
{
        return eloop_register_sock(sock, EVENT_TYPE_READ, handler,
                                   eloop_data, user_data);
}


void eloop_unregister_read_sock(int sock)
{
        eloop_unregister_sock(sock, EVENT_TYPE_READ);
}
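
/*
 * Usage sketch (illustrative assumption; ctrl_receive and ctx are
 * hypothetical): a read handler is invoked from eloop_run() whenever the
 * registered descriptor becomes readable:
 *
 *   static void ctrl_receive(int sock, void *eloop_ctx, void *sock_ctx)
 *   {
 *           char buf[256];
 *           ssize_t len = recv(sock, buf, sizeof(buf), 0);
 *           if (len > 0) {
 *                   ... process buf ...
 *           }
 *   }
 *
 *   eloop_register_read_sock(sock, ctrl_receive, ctx, NULL);
 *   ...
 *   eloop_unregister_read_sock(sock);
 */
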
static struct eloop_sock_table *eloop_get_sock_table(eloop_event_type type)
{
        switch (type) {
        case EVENT_TYPE_READ:
                return &eloop.readers;
        case EVENT_TYPE_WRITE:
                return &eloop.writers;
        case EVENT_TYPE_EXCEPTION:
                return &eloop.exceptions;
        }

        return NULL;
}


int eloop_register_sock(int sock, eloop_event_type type,
                        eloop_sock_handler handler,
                        void *eloop_data, void *user_data)
{
        struct eloop_sock_table *table;

        assert(sock >= 0);
        table = eloop_get_sock_table(type);
        return eloop_sock_table_add_sock(table, sock, handler,
                                         eloop_data, user_data);
}


void eloop_unregister_sock(int sock, eloop_event_type type)
{
        struct eloop_sock_table *table;

        table = eloop_get_sock_table(type);
        eloop_sock_table_remove_sock(table, sock);
}


int eloop_register_timeout(unsigned int secs, unsigned int usecs,
                           eloop_timeout_handler handler,
                           void *eloop_data, void *user_data)
{
        struct eloop_timeout *timeout, *tmp;
        os_time_t now_sec;

        timeout = os_zalloc(sizeof(*timeout));
        if (timeout == NULL)
                return -1;
        if (os_get_reltime(&timeout->time) < 0) {
                os_free(timeout);
                return -1;
        }
        now_sec = timeout->time.sec;
        timeout->time.sec += secs;
        if (timeout->time.sec < now_sec) {
                /*
                 * Integer overflow - a timeout this long is effectively
                 * infinite, i.e., it would never trigger.
                 */
                wpa_printf(MSG_DEBUG, "ELOOP: Too long timeout (secs=%u) to "
                           "ever happen - ignore it", secs);
                os_free(timeout);
                return 0;
        }
        timeout->time.usec += usecs;
        while (timeout->time.usec >= 1000000) {
                timeout->time.sec++;
                timeout->time.usec -= 1000000;
        }
        timeout->eloop_data = eloop_data;
        timeout->user_data = user_data;
        timeout->handler = handler;
        wpa_trace_add_ref(timeout, eloop, eloop_data);
        wpa_trace_add_ref(timeout, user, user_data);
        wpa_trace_record(timeout);

        /* Maintain timeouts in order of increasing time */
        dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
                if (os_reltime_before(&timeout->time, &tmp->time)) {
                        dl_list_add(tmp->list.prev, &timeout->list);
                        return 0;
                }
        }
        dl_list_add_tail(&eloop.timeout, &timeout->list);

        return 0;
}


static void eloop_remove_timeout(struct eloop_timeout *timeout)
{
        dl_list_del(&timeout->list);
        wpa_trace_remove_ref(timeout, eloop, timeout->eloop_data);
        wpa_trace_remove_ref(timeout, user, timeout->user_data);
        os_free(timeout);
}
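
/*
 * Usage sketch (illustrative assumption; scan_timeout_cb and ctx are
 * hypothetical): timeouts are one-shot - the entry is removed before the
 * handler runs - so a periodic event re-registers itself from its own
 * handler:
 *
 *   static void scan_timeout_cb(void *eloop_ctx, void *user_ctx)
 *   {
 *           start_scan(eloop_ctx);
 *           eloop_register_timeout(10, 0, scan_timeout_cb, eloop_ctx, NULL);
 *   }
 *
 *   eloop_register_timeout(10, 0, scan_timeout_cb, ctx, NULL);
 *   ...
 *   eloop_cancel_timeout(scan_timeout_cb, ctx, ELOOP_ALL_CTX);
 */
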
int eloop_cancel_timeout(eloop_timeout_handler handler,
                         void *eloop_data, void *user_data)
{
        struct eloop_timeout *timeout, *prev;
        int removed = 0;

        dl_list_for_each_safe(timeout, prev, &eloop.timeout,
                              struct eloop_timeout, list) {
                if (timeout->handler == handler &&
                    (timeout->eloop_data == eloop_data ||
                     eloop_data == ELOOP_ALL_CTX) &&
                    (timeout->user_data == user_data ||
                     user_data == ELOOP_ALL_CTX)) {
                        eloop_remove_timeout(timeout);
                        removed++;
                }
        }

        return removed;
}


int eloop_cancel_timeout_one(eloop_timeout_handler handler,
                             void *eloop_data, void *user_data,
                             struct os_reltime *remaining)
{
        struct eloop_timeout *timeout, *prev;
        int removed = 0;
        struct os_reltime now;

        os_get_reltime(&now);
        remaining->sec = remaining->usec = 0;

        dl_list_for_each_safe(timeout, prev, &eloop.timeout,
                              struct eloop_timeout, list) {
                if (timeout->handler == handler &&
                    (timeout->eloop_data == eloop_data) &&
                    (timeout->user_data == user_data)) {
                        removed = 1;
                        if (os_reltime_before(&now, &timeout->time))
                                os_reltime_sub(&timeout->time, &now,
                                               remaining);
                        eloop_remove_timeout(timeout);
                        break;
                }
        }

        return removed;
}


int eloop_is_timeout_registered(eloop_timeout_handler handler,
                                void *eloop_data, void *user_data)
{
        struct eloop_timeout *tmp;

        dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
                if (tmp->handler == handler &&
                    tmp->eloop_data == eloop_data &&
                    tmp->user_data == user_data)
                        return 1;
        }

        return 0;
}


int eloop_deplete_timeout(unsigned int req_secs, unsigned int req_usecs,
                          eloop_timeout_handler handler, void *eloop_data,
                          void *user_data)
{
        struct os_reltime now, requested, remaining;
        struct eloop_timeout *tmp;

        dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
                if (tmp->handler == handler &&
                    tmp->eloop_data == eloop_data &&
                    tmp->user_data == user_data) {
                        requested.sec = req_secs;
                        requested.usec = req_usecs;
                        os_get_reltime(&now);
                        os_reltime_sub(&tmp->time, &now, &remaining);
                        if (os_reltime_before(&requested, &remaining)) {
                                eloop_cancel_timeout(handler, eloop_data,
                                                     user_data);
                                eloop_register_timeout(requested.sec,
                                                       requested.usec,
                                                       handler, eloop_data,
                                                       user_data);
                                return 1;
                        }
                        return 0;
                }
        }

        return -1;
}


int eloop_replenish_timeout(unsigned int req_secs, unsigned int req_usecs,
                            eloop_timeout_handler handler, void *eloop_data,
                            void *user_data)
{
        struct os_reltime now, requested, remaining;
        struct eloop_timeout *tmp;

        dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
                if (tmp->handler == handler &&
                    tmp->eloop_data == eloop_data &&
                    tmp->user_data == user_data) {
                        requested.sec = req_secs;
                        requested.usec = req_usecs;
                        os_get_reltime(&now);
                        os_reltime_sub(&tmp->time, &now, &remaining);
                        if (os_reltime_before(&remaining, &requested)) {
                                eloop_cancel_timeout(handler, eloop_data,
                                                     user_data);
                                eloop_register_timeout(requested.sec,
                                                       requested.usec,
                                                       handler, eloop_data,
                                                       user_data);
                                return 1;
                        }
                        return 0;
                }
        }

        return -1;
}
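
/*
 * Illustrative usage (assumption, not part of this file): deplete shortens
 * an existing timeout if the requested time is earlier than the remaining
 * time, while replenish extends it if the requested time is later. For
 * example, with a hypothetical rekey_cb timeout currently 60 s away:
 *
 *   eloop_deplete_timeout(10, 0, rekey_cb, ctx, NULL);    // fires in 10 s
 *   eloop_replenish_timeout(120, 0, rekey_cb, ctx, NULL); // fires in 120 s
 *
 * Both return 1 if the timeout was replaced, 0 if it was left as-is, and
 * -1 if no matching timeout was found.
 */
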
#ifndef CONFIG_NATIVE_WINDOWS
static void eloop_handle_alarm(int sig)
{
        wpa_printf(MSG_ERROR, "eloop: could not process SIGINT or SIGTERM in "
                   "two seconds. Looks like there is a bug that ends up in a "
                   "busy loop that prevents clean shutdown. Killing program "
                   "forcefully.");
        exit(1);
}
#endif /* CONFIG_NATIVE_WINDOWS */


static void eloop_handle_signal(int sig)
{
        int i;

#ifndef CONFIG_NATIVE_WINDOWS
        if ((sig == SIGINT || sig == SIGTERM) && !eloop.pending_terminate) {
                /* Use SIGALRM to break out from potential busy loops that
                 * would not allow the program to be killed. */
                eloop.pending_terminate = 1;
                signal(SIGALRM, eloop_handle_alarm);
                alarm(2);
        }
#endif /* CONFIG_NATIVE_WINDOWS */

        eloop.signaled++;
        for (i = 0; i < eloop.signal_count; i++) {
                if (eloop.signals[i].sig == sig) {
                        eloop.signals[i].signaled++;
                        break;
                }
        }
}


static void eloop_process_pending_signals(void)
{
        int i;

        if (eloop.signaled == 0)
                return;
        eloop.signaled = 0;

        if (eloop.pending_terminate) {
#ifndef CONFIG_NATIVE_WINDOWS
                alarm(0);
#endif /* CONFIG_NATIVE_WINDOWS */
                eloop.pending_terminate = 0;
                eloop.terminate = 1;
        }

        for (i = 0; i < eloop.signal_count; i++) {
                if (eloop.signals[i].signaled) {
                        eloop.signals[i].signaled = 0;
                        eloop.signals[i].handler(eloop.signals[i].sig,
                                                 eloop.signals[i].user_data);
                }
        }
}


int eloop_register_signal(int sig, eloop_signal_handler handler,
                          void *user_data)
{
        struct eloop_signal *tmp;

        tmp = os_realloc_array(eloop.signals, eloop.signal_count + 1,
                               sizeof(struct eloop_signal));
        if (tmp == NULL)
                return -1;

        tmp[eloop.signal_count].sig = sig;
        tmp[eloop.signal_count].user_data = user_data;
        tmp[eloop.signal_count].handler = handler;
        tmp[eloop.signal_count].signaled = 0;
        eloop.signal_count++;
        eloop.signals = tmp;
        signal(sig, eloop_handle_signal);

        return 0;
}


int eloop_register_signal_terminate(eloop_signal_handler handler,
                                    void *user_data)
{
        int ret = eloop_register_signal(SIGINT, handler, user_data);
        if (ret == 0)
                ret = eloop_register_signal(SIGTERM, handler, user_data);
        return ret;
}


int eloop_register_signal_reconfig(eloop_signal_handler handler,
                                   void *user_data)
{
#ifdef CONFIG_NATIVE_WINDOWS
        return 0;
#else /* CONFIG_NATIVE_WINDOWS */
        return eloop_register_signal(SIGHUP, handler, user_data);
#endif /* CONFIG_NATIVE_WINDOWS */
}
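
/*
 * Usage sketch (illustrative assumption; terminate_cb and ctx are
 * hypothetical): handlers registered here are only recorded by
 * eloop_handle_signal() and then invoked from
 * eloop_process_pending_signals() in the main loop, i.e. outside of
 * signal-handler context:
 *
 *   static void terminate_cb(int sig, void *signal_ctx)
 *   {
 *           eloop_terminate();
 *   }
 *
 *   eloop_register_signal_terminate(terminate_cb, ctx);
 */
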
void eloop_run(void)
{
#ifdef CONFIG_ELOOP_POLL
        int num_poll_fds;
        int timeout_ms = 0;
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
        fd_set *rfds, *wfds, *efds;
        struct timeval _tv;
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
        int timeout_ms = -1;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
        struct timespec ts;
#endif /* CONFIG_ELOOP_KQUEUE */
        int res;
        struct os_reltime tv, now;

#ifdef CONFIG_ELOOP_SELECT
        rfds = os_malloc(sizeof(*rfds));
        wfds = os_malloc(sizeof(*wfds));
        efds = os_malloc(sizeof(*efds));
        if (rfds == NULL || wfds == NULL || efds == NULL)
                goto out;
#endif /* CONFIG_ELOOP_SELECT */

        while (!eloop.terminate &&
               (!dl_list_empty(&eloop.timeout) || eloop.readers.count > 0 ||
                eloop.writers.count > 0 || eloop.exceptions.count > 0)) {
                struct eloop_timeout *timeout;

                if (eloop.pending_terminate) {
                        /*
                         * This may happen in some corner cases where a signal
                         * is received during a blocking operation. We need to
                         * process the pending signals and exit if requested to
                         * avoid hitting the SIGALRM limit if the blocking
                         * operation took more than two seconds.
                         */
                        eloop_process_pending_signals();
                        if (eloop.terminate)
                                break;
                }

                timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
                                        list);
                if (timeout) {
                        os_get_reltime(&now);
                        if (os_reltime_before(&now, &timeout->time))
                                os_reltime_sub(&timeout->time, &now, &tv);
                        else
                                tv.sec = tv.usec = 0;
#if defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL)
                        timeout_ms = tv.sec * 1000 + tv.usec / 1000;
#endif /* defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL) */
#ifdef CONFIG_ELOOP_SELECT
                        _tv.tv_sec = tv.sec;
                        _tv.tv_usec = tv.usec;
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_KQUEUE
                        ts.tv_sec = tv.sec;
                        ts.tv_nsec = tv.usec * 1000L;
#endif /* CONFIG_ELOOP_KQUEUE */
                }

#ifdef CONFIG_ELOOP_POLL
                num_poll_fds = eloop_sock_table_set_fds(
                        &eloop.readers, &eloop.writers, &eloop.exceptions,
                        eloop.pollfds, eloop.pollfds_map,
                        eloop.max_pollfd_map);
                res = poll(eloop.pollfds, num_poll_fds,
                           timeout ? timeout_ms : -1);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
                eloop_sock_table_set_fds(&eloop.readers, rfds);
                eloop_sock_table_set_fds(&eloop.writers, wfds);
                eloop_sock_table_set_fds(&eloop.exceptions, efds);
                res = select(eloop.max_sock + 1, rfds, wfds, efds,
                             timeout ? &_tv : NULL);
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
                if (eloop.count == 0) {
                        res = 0;
                } else {
                        res = epoll_wait(eloop.epollfd, eloop.epoll_events,
                                         eloop.count, timeout_ms);
                }
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
                if (eloop.count == 0) {
                        res = 0;
                } else {
                        res = kevent(eloop.kqueuefd, NULL, 0,
                                     eloop.kqueue_events, eloop.kqueue_nevents,
                                     timeout ? &ts : NULL);
                }
#endif /* CONFIG_ELOOP_KQUEUE */
                if (res < 0 && errno != EINTR && errno != 0) {
                        wpa_printf(MSG_ERROR, "eloop: %s: %s",
#ifdef CONFIG_ELOOP_POLL
                                   "poll",
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
                                   "select",
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
                                   "epoll",
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
                                   "kevent",
#endif /* CONFIG_ELOOP_KQUEUE */
                                   strerror(errno));
                        goto out;
                }

                eloop.readers.changed = 0;
                eloop.writers.changed = 0;
                eloop.exceptions.changed = 0;

                eloop_process_pending_signals();

                /* check if some registered timeouts have occurred */
                timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
                                        list);
                if (timeout) {
                        os_get_reltime(&now);
                        if (!os_reltime_before(&now, &timeout->time)) {
                                void *eloop_data = timeout->eloop_data;
                                void *user_data = timeout->user_data;
                                eloop_timeout_handler handler =
                                        timeout->handler;
                                eloop_remove_timeout(timeout);
                                handler(eloop_data, user_data);
                        }
                }

                if (res <= 0)
                        continue;

                if (eloop.readers.changed ||
                    eloop.writers.changed ||
                    eloop.exceptions.changed) {
                        /*
                         * Sockets may have been closed and reopened with the
                         * same FD in the signal or timeout handlers, so we
                         * must skip the previous results and check again
                         * whether any of the currently registered sockets have
                         * events.
                         */
                        continue;
                }

#ifdef CONFIG_ELOOP_POLL
                eloop_sock_table_dispatch(&eloop.readers, &eloop.writers,
                                          &eloop.exceptions, eloop.pollfds_map,
                                          eloop.max_pollfd_map);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
                eloop_sock_table_dispatch(&eloop.readers, rfds);
                eloop_sock_table_dispatch(&eloop.writers, wfds);
                eloop_sock_table_dispatch(&eloop.exceptions, efds);
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
                eloop_sock_table_dispatch(eloop.epoll_events, res);
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
                eloop_sock_table_dispatch(eloop.kqueue_events, res);
#endif /* CONFIG_ELOOP_KQUEUE */
        }

        eloop.terminate = 0;
out:
#ifdef CONFIG_ELOOP_SELECT
        os_free(rfds);
        os_free(wfds);
        os_free(efds);
#endif /* CONFIG_ELOOP_SELECT */
        return;
}
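
/*
 * Overall lifecycle sketch (illustrative assumption; read_cb, timer_cb and
 * ctx are hypothetical):
 *
 *   if (eloop_init() < 0)
 *           return -1;
 *   eloop_register_read_sock(sock, read_cb, ctx, NULL);
 *   eloop_register_timeout(1, 0, timer_cb, ctx, NULL);
 *   eloop_run();      // returns once terminated or nothing is registered
 *   eloop_destroy();  // logs and frees anything still registered
 */
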
void eloop_terminate(void)
{
        eloop.terminate = 1;
}


void eloop_destroy(void)
{
        struct eloop_timeout *timeout, *prev;
        struct os_reltime now;

        os_get_reltime(&now);
        dl_list_for_each_safe(timeout, prev, &eloop.timeout,
                              struct eloop_timeout, list) {
                int sec, usec;
                sec = timeout->time.sec - now.sec;
                usec = timeout->time.usec - now.usec;
                if (timeout->time.usec < now.usec) {
                        sec--;
                        usec += 1000000;
                }
                wpa_printf(MSG_INFO, "ELOOP: remaining timeout: %d.%06d "
                           "eloop_data=%p user_data=%p handler=%p",
                           sec, usec, timeout->eloop_data, timeout->user_data,
                           timeout->handler);
                wpa_trace_dump_funcname("eloop unregistered timeout handler",
                                        timeout->handler);
                wpa_trace_dump("eloop timeout", timeout);
                eloop_remove_timeout(timeout);
        }
        eloop_sock_table_destroy(&eloop.readers);
        eloop_sock_table_destroy(&eloop.writers);
        eloop_sock_table_destroy(&eloop.exceptions);
        os_free(eloop.signals);

#ifdef CONFIG_ELOOP_POLL
        os_free(eloop.pollfds);
        os_free(eloop.pollfds_map);
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
        os_free(eloop.fd_table);
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
#ifdef CONFIG_ELOOP_EPOLL
        os_free(eloop.epoll_events);
        close(eloop.epollfd);
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
        os_free(eloop.kqueue_events);
        close(eloop.kqueuefd);
#endif /* CONFIG_ELOOP_KQUEUE */
}


int eloop_terminated(void)
{
        return eloop.terminate || eloop.pending_terminate;
}


void eloop_wait_for_read_sock(int sock)
{
#ifdef CONFIG_ELOOP_POLL
        struct pollfd pfd;

        if (sock < 0)
                return;

        os_memset(&pfd, 0, sizeof(pfd));
        pfd.fd = sock;
        pfd.events = POLLIN;

        poll(&pfd, 1, -1);
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL)
        /*
         * We could use epoll() here, but that would require four system
         * calls: epoll_create1(), epoll_ctl() for ADD, epoll_wait(), and
         * close() for the epoll fd. select() performs better for this
         * one-shot wait.
         */
        fd_set rfds;

        if (sock < 0)
                return;

        FD_ZERO(&rfds);
        FD_SET(sock, &rfds);
        select(sock + 1, &rfds, NULL, NULL, NULL);
#endif /* defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL) */
#ifdef CONFIG_ELOOP_KQUEUE
        int kfd;
        struct kevent ke1, ke2;

        kfd = kqueue();
        if (kfd == -1)
                return;
        EV_SET(&ke1, sock, EVFILT_READ, EV_ADD | EV_ONESHOT, 0, 0, 0);
        kevent(kfd, &ke1, 1, &ke2, 1, NULL);
        close(kfd);
#endif /* CONFIG_ELOOP_KQUEUE */
}

#ifdef CONFIG_ELOOP_SELECT
#undef CONFIG_ELOOP_SELECT
#endif /* CONFIG_ELOOP_SELECT */