2 * Event loop based on select() loop
3 * Copyright (c) 2002-2009, Jouni Malinen <j@w1.fi>
5 * This software may be distributed under the terms of the BSD license.
6 * See README for more details.
17 #if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_EPOLL)
18 #error Do not define both of poll and epoll
21 #if !defined(CONFIG_ELOOP_POLL) && !defined(CONFIG_ELOOP_EPOLL)
22 #define CONFIG_ELOOP_SELECT
25 #ifdef CONFIG_ELOOP_POLL
27 #endif /* CONFIG_ELOOP_POLL */
29 #ifdef CONFIG_ELOOP_EPOLL
30 #include <sys/epoll.h>
31 #endif /* CONFIG_ELOOP_EPOLL */
37 eloop_sock_handler handler;
43 struct eloop_timeout {
45 struct os_reltime time;
48 eloop_timeout_handler handler;
57 eloop_signal_handler handler;
61 struct eloop_sock_table {
63 struct eloop_sock *table;
64 eloop_event_type type;
71 int count; /* sum of all table counts */
72 #ifdef CONFIG_ELOOP_POLL
73 int max_pollfd_map; /* number of pollfds_map currently allocated */
74 int max_poll_fds; /* number of pollfds currently allocated */
75 struct pollfd *pollfds;
76 struct pollfd **pollfds_map;
77 #endif /* CONFIG_ELOOP_POLL */
78 #ifdef CONFIG_ELOOP_EPOLL
80 int epoll_max_event_num;
82 struct eloop_sock *epoll_table;
83 struct epoll_event *epoll_events;
84 #endif /* CONFIG_ELOOP_EPOLL */
85 struct eloop_sock_table readers;
86 struct eloop_sock_table writers;
87 struct eloop_sock_table exceptions;
89 struct dl_list timeout;
92 struct eloop_signal *signals;
94 int pending_terminate;
99 static struct eloop_data eloop;
/* WPA_TRACE builds: dump a backtrace on SIGSEGV before aborting. */
static void eloop_sigsegv_handler(int sig)
{
	wpa_trace_show("eloop SIGSEGV");
	abort();
}
110 static void eloop_trace_sock_add_ref(struct eloop_sock_table *table)
113 if (table == NULL || table->table == NULL)
115 for (i = 0; i < table->count; i++) {
116 wpa_trace_add_ref(&table->table[i], eloop,
117 table->table[i].eloop_data);
118 wpa_trace_add_ref(&table->table[i], user,
119 table->table[i].user_data);
124 static void eloop_trace_sock_remove_ref(struct eloop_sock_table *table)
127 if (table == NULL || table->table == NULL)
129 for (i = 0; i < table->count; i++) {
130 wpa_trace_remove_ref(&table->table[i], eloop,
131 table->table[i].eloop_data);
132 wpa_trace_remove_ref(&table->table[i], user,
133 table->table[i].user_data);
137 #else /* WPA_TRACE */
139 #define eloop_trace_sock_add_ref(table) do { } while (0)
140 #define eloop_trace_sock_remove_ref(table) do { } while (0)
142 #endif /* WPA_TRACE */
147 os_memset(&eloop, 0, sizeof(eloop));
148 dl_list_init(&eloop.timeout);
149 #ifdef CONFIG_ELOOP_EPOLL
150 eloop.epollfd = epoll_create1(0);
151 if (eloop.epollfd < 0) {
152 wpa_printf(MSG_ERROR, "%s: epoll_create1 failed. %s\n",
153 __func__, strerror(errno));
156 eloop.readers.type = EVENT_TYPE_READ;
157 eloop.writers.type = EVENT_TYPE_WRITE;
158 eloop.exceptions.type = EVENT_TYPE_EXCEPTION;
159 #endif /* CONFIG_ELOOP_EPOLL */
161 signal(SIGSEGV, eloop_sigsegv_handler);
162 #endif /* WPA_TRACE */
167 static int eloop_sock_table_add_sock(struct eloop_sock_table *table,
168 int sock, eloop_sock_handler handler,
169 void *eloop_data, void *user_data)
171 #ifdef CONFIG_ELOOP_EPOLL
172 struct eloop_sock *temp_table;
173 struct epoll_event ev, *temp_events;
175 #endif /* CONFIG_ELOOP_EPOLL */
176 struct eloop_sock *tmp;
179 if (sock > eloop.max_sock)
182 new_max_sock = eloop.max_sock;
187 #ifdef CONFIG_ELOOP_POLL
188 if (new_max_sock >= eloop.max_pollfd_map) {
189 struct pollfd **nmap;
190 nmap = os_realloc_array(eloop.pollfds_map, new_max_sock + 50,
191 sizeof(struct pollfd *));
195 eloop.max_pollfd_map = new_max_sock + 50;
196 eloop.pollfds_map = nmap;
199 if (eloop.count + 1 > eloop.max_poll_fds) {
201 int nmax = eloop.count + 1 + 50;
202 n = os_realloc_array(eloop.pollfds, nmax,
203 sizeof(struct pollfd));
207 eloop.max_poll_fds = nmax;
210 #endif /* CONFIG_ELOOP_POLL */
211 #ifdef CONFIG_ELOOP_EPOLL
212 if (new_max_sock >= eloop.epoll_max_fd) {
213 next = eloop.epoll_max_fd == 0 ? 16 : eloop.epoll_max_fd * 2;
214 temp_table = os_realloc_array(eloop.epoll_table, next,
215 sizeof(struct eloop_sock));
216 if (temp_table == NULL)
219 eloop.epoll_max_fd = next;
220 eloop.epoll_table = temp_table;
223 if (eloop.count + 1 > eloop.epoll_max_event_num) {
224 next = eloop.epoll_max_event_num == 0 ? 8 :
225 eloop.epoll_max_event_num * 2;
226 temp_events = os_realloc_array(eloop.epoll_events, next,
227 sizeof(struct epoll_event));
228 if (temp_events == NULL) {
229 wpa_printf(MSG_ERROR, "%s: malloc for epoll failed. "
230 "%s\n", __func__, strerror(errno));
234 eloop.epoll_max_event_num = next;
235 eloop.epoll_events = temp_events;
237 #endif /* CONFIG_ELOOP_EPOLL */
239 eloop_trace_sock_remove_ref(table);
240 tmp = os_realloc_array(table->table, table->count + 1,
241 sizeof(struct eloop_sock));
243 eloop_trace_sock_add_ref(table);
247 tmp[table->count].sock = sock;
248 tmp[table->count].eloop_data = eloop_data;
249 tmp[table->count].user_data = user_data;
250 tmp[table->count].handler = handler;
251 wpa_trace_record(&tmp[table->count]);
254 eloop.max_sock = new_max_sock;
257 eloop_trace_sock_add_ref(table);
259 #ifdef CONFIG_ELOOP_EPOLL
260 os_memset(&ev, 0, sizeof(ev));
261 switch (table->type) {
262 case EVENT_TYPE_READ:
265 case EVENT_TYPE_WRITE:
266 ev.events = EPOLLOUT;
269 * Exceptions are always checked when using epoll, but I suppose it's
270 * possible that someone registered a socket *only* for exception
273 case EVENT_TYPE_EXCEPTION:
274 ev.events = EPOLLERR | EPOLLHUP;
278 if (epoll_ctl(eloop.epollfd, EPOLL_CTL_ADD, sock, &ev) < 0) {
279 wpa_printf(MSG_ERROR, "%s: epoll_ctl(ADD) for fd=%d "
280 "failed. %s\n", __func__, sock, strerror(errno));
283 os_memcpy(&eloop.epoll_table[sock], &table->table[table->count - 1],
284 sizeof(struct eloop_sock));
285 #endif /* CONFIG_ELOOP_EPOLL */
290 static void eloop_sock_table_remove_sock(struct eloop_sock_table *table,
295 if (table == NULL || table->table == NULL || table->count == 0)
298 for (i = 0; i < table->count; i++) {
299 if (table->table[i].sock == sock)
302 if (i == table->count)
304 eloop_trace_sock_remove_ref(table);
305 if (i != table->count - 1) {
306 os_memmove(&table->table[i], &table->table[i + 1],
307 (table->count - i - 1) *
308 sizeof(struct eloop_sock));
313 eloop_trace_sock_add_ref(table);
314 #ifdef CONFIG_ELOOP_EPOLL
315 if (epoll_ctl(eloop.epollfd, EPOLL_CTL_DEL, sock, NULL) < 0) {
316 wpa_printf(MSG_ERROR, "%s: epoll_ctl(DEL) for fd=%d "
317 "failed. %s\n", __func__, sock, strerror(errno));
320 os_memset(&eloop.epoll_table[sock], 0, sizeof(struct eloop_sock));
321 #endif /* CONFIG_ELOOP_EPOLL */
325 #ifdef CONFIG_ELOOP_POLL
327 static struct pollfd * find_pollfd(struct pollfd **pollfds_map, int fd, int mx)
329 if (fd < mx && fd >= 0)
330 return pollfds_map[fd];
335 static int eloop_sock_table_set_fds(struct eloop_sock_table *readers,
336 struct eloop_sock_table *writers,
337 struct eloop_sock_table *exceptions,
338 struct pollfd *pollfds,
339 struct pollfd **pollfds_map,
347 /* Clear pollfd lookup map. It will be re-populated below. */
348 os_memset(pollfds_map, 0, sizeof(struct pollfd *) * max_pollfd_map);
350 if (readers && readers->table) {
351 for (i = 0; i < readers->count; i++) {
352 fd = readers->table[i].sock;
353 assert(fd >= 0 && fd < max_pollfd_map);
354 pollfds[nxt].fd = fd;
355 pollfds[nxt].events = POLLIN;
356 pollfds[nxt].revents = 0;
357 pollfds_map[fd] = &(pollfds[nxt]);
362 if (writers && writers->table) {
363 for (i = 0; i < writers->count; i++) {
365 * See if we already added this descriptor, update it
368 fd = writers->table[i].sock;
369 assert(fd >= 0 && fd < max_pollfd_map);
370 pfd = pollfds_map[fd];
372 pfd = &(pollfds[nxt]);
375 pollfds[i].revents = 0;
376 pollfds_map[fd] = pfd;
379 pfd->events |= POLLOUT;
384 * Exceptions are always checked when using poll, but I suppose it's
385 * possible that someone registered a socket *only* for exception
386 * handling. Set the POLLIN bit in this case.
388 if (exceptions && exceptions->table) {
389 for (i = 0; i < exceptions->count; i++) {
391 * See if we already added this descriptor, just use it
394 fd = exceptions->table[i].sock;
395 assert(fd >= 0 && fd < max_pollfd_map);
396 pfd = pollfds_map[fd];
398 pfd = &(pollfds[nxt]);
399 pfd->events = POLLIN;
401 pollfds[i].revents = 0;
402 pollfds_map[fd] = pfd;
412 static int eloop_sock_table_dispatch_table(struct eloop_sock_table *table,
413 struct pollfd **pollfds_map,
420 if (!table || !table->table)
424 for (i = 0; i < table->count; i++) {
425 pfd = find_pollfd(pollfds_map, table->table[i].sock,
430 if (!(pfd->revents & revents))
433 table->table[i].handler(table->table[i].sock,
434 table->table[i].eloop_data,
435 table->table[i].user_data);
444 static void eloop_sock_table_dispatch(struct eloop_sock_table *readers,
445 struct eloop_sock_table *writers,
446 struct eloop_sock_table *exceptions,
447 struct pollfd **pollfds_map,
450 if (eloop_sock_table_dispatch_table(readers, pollfds_map,
451 max_pollfd_map, POLLIN | POLLERR |
453 return; /* pollfds may be invalid at this point */
455 if (eloop_sock_table_dispatch_table(writers, pollfds_map,
456 max_pollfd_map, POLLOUT))
457 return; /* pollfds may be invalid at this point */
459 eloop_sock_table_dispatch_table(exceptions, pollfds_map,
460 max_pollfd_map, POLLERR | POLLHUP);
463 #endif /* CONFIG_ELOOP_POLL */
465 #ifdef CONFIG_ELOOP_SELECT
467 static void eloop_sock_table_set_fds(struct eloop_sock_table *table,
474 if (table->table == NULL)
477 for (i = 0; i < table->count; i++) {
478 assert(table->table[i].sock >= 0);
479 FD_SET(table->table[i].sock, fds);
484 static void eloop_sock_table_dispatch(struct eloop_sock_table *table,
489 if (table == NULL || table->table == NULL)
493 for (i = 0; i < table->count; i++) {
494 if (FD_ISSET(table->table[i].sock, fds)) {
495 table->table[i].handler(table->table[i].sock,
496 table->table[i].eloop_data,
497 table->table[i].user_data);
504 #endif /* CONFIG_ELOOP_SELECT */
507 #ifdef CONFIG_ELOOP_EPOLL
508 static void eloop_sock_table_dispatch(struct epoll_event *events, int nfds)
510 struct eloop_sock *table;
513 for (i = 0; i < nfds; i++) {
514 table = &eloop.epoll_table[events[i].data.fd];
515 if (table->handler == NULL)
517 table->handler(table->sock, table->eloop_data,
519 if (eloop.readers.changed ||
520 eloop.writers.changed ||
521 eloop.exceptions.changed)
525 #endif /* CONFIG_ELOOP_EPOLL */
528 static void eloop_sock_table_destroy(struct eloop_sock_table *table)
532 for (i = 0; i < table->count && table->table; i++) {
533 wpa_printf(MSG_INFO, "ELOOP: remaining socket: "
534 "sock=%d eloop_data=%p user_data=%p "
536 table->table[i].sock,
537 table->table[i].eloop_data,
538 table->table[i].user_data,
539 table->table[i].handler);
540 wpa_trace_dump_funcname("eloop unregistered socket "
542 table->table[i].handler);
543 wpa_trace_dump("eloop sock", &table->table[i]);
545 os_free(table->table);
550 int eloop_register_read_sock(int sock, eloop_sock_handler handler,
551 void *eloop_data, void *user_data)
553 return eloop_register_sock(sock, EVENT_TYPE_READ, handler,
554 eloop_data, user_data);
558 void eloop_unregister_read_sock(int sock)
560 eloop_unregister_sock(sock, EVENT_TYPE_READ);
564 static struct eloop_sock_table *eloop_get_sock_table(eloop_event_type type)
567 case EVENT_TYPE_READ:
568 return &eloop.readers;
569 case EVENT_TYPE_WRITE:
570 return &eloop.writers;
571 case EVENT_TYPE_EXCEPTION:
572 return &eloop.exceptions;
579 int eloop_register_sock(int sock, eloop_event_type type,
580 eloop_sock_handler handler,
581 void *eloop_data, void *user_data)
583 struct eloop_sock_table *table;
586 table = eloop_get_sock_table(type);
587 return eloop_sock_table_add_sock(table, sock, handler,
588 eloop_data, user_data);
592 void eloop_unregister_sock(int sock, eloop_event_type type)
594 struct eloop_sock_table *table;
596 table = eloop_get_sock_table(type);
597 eloop_sock_table_remove_sock(table, sock);
601 int eloop_register_timeout(unsigned int secs, unsigned int usecs,
602 eloop_timeout_handler handler,
603 void *eloop_data, void *user_data)
605 struct eloop_timeout *timeout, *tmp;
608 timeout = os_zalloc(sizeof(*timeout));
611 if (os_get_reltime(&timeout->time) < 0) {
615 now_sec = timeout->time.sec;
616 timeout->time.sec += secs;
617 if (timeout->time.sec < now_sec) {
619 * Integer overflow - assume long enough timeout to be assumed
620 * to be infinite, i.e., the timeout would never happen.
622 wpa_printf(MSG_DEBUG, "ELOOP: Too long timeout (secs=%u) to "
623 "ever happen - ignore it", secs);
627 timeout->time.usec += usecs;
628 while (timeout->time.usec >= 1000000) {
630 timeout->time.usec -= 1000000;
632 timeout->eloop_data = eloop_data;
633 timeout->user_data = user_data;
634 timeout->handler = handler;
635 wpa_trace_add_ref(timeout, eloop, eloop_data);
636 wpa_trace_add_ref(timeout, user, user_data);
637 wpa_trace_record(timeout);
639 /* Maintain timeouts in order of increasing time */
640 dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
641 if (os_reltime_before(&timeout->time, &tmp->time)) {
642 dl_list_add(tmp->list.prev, &timeout->list);
646 dl_list_add_tail(&eloop.timeout, &timeout->list);
652 static void eloop_remove_timeout(struct eloop_timeout *timeout)
654 dl_list_del(&timeout->list);
655 wpa_trace_remove_ref(timeout, eloop, timeout->eloop_data);
656 wpa_trace_remove_ref(timeout, user, timeout->user_data);
661 int eloop_cancel_timeout(eloop_timeout_handler handler,
662 void *eloop_data, void *user_data)
664 struct eloop_timeout *timeout, *prev;
667 dl_list_for_each_safe(timeout, prev, &eloop.timeout,
668 struct eloop_timeout, list) {
669 if (timeout->handler == handler &&
670 (timeout->eloop_data == eloop_data ||
671 eloop_data == ELOOP_ALL_CTX) &&
672 (timeout->user_data == user_data ||
673 user_data == ELOOP_ALL_CTX)) {
674 eloop_remove_timeout(timeout);
683 int eloop_cancel_timeout_one(eloop_timeout_handler handler,
684 void *eloop_data, void *user_data,
685 struct os_reltime *remaining)
687 struct eloop_timeout *timeout, *prev;
689 struct os_reltime now;
691 os_get_reltime(&now);
692 remaining->sec = remaining->usec = 0;
694 dl_list_for_each_safe(timeout, prev, &eloop.timeout,
695 struct eloop_timeout, list) {
696 if (timeout->handler == handler &&
697 (timeout->eloop_data == eloop_data) &&
698 (timeout->user_data == user_data)) {
700 if (os_reltime_before(&now, &timeout->time))
701 os_reltime_sub(&timeout->time, &now, remaining);
702 eloop_remove_timeout(timeout);
710 int eloop_is_timeout_registered(eloop_timeout_handler handler,
711 void *eloop_data, void *user_data)
713 struct eloop_timeout *tmp;
715 dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
716 if (tmp->handler == handler &&
717 tmp->eloop_data == eloop_data &&
718 tmp->user_data == user_data)
726 int eloop_deplete_timeout(unsigned int req_secs, unsigned int req_usecs,
727 eloop_timeout_handler handler, void *eloop_data,
730 struct os_reltime now, requested, remaining;
731 struct eloop_timeout *tmp;
733 dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
734 if (tmp->handler == handler &&
735 tmp->eloop_data == eloop_data &&
736 tmp->user_data == user_data) {
737 requested.sec = req_secs;
738 requested.usec = req_usecs;
739 os_get_reltime(&now);
740 os_reltime_sub(&tmp->time, &now, &remaining);
741 if (os_reltime_before(&requested, &remaining)) {
742 eloop_cancel_timeout(handler, eloop_data,
744 eloop_register_timeout(requested.sec,
758 int eloop_replenish_timeout(unsigned int req_secs, unsigned int req_usecs,
759 eloop_timeout_handler handler, void *eloop_data,
762 struct os_reltime now, requested, remaining;
763 struct eloop_timeout *tmp;
765 dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
766 if (tmp->handler == handler &&
767 tmp->eloop_data == eloop_data &&
768 tmp->user_data == user_data) {
769 requested.sec = req_secs;
770 requested.usec = req_usecs;
771 os_get_reltime(&now);
772 os_reltime_sub(&tmp->time, &now, &remaining);
773 if (os_reltime_before(&remaining, &requested)) {
774 eloop_cancel_timeout(handler, eloop_data,
776 eloop_register_timeout(requested.sec,
790 #ifndef CONFIG_NATIVE_WINDOWS
791 static void eloop_handle_alarm(int sig)
793 wpa_printf(MSG_ERROR, "eloop: could not process SIGINT or SIGTERM in "
794 "two seconds. Looks like there\n"
795 "is a bug that ends up in a busy loop that "
796 "prevents clean shutdown.\n"
797 "Killing program forcefully.\n");
800 #endif /* CONFIG_NATIVE_WINDOWS */
803 static void eloop_handle_signal(int sig)
807 #ifndef CONFIG_NATIVE_WINDOWS
808 if ((sig == SIGINT || sig == SIGTERM) && !eloop.pending_terminate) {
809 /* Use SIGALRM to break out from potential busy loops that
810 * would not allow the program to be killed. */
811 eloop.pending_terminate = 1;
812 signal(SIGALRM, eloop_handle_alarm);
815 #endif /* CONFIG_NATIVE_WINDOWS */
818 for (i = 0; i < eloop.signal_count; i++) {
819 if (eloop.signals[i].sig == sig) {
820 eloop.signals[i].signaled++;
827 static void eloop_process_pending_signals(void)
831 if (eloop.signaled == 0)
835 if (eloop.pending_terminate) {
836 #ifndef CONFIG_NATIVE_WINDOWS
838 #endif /* CONFIG_NATIVE_WINDOWS */
839 eloop.pending_terminate = 0;
842 for (i = 0; i < eloop.signal_count; i++) {
843 if (eloop.signals[i].signaled) {
844 eloop.signals[i].signaled = 0;
845 eloop.signals[i].handler(eloop.signals[i].sig,
846 eloop.signals[i].user_data);
852 int eloop_register_signal(int sig, eloop_signal_handler handler,
855 struct eloop_signal *tmp;
857 tmp = os_realloc_array(eloop.signals, eloop.signal_count + 1,
858 sizeof(struct eloop_signal));
862 tmp[eloop.signal_count].sig = sig;
863 tmp[eloop.signal_count].user_data = user_data;
864 tmp[eloop.signal_count].handler = handler;
865 tmp[eloop.signal_count].signaled = 0;
866 eloop.signal_count++;
868 signal(sig, eloop_handle_signal);
874 int eloop_register_signal_terminate(eloop_signal_handler handler,
877 int ret = eloop_register_signal(SIGINT, handler, user_data);
879 ret = eloop_register_signal(SIGTERM, handler, user_data);
884 int eloop_register_signal_reconfig(eloop_signal_handler handler,
887 #ifdef CONFIG_NATIVE_WINDOWS
889 #else /* CONFIG_NATIVE_WINDOWS */
890 return eloop_register_signal(SIGHUP, handler, user_data);
891 #endif /* CONFIG_NATIVE_WINDOWS */
897 #ifdef CONFIG_ELOOP_POLL
900 #endif /* CONFIG_ELOOP_POLL */
901 #ifdef CONFIG_ELOOP_SELECT
902 fd_set *rfds, *wfds, *efds;
904 #endif /* CONFIG_ELOOP_SELECT */
905 #ifdef CONFIG_ELOOP_EPOLL
907 #endif /* CONFIG_ELOOP_EPOLL */
909 struct os_reltime tv, now;
911 #ifdef CONFIG_ELOOP_SELECT
912 rfds = os_malloc(sizeof(*rfds));
913 wfds = os_malloc(sizeof(*wfds));
914 efds = os_malloc(sizeof(*efds));
915 if (rfds == NULL || wfds == NULL || efds == NULL)
917 #endif /* CONFIG_ELOOP_SELECT */
919 while (!eloop.terminate &&
920 (!dl_list_empty(&eloop.timeout) || eloop.readers.count > 0 ||
921 eloop.writers.count > 0 || eloop.exceptions.count > 0)) {
922 struct eloop_timeout *timeout;
924 if (eloop.pending_terminate) {
926 * This may happen in some corner cases where a signal
927 * is received during a blocking operation. We need to
928 * process the pending signals and exit if requested to
929 * avoid hitting the SIGALRM limit if the blocking
930 * operation took more than two seconds.
932 eloop_process_pending_signals();
937 timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
940 os_get_reltime(&now);
941 if (os_reltime_before(&now, &timeout->time))
942 os_reltime_sub(&timeout->time, &now, &tv);
944 tv.sec = tv.usec = 0;
945 #if defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL)
946 timeout_ms = tv.sec * 1000 + tv.usec / 1000;
947 #endif /* defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL) */
948 #ifdef CONFIG_ELOOP_SELECT
950 _tv.tv_usec = tv.usec;
951 #endif /* CONFIG_ELOOP_SELECT */
954 #ifdef CONFIG_ELOOP_POLL
955 num_poll_fds = eloop_sock_table_set_fds(
956 &eloop.readers, &eloop.writers, &eloop.exceptions,
957 eloop.pollfds, eloop.pollfds_map,
958 eloop.max_pollfd_map);
959 res = poll(eloop.pollfds, num_poll_fds,
960 timeout ? timeout_ms : -1);
961 #endif /* CONFIG_ELOOP_POLL */
962 #ifdef CONFIG_ELOOP_SELECT
963 eloop_sock_table_set_fds(&eloop.readers, rfds);
964 eloop_sock_table_set_fds(&eloop.writers, wfds);
965 eloop_sock_table_set_fds(&eloop.exceptions, efds);
966 res = select(eloop.max_sock + 1, rfds, wfds, efds,
967 timeout ? &_tv : NULL);
968 #endif /* CONFIG_ELOOP_SELECT */
969 #ifdef CONFIG_ELOOP_EPOLL
970 if (eloop.count == 0) {
973 res = epoll_wait(eloop.epollfd, eloop.epoll_events,
974 eloop.count, timeout_ms);
976 #endif /* CONFIG_ELOOP_EPOLL */
977 if (res < 0 && errno != EINTR && errno != 0) {
978 wpa_printf(MSG_ERROR, "eloop: %s: %s",
979 #ifdef CONFIG_ELOOP_POLL
981 #endif /* CONFIG_ELOOP_POLL */
982 #ifdef CONFIG_ELOOP_SELECT
984 #endif /* CONFIG_ELOOP_SELECT */
985 #ifdef CONFIG_ELOOP_EPOLL
987 #endif /* CONFIG_ELOOP_EPOLL */
992 eloop.readers.changed = 0;
993 eloop.writers.changed = 0;
994 eloop.exceptions.changed = 0;
996 eloop_process_pending_signals();
998 /* check if some registered timeouts have occurred */
999 timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
1002 os_get_reltime(&now);
1003 if (!os_reltime_before(&now, &timeout->time)) {
1004 void *eloop_data = timeout->eloop_data;
1005 void *user_data = timeout->user_data;
1006 eloop_timeout_handler handler =
1008 eloop_remove_timeout(timeout);
1009 handler(eloop_data, user_data);
1017 if (eloop.readers.changed ||
1018 eloop.writers.changed ||
1019 eloop.exceptions.changed) {
1021 * Sockets may have been closed and reopened with the
1022 * same FD in the signal or timeout handlers, so we
1023 * must skip the previous results and check again
1024 * whether any of the currently registered sockets have
1030 #ifdef CONFIG_ELOOP_POLL
1031 eloop_sock_table_dispatch(&eloop.readers, &eloop.writers,
1032 &eloop.exceptions, eloop.pollfds_map,
1033 eloop.max_pollfd_map);
1034 #endif /* CONFIG_ELOOP_POLL */
1035 #ifdef CONFIG_ELOOP_SELECT
1036 eloop_sock_table_dispatch(&eloop.readers, rfds);
1037 eloop_sock_table_dispatch(&eloop.writers, wfds);
1038 eloop_sock_table_dispatch(&eloop.exceptions, efds);
1039 #endif /* CONFIG_ELOOP_SELECT */
1040 #ifdef CONFIG_ELOOP_EPOLL
1041 eloop_sock_table_dispatch(eloop.epoll_events, res);
1042 #endif /* CONFIG_ELOOP_EPOLL */
1045 eloop.terminate = 0;
1047 #ifdef CONFIG_ELOOP_SELECT
1051 #endif /* CONFIG_ELOOP_SELECT */
1056 void eloop_terminate(void)
1058 eloop.terminate = 1;
1062 void eloop_destroy(void)
1064 struct eloop_timeout *timeout, *prev;
1065 struct os_reltime now;
1067 os_get_reltime(&now);
1068 dl_list_for_each_safe(timeout, prev, &eloop.timeout,
1069 struct eloop_timeout, list) {
1071 sec = timeout->time.sec - now.sec;
1072 usec = timeout->time.usec - now.usec;
1073 if (timeout->time.usec < now.usec) {
1077 wpa_printf(MSG_INFO, "ELOOP: remaining timeout: %d.%06d "
1078 "eloop_data=%p user_data=%p handler=%p",
1079 sec, usec, timeout->eloop_data, timeout->user_data,
1081 wpa_trace_dump_funcname("eloop unregistered timeout handler",
1083 wpa_trace_dump("eloop timeout", timeout);
1084 eloop_remove_timeout(timeout);
1086 eloop_sock_table_destroy(&eloop.readers);
1087 eloop_sock_table_destroy(&eloop.writers);
1088 eloop_sock_table_destroy(&eloop.exceptions);
1089 os_free(eloop.signals);
1091 #ifdef CONFIG_ELOOP_POLL
1092 os_free(eloop.pollfds);
1093 os_free(eloop.pollfds_map);
1094 #endif /* CONFIG_ELOOP_POLL */
1095 #ifdef CONFIG_ELOOP_EPOLL
1096 os_free(eloop.epoll_table);
1097 os_free(eloop.epoll_events);
1098 close(eloop.epollfd);
1099 #endif /* CONFIG_ELOOP_EPOLL */
1103 int eloop_terminated(void)
1105 return eloop.terminate || eloop.pending_terminate;
/* Block (outside the main loop) until sock becomes readable. No-op for a
 * negative fd. */
void eloop_wait_for_read_sock(int sock)
{
#ifdef CONFIG_ELOOP_POLL
	struct pollfd pfd;

	if (sock < 0)
		return;

	os_memset(&pfd, 0, sizeof(pfd));
	pfd.fd = sock;
	pfd.events = POLLIN;

	poll(&pfd, 1, -1);
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL)
	/*
	 * We can use epoll() here. But epoll() requres 4 system calls.
	 * epoll_create1(), epoll_ctl() for ADD, epoll_wait, and close() for
	 * epoll fd. So select() is better for performance here.
	 */
	fd_set rfds;

	if (sock < 0)
		return;

	FD_ZERO(&rfds);
	FD_SET(sock, &rfds);
	select(sock + 1, &rfds, NULL, NULL, NULL);
#endif /* defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL) */
}
1140 #ifdef CONFIG_ELOOP_SELECT
1141 #undef CONFIG_ELOOP_SELECT
1142 #endif /* CONFIG_ELOOP_SELECT */