/*
 * Source: FreeBSD/FreeBSD.git - contrib/wpa/src/utils/eloop.c
 * (Update hostapd/wpa_supplicant to 2.8 to fix multiple vulnerabilities.)
 */
1 /*
2  * Event loop based on select() loop
3  * Copyright (c) 2002-2009, Jouni Malinen <j@w1.fi>
4  *
5  * This software may be distributed under the terms of the BSD license.
6  * See README for more details.
7  */
8
9 #include "includes.h"
10 #include <assert.h>
11
12 #include "common.h"
13 #include "trace.h"
14 #include "list.h"
15 #include "eloop.h"
16
17 #if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_EPOLL)
18 #error Do not define both of poll and epoll
19 #endif
20
21 #if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_KQUEUE)
22 #error Do not define both of poll and kqueue
23 #endif
24
25 #if !defined(CONFIG_ELOOP_POLL) && !defined(CONFIG_ELOOP_EPOLL) && \
26     !defined(CONFIG_ELOOP_KQUEUE)
27 #define CONFIG_ELOOP_SELECT
28 #endif
29
30 #ifdef CONFIG_ELOOP_POLL
31 #include <poll.h>
32 #endif /* CONFIG_ELOOP_POLL */
33
34 #ifdef CONFIG_ELOOP_EPOLL
35 #include <sys/epoll.h>
36 #endif /* CONFIG_ELOOP_EPOLL */
37
38 #ifdef CONFIG_ELOOP_KQUEUE
39 #include <sys/event.h>
40 #endif /* CONFIG_ELOOP_KQUEUE */
41
/*
 * One registered socket: the descriptor, the callback to invoke when it
 * becomes ready, and the two opaque context pointers handed back to the
 * callback.
 */
struct eloop_sock {
	int sock;			/* monitored file descriptor */
	void *eloop_data;		/* first context pointer for handler */
	void *user_data;		/* second context pointer for handler */
	eloop_sock_handler handler;	/* called when the socket is ready */
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};
51
/*
 * One registered timeout. Entries are linked into eloop.timeout in order
 * of increasing absolute (relative-clock) expiry time.
 */
struct eloop_timeout {
	struct dl_list list;		/* linkage in eloop.timeout */
	struct os_reltime time;		/* absolute expiry time */
	void *eloop_data;		/* first context pointer for handler */
	void *user_data;		/* second context pointer for handler */
	eloop_timeout_handler handler;	/* called when the timeout expires */
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};
62
/*
 * One registered signal handler. 'signaled' is presumably set from the
 * actual signal handler and processed later from the main loop - the
 * delivery code is outside this chunk, so confirm there.
 */
struct eloop_signal {
	int sig;			/* signal number */
	void *user_data;		/* context pointer for handler */
	eloop_signal_handler handler;	/* user callback for the signal */
	int signaled;			/* non-zero when delivery is pending */
};
69
/*
 * Dynamically sized array of socket registrations of one event type.
 * 'changed' is set whenever the table is modified so that dispatch loops
 * iterating over it can stop before touching stale entries.
 */
struct eloop_sock_table {
	int count;			/* number of valid entries in table */
	struct eloop_sock *table;	/* realloc'ed array of registrations */
	eloop_event_type type;		/* read/write/exception; only
					 * initialized for epoll/kqueue builds
					 * (see eloop_init()) */
	int changed;			/* table modified during dispatch */
};
76
/*
 * All state of the event loop: socket tables for the three event types,
 * the ordered timeout list, signal bookkeeping, and per-backend
 * (poll/epoll/kqueue) scratch structures.
 */
struct eloop_data {
	int max_sock;		/* highest registered descriptor number */

	int count; /* sum of all table counts */
#ifdef CONFIG_ELOOP_POLL
	int max_pollfd_map; /* number of pollfds_map currently allocated */
	int max_poll_fds; /* number of pollfds currently allocated */
	struct pollfd *pollfds;		/* array handed to poll() */
	struct pollfd **pollfds_map;	/* fd -> pollfd entry lookup */
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	int max_fd;			/* entries allocated in fd_table */
	struct eloop_sock *fd_table;	/* fd-indexed copy of registrations */
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
#ifdef CONFIG_ELOOP_EPOLL
	int epollfd;			/* epoll instance descriptor */
	int epoll_max_event_num;	/* capacity of epoll_events */
	struct epoll_event *epoll_events; /* scratch array for epoll_wait() */
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	int kqueuefd;			/* kqueue instance descriptor */
	int kqueue_nevents;		/* capacity of kqueue_events */
	struct kevent *kqueue_events;	/* scratch array for kevent() */
#endif /* CONFIG_ELOOP_KQUEUE */
	struct eloop_sock_table readers;	/* EVENT_TYPE_READ sockets */
	struct eloop_sock_table writers;	/* EVENT_TYPE_WRITE sockets */
	struct eloop_sock_table exceptions;	/* EVENT_TYPE_EXCEPTION */

	struct dl_list timeout;		/* eloop_timeout list, soonest first */

	int signal_count;		/* entries in signals[] */
	struct eloop_signal *signals;	/* registered signal handlers */
	int signaled;			/* any signal pending */
	int pending_terminate;		/* termination requested via signal */

	int terminate;			/* main loop should exit */
};
114
115 static struct eloop_data eloop;
116
117
118 #ifdef WPA_TRACE
119
/*
 * SIGSEGV handler installed in WPA_TRACE builds: dump the current trace
 * backtrace for post-mortem debugging and abort the process.
 */
static void eloop_sigsegv_handler(int sig)
{
	wpa_trace_show("eloop SIGSEGV");
	abort();
}
125
/*
 * Register trace references for every context pointer stored in @table so
 * leak reports can attribute them. Safe no-op for NULL or empty tables.
 * Must be paired with eloop_trace_sock_remove_ref() before the entries
 * move (e.g. across realloc).
 */
static void eloop_trace_sock_add_ref(struct eloop_sock_table *table)
{
	int i;
	if (table == NULL || table->table == NULL)
		return;
	for (i = 0; i < table->count; i++) {
		wpa_trace_add_ref(&table->table[i], eloop,
				  table->table[i].eloop_data);
		wpa_trace_add_ref(&table->table[i], user,
				  table->table[i].user_data);
	}
}
138
139
/*
 * Drop the trace references added by eloop_trace_sock_add_ref() for every
 * entry in @table. Safe no-op for NULL or empty tables.
 */
static void eloop_trace_sock_remove_ref(struct eloop_sock_table *table)
{
	int i;
	if (table == NULL || table->table == NULL)
		return;
	for (i = 0; i < table->count; i++) {
		wpa_trace_remove_ref(&table->table[i], eloop,
				     table->table[i].eloop_data);
		wpa_trace_remove_ref(&table->table[i], user,
				     table->table[i].user_data);
	}
}
152
153 #else /* WPA_TRACE */
154
155 #define eloop_trace_sock_add_ref(table) do { } while (0)
156 #define eloop_trace_sock_remove_ref(table) do { } while (0)
157
158 #endif /* WPA_TRACE */
159
160
/*
 * Initialize the global event loop state. Must be called once before any
 * other eloop_*() function. Returns 0 on success or -1 if the kernel
 * event queue (epoll/kqueue) cannot be created.
 */
int eloop_init(void)
{
	os_memset(&eloop, 0, sizeof(eloop));
	dl_list_init(&eloop.timeout);
#ifdef CONFIG_ELOOP_EPOLL
	eloop.epollfd = epoll_create1(0);
	if (eloop.epollfd < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_create1 failed. %s",
			   __func__, strerror(errno));
		return -1;
	}
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	eloop.kqueuefd = kqueue();
	if (eloop.kqueuefd < 0) {
		wpa_printf(MSG_ERROR, "%s: kqueue failed: %s",
			   __func__, strerror(errno));
		return -1;
	}
#endif /* CONFIG_ELOOP_KQUEUE */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	/* The table type is only consulted on the epoll/kqueue paths */
	eloop.readers.type = EVENT_TYPE_READ;
	eloop.writers.type = EVENT_TYPE_WRITE;
	eloop.exceptions.type = EVENT_TYPE_EXCEPTION;
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
#ifdef WPA_TRACE
	/* Dump a backtrace on crash in debug builds */
	signal(SIGSEGV, eloop_sigsegv_handler);
#endif /* WPA_TRACE */
	return 0;
}
191
192
193 #ifdef CONFIG_ELOOP_EPOLL
/*
 * Register @sock with the epoll instance for the events implied by @type.
 * Returns 0 on success, -1 if epoll_ctl() fails (e.g. the fd is already
 * registered).
 */
static int eloop_sock_queue(int sock, eloop_event_type type)
{
	struct epoll_event ev;

	os_memset(&ev, 0, sizeof(ev));
	switch (type) {
	case EVENT_TYPE_READ:
		ev.events = EPOLLIN;
		break;
	case EVENT_TYPE_WRITE:
		ev.events = EPOLLOUT;
		break;
	/*
	 * Exceptions are always checked when using epoll, but I suppose it's
	 * possible that someone registered a socket *only* for exception
	 * handling.
	 */
	case EVENT_TYPE_EXCEPTION:
		ev.events = EPOLLERR | EPOLLHUP;
		break;
	}
	ev.data.fd = sock;
	if (epoll_ctl(eloop.epollfd, EPOLL_CTL_ADD, sock, &ev) < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_ctl(ADD) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return -1;
	}
	return 0;
}
223 #endif /* CONFIG_ELOOP_EPOLL */
224
225
226 #ifdef CONFIG_ELOOP_KQUEUE
227
228 static short event_type_kevent_filter(eloop_event_type type)
229 {
230         switch (type) {
231         case EVENT_TYPE_READ:
232                 return EVFILT_READ;
233         case EVENT_TYPE_WRITE:
234                 return EVFILT_WRITE;
235         default:
236                 return 0;
237         }
238 }
239
240
/*
 * Register @sock with the kqueue instance using the filter that matches
 * @type. Returns 0 on success, -1 if kevent() fails. Note that
 * EVENT_TYPE_EXCEPTION maps to filter 0 (see event_type_kevent_filter()).
 */
static int eloop_sock_queue(int sock, eloop_event_type type)
{
	struct kevent ke;

	EV_SET(&ke, sock, event_type_kevent_filter(type), EV_ADD, 0, 0, 0);
	if (kevent(eloop.kqueuefd, &ke, 1, NULL, 0, NULL) == -1) {
		wpa_printf(MSG_ERROR, "%s: kevent(ADD) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return -1;
	}
	return 0;
}
253
254 #endif /* CONFIG_ELOOP_KQUEUE */
255
256
/*
 * Add a socket registration to @table, growing the backend-specific
 * bookkeeping arrays (pollfd map/array, fd_table, event scratch buffers)
 * as needed. Returns 0 on success, -1 on allocation or backend
 * registration failure.
 *
 * NOTE(review): on the epoll/kqueue path, if eloop_sock_queue() fails the
 * entry has already been appended to @table but not registered with the
 * kernel queue, leaving the two out of sync - confirm callers treat -1
 * as fatal.
 */
static int eloop_sock_table_add_sock(struct eloop_sock_table *table,
				     int sock, eloop_sock_handler handler,
				     void *eloop_data, void *user_data)
{
#ifdef CONFIG_ELOOP_EPOLL
	struct epoll_event *temp_events;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	struct kevent *temp_events;
#endif /* CONFIG_ELOOP_KQUEUE */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	struct eloop_sock *temp_table;
	int next;
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
	struct eloop_sock *tmp;
	int new_max_sock;

	if (sock > eloop.max_sock)
		new_max_sock = sock;
	else
		new_max_sock = eloop.max_sock;

	if (table == NULL)
		return -1;

#ifdef CONFIG_ELOOP_POLL
	/* Grow the fd -> pollfd lookup map with headroom of 50 entries */
	if (new_max_sock >= eloop.max_pollfd_map) {
		struct pollfd **nmap;
		nmap = os_realloc_array(eloop.pollfds_map, new_max_sock + 50,
					sizeof(struct pollfd *));
		if (nmap == NULL)
			return -1;

		eloop.max_pollfd_map = new_max_sock + 50;
		eloop.pollfds_map = nmap;
	}

	/* Grow the pollfd array handed to poll() */
	if (eloop.count + 1 > eloop.max_poll_fds) {
		struct pollfd *n;
		int nmax = eloop.count + 1 + 50;
		n = os_realloc_array(eloop.pollfds, nmax,
				     sizeof(struct pollfd));
		if (n == NULL)
			return -1;

		eloop.max_poll_fds = nmax;
		eloop.pollfds = n;
	}
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	/* Grow the fd-indexed registration table */
	if (new_max_sock >= eloop.max_fd) {
		next = new_max_sock + 16;
		temp_table = os_realloc_array(eloop.fd_table, next,
					      sizeof(struct eloop_sock));
		if (temp_table == NULL)
			return -1;

		eloop.max_fd = next;
		eloop.fd_table = temp_table;
	}
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */

#ifdef CONFIG_ELOOP_EPOLL
	/* Grow the epoll_wait() scratch array (doubling, starting at 8) */
	if (eloop.count + 1 > eloop.epoll_max_event_num) {
		next = eloop.epoll_max_event_num == 0 ? 8 :
			eloop.epoll_max_event_num * 2;
		temp_events = os_realloc_array(eloop.epoll_events, next,
					       sizeof(struct epoll_event));
		if (temp_events == NULL) {
			wpa_printf(MSG_ERROR, "%s: malloc for epoll failed: %s",
				   __func__, strerror(errno));
			return -1;
		}

		eloop.epoll_max_event_num = next;
		eloop.epoll_events = temp_events;
	}
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	/*
	 * Grow the kevent() scratch array. The old contents are scratch
	 * only, so a plain malloc + free of the old buffer suffices.
	 */
	if (eloop.count + 1 > eloop.kqueue_nevents) {
		next = eloop.kqueue_nevents == 0 ? 8 : eloop.kqueue_nevents * 2;
		temp_events = os_malloc(next * sizeof(*temp_events));
		if (!temp_events) {
			wpa_printf(MSG_ERROR,
				   "%s: malloc for kqueue failed: %s",
				   __func__, strerror(errno));
			return -1;
		}

		os_free(eloop.kqueue_events);
		eloop.kqueue_events = temp_events;
		eloop.kqueue_nevents = next;
	}
#endif /* CONFIG_ELOOP_KQUEUE */

	/* Drop trace refs while realloc may move the entries */
	eloop_trace_sock_remove_ref(table);
	tmp = os_realloc_array(table->table, table->count + 1,
			       sizeof(struct eloop_sock));
	if (tmp == NULL) {
		eloop_trace_sock_add_ref(table);
		return -1;
	}

	tmp[table->count].sock = sock;
	tmp[table->count].eloop_data = eloop_data;
	tmp[table->count].user_data = user_data;
	tmp[table->count].handler = handler;
	wpa_trace_record(&tmp[table->count]);
	table->count++;
	table->table = tmp;
	eloop.max_sock = new_max_sock;
	eloop.count++;
	table->changed = 1;	/* tell any active dispatch loop to stop */
	eloop_trace_sock_add_ref(table);

#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	if (eloop_sock_queue(sock, table->type) < 0)
		return -1;
	/* Keep the fd-indexed copy in sync with the table entry */
	os_memcpy(&eloop.fd_table[sock], &table->table[table->count - 1],
		  sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
	return 0;
}
380
381
/*
 * Remove the registration of @sock from @table (no-op if not found) and
 * deregister it from the kernel event queue on the epoll/kqueue paths.
 * The entry array is compacted in place; the allocation is not shrunk.
 */
static void eloop_sock_table_remove_sock(struct eloop_sock_table *table,
					 int sock)
{
#ifdef CONFIG_ELOOP_KQUEUE
	struct kevent ke;
#endif /* CONFIG_ELOOP_KQUEUE */
	int i;

	if (table == NULL || table->table == NULL || table->count == 0)
		return;

	for (i = 0; i < table->count; i++) {
		if (table->table[i].sock == sock)
			break;
	}
	if (i == table->count)
		return;
	eloop_trace_sock_remove_ref(table);
	/* Close the gap by shifting the tail down one slot */
	if (i != table->count - 1) {
		os_memmove(&table->table[i], &table->table[i + 1],
			   (table->count - i - 1) *
			   sizeof(struct eloop_sock));
	}
	table->count--;
	eloop.count--;
	table->changed = 1;	/* tell any active dispatch loop to stop */
	eloop_trace_sock_add_ref(table);
#ifdef CONFIG_ELOOP_EPOLL
	if (epoll_ctl(eloop.epollfd, EPOLL_CTL_DEL, sock, NULL) < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_ctl(DEL) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return;
	}
	os_memset(&eloop.fd_table[sock], 0, sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	EV_SET(&ke, sock, event_type_kevent_filter(table->type), EV_DELETE, 0,
	       0, 0);
	if (kevent(eloop.kqueuefd, &ke, 1, NULL, 0, NULL) < 0) {
		wpa_printf(MSG_ERROR, "%s: kevent(DEL) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return;
	}
	os_memset(&eloop.fd_table[sock], 0, sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_KQUEUE */
}
428
429
430 #ifdef CONFIG_ELOOP_POLL
431
/*
 * Look up the pollfd entry registered for descriptor @fd in the lookup
 * map of size @mx. Returns NULL when fd is out of range or has no entry.
 */
static struct pollfd * find_pollfd(struct pollfd **pollfds_map, int fd, int mx)
{
	if (fd < 0 || fd >= mx)
		return NULL;
	return pollfds_map[fd];
}
438
439
440 static int eloop_sock_table_set_fds(struct eloop_sock_table *readers,
441                                     struct eloop_sock_table *writers,
442                                     struct eloop_sock_table *exceptions,
443                                     struct pollfd *pollfds,
444                                     struct pollfd **pollfds_map,
445                                     int max_pollfd_map)
446 {
447         int i;
448         int nxt = 0;
449         int fd;
450         struct pollfd *pfd;
451
452         /* Clear pollfd lookup map. It will be re-populated below. */
453         os_memset(pollfds_map, 0, sizeof(struct pollfd *) * max_pollfd_map);
454
455         if (readers && readers->table) {
456                 for (i = 0; i < readers->count; i++) {
457                         fd = readers->table[i].sock;
458                         assert(fd >= 0 && fd < max_pollfd_map);
459                         pollfds[nxt].fd = fd;
460                         pollfds[nxt].events = POLLIN;
461                         pollfds[nxt].revents = 0;
462                         pollfds_map[fd] = &(pollfds[nxt]);
463                         nxt++;
464                 }
465         }
466
467         if (writers && writers->table) {
468                 for (i = 0; i < writers->count; i++) {
469                         /*
470                          * See if we already added this descriptor, update it
471                          * if so.
472                          */
473                         fd = writers->table[i].sock;
474                         assert(fd >= 0 && fd < max_pollfd_map);
475                         pfd = pollfds_map[fd];
476                         if (!pfd) {
477                                 pfd = &(pollfds[nxt]);
478                                 pfd->events = 0;
479                                 pfd->fd = fd;
480                                 pollfds[i].revents = 0;
481                                 pollfds_map[fd] = pfd;
482                                 nxt++;
483                         }
484                         pfd->events |= POLLOUT;
485                 }
486         }
487
488         /*
489          * Exceptions are always checked when using poll, but I suppose it's
490          * possible that someone registered a socket *only* for exception
491          * handling. Set the POLLIN bit in this case.
492          */
493         if (exceptions && exceptions->table) {
494                 for (i = 0; i < exceptions->count; i++) {
495                         /*
496                          * See if we already added this descriptor, just use it
497                          * if so.
498                          */
499                         fd = exceptions->table[i].sock;
500                         assert(fd >= 0 && fd < max_pollfd_map);
501                         pfd = pollfds_map[fd];
502                         if (!pfd) {
503                                 pfd = &(pollfds[nxt]);
504                                 pfd->events = POLLIN;
505                                 pfd->fd = fd;
506                                 pollfds[i].revents = 0;
507                                 pollfds_map[fd] = pfd;
508                                 nxt++;
509                         }
510                 }
511         }
512
513         return nxt;
514 }
515
516
/*
 * Invoke the handler of every socket in @table whose pollfd entry has one
 * of the @revents bits set. Returns 1 if a handler modified any socket
 * table (table->changed), in which case the caller must stop using the
 * current pollfds array; returns 0 otherwise.
 */
static int eloop_sock_table_dispatch_table(struct eloop_sock_table *table,
					   struct pollfd **pollfds_map,
					   int max_pollfd_map,
					   short int revents)
{
	int i;
	struct pollfd *pfd;

	if (!table || !table->table)
		return 0;

	table->changed = 0;
	for (i = 0; i < table->count; i++) {
		pfd = find_pollfd(pollfds_map, table->table[i].sock,
				  max_pollfd_map);
		if (!pfd)
			continue;

		if (!(pfd->revents & revents))
			continue;

		table->table[i].handler(table->table[i].sock,
					table->table[i].eloop_data,
					table->table[i].user_data);
		/* Handler may have (un)registered sockets; indices stale */
		if (table->changed)
			return 1;
	}

	return 0;
}
547
548
/*
 * Dispatch all ready sockets after poll() returned: readers first, then
 * writers, then exception-only registrations. Stops early if a handler
 * modified the socket tables, since the pollfds array is then stale.
 */
static void eloop_sock_table_dispatch(struct eloop_sock_table *readers,
				      struct eloop_sock_table *writers,
				      struct eloop_sock_table *exceptions,
				      struct pollfd **pollfds_map,
				      int max_pollfd_map)
{
	if (eloop_sock_table_dispatch_table(readers, pollfds_map,
					    max_pollfd_map, POLLIN | POLLERR |
					    POLLHUP))
		return; /* pollfds may be invalid at this point */

	if (eloop_sock_table_dispatch_table(writers, pollfds_map,
					    max_pollfd_map, POLLOUT))
		return; /* pollfds may be invalid at this point */

	eloop_sock_table_dispatch_table(exceptions, pollfds_map,
					max_pollfd_map, POLLERR | POLLHUP);
}
567
568 #endif /* CONFIG_ELOOP_POLL */
569
570 #ifdef CONFIG_ELOOP_SELECT
571
572 static void eloop_sock_table_set_fds(struct eloop_sock_table *table,
573                                      fd_set *fds)
574 {
575         int i;
576
577         FD_ZERO(fds);
578
579         if (table->table == NULL)
580                 return;
581
582         for (i = 0; i < table->count; i++) {
583                 assert(table->table[i].sock >= 0);
584                 FD_SET(table->table[i].sock, fds);
585         }
586 }
587
588
589 static void eloop_sock_table_dispatch(struct eloop_sock_table *table,
590                                       fd_set *fds)
591 {
592         int i;
593
594         if (table == NULL || table->table == NULL)
595                 return;
596
597         table->changed = 0;
598         for (i = 0; i < table->count; i++) {
599                 if (FD_ISSET(table->table[i].sock, fds)) {
600                         table->table[i].handler(table->table[i].sock,
601                                                 table->table[i].eloop_data,
602                                                 table->table[i].user_data);
603                         if (table->changed)
604                                 break;
605                 }
606         }
607 }
608
609 #endif /* CONFIG_ELOOP_SELECT */
610
611
612 #ifdef CONFIG_ELOOP_EPOLL
/*
 * Invoke the handler for each fd reported ready by epoll_wait(). The
 * handler is looked up via the fd-indexed eloop.fd_table. Stops early if
 * any handler modified a socket table, since the remaining events may
 * refer to unregistered descriptors.
 */
static void eloop_sock_table_dispatch(struct epoll_event *events, int nfds)
{
	struct eloop_sock *table;
	int i;

	for (i = 0; i < nfds; i++) {
		table = &eloop.fd_table[events[i].data.fd];
		if (table->handler == NULL)
			continue;
		table->handler(table->sock, table->eloop_data,
			       table->user_data);
		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed)
			break;
	}
}
630 #endif /* CONFIG_ELOOP_EPOLL */
631
632
633 #ifdef CONFIG_ELOOP_KQUEUE
634
/*
 * Invoke the handler for each fd reported ready by kevent(). The handler
 * is looked up via the fd-indexed eloop.fd_table (kevent ident is the
 * fd). Stops early if any handler modified a socket table.
 */
static void eloop_sock_table_dispatch(struct kevent *events, int nfds)
{
	struct eloop_sock *table;
	int i;

	for (i = 0; i < nfds; i++) {
		table = &eloop.fd_table[events[i].ident];
		if (table->handler == NULL)
			continue;
		table->handler(table->sock, table->eloop_data,
			       table->user_data);
		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed)
			break;
	}
}
652
653
654 static int eloop_sock_table_requeue(struct eloop_sock_table *table)
655 {
656         int i, r;
657
658         r = 0;
659         for (i = 0; i < table->count && table->table; i++) {
660                 if (eloop_sock_queue(table->table[i].sock, table->type) == -1)
661                         r = -1;
662         }
663         return r;
664 }
665
666 #endif /* CONFIG_ELOOP_KQUEUE */
667
668
/*
 * Recreate the kernel event queue and re-register all sockets with it.
 * Only does real work on the kqueue backend (presumably because kqueue
 * descriptors do not survive fork() - confirm against callers); a no-op
 * returning 0 on other backends. Returns -1 if the queue cannot be
 * recreated or any socket fails to re-register.
 */
int eloop_sock_requeue(void)
{
	int r = 0;

#ifdef CONFIG_ELOOP_KQUEUE
	close(eloop.kqueuefd);
	eloop.kqueuefd = kqueue();
	if (eloop.kqueuefd < 0) {
		wpa_printf(MSG_ERROR, "%s: kqueue failed: %s",
			   __func__, strerror(errno));
		return -1;
	}

	if (eloop_sock_table_requeue(&eloop.readers) < 0)
		r = -1;
	if (eloop_sock_table_requeue(&eloop.writers) < 0)
		r = -1;
	if (eloop_sock_table_requeue(&eloop.exceptions) < 0)
		r = -1;
#endif /* CONFIG_ELOOP_KQUEUE */

	return r;
}
692
693
/*
 * Free a socket table, logging every registration that was never
 * unregistered (a likely leak in the caller). Does not close the socket
 * descriptors themselves - they are owned by the registrants.
 */
static void eloop_sock_table_destroy(struct eloop_sock_table *table)
{
	if (table) {
		int i;
		for (i = 0; i < table->count && table->table; i++) {
			wpa_printf(MSG_INFO, "ELOOP: remaining socket: "
				   "sock=%d eloop_data=%p user_data=%p "
				   "handler=%p",
				   table->table[i].sock,
				   table->table[i].eloop_data,
				   table->table[i].user_data,
				   table->table[i].handler);
			wpa_trace_dump_funcname("eloop unregistered socket "
						"handler",
						table->table[i].handler);
			wpa_trace_dump("eloop sock", &table->table[i]);
		}
		os_free(table->table);
	}
}
714
715
/*
 * Convenience wrapper: register @sock for read events. Returns 0 on
 * success, -1 on failure (see eloop_register_sock()).
 */
int eloop_register_read_sock(int sock, eloop_sock_handler handler,
			     void *eloop_data, void *user_data)
{
	return eloop_register_sock(sock, EVENT_TYPE_READ, handler,
				   eloop_data, user_data);
}
722
723
/* Convenience wrapper: remove the read-event registration of @sock. */
void eloop_unregister_read_sock(int sock)
{
	eloop_unregister_sock(sock, EVENT_TYPE_READ);
}
728
729
730 static struct eloop_sock_table *eloop_get_sock_table(eloop_event_type type)
731 {
732         switch (type) {
733         case EVENT_TYPE_READ:
734                 return &eloop.readers;
735         case EVENT_TYPE_WRITE:
736                 return &eloop.writers;
737         case EVENT_TYPE_EXCEPTION:
738                 return &eloop.exceptions;
739         }
740
741         return NULL;
742 }
743
744
/*
 * Register @sock for events of @type; @handler is invoked with the two
 * context pointers when the socket becomes ready. @sock must be a valid
 * (non-negative) descriptor. Returns 0 on success, -1 on failure
 * (including an unknown @type, which yields a NULL table).
 */
int eloop_register_sock(int sock, eloop_event_type type,
			eloop_sock_handler handler,
			void *eloop_data, void *user_data)
{
	struct eloop_sock_table *table;

	assert(sock >= 0);
	table = eloop_get_sock_table(type);
	return eloop_sock_table_add_sock(table, sock, handler,
					 eloop_data, user_data);
}
756
757
/*
 * Remove the registration of @sock for events of @type. No-op if the
 * socket is not registered or @type is unknown.
 */
void eloop_unregister_sock(int sock, eloop_event_type type)
{
	struct eloop_sock_table *table;

	table = eloop_get_sock_table(type);
	eloop_sock_table_remove_sock(table, sock);
}
765
766
/*
 * Register @handler to be called once after secs + usecs from now, with
 * the two context pointers. Returns 0 on success (including the case of
 * an absurdly long timeout that is treated as "never" and silently
 * dropped) or -1 on allocation/clock failure.
 */
int eloop_register_timeout(unsigned int secs, unsigned int usecs,
			   eloop_timeout_handler handler,
			   void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *tmp;
	os_time_t now_sec;

	timeout = os_zalloc(sizeof(*timeout));
	if (timeout == NULL)
		return -1;
	if (os_get_reltime(&timeout->time) < 0) {
		os_free(timeout);
		return -1;
	}
	now_sec = timeout->time.sec;
	timeout->time.sec += secs;
	if (timeout->time.sec < now_sec) {
		/*
		 * Integer overflow - assume long enough timeout to be assumed
		 * to be infinite, i.e., the timeout would never happen.
		 */
		wpa_printf(MSG_DEBUG, "ELOOP: Too long timeout (secs=%u) to "
			   "ever happen - ignore it", secs);
		os_free(timeout);
		return 0;
	}
	/*
	 * Normalize usec into [0, 1000000). NOTE(review): the sec increment
	 * here is not re-checked for overflow; only relevant if secs is
	 * already near the os_time_t maximum.
	 */
	timeout->time.usec += usecs;
	while (timeout->time.usec >= 1000000) {
		timeout->time.sec++;
		timeout->time.usec -= 1000000;
	}
	timeout->eloop_data = eloop_data;
	timeout->user_data = user_data;
	timeout->handler = handler;
	wpa_trace_add_ref(timeout, eloop, eloop_data);
	wpa_trace_add_ref(timeout, user, user_data);
	wpa_trace_record(timeout);

	/* Maintain timeouts in order of increasing time */
	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (os_reltime_before(&timeout->time, &tmp->time)) {
			dl_list_add(tmp->list.prev, &timeout->list);
			return 0;
		}
	}
	dl_list_add_tail(&eloop.timeout, &timeout->list);

	return 0;
}
816
817
/*
 * Unlink @timeout from the timeout list, drop its trace references and
 * free it. The pointer is invalid afterwards.
 */
static void eloop_remove_timeout(struct eloop_timeout *timeout)
{
	dl_list_del(&timeout->list);
	wpa_trace_remove_ref(timeout, eloop, timeout->eloop_data);
	wpa_trace_remove_ref(timeout, user, timeout->user_data);
	os_free(timeout);
}
825
826
/*
 * Cancel all pending timeouts that match @handler and the two context
 * pointers; ELOOP_ALL_CTX acts as a wildcard for either context value.
 * Returns the number of timeouts removed.
 */
int eloop_cancel_timeout(eloop_timeout_handler handler,
			 void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;

	/* Safe iteration: entries are freed while walking the list */
	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data ||
		     eloop_data == ELOOP_ALL_CTX) &&
		    (timeout->user_data == user_data ||
		     user_data == ELOOP_ALL_CTX)) {
			eloop_remove_timeout(timeout);
			removed++;
		}
	}

	return removed;
}
847
848
/*
 * Cancel at most one pending timeout that exactly matches @handler and
 * both context pointers (no wildcards). On removal, *remaining is set to
 * the time left until expiry (zero if it had already expired). Returns 1
 * if a timeout was removed, 0 otherwise; *remaining is zeroed either way
 * before being filled in.
 */
int eloop_cancel_timeout_one(eloop_timeout_handler handler,
			     void *eloop_data, void *user_data,
			     struct os_reltime *remaining)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;
	struct os_reltime now;

	os_get_reltime(&now);
	remaining->sec = remaining->usec = 0;

	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data) &&
		    (timeout->user_data == user_data)) {
			removed = 1;
			/* Report time left only for a not-yet-due timeout */
			if (os_reltime_before(&now, &timeout->time))
				os_reltime_sub(&timeout->time, &now, remaining);
			eloop_remove_timeout(timeout);
			break;
		}
	}
	return removed;
}
874
875
/*
 * Return 1 if a timeout with exactly this handler and both context
 * pointers is currently pending, 0 otherwise.
 */
int eloop_is_timeout_registered(eloop_timeout_handler handler,
				void *eloop_data, void *user_data)
{
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data)
			return 1;
	}

	return 0;
}
890
891
892 int eloop_deplete_timeout(unsigned int req_secs, unsigned int req_usecs,
893                           eloop_timeout_handler handler, void *eloop_data,
894                           void *user_data)
895 {
896         struct os_reltime now, requested, remaining;
897         struct eloop_timeout *tmp;
898
899         dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
900                 if (tmp->handler == handler &&
901                     tmp->eloop_data == eloop_data &&
902                     tmp->user_data == user_data) {
903                         requested.sec = req_secs;
904                         requested.usec = req_usecs;
905                         os_get_reltime(&now);
906                         os_reltime_sub(&tmp->time, &now, &remaining);
907                         if (os_reltime_before(&requested, &remaining)) {
908                                 eloop_cancel_timeout(handler, eloop_data,
909                                                      user_data);
910                                 eloop_register_timeout(requested.sec,
911                                                        requested.usec,
912                                                        handler, eloop_data,
913                                                        user_data);
914                                 return 1;
915                         }
916                         return 0;
917                 }
918         }
919
920         return -1;
921 }
922
923
924 int eloop_replenish_timeout(unsigned int req_secs, unsigned int req_usecs,
925                             eloop_timeout_handler handler, void *eloop_data,
926                             void *user_data)
927 {
928         struct os_reltime now, requested, remaining;
929         struct eloop_timeout *tmp;
930
931         dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
932                 if (tmp->handler == handler &&
933                     tmp->eloop_data == eloop_data &&
934                     tmp->user_data == user_data) {
935                         requested.sec = req_secs;
936                         requested.usec = req_usecs;
937                         os_get_reltime(&now);
938                         os_reltime_sub(&tmp->time, &now, &remaining);
939                         if (os_reltime_before(&remaining, &requested)) {
940                                 eloop_cancel_timeout(handler, eloop_data,
941                                                      user_data);
942                                 eloop_register_timeout(requested.sec,
943                                                        requested.usec,
944                                                        handler, eloop_data,
945                                                        user_data);
946                                 return 1;
947                         }
948                         return 0;
949                 }
950         }
951
952         return -1;
953 }
954
955
#ifndef CONFIG_NATIVE_WINDOWS
/*
 * eloop_handle_alarm - SIGALRM watchdog for forced shutdown
 *
 * Armed (with a two second delay) by eloop_handle_signal() when SIGINT or
 * SIGTERM is received. If the event loop fails to process the pending
 * termination within that window - i.e., it is stuck in a busy loop - this
 * handler logs an error and forcefully exits the process.
 */
static void eloop_handle_alarm(int sig)
{
	wpa_printf(MSG_ERROR, "eloop: could not process SIGINT or SIGTERM in "
		   "two seconds. Looks like there\n"
		   "is a bug that ends up in a busy loop that "
		   "prevents clean shutdown.\n"
		   "Killing program forcefully.\n");
	exit(1);
}
#endif /* CONFIG_NATIVE_WINDOWS */
967
968
/*
 * eloop_handle_signal - Common low-level handler for all registered signals
 *
 * Runs in signal context, so it only records that the signal occurred by
 * incrementing per-signal and global counters; the registered user callbacks
 * are invoked later from eloop_process_pending_signals() in normal execution
 * context. For SIGINT/SIGTERM, a two-second SIGALRM watchdog
 * (eloop_handle_alarm) is armed so that a busy-looping process can still be
 * killed.
 *
 * NOTE(review): the signaled counters are plain int rather than
 * volatile sig_atomic_t - presumably acceptable on the supported platforms,
 * but worth confirming.
 */
static void eloop_handle_signal(int sig)
{
	int i;

#ifndef CONFIG_NATIVE_WINDOWS
	if ((sig == SIGINT || sig == SIGTERM) && !eloop.pending_terminate) {
		/* Use SIGALRM to break out from potential busy loops that
		 * would not allow the program to be killed. */
		eloop.pending_terminate = 1;
		signal(SIGALRM, eloop_handle_alarm);
		alarm(2);
	}
#endif /* CONFIG_NATIVE_WINDOWS */

	/* Record the signal for deferred dispatch from the main loop */
	eloop.signaled++;
	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].sig == sig) {
			eloop.signals[i].signaled++;
			break;
		}
	}
}
991
992
993 static void eloop_process_pending_signals(void)
994 {
995         int i;
996
997         if (eloop.signaled == 0)
998                 return;
999         eloop.signaled = 0;
1000
1001         if (eloop.pending_terminate) {
1002 #ifndef CONFIG_NATIVE_WINDOWS
1003                 alarm(0);
1004 #endif /* CONFIG_NATIVE_WINDOWS */
1005                 eloop.pending_terminate = 0;
1006         }
1007
1008         for (i = 0; i < eloop.signal_count; i++) {
1009                 if (eloop.signals[i].signaled) {
1010                         eloop.signals[i].signaled = 0;
1011                         eloop.signals[i].handler(eloop.signals[i].sig,
1012                                                  eloop.signals[i].user_data);
1013                 }
1014         }
1015 }
1016
1017
1018 int eloop_register_signal(int sig, eloop_signal_handler handler,
1019                           void *user_data)
1020 {
1021         struct eloop_signal *tmp;
1022
1023         tmp = os_realloc_array(eloop.signals, eloop.signal_count + 1,
1024                                sizeof(struct eloop_signal));
1025         if (tmp == NULL)
1026                 return -1;
1027
1028         tmp[eloop.signal_count].sig = sig;
1029         tmp[eloop.signal_count].user_data = user_data;
1030         tmp[eloop.signal_count].handler = handler;
1031         tmp[eloop.signal_count].signaled = 0;
1032         eloop.signal_count++;
1033         eloop.signals = tmp;
1034         signal(sig, eloop_handle_signal);
1035
1036         return 0;
1037 }
1038
1039
1040 int eloop_register_signal_terminate(eloop_signal_handler handler,
1041                                     void *user_data)
1042 {
1043         int ret = eloop_register_signal(SIGINT, handler, user_data);
1044         if (ret == 0)
1045                 ret = eloop_register_signal(SIGTERM, handler, user_data);
1046         return ret;
1047 }
1048
1049
1050 int eloop_register_signal_reconfig(eloop_signal_handler handler,
1051                                    void *user_data)
1052 {
1053 #ifdef CONFIG_NATIVE_WINDOWS
1054         return 0;
1055 #else /* CONFIG_NATIVE_WINDOWS */
1056         return eloop_register_signal(SIGHUP, handler, user_data);
1057 #endif /* CONFIG_NATIVE_WINDOWS */
1058 }
1059
1060
/*
 * eloop_run - Run the main event loop
 *
 * Repeatedly blocks in the configured I/O multiplexer (poll/select/epoll/
 * kqueue, selected at compile time) until a registered socket becomes ready,
 * the nearest timeout expires, or a signal is delivered, and dispatches the
 * corresponding handlers. Returns when eloop_terminate() has been called or
 * when there is nothing left to wait for (no timeouts and no registered
 * sockets).
 */
void eloop_run(void)
{
#ifdef CONFIG_ELOOP_POLL
	int num_poll_fds;
	int timeout_ms = 0;
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
	fd_set *rfds, *wfds, *efds;
	struct timeval _tv;
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
	int timeout_ms = -1;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	struct timespec ts;
#endif /* CONFIG_ELOOP_KQUEUE */
	int res;
	struct os_reltime tv, now;

#ifdef CONFIG_ELOOP_SELECT
	/* fd_set can be large; allocate from the heap instead of the stack */
	rfds = os_malloc(sizeof(*rfds));
	wfds = os_malloc(sizeof(*wfds));
	efds = os_malloc(sizeof(*efds));
	if (rfds == NULL || wfds == NULL || efds == NULL)
		goto out;
#endif /* CONFIG_ELOOP_SELECT */

	while (!eloop.terminate &&
	       (!dl_list_empty(&eloop.timeout) || eloop.readers.count > 0 ||
		eloop.writers.count > 0 || eloop.exceptions.count > 0)) {
		struct eloop_timeout *timeout;

		if (eloop.pending_terminate) {
			/*
			 * This may happen in some corner cases where a signal
			 * is received during a blocking operation. We need to
			 * process the pending signals and exit if requested to
			 * avoid hitting the SIGALRM limit if the blocking
			 * operation took more than two seconds.
			 */
			eloop_process_pending_signals();
			if (eloop.terminate)
				break;
		}

		/* Convert the nearest pending timeout into the wait-time
		 * format of the selected multiplexer */
		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_reltime(&now);
			if (os_reltime_before(&now, &timeout->time))
				os_reltime_sub(&timeout->time, &now, &tv);
			else
				tv.sec = tv.usec = 0; /* already due */
#if defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL)
			timeout_ms = tv.sec * 1000 + tv.usec / 1000;
#endif /* defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL) */
#ifdef CONFIG_ELOOP_SELECT
			_tv.tv_sec = tv.sec;
			_tv.tv_usec = tv.usec;
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_KQUEUE
			ts.tv_sec = tv.sec;
			ts.tv_nsec = tv.usec * 1000L;
#endif /* CONFIG_ELOOP_KQUEUE */
		}

		/* Block until I/O readiness, timeout expiry, or a signal.
		 * With no pending timeout, wait indefinitely. */
#ifdef CONFIG_ELOOP_POLL
		num_poll_fds = eloop_sock_table_set_fds(
			&eloop.readers, &eloop.writers, &eloop.exceptions,
			eloop.pollfds, eloop.pollfds_map,
			eloop.max_pollfd_map);
		res = poll(eloop.pollfds, num_poll_fds,
			   timeout ? timeout_ms : -1);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
		eloop_sock_table_set_fds(&eloop.readers, rfds);
		eloop_sock_table_set_fds(&eloop.writers, wfds);
		eloop_sock_table_set_fds(&eloop.exceptions, efds);
		res = select(eloop.max_sock + 1, rfds, wfds, efds,
			     timeout ? &_tv : NULL);
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
		if (eloop.count == 0) {
			res = 0;
		} else {
			res = epoll_wait(eloop.epollfd, eloop.epoll_events,
					 eloop.count, timeout_ms);
		}
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
		if (eloop.count == 0) {
			res = 0;
		} else {
			res = kevent(eloop.kqueuefd, NULL, 0,
				     eloop.kqueue_events, eloop.kqueue_nevents,
				     timeout ? &ts : NULL);
		}
#endif /* CONFIG_ELOOP_KQUEUE */
		/* EINTR is expected when a signal interrupts the wait */
		if (res < 0 && errno != EINTR && errno != 0) {
			wpa_printf(MSG_ERROR, "eloop: %s: %s",
#ifdef CONFIG_ELOOP_POLL
				   "poll"
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
				   "select"
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
				   "epoll"
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
				   "kqueue"
#endif /* CONFIG_ELOOP_KQUEUE */

				   , strerror(errno));
			goto out;
		}

		/* Reset change markers so modifications made by the signal
		 * and timeout handlers below can be detected */
		eloop.readers.changed = 0;
		eloop.writers.changed = 0;
		eloop.exceptions.changed = 0;

		eloop_process_pending_signals();


		/* check if some registered timeouts have occurred */
		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_reltime(&now);
			if (!os_reltime_before(&now, &timeout->time)) {
				/* Copy the context and remove the entry
				 * before invoking the handler so that the
				 * handler may safely re-register itself */
				void *eloop_data = timeout->eloop_data;
				void *user_data = timeout->user_data;
				eloop_timeout_handler handler =
					timeout->handler;
				eloop_remove_timeout(timeout);
				handler(eloop_data, user_data);
			}

		}

		if (res <= 0)
			continue;

		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed) {
			 /*
			  * Sockets may have been closed and reopened with the
			  * same FD in the signal or timeout handlers, so we
			  * must skip the previous results and check again
			  * whether any of the currently registered sockets have
			  * events.
			  */
			continue;
		}

#ifdef CONFIG_ELOOP_POLL
		eloop_sock_table_dispatch(&eloop.readers, &eloop.writers,
					  &eloop.exceptions, eloop.pollfds_map,
					  eloop.max_pollfd_map);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
		eloop_sock_table_dispatch(&eloop.readers, rfds);
		eloop_sock_table_dispatch(&eloop.writers, wfds);
		eloop_sock_table_dispatch(&eloop.exceptions, efds);
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
		eloop_sock_table_dispatch(eloop.epoll_events, res);
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
		eloop_sock_table_dispatch(eloop.kqueue_events, res);
#endif /* CONFIG_ELOOP_KQUEUE */
	}

	/* Clear the flag so the loop can be re-entered after termination */
	eloop.terminate = 0;
out:
#ifdef CONFIG_ELOOP_SELECT
	os_free(rfds);
	os_free(wfds);
	os_free(efds);
#endif /* CONFIG_ELOOP_SELECT */
	return;
}
1244
1245
/*
 * eloop_terminate - Request the event loop to stop
 *
 * Only sets a flag; eloop_run() notices it at the top of its next iteration,
 * so termination takes effect after the currently running handler returns.
 */
void eloop_terminate(void)
{
	eloop.terminate = 1;
}
1250
1251
/*
 * eloop_destroy - Free all event loop resources
 *
 * Logs and frees any timeouts that are still registered (these indicate
 * handlers that were never cancelled), then releases the socket tables, the
 * signal table, and the multiplexer-specific state. Intended to be called
 * once, after eloop_run() has returned.
 */
void eloop_destroy(void)
{
	struct eloop_timeout *timeout, *prev;
	struct os_reltime now;

	os_get_reltime(&now);
	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		int sec, usec;
		/* Manual borrow subtraction to report the remaining time as
		 * sec.usec with usec normalized to [0, 999999] */
		sec = timeout->time.sec - now.sec;
		usec = timeout->time.usec - now.usec;
		if (timeout->time.usec < now.usec) {
			sec--;
			usec += 1000000;
		}
		wpa_printf(MSG_INFO, "ELOOP: remaining timeout: %d.%06d "
			   "eloop_data=%p user_data=%p handler=%p",
			   sec, usec, timeout->eloop_data, timeout->user_data,
			   timeout->handler);
		wpa_trace_dump_funcname("eloop unregistered timeout handler",
					timeout->handler);
		wpa_trace_dump("eloop timeout", timeout);
		eloop_remove_timeout(timeout);
	}
	eloop_sock_table_destroy(&eloop.readers);
	eloop_sock_table_destroy(&eloop.writers);
	eloop_sock_table_destroy(&eloop.exceptions);
	os_free(eloop.signals);

#ifdef CONFIG_ELOOP_POLL
	os_free(eloop.pollfds);
	os_free(eloop.pollfds_map);
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	os_free(eloop.fd_table);
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
#ifdef CONFIG_ELOOP_EPOLL
	os_free(eloop.epoll_events);
	close(eloop.epollfd);
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	os_free(eloop.kqueue_events);
	close(eloop.kqueuefd);
#endif /* CONFIG_ELOOP_KQUEUE */
}
1297
1298
/*
 * eloop_terminated - Check whether event loop termination has been requested
 * Returns: 1 if termination is pending (eloop_terminate() was called or a
 * SIGINT/SIGTERM has been received but not yet processed), 0 otherwise
 */
int eloop_terminated(void)
{
	return eloop.terminate || eloop.pending_terminate;
}
1303
1304
/*
 * eloop_wait_for_read_sock - Block until the given socket is readable
 * @sock: File descriptor to wait on; negative values are ignored
 *
 * One-shot helper that waits (with no timeout) outside the main event loop
 * until @sock has data available for reading. The wait mechanism follows the
 * compile-time eloop configuration. Return values of the underlying system
 * calls are intentionally ignored - this is a best-effort wait.
 */
void eloop_wait_for_read_sock(int sock)
{
#ifdef CONFIG_ELOOP_POLL
	struct pollfd pfd;

	if (sock < 0)
		return;

	os_memset(&pfd, 0, sizeof(pfd));
	pfd.fd = sock;
	pfd.events = POLLIN;

	poll(&pfd, 1, -1);
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL)
	/*
	 * We can use epoll() here. But epoll() requires 4 system calls.
	 * epoll_create1(), epoll_ctl() for ADD, epoll_wait, and close() for
	 * epoll fd. So select() is better for performance here.
	 */
	fd_set rfds;

	if (sock < 0)
		return;

	FD_ZERO(&rfds);
	FD_SET(sock, &rfds);
	select(sock + 1, &rfds, NULL, NULL, NULL);
#endif /* defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL) */
#ifdef CONFIG_ELOOP_KQUEUE
	int kfd;
	struct kevent ke1, ke2;

	/* Guard against invalid fd, matching the poll/select variants */
	if (sock < 0)
		return;

	kfd = kqueue();
	if (kfd == -1)
		return;
	/* One-shot read filter; the temporary queue is closed right after */
	EV_SET(&ke1, sock, EVFILT_READ, EV_ADD | EV_ONESHOT, 0, 0, 0);
	kevent(kfd, &ke1, 1, &ke2, 1, NULL);
	close(kfd);
#endif /* CONFIG_ELOOP_KQUEUE */
}
1346
1347 #ifdef CONFIG_ELOOP_SELECT
1348 #undef CONFIG_ELOOP_SELECT
1349 #endif /* CONFIG_ELOOP_SELECT */