2 * Copyright (c) 2009-2012 Niels Provos, Nick Mathewson
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * 3. The name of the author may not be used to endorse or promote products
13 * derived from this software without specific prior written permission.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "event2/event-config.h"
28 #include "evconfig-private.h"
30 #include <sys/types.h>
34 /* Minimum required for InitializeCriticalSectionAndSpinCount */
35 #define _WIN32_WINNT 0x0403
43 #ifdef EVENT__HAVE_SYS_SOCKET_H
44 #include <sys/socket.h>
46 #ifdef EVENT__HAVE_FCNTL_H
49 #ifdef EVENT__HAVE_UNISTD_H
53 #include "event2/listener.h"
54 #include "event2/util.h"
55 #include "event2/event.h"
56 #include "event2/event_struct.h"
57 #include "mm-internal.h"
58 #include "util-internal.h"
59 #include "log-internal.h"
60 #include "evthread-internal.h"
62 #include "iocp-internal.h"
63 #include "defer-internal.h"
64 #include "event-internal.h"
/* Backend virtual-method table for a connection listener.  Two backends
 * implement it below: the plain event-based one and the Windows IOCP one.
 * NOTE(review): this excerpt is truncated — closing braces and several
 * members are missing; confirm against the full file. */
67 struct evconnlistener_ops {
68 int (*enable)(struct evconnlistener *);
69 int (*disable)(struct evconnlistener *);
70 void (*destroy)(struct evconnlistener *);
71 void (*shutdown)(struct evconnlistener *);
72 evutil_socket_t (*getfd)(struct evconnlistener *);
73 struct event_base *(*getbase)(struct evconnlistener *);
/* Base listener object; backends embed this as their first member and
 * use EVUTIL_UPCAST to recover the derived struct. */
76 struct evconnlistener {
77 const struct evconnlistener_ops *ops;
80 evconnlistener_errorcb errorcb;
/* Event-backend listener: base plus a persistent EV_READ event on the
 * listening fd (see evconnlistener_new / listener_read_cb). */
88 struct evconnlistener_event {
89 struct evconnlistener base;
90 struct event listener;
/* Windows IOCP-backend listener: keeps a pool of pre-created accepting
 * sockets (see N_SOCKETS_PER_LISTENER below). */
94 struct evconnlistener_iocp {
95 struct evconnlistener base;
97 struct event_base *event_base;
98 struct event_iocp_port *port;
100 unsigned shutting_down : 1;
101 unsigned event_added : 1;
102 struct accepting_socket **accepting;
/* Lock/unlock the listener's (optional) recursive lock; no-ops when the
 * listener was created without LEV_OPT_THREADSAFE (lock is NULL). */
106 #define LOCK(listener) EVLOCK_LOCK((listener)->lock, 0)
107 #define UNLOCK(listener) EVLOCK_UNLOCK((listener)->lock, 0)
/* Forward declaration of the IOCP-backed constructor (defined near the end
 * of this file); the XXXX comment questions whether it should be public. */
109 struct evconnlistener *
110 evconnlistener_new_async(struct event_base *base,
111 evconnlistener_cb cb, void *ptr, unsigned flags, int backlog,
112 evutil_socket_t fd); /* XXXX export this? */
/* Event-backend implementations of the evconnlistener_ops table. */
114 static int event_listener_enable(struct evconnlistener *);
115 static int event_listener_disable(struct evconnlistener *);
116 static void event_listener_destroy(struct evconnlistener *);
117 static evutil_socket_t event_listener_getfd(struct evconnlistener *);
118 static struct event_base *event_listener_getbase(struct evconnlistener *);
/* Take the lock and bump the refcount.  NOTE(review): body is missing
 * from this excerpt. */
122 listener_incref_and_lock(struct evconnlistener *listener)
/* Drop one reference; when it hits zero, run the backend destroy hook,
 * free the lock, and free the listener.  NOTE(review): excerpt is missing
 * the refcnt==0 test and the return value — confirm against full file. */
130 listener_decref_and_unlock(struct evconnlistener *listener)
132 int refcnt = --listener->refcnt;
134 listener->ops->destroy(listener);
136 EVTHREAD_FREE_LOCK(listener->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
/* vtable for the plain event-based backend.  NOTE(review): the shutdown
 * slot (original line 149, presumably NULL) is missing from this excerpt. */
145 static const struct evconnlistener_ops evconnlistener_event_ops = {
146 event_listener_enable,
147 event_listener_disable,
148 event_listener_destroy,
150 event_listener_getfd,
151 event_listener_getbase
154 static void listener_read_cb(evutil_socket_t, short, void *);
/* Create a listener around an already-bound fd.  On Windows, if the base
 * has an IOCP port and the AcceptEx extensions are available, delegate to
 * the async (IOCP) constructor instead.  NOTE(review): several lines
 * (error paths, the fd validity check, return statements) are missing
 * from this excerpt. */
156 struct evconnlistener *
157 evconnlistener_new(struct event_base *base,
158 evconnlistener_cb cb, void *ptr, unsigned flags, int backlog,
161 struct evconnlistener_event *lev;
164 if (base && event_base_get_iocp_(base)) {
165 const struct win32_extension_fns *ext =
166 event_get_win32_extension_fns_();
167 if (ext->AcceptEx && ext->GetAcceptExSockaddrs)
168 return evconnlistener_new_async(base, cb, ptr, flags,
/* backlog > 0: use it; backlog < 0: fall back to a default of 128.
 * (backlog == 0 presumably means "already listening" — confirm.) */
174 if (listen(fd, backlog) < 0)
176 } else if (backlog < 0) {
177 if (listen(fd, 128) < 0)
181 lev = mm_calloc(1, sizeof(struct evconnlistener_event));
185 lev->base.ops = &evconnlistener_event_ops;
187 lev->base.user_data = ptr;
188 lev->base.flags = flags;
189 lev->base.refcnt = 1;
/* Translate listener flags into accept4()-style flags applied to each
 * accepted socket: non-blocking unless LEAVE_SOCKETS_BLOCKING, plus
 * close-on-exec when requested. */
191 lev->base.accept4_flags = 0;
192 if (!(flags & LEV_OPT_LEAVE_SOCKETS_BLOCKING))
193 lev->base.accept4_flags |= EVUTIL_SOCK_NONBLOCK;
194 if (flags & LEV_OPT_CLOSE_ON_EXEC)
195 lev->base.accept4_flags |= EVUTIL_SOCK_CLOEXEC;
197 if (flags & LEV_OPT_THREADSAFE) {
198 EVTHREAD_ALLOC_LOCK(lev->base.lock, EVTHREAD_LOCKTYPE_RECURSIVE);
/* Persistent read event: listener_read_cb runs on every accept-ready fd. */
201 event_assign(&lev->listener, base, fd, EV_READ|EV_PERSIST,
202 listener_read_cb, lev);
204 if (!(flags & LEV_OPT_DISABLED))
205 evconnlistener_enable(&lev->base);
/* Convenience constructor: create a socket, apply the socket options
 * implied by `flags`, bind it to `sa`, then hand it to evconnlistener_new.
 * On any failure the fd is closed (see cleanup at the end).
 * NOTE(review): error-goto lines and the socklen parameter declaration are
 * missing from this excerpt. */
210 struct evconnlistener *
211 evconnlistener_new_bind(struct event_base *base, evconnlistener_cb cb,
212 void *ptr, unsigned flags, int backlog, const struct sockaddr *sa,
215 struct evconnlistener *listener;
218 int family = sa ? sa->sa_family : AF_UNSPEC;
219 int socktype = SOCK_STREAM | EVUTIL_SOCK_NONBLOCK;
224 if (flags & LEV_OPT_CLOSE_ON_EXEC)
225 socktype |= EVUTIL_SOCK_CLOEXEC;
227 fd = evutil_socket_(family, socktype, 0);
/* Keepalive is set unconditionally on the listening socket. */
231 if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (void*)&on, sizeof(on))<0)
234 if (flags & LEV_OPT_REUSEABLE) {
235 if (evutil_make_listen_socket_reuseable(fd) < 0)
239 if (flags & LEV_OPT_REUSEABLE_PORT) {
240 if (evutil_make_listen_socket_reuseable_port(fd) < 0)
244 if (flags & LEV_OPT_DEFERRED_ACCEPT) {
245 if (evutil_make_tcp_listen_socket_deferred(fd) < 0)
249 if (flags & LEV_OPT_BIND_IPV6ONLY) {
250 if (evutil_make_listen_socket_ipv6only(fd) < 0)
255 if (bind(fd, sa, socklen)<0)
259 listener = evconnlistener_new(base, cb, ptr, flags, backlog, fd);
/* error path: caller never saw the fd, so close it here. */
265 evutil_closesocket(fd);
/* Public destructor: run the backend shutdown hook (if any), then drop
 * the caller's reference.  The listener is actually freed only when the
 * refcount reaches zero (a callback in flight may still hold a ref). */
270 evconnlistener_free(struct evconnlistener *lev)
275 if (lev->ops->shutdown)
276 lev->ops->shutdown(lev);
277 listener_decref_and_unlock(lev);
/* Event-backend destroy: remove the event, close the fd only when the
 * listener owns it (LEV_OPT_CLOSE_ON_FREE). */
281 event_listener_destroy(struct evconnlistener *lev)
283 struct evconnlistener_event *lev_e =
284 EVUTIL_UPCAST(lev, struct evconnlistener_event, base);
286 event_del(&lev_e->listener);
287 if (lev->flags & LEV_OPT_CLOSE_ON_FREE)
288 evutil_closesocket(event_get_fd(&lev_e->listener));
289 event_debug_unassign(&lev_e->listener);
/* Public enable/disable: dispatch through the backend vtable.
 * NOTE(review): locking and the `enabled` bookkeeping lines are missing
 * from this excerpt. */
293 evconnlistener_enable(struct evconnlistener *lev)
299 r = lev->ops->enable(lev);
307 evconnlistener_disable(struct evconnlistener *lev)
312 r = lev->ops->disable(lev);
/* Event-backend enable/disable: add/remove the persistent read event. */
318 event_listener_enable(struct evconnlistener *lev)
320 struct evconnlistener_event *lev_e =
321 EVUTIL_UPCAST(lev, struct evconnlistener_event, base);
322 return event_add(&lev_e->listener, NULL);
326 event_listener_disable(struct evconnlistener *lev)
328 struct evconnlistener_event *lev_e =
329 EVUTIL_UPCAST(lev, struct evconnlistener_event, base);
330 return event_del(&lev_e->listener);
/* Public accessors: fetch the fd / event_base via the backend vtable
 * (locking lines are missing from this excerpt). */
334 evconnlistener_get_fd(struct evconnlistener *lev)
338 fd = lev->ops->getfd(lev);
343 static evutil_socket_t
344 event_listener_getfd(struct evconnlistener *lev)
346 struct evconnlistener_event *lev_e =
347 EVUTIL_UPCAST(lev, struct evconnlistener_event, base);
348 return event_get_fd(&lev_e->listener);
352 evconnlistener_get_base(struct evconnlistener *lev)
354 struct event_base *base;
356 base = lev->ops->getbase(lev);
361 static struct event_base *
362 event_listener_getbase(struct evconnlistener *lev)
364 struct evconnlistener_event *lev_e =
365 EVUTIL_UPCAST(lev, struct evconnlistener_event, base);
366 return event_get_base(&lev_e->listener);
/* Replace the accept callback.  If the listener is enabled but had no
 * callback (accepting was implicitly paused), re-enable it so pending
 * connections get delivered.  NOTE(review): the `lev->cb = cb` assignment
 * line is missing from this excerpt. */
370 evconnlistener_set_cb(struct evconnlistener *lev,
371 evconnlistener_cb cb, void *arg)
375 if (lev->enabled && !lev->cb)
378 lev->user_data = arg;
380 evconnlistener_enable(lev);
/* Replace the error callback (called on accept() failures). */
385 evconnlistener_set_error_cb(struct evconnlistener *lev,
386 evconnlistener_errorcb errorcb)
389 lev->errorcb = errorcb;
/* Event-backend accept loop: called whenever the listening fd is readable.
 * Accepts connections in a loop (loop structure missing from this excerpt),
 * invoking the user callback for each; on a non-retriable accept error,
 * invokes the error callback.  Holds the listener lock around state access
 * and drops it around user callbacks (lock lines missing here). */
394 listener_read_cb(evutil_socket_t fd, short what, void *p)
396 struct evconnlistener *lev = p;
398 evconnlistener_cb cb;
399 evconnlistener_errorcb errorcb;
403 struct sockaddr_storage ss;
404 ev_socklen_t socklen = sizeof(ss);
405 evutil_socket_t new_fd = evutil_accept4_(fd, (struct sockaddr*)&ss, &socklen, lev->accept4_flags);
/* socklen == 0 case: kernel returned a zero-length address. */
409 /* This can happen with some older linux kernels in
410 * response to nmap. */
411 evutil_closesocket(new_fd);
/* No user callback installed: drop the connection rather than leak it. */
415 if (lev->cb == NULL) {
416 evutil_closesocket(new_fd);
422 user_data = lev->user_data;
/* User callback runs without the lock held (ref taken beforehand —
 * missing from excerpt). */
424 cb(lev, new_fd, (struct sockaddr*)&ss, (int)socklen,
/* If the callback released the last external reference, ours is the
 * final one: decref must free the listener, asserted below. */
427 if (lev->refcnt == 1) {
428 int freed = listener_decref_and_unlock(lev);
429 EVUTIL_ASSERT(freed);
434 /* the callback could have disabled the listener */
439 err = evutil_socket_geterror(fd);
440 if (EVUTIL_ERR_ACCEPT_RETRIABLE(err)) {
444 if (lev->errorcb != NULL) {
446 errorcb = lev->errorcb;
447 user_data = lev->user_data;
449 errorcb(lev, user_data);
451 listener_decref_and_unlock(lev);
453 event_sock_warn(fd, "Error from accept() call");
/* One pending overlapped AcceptEx operation (Windows IOCP backend).
 * Protected by its own CRITICAL_SECTION, not the listener lock.
 * NOTE(review): several members (s, error, addrbuf, family, buflen) are
 * missing from this excerpt. */
459 struct accepting_socket {
460 CRITICAL_SECTION lock;
461 struct event_overlapped overlapped;
464 struct event_callback deferred;
465 struct evconnlistener_iocp *lev;
468 unsigned free_on_cb:1;
472 static void accepted_socket_cb(struct event_overlapped *o, ev_uintptr_t key,
473 ev_ssize_t n, int ok);
474 static void accepted_socket_invoke_user_cb(struct event_callback *cb, void *arg);
/* Register/unregister a "virtual" event so the event_base does not exit
 * its loop while the IOCP listener is active; event_added guards against
 * double add/del. */
477 iocp_listener_event_add(struct evconnlistener_iocp *lev)
479 if (lev->event_added)
482 lev->event_added = 1;
483 event_base_add_virtual_(lev->event_base);
487 iocp_listener_event_del(struct evconnlistener_iocp *lev)
489 if (!lev->event_added)
492 lev->event_added = 0;
493 event_base_del_virtual_(lev->event_base);
/* Allocate an accepting_socket sized to hold the two address blocks that
 * AcceptEx writes (each sockaddr + 16 bytes, per the AcceptEx contract).
 * NOTE(review): the `else return NULL` for unknown families and the
 * trailing `return res;` are missing from this excerpt. */
496 static struct accepting_socket *
497 new_accepting_socket(struct evconnlistener_iocp *lev, int family)
499 struct accepting_socket *res;
503 if (family == AF_INET)
504 addrlen = sizeof(struct sockaddr_in);
505 else if (family == AF_INET6)
506 addrlen = sizeof(struct sockaddr_in6);
509 buflen = (addrlen+16)*2;
/* -1 compensates for the 1-byte placeholder array at the struct's end
 * (pre-C99 flexible-array idiom — confirm member declaration). */
511 res = mm_calloc(1,sizeof(struct accepting_socket)-1+buflen);
515 event_overlapped_init_(&res->overlapped, accepted_socket_cb);
516 res->s = EVUTIL_INVALID_SOCKET;
518 res->buflen = buflen;
519 res->family = family;
/* Deferred callback runs the user cb at a middle priority. */
521 event_deferred_cb_init_(&res->deferred,
522 event_base_get_npriorities(lev->event_base) / 2,
523 accepted_socket_invoke_user_cb, res);
525 InitializeCriticalSectionAndSpinCount(&res->lock, 1000);
/* Tear down an accepting_socket; caller must hold as->lock (released and
 * deleted here).  Closes the in-flight socket if any. */
531 free_and_unlock_accepting_socket(struct accepting_socket *as)
534 if (as->s != EVUTIL_INVALID_SOCKET)
537 LeaveCriticalSection(&as->lock);
538 DeleteCriticalSection(&as->lock);
/* Create a fresh socket and post an overlapped AcceptEx on it.  Caller
 * holds as->lock.  Returns 0/-1 (return statements missing from excerpt). */
543 start_accepting(struct accepting_socket *as)
546 const struct win32_extension_fns *ext = event_get_win32_extension_fns_();
548 SOCKET s = socket(as->family, SOCK_STREAM, 0);
551 if (!as->lev->base.enabled)
554 if (s == EVUTIL_INVALID_SOCKET) {
555 error = WSAGetLastError();
559 /* XXXX It turns out we need to do this again later. Does this call
560 * have any effect? */
/* NOTE(review): sizeof(&as->lev->fd) is the size of a POINTER, not of
 * the SOCKET value passed — looks like a bug (should presumably be
 * sizeof(as->lev->fd)); benign only where sizeof(SOCKET)==sizeof(void*).
 * Same pattern recurs in accepted_socket_invoke_user_cb. */
561 setsockopt(s, SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT,
562 (char *)&as->lev->fd, sizeof(&as->lev->fd));
564 if (!(as->lev->base.flags & LEV_OPT_LEAVE_SOCKETS_BLOCKING))
565 evutil_make_socket_nonblocking(s);
567 if (event_iocp_port_associate_(as->lev->port, s, 1) < 0) {
/* receive-length 0: complete as soon as a connection arrives, without
 * waiting for data; buffer halves hold local/remote addresses. */
574 if (ext->AcceptEx(as->lev->fd, s, as->addrbuf, 0,
575 as->buflen/2, as->buflen/2, &pending, &as->overlapped.overlapped))
577 /* Immediate success! */
578 accepted_socket_cb(&as->overlapped, 1, 0, 1);
580 error = WSAGetLastError();
581 if (error != ERROR_IO_PENDING) {
590 event_deferred_cb_schedule_(
/* Cancel the pending accept by closing its socket; the completion will
 * observe s == EVUTIL_INVALID_SOCKET and treat it as "disabled". */
597 stop_accepting(struct accepting_socket *as)
601 as->s = EVUTIL_INVALID_SOCKET;
/* Deferred-callback half of IOCP accept completion: extract the peer
 * addresses with GetAcceptExSockaddrs, finish the accepted socket, and
 * invoke the user callback (or error callback) outside as->lock.
 * NOTE(review): many lines (error branch, restart of accepting, callback
 * NULL checks) are missing from this excerpt. */
606 accepted_socket_invoke_user_cb(struct event_callback *dcb, void *arg)
608 struct accepting_socket *as = arg;
610 struct sockaddr *sa_local=NULL, *sa_remote=NULL;
611 int socklen_local=0, socklen_remote=0;
612 const struct win32_extension_fns *ext = event_get_win32_extension_fns_();
613 struct evconnlistener *lev = &as->lev->base;
614 evutil_socket_t sock=-1;
616 evconnlistener_cb cb=NULL;
617 evconnlistener_errorcb errorcb=NULL;
620 EVUTIL_ASSERT(ext->GetAcceptExSockaddrs);
623 EnterCriticalSection(&as->lock);
/* Listener is being torn down: free ourselves and drop our reference. */
624 if (as->free_on_cb) {
625 free_and_unlock_accepting_socket(as);
626 listener_decref_and_unlock(lev);
635 errorcb = lev->errorcb;
/* Decode the two address blocks AcceptEx wrote into addrbuf. */
637 ext->GetAcceptExSockaddrs(
638 as->addrbuf, 0, as->buflen/2, as->buflen/2,
639 &sa_local, &socklen_local, &sa_remote,
/* Ownership of the socket transfers to the user callback. */
643 as->s = EVUTIL_INVALID_SOCKET;
645 /* We need to call this so getsockname, getpeername, and
646 * shutdown work correctly on the accepted socket. */
647 /* XXXX handle error? */
/* NOTE(review): sizeof(&as->lev->fd) == sizeof(pointer) — same suspect
 * pattern as in start_accepting; confirm intended length. */
648 setsockopt(sock, SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT,
649 (char *)&as->lev->fd, sizeof(&as->lev->fd));
651 data = lev->user_data;
653 LeaveCriticalSection(&as->lock);
/* Error path: propagate the stored WSA error to the error callback. */
657 WSASetLastError(error);
660 cb(lev, sock, sa_remote, socklen_remote, data);
/* If decref freed the listener, do not touch as again. */
664 if (listener_decref_and_unlock(lev))
667 EnterCriticalSection(&as->lock);
669 LeaveCriticalSection(&as->lock);
/* IOCP completion routine: runs on the port's loop when an AcceptEx
 * finishes (ok) or fails.  Defers user-visible work to
 * accepted_socket_invoke_user_cb; handles teardown and the
 * disabled-by-stop_accepting case inline. */
673 accepted_socket_cb(struct event_overlapped *o, ev_uintptr_t key, ev_ssize_t n, int ok)
675 struct accepting_socket *as =
676 EVUTIL_UPCAST(o, struct accepting_socket, overlapped);
/* Lock order: listener lock first, then the per-socket lock. */
678 LOCK(&as->lev->base);
679 EnterCriticalSection(&as->lock);
681 /* XXXX Don't do this if some EV_MT flag is set. */
682 event_deferred_cb_schedule_(
685 LeaveCriticalSection(&as->lock);
686 } else if (as->free_on_cb) {
687 struct evconnlistener *lev = &as->lev->base;
688 free_and_unlock_accepting_socket(as);
689 listener_decref_and_unlock(lev);
691 } else if (as->s == EVUTIL_INVALID_SOCKET) {
692 /* This is okay; we were disabled by iocp_listener_disable. */
693 LeaveCriticalSection(&as->lock);
695 /* Some error on accept that we couldn't actually handle. */
697 DWORD transfer = 0, flags=0;
698 event_sock_warn(as->s, "Unexpected error on AcceptEx");
/* Query the overlapped result purely to surface the real WSA error. */
699 ok = WSAGetOverlappedResult(as->s, &o->overlapped,
700 &transfer, FALSE, &flags);
702 /* well, that was confusing! */
705 as->error = WSAGetLastError();
/* Hand the error to the deferred callback for user delivery. */
707 event_deferred_cb_schedule_(
710 LeaveCriticalSection(&as->lock);
712 UNLOCK(&as->lev->base);
/* IOCP enable: re-register the virtual event and restart AcceptEx on
 * every pool slot that is idle (s == invalid) and not being freed.
 * NOTE(review): the start_accepting call inside the loop is missing from
 * this excerpt. */
716 iocp_listener_enable(struct evconnlistener *lev)
719 struct evconnlistener_iocp *lev_iocp =
720 EVUTIL_UPCAST(lev, struct evconnlistener_iocp, base);
723 iocp_listener_event_add(lev_iocp);
724 for (i = 0; i < lev_iocp->n_accepting; ++i) {
725 struct accepting_socket *as = lev_iocp->accepting[i];
728 EnterCriticalSection(&as->lock);
729 if (!as->free_on_cb && as->s == EVUTIL_INVALID_SOCKET)
731 LeaveCriticalSection(&as->lock);
/* Shared disable/shutdown path: stop every in-flight accept; on shutdown
 * also close the listening fd when the listener owns it. */
738 iocp_listener_disable_impl(struct evconnlistener *lev, int shutdown)
741 struct evconnlistener_iocp *lev_iocp =
742 EVUTIL_UPCAST(lev, struct evconnlistener_iocp, base);
745 iocp_listener_event_del(lev_iocp);
746 for (i = 0; i < lev_iocp->n_accepting; ++i) {
747 struct accepting_socket *as = lev_iocp->accepting[i];
750 EnterCriticalSection(&as->lock);
751 if (!as->free_on_cb && as->s != EVUTIL_INVALID_SOCKET) {
756 LeaveCriticalSection(&as->lock);
759 if (shutdown && lev->flags & LEV_OPT_CLOSE_ON_FREE)
760 evutil_closesocket(lev_iocp->fd);
767 iocp_listener_disable(struct evconnlistener *lev)
769 return iocp_listener_disable_impl(lev,0);
/* Destroy doubles as the shutdown hook (see vtable below); the
 * shutting_down flag makes the pair idempotent. */
773 iocp_listener_destroy(struct evconnlistener *lev)
775 struct evconnlistener_iocp *lev_iocp =
776 EVUTIL_UPCAST(lev, struct evconnlistener_iocp, base);
778 if (! lev_iocp->shutting_down) {
779 lev_iocp->shutting_down = 1;
780 iocp_listener_disable_impl(lev,1);
785 static evutil_socket_t
786 iocp_listener_getfd(struct evconnlistener *lev)
788 struct evconnlistener_iocp *lev_iocp =
789 EVUTIL_UPCAST(lev, struct evconnlistener_iocp, base);
792 static struct event_base *
793 iocp_listener_getbase(struct evconnlistener *lev)
795 struct evconnlistener_iocp *lev_iocp =
796 EVUTIL_UPCAST(lev, struct evconnlistener_iocp, base);
797 return lev_iocp->event_base;
/* vtable for the IOCP backend.  NOTE(review): the getfd slot (original
 * line 805) is missing from this excerpt. */
800 static const struct evconnlistener_ops evconnlistener_iocp_ops = {
801 iocp_listener_enable,
802 iocp_listener_disable,
803 iocp_listener_destroy,
804 iocp_listener_destroy, /* shutdown */
806 iocp_listener_getbase
809 /* XXX define some way to override this. */
/* Number of simultaneously-posted AcceptEx operations per listener. */
810 #define N_SOCKETS_PER_LISTENER 4
/* IOCP-backed constructor: wraps an already-bound fd in an async listener
 * that keeps N_SOCKETS_PER_LISTENER AcceptEx calls outstanding.
 * LEV_OPT_THREADSAFE is forced on because completions arrive on IOCP
 * threads.  NOTE(review): several error-goto lines and return statements
 * are missing from this excerpt. */
812 struct evconnlistener *
813 evconnlistener_new_async(struct event_base *base,
814 evconnlistener_cb cb, void *ptr, unsigned flags, int backlog,
817 struct sockaddr_storage ss;
818 int socklen = sizeof(ss);
819 struct evconnlistener_iocp *lev;
822 flags |= LEV_OPT_THREADSAFE;
824 if (!base || !event_base_get_iocp_(base))
827 /* XXXX duplicate code */
/* Same backlog convention as evconnlistener_new: >0 uses it, <0 uses 128. */
829 if (listen(fd, backlog) < 0)
831 } else if (backlog < 0) {
832 if (listen(fd, 128) < 0)
/* The bound address family decides the size of the AcceptEx buffers. */
835 if (getsockname(fd, (struct sockaddr*)&ss, &socklen)) {
836 event_sock_warn(fd, "getsockname");
839 lev = mm_calloc(1, sizeof(struct evconnlistener_iocp));
841 event_warn("calloc");
844 lev->base.ops = &evconnlistener_iocp_ops;
846 lev->base.user_data = ptr;
847 lev->base.flags = flags;
848 lev->base.refcnt = 1;
849 lev->base.enabled = 1;
851 lev->port = event_base_get_iocp_(base);
853 lev->event_base = base;
856 if (event_iocp_port_associate_(lev->port, fd, 1) < 0)
859 EVTHREAD_ALLOC_LOCK(lev->base.lock, EVTHREAD_LOCKTYPE_RECURSIVE);
861 lev->n_accepting = N_SOCKETS_PER_LISTENER;
862 lev->accepting = mm_calloc(lev->n_accepting,
863 sizeof(struct accepting_socket *));
864 if (!lev->accepting) {
865 event_warn("calloc");
866 goto err_delete_lock;
/* Pre-create the pool; accepting starts immediately only when a user
 * callback was supplied. */
868 for (i = 0; i < lev->n_accepting; ++i) {
869 lev->accepting[i] = new_accepting_socket(lev, ss.ss_family);
870 if (!lev->accepting[i]) {
871 event_warnx("Couldn't create accepting socket");
872 goto err_free_accepting;
874 if (cb && start_accepting(lev->accepting[i]) < 0) {
875 event_warnx("Couldn't start accepting on socket");
876 EnterCriticalSection(&lev->accepting[i]->lock);
877 free_and_unlock_accepting_socket(lev->accepting[i]);
878 goto err_free_accepting;
883 iocp_listener_event_add(lev);
/* error unwinding */
888 mm_free(lev->accepting);
889 /* XXXX free the other elements. */
891 EVTHREAD_FREE_LOCK(lev->base.lock, EVTHREAD_LOCKTYPE_RECURSIVE);
895 /* Don't close the fd, it is caller's responsibility. */