2 * util/netevent.c - event notification
4 * Copyright (c) 2007, NLnet Labs. All rights reserved.
6 * This software is open source.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * Redistributions of source code must retain the above copyright notice,
13 * this list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 * this list of conditions and the following disclaimer in the documentation
17 * and/or other materials provided with the distribution.
19 * Neither the name of the NLNET LABS nor the names of its contributors may
20 * be used to endorse or promote products derived from this software without
21 * specific prior written permission.
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
29 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
30 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
31 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
32 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 * This file contains event notification functions.
42 #include "util/netevent.h"
43 #include "util/ub_event.h"
45 #include "util/net_help.h"
46 #include "util/fptr_wlist.h"
47 #include "sldns/pkthdr.h"
48 #include "sldns/sbuffer.h"
49 #include "dnstap/dnstap.h"
50 #include "dnscrypt/dnscrypt.h"
51 #ifdef HAVE_OPENSSL_SSL_H
52 #include <openssl/ssl.h>
54 #ifdef HAVE_OPENSSL_ERR_H
55 #include <openssl/err.h>
58 /* -------- Start of local definitions -------- */
59 /** if CMSG_ALIGN is not defined on this platform, a workaround */
/* NOTE(review): the #ifndef/#ifdef/#else/#endif guard lines around these
 * fallback definitions appear to have been dropped by the extraction;
 * verify against the upstream file before editing. */
62 # define CMSG_ALIGN(n) __CMSG_ALIGN(n)
63 # elif defined(CMSG_DATA_ALIGN)
64 # define CMSG_ALIGN _CMSG_DATA_ALIGN
/* generic fallback: round len up to a multiple of sizeof(long) */
66 # define CMSG_ALIGN(len) (((len)+sizeof(long)-1) & ~(sizeof(long)-1))
70 /** if CMSG_LEN is not defined on this platform, a workaround */
72 # define CMSG_LEN(len) (CMSG_ALIGN(sizeof(struct cmsghdr))+(len))
75 /** if CMSG_SPACE is not defined on this platform, a workaround */
77 # ifdef _CMSG_HDR_ALIGN
78 # define CMSG_SPACE(l) (CMSG_ALIGN(l)+_CMSG_HDR_ALIGN(sizeof(struct cmsghdr)))
80 # define CMSG_SPACE(l) (CMSG_ALIGN(l)+CMSG_ALIGN(sizeof(struct cmsghdr)))
84 /** The TCP reading or writing query timeout in milliseconds */
85 #define TCP_QUERY_TIMEOUT 120000
86 /** The TCP timeout in msec for fast queries, above half are used */
87 #define TCP_QUERY_TIMEOUT_FAST 200
89 #ifndef NONBLOCKING_IS_BROKEN
90 /** number of UDP reads to perform per read indication from select */
91 #define NUM_UDP_PER_SELECT 100
/* when nonblocking sockets are broken, read one packet per event only */
93 #define NUM_UDP_PER_SELECT 1
97 * The internal event structure for keeping ub_event info for the event.
98 * Possibly other structures (list, tree) this is part of.
100 struct internal_event {
/** the comm base this event is part of */
102 struct comm_base* base;
103 /** ub_event event type */
/* NOTE(review): the member declaration(s) following the comment above
 * appear to be missing from this extraction. */
108 * Internal base structure, so that every thread has its own events.
110 struct internal_base {
111 /** ub_event event_base type. */
112 struct ub_event_base* base;
113 /** seconds time pointer points here */
/* NOTE(review): the secs/now time-value member declarations appear to be
 * missing from this extraction (comm_base_create passes &b->eb->secs and
 * &b->eb->now, so they exist upstream). */
115 /** timeval with current time */
117 /** the event used for slow_accept timeouts */
118 struct ub_event* slow_accept;
119 /** true if slow_accept is enabled */
120 int slow_accept_enabled;
124 * Internal timer structure, to store timer event in.
126 struct internal_timer {
127 /** the super struct from which derived */
128 struct comm_timer super;
/** the comm base the timer is registered with */
130 struct comm_base* base;
131 /** ub_event event type */
133 /** is timer enabled */
/* NOTE(review): the event and enabled member declarations appear to be
 * missing from this extraction. */
138 * Internal signal structure, to store signal event in.
140 struct internal_signal {
141 /** ub_event event type */
/* NOTE(review): the event member declaration appears to be missing here. */
143 /** next in signal list */
144 struct internal_signal* next;
147 /** create a tcp handler with a parent */
/* forward declaration; the definition is further down in the file
 * (not visible in this chunk). */
148 static struct comm_point* comm_point_create_tcp_handler(
149 struct comm_base *base, struct comm_point* parent, size_t bufsize,
150 comm_point_callback_type* callback, void* callback_arg);
152 /* -------- End of local definitions -------- */
/** create a comm base that owns its own event base.
 * sigs: nonzero to enable signal handling in the event base. */
155 comm_base_create(int sigs)
157 struct comm_base* b = (struct comm_base*)calloc(1,
158 sizeof(struct comm_base));
159 const char *evnm="event", *evsys="", *evmethod="";
/* NOTE(review): the NULL checks after the calloc calls appear to have
 * been dropped by the extraction. */
163 b->eb = (struct internal_base*)calloc(1, sizeof(struct internal_base));
/* the event base exports pointers to the time values it keeps updated */
168 b->eb->base = ub_default_event_base(sigs, &b->eb->secs, &b->eb->now);
/* report which event mechanism (select/poll/epoll/...) is in use */
175 ub_get_event_sys(b->eb->base, &evnm, &evsys, &evmethod);
176 verbose(VERB_ALGO, "%s %s user %s method.", evnm, evsys, evmethod);
/** create a comm base around a caller-provided ub_event base;
 * the caller keeps ownership of that event base. */
181 comm_base_create_event(struct ub_event_base* base)
183 struct comm_base* b = (struct comm_base*)calloc(1,
184 sizeof(struct comm_base));
187 b->eb = (struct internal_base*)calloc(1, sizeof(struct internal_base))
/** delete a comm base, including the event base it owns */
198 comm_base_delete(struct comm_base* b)
/* tear down the pending slow-accept timer event, if enabled */
202 if(b->eb->slow_accept_enabled) {
203 if(ub_event_del(b->eb->slow_accept) != 0) {
204 log_err("could not event_del slow_accept");
206 ub_event_free(b->eb->slow_accept);
/* frees the event base itself, since this comm base owns it */
208 ub_event_base_free(b->eb->base);
/** delete a comm base but leave the (caller-owned) event base alive;
 * counterpart of comm_base_create_event. */
215 comm_base_delete_no_base(struct comm_base* b)
219 if(b->eb->slow_accept_enabled) {
220 if(ub_event_del(b->eb->slow_accept) != 0) {
221 log_err("could not event_del slow_accept");
223 ub_event_free(b->eb->slow_accept);
/** export pointers to the time values maintained by the event base */
231 comm_base_timept(struct comm_base* b, time_t** tt, struct timeval** tv)
/** run the event loop until exit; fatal_exit on dispatch error */
238 comm_base_dispatch(struct comm_base* b)
241 retval = ub_event_base_dispatch(b->eb->base);
/* a negative return means an error inside the event library; give up */
243 fatal_exit("event_dispatch returned error %d, "
244 "errno is %s", retval, strerror(errno));
/** ask the event loop to stop dispatching (loopexit) */
248 void comm_base_exit(struct comm_base* b)
250 if(ub_event_base_loopexit(b->eb->base) != 0) {
251 log_err("Could not loopexit");
/** register callbacks used to pause/resume accepting tcp connections
 * when the process runs out of file descriptors */
255 void comm_base_set_slow_accept_handlers(struct comm_base* b,
256 void (*stop_acc)(void*), void (*start_acc)(void*), void* arg)
258 b->stop_accept = stop_acc;
259 b->start_accept = start_acc;
/** accessor: the underlying ub_event base of this comm base */
263 struct ub_event_base* comm_base_internal(struct comm_base* b)
268 /** see if errno for udp has to be logged or not uses globals */
/* returns whether the current errno (after a failed UDP send to
 * addr/addrlen) is worth logging at the current verbosity.
 * NOTE(review): the return statements after each squelch condition appear
 * to have been dropped by the extraction. */
270 udp_send_errno_needs_log(struct sockaddr* addr, socklen_t addrlen)
272 /* do not log transient errors (unless high verbosity) */
273 #if defined(ENETUNREACH) || defined(EHOSTDOWN) || defined(EHOSTUNREACH) || defined(ENETDOWN)
287 if(verbosity < VERB_ALGO)
293 /* permission denied is gotten for every send if the
294 * network is disconnected (on some OS), squelch it */
295 if( ((errno == EPERM)
296 # ifdef EADDRNOTAVAIL
297 /* 'Cannot assign requested address' also when disconnected */
298 || (errno == EADDRNOTAVAIL)
300 ) && verbosity < VERB_DETAIL)
302 /* squelch errors where people deploy AAAA ::ffff:bla for
303 * authority servers, which we try for intranets. */
304 if(errno == EINVAL && addr_is_ip4mapped(
305 (struct sockaddr_storage*)addr, addrlen) &&
306 verbosity < VERB_DETAIL)
308 /* SO_BROADCAST sockopt can give access to 255.255.255.255,
309 * but a dns cache does not need it. */
310 if(errno == EACCES && addr_is_broadcast(
311 (struct sockaddr_storage*)addr, addrlen) &&
312 verbosity < VERB_DETAIL)
/** same squelch policy for tcp connect errors as for udp sends */
317 int tcp_connect_errno_needs_log(struct sockaddr* addr, socklen_t addrlen)
319 return udp_send_errno_needs_log(addr, addrlen);
322 /* send a UDP reply */
/* sends the packet contents with sendto() to addr; on a would-block
 * error it retries once in blocking mode so the answer is not lost.
 * NOTE(review): braces and return statements appear to have been dropped
 * from this body by the extraction. */
324 comm_point_send_udp_msg(struct comm_point *c, sldns_buffer* packet,
325 struct sockaddr* addr, socklen_t addrlen)
328 log_assert(c->fd != -1);
330 if(sldns_buffer_remaining(packet) == 0)
331 log_err("error: send empty UDP packet");
333 log_assert(addr && addrlen > 0);
334 sent = sendto(c->fd, (void*)sldns_buffer_begin(packet),
335 sldns_buffer_remaining(packet), 0,
338 /* try again and block, waiting for IO to complete,
339 * we want to send the answer, and we will wait for
340 * the ethernet interface buffer to have space. */
342 if(errno == EAGAIN ||
344 errno == EWOULDBLOCK ||
/* windows branch: equivalent would-block detection via WSAGetLastError */
348 if(WSAGetLastError() == WSAEINPROGRESS ||
349 WSAGetLastError() == WSAENOBUFS ||
350 WSAGetLastError() == WSAEWOULDBLOCK) {
/* blocking retry; the socket is restored to nonblocking below */
354 sent = sendto(c->fd, (void*)sldns_buffer_begin(packet),
355 sldns_buffer_remaining(packet), 0,
358 fd_set_nonblock(c->fd);
/* only log send failures that pass the squelch policy */
363 if(!udp_send_errno_needs_log(addr, addrlen))
366 verbose(VERB_OPS, "sendto failed: %s", strerror(errno));
368 verbose(VERB_OPS, "sendto failed: %s",
369 wsa_strerror(WSAGetLastError()));
371 log_addr(VERB_OPS, "remote address is",
372 (struct sockaddr_storage*)addr, addrlen);
/* partial send of a UDP datagram is an error worth reporting */
374 } else if((size_t)sent != sldns_buffer_remaining(packet)) {
375 log_err("sent %d in place of %d bytes",
376 (int)sent, (int)sldns_buffer_remaining(packet));
382 #if defined(AF_INET6) && defined(IPV6_PKTINFO) && (defined(HAVE_RECVMSG) || defined(HAVE_SENDMSG))
383 /** print debug ancillary info */
/* logs the source-interface info (pktinfo) stored in the comm_reply,
 * for debugging of interface-automatic handling. */
384 static void p_ancil(const char* str, struct comm_reply* r)
386 if(r->srctype != 4 && r->srctype != 6) {
387 log_info("%s: unknown srctype %d", str, r->srctype);
390 if(r->srctype == 6) {
392 if(inet_ntop(AF_INET6, &r->pktinfo.v6info.ipi6_addr,
393 buf, (socklen_t)sizeof(buf)) == 0) {
394 (void)strlcpy(buf, "(inet_ntop error)", sizeof(buf));
/* belt-and-braces termination; inet_ntop output is already terminated */
396 buf[sizeof(buf)-1]=0;
397 log_info("%s: %s %d", str, buf, r->pktinfo.v6info.ipi6_ifindex);
398 } else if(r->srctype == 4) {
400 char buf1[1024], buf2[1024];
401 if(inet_ntop(AF_INET, &r->pktinfo.v4info.ipi_addr,
402 buf1, (socklen_t)sizeof(buf1)) == 0) {
403 (void)strlcpy(buf1, "(inet_ntop error)", sizeof(buf1));
405 buf1[sizeof(buf1)-1]=0;
406 #ifdef HAVE_STRUCT_IN_PKTINFO_IPI_SPEC_DST
407 if(inet_ntop(AF_INET, &r->pktinfo.v4info.ipi_spec_dst,
408 buf2, (socklen_t)sizeof(buf2)) == 0) {
409 (void)strlcpy(buf2, "(inet_ntop error)", sizeof(buf2));
411 buf2[sizeof(buf2)-1]=0;
415 log_info("%s: %d %s %s", str, r->pktinfo.v4info.ipi_ifindex,
417 #elif defined(IP_RECVDSTADDR)
/* BSD-style: only the destination address is available, no ifindex */
419 if(inet_ntop(AF_INET, &r->pktinfo.v4addr,
420 buf1, (socklen_t)sizeof(buf1)) == 0) {
421 (void)strlcpy(buf1, "(inet_ntop error)", sizeof(buf1));
423 buf1[sizeof(buf1)-1]=0;
424 log_info("%s: %s", str, buf1);
425 #endif /* IP_PKTINFO or PI_RECVDSTDADDR */
430 /** send a UDP reply over specified interface*/
/* like comm_point_send_udp_msg, but uses sendmsg() with ancillary
 * pktinfo data so the reply leaves from the same interface/address the
 * query arrived on (interface-automatic).
 * NOTE(review): several declarations, braces and #ifdef lines appear to
 * have been dropped from this body by the extraction. */
432 comm_point_send_udp_msg_if(struct comm_point *c, sldns_buffer* packet,
433 struct sockaddr* addr, socklen_t addrlen, struct comm_reply* r)
435 #if defined(AF_INET6) && defined(IPV6_PKTINFO) && defined(HAVE_SENDMSG)
441 struct cmsghdr *cmsg;
442 #endif /* S_SPLINT_S */
444 log_assert(c->fd != -1);
446 if(sldns_buffer_remaining(packet) == 0)
447 log_err("error: send empty UDP packet");
449 log_assert(addr && addrlen > 0);
/* build the msghdr: destination, single iovec over the packet bytes,
 * and a control buffer for the pktinfo cmsg */
452 msg.msg_namelen = addrlen;
453 iov[0].iov_base = sldns_buffer_begin(packet);
454 iov[0].iov_len = sldns_buffer_remaining(packet);
457 msg.msg_control = control;
459 msg.msg_controllen = sizeof(control);
460 #endif /* S_SPLINT_S */
464 cmsg = CMSG_FIRSTHDR(&msg);
465 if(r->srctype == 4) {
/* IPv4 reply: attach IP_PKTINFO (or IP_SENDSRCADDR) with the source */
468 msg.msg_controllen = CMSG_SPACE(sizeof(struct in_pktinfo));
469 log_assert(msg.msg_controllen <= sizeof(control));
470 cmsg->cmsg_level = IPPROTO_IP;
471 cmsg->cmsg_type = IP_PKTINFO;
472 memmove(CMSG_DATA(cmsg), &r->pktinfo.v4info,
473 sizeof(struct in_pktinfo));
474 /* unset the ifindex to not bypass the routing tables */
475 cmsg_data = CMSG_DATA(cmsg);
476 ((struct in_pktinfo *) cmsg_data)->ipi_ifindex = 0;
477 cmsg->cmsg_len = CMSG_LEN(sizeof(struct in_pktinfo));
478 #elif defined(IP_SENDSRCADDR)
479 msg.msg_controllen = CMSG_SPACE(sizeof(struct in_addr));
480 log_assert(msg.msg_controllen <= sizeof(control));
481 cmsg->cmsg_level = IPPROTO_IP;
482 cmsg->cmsg_type = IP_SENDSRCADDR;
483 memmove(CMSG_DATA(cmsg), &r->pktinfo.v4addr,
484 sizeof(struct in_addr));
485 cmsg->cmsg_len = CMSG_LEN(sizeof(struct in_addr));
487 verbose(VERB_ALGO, "no IP_PKTINFO or IP_SENDSRCADDR");
488 msg.msg_control = NULL;
489 #endif /* IP_PKTINFO or IP_SENDSRCADDR */
490 } else if(r->srctype == 6) {
/* IPv6 reply: attach IPV6_PKTINFO with the stored source info */
492 msg.msg_controllen = CMSG_SPACE(sizeof(struct in6_pktinfo));
493 log_assert(msg.msg_controllen <= sizeof(control));
494 cmsg->cmsg_level = IPPROTO_IPV6;
495 cmsg->cmsg_type = IPV6_PKTINFO;
496 memmove(CMSG_DATA(cmsg), &r->pktinfo.v6info,
497 sizeof(struct in6_pktinfo));
498 /* unset the ifindex to not bypass the routing tables */
499 cmsg_data = CMSG_DATA(cmsg);
500 ((struct in6_pktinfo *) cmsg_data)->ipi6_ifindex = 0;
501 cmsg->cmsg_len = CMSG_LEN(sizeof(struct in6_pktinfo));
503 /* try to pass all 0 to use default route */
504 msg.msg_controllen = CMSG_SPACE(sizeof(struct in6_pktinfo));
505 log_assert(msg.msg_controllen <= sizeof(control));
506 cmsg->cmsg_level = IPPROTO_IPV6;
507 cmsg->cmsg_type = IPV6_PKTINFO;
508 memset(CMSG_DATA(cmsg), 0, sizeof(struct in6_pktinfo));
509 cmsg->cmsg_len = CMSG_LEN(sizeof(struct in6_pktinfo));
511 #endif /* S_SPLINT_S */
512 if(verbosity >= VERB_ALGO)
513 p_ancil("send_udp over interface", r);
514 sent = sendmsg(c->fd, &msg, 0);
516 /* try again and block, waiting for IO to complete,
517 * we want to send the answer, and we will wait for
518 * the ethernet interface buffer to have space. */
520 if(errno == EAGAIN ||
522 errno == EWOULDBLOCK ||
526 if(WSAGetLastError() == WSAEINPROGRESS ||
527 WSAGetLastError() == WSAENOBUFS ||
528 WSAGetLastError() == WSAEWOULDBLOCK) {
/* blocking retry, then restore nonblocking mode */
532 sent = sendmsg(c->fd, &msg, 0);
534 fd_set_nonblock(c->fd);
539 if(!udp_send_errno_needs_log(addr, addrlen))
541 verbose(VERB_OPS, "sendmsg failed: %s", strerror(errno));
542 log_addr(VERB_OPS, "remote address is",
543 (struct sockaddr_storage*)addr, addrlen);
545 /* netbsd 7 has IP_PKTINFO for recv but not send */
546 if(errno == EINVAL && r->srctype == 4)
547 log_err("sendmsg: No support for sendmsg(IP_PKTINFO). "
548 "Please disable interface-automatic");
551 } else if((size_t)sent != sldns_buffer_remaining(packet)) {
552 log_err("sent %d in place of %d bytes",
553 (int)sent, (int)sldns_buffer_remaining(packet));
/* fallback when sendmsg/pktinfo is not available on this platform */
563 log_err("sendmsg: IPV6_PKTINFO not supported");
565 #endif /* AF_INET6 && IPV6_PKTINFO && HAVE_SENDMSG */
/** event callback for a UDP socket with interface-automatic: reads
 * packets with recvmsg(), captures pktinfo ancillary data, invokes the
 * comm point callback, and sends back any immediate reply over the same
 * interface.
 * NOTE(review): declarations, braces and some statements appear to have
 * been dropped from this body by the extraction. */
569 comm_point_udp_ancil_callback(int fd, short event, void* arg)
571 #if defined(AF_INET6) && defined(IPV6_PKTINFO) && defined(HAVE_RECVMSG)
572 struct comm_reply rep;
579 struct cmsghdr* cmsg;
580 #endif /* S_SPLINT_S */
582 rep.c = (struct comm_point*)arg;
583 log_assert(rep.c->type == comm_udp);
/* only handle read events */
585 if(!(event&UB_EV_READ))
587 log_assert(rep.c && rep.c->buffer && rep.c->fd == fd);
588 ub_comm_base_now(rep.c->ev->base);
/* drain several packets per event to reduce event-loop overhead */
589 for(i=0; i<NUM_UDP_PER_SELECT; i++) {
590 sldns_buffer_clear(rep.c->buffer);
591 rep.addrlen = (socklen_t)sizeof(rep.addr);
592 log_assert(fd != -1);
593 log_assert(sldns_buffer_remaining(rep.c->buffer) > 0);
594 msg.msg_name = &rep.addr;
595 msg.msg_namelen = (socklen_t)sizeof(rep.addr);
596 iov[0].iov_base = sldns_buffer_begin(rep.c->buffer);
597 iov[0].iov_len = sldns_buffer_remaining(rep.c->buffer);
600 msg.msg_control = ancil;
602 msg.msg_controllen = sizeof(ancil);
603 #endif /* S_SPLINT_S */
605 rcv = recvmsg(fd, &msg, 0);
/* EAGAIN/EINTR are normal for a nonblocking socket; do not log them */
607 if(errno != EAGAIN && errno != EINTR) {
608 log_err("recvmsg failed: %s", strerror(errno));
612 rep.addrlen = msg.msg_namelen;
613 sldns_buffer_skip(rep.c->buffer, rcv);
614 sldns_buffer_flip(rep.c->buffer);
/* walk the control messages to find the destination-address pktinfo */
617 for(cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
618 cmsg = CMSG_NXTHDR(&msg, cmsg)) {
619 if( cmsg->cmsg_level == IPPROTO_IPV6 &&
620 cmsg->cmsg_type == IPV6_PKTINFO) {
622 memmove(&rep.pktinfo.v6info, CMSG_DATA(cmsg),
623 sizeof(struct in6_pktinfo));
626 } else if( cmsg->cmsg_level == IPPROTO_IP &&
627 cmsg->cmsg_type == IP_PKTINFO) {
629 memmove(&rep.pktinfo.v4info, CMSG_DATA(cmsg),
630 sizeof(struct in_pktinfo));
632 #elif defined(IP_RECVDSTADDR)
633 } else if( cmsg->cmsg_level == IPPROTO_IP &&
634 cmsg->cmsg_type == IP_RECVDSTADDR) {
636 memmove(&rep.pktinfo.v4addr, CMSG_DATA(cmsg),
637 sizeof(struct in_addr));
639 #endif /* IP_PKTINFO or IP_RECVDSTADDR */
642 if(verbosity >= VERB_ALGO)
643 p_ancil("receive_udp on interface", &rep);
644 #endif /* S_SPLINT_S */
645 fptr_ok(fptr_whitelist_comm_point(rep.c->callback));
646 if((*rep.c->callback)(rep.c, rep.c->cb_arg, NETEVENT_NOERROR, &rep)) {
647 /* send back immediate reply */
648 (void)comm_point_send_udp_msg_if(rep.c, rep.c->buffer,
649 (struct sockaddr*)&rep.addr, rep.addrlen, &rep);
/* the callback may have closed the comm point; stop the loop then */
651 if(rep.c->fd == -1) /* commpoint closed */
658 fatal_exit("recvmsg: No support for IPV6_PKTINFO; IP_PKTINFO or IP_RECVDSTADDR. "
659 "Please disable interface-automatic");
660 #endif /* AF_INET6 && IPV6_PKTINFO && HAVE_RECVMSG */
/** event callback for a plain UDP socket: reads packets with recvfrom(),
 * invokes the comm point callback, and sends back any immediate reply.
 * NOTE(review): declarations, braces and some statements appear to have
 * been dropped from this body by the extraction. */
664 comm_point_udp_callback(int fd, short event, void* arg)
666 struct comm_reply rep;
669 struct sldns_buffer *buffer;
671 rep.c = (struct comm_point*)arg;
672 log_assert(rep.c->type == comm_udp);
/* only handle read events */
674 if(!(event&UB_EV_READ))
676 log_assert(rep.c && rep.c->buffer && rep.c->fd == fd);
677 ub_comm_base_now(rep.c->ev->base);
/* drain several packets per event to reduce event-loop overhead */
678 for(i=0; i<NUM_UDP_PER_SELECT; i++) {
679 sldns_buffer_clear(rep.c->buffer);
680 rep.addrlen = (socklen_t)sizeof(rep.addr);
681 log_assert(fd != -1);
682 log_assert(sldns_buffer_remaining(rep.c->buffer) > 0);
683 rcv = recvfrom(fd, (void*)sldns_buffer_begin(rep.c->buffer),
684 sldns_buffer_remaining(rep.c->buffer), 0,
685 (struct sockaddr*)&rep.addr, &rep.addrlen);
/* transient errors on nonblocking sockets are not logged */
688 if(errno != EAGAIN && errno != EINTR)
689 log_err("recvfrom %d failed: %s",
690 fd, strerror(errno));
692 if(WSAGetLastError() != WSAEINPROGRESS &&
693 WSAGetLastError() != WSAECONNRESET &&
694 WSAGetLastError()!= WSAEWOULDBLOCK)
695 log_err("recvfrom failed: %s",
696 wsa_strerror(WSAGetLastError()));
700 sldns_buffer_skip(rep.c->buffer, rcv);
701 sldns_buffer_flip(rep.c->buffer);
703 fptr_ok(fptr_whitelist_comm_point(rep.c->callback));
704 if((*rep.c->callback)(rep.c, rep.c->cb_arg, NETEVENT_NOERROR, &rep)) {
705 /* send back immediate reply */
/* with dnscrypt the encrypted reply is in the dnscrypt buffer */
707 buffer = rep.c->dnscrypt_buffer;
709 buffer = rep.c->buffer;
711 (void)comm_point_send_udp_msg(rep.c, buffer,
712 (struct sockaddr*)&rep.addr, rep.addrlen);
714 if(rep.c->fd != fd) /* commpoint closed to -1 or reused for
715 another UDP port. Note rep.c cannot be reused with TCP fd. */
720 /** Use a new tcp handler for new query fd, set to read query */
/* initializes handler state (buffers, read mode, byte count, timeout)
 * and starts listening on the accepted fd.
 * cur/max: current and maximum tcp handler counts, used to shorten the
 * timeout under load. */
722 setup_tcp_handler(struct comm_point* c, int fd, int cur, int max)
724 log_assert(c->type == comm_tcp);
725 log_assert(c->fd == -1);
726 sldns_buffer_clear(c->buffer);
729 sldns_buffer_clear(c->dnscrypt_buffer);
731 c->tcp_is_reading = 1;
732 c->tcp_byte_count = 0;
733 c->tcp_timeout_msec = TCP_QUERY_TIMEOUT;
734 /* if more than half the tcp handlers are in use, use a shorter
735 * timeout for this TCP connection, we need to make space for
736 * other connections to be able to get attention */
738 c->tcp_timeout_msec = TCP_QUERY_TIMEOUT_FAST;
739 comm_point_start_listening(c, fd, c->tcp_timeout_msec);
/** timeout callback for the slow-accept timer: the wait after running
 * out of file descriptors is over, so re-enable accepting connections */
742 void comm_base_handle_slow_accept(int ATTR_UNUSED(fd),
743 short ATTR_UNUSED(event), void* arg)
745 struct comm_base* b = (struct comm_base*)arg;
746 /* timeout for the slow accept, re-enable accepts again */
747 if(b->start_accept) {
748 verbose(VERB_ALGO, "wait is over, slow accept disabled");
749 fptr_ok(fptr_whitelist_start_accept(b->start_accept));
750 (*b->start_accept)(b->cb_arg);
751 b->eb->slow_accept_enabled = 0;
/** accept() a new connection on c->fd, storing the peer address in
 * addr/addrlen; handles transient errors and the out-of-descriptors
 * case by pausing accepts for a while (slow accept).
 * NOTE(review): declarations, braces and return statements appear to
 * have been dropped from this body by the extraction. */
755 int comm_point_perform_accept(struct comm_point* c,
756 struct sockaddr_storage* addr, socklen_t* addrlen)
759 *addrlen = (socklen_t)sizeof(*addr);
760 new_fd = accept(c->fd, (struct sockaddr*)addr, addrlen);
763 /* EINTR is signal interrupt. others are closed connection. */
764 if( errno == EINTR || errno == EAGAIN
766 || errno == EWOULDBLOCK
769 || errno == ECONNABORTED
776 #if defined(ENFILE) && defined(EMFILE)
777 if(errno == ENFILE || errno == EMFILE) {
778 /* out of file descriptors, likely outside of our
779 * control. stop accept() calls for some time */
780 if(c->ev->base->stop_accept) {
781 struct comm_base* b = c->ev->base;
783 verbose(VERB_ALGO, "out of file descriptors: "
785 b->eb->slow_accept_enabled = 1;
786 fptr_ok(fptr_whitelist_stop_accept(
788 (*b->stop_accept)(b->cb_arg);
789 /* set timeout, no mallocs */
790 tv.tv_sec = NETEVENT_SLOW_ACCEPT_TIME/1000;
791 tv.tv_usec = (NETEVENT_SLOW_ACCEPT_TIME%1000)*1000;
792 b->eb->slow_accept = ub_event_new(b->eb->base,
794 comm_base_handle_slow_accept, b);
795 if(b->eb->slow_accept == NULL) {
796 /* we do not want to log here, because
797 * that would spam the logfiles.
798 * error: "event_base_set failed." */
800 else if(ub_event_add(b->eb->slow_accept, &tv)
802 /* we do not want to log here,
803 * error: "event_add failed." */
809 log_err_addr("accept failed", strerror(errno), addr, *addrlen);
810 #else /* USE_WINSOCK */
811 if(WSAGetLastError() == WSAEINPROGRESS ||
812 WSAGetLastError() == WSAECONNRESET)
814 if(WSAGetLastError() == WSAEWOULDBLOCK) {
/* tell the winsock event layer we want another read indication */
815 ub_winsock_tcp_wouldblock(c->ev->ev, UB_EV_READ);
818 log_err_addr("accept failed", wsa_strerror(WSAGetLastError()),
/* the accepted socket must be nonblocking like the listener */
823 fd_set_nonblock(new_fd);
/** OpenSSL BIO callback used on windows: when a BIO read/write returned
 * 0 because of WSAEWOULDBLOCK, tell the winsock event layer to wait for
 * the corresponding readiness event. */
828 static long win_bio_cb(BIO *b, int oper, const char* ATTR_UNUSED(argp),
829 int ATTR_UNUSED(argi), long argl, long retvalue)
831 verbose(VERB_ALGO, "bio_cb %d, %s %s %s", oper,
832 (oper&BIO_CB_RETURN)?"return":"before",
833 (oper&BIO_CB_READ)?"read":((oper&BIO_CB_WRITE)?"write":"other"),
834 WSAGetLastError()==WSAEWOULDBLOCK?"wsawb":"");
835 /* on windows, check if previous operation caused EWOULDBLOCK */
836 if( (oper == (BIO_CB_READ|BIO_CB_RETURN) && argl == 0) ||
837 (oper == (BIO_CB_GETS|BIO_CB_RETURN) && argl == 0)) {
838 if(WSAGetLastError() == WSAEWOULDBLOCK)
839 ub_winsock_tcp_wouldblock((struct ub_event*)
840 BIO_get_callback_arg(b), UB_EV_READ);
842 if( (oper == (BIO_CB_WRITE|BIO_CB_RETURN) && argl == 0) ||
843 (oper == (BIO_CB_PUTS|BIO_CB_RETURN) && argl == 0)) {
844 if(WSAGetLastError() == WSAEWOULDBLOCK)
845 ub_winsock_tcp_wouldblock((struct ub_event*)
846 BIO_get_callback_arg(b), UB_EV_WRITE);
848 /* return original return value */
852 /** set win bio callbacks for nonblocking operations */
/* installs win_bio_cb on both the read and write BIO of the SSL
 * connection, passing the comm point's event as callback argument. */
854 comm_point_tcp_win_bio_cb(struct comm_point* c, void* thessl)
856 SSL* ssl = (SSL*)thessl;
857 /* set them both just in case, but usually they are the same BIO */
858 BIO_set_callback(SSL_get_rbio(ssl), &win_bio_cb);
859 BIO_set_callback_arg(SSL_get_rbio(ssl), (char*)c->ev->ev);
860 BIO_set_callback(SSL_get_wbio(ssl), &win_bio_cb);
861 BIO_set_callback_arg(SSL_get_wbio(ssl), (char*)c->ev->ev);
/** event callback for the tcp accept socket: accepts the connection,
 * optionally wraps it in SSL, and hands it to a free tcp handler.
 * NOTE(review): braces, returns and some statements appear to have been
 * dropped from this body by the extraction. */
866 comm_point_tcp_accept_callback(int fd, short event, void* arg)
868 struct comm_point* c = (struct comm_point*)arg, *c_hdl;
870 log_assert(c->type == comm_tcp_accept);
871 if(!(event & UB_EV_READ)) {
872 log_info("ignoring tcp accept event %d", (int)event);
875 ub_comm_base_now(c->ev->base);
876 /* find free tcp handler. */
878 log_warn("accepted too many tcp, connections full");
881 /* accept incoming connection. */
883 log_assert(fd != -1);
885 new_fd = comm_point_perform_accept(c, &c_hdl->repinfo.addr,
886 &c_hdl->repinfo.addrlen);
/* wrap the new connection in SSL when the listener has a context */
890 c_hdl->ssl = incoming_ssl_fd(c->ssl, new_fd);
893 comm_point_close(c_hdl);
/* the handshake starts by reading the client hello */
896 c_hdl->ssl_shake_state = comm_ssl_shake_read;
898 comm_point_tcp_win_bio_cb(c_hdl, c_hdl->ssl);
902 /* grab the tcp handler buffers */
904 c->tcp_free = c_hdl->tcp_free;
906 /* stop accepting incoming queries for now. */
907 comm_point_stop_listening(c);
909 setup_tcp_handler(c_hdl, new_fd, c->cur_tcp_count, c->max_tcp_count);
912 /** Make tcp handler free for next assignment */
/* shuts down SSL if present, returns the handler to its parent's free
 * list, and re-enables the accept socket. */
914 reclaim_tcp_handler(struct comm_point* c)
916 log_assert(c->type == comm_tcp);
919 SSL_shutdown(c->ssl);
/* push this handler back onto the parent's free list */
926 c->tcp_parent->cur_tcp_count--;
927 c->tcp_free = c->tcp_parent->tcp_free;
928 c->tcp_parent->tcp_free = c;
930 /* re-enable listening on accept socket */
931 comm_point_start_listening(c->tcp_parent, -1, -1);
936 /** do the callback when writing is done */
/* reply fully written: reset buffer and byte count and, when the comm
 * point toggles between read/write, go back to reading the next query. */
938 tcp_callback_writer(struct comm_point* c)
940 log_assert(c->type == comm_tcp);
941 sldns_buffer_clear(c->buffer);
942 if(c->tcp_do_toggle_rw)
943 c->tcp_is_reading = 1;
944 c->tcp_byte_count = 0;
945 /* switch from listening(write) to listening(read) */
946 comm_point_stop_listening(c);
947 comm_point_start_listening(c, -1, -1);
950 /** do the callback when reading is done */
/* query fully read: flip the buffer for reading, switch to write mode
 * if toggling, and invoke the comm point callback; when the callback
 * wants a reply sent, start listening again with the tcp timeout. */
952 tcp_callback_reader(struct comm_point* c)
954 log_assert(c->type == comm_tcp || c->type == comm_local);
955 sldns_buffer_flip(c->buffer);
956 if(c->tcp_do_toggle_rw)
957 c->tcp_is_reading = 0;
958 c->tcp_byte_count = 0;
959 if(c->type == comm_tcp)
960 comm_point_stop_listening(c);
961 fptr_ok(fptr_whitelist_comm_point(c->callback));
962 if( (*c->callback)(c, c->cb_arg, NETEVENT_NOERROR, &c->repinfo) ) {
963 comm_point_start_listening(c, -1, c->tcp_timeout_msec);
967 /** continue ssl handshake */
/* drives SSL_do_handshake on the comm point's SSL connection; on
 * WANT_READ/WANT_WRITE it records the shake state and switches the
 * listened-for events, returning to be called again later.
 * NOTE(review): braces and return statements appear to have been
 * dropped from this body by the extraction. */
970 ssl_handshake(struct comm_point* c)
973 if(c->ssl_shake_state == comm_ssl_shake_hs_read) {
974 /* read condition satisfied back to writing */
975 comm_point_listen_for_rw(c, 1, 1);
976 c->ssl_shake_state = comm_ssl_shake_none;
979 if(c->ssl_shake_state == comm_ssl_shake_hs_write) {
980 /* write condition satisfied, back to reading */
981 comm_point_listen_for_rw(c, 1, 0);
982 c->ssl_shake_state = comm_ssl_shake_none;
987 r = SSL_do_handshake(c->ssl);
989 int want = SSL_get_error(c->ssl, r);
990 if(want == SSL_ERROR_WANT_READ) {
/* already waiting for read; avoid churning the event registration */
991 if(c->ssl_shake_state == comm_ssl_shake_read)
993 c->ssl_shake_state = comm_ssl_shake_read;
994 comm_point_listen_for_rw(c, 1, 0);
996 } else if(want == SSL_ERROR_WANT_WRITE) {
997 if(c->ssl_shake_state == comm_ssl_shake_write)
999 c->ssl_shake_state = comm_ssl_shake_write;
1000 comm_point_listen_for_rw(c, 0, 1);
1003 return 0; /* closed */
1004 } else if(want == SSL_ERROR_SYSCALL) {
1005 /* SYSCALL and errno==0 means closed uncleanly */
1007 log_err("SSL_handshake syscall: %s",
1011 log_crypto_err("ssl handshake failed");
1012 log_addr(1, "ssl handshake failed", &c->repinfo.addr,
1013 c->repinfo.addrlen);
1017 /* this is where peer verification could take place */
1018 log_addr(VERB_ALGO, "SSL DNS connection", &c->repinfo.addr,
1019 c->repinfo.addrlen);
1021 /* setup listen rw correctly */
1022 if(c->tcp_is_reading) {
1023 if(c->ssl_shake_state != comm_ssl_shake_read)
1024 comm_point_listen_for_rw(c, 1, 0);
1026 comm_point_listen_for_rw(c, 1, 1);
1028 c->ssl_shake_state = comm_ssl_shake_none;
1031 #endif /* HAVE_SSL */
1033 /** ssl read callback on TCP */
/* reads a DNS-over-TLS message: first the 2-byte length prefix, then
 * the payload, via SSL_read; handles WANT_READ/WANT_WRITE by adjusting
 * the listened events. Calls tcp_callback_reader when complete.
 * NOTE(review): braces and return statements appear to have been
 * dropped from this body by the extraction. */
1035 ssl_handle_read(struct comm_point* c)
/* finish any pending handshake before reading application data */
1039 if(c->ssl_shake_state != comm_ssl_shake_none) {
1040 if(!ssl_handshake(c))
1042 if(c->ssl_shake_state != comm_ssl_shake_none)
1045 if(c->tcp_byte_count < sizeof(uint16_t)) {
1046 /* read length bytes */
1048 if((r=SSL_read(c->ssl, (void*)sldns_buffer_at(c->buffer,
1049 c->tcp_byte_count), (int)(sizeof(uint16_t) -
1050 c->tcp_byte_count))) <= 0) {
1051 int want = SSL_get_error(c->ssl, r);
1052 if(want == SSL_ERROR_ZERO_RETURN) {
1053 return 0; /* shutdown, closed */
1054 } else if(want == SSL_ERROR_WANT_READ) {
1055 return 1; /* read more later */
1056 } else if(want == SSL_ERROR_WANT_WRITE) {
1057 c->ssl_shake_state = comm_ssl_shake_hs_write;
1058 comm_point_listen_for_rw(c, 0, 1);
1060 } else if(want == SSL_ERROR_SYSCALL) {
1062 log_err("SSL_read syscall: %s",
1066 log_crypto_err("could not SSL_read");
1069 c->tcp_byte_count += r;
1070 if(c->tcp_byte_count != sizeof(uint16_t))
/* reject messages longer than the receive buffer */
1072 if(sldns_buffer_read_u16_at(c->buffer, 0) >
1073 sldns_buffer_capacity(c->buffer)) {
1074 verbose(VERB_QUERY, "ssl: dropped larger than buffer");
1077 sldns_buffer_set_limit(c->buffer,
1078 sldns_buffer_read_u16_at(c->buffer, 0));
1079 if(sldns_buffer_limit(c->buffer) < LDNS_HEADER_SIZE) {
1080 verbose(VERB_QUERY, "ssl: dropped bogus too short.");
1083 verbose(VERB_ALGO, "Reading ssl tcp query of length %d",
1084 (int)sldns_buffer_limit(c->buffer));
1086 log_assert(sldns_buffer_remaining(c->buffer) > 0);
/* read the message payload into the buffer */
1088 r = SSL_read(c->ssl, (void*)sldns_buffer_current(c->buffer),
1089 (int)sldns_buffer_remaining(c->buffer));
1091 int want = SSL_get_error(c->ssl, r);
1092 if(want == SSL_ERROR_ZERO_RETURN) {
1093 return 0; /* shutdown, closed */
1094 } else if(want == SSL_ERROR_WANT_READ) {
1095 return 1; /* read more later */
1096 } else if(want == SSL_ERROR_WANT_WRITE) {
1097 c->ssl_shake_state = comm_ssl_shake_hs_write;
1098 comm_point_listen_for_rw(c, 0, 1);
1100 } else if(want == SSL_ERROR_SYSCALL) {
1102 log_err("SSL_read syscall: %s",
1106 log_crypto_err("could not SSL_read");
1109 sldns_buffer_skip(c->buffer, (ssize_t)r);
1110 if(sldns_buffer_remaining(c->buffer) <= 0) {
1111 tcp_callback_reader(c);
1117 #endif /* HAVE_SSL */
1120 /** ssl write callback on TCP */
/* writes a DNS-over-TLS reply: first the 2-byte length prefix, then
 * the payload, via SSL_write; handles WANT_READ/WANT_WRITE by
 * adjusting the listened events. Calls tcp_callback_writer when done.
 * NOTE(review): braces and return statements appear to have been
 * dropped from this body by the extraction. */
1122 ssl_handle_write(struct comm_point* c)
/* finish any pending handshake before writing application data */
1126 if(c->ssl_shake_state != comm_ssl_shake_none) {
1127 if(!ssl_handshake(c))
1129 if(c->ssl_shake_state != comm_ssl_shake_none)
1132 /* ignore return, if fails we may simply block */
1133 (void)SSL_set_mode(c->ssl, SSL_MODE_ENABLE_PARTIAL_WRITE);
1134 if(c->tcp_byte_count < sizeof(uint16_t)) {
/* the length prefix is the message size in network byte order */
1135 uint16_t len = htons(sldns_buffer_limit(c->buffer));
1137 r = SSL_write(c->ssl,
1138 (void*)(((uint8_t*)&len)+c->tcp_byte_count),
1139 (int)(sizeof(uint16_t)-c->tcp_byte_count));
1141 int want = SSL_get_error(c->ssl, r);
1142 if(want == SSL_ERROR_ZERO_RETURN) {
1143 return 0; /* closed */
1144 } else if(want == SSL_ERROR_WANT_READ) {
1145 c->ssl_shake_state = comm_ssl_shake_read;
1146 comm_point_listen_for_rw(c, 1, 0);
1147 return 1; /* wait for read condition */
1148 } else if(want == SSL_ERROR_WANT_WRITE) {
1149 return 1; /* write more later */
1150 } else if(want == SSL_ERROR_SYSCALL) {
1152 log_err("SSL_write syscall: %s",
1156 log_crypto_err("could not SSL_write");
1159 c->tcp_byte_count += r;
1160 if(c->tcp_byte_count < sizeof(uint16_t))
/* position the buffer past the part of the payload already sent */
1162 sldns_buffer_set_position(c->buffer, c->tcp_byte_count -
1164 if(sldns_buffer_remaining(c->buffer) == 0) {
1165 tcp_callback_writer(c);
1169 log_assert(sldns_buffer_remaining(c->buffer) > 0);
1171 r = SSL_write(c->ssl, (void*)sldns_buffer_current(c->buffer),
1172 (int)sldns_buffer_remaining(c->buffer));
1174 int want = SSL_get_error(c->ssl, r);
1175 if(want == SSL_ERROR_ZERO_RETURN) {
1176 return 0; /* closed */
1177 } else if(want == SSL_ERROR_WANT_READ) {
1178 c->ssl_shake_state = comm_ssl_shake_read;
1179 comm_point_listen_for_rw(c, 1, 0);
1180 return 1; /* wait for read condition */
1181 } else if(want == SSL_ERROR_WANT_WRITE) {
1182 return 1; /* write more later */
1183 } else if(want == SSL_ERROR_SYSCALL) {
1185 log_err("SSL_write syscall: %s",
1189 log_crypto_err("could not SSL_write");
1192 sldns_buffer_skip(c->buffer, (ssize_t)r);
1194 if(sldns_buffer_remaining(c->buffer) == 0) {
1195 tcp_callback_writer(c);
1201 #endif /* HAVE_SSL */
1204 /** handle ssl tcp connection with dns contents */
/* dispatches to the ssl read or write handler based on the comm
 * point's current direction. */
1206 ssl_handle_it(struct comm_point* c)
1208 if(c->tcp_is_reading)
1209 return ssl_handle_read(c);
1210 return ssl_handle_write(c);
1213 /** Handle tcp reading callback.
1214 * @param fd: file descriptor of socket.
1215 * @param c: comm point to read from into buffer.
1216 * @param short_ok: if true, very short packets are OK (for comm_local).
1217 * @return: 0 on error
/* reads the 2-byte length prefix and then the DNS message with recv();
 * delegates to ssl_handle_it for TLS connections; calls
 * tcp_callback_reader when the message is complete.
 * NOTE(review): braces and return statements appear to have been
 * dropped from this body by the extraction. */
1220 comm_point_tcp_handle_read(int fd, struct comm_point* c, int short_ok)
1223 log_assert(c->type == comm_tcp || c->type == comm_local);
/* TLS connections are handled by the ssl read/write path */
1225 return ssl_handle_it(c);
1226 if(!c->tcp_is_reading)
1229 log_assert(fd != -1);
1230 if(c->tcp_byte_count < sizeof(uint16_t)) {
1231 /* read length bytes */
1232 r = recv(fd,(void*)sldns_buffer_at(c->buffer,c->tcp_byte_count),
1233 sizeof(uint16_t)-c->tcp_byte_count, 0);
1238 if(errno == EINTR || errno == EAGAIN)
1241 if(errno == ECONNRESET && verbosity < 2)
1242 return 0; /* silence reset by peer */
1244 log_err_addr("read (in tcp s)", strerror(errno),
1245 &c->repinfo.addr, c->repinfo.addrlen);
1246 #else /* USE_WINSOCK */
1247 if(WSAGetLastError() == WSAECONNRESET)
1249 if(WSAGetLastError() == WSAEINPROGRESS)
1251 if(WSAGetLastError() == WSAEWOULDBLOCK) {
1252 ub_winsock_tcp_wouldblock(c->ev->ev,
1256 log_err_addr("read (in tcp s)",
1257 wsa_strerror(WSAGetLastError()),
1258 &c->repinfo.addr, c->repinfo.addrlen);
1262 c->tcp_byte_count += r;
1263 if(c->tcp_byte_count != sizeof(uint16_t))
/* reject messages longer than the receive buffer */
1265 if(sldns_buffer_read_u16_at(c->buffer, 0) >
1266 sldns_buffer_capacity(c->buffer)) {
1267 verbose(VERB_QUERY, "tcp: dropped larger than buffer");
1270 sldns_buffer_set_limit(c->buffer,
1271 sldns_buffer_read_u16_at(c->buffer, 0));
/* too-short check is skipped when short_ok (comm_local) */
1273 sldns_buffer_limit(c->buffer) < LDNS_HEADER_SIZE) {
1274 verbose(VERB_QUERY, "tcp: dropped bogus too short.");
1277 verbose(VERB_ALGO, "Reading tcp query of length %d",
1278 (int)sldns_buffer_limit(c->buffer));
1281 log_assert(sldns_buffer_remaining(c->buffer) > 0);
/* read the message payload into the buffer */
1282 r = recv(fd, (void*)sldns_buffer_current(c->buffer),
1283 sldns_buffer_remaining(c->buffer), 0);
1286 } else if(r == -1) {
1288 if(errno == EINTR || errno == EAGAIN)
1290 log_err_addr("read (in tcp r)", strerror(errno),
1291 &c->repinfo.addr, c->repinfo.addrlen);
1292 #else /* USE_WINSOCK */
1293 if(WSAGetLastError() == WSAECONNRESET)
1295 if(WSAGetLastError() == WSAEINPROGRESS)
1297 if(WSAGetLastError() == WSAEWOULDBLOCK) {
1298 ub_winsock_tcp_wouldblock(c->ev->ev, UB_EV_READ);
1301 log_err_addr("read (in tcp r)",
1302 wsa_strerror(WSAGetLastError()),
1303 &c->repinfo.addr, c->repinfo.addrlen);
1307 sldns_buffer_skip(c->buffer, r);
1308 if(sldns_buffer_remaining(c->buffer) <= 0) {
1309 tcp_callback_reader(c);
1315 * Handle tcp writing callback.
1316 * @param fd: file descriptor of socket.
1317 * @param c: comm point to write buffer out of.
1318 * @return: 0 on error
1321 comm_point_tcp_handle_write(int fd, struct comm_point* c)
1324 struct sldns_buffer *buffer;
1325 log_assert(c->type == comm_tcp);
/* for dnscrypt channels the (en)crypted data lives in dnscrypt_buffer */
1327 buffer = c->dnscrypt_buffer;
1331 if(c->tcp_is_reading && !c->ssl)
1333 log_assert(fd != -1);
1334 if(c->tcp_byte_count == 0 && c->tcp_check_nb_connect) {
1335 /* check for pending error from nonblocking connect */
1336 /* from Stevens, unix network programming, vol1, 3rd ed, p450*/
1338 socklen_t len = (socklen_t)sizeof(error);
1339 if(getsockopt(fd, SOL_SOCKET, SO_ERROR, (void*)&error,
1342 error = errno; /* on solaris errno is error */
1343 #else /* USE_WINSOCK */
1344 error = WSAGetLastError();
1348 #if defined(EINPROGRESS) && defined(EWOULDBLOCK)
1349 if(error == EINPROGRESS || error == EWOULDBLOCK)
1350 return 1; /* try again later */
1353 if(error != 0 && verbosity < 2)
1354 return 0; /* silence lots of chatter in the logs */
1355 else if(error != 0) {
1356 log_err_addr("tcp connect", strerror(error),
1357 &c->repinfo.addr, c->repinfo.addrlen);
1358 #else /* USE_WINSOCK */
1360 if(error == WSAEINPROGRESS)
1362 else if(error == WSAEWOULDBLOCK) {
1363 ub_winsock_tcp_wouldblock(c->ev->ev, UB_EV_WRITE);
1365 } else if(error != 0 && verbosity < 2)
1367 else if(error != 0) {
1368 log_err_addr("tcp connect", wsa_strerror(error),
1369 &c->repinfo.addr, c->repinfo.addrlen);
1370 #endif /* USE_WINSOCK */
/* SSL connections write through the SSL handler instead */
1375 return ssl_handle_it(c);
1377 #ifdef USE_MSG_FASTOPEN
1378 /* Only try this on first use of a connection that uses tfo,
1379 otherwise fall through to normal write */
1380 /* Also, TFO support on WINDOWS not implemented at the moment */
1381 if(c->tcp_do_fastopen == 1) {
1382 /* this form of sendmsg() does both a connect() and send() so need to
1383 look for various flavours of error*/
1384 uint16_t len = htons(sldns_buffer_limit(buffer));
1386 struct iovec iov[2];
1387 c->tcp_do_fastopen = 0;
1388 memset(&msg, 0, sizeof(msg));
/* iov[0] = remaining bytes of the 2-byte length prefix,
 * iov[1] = the message body itself */
1389 iov[0].iov_base = (uint8_t*)&len + c->tcp_byte_count;
1390 iov[0].iov_len = sizeof(uint16_t) - c->tcp_byte_count;
1391 iov[1].iov_base = sldns_buffer_begin(buffer);
1392 iov[1].iov_len = sldns_buffer_limit(buffer);
1393 log_assert(iov[0].iov_len > 0);
1394 log_assert(iov[1].iov_len > 0);
1395 msg.msg_name = &c->repinfo.addr;
1396 msg.msg_namelen = c->repinfo.addrlen;
1399 r = sendmsg(fd, &msg, MSG_FASTOPEN);
1401 #if defined(EINPROGRESS) && defined(EWOULDBLOCK)
1402 /* Handshake is underway, maybe because no TFO cookie available.
1403 Come back to write the message*/
1404 if(errno == EINPROGRESS || errno == EWOULDBLOCK)
1407 if(errno == EINTR || errno == EAGAIN)
1409 /* Not handling EISCONN here as shouldn't ever hit that case.*/
1410 if(errno != 0 && verbosity < 2)
1411 return 0; /* silence lots of chatter in the logs */
1413 log_err_addr("tcp sendmsg", strerror(errno),
1414 &c->repinfo.addr, c->repinfo.addrlen);
1417 c->tcp_byte_count += r;
1418 if(c->tcp_byte_count < sizeof(uint16_t))
1420 sldns_buffer_set_position(buffer, c->tcp_byte_count -
1422 if(sldns_buffer_remaining(buffer) == 0) {
1423 tcp_callback_writer(c);
1428 #endif /* USE_MSG_FASTOPEN */
/* normal path: first finish sending the 2-byte length prefix (with the
 * start of the body coalesced via writev where available) */
1430 if(c->tcp_byte_count < sizeof(uint16_t)) {
1431 uint16_t len = htons(sldns_buffer_limit(buffer));
1433 struct iovec iov[2];
1434 iov[0].iov_base = (uint8_t*)&len + c->tcp_byte_count;
1435 iov[0].iov_len = sizeof(uint16_t) - c->tcp_byte_count;
1436 iov[1].iov_base = sldns_buffer_begin(buffer);
1437 iov[1].iov_len = sldns_buffer_limit(buffer);
1438 log_assert(iov[0].iov_len > 0);
1439 log_assert(iov[1].iov_len > 0);
1440 r = writev(fd, iov, 2);
1441 #else /* HAVE_WRITEV */
1442 r = send(fd, (void*)(((uint8_t*)&len)+c->tcp_byte_count),
1443 sizeof(uint16_t)-c->tcp_byte_count, 0);
1444 #endif /* HAVE_WRITEV */
1448 if(errno == EPIPE && verbosity < 2)
1449 return 0; /* silence 'broken pipe' */
1451 if(errno == EINTR || errno == EAGAIN)
1454 log_err_addr("tcp writev", strerror(errno),
1455 &c->repinfo.addr, c->repinfo.addrlen);
1456 # else /* HAVE_WRITEV */
1457 log_err_addr("tcp send s", strerror(errno),
1458 &c->repinfo.addr, c->repinfo.addrlen);
1459 # endif /* HAVE_WRITEV */
1461 if(WSAGetLastError() == WSAENOTCONN)
1463 if(WSAGetLastError() == WSAEINPROGRESS)
1465 if(WSAGetLastError() == WSAEWOULDBLOCK) {
1466 ub_winsock_tcp_wouldblock(c->ev->ev,
1470 log_err_addr("tcp send s",
1471 wsa_strerror(WSAGetLastError()),
1472 &c->repinfo.addr, c->repinfo.addrlen);
1476 c->tcp_byte_count += r;
1477 if(c->tcp_byte_count < sizeof(uint16_t))
1479 sldns_buffer_set_position(buffer, c->tcp_byte_count -
1481 if(sldns_buffer_remaining(buffer) == 0) {
1482 tcp_callback_writer(c);
/* prefix fully sent: stream the remaining message body */
1486 log_assert(sldns_buffer_remaining(buffer) > 0);
1487 r = send(fd, (void*)sldns_buffer_current(buffer),
1488 sldns_buffer_remaining(buffer), 0);
1491 if(errno == EINTR || errno == EAGAIN)
1493 log_err_addr("tcp send r", strerror(errno),
1494 &c->repinfo.addr, c->repinfo.addrlen);
1496 if(WSAGetLastError() == WSAEINPROGRESS)
1498 if(WSAGetLastError() == WSAEWOULDBLOCK) {
1499 ub_winsock_tcp_wouldblock(c->ev->ev, UB_EV_WRITE);
1502 log_err_addr("tcp send r", wsa_strerror(WSAGetLastError()),
1503 &c->repinfo.addr, c->repinfo.addrlen);
1507 sldns_buffer_skip(buffer, r);
/* everything written: notify the writer callback */
1509 if(sldns_buffer_remaining(buffer) == 0) {
1510 tcp_callback_writer(c);
/* Event-loop callback for a TCP comm point: dispatches read, write and
 * timeout events; on failure the handler is reclaimed and the user
 * callback is invoked with NETEVENT_CLOSED / NETEVENT_TIMEOUT. */
1517 comm_point_tcp_handle_callback(int fd, short event, void* arg)
1519 struct comm_point* c = (struct comm_point*)arg;
1520 log_assert(c->type == comm_tcp);
1521 ub_comm_base_now(c->ev->base);
1524 /* Initialize if this is a dnscrypt socket */
1526 c->dnscrypt = c->tcp_parent->dnscrypt;
/* lazily allocate a separate dnscrypt buffer on first use */
1528 if(c->dnscrypt && c->dnscrypt_buffer == c->buffer) {
1529 c->dnscrypt_buffer = sldns_buffer_new(sldns_buffer_capacity(c->buffer));
1530 if(!c->dnscrypt_buffer) {
1531 log_err("Could not allocate dnscrypt buffer");
/* readable: pull data; a 0 return means error/close */
1537 if(event&UB_EV_READ) {
1538 if(!comm_point_tcp_handle_read(fd, c, 0)) {
1539 reclaim_tcp_handler(c);
1540 if(!c->tcp_do_close) {
1541 fptr_ok(fptr_whitelist_comm_point(
1543 (void)(*c->callback)(c, c->cb_arg,
1544 NETEVENT_CLOSED, NULL);
/* writable: push pending data; a 0 return means error/close */
1549 if(event&UB_EV_WRITE) {
1550 if(!comm_point_tcp_handle_write(fd, c)) {
1551 reclaim_tcp_handler(c);
1552 if(!c->tcp_do_close) {
1553 fptr_ok(fptr_whitelist_comm_point(
1555 (void)(*c->callback)(c, c->cb_arg,
1556 NETEVENT_CLOSED, NULL);
/* inactivity timeout: drop the connection */
1561 if(event&UB_EV_TIMEOUT) {
1562 verbose(VERB_QUERY, "tcp took too long, dropped");
1563 reclaim_tcp_handler(c);
1564 if(!c->tcp_do_close) {
1565 fptr_ok(fptr_whitelist_comm_point(c->callback));
1566 (void)(*c->callback)(c, c->cb_arg,
1567 NETEVENT_TIMEOUT, NULL);
1571 log_err("Ignored event %d for tcphdl.", event);
/* Event-loop callback for a comm_local (unix/pipe) point. Reads with
 * short_ok=1 (very short packets allowed) and reports close via the
 * user callback. */
1574 void comm_point_local_handle_callback(int fd, short event, void* arg)
1576 struct comm_point* c = (struct comm_point*)arg;
1577 log_assert(c->type == comm_local);
1578 ub_comm_base_now(c->ev->base);
1580 if(event&UB_EV_READ) {
1581 if(!comm_point_tcp_handle_read(fd, c, 1)) {
1582 fptr_ok(fptr_whitelist_comm_point(c->callback));
1583 (void)(*c->callback)(c, c->cb_arg, NETEVENT_CLOSED,
1588 log_err("Ignored event %d for localhdl.", event);
/* Event-loop callback for a raw comm point: simply forwards the event
 * (NOERROR, or TIMEOUT on UB_EV_TIMEOUT) to the user callback. */
1591 void comm_point_raw_handle_callback(int ATTR_UNUSED(fd),
1592 short event, void* arg)
1594 struct comm_point* c = (struct comm_point*)arg;
1595 int err = NETEVENT_NOERROR;
1596 log_assert(c->type == comm_raw);
1597 ub_comm_base_now(c->ev->base);
1599 if(event&UB_EV_TIMEOUT)
1600 err = NETEVENT_TIMEOUT;
1601 fptr_ok(fptr_whitelist_comm_point_raw(c->callback));
1602 (void)(*c->callback)(c, c->cb_arg, err, NULL);
/* Create a UDP comm point on fd: allocates the comm_point and its
 * internal event, zeroes all TCP-related state (unused for UDP), and
 * registers a persistent READ event calling comm_point_udp_callback.
 * Returns NULL on allocation or event-registration failure (the partly
 * built point is freed via comm_point_delete). */
1606 comm_point_create_udp(struct comm_base *base, int fd, sldns_buffer* buffer,
1607 comm_point_callback_type* callback, void* callback_arg)
1609 struct comm_point* c = (struct comm_point*)calloc(1,
1610 sizeof(struct comm_point));
1614 c->ev = (struct internal_event*)calloc(1,
1615 sizeof(struct internal_event));
1624 c->tcp_is_reading = 0;
1625 c->tcp_byte_count = 0;
1626 c->tcp_parent = NULL;
1627 c->max_tcp_count = 0;
1628 c->cur_tcp_count = 0;
1629 c->tcp_handlers = NULL;
1632 c->tcp_do_close = 0;
1633 c->do_not_close = 0;
1634 c->tcp_do_toggle_rw = 0;
1635 c->tcp_check_nb_connect = 0;
1636 #ifdef USE_MSG_FASTOPEN
1637 c->tcp_do_fastopen = 0;
1641 c->dnscrypt_buffer = buffer;
1644 c->callback = callback;
1645 c->cb_arg = callback_arg;
1646 evbits = UB_EV_READ | UB_EV_PERSIST;
1647 /* ub_event stuff */
1648 c->ev->ev = ub_event_new(base->eb->base, c->fd, evbits,
1649 comm_point_udp_callback, c);
1650 if(c->ev->ev == NULL) {
1651 log_err("could not baseset udp event");
1652 comm_point_delete(c);
/* fd of -1 means "not yet connected"; the event is added later */
1655 if(fd!=-1 && ub_event_add(c->ev->ev, c->timeout) != 0 ) {
1656 log_err("could not add udp event");
1657 comm_point_delete(c);
/* Like comm_point_create_udp, but registers comm_point_udp_ancil_callback
 * so ancillary data (e.g. destination-address info) is processed on
 * receive. Returns NULL on allocation or event failure. */
1664 comm_point_create_udp_ancil(struct comm_base *base, int fd,
1665 sldns_buffer* buffer,
1666 comm_point_callback_type* callback, void* callback_arg)
1668 struct comm_point* c = (struct comm_point*)calloc(1,
1669 sizeof(struct comm_point));
1673 c->ev = (struct internal_event*)calloc(1,
1674 sizeof(struct internal_event));
1683 c->tcp_is_reading = 0;
1684 c->tcp_byte_count = 0;
1685 c->tcp_parent = NULL;
1686 c->max_tcp_count = 0;
1687 c->cur_tcp_count = 0;
1688 c->tcp_handlers = NULL;
1691 c->tcp_do_close = 0;
1692 c->do_not_close = 0;
1695 c->dnscrypt_buffer = buffer;
1698 c->tcp_do_toggle_rw = 0;
1699 c->tcp_check_nb_connect = 0;
1700 #ifdef USE_MSG_FASTOPEN
1701 c->tcp_do_fastopen = 0;
1703 c->callback = callback;
1704 c->cb_arg = callback_arg;
1705 evbits = UB_EV_READ | UB_EV_PERSIST;
1706 /* ub_event stuff */
1707 c->ev->ev = ub_event_new(base->eb->base, c->fd, evbits,
1708 comm_point_udp_ancil_callback, c);
1709 if(c->ev->ev == NULL) {
1710 log_err("could not baseset udp event");
1711 comm_point_delete(c);
1714 if(fd!=-1 && ub_event_add(c->ev->ev, c->timeout) != 0 ) {
1715 log_err("could not add udp event");
1716 comm_point_delete(c);
/* Create one preallocated TCP handler child for an accepting parent:
 * allocates the comm_point, its buffer (bufsize) and timeout struct,
 * links it onto the parent's free list, and registers a persistent
 * READ|TIMEOUT event. Returns NULL on failure after unwinding. */
1722 static struct comm_point*
1723 comm_point_create_tcp_handler(struct comm_base *base,
1724 struct comm_point* parent, size_t bufsize,
1725 comm_point_callback_type* callback, void* callback_arg)
1727 struct comm_point* c = (struct comm_point*)calloc(1,
1728 sizeof(struct comm_point));
1732 c->ev = (struct internal_event*)calloc(1,
1733 sizeof(struct internal_event));
1740 c->buffer = sldns_buffer_new(bufsize);
1746 c->timeout = (struct timeval*)malloc(sizeof(struct timeval));
1748 sldns_buffer_free(c->buffer);
1753 c->tcp_is_reading = 0;
1754 c->tcp_byte_count = 0;
1755 c->tcp_parent = parent;
1756 c->max_tcp_count = 0;
1757 c->cur_tcp_count = 0;
1758 c->tcp_handlers = NULL;
1761 c->tcp_do_close = 0;
1762 c->do_not_close = 0;
/* handlers toggle between read and write on the same connection */
1763 c->tcp_do_toggle_rw = 1;
1764 c->tcp_check_nb_connect = 0;
1765 #ifdef USE_MSG_FASTOPEN
1766 c->tcp_do_fastopen = 0;
1770 // We don't know just yet if this is a dnscrypt channel. Allocation
1771 // will be done when handling the callback.
1772 c->dnscrypt_buffer = c->buffer;
1775 c->callback = callback;
1776 c->cb_arg = callback_arg;
1777 /* add to parent free list */
1778 c->tcp_free = parent->tcp_free;
1779 parent->tcp_free = c;
1780 /* ub_event stuff */
1781 evbits = UB_EV_PERSIST | UB_EV_READ | UB_EV_TIMEOUT;
1782 c->ev->ev = ub_event_new(base->eb->base, c->fd, evbits,
1783 comm_point_tcp_handle_callback, c);
1784 if(c->ev->ev == NULL)
1786 log_err("could not basetset tcphdl event");
/* unlink from the parent's free list again on failure */
1787 parent->tcp_free = c->tcp_free;
/* Create a TCP-accept comm point on fd plus `num` preallocated handler
 * children (each with its own bufsize buffer). Registers a persistent
 * READ event for accepting connections. Returns NULL on any failure;
 * comm_point_delete cleans up partially built state. */
1796 comm_point_create_tcp(struct comm_base *base, int fd, int num, size_t bufsize,
1797 comm_point_callback_type* callback, void* callback_arg)
1799 struct comm_point* c = (struct comm_point*)calloc(1,
1800 sizeof(struct comm_point));
1803 /* first allocate the TCP accept listener */
1806 c->ev = (struct internal_event*)calloc(1,
1807 sizeof(struct internal_event));
1816 c->tcp_is_reading = 0;
1817 c->tcp_byte_count = 0;
1818 c->tcp_parent = NULL;
1819 c->max_tcp_count = num;
1820 c->cur_tcp_count = 0;
1821 c->tcp_handlers = (struct comm_point**)calloc((size_t)num,
1822 sizeof(struct comm_point*));
1823 if(!c->tcp_handlers) {
1829 c->type = comm_tcp_accept;
1830 c->tcp_do_close = 0;
1831 c->do_not_close = 0;
1832 c->tcp_do_toggle_rw = 0;
1833 c->tcp_check_nb_connect = 0;
1834 #ifdef USE_MSG_FASTOPEN
1835 c->tcp_do_fastopen = 0;
1839 c->dnscrypt_buffer = NULL;
1843 evbits = UB_EV_READ | UB_EV_PERSIST;
1844 /* ub_event stuff */
1845 c->ev->ev = ub_event_new(base->eb->base, c->fd, evbits,
1846 comm_point_tcp_accept_callback, c);
1847 if(c->ev->ev == NULL) {
1848 log_err("could not baseset tcpacc event");
1849 comm_point_delete(c);
1852 if (ub_event_add(c->ev->ev, c->timeout) != 0) {
1853 log_err("could not add tcpacc event");
1854 comm_point_delete(c);
1857 /* now prealloc the tcp handlers */
1858 for(i=0; i<num; i++) {
1859 c->tcp_handlers[i] = comm_point_create_tcp_handler(base,
1860 c, bufsize, callback, callback_arg);
1861 if(!c->tcp_handlers[i]) {
1862 comm_point_delete(c);
/* Create an outgoing TCP comm point (no fd yet): allocates the buffer,
 * enables nonblocking-connect checking (and TCP Fast Open where built
 * in), and registers a persistent WRITE event so the connect result is
 * noticed. Returns NULL on failure. */
1871 comm_point_create_tcp_out(struct comm_base *base, size_t bufsize,
1872 comm_point_callback_type* callback, void* callback_arg)
1874 struct comm_point* c = (struct comm_point*)calloc(1,
1875 sizeof(struct comm_point));
1879 c->ev = (struct internal_event*)calloc(1,
1880 sizeof(struct internal_event));
1887 c->buffer = sldns_buffer_new(bufsize);
1894 c->tcp_is_reading = 0;
1895 c->tcp_byte_count = 0;
1896 c->tcp_parent = NULL;
1897 c->max_tcp_count = 0;
1898 c->cur_tcp_count = 0;
1899 c->tcp_handlers = NULL;
1902 c->tcp_do_close = 0;
1903 c->do_not_close = 0;
1904 c->tcp_do_toggle_rw = 1;
/* outgoing: check SO_ERROR after the nonblocking connect */
1905 c->tcp_check_nb_connect = 1;
1906 #ifdef USE_MSG_FASTOPEN
1907 c->tcp_do_fastopen = 1;
1911 c->dnscrypt_buffer = c->buffer;
1914 c->callback = callback;
1915 c->cb_arg = callback_arg;
1916 evbits = UB_EV_PERSIST | UB_EV_WRITE;
1917 c->ev->ev = ub_event_new(base->eb->base, c->fd, evbits,
1918 comm_point_tcp_handle_callback, c);
1919 if(c->ev->ev == NULL)
1921 log_err("could not baseset tcpout event");
1922 sldns_buffer_free(c->buffer);
/* Create a comm_local point (e.g. local control pipe/socket) on fd:
 * allocates a bufsize buffer, starts in reading mode, never closes the
 * caller-owned fd (do_not_close=1), and registers a persistent READ
 * event calling comm_point_local_handle_callback. NULL on failure. */
1932 comm_point_create_local(struct comm_base *base, int fd, size_t bufsize,
1933 comm_point_callback_type* callback, void* callback_arg)
1935 struct comm_point* c = (struct comm_point*)calloc(1,
1936 sizeof(struct comm_point));
1940 c->ev = (struct internal_event*)calloc(1,
1941 sizeof(struct internal_event));
1948 c->buffer = sldns_buffer_new(bufsize);
1955 c->tcp_is_reading = 1;
1956 c->tcp_byte_count = 0;
1957 c->tcp_parent = NULL;
1958 c->max_tcp_count = 0;
1959 c->cur_tcp_count = 0;
1960 c->tcp_handlers = NULL;
1962 c->type = comm_local;
1963 c->tcp_do_close = 0;
/* fd is owned by the caller; comm_point_close must not close it */
1964 c->do_not_close = 1;
1965 c->tcp_do_toggle_rw = 0;
1966 c->tcp_check_nb_connect = 0;
1967 #ifdef USE_MSG_FASTOPEN
1968 c->tcp_do_fastopen = 0;
1972 c->dnscrypt_buffer = c->buffer;
1974 c->callback = callback;
1975 c->cb_arg = callback_arg;
1976 /* ub_event stuff */
1977 evbits = UB_EV_PERSIST | UB_EV_READ;
1978 c->ev->ev = ub_event_new(base->eb->base, c->fd, evbits,
1979 comm_point_local_handle_callback, c);
1980 if(c->ev->ev == NULL) {
1981 log_err("could not baseset localhdl event");
1986 if (ub_event_add(c->ev->ev, c->timeout) != 0) {
1987 log_err("could not add localhdl event");
1988 ub_event_free(c->ev->ev);
/* Create a raw comm point on fd with no buffer management: registers a
 * persistent WRITE event if `writing` is nonzero, else READ, invoking
 * comm_point_raw_handle_callback. The fd stays caller-owned
 * (do_not_close=1). Returns NULL on failure. */
1997 comm_point_create_raw(struct comm_base* base, int fd, int writing,
1998 comm_point_callback_type* callback, void* callback_arg)
2000 struct comm_point* c = (struct comm_point*)calloc(1,
2001 sizeof(struct comm_point));
2005 c->ev = (struct internal_event*)calloc(1,
2006 sizeof(struct internal_event));
2015 c->tcp_is_reading = 0;
2016 c->tcp_byte_count = 0;
2017 c->tcp_parent = NULL;
2018 c->max_tcp_count = 0;
2019 c->cur_tcp_count = 0;
2020 c->tcp_handlers = NULL;
2023 c->tcp_do_close = 0;
2024 c->do_not_close = 1;
2025 c->tcp_do_toggle_rw = 0;
2026 c->tcp_check_nb_connect = 0;
2027 #ifdef USE_MSG_FASTOPEN
2028 c->tcp_do_fastopen = 0;
2032 c->dnscrypt_buffer = c->buffer;
2034 c->callback = callback;
2035 c->cb_arg = callback_arg;
2036 /* ub_event stuff */
2038 evbits = UB_EV_PERSIST | UB_EV_WRITE;
2039 else evbits = UB_EV_PERSIST | UB_EV_READ;
2040 c->ev->ev = ub_event_new(base->eb->base, c->fd, evbits,
2041 comm_point_raw_handle_callback, c);
2042 if(c->ev->ev == NULL) {
2043 log_err("could not baseset rawhdl event");
2048 if (ub_event_add(c->ev->ev, c->timeout) != 0) {
2049 log_err("could not add rawhdl event");
2050 ub_event_free(c->ev->ev);
/* Deregister the comm point's event and close its fd (unless the fd is
 * caller-owned via do_not_close). Event removal happens first so epoll
 * etc. are not left referencing a closed descriptor. */
2059 comm_point_close(struct comm_point* c)
2064 if(ub_event_del(c->ev->ev) != 0) {
2065 log_err("could not event_del on close");
2067 /* close fd after removing from event lists, or epoll.. is messed up */
2068 if(c->fd != -1 && !c->do_not_close) {
2069 verbose(VERB_ALGO, "close fd %d", c->fd);
/* Destroy a comm point: shut down SSL if present, close the socket,
 * recursively delete any preallocated TCP handler children, free the
 * buffers (including a separate dnscrypt buffer) and the event. */
2080 comm_point_delete(struct comm_point* c)
2084 if(c->type == comm_tcp && c->ssl) {
2086 SSL_shutdown(c->ssl);
2090 comm_point_close(c);
2091 if(c->tcp_handlers) {
2093 for(i=0; i<c->max_tcp_count; i++)
2094 comm_point_delete(c->tcp_handlers[i]);
2095 free(c->tcp_handlers);
2098 if(c->type == comm_tcp || c->type == comm_local) {
2099 sldns_buffer_free(c->buffer);
/* the dnscrypt buffer is only freed when it was separately allocated */
2101 if(c->dnscrypt && c->dnscrypt_buffer != c->buffer) {
2102 sldns_buffer_free(c->dnscrypt_buffer);
2106 ub_event_free(c->ev->ev);
/* Send the reply held in the comm point's buffer back to the client
 * described by repinfo. UDP replies are sent immediately (optionally
 * with source-interface info); TCP replies switch the point back into
 * listening/writing mode. dnscrypt replies are re-encrypted first and
 * dnstap logging is emitted when configured. */
2112 comm_point_send_reply(struct comm_reply *repinfo)
2114 struct sldns_buffer* buffer;
2115 log_assert(repinfo && repinfo->c);
2117 buffer = repinfo->c->dnscrypt_buffer;
2118 if(!dnsc_handle_uncurved_request(repinfo)) {
2122 buffer = repinfo->c->buffer;
2124 if(repinfo->c->type == comm_udp) {
/* srctype set means: reply must leave via the same interface */
2125 if(repinfo->srctype)
2126 comm_point_send_udp_msg_if(repinfo->c,
2127 buffer, (struct sockaddr*)&repinfo->addr,
2128 repinfo->addrlen, repinfo);
2130 comm_point_send_udp_msg(repinfo->c, buffer,
2131 (struct sockaddr*)&repinfo->addr, repinfo->addrlen);
2133 if(repinfo->c->dtenv != NULL &&
2134 repinfo->c->dtenv->log_client_response_messages)
2135 dt_msg_send_client_response(repinfo->c->dtenv,
2136 &repinfo->addr, repinfo->c->type, repinfo->c->buffer);
2140 if(repinfo->c->tcp_parent->dtenv != NULL &&
2141 repinfo->c->tcp_parent->dtenv->log_client_response_messages)
2142 dt_msg_send_client_response(repinfo->c->tcp_parent->dtenv,
2143 &repinfo->addr, repinfo->c->type, repinfo->c->buffer);
2145 comm_point_start_listening(repinfo->c, -1,
2146 repinfo->c->tcp_timeout_msec);
/* Abandon a pending reply: for UDP nothing needs releasing; for TCP the
 * handler is reclaimed back onto the parent's free list. */
2151 comm_point_drop_reply(struct comm_reply* repinfo)
2155 log_assert(repinfo && repinfo->c);
2156 log_assert(repinfo->c->type != comm_tcp_accept);
2157 if(repinfo->c->type == comm_udp)
2159 reclaim_tcp_handler(repinfo->c);
/* Remove the comm point's event from the event loop so it no longer
 * receives callbacks; the fd remains open. */
2163 comm_point_stop_listening(struct comm_point* c)
2165 verbose(VERB_ALGO, "comm point stop listening %d", c->fd);
2166 if(ub_event_del(c->ev->ev) != 0) {
2167 log_err("event_del error to stoplisten");
/* (Re)arm the comm point's event: optionally adopt a new fd (newfd,
 * or -1 to keep the current one), install a timeout of msec
 * milliseconds (-1: none, 0: no timeout value), and for comm_tcp select
 * READ or WRITE according to tcp_is_reading. An accepting point with no
 * free handler slots is not re-armed. */
2172 comm_point_start_listening(struct comm_point* c, int newfd, int msec)
2174 verbose(VERB_ALGO, "comm point start listening %d",
2175 c->fd==-1?newfd:c->fd);
2176 if(c->type == comm_tcp_accept && !c->tcp_free) {
2177 /* no use to start listening no free slots. */
2180 if(msec != -1 && msec != 0) {
/* lazily allocate the timeout struct on first use */
2182 c->timeout = (struct timeval*)malloc(sizeof(
2185 log_err("cpsl: malloc failed. No net read.");
2189 ub_event_add_bits(c->ev->ev, UB_EV_TIMEOUT);
2190 #ifndef S_SPLINT_S /* splint fails on struct timeval. */
2191 c->timeout->tv_sec = msec/1000;
2192 c->timeout->tv_usec = (msec%1000)*1000;
2193 #endif /* S_SPLINT_S */
2195 if(c->type == comm_tcp) {
2196 ub_event_del_bits(c->ev->ev, UB_EV_READ|UB_EV_WRITE);
2197 if(c->tcp_is_reading)
2198 ub_event_add_bits(c->ev->ev, UB_EV_READ);
2199 else ub_event_add_bits(c->ev->ev, UB_EV_WRITE);
2210 ub_event_set_fd(c->ev->ev, c->fd);
2212 if(ub_event_add(c->ev->ev, msec==0?NULL:c->timeout) != 0) {
2213 log_err("event_add failed. in cpsl.");
/* Re-arm the comm point's event with exactly the requested interest
 * set: rd enables READ, wr enables WRITE (either, both, or neither).
 * The existing timeout is kept. */
2217 void comm_point_listen_for_rw(struct comm_point* c, int rd, int wr)
2219 verbose(VERB_ALGO, "comm point listen_for_rw %d %d", c->fd, wr);
2220 if(ub_event_del(c->ev->ev) != 0) {
2221 log_err("event_del error to cplf");
2223 ub_event_del_bits(c->ev->ev, UB_EV_READ|UB_EV_WRITE);
2224 if(rd) ub_event_add_bits(c->ev->ev, UB_EV_READ);
2225 if(wr) ub_event_add_bits(c->ev->ev, UB_EV_WRITE);
2226 if(ub_event_add(c->ev->ev, c->timeout) != 0) {
2227 log_err("event_add failed. in cplf.");
/* Return an estimate of the heap memory used by this comm point,
 * including its buffer(s), timeout struct, and (for accept points) all
 * preallocated TCP handler children, recursively. */
2231 size_t comm_point_get_mem(struct comm_point* c)
2236 s = sizeof(*c) + sizeof(*c->ev);
2238 s += sizeof(*c->timeout);
2239 if(c->type == comm_tcp || c->type == comm_local) {
2240 s += sizeof(*c->buffer) + sldns_buffer_capacity(c->buffer);
2242 s += sizeof(*c->dnscrypt_buffer);
/* only count the dnscrypt buffer's storage when it is separate */
2243 if(c->buffer != c->dnscrypt_buffer) {
2244 s += sldns_buffer_capacity(c->dnscrypt_buffer);
2248 if(c->type == comm_tcp_accept) {
2250 for(i=0; i<c->max_tcp_count; i++)
2251 s += comm_point_get_mem(c->tcp_handlers[i]);
/* Create a comm timer on the event base that invokes cb(cb_arg) when it
 * fires. The internal_timer embeds the public comm_timer as its `super`
 * member. Returns NULL on allocation or event-creation failure. */
2257 comm_timer_create(struct comm_base* base, void (*cb)(void*), void* cb_arg)
2259 struct internal_timer *tm = (struct internal_timer*)calloc(1,
2260 sizeof(struct internal_timer));
2262 log_err("malloc failed");
2265 tm->super.ev_timer = tm;
2267 tm->super.callback = cb;
2268 tm->super.cb_arg = cb_arg;
2269 tm->ev = ub_event_new(base->eb->base, -1, UB_EV_TIMEOUT,
2270 comm_timer_callback, &tm->super);
2271 if(tm->ev == NULL) {
2272 log_err("timer_create: event_base_set failed.");
/* Cancel a pending timer and mark it as not enabled. */
2280 comm_timer_disable(struct comm_timer* timer)
2284 ub_timer_del(timer->ev_timer->ev);
2285 timer->ev_timer->enabled = 0;
/* (Re)arm the timer to fire after tv; an already-enabled timer is
 * disabled first so the new deadline replaces the old one. */
2289 comm_timer_set(struct comm_timer* timer, struct timeval* tv)
2292 if(timer->ev_timer->enabled)
2293 comm_timer_disable(timer);
2294 if(ub_timer_add(timer->ev_timer->ev, timer->ev_timer->base->eb->base,
2295 comm_timer_callback, timer, tv) != 0)
2296 log_err("comm_timer_set: evtimer_add failed.");
2297 timer->ev_timer->enabled = 1;
/* Destroy a timer: disable it, then free its event and the enclosing
 * internal_timer allocation. */
2301 comm_timer_delete(struct comm_timer* timer)
2305 comm_timer_disable(timer);
2306 /* Free the sub struct timer->ev_timer derived from the super struct timer.
2307 * i.e. assert(timer == timer->ev_timer)
2309 ub_event_free(timer->ev_timer->ev);
2310 free(timer->ev_timer);
/* Event-loop callback for timers: ignores non-timeout events, updates
 * the cached time, clears the enabled flag (the timer is one-shot) and
 * calls the user callback. */
2314 comm_timer_callback(int ATTR_UNUSED(fd), short event, void* arg)
2316 struct comm_timer* tm = (struct comm_timer*)arg;
2317 if(!(event&UB_EV_TIMEOUT))
2319 ub_comm_base_now(tm->ev_timer->base);
2320 tm->ev_timer->enabled = 0;
2321 fptr_ok(fptr_whitelist_comm_timer(tm->callback));
2322 (*tm->callback)(tm->cb_arg);
/* Return nonzero if the timer is currently armed. */
2326 comm_timer_is_set(struct comm_timer* timer)
2328 return (int)timer->ev_timer->enabled;
/* Memory accounting: size of the internal timer structure. */
2332 comm_timer_get_mem(struct comm_timer* ATTR_UNUSED(timer))
2334 return sizeof(struct internal_timer);
/* Create a signal handling structure on the event base; individual
 * signals are attached later with comm_signal_bind. Returns NULL on
 * allocation failure. */
2338 comm_signal_create(struct comm_base* base,
2339 void (*callback)(int, void*), void* cb_arg)
2341 struct comm_signal* com = (struct comm_signal*)malloc(
2342 sizeof(struct comm_signal));
2344 log_err("malloc failed");
2348 com->callback = callback;
2349 com->cb_arg = cb_arg;
2350 com->ev_signal = NULL;
/* Event-loop callback for signals: ignores non-signal events, updates
 * the cached time, and forwards the signal number to the user callback. */
2355 comm_signal_callback(int sig, short event, void* arg)
2357 struct comm_signal* comsig = (struct comm_signal*)arg;
2358 if(!(event & UB_EV_SIGNAL))
2360 ub_comm_base_now(comsig->base);
2361 fptr_ok(fptr_whitelist_comm_signal(comsig->callback));
2362 (*comsig->callback)(sig, comsig->cb_arg);
/* Bind signal number `sig` to the comm_signal handler: allocates an
 * internal_signal entry, registers the signal event, and links the
 * entry onto the comsig->ev_signal list. Returns failure on allocation
 * or event-registration error (the entry is unwound). */
2366 comm_signal_bind(struct comm_signal* comsig, int sig)
2368 struct internal_signal* entry = (struct internal_signal*)calloc(1,
2369 sizeof(struct internal_signal));
2371 log_err("malloc failed");
2375 /* add signal event */
2376 entry->ev = ub_signal_new(comsig->base->eb->base, sig,
2377 comm_signal_callback, comsig);
2378 if(entry->ev == NULL) {
2379 log_err("Could not create signal event");
2383 if(ub_signal_add(entry->ev, NULL) != 0) {
2384 log_err("Could not add signal handler");
2385 ub_event_free(entry->ev);
2389 /* link into list */
2390 entry->next = comsig->ev_signal;
2391 comsig->ev_signal = entry;
2396 comm_signal_delete(struct comm_signal* comsig)
2398 struct internal_signal* p, *np;
2401 p=comsig->ev_signal;
2404 ub_signal_del(p->ev);
2405 ub_event_free(p->ev);