2 * util/netevent.c - event notification
4 * Copyright (c) 2007, NLnet Labs. All rights reserved.
6 * This software is open source.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * Redistributions of source code must retain the above copyright notice,
13 * this list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 * this list of conditions and the following disclaimer in the documentation
17 * and/or other materials provided with the distribution.
19 * Neither the name of the NLNET LABS nor the names of its contributors may
20 * be used to endorse or promote products derived from this software without
21 * specific prior written permission.
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
29 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
30 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
31 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
32 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 * This file contains event notification functions.
42 #include "util/netevent.h"
43 #include "util/ub_event.h"
45 #include "util/net_help.h"
46 #include "util/fptr_wlist.h"
47 #include "sldns/pkthdr.h"
48 #include "sldns/sbuffer.h"
49 #include "dnstap/dnstap.h"
50 #include "dnscrypt/dnscrypt.h"
51 #ifdef HAVE_OPENSSL_SSL_H
52 #include <openssl/ssl.h>
54 #ifdef HAVE_OPENSSL_ERR_H
55 #include <openssl/err.h>
58 /* -------- Start of local definitions -------- */
/* NOTE(review): this excerpt is sampled; the embedded original line
 * numbers show gaps, so some #if/#else/#endif lines and declarations
 * are not visible here. Tokens below are kept byte-identical. */
59 /** if CMSG_ALIGN is not defined on this platform, a workaround */
62 # define CMSG_ALIGN(n) __CMSG_ALIGN(n)
63 # elif defined(CMSG_DATA_ALIGN)
64 # define CMSG_ALIGN _CMSG_DATA_ALIGN
/* generic fallback: round up to the alignment of long */
66 # define CMSG_ALIGN(len) (((len)+sizeof(long)-1) & ~(sizeof(long)-1))
70 /** if CMSG_LEN is not defined on this platform, a workaround */
72 # define CMSG_LEN(len) (CMSG_ALIGN(sizeof(struct cmsghdr))+(len))
75 /** if CMSG_SPACE is not defined on this platform, a workaround */
77 # ifdef _CMSG_HDR_ALIGN
78 # define CMSG_SPACE(l) (CMSG_ALIGN(l)+_CMSG_HDR_ALIGN(sizeof(struct cmsghdr)))
80 # define CMSG_SPACE(l) (CMSG_ALIGN(l)+CMSG_ALIGN(sizeof(struct cmsghdr)))
84 /** The TCP reading or writing query timeout in milliseconds */
85 #define TCP_QUERY_TIMEOUT 120000
86 /** The TCP timeout in msec for fast queries, above half are used */
87 #define TCP_QUERY_TIMEOUT_FAST 200
89 #ifndef NONBLOCKING_IS_BROKEN
90 /** number of UDP reads to perform per read indication from select */
91 #define NUM_UDP_PER_SELECT 100
/* when nonblocking sockets are broken, read only one datagram per event */
93 #define NUM_UDP_PER_SELECT 1
97  * The internal event structure for keeping ub_event info for the event.
98  * Possibly other structures (list, tree) this is part of.
100 struct internal_event {
/* NOTE(review): some member declarations are in elided lines (the
 * embedded numbering has gaps); comments without a following field
 * describe members that are not visible in this excerpt. */
102 	struct comm_base* base;
103 	/** ub_event event type */
108  * Internal base structure, so that every thread has its own events.
110 struct internal_base {
111 	/** ub_event event_base type. */
112 	struct ub_event_base* base;
113 	/** seconds time pointer points here */
115 	/** timeval with current time */
117 	/** the event used for slow_accept timeouts */
118 	struct ub_event* slow_accept;
119 	/** true if slow_accept is enabled */
120 	int slow_accept_enabled;
124  * Internal timer structure, to store timer event in.
126 struct internal_timer {
127 	/** the super struct from which derived */
128 	struct comm_timer super;
	/** the comm base this timer belongs to */
130 	struct comm_base* base;
131 	/** ub_event event type */
133 	/** is timer enabled */
138  * Internal signal structure, to store signal event in.
140 struct internal_signal {
141 	/** ub_event event type */
143 	/** next in signal list */
144 	struct internal_signal* next;
147 /** create a tcp handler with a parent */
/* forward declaration; the definition is later in the file (outside
 * this excerpt). The handler shares bufsize/callback with its parent
 * accept commpoint. */
148 static struct comm_point* comm_point_create_tcp_handler(
149 	struct comm_base *base, struct comm_point* parent, size_t bufsize,
150         comm_point_callback_type* callback, void* callback_arg);
152 /* -------- End of local definitions -------- */
/** Create a comm_base with its own default ub_event base (with signal
 * handling if sigs is nonzero), and log which event mechanism is used.
 * NOTE(review): the elided lines presumably NULL-check the two callocs
 * and the created event base, freeing and returning NULL on failure --
 * confirm against the full source. */
155 comm_base_create(int sigs)
157 	struct comm_base* b = (struct comm_base*)calloc(1,
158 		sizeof(struct comm_base));
159 	const char *evnm="event", *evsys="", *evmethod="";
163 	b->eb = (struct internal_base*)calloc(1, sizeof(struct internal_base));
	/* the base caches time in eb->secs / eb->now for cheap reads */
168 	b->eb->base = ub_default_event_base(sigs, &b->eb->secs, &b->eb->now);
175 	ub_get_event_sys(b->eb->base, &evnm, &evsys, &evmethod);
176 	verbose(VERB_ALGO, "%s %s user %s method.", evnm, evsys, evmethod);
/** Create a comm_base around a caller-supplied ub_event_base instead of
 * creating a default one. NOTE(review): the line storing the 'base'
 * argument into b->eb is elided here -- confirm against full source. */
181 comm_base_create_event(struct ub_event_base* base)
183 	struct comm_base* b = (struct comm_base*)calloc(1,
184 		sizeof(struct comm_base));
187 	b->eb = (struct internal_base*)calloc(1, sizeof(struct internal_base));
/** Delete a comm_base: tear down the slow-accept timer event if it is
 * armed, then free the underlying event base. (free() of b->eb and b
 * itself is in elided lines.) */
198 comm_base_delete(struct comm_base* b)
202 	if(b->eb->slow_accept_enabled) {
203 		if(ub_event_del(b->eb->slow_accept) != 0) {
204 			log_err("could not event_del slow_accept");
206 		ub_event_free(b->eb->slow_accept);
208 	ub_event_base_free(b->eb->base);
/** Like comm_base_delete, but does NOT free the underlying
 * ub_event_base (used when the event base is owned by the caller,
 * cf. comm_base_create_event). */
215 comm_base_delete_no_base(struct comm_base* b)
219 	if(b->eb->slow_accept_enabled) {
220 		if(ub_event_del(b->eb->slow_accept) != 0) {
221 			log_err("could not event_del slow_accept");
223 		ub_event_free(b->eb->slow_accept);
/** Export pointers to the base's cached time values (seconds and
 * timeval); body is in elided lines. */
231 comm_base_timept(struct comm_base* b, time_t** tt, struct timeval** tv)
/** Run the event loop for this base. An error return from the
 * dispatcher is fatal (the check of retval against the error value is
 * in an elided line). */
238 comm_base_dispatch(struct comm_base* b)
241 	retval = ub_event_base_dispatch(b->eb->base);
242 		/* dispatcher reported an error: abort the process */
243 		fatal_exit("event_dispatch returned error %d, "
244 			"errno is %s", retval, strerror(errno));
/** Ask the event loop to exit; logs on failure but does not abort. */
248 void comm_base_exit(struct comm_base* b)
250 	if(ub_event_base_loopexit(b->eb->base) != 0) {
251 		log_err("Could not loopexit");
/** Install callbacks used to pause/resume accepting TCP connections
 * when file descriptors run out (see comm_point_perform_accept).
 * NOTE(review): storage of 'arg' (presumably into b->cb_arg, which the
 * slow-accept path passes back) is in an elided line -- confirm. */
255 void comm_base_set_slow_accept_handlers(struct comm_base* b,
256 	void (*stop_acc)(void*), void (*start_acc)(void*), void* arg)
258 	b->stop_accept = stop_acc;
259 	b->start_accept = start_acc;
/** Accessor for the internal ub_event_base (return statement elided;
 * presumably returns b->eb->base -- confirm against full source). */
263 struct ub_event_base* comm_base_internal(struct comm_base* b)
268 /** see if errno for udp has to be logged or not uses globals */
/* Returns whether the current errno after a failed UDP send should be
 * logged at the current verbosity. Each squelch case below suppresses
 * a class of transient/expected errors; the 'return 0'/'return 1'
 * statements themselves are in elided lines. Reads the global errno
 * and global verbosity. */
270 udp_send_errno_needs_log(struct sockaddr* addr, socklen_t addrlen)
272 	/* do not log transient errors (unless high verbosity) */
273 #if defined(ENETUNREACH) || defined(EHOSTDOWN) || defined(EHOSTUNREACH) || defined(ENETDOWN)
287 		if(verbosity < VERB_ALGO)
293 	/* permission denied is gotten for every send if the
294 	 * network is disconnected (on some OS), squelch it */
295 	if( ((errno == EPERM)
296 #  ifdef EADDRNOTAVAIL
297 		/* 'Cannot assign requested address' also when disconnected */
298 		|| (errno == EADDRNOTAVAIL)
300 		) && verbosity < VERB_DETAIL)
302 	/* squelch errors where people deploy AAAA ::ffff:bla for
303 	 * authority servers, which we try for intranets. */
304 	if(errno == EINVAL && addr_is_ip4mapped(
305 		(struct sockaddr_storage*)addr, addrlen) &&
306 		verbosity < VERB_DETAIL)
308 	/* SO_BROADCAST sockopt can give access to 255.255.255.255,
309 	 * but a dns cache does not need it. */
310 	if(errno == EACCES && addr_is_broadcast(
311 		(struct sockaddr_storage*)addr, addrlen) &&
312 		verbosity < VERB_DETAIL)
/** TCP connect errors use the same squelch policy as UDP sends. */
317 int tcp_connect_errno_needs_log(struct sockaddr* addr, socklen_t addrlen)
319 	return udp_send_errno_needs_log(addr, addrlen);
322 /* send a UDP reply */
/* Send the contents of 'packet' as one UDP datagram on c->fd to
 * addr/addrlen. On EAGAIN/EWOULDBLOCK (WSAEINPROGRESS/WSAENOBUFS/
 * WSAEWOULDBLOCK on windows) the socket is switched to blocking mode,
 * the sendto is retried, and nonblocking mode is restored afterwards
 * (the fd_set_block call is in an elided line; fd_set_nonblock below
 * restores it). Failures are logged subject to
 * udp_send_errno_needs_log; a short send is logged as an error.
 * Return value statements are in elided lines. */
324 comm_point_send_udp_msg(struct comm_point *c, sldns_buffer* packet,
325 	struct sockaddr* addr, socklen_t addrlen)
328 	log_assert(c->fd != -1);
330 	if(sldns_buffer_remaining(packet) == 0)
331 		log_err("error: send empty UDP packet");
333 	log_assert(addr && addrlen > 0);
334 	sent = sendto(c->fd, (void*)sldns_buffer_begin(packet),
335 		sldns_buffer_remaining(packet), 0,
338 		/* try again and block, waiting for IO to complete,
339 		 * we want to send the answer, and we will wait for
340 		 * the ethernet interface buffer to have space. */
342 		if(errno == EAGAIN ||
344 			errno == EWOULDBLOCK ||
348 		if(WSAGetLastError() == WSAEINPROGRESS ||
349 			WSAGetLastError() == WSAENOBUFS ||
350 			WSAGetLastError() == WSAEWOULDBLOCK) {
			/* retry in blocking mode */
354 			sent = sendto(c->fd, (void*)sldns_buffer_begin(packet),
355 				sldns_buffer_remaining(packet), 0,
			/* restore nonblocking mode after the blocking retry */
358 			fd_set_nonblock(c->fd);
363 		if(!udp_send_errno_needs_log(addr, addrlen))
366 		verbose(VERB_OPS, "sendto failed: %s", strerror(errno));
368 		verbose(VERB_OPS, "sendto failed: %s",
369 			wsa_strerror(WSAGetLastError()));
371 		log_addr(VERB_OPS, "remote address is",
372 			(struct sockaddr_storage*)addr, addrlen);
374 	} else if((size_t)sent != sldns_buffer_remaining(packet)) {
375 		log_err("sent %d in place of %d bytes",
376 			(int)sent, (int)sldns_buffer_remaining(packet));
382 #if defined(AF_INET6) && defined(IPV6_PKTINFO) && (defined(HAVE_RECVMSG) || defined(HAVE_SENDMSG))
383 /** print debug ancillary info */
/* Logs the source-interface info carried in comm_reply r: for srctype 6
 * the in6_pktinfo address+ifindex, for srctype 4 either in_pktinfo
 * (IP_PKTINFO) or a bare in_addr (IP_RECVDSTADDR), depending on what
 * the platform provides. Debug aid only; called at VERB_ALGO. */
384 static void p_ancil(const char* str, struct comm_reply* r)
386 	if(r->srctype != 4 && r->srctype != 6) {
387 		log_info("%s: unknown srctype %d", str, r->srctype);
390 	if(r->srctype == 6) {
392 		if(inet_ntop(AF_INET6, &r->pktinfo.v6info.ipi6_addr,
393 			buf, (socklen_t)sizeof(buf)) == 0) {
394 			(void)strlcpy(buf, "(inet_ntop error)", sizeof(buf));
396 		buf[sizeof(buf)-1]=0;
397 		log_info("%s: %s %d", str, buf, r->pktinfo.v6info.ipi6_ifindex);
398 	} else if(r->srctype == 4) {
400 		char buf1[1024], buf2[1024];
401 		if(inet_ntop(AF_INET, &r->pktinfo.v4info.ipi_addr,
402 			buf1, (socklen_t)sizeof(buf1)) == 0) {
403 			(void)strlcpy(buf1, "(inet_ntop error)", sizeof(buf1));
405 		buf1[sizeof(buf1)-1]=0;
406 #ifdef HAVE_STRUCT_IN_PKTINFO_IPI_SPEC_DST
407 		if(inet_ntop(AF_INET, &r->pktinfo.v4info.ipi_spec_dst,
408 			buf2, (socklen_t)sizeof(buf2)) == 0) {
409 			(void)strlcpy(buf2, "(inet_ntop error)", sizeof(buf2));
411 		buf2[sizeof(buf2)-1]=0;
415 		log_info("%s: %d %s %s", str, r->pktinfo.v4info.ipi_ifindex,
417 #elif defined(IP_RECVDSTADDR)
419 		if(inet_ntop(AF_INET, &r->pktinfo.v4addr,
420 			buf1, (socklen_t)sizeof(buf1)) == 0) {
421 			(void)strlcpy(buf1, "(inet_ntop error)", sizeof(buf1));
423 		buf1[sizeof(buf1)-1]=0;
424 		log_info("%s: %s", str, buf1);
425 #endif /* IP_PKTINFO or IP_RECVDSTADDR */
428 #endif /* AF_INET6 && IPV6_PKTINFO && HAVE_RECVMSG||HAVE_SENDMSG */
430 /** send a UDP reply over specified interface*/
/* Like comm_point_send_udp_msg, but uses sendmsg() with ancillary
 * control data so the reply leaves from the same local address/interface
 * the query arrived on (comm_reply r carries that pktinfo). Builds one
 * cmsg: IPv4 via IP_PKTINFO or IP_SENDSRCADDR, IPv6 via IPV6_PKTINFO.
 * The interface index is zeroed so the kernel routing tables still pick
 * the route. On EAGAIN/EWOULDBLOCK the send is retried in blocking mode
 * and nonblocking mode restored (fd_set_block call elided). Only built
 * when IPv6 pktinfo and sendmsg are available; otherwise logs an error.
 * Return statements are in elided lines. */
432 comm_point_send_udp_msg_if(struct comm_point *c, sldns_buffer* packet,
433 	struct sockaddr* addr, socklen_t addrlen, struct comm_reply* r)
435 #if defined(AF_INET6) && defined(IPV6_PKTINFO) && defined(HAVE_SENDMSG)
441 	struct cmsghdr *cmsg;
442 #endif /* S_SPLINT_S */
444 	log_assert(c->fd != -1);
446 	if(sldns_buffer_remaining(packet) == 0)
447 		log_err("error: send empty UDP packet");
449 	log_assert(addr && addrlen > 0);
452 	msg.msg_namelen = addrlen;
453 	iov[0].iov_base = sldns_buffer_begin(packet);
454 	iov[0].iov_len = sldns_buffer_remaining(packet);
457 	msg.msg_control = control;
459 	msg.msg_controllen = sizeof(control);
460 #endif /* S_SPLINT_S */
464 	cmsg = CMSG_FIRSTHDR(&msg);
465 	if(r->srctype == 4) {
468 		msg.msg_controllen = CMSG_SPACE(sizeof(struct in_pktinfo));
469 		log_assert(msg.msg_controllen <= sizeof(control));
470 		cmsg->cmsg_level = IPPROTO_IP;
471 		cmsg->cmsg_type = IP_PKTINFO;
472 		memmove(CMSG_DATA(cmsg), &r->pktinfo.v4info,
473 			sizeof(struct in_pktinfo));
474 		/* unset the ifindex to not bypass the routing tables */
475 		cmsg_data = CMSG_DATA(cmsg);
476 		((struct in_pktinfo *) cmsg_data)->ipi_ifindex = 0;
477 		cmsg->cmsg_len = CMSG_LEN(sizeof(struct in_pktinfo));
478 #elif defined(IP_SENDSRCADDR)
479 		msg.msg_controllen = CMSG_SPACE(sizeof(struct in_addr));
480 		log_assert(msg.msg_controllen <= sizeof(control));
481 		cmsg->cmsg_level = IPPROTO_IP;
482 		cmsg->cmsg_type = IP_SENDSRCADDR;
483 		memmove(CMSG_DATA(cmsg), &r->pktinfo.v4addr,
484 			sizeof(struct in_addr));
485 		cmsg->cmsg_len = CMSG_LEN(sizeof(struct in_addr));
		/* neither IP_PKTINFO nor IP_SENDSRCADDR: send without
		 * ancillary data */
487 		verbose(VERB_ALGO, "no IP_PKTINFO or IP_SENDSRCADDR");
488 		msg.msg_control = NULL;
489 #endif /* IP_PKTINFO or IP_SENDSRCADDR */
490 	} else if(r->srctype == 6) {
492 		msg.msg_controllen = CMSG_SPACE(sizeof(struct in6_pktinfo));
493 		log_assert(msg.msg_controllen <= sizeof(control));
494 		cmsg->cmsg_level = IPPROTO_IPV6;
495 		cmsg->cmsg_type = IPV6_PKTINFO;
496 		memmove(CMSG_DATA(cmsg), &r->pktinfo.v6info,
497 			sizeof(struct in6_pktinfo));
498 		/* unset the ifindex to not bypass the routing tables */
499 		cmsg_data = CMSG_DATA(cmsg);
500 		((struct in6_pktinfo *) cmsg_data)->ipi6_ifindex = 0;
501 		cmsg->cmsg_len = CMSG_LEN(sizeof(struct in6_pktinfo));
		/* srctype unknown (else-branch, header elided) */
503 		/* try to pass all 0 to use default route */
504 		msg.msg_controllen = CMSG_SPACE(sizeof(struct in6_pktinfo));
505 		log_assert(msg.msg_controllen <= sizeof(control));
506 		cmsg->cmsg_level = IPPROTO_IPV6;
507 		cmsg->cmsg_type = IPV6_PKTINFO;
508 		memset(CMSG_DATA(cmsg), 0, sizeof(struct in6_pktinfo));
509 		cmsg->cmsg_len = CMSG_LEN(sizeof(struct in6_pktinfo));
511 #endif /* S_SPLINT_S */
512 	if(verbosity >= VERB_ALGO)
513 		p_ancil("send_udp over interface", r);
514 	sent = sendmsg(c->fd, &msg, 0);
516 		/* try again and block, waiting for IO to complete,
517 		 * we want to send the answer, and we will wait for
518 		 * the ethernet interface buffer to have space. */
520 		if(errno == EAGAIN ||
522 			errno == EWOULDBLOCK ||
526 		if(WSAGetLastError() == WSAEINPROGRESS ||
527 			WSAGetLastError() == WSAENOBUFS ||
528 			WSAGetLastError() == WSAEWOULDBLOCK) {
			/* retry in blocking mode */
532 			sent = sendmsg(c->fd, &msg, 0);
			/* restore nonblocking mode */
534 			fd_set_nonblock(c->fd);
539 		if(!udp_send_errno_needs_log(addr, addrlen))
541 		verbose(VERB_OPS, "sendmsg failed: %s", strerror(errno));
542 		log_addr(VERB_OPS, "remote address is",
543 			(struct sockaddr_storage*)addr, addrlen);
545 		/* netbsd 7 has IP_PKTINFO for recv but not send */
546 		if(errno == EINVAL && r->srctype == 4)
547 			log_err("sendmsg: No support for sendmsg(IP_PKTINFO). "
548 				"Please disable interface-automatic");
551 	} else if((size_t)sent != sldns_buffer_remaining(packet)) {
552 		log_err("sent %d in place of %d bytes",
553 			(int)sent, (int)sldns_buffer_remaining(packet));
563 	log_err("sendmsg: IPV6_PKTINFO not supported");
565 #endif /* AF_INET6 && IPV6_PKTINFO && HAVE_SENDMSG */
/** UDP read event callback that also collects ancillary (pktinfo) data
 * with recvmsg, so replies can be sent from the interface the query
 * arrived on (interface-automatic mode). Reads up to NUM_UDP_PER_SELECT
 * datagrams per event, passes each to the commpoint callback, and on a
 * nonzero callback return sends the buffer back immediately via
 * comm_point_send_udp_msg_if. Without pktinfo/recvmsg support the
 * function is compiled to a fatal_exit. */
569 comm_point_udp_ancil_callback(int fd, short event, void* arg)
571 #if defined(AF_INET6) && defined(IPV6_PKTINFO) && defined(HAVE_RECVMSG)
572 	struct comm_reply rep;
579 	struct cmsghdr* cmsg;
580 #endif /* S_SPLINT_S */
582 	rep.c = (struct comm_point*)arg;
583 	log_assert(rep.c->type == comm_udp);
585 	if(!(event&UB_EV_READ))
587 	log_assert(rep.c && rep.c->buffer && rep.c->fd == fd);
588 	ub_comm_base_now(rep.c->ev->base);
589 	for(i=0; i<NUM_UDP_PER_SELECT; i++) {
590 		sldns_buffer_clear(rep.c->buffer);
591 		rep.addrlen = (socklen_t)sizeof(rep.addr);
592 		log_assert(fd != -1);
593 		log_assert(sldns_buffer_remaining(rep.c->buffer) > 0);
594 		msg.msg_name = &rep.addr;
595 		msg.msg_namelen = (socklen_t)sizeof(rep.addr);
596 		iov[0].iov_base = sldns_buffer_begin(rep.c->buffer);
597 		iov[0].iov_len = sldns_buffer_remaining(rep.c->buffer);
600 		msg.msg_control = ancil;
602 		msg.msg_controllen = sizeof(ancil);
603 #endif /* S_SPLINT_S */
605 		rcv = recvmsg(fd, &msg, 0);
			/* EAGAIN: no more datagrams; EINTR: retryable */
607 			if(errno != EAGAIN && errno != EINTR) {
608 				log_err("recvmsg failed: %s", strerror(errno));
612 		rep.addrlen = msg.msg_namelen;
613 		sldns_buffer_skip(rep.c->buffer, rcv);
614 		sldns_buffer_flip(rep.c->buffer);
		/* scan ancillary data for the destination-address cmsg */
617 		for(cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
618 			cmsg = CMSG_NXTHDR(&msg, cmsg)) {
619 			if( cmsg->cmsg_level == IPPROTO_IPV6 &&
620 				cmsg->cmsg_type == IPV6_PKTINFO) {
622 				memmove(&rep.pktinfo.v6info, CMSG_DATA(cmsg),
623 					sizeof(struct in6_pktinfo));
626 			} else if( cmsg->cmsg_level == IPPROTO_IP &&
627 				cmsg->cmsg_type == IP_PKTINFO) {
629 				memmove(&rep.pktinfo.v4info, CMSG_DATA(cmsg),
630 					sizeof(struct in_pktinfo));
632 #elif defined(IP_RECVDSTADDR)
633 			} else if( cmsg->cmsg_level == IPPROTO_IP &&
634 				cmsg->cmsg_type == IP_RECVDSTADDR) {
636 				memmove(&rep.pktinfo.v4addr, CMSG_DATA(cmsg),
637 					sizeof(struct in_addr));
639 #endif /* IP_PKTINFO or IP_RECVDSTADDR */
642 		if(verbosity >= VERB_ALGO)
643 			p_ancil("receive_udp on interface", &rep);
644 #endif /* S_SPLINT_S */
645 		fptr_ok(fptr_whitelist_comm_point(rep.c->callback));
646 		if((*rep.c->callback)(rep.c, rep.c->cb_arg, NETEVENT_NOERROR, &rep)) {
647 			/* send back immediate reply */
648 			(void)comm_point_send_udp_msg_if(rep.c, rep.c->buffer,
649 				(struct sockaddr*)&rep.addr, rep.addrlen, &rep);
651 		if(rep.c->fd == -1) /* commpoint closed */
658 	fatal_exit("recvmsg: No support for IPV6_PKTINFO; IP_PKTINFO or IP_RECVDSTADDR. "
659 		"Please disable interface-automatic");
660 #endif /* AF_INET6 && IPV6_PKTINFO && HAVE_RECVMSG */
/** Plain UDP read event callback (no ancillary data). Reads up to
 * NUM_UDP_PER_SELECT datagrams with recvfrom, passes each to the
 * commpoint callback, and on a nonzero return sends an immediate reply
 * with comm_point_send_udp_msg -- from the dnscrypt buffer when that
 * path is active, else the normal buffer. */
664 comm_point_udp_callback(int fd, short event, void* arg)
666 	struct comm_reply rep;
669 	struct sldns_buffer *buffer;
671 	rep.c = (struct comm_point*)arg;
672 	log_assert(rep.c->type == comm_udp);
674 	if(!(event&UB_EV_READ))
676 	log_assert(rep.c && rep.c->buffer && rep.c->fd == fd);
677 	ub_comm_base_now(rep.c->ev->base);
678 	for(i=0; i<NUM_UDP_PER_SELECT; i++) {
679 		sldns_buffer_clear(rep.c->buffer);
680 		rep.addrlen = (socklen_t)sizeof(rep.addr);
681 		log_assert(fd != -1);
682 		log_assert(sldns_buffer_remaining(rep.c->buffer) > 0);
683 		rcv = recvfrom(fd, (void*)sldns_buffer_begin(rep.c->buffer),
684 			sldns_buffer_remaining(rep.c->buffer), 0,
685 			(struct sockaddr*)&rep.addr, &rep.addrlen);
			/* EAGAIN/EINTR are not errors; others are logged */
688 			if(errno != EAGAIN && errno != EINTR)
689 				log_err("recvfrom %d failed: %s",
690 					fd, strerror(errno));
692 			if(WSAGetLastError() != WSAEINPROGRESS &&
693 				WSAGetLastError() != WSAECONNRESET &&
694 				WSAGetLastError()!= WSAEWOULDBLOCK)
695 				log_err("recvfrom failed: %s",
696 					wsa_strerror(WSAGetLastError()));
700 		sldns_buffer_skip(rep.c->buffer, rcv);
701 		sldns_buffer_flip(rep.c->buffer);
703 		fptr_ok(fptr_whitelist_comm_point(rep.c->callback));
704 		if((*rep.c->callback)(rep.c, rep.c->cb_arg, NETEVENT_NOERROR, &rep)) {
705 			/* send back immediate reply */
707 			buffer = rep.c->dnscrypt_buffer;
709 			buffer = rep.c->buffer;
711 			(void)comm_point_send_udp_msg(rep.c, buffer,
712 				(struct sockaddr*)&rep.addr, rep.addrlen);
714 		if(rep.c->fd != fd) /* commpoint closed to -1 or reused for
715 		another UDP port. Note rep.c cannot be reused with TCP fd. */
720 /** Use a new tcp handler for new query fd, set to read query */
/* Initializes handler c for a freshly accepted connection fd: clears
 * buffers, starts in reading mode, and picks the query timeout. When
 * more than half the handlers are busy (cur vs max, comparison line
 * elided) the short TCP_QUERY_TIMEOUT_FAST is used instead of
 * TCP_QUERY_TIMEOUT, to free up slots under load. */
722 setup_tcp_handler(struct comm_point* c, int fd, int cur, int max)
724 	log_assert(c->type == comm_tcp);
725 	log_assert(c->fd == -1);
726 	sldns_buffer_clear(c->buffer);
729 	sldns_buffer_clear(c->dnscrypt_buffer);
731 	c->tcp_is_reading = 1;
732 	c->tcp_byte_count = 0;
733 	c->tcp_timeout_msec = TCP_QUERY_TIMEOUT;
734 	/* if more than half the tcp handlers are in use, use a shorter
735 	 * timeout for this TCP connection, we need to make space for
736 	 * other connections to be able to get attention */
738 		c->tcp_timeout_msec = TCP_QUERY_TIMEOUT_FAST;
739 	comm_point_start_listening(c, fd, c->tcp_timeout_msec);
/** Timer callback fired after the slow-accept backoff period: calls the
 * registered start_accept handler to resume accept() on listening
 * sockets and clears the slow_accept_enabled flag. */
742 void comm_base_handle_slow_accept(int ATTR_UNUSED(fd),
743 	short ATTR_UNUSED(event), void* arg)
745 	struct comm_base* b = (struct comm_base*)arg;
746 	/* timeout for the slow accept, re-enable accepts again */
747 	if(b->start_accept) {
748 		verbose(VERB_ALGO, "wait is over, slow accept disabled");
749 		fptr_ok(fptr_whitelist_start_accept(b->start_accept));
750 		(*b->start_accept)(b->cb_arg);
751 		b->eb->slow_accept_enabled = 0;
/** accept() a new connection on c->fd, storing the peer address in
 * addr/addrlen. Transient errors (EINTR/EAGAIN/EWOULDBLOCK/
 * ECONNABORTED, and the winsock equivalents) are silently ignored.
 * On ENFILE/EMFILE (out of file descriptors) accepting is paused via
 * the base's stop_accept handler and a timer (slow_accept) is armed to
 * resume later -- deliberately without logging or allocating, to avoid
 * log spam and mallocs in the low-resource state. The accepted fd is
 * set nonblocking before return; return statements are in elided
 * lines. */
755 int comm_point_perform_accept(struct comm_point* c,
756 	struct sockaddr_storage* addr, socklen_t* addrlen)
759 	*addrlen = (socklen_t)sizeof(*addr);
760 	new_fd = accept(c->fd, (struct sockaddr*)addr, addrlen);
763 		/* EINTR is signal interrupt. others are closed connection. */
764 		if(	errno == EINTR || errno == EAGAIN
766 			|| errno == EWOULDBLOCK
769 			|| errno == ECONNABORTED
776 #if defined(ENFILE) && defined(EMFILE)
777 		if(errno == ENFILE || errno == EMFILE) {
778 			/* out of file descriptors, likely outside of our
779 			 * control. stop accept() calls for some time */
780 			if(c->ev->base->stop_accept) {
781 				struct comm_base* b = c->ev->base;
783 				verbose(VERB_ALGO, "out of file descriptors: "
785 				b->eb->slow_accept_enabled = 1;
786 				fptr_ok(fptr_whitelist_stop_accept(
788 				(*b->stop_accept)(b->cb_arg);
789 				/* set timeout, no mallocs */
790 				tv.tv_sec = NETEVENT_SLOW_ACCEPT_TIME/1000;
791 				tv.tv_usec = (NETEVENT_SLOW_ACCEPT_TIME%1000)*1000;
792 				b->eb->slow_accept = ub_event_new(b->eb->base,
794 					comm_base_handle_slow_accept, b);
795 				if(b->eb->slow_accept == NULL) {
796 					/* we do not want to log here, because
797 					 * that would spam the logfiles.
798 					 * error: "event_base_set failed." */
800 				else if(ub_event_add(b->eb->slow_accept, &tv)
802 					/* we do not want to log here,
803 					 * error: "event_add failed." */
809 		log_err_addr("accept failed", strerror(errno), addr, *addrlen);
810 #else /* USE_WINSOCK */
811 		if(WSAGetLastError() == WSAEINPROGRESS ||
812 			WSAGetLastError() == WSAECONNRESET)
814 		if(WSAGetLastError() == WSAEWOULDBLOCK) {
815 			ub_winsock_tcp_wouldblock(c->ev->ev, UB_EV_READ);
818 		log_err_addr("accept failed", wsa_strerror(WSAGetLastError()),
	/* accepted sockets must not block the event loop */
823 	fd_set_nonblock(new_fd);
/** OpenSSL BIO callback used on windows: when a BIO read/gets (or
 * write/puts) returned 0 because of WSAEWOULDBLOCK, tell the winsock
 * event layer to wait for the socket to become readable (or writable)
 * again. The callback arg is the ub_event for this socket. Returns the
 * original retvalue unchanged (return statement elided). */
828 static long win_bio_cb(BIO *b, int oper, const char* ATTR_UNUSED(argp),
829         int ATTR_UNUSED(argi), long argl, long retvalue)
831 	verbose(VERB_ALGO, "bio_cb %d, %s %s %s", oper,
832 		(oper&BIO_CB_RETURN)?"return":"before",
833 		(oper&BIO_CB_READ)?"read":((oper&BIO_CB_WRITE)?"write":"other"),
834 		WSAGetLastError()==WSAEWOULDBLOCK?"wsawb":"");
835 	/* on windows, check if previous operation caused EWOULDBLOCK */
836 	if( (oper == (BIO_CB_READ|BIO_CB_RETURN) && argl == 0) ||
837 		(oper == (BIO_CB_GETS|BIO_CB_RETURN) && argl == 0)) {
838 		if(WSAGetLastError() == WSAEWOULDBLOCK)
839 			ub_winsock_tcp_wouldblock((struct ub_event*)
840 				BIO_get_callback_arg(b), UB_EV_READ);
842 	if( (oper == (BIO_CB_WRITE|BIO_CB_RETURN) && argl == 0) ||
843 		(oper == (BIO_CB_PUTS|BIO_CB_RETURN) && argl == 0)) {
844 		if(WSAGetLastError() == WSAEWOULDBLOCK)
845 			ub_winsock_tcp_wouldblock((struct ub_event*)
846 				BIO_get_callback_arg(b), UB_EV_WRITE);
848 	/* return original return value */
852 /** set win bio callbacks for nonblocking operations */
/* Attach win_bio_cb (with the commpoint's ub_event as callback arg) to
 * both the read and write BIOs of the SSL connection, so nonblocking
 * stalls are reported to the winsock event layer. */
854 comm_point_tcp_win_bio_cb(struct comm_point* c, void* thessl)
856 	SSL* ssl = (SSL*)thessl;
857 	/* set them both just in case, but usually they are the same BIO */
858 	BIO_set_callback(SSL_get_rbio(ssl), &win_bio_cb);
859 	BIO_set_callback_arg(SSL_get_rbio(ssl), (char*)c->ev->ev);
860 	BIO_set_callback(SSL_get_wbio(ssl), &win_bio_cb);
861 	BIO_set_callback_arg(SSL_get_wbio(ssl), (char*)c->ev->ev);
/** Event callback on the TCP accept commpoint: take a free tcp handler
 * from the parent's free list (list manipulation partly elided), accept
 * the connection, wrap it in SSL when the listener has an SSL context,
 * and hand the new fd to setup_tcp_handler. When the handler pool is
 * exhausted, the accept commpoint stops listening until a handler is
 * reclaimed. */
866 comm_point_tcp_accept_callback(int fd, short event, void* arg)
868 	struct comm_point* c = (struct comm_point*)arg, *c_hdl;
870 	log_assert(c->type == comm_tcp_accept);
871 	if(!(event & UB_EV_READ)) {
872 		log_info("ignoring tcp accept event %d", (int)event);
875 	ub_comm_base_now(c->ev->base);
876 	/* find free tcp handler. */
878 		log_warn("accepted too many tcp, connections full");
881 	/* accept incoming connection. */
883 	log_assert(fd != -1);
885 	new_fd = comm_point_perform_accept(c, &c_hdl->repinfo.addr,
886 		&c_hdl->repinfo.addrlen);
		/* SSL listener: wrap the accepted fd */
890 		c_hdl->ssl = incoming_ssl_fd(c->ssl, new_fd);
			/* SSL setup failed: close the handler again */
893 			comm_point_close(c_hdl);
896 		c_hdl->ssl_shake_state = comm_ssl_shake_read;
898 		comm_point_tcp_win_bio_cb(c_hdl, c_hdl->ssl);
902 	/* grab the tcp handler buffers */
904 	c->tcp_free = c_hdl->tcp_free;
906 		/* stop accepting incoming queries for now. */
907 		comm_point_stop_listening(c);
909 	setup_tcp_handler(c_hdl, new_fd, c->cur_tcp_count, c->max_tcp_count);
912 /** Make tcp handler free for next assignment */
/* Shuts down SSL state if present (SSL_free presumably in an elided
 * line), returns the handler to the parent's free list, decrements the
 * parent's connection count, and re-enables listening on the accept
 * socket (which may have been stopped when the pool filled up). */
914 reclaim_tcp_handler(struct comm_point* c)
916 	log_assert(c->type == comm_tcp);
919 		SSL_shutdown(c->ssl);
926 		c->tcp_parent->cur_tcp_count--;
927 		c->tcp_free = c->tcp_parent->tcp_free;
928 		c->tcp_parent->tcp_free = c;
930 			/* re-enable listening on accept socket */
931 			comm_point_start_listening(c->tcp_parent, -1, -1);
936 /** do the callback when writing is done */
/* A full answer has been written: clear the buffer, optionally toggle
 * back to reading mode, reset the byte counter, and re-register the
 * event so the commpoint listens for the next read. */
938 tcp_callback_writer(struct comm_point* c)
940 	log_assert(c->type == comm_tcp);
941 	sldns_buffer_clear(c->buffer);
942 	if(c->tcp_do_toggle_rw)
943 		c->tcp_is_reading = 1;
944 	c->tcp_byte_count = 0;
945 	/* switch from listening(write) to listening(read) */
946 	comm_point_stop_listening(c);
947 	comm_point_start_listening(c, -1, -1);
950 /** do the callback when reading is done */
/* A full query has been read: flip the buffer for reading, optionally
 * toggle to writing mode, and invoke the commpoint callback. A nonzero
 * callback return means an answer is ready, so listening is restarted
 * (in write mode after the toggle) with the query timeout. */
952 tcp_callback_reader(struct comm_point* c)
954 	log_assert(c->type == comm_tcp || c->type == comm_local);
955 	sldns_buffer_flip(c->buffer);
956 	if(c->tcp_do_toggle_rw)
957 		c->tcp_is_reading = 0;
958 	c->tcp_byte_count = 0;
959 	if(c->type == comm_tcp)
960 		comm_point_stop_listening(c);
961 	fptr_ok(fptr_whitelist_comm_point(c->callback));
962 	if( (*c->callback)(c, c->cb_arg, NETEVENT_NOERROR, &c->repinfo) ) {
963 		comm_point_start_listening(c, -1, c->tcp_timeout_msec);
967 /** continue ssl handshake */
/* Drives the nonblocking SSL_do_handshake state machine. The hs_read/
 * hs_write states mean a prior SSL_read/SSL_write (not the handshake
 * itself) asked for the opposite I/O direction; satisfying that
 * condition returns control to normal processing. Otherwise the
 * handshake is (re)attempted and WANT_READ/WANT_WRITE re-register the
 * event for the needed direction. Return values (1 = ok/again later,
 * 0 = connection closed/failed) are partly in elided lines. */
970 ssl_handshake(struct comm_point* c)
973 	if(c->ssl_shake_state == comm_ssl_shake_hs_read) {
974 		/* read condition satisfied back to writing */
975 		comm_point_listen_for_rw(c, 1, 1);
976 		c->ssl_shake_state = comm_ssl_shake_none;
979 	if(c->ssl_shake_state == comm_ssl_shake_hs_write) {
980 		/* write condition satisfied, back to reading */
981 		comm_point_listen_for_rw(c, 1, 0);
982 		c->ssl_shake_state = comm_ssl_shake_none;
987 	r = SSL_do_handshake(c->ssl);
989 		int want = SSL_get_error(c->ssl, r);
990 		if(want == SSL_ERROR_WANT_READ) {
			/* already waiting for read: nothing to change */
991 			if(c->ssl_shake_state == comm_ssl_shake_read)
993 			c->ssl_shake_state = comm_ssl_shake_read;
994 			comm_point_listen_for_rw(c, 1, 0);
996 		} else if(want == SSL_ERROR_WANT_WRITE) {
997 			if(c->ssl_shake_state == comm_ssl_shake_write)
999 			c->ssl_shake_state = comm_ssl_shake_write;
1000 			comm_point_listen_for_rw(c, 0, 1);
1003 			return 0; /* closed */
1004 		} else if(want == SSL_ERROR_SYSCALL) {
1005 			/* SYSCALL and errno==0 means closed uncleanly */
1007 				log_err("SSL_handshake syscall: %s",
1011 			log_crypto_err("ssl handshake failed");
1012 			log_addr(1, "ssl handshake failed", &c->repinfo.addr,
1013 				c->repinfo.addrlen);
1017 	/* this is where peer verification could take place */
1018 	log_addr(VERB_ALGO, "SSL DNS connection", &c->repinfo.addr,
1019 		c->repinfo.addrlen);
1021 	/* setup listen rw correctly */
1022 	if(c->tcp_is_reading) {
1023 		if(c->ssl_shake_state != comm_ssl_shake_read)
1024 			comm_point_listen_for_rw(c, 1, 0);
1026 		comm_point_listen_for_rw(c, 1, 1);
1028 	c->ssl_shake_state = comm_ssl_shake_none;
1033 /** ssl read callback on TCP */
/* Nonblocking SSL read of one DNS-over-TLS message: first the 2-byte
 * length prefix (accumulated in tcp_byte_count), then the payload into
 * c->buffer up to that length. WANT_WRITE switches the event to write
 * and records hs_write so ssl_handshake resumes us; WANT_READ means
 * try again on the next event. Oversized or sub-header-length messages
 * are dropped. When the whole message has arrived,
 * tcp_callback_reader() delivers it. Returns 1 to continue, 0 on
 * close/error (some returns in elided lines). */
1035 ssl_handle_read(struct comm_point* c)
1039 	if(c->ssl_shake_state != comm_ssl_shake_none) {
1040 		if(!ssl_handshake(c))
1042 		if(c->ssl_shake_state != comm_ssl_shake_none)
1045 	if(c->tcp_byte_count < sizeof(uint16_t)) {
1046 		/* read length bytes */
1048 		if((r=SSL_read(c->ssl, (void*)sldns_buffer_at(c->buffer,
1049 			c->tcp_byte_count), (int)(sizeof(uint16_t) -
1050 			c->tcp_byte_count))) <= 0) {
1051 			int want = SSL_get_error(c->ssl, r);
1052 			if(want == SSL_ERROR_ZERO_RETURN) {
1053 				return 0; /* shutdown, closed */
1054 			} else if(want == SSL_ERROR_WANT_READ) {
1055 				return 1; /* read more later */
1056 			} else if(want == SSL_ERROR_WANT_WRITE) {
1057 				c->ssl_shake_state = comm_ssl_shake_hs_write;
1058 				comm_point_listen_for_rw(c, 0, 1);
1060 			} else if(want == SSL_ERROR_SYSCALL) {
1062 					log_err("SSL_read syscall: %s",
1066 			log_crypto_err("could not SSL_read");
1069 		c->tcp_byte_count += r;
1070 		if(c->tcp_byte_count != sizeof(uint16_t))
		/* length prefix complete: validate against buffer size */
1072 		if(sldns_buffer_read_u16_at(c->buffer, 0) >
1073 			sldns_buffer_capacity(c->buffer)) {
1074 			verbose(VERB_QUERY, "ssl: dropped larger than buffer");
1077 		sldns_buffer_set_limit(c->buffer,
1078 			sldns_buffer_read_u16_at(c->buffer, 0));
1079 		if(sldns_buffer_limit(c->buffer) < LDNS_HEADER_SIZE) {
1080 			verbose(VERB_QUERY, "ssl: dropped bogus too short.");
1083 		verbose(VERB_ALGO, "Reading ssl tcp query of length %d",
1084 			(int)sldns_buffer_limit(c->buffer));
1086 	log_assert(sldns_buffer_remaining(c->buffer) > 0);
	/* read (more of) the message payload */
1088 	r = SSL_read(c->ssl, (void*)sldns_buffer_current(c->buffer),
1089 		(int)sldns_buffer_remaining(c->buffer));
1091 		int want = SSL_get_error(c->ssl, r);
1092 		if(want == SSL_ERROR_ZERO_RETURN) {
1093 			return 0; /* shutdown, closed */
1094 		} else if(want == SSL_ERROR_WANT_READ) {
1095 			return 1; /* read more later */
1096 		} else if(want == SSL_ERROR_WANT_WRITE) {
1097 			c->ssl_shake_state = comm_ssl_shake_hs_write;
1098 			comm_point_listen_for_rw(c, 0, 1);
1100 		} else if(want == SSL_ERROR_SYSCALL) {
1102 				log_err("SSL_read syscall: %s",
1106 		log_crypto_err("could not SSL_read");
1109 	sldns_buffer_skip(c->buffer, (ssize_t)r);
1110 	if(sldns_buffer_remaining(c->buffer) <= 0) {
1111 		tcp_callback_reader(c);
1117 #endif /* HAVE_SSL */
1120 /** ssl write callback on TCP */
/* Nonblocking SSL write of one DNS-over-TLS answer: first the 2-byte
 * network-order length prefix (byte offset tracked in tcp_byte_count),
 * then the buffer contents. PARTIAL_WRITE mode lets SSL_write return
 * after partial progress. WANT_READ switches the event to read and
 * records shake_read so ssl_handshake resumes us; WANT_WRITE means try
 * again later. tcp_callback_writer() completes the transaction when
 * the buffer is drained. Returns 1 to continue, 0 on close/error
 * (some returns in elided lines). */
1122 ssl_handle_write(struct comm_point* c)
1126 	if(c->ssl_shake_state != comm_ssl_shake_none) {
1127 		if(!ssl_handshake(c))
1129 		if(c->ssl_shake_state != comm_ssl_shake_none)
1132 	/* ignore return, if fails we may simply block */
1133 	(void)SSL_set_mode(c->ssl, SSL_MODE_ENABLE_PARTIAL_WRITE);
1134 	if(c->tcp_byte_count < sizeof(uint16_t)) {
1135 		uint16_t len = htons(sldns_buffer_limit(c->buffer));
1137 		r = SSL_write(c->ssl,
1138 			(void*)(((uint8_t*)&len)+c->tcp_byte_count),
1139 			(int)(sizeof(uint16_t)-c->tcp_byte_count));
1141 			int want = SSL_get_error(c->ssl, r);
1142 			if(want == SSL_ERROR_ZERO_RETURN) {
1143 				return 0; /* closed */
1144 			} else if(want == SSL_ERROR_WANT_READ) {
1145 				c->ssl_shake_state = comm_ssl_shake_read;
1146 				comm_point_listen_for_rw(c, 1, 0);
1147 				return 1; /* wait for read condition */
1148 			} else if(want == SSL_ERROR_WANT_WRITE) {
1149 				return 1; /* write more later */
1150 			} else if(want == SSL_ERROR_SYSCALL) {
1152 					log_err("SSL_write syscall: %s",
1156 			log_crypto_err("could not SSL_write");
1159 		c->tcp_byte_count += r;
1160 		if(c->tcp_byte_count < sizeof(uint16_t))
		/* prefix done: position buffer past any payload bytes the
		 * prefix write already accounted for */
1162 		sldns_buffer_set_position(c->buffer, c->tcp_byte_count -
1164 		if(sldns_buffer_remaining(c->buffer) == 0) {
1165 			tcp_callback_writer(c);
1169 	log_assert(sldns_buffer_remaining(c->buffer) > 0);
1171 	r = SSL_write(c->ssl, (void*)sldns_buffer_current(c->buffer),
1172 		(int)sldns_buffer_remaining(c->buffer));
1174 		int want = SSL_get_error(c->ssl, r);
1175 		if(want == SSL_ERROR_ZERO_RETURN) {
1176 			return 0; /* closed */
1177 		} else if(want == SSL_ERROR_WANT_READ) {
1178 			c->ssl_shake_state = comm_ssl_shake_read;
1179 			comm_point_listen_for_rw(c, 1, 0);
1180 			return 1; /* wait for read condition */
1181 		} else if(want == SSL_ERROR_WANT_WRITE) {
1182 			return 1; /* write more later */
1183 		} else if(want == SSL_ERROR_SYSCALL) {
1185 				log_err("SSL_write syscall: %s",
1189 		log_crypto_err("could not SSL_write");
1192 	sldns_buffer_skip(c->buffer, (ssize_t)r);
1194 	if(sldns_buffer_remaining(c->buffer) == 0) {
1195 		tcp_callback_writer(c);
1201 #endif /* HAVE_SSL */
1204 /** handle ssl tcp connection with dns contents */
/* Dispatch to the SSL read or write handler based on the commpoint's
 * current direction. Returns 0 on error, 1 to continue. */
1206 ssl_handle_it(struct comm_point* c)
1208 	if(c->tcp_is_reading)
1209 		return ssl_handle_read(c);
1210 	return ssl_handle_write(c);
1213 /** Handle tcp reading callback.
1214  * @param fd: file descriptor of socket.
1215  * @param c: comm point to read from into buffer.
1216  * @param short_ok: if true, very short packets are OK (for comm_local).
1217  * @return: 0 on error
/* Plain (non-SSL) TCP read of one DNS message: first the 2-byte length
 * prefix (accumulated in tcp_byte_count), then the payload into
 * c->buffer. r == 0 from recv means the peer closed (handling elided).
 * EINTR/EAGAIN (and winsock in-progress/would-block) are retried by the
 * event loop; other errors are logged with the peer address. Messages
 * larger than the buffer, or shorter than a DNS header (unless
 * short_ok, check partly elided at the limit test), are dropped. When
 * complete, tcp_callback_reader() delivers the message. */
1220 comm_point_tcp_handle_read(int fd, struct comm_point* c, int short_ok)
1223 	log_assert(c->type == comm_tcp || c->type == comm_local);
		/* SSL connections take the SSL path entirely */
1225 		return ssl_handle_it(c);
1226 	if(!c->tcp_is_reading)
1229 	log_assert(fd != -1);
1230 	if(c->tcp_byte_count < sizeof(uint16_t)) {
1231 		/* read length bytes */
1232 		r = recv(fd,(void*)sldns_buffer_at(c->buffer,c->tcp_byte_count),
1233 			sizeof(uint16_t)-c->tcp_byte_count, 0);
1238 			if(errno == EINTR || errno == EAGAIN)
1241 			if(errno == ECONNRESET && verbosity < 2)
1242 				return 0; /* silence reset by peer */
1244 			log_err_addr("read (in tcp s)", strerror(errno),
1245 				&c->repinfo.addr, c->repinfo.addrlen);
1246 #else /* USE_WINSOCK */
1247 			if(WSAGetLastError() == WSAECONNRESET)
1249 			if(WSAGetLastError() == WSAEINPROGRESS)
1251 			if(WSAGetLastError() == WSAEWOULDBLOCK) {
1252 				ub_winsock_tcp_wouldblock(c->ev->ev,
1256 			log_err_addr("read (in tcp s)",
1257 				wsa_strerror(WSAGetLastError()),
1258 				&c->repinfo.addr, c->repinfo.addrlen);
1262 		c->tcp_byte_count += r;
1263 		if(c->tcp_byte_count != sizeof(uint16_t))
		/* length prefix complete: validate it */
1265 		if(sldns_buffer_read_u16_at(c->buffer, 0) >
1266 			sldns_buffer_capacity(c->buffer)) {
1267 			verbose(VERB_QUERY, "tcp: dropped larger than buffer");
1270 		sldns_buffer_set_limit(c->buffer,
1271 			sldns_buffer_read_u16_at(c->buffer, 0));
1273 			sldns_buffer_limit(c->buffer) < LDNS_HEADER_SIZE) {
1274 			verbose(VERB_QUERY, "tcp: dropped bogus too short.");
1277 		verbose(VERB_ALGO, "Reading tcp query of length %d",
1278 			(int)sldns_buffer_limit(c->buffer));
1281 	log_assert(sldns_buffer_remaining(c->buffer) > 0);
	/* read (more of) the message payload */
1282 	r = recv(fd, (void*)sldns_buffer_current(c->buffer),
1283 		sldns_buffer_remaining(c->buffer), 0);
1286 	} else if(r == -1) {
1288 		if(errno == EINTR || errno == EAGAIN)
1290 		log_err_addr("read (in tcp r)", strerror(errno),
1291 			&c->repinfo.addr, c->repinfo.addrlen);
1292 #else /* USE_WINSOCK */
1293 		if(WSAGetLastError() == WSAECONNRESET)
1295 		if(WSAGetLastError() == WSAEINPROGRESS)
1297 		if(WSAGetLastError() == WSAEWOULDBLOCK) {
1298 			ub_winsock_tcp_wouldblock(c->ev->ev, UB_EV_READ);
1301 		log_err_addr("read (in tcp r)",
1302 			wsa_strerror(WSAGetLastError()),
1303 			&c->repinfo.addr, c->repinfo.addrlen);
1307 	sldns_buffer_skip(c->buffer, r);
1308 	if(sldns_buffer_remaining(c->buffer) <= 0) {
1309 		tcp_callback_reader(c);
/* NOTE(review): gappy extract — many lines missing (dropped #if/#ifdef arms,
 * braces, error branches); verify against upstream file before editing. */
1315 * Handle tcp writing callback.
1316 * @param fd: file descriptor of socket.
1317 * @param c: comm point to write buffer out of.
1318 * @return: 0 on error
1321 comm_point_tcp_handle_write(int fd, struct comm_point* c)
1324 struct sldns_buffer *buffer;
1325 log_assert(c->type == comm_tcp);
/* Select the dnscrypt buffer as output; a missing line presumably
 * falls back to c->buffer for non-dnscrypt points — confirm. */
1327 buffer = c->dnscrypt_buffer;
1331 if(c->tcp_is_reading && !c->ssl)
1333 log_assert(fd != -1);
/* Phase 0: on first write after a nonblocking connect(), fetch the
 * pending socket error with SO_ERROR and decide retry/fail. */
1334 if(c->tcp_byte_count == 0 && c->tcp_check_nb_connect) {
1335 /* check for pending error from nonblocking connect */
1336 /* from Stevens, unix network programming, vol1, 3rd ed, p450*/
1338 socklen_t len = (socklen_t)sizeof(error);
1339 if(getsockopt(fd, SOL_SOCKET, SO_ERROR, (void*)&error,
1342 error = errno; /* on solaris errno is error */
1343 #else /* USE_WINSOCK */
1344 error = WSAGetLastError();
1348 #if defined(EINPROGRESS) && defined(EWOULDBLOCK)
1349 if(error == EINPROGRESS || error == EWOULDBLOCK)
1350 return 1; /* try again later */
1353 if(error != 0 && verbosity < 2)
1354 return 0; /* silence lots of chatter in the logs */
1355 else if(error != 0) {
1356 log_err_addr("tcp connect", strerror(error),
1357 &c->repinfo.addr, c->repinfo.addrlen);
1358 #else /* USE_WINSOCK */
1360 if(error == WSAEINPROGRESS)
1362 else if(error == WSAEWOULDBLOCK) {
1363 ub_winsock_tcp_wouldblock(c->ev->ev, UB_EV_WRITE);
1365 } else if(error != 0 && verbosity < 2)
1367 else if(error != 0) {
1368 log_err_addr("tcp connect", wsa_strerror(error),
1369 &c->repinfo.addr, c->repinfo.addrlen);
1370 #endif /* USE_WINSOCK */
/* TLS connections are written by the SSL handler (presumably guarded
 * by a c->ssl check on a missing line — confirm). */
1375 return ssl_handle_it(c);
/* Phase 1 (Linux TCP Fast Open): a single sendmsg(MSG_FASTOPEN) does
 * both connect() and the first send; used only on first use. */
1377 #ifdef USE_MSG_FASTOPEN
1378 /* Only try this on first use of a connection that uses tfo,
1379 otherwise fall through to normal write */
1380 /* Also, TFO support on WINDOWS not implemented at the moment */
1381 if(c->tcp_do_fastopen == 1) {
1382 /* this form of sendmsg() does both a connect() and send() so need to
1383 look for various flavours of error*/
1384 uint16_t len = htons(sldns_buffer_limit(buffer));
1386 struct iovec iov[2];
1387 c->tcp_do_fastopen = 0;
1388 memset(&msg, 0, sizeof(msg));
/* iov[0] = remaining bytes of the 2-byte length prefix,
 * iov[1] = the whole DNS message payload */
1389 iov[0].iov_base = (uint8_t*)&len + c->tcp_byte_count;
1390 iov[0].iov_len = sizeof(uint16_t) - c->tcp_byte_count;
1391 iov[1].iov_base = sldns_buffer_begin(buffer);
1392 iov[1].iov_len = sldns_buffer_limit(buffer);
1393 log_assert(iov[0].iov_len > 0);
1394 log_assert(iov[1].iov_len > 0);
1395 msg.msg_name = &c->repinfo.addr;
1396 msg.msg_namelen = c->repinfo.addrlen;
1399 r = sendmsg(fd, &msg, MSG_FASTOPEN);
1401 #if defined(EINPROGRESS) && defined(EWOULDBLOCK)
1402 /* Handshake is underway, maybe because no TFO cookie available.
1403 Come back to write the messsage*/
1404 if(errno == EINPROGRESS || errno == EWOULDBLOCK)
1407 if(errno == EINTR || errno == EAGAIN)
1409 /* Not handling EISCONN here as shouldn't ever hit that case.*/
1410 if(errno != EPIPE && errno != 0 && verbosity < 2)
1411 return 0; /* silence lots of chatter in the logs */
1412 if(errno != EPIPE && errno != 0) {
1413 log_err_addr("tcp sendmsg", strerror(errno),
1414 &c->repinfo.addr, c->repinfo.addrlen);
1417 /* fallthrough to nonFASTOPEN
1418 * (MSG_FASTOPEN on Linux 3 produces EPIPE)
1419 * we need to perform connect() */
1420 if(connect(fd, (struct sockaddr *)&c->repinfo.addr, c->repinfo.addrlen) == -1) {
1422 if(errno == EINPROGRESS)
1423 return 1; /* wait until connect done*/
1426 if(WSAGetLastError() == WSAEINPROGRESS ||
1427 WSAGetLastError() == WSAEWOULDBLOCK)
1428 return 1; /* wait until connect done*/
1430 if(tcp_connect_errno_needs_log(
1431 (struct sockaddr *)&c->repinfo.addr, c->repinfo.addrlen)) {
1432 log_err_addr("outgoing tcp: connect after EPIPE for fastopen",
1433 strerror(errno), &c->repinfo.addr, c->repinfo.addrlen);
/* TFO sendmsg succeeded (possibly partially): account for the bytes
 * written and invoke the writer callback when everything is out. */
1439 c->tcp_byte_count += r;
1440 if(c->tcp_byte_count < sizeof(uint16_t))
1442 sldns_buffer_set_position(buffer, c->tcp_byte_count -
1444 if(sldns_buffer_remaining(buffer) == 0) {
1445 tcp_callback_writer(c);
1450 #endif /* USE_MSG_FASTOPEN */
/* Phase 2: normal path — write the 2-byte length prefix (gathered with
 * the payload via writev() when available). */
1452 if(c->tcp_byte_count < sizeof(uint16_t)) {
1453 uint16_t len = htons(sldns_buffer_limit(buffer));
1455 struct iovec iov[2];
1456 iov[0].iov_base = (uint8_t*)&len + c->tcp_byte_count;
1457 iov[0].iov_len = sizeof(uint16_t) - c->tcp_byte_count;
1458 iov[1].iov_base = sldns_buffer_begin(buffer);
1459 iov[1].iov_len = sldns_buffer_limit(buffer);
1460 log_assert(iov[0].iov_len > 0);
1461 log_assert(iov[1].iov_len > 0);
1462 r = writev(fd, iov, 2);
1463 #else /* HAVE_WRITEV */
1464 r = send(fd, (void*)(((uint8_t*)&len)+c->tcp_byte_count),
1465 sizeof(uint16_t)-c->tcp_byte_count, 0);
1466 #endif /* HAVE_WRITEV */
1470 if(errno == EPIPE && verbosity < 2)
1471 return 0; /* silence 'broken pipe' */
1473 if(errno == EINTR || errno == EAGAIN)
1476 log_err_addr("tcp writev", strerror(errno),
1477 &c->repinfo.addr, c->repinfo.addrlen);
1478 # else /* HAVE_WRITEV */
1479 log_err_addr("tcp send s", strerror(errno),
1480 &c->repinfo.addr, c->repinfo.addrlen);
1481 # endif /* HAVE_WRITEV */
1483 if(WSAGetLastError() == WSAENOTCONN)
1485 if(WSAGetLastError() == WSAEINPROGRESS)
1487 if(WSAGetLastError() == WSAEWOULDBLOCK) {
1488 ub_winsock_tcp_wouldblock(c->ev->ev,
1492 log_err_addr("tcp send s",
1493 wsa_strerror(WSAGetLastError()),
1494 &c->repinfo.addr, c->repinfo.addrlen);
1498 c->tcp_byte_count += r;
1499 if(c->tcp_byte_count < sizeof(uint16_t))
1501 sldns_buffer_set_position(buffer, c->tcp_byte_count -
1503 if(sldns_buffer_remaining(buffer) == 0) {
1504 tcp_callback_writer(c);
/* Phase 3: length prefix already out — send the remaining payload. */
1508 log_assert(sldns_buffer_remaining(buffer) > 0);
1509 r = send(fd, (void*)sldns_buffer_current(buffer),
1510 sldns_buffer_remaining(buffer), 0);
1513 if(errno == EINTR || errno == EAGAIN)
1515 log_err_addr("tcp send r", strerror(errno),
1516 &c->repinfo.addr, c->repinfo.addrlen);
1518 if(WSAGetLastError() == WSAEINPROGRESS)
1520 if(WSAGetLastError() == WSAEWOULDBLOCK) {
1521 ub_winsock_tcp_wouldblock(c->ev->ev, UB_EV_WRITE);
1524 log_err_addr("tcp send r", wsa_strerror(WSAGetLastError()),
1525 &c->repinfo.addr, c->repinfo.addrlen);
1529 sldns_buffer_skip(buffer, r);
/* All payload bytes written: invoke the writer callback. */
1531 if(sldns_buffer_remaining(buffer) == 0) {
1532 tcp_callback_writer(c);
/* NOTE(review): gappy extract — lines missing (return type, braces,
 * some conditions); verify against upstream file. */
/* Event-library callback for a tcp comm point: dispatches read/write/
 * timeout events to the tcp handlers; on failure or timeout the handler
 * is reclaimed and the user callback is told NETEVENT_CLOSED/TIMEOUT. */
1539 comm_point_tcp_handle_callback(int fd, short event, void* arg)
1541 struct comm_point* c = (struct comm_point*)arg;
1542 log_assert(c->type == comm_tcp);
1543 ub_comm_base_now(c->ev->base);
1546 /* Initialize if this is a dnscrypt socket */
1548 c->dnscrypt = c->tcp_parent->dnscrypt;
/* dnscrypt needs a separate scratch buffer; allocated lazily here */
1550 if(c->dnscrypt && c->dnscrypt_buffer == c->buffer) {
1551 c->dnscrypt_buffer = sldns_buffer_new(sldns_buffer_capacity(c->buffer));
1552 if(!c->dnscrypt_buffer) {
1553 log_err("Could not allocate dnscrypt buffer");
1559 if(event&UB_EV_READ) {
1560 if(!comm_point_tcp_handle_read(fd, c, 0)) {
1561 reclaim_tcp_handler(c);
1562 if(!c->tcp_do_close) {
1563 fptr_ok(fptr_whitelist_comm_point(
1565 (void)(*c->callback)(c, c->cb_arg,
1566 NETEVENT_CLOSED, NULL);
1571 if(event&UB_EV_WRITE) {
1572 if(!comm_point_tcp_handle_write(fd, c)) {
1573 reclaim_tcp_handler(c);
1574 if(!c->tcp_do_close) {
1575 fptr_ok(fptr_whitelist_comm_point(
1577 (void)(*c->callback)(c, c->cb_arg,
1578 NETEVENT_CLOSED, NULL);
1583 if(event&UB_EV_TIMEOUT) {
1584 verbose(VERB_QUERY, "tcp took too long, dropped");
1585 reclaim_tcp_handler(c);
1586 if(!c->tcp_do_close) {
1587 fptr_ok(fptr_whitelist_comm_point(c->callback));
1588 (void)(*c->callback)(c, c->cb_arg,
1589 NETEVENT_TIMEOUT, NULL);
1593 log_err("Ignored event %d for tcphdl.", event);
/* NOTE(review): gappy extract — closing lines missing; verify upstream. */
/* Event callback for a comm_local point: reads with short_ok=1 (very
 * short packets allowed) and reports NETEVENT_CLOSED to the user
 * callback on read failure. */
1596 void comm_point_local_handle_callback(int fd, short event, void* arg)
1598 struct comm_point* c = (struct comm_point*)arg;
1599 log_assert(c->type == comm_local);
1600 ub_comm_base_now(c->ev->base);
1602 if(event&UB_EV_READ) {
1603 if(!comm_point_tcp_handle_read(fd, c, 1)) {
1604 fptr_ok(fptr_whitelist_comm_point(c->callback));
1605 (void)(*c->callback)(c, c->cb_arg, NETEVENT_CLOSED,
1610 log_err("Ignored event %d for localhdl.", event);
/* NOTE(review): gappy extract — closing lines missing; verify upstream. */
/* Event callback for a raw comm point: forwards every event straight to
 * the user callback, mapping a timeout event to NETEVENT_TIMEOUT and
 * everything else to NETEVENT_NOERROR. */
1613 void comm_point_raw_handle_callback(int ATTR_UNUSED(fd),
1614 short event, void* arg)
1616 struct comm_point* c = (struct comm_point*)arg;
1617 int err = NETEVENT_NOERROR;
1618 log_assert(c->type == comm_raw);
1619 ub_comm_base_now(c->ev->base);
1621 if(event&UB_EV_TIMEOUT)
1622 err = NETEVENT_TIMEOUT;
1623 fptr_ok(fptr_whitelist_comm_point_raw(c->callback));
1624 (void)(*c->callback)(c, c->cb_arg, err, NULL);
/* NOTE(review): gappy extract — lines missing (NULL checks after calloc,
 * field assignments, returns); verify against upstream file. */
/* Allocate and set up a UDP comm point on fd: zeroed tcp state, the
 * caller-supplied buffer, a persistent read event firing
 * comm_point_udp_callback; on event setup failure the point is deleted. */
1628 comm_point_create_udp(struct comm_base *base, int fd, sldns_buffer* buffer,
1629 comm_point_callback_type* callback, void* callback_arg)
1631 struct comm_point* c = (struct comm_point*)calloc(1,
1632 sizeof(struct comm_point));
1636 c->ev = (struct internal_event*)calloc(1,
1637 sizeof(struct internal_event));
/* tcp-related fields are unused for udp; cleared explicitly */
1646 c->tcp_is_reading = 0;
1647 c->tcp_byte_count = 0;
1648 c->tcp_parent = NULL;
1649 c->max_tcp_count = 0;
1650 c->cur_tcp_count = 0;
1651 c->tcp_handlers = NULL;
1654 c->tcp_do_close = 0;
1655 c->do_not_close = 0;
1656 c->tcp_do_toggle_rw = 0;
1657 c->tcp_check_nb_connect = 0;
1658 #ifdef USE_MSG_FASTOPEN
1659 c->tcp_do_fastopen = 0;
1663 c->dnscrypt_buffer = buffer;
1666 c->callback = callback;
1667 c->cb_arg = callback_arg;
1668 evbits = UB_EV_READ | UB_EV_PERSIST;
1669 /* ub_event stuff */
1670 c->ev->ev = ub_event_new(base->eb->base, c->fd, evbits,
1671 comm_point_udp_callback, c);
1672 if(c->ev->ev == NULL) {
1673 log_err("could not baseset udp event");
1674 comm_point_delete(c);
/* only register the event when a real fd was supplied */
1677 if(fd!=-1 && ub_event_add(c->ev->ev, c->timeout) != 0 ) {
1678 log_err("could not add udp event");
1679 comm_point_delete(c);
/* NOTE(review): gappy extract — lines missing; verify against upstream.
 * Same construction as comm_point_create_udp but wires the event to
 * comm_point_udp_ancil_callback (ancillary-data-aware udp callback). */
1686 comm_point_create_udp_ancil(struct comm_base *base, int fd,
1687 sldns_buffer* buffer,
1688 comm_point_callback_type* callback, void* callback_arg)
1690 struct comm_point* c = (struct comm_point*)calloc(1,
1691 sizeof(struct comm_point));
1695 c->ev = (struct internal_event*)calloc(1,
1696 sizeof(struct internal_event));
/* tcp-related fields are unused for udp; cleared explicitly */
1705 c->tcp_is_reading = 0;
1706 c->tcp_byte_count = 0;
1707 c->tcp_parent = NULL;
1708 c->max_tcp_count = 0;
1709 c->cur_tcp_count = 0;
1710 c->tcp_handlers = NULL;
1713 c->tcp_do_close = 0;
1714 c->do_not_close = 0;
1717 c->dnscrypt_buffer = buffer;
1720 c->tcp_do_toggle_rw = 0;
1721 c->tcp_check_nb_connect = 0;
1722 #ifdef USE_MSG_FASTOPEN
1723 c->tcp_do_fastopen = 0;
1725 c->callback = callback;
1726 c->cb_arg = callback_arg;
1727 evbits = UB_EV_READ | UB_EV_PERSIST;
1728 /* ub_event stuff */
1729 c->ev->ev = ub_event_new(base->eb->base, c->fd, evbits,
1730 comm_point_udp_ancil_callback, c);
1731 if(c->ev->ev == NULL) {
1732 log_err("could not baseset udp event");
1733 comm_point_delete(c);
/* only register the event when a real fd was supplied */
1736 if(fd!=-1 && ub_event_add(c->ev->ev, c->timeout) != 0 ) {
1737 log_err("could not add udp event");
1738 comm_point_delete(c);
/* NOTE(review): gappy extract — lines missing (NULL checks, returns,
 * cleanup paths); verify against upstream file. */
/* Allocate one tcp handler comm point owned by a tcp-accept parent:
 * its own buffer and timeout, read/timeout events, and it is linked
 * onto the parent's free list for reuse. */
1744 static struct comm_point*
1745 comm_point_create_tcp_handler(struct comm_base *base,
1746 struct comm_point* parent, size_t bufsize,
1747 comm_point_callback_type* callback, void* callback_arg)
1749 struct comm_point* c = (struct comm_point*)calloc(1,
1750 sizeof(struct comm_point));
1754 c->ev = (struct internal_event*)calloc(1,
1755 sizeof(struct internal_event));
1762 c->buffer = sldns_buffer_new(bufsize);
1768 c->timeout = (struct timeval*)malloc(sizeof(struct timeval));
/* cleanup of the buffer on timeout-alloc failure (error path) */
1770 sldns_buffer_free(c->buffer);
1775 c->tcp_is_reading = 0;
1776 c->tcp_byte_count = 0;
1777 c->tcp_parent = parent;
1778 c->max_tcp_count = 0;
1779 c->cur_tcp_count = 0;
1780 c->tcp_handlers = NULL;
1783 c->tcp_do_close = 0;
1784 c->do_not_close = 0;
/* handler alternates between reading a query and writing a reply */
1785 c->tcp_do_toggle_rw = 1;
1786 c->tcp_check_nb_connect = 0;
1787 #ifdef USE_MSG_FASTOPEN
1788 c->tcp_do_fastopen = 0;
1792 /* We don't know just yet if this is a dnscrypt channel. Allocation
1793 * will be done when handling the callback. */
1794 c->dnscrypt_buffer = c->buffer;
1797 c->callback = callback;
1798 c->cb_arg = callback_arg;
1799 /* add to parent free list */
1800 c->tcp_free = parent->tcp_free;
1801 parent->tcp_free = c;
1802 /* ub_event stuff */
1803 evbits = UB_EV_PERSIST | UB_EV_READ | UB_EV_TIMEOUT;
1804 c->ev->ev = ub_event_new(base->eb->base, c->fd, evbits,
1805 comm_point_tcp_handle_callback, c);
1806 if(c->ev->ev == NULL)
1808 log_err("could not basetset tcphdl event");
/* unlink from the parent free list again on event failure */
1809 parent->tcp_free = c->tcp_free;
/* NOTE(review): gappy extract — lines missing (NULL checks, returns);
 * verify against upstream file. */
/* Create a tcp-accept comm point on fd plus `num` preallocated tcp
 * handler points; registers a persistent read event for accepting.
 * On any failure the whole point (and handlers so far) is deleted. */
1818 comm_point_create_tcp(struct comm_base *base, int fd, int num, size_t bufsize,
1819 comm_point_callback_type* callback, void* callback_arg)
1821 struct comm_point* c = (struct comm_point*)calloc(1,
1822 sizeof(struct comm_point));
1825 /* first allocate the TCP accept listener */
1828 c->ev = (struct internal_event*)calloc(1,
1829 sizeof(struct internal_event));
1838 c->tcp_is_reading = 0;
1839 c->tcp_byte_count = 0;
1840 c->tcp_parent = NULL;
1841 c->max_tcp_count = num;
1842 c->cur_tcp_count = 0;
1843 c->tcp_handlers = (struct comm_point**)calloc((size_t)num,
1844 sizeof(struct comm_point*));
1845 if(!c->tcp_handlers) {
1851 c->type = comm_tcp_accept;
1852 c->tcp_do_close = 0;
1853 c->do_not_close = 0;
1854 c->tcp_do_toggle_rw = 0;
1855 c->tcp_check_nb_connect = 0;
1856 #ifdef USE_MSG_FASTOPEN
1857 c->tcp_do_fastopen = 0;
1861 c->dnscrypt_buffer = NULL;
1865 evbits = UB_EV_READ | UB_EV_PERSIST;
1866 /* ub_event stuff */
1867 c->ev->ev = ub_event_new(base->eb->base, c->fd, evbits,
1868 comm_point_tcp_accept_callback, c);
1869 if(c->ev->ev == NULL) {
1870 log_err("could not baseset tcpacc event");
1871 comm_point_delete(c);
1874 if (ub_event_add(c->ev->ev, c->timeout) != 0) {
1875 log_err("could not add tcpacc event");
1876 comm_point_delete(c);
1879 /* now prealloc the tcp handlers */
1880 for(i=0; i<num; i++) {
1881 c->tcp_handlers[i] = comm_point_create_tcp_handler(base,
1882 c, bufsize, callback, callback_arg);
1883 if(!c->tcp_handlers[i]) {
/* deleting the accept point also deletes handlers created so far */
1884 comm_point_delete(c);
/* NOTE(review): gappy extract — lines missing; verify against upstream. */
/* Create an outgoing tcp comm point: own buffer, write event, and the
 * nonblocking-connect check plus (if compiled in) TCP Fast Open enabled
 * for the first write. */
1893 comm_point_create_tcp_out(struct comm_base *base, size_t bufsize,
1894 comm_point_callback_type* callback, void* callback_arg)
1896 struct comm_point* c = (struct comm_point*)calloc(1,
1897 sizeof(struct comm_point));
1901 c->ev = (struct internal_event*)calloc(1,
1902 sizeof(struct internal_event));
1909 c->buffer = sldns_buffer_new(bufsize);
1916 c->tcp_is_reading = 0;
1917 c->tcp_byte_count = 0;
1918 c->tcp_parent = NULL;
1919 c->max_tcp_count = 0;
1920 c->cur_tcp_count = 0;
1921 c->tcp_handlers = NULL;
1924 c->tcp_do_close = 0;
1925 c->do_not_close = 0;
1926 c->tcp_do_toggle_rw = 1;
/* outgoing: first writable event must check SO_ERROR from connect() */
1927 c->tcp_check_nb_connect = 1;
1928 #ifdef USE_MSG_FASTOPEN
1929 c->tcp_do_fastopen = 1;
1933 c->dnscrypt_buffer = c->buffer;
1936 c->callback = callback;
1937 c->cb_arg = callback_arg;
1938 evbits = UB_EV_PERSIST | UB_EV_WRITE;
1939 c->ev->ev = ub_event_new(base->eb->base, c->fd, evbits,
1940 comm_point_tcp_handle_callback, c);
1941 if(c->ev->ev == NULL)
1943 log_err("could not baseset tcpout event");
1944 sldns_buffer_free(c->buffer);
/* NOTE(review): gappy extract — lines missing; verify against upstream. */
/* Create a comm_local point on fd (local control channel): own buffer,
 * always reading, fd is NOT closed on delete (do_not_close=1), events
 * dispatch to comm_point_local_handle_callback. */
1954 comm_point_create_local(struct comm_base *base, int fd, size_t bufsize,
1955 comm_point_callback_type* callback, void* callback_arg)
1957 struct comm_point* c = (struct comm_point*)calloc(1,
1958 sizeof(struct comm_point));
1962 c->ev = (struct internal_event*)calloc(1,
1963 sizeof(struct internal_event));
1970 c->buffer = sldns_buffer_new(bufsize);
1977 c->tcp_is_reading = 1;
1978 c->tcp_byte_count = 0;
1979 c->tcp_parent = NULL;
1980 c->max_tcp_count = 0;
1981 c->cur_tcp_count = 0;
1982 c->tcp_handlers = NULL;
1984 c->type = comm_local;
1985 c->tcp_do_close = 0;
/* caller owns the fd; comm_point_close must not close it */
1986 c->do_not_close = 1;
1987 c->tcp_do_toggle_rw = 0;
1988 c->tcp_check_nb_connect = 0;
1989 #ifdef USE_MSG_FASTOPEN
1990 c->tcp_do_fastopen = 0;
1994 c->dnscrypt_buffer = c->buffer;
1996 c->callback = callback;
1997 c->cb_arg = callback_arg;
1998 /* ub_event stuff */
1999 evbits = UB_EV_PERSIST | UB_EV_READ;
2000 c->ev->ev = ub_event_new(base->eb->base, c->fd, evbits,
2001 comm_point_local_handle_callback, c);
2002 if(c->ev->ev == NULL) {
2003 log_err("could not baseset localhdl event");
2008 if (ub_event_add(c->ev->ev, c->timeout) != 0) {
2009 log_err("could not add localhdl event");
2010 ub_event_free(c->ev->ev);
/* NOTE(review): gappy extract — lines missing; verify against upstream. */
/* Create a raw comm point on fd: no buffer of its own, fd not closed on
 * delete, a persistent write OR read event (chosen by `writing`) that
 * dispatches to comm_point_raw_handle_callback. */
2019 comm_point_create_raw(struct comm_base* base, int fd, int writing,
2020 comm_point_callback_type* callback, void* callback_arg)
2022 struct comm_point* c = (struct comm_point*)calloc(1,
2023 sizeof(struct comm_point));
2027 c->ev = (struct internal_event*)calloc(1,
2028 sizeof(struct internal_event));
2037 c->tcp_is_reading = 0;
2038 c->tcp_byte_count = 0;
2039 c->tcp_parent = NULL;
2040 c->max_tcp_count = 0;
2041 c->cur_tcp_count = 0;
2042 c->tcp_handlers = NULL;
2045 c->tcp_do_close = 0;
/* caller owns the fd; comm_point_close must not close it */
2046 c->do_not_close = 1;
2047 c->tcp_do_toggle_rw = 0;
2048 c->tcp_check_nb_connect = 0;
2049 #ifdef USE_MSG_FASTOPEN
2050 c->tcp_do_fastopen = 0;
2054 c->dnscrypt_buffer = c->buffer;
2056 c->callback = callback;
2057 c->cb_arg = callback_arg;
2058 /* ub_event stuff */
/* direction selected by `writing` (condition line missing — confirm) */
2060 evbits = UB_EV_PERSIST | UB_EV_WRITE;
2061 else evbits = UB_EV_PERSIST | UB_EV_READ;
2062 c->ev->ev = ub_event_new(base->eb->base, c->fd, evbits,
2063 comm_point_raw_handle_callback, c);
2064 if(c->ev->ev == NULL) {
2065 log_err("could not baseset rawhdl event");
2070 if (ub_event_add(c->ev->ev, c->timeout) != 0) {
2071 log_err("could not add rawhdl event");
2072 ub_event_free(c->ev->ev);
/* NOTE(review): gappy extract — lines missing; verify against upstream. */
/* Deregister the point's event and close its fd (unless do_not_close);
 * the fd is closed only after event removal so epoll state stays sane. */
2081 comm_point_close(struct comm_point* c)
2086 if(ub_event_del(c->ev->ev) != 0) {
2087 log_err("could not event_del on close");
2089 /* close fd after removing from event lists, or epoll.. is messed up */
2090 if(c->fd != -1 && !c->do_not_close) {
2091 verbose(VERB_ALGO, "close fd %d", c->fd);
/* NOTE(review): gappy extract — lines missing; verify against upstream. */
/* Destroy a comm point: shut down TLS if present, close the fd, delete
 * any child tcp handlers, free buffers (including a separately-allocated
 * dnscrypt buffer) and the event. */
2102 comm_point_delete(struct comm_point* c)
2106 if(c->type == comm_tcp && c->ssl) {
2108 SSL_shutdown(c->ssl);
2112 comm_point_close(c);
2113 if(c->tcp_handlers) {
2115 for(i=0; i<c->max_tcp_count; i++)
2116 comm_point_delete(c->tcp_handlers[i]);
2117 free(c->tcp_handlers);
2120 if(c->type == comm_tcp || c->type == comm_local) {
2121 sldns_buffer_free(c->buffer);
/* dnscrypt buffer is freed only when it is a distinct allocation */
2123 if(c->dnscrypt && c->dnscrypt_buffer != c->buffer) {
2124 sldns_buffer_free(c->dnscrypt_buffer);
2128 ub_event_free(c->ev->ev);
/* NOTE(review): gappy extract — lines missing (udp/tcp branch structure
 * incomplete); verify against upstream file. */
/* Send the reply held in the comm point's buffer back to the client:
 * udp replies are sent immediately (dnscrypt-curved if applicable, with
 * optional dnstap logging); tcp replies start the write-side listening. */
2134 comm_point_send_reply(struct comm_reply *repinfo)
2136 struct sldns_buffer* buffer;
2137 log_assert(repinfo && repinfo->c);
2139 buffer = repinfo->c->dnscrypt_buffer;
/* dnscrypt: re-encrypt ("uncurve") the reply before sending */
2140 if(!dnsc_handle_uncurved_request(repinfo)) {
2144 buffer = repinfo->c->buffer;
2146 if(repinfo->c->type == comm_udp) {
2147 if(repinfo->srctype)
2148 comm_point_send_udp_msg_if(repinfo->c,
2149 buffer, (struct sockaddr*)&repinfo->addr,
2150 repinfo->addrlen, repinfo);
2152 comm_point_send_udp_msg(repinfo->c, buffer,
2153 (struct sockaddr*)&repinfo->addr, repinfo->addrlen);
/* dnstap logging of the client response, when configured */
2155 if(repinfo->c->dtenv != NULL &&
2156 repinfo->c->dtenv->log_client_response_messages)
2157 dt_msg_send_client_response(repinfo->c->dtenv,
2158 &repinfo->addr, repinfo->c->type, repinfo->c->buffer);
2162 if(repinfo->c->tcp_parent->dtenv != NULL &&
2163 repinfo->c->tcp_parent->dtenv->log_client_response_messages)
2164 dt_msg_send_client_response(repinfo->c->tcp_parent->dtenv,
2165 &repinfo->addr, repinfo->c->type, repinfo->c->buffer);
/* tcp: switch the point to writing; -1 keeps the current fd */
2167 comm_point_start_listening(repinfo->c, -1,
2168 repinfo->c->tcp_timeout_msec);
/* NOTE(review): gappy extract — lines missing; verify against upstream. */
/* Drop a pending reply: udp needs no action (branch body missing in
 * this extract — confirm), tcp handlers are reclaimed for reuse. */
2173 comm_point_drop_reply(struct comm_reply* repinfo)
2177 log_assert(repinfo && repinfo->c);
2178 log_assert(repinfo->c->type != comm_tcp_accept);
2179 if(repinfo->c->type == comm_udp)
2181 reclaim_tcp_handler(repinfo->c);
/* NOTE(review): gappy extract — lines missing; verify against upstream. */
/* Remove the point's event from the event base so no callbacks fire. */
2185 comm_point_stop_listening(struct comm_point* c)
2187 verbose(VERB_ALGO, "comm point stop listening %d", c->fd);
2188 if(ub_event_del(c->ev->ev) != 0) {
2189 log_err("event_del error to stoplisten");
/* NOTE(review): gappy extract — lines missing (timeout free/msec==0 path,
 * fd-swap logic); verify against upstream file. */
/* (Re)arm the point's event: optionally install a timeout of msec
 * milliseconds, pick read or write bits for tcp based on direction,
 * optionally swap in newfd (-1 keeps the current fd), then add the event. */
2194 comm_point_start_listening(struct comm_point* c, int newfd, int msec)
2196 verbose(VERB_ALGO, "comm point start listening %d",
2197 c->fd==-1?newfd:c->fd);
2198 if(c->type == comm_tcp_accept && !c->tcp_free) {
2199 /* no use to start listening no free slots. */
2202 if(msec != -1 && msec != 0) {
/* lazily allocate the timeout struct on first use */
2204 c->timeout = (struct timeval*)malloc(sizeof(
2207 log_err("cpsl: malloc failed. No net read.");
2211 ub_event_add_bits(c->ev->ev, UB_EV_TIMEOUT);
2212 #ifndef S_SPLINT_S /* splint fails on struct timeval. */
2213 c->timeout->tv_sec = msec/1000;
2214 c->timeout->tv_usec = (msec%1000)*1000;
2215 #endif /* S_SPLINT_S */
/* tcp points listen for read while reading, write while writing */
2217 if(c->type == comm_tcp) {
2218 ub_event_del_bits(c->ev->ev, UB_EV_READ|UB_EV_WRITE);
2219 if(c->tcp_is_reading)
2220 ub_event_add_bits(c->ev->ev, UB_EV_READ);
2221 else ub_event_add_bits(c->ev->ev, UB_EV_WRITE);
2232 ub_event_set_fd(c->ev->ev, c->fd);
2234 if(ub_event_add(c->ev->ev, msec==0?NULL:c->timeout) != 0) {
2235 log_err("event_add failed. in cpsl.");
/* NOTE(review): gappy extract — closing lines missing; verify upstream. */
/* Re-arm the point's event with exactly the requested read/write bits:
 * delete the event, clear both bits, set the requested ones, re-add
 * with the existing timeout. */
2239 void comm_point_listen_for_rw(struct comm_point* c, int rd, int wr)
2241 verbose(VERB_ALGO, "comm point listen_for_rw %d %d", c->fd, wr);
2242 if(ub_event_del(c->ev->ev) != 0) {
2243 log_err("event_del error to cplf");
2245 ub_event_del_bits(c->ev->ev, UB_EV_READ|UB_EV_WRITE);
2246 if(rd) ub_event_add_bits(c->ev->ev, UB_EV_READ);
2247 if(wr) ub_event_add_bits(c->ev->ev, UB_EV_WRITE);
2248 if(ub_event_add(c->ev->ev, c->timeout) != 0) {
2249 log_err("event_add failed. in cplf.");
/* NOTE(review): gappy extract — lines missing (NULL guards, return);
 * verify against upstream file. */
/* Account the memory used by a comm point: the struct itself, its event,
 * optional timeout, buffers (counting a distinct dnscrypt buffer), and
 * recursively all child tcp handlers for an accept point. */
2253 size_t comm_point_get_mem(struct comm_point* c)
2258 s = sizeof(*c) + sizeof(*c->ev);
2260 s += sizeof(*c->timeout);
2261 if(c->type == comm_tcp || c->type == comm_local) {
2262 s += sizeof(*c->buffer) + sldns_buffer_capacity(c->buffer);
2264 s += sizeof(*c->dnscrypt_buffer);
2265 if(c->buffer != c->dnscrypt_buffer) {
2266 s += sldns_buffer_capacity(c->dnscrypt_buffer);
2270 if(c->type == comm_tcp_accept) {
2272 for(i=0; i<c->max_tcp_count; i++)
2273 s += comm_point_get_mem(c->tcp_handlers[i]);
/* NOTE(review): gappy extract — lines missing; verify against upstream. */
/* Allocate a timer: an internal_timer whose embedded `super` is the
 * public comm_timer handle; a UB_EV_TIMEOUT event dispatches to
 * comm_timer_callback. Returns NULL (presumably) on failure — confirm. */
2279 comm_timer_create(struct comm_base* base, void (*cb)(void*), void* cb_arg)
2281 struct internal_timer *tm = (struct internal_timer*)calloc(1,
2282 sizeof(struct internal_timer));
2284 log_err("malloc failed");
/* back-pointer from the public handle to this internal struct */
2287 tm->super.ev_timer = tm;
2289 tm->super.callback = cb;
2290 tm->super.cb_arg = cb_arg;
2291 tm->ev = ub_event_new(base->eb->base, -1, UB_EV_TIMEOUT,
2292 comm_timer_callback, &tm->super);
2293 if(tm->ev == NULL) {
2294 log_err("timer_create: event_base_set failed.");
/* NOTE(review): gappy extract — lines missing; verify against upstream. */
/* Cancel a pending timer and mark it not-enabled. */
2302 comm_timer_disable(struct comm_timer* timer)
2306 ub_timer_del(timer->ev_timer->ev);
2307 timer->ev_timer->enabled = 0;
/* NOTE(review): gappy extract — lines missing; verify against upstream. */
/* Arm (or re-arm) the timer to fire after tv; an already-enabled timer
 * is disabled first so the new deadline replaces the old one. */
2311 comm_timer_set(struct comm_timer* timer, struct timeval* tv)
2314 if(timer->ev_timer->enabled)
2315 comm_timer_disable(timer);
2316 if(ub_timer_add(timer->ev_timer->ev, timer->ev_timer->base->eb->base,
2317 comm_timer_callback, timer, tv) != 0)
2318 log_err("comm_timer_set: evtimer_add failed.");
2319 timer->ev_timer->enabled = 1;
/* NOTE(review): gappy extract — lines missing; verify against upstream. */
/* Destroy a timer: disable it, free its event and the internal struct
 * (which contains the public comm_timer as its first member). */
2323 comm_timer_delete(struct comm_timer* timer)
2327 comm_timer_disable(timer);
2328 /* Free the sub struct timer->ev_timer derived from the super struct timer.
2329 * i.e. assert(timer == timer->ev_timer)
2331 ub_event_free(timer->ev_timer->ev);
2332 free(timer->ev_timer);
/* NOTE(review): gappy extract — lines missing; verify against upstream. */
/* Event-library callback for timers: ignores non-timeout events, marks
 * the timer not-enabled (it has fired) and invokes the user callback. */
2336 comm_timer_callback(int ATTR_UNUSED(fd), short event, void* arg)
2338 struct comm_timer* tm = (struct comm_timer*)arg;
2339 if(!(event&UB_EV_TIMEOUT))
2341 ub_comm_base_now(tm->ev_timer->base);
2342 tm->ev_timer->enabled = 0;
2343 fptr_ok(fptr_whitelist_comm_timer(tm->callback));
2344 (*tm->callback)(tm->cb_arg);
/* Report whether the timer is currently armed. */
2348 comm_timer_is_set(struct comm_timer* timer)
2350 return (int)timer->ev_timer->enabled;
/* Memory accounting for a timer: just the internal struct size. */
2354 comm_timer_get_mem(struct comm_timer* ATTR_UNUSED(timer))
2356 return sizeof(struct internal_timer);
/* NOTE(review): gappy extract — lines missing; verify against upstream. */
/* Allocate a signal-handling struct with an empty signal-event list;
 * signals are bound later via comm_signal_bind. */
2360 comm_signal_create(struct comm_base* base,
2361 void (*callback)(int, void*), void* cb_arg)
2363 struct comm_signal* com = (struct comm_signal*)malloc(
2364 sizeof(struct comm_signal));
2366 log_err("malloc failed");
2370 com->callback = callback;
2371 com->cb_arg = cb_arg;
2372 com->ev_signal = NULL;
/* NOTE(review): gappy extract — lines missing; verify against upstream. */
/* Event-library callback for signals: ignores non-signal events and
 * forwards the signal number to the user callback. */
2377 comm_signal_callback(int sig, short event, void* arg)
2379 struct comm_signal* comsig = (struct comm_signal*)arg;
2380 if(!(event & UB_EV_SIGNAL))
2382 ub_comm_base_now(comsig->base);
2383 fptr_ok(fptr_whitelist_comm_signal(comsig->callback));
2384 (*comsig->callback)(sig, comsig->cb_arg);
/* NOTE(review): gappy extract — lines missing (cleanup/returns); verify
 * against upstream file. */
/* Bind signal `sig` to the comm_signal: create and register a signal
 * event and push the entry onto the singly-linked ev_signal list. */
2388 comm_signal_bind(struct comm_signal* comsig, int sig)
2390 struct internal_signal* entry = (struct internal_signal*)calloc(1,
2391 sizeof(struct internal_signal));
2393 log_err("malloc failed");
2397 /* add signal event */
2398 entry->ev = ub_signal_new(comsig->base->eb->base, sig,
2399 comm_signal_callback, comsig);
2400 if(entry->ev == NULL) {
2401 log_err("Could not create signal event");
2405 if(ub_signal_add(entry->ev, NULL) != 0) {
2406 log_err("Could not add signal handler");
2407 ub_event_free(entry->ev);
2411 /* link into list */
2412 entry->next = comsig->ev_signal;
2413 comsig->ev_signal = entry;
2418 comm_signal_delete(struct comm_signal* comsig)
2420 struct internal_signal* p, *np;
2423 p=comsig->ev_signal;
2426 ub_signal_del(p->ev);
2427 ub_event_free(p->ev);