2 * util/netevent.c - event notification
4 * Copyright (c) 2007, NLnet Labs. All rights reserved.
6 * This software is open source.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * Redistributions of source code must retain the above copyright notice,
13 * this list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 * this list of conditions and the following disclaimer in the documentation
17 * and/or other materials provided with the distribution.
19 * Neither the name of the NLNET LABS nor the names of its contributors may
20 * be used to endorse or promote products derived from this software without
21 * specific prior written permission.
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
29 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
30 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
31 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
32 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 * This file contains event notification functions.
42 #include "util/netevent.h"
44 #include "util/net_help.h"
45 #include "util/fptr_wlist.h"
46 #include "sldns/pkthdr.h"
47 #include "sldns/sbuffer.h"
48 #include "dnstap/dnstap.h"
49 #ifdef HAVE_OPENSSL_SSL_H
50 #include <openssl/ssl.h>
52 #ifdef HAVE_OPENSSL_ERR_H
53 #include <openssl/err.h>
56 /* -------- Start of local definitions -------- */
57 /** if CMSG_ALIGN is not defined on this platform, a workaround */
59 # ifdef _CMSG_DATA_ALIGN
60 # define CMSG_ALIGN _CMSG_DATA_ALIGN
62 # define CMSG_ALIGN(len) (((len)+sizeof(long)-1) & ~(sizeof(long)-1))
66 /** if CMSG_LEN is not defined on this platform, a workaround */
68 # define CMSG_LEN(len) (CMSG_ALIGN(sizeof(struct cmsghdr))+(len))
71 /** if CMSG_SPACE is not defined on this platform, a workaround */
73 # ifdef _CMSG_HDR_ALIGN
74 # define CMSG_SPACE(l) (CMSG_ALIGN(l)+_CMSG_HDR_ALIGN(sizeof(struct cmsghdr)))
76 # define CMSG_SPACE(l) (CMSG_ALIGN(l)+CMSG_ALIGN(sizeof(struct cmsghdr)))
80 /** The TCP reading or writing query timeout in seconds */
81 #define TCP_QUERY_TIMEOUT 120
83 #ifndef NONBLOCKING_IS_BROKEN
84 /** number of UDP reads to perform per read indication from select */
85 #define NUM_UDP_PER_SELECT 100
87 #define NUM_UDP_PER_SELECT 1
90 /* We define libevent structures here to hide the libevent stuff. */
94 # include "util/winsock_event.h"
96 # include "util/mini_event.h"
97 # endif /* USE_WINSOCK */
98 #else /* USE_MINI_EVENT */
103 # include "event2/event.h"
104 # include "event2/event_struct.h"
105 # include "event2/event_compat.h"
107 #endif /* USE_MINI_EVENT */
110 * The internal event structure for keeping libevent info for the event.
111 * Possibly other structures (list, tree) this is part of.
113 struct internal_event {
115 struct comm_base* base;
116 /** libevent event type, alloced here */
121 * Internal base structure, so that every thread has its own events.
123 struct internal_base {
124 /** libevent event_base type. */
125 struct event_base* base;
126 /** seconds time pointer points here */
128 /** timeval with current time */
130 /** the event used for slow_accept timeouts */
131 struct event slow_accept;
132 /** true if slow_accept is enabled */
133 int slow_accept_enabled;
137 * Internal timer structure, to store timer event in.
139 struct internal_timer {
141 struct comm_base* base;
142 /** libevent event type, alloced here */
144 /** is timer enabled */
149 * Internal signal structure, to store signal event in.
151 struct internal_signal {
152 /** libevent event type, alloced here */
154 /** next in signal list */
155 struct internal_signal* next;
158 /** create a tcp handler with a parent */
159 static struct comm_point* comm_point_create_tcp_handler(
160 struct comm_base *base, struct comm_point* parent, size_t bufsize,
161 comm_point_callback_t* callback, void* callback_arg);
163 /* -------- End of local definitions -------- */
165 #ifdef USE_MINI_EVENT
166 /** minievent updates the time when it blocks. */
167 #define comm_base_now(x) /* nothing to do */
168 #else /* !USE_MINI_EVENT */
169 /** fillup the time values in the event base */
171 comm_base_now(struct comm_base* b)
173 if(gettimeofday(&b->eb->now, NULL) < 0) {
174 log_err("gettimeofday: %s", strerror(errno));
176 b->eb->secs = (time_t)b->eb->now.tv_sec;
178 #endif /* USE_MINI_EVENT */
181 comm_base_create(int sigs)
183 struct comm_base* b = (struct comm_base*)calloc(1,
184 sizeof(struct comm_base));
187 b->eb = (struct internal_base*)calloc(1, sizeof(struct internal_base));
192 #ifdef USE_MINI_EVENT
194 /* use mini event time-sharing feature */
195 b->eb->base = event_init(&b->eb->secs, &b->eb->now);
197 # if defined(HAVE_EV_LOOP) || defined(HAVE_EV_DEFAULT_LOOP)
200 b->eb->base=(struct event_base *)ev_default_loop(EVFLAG_AUTO);
202 b->eb->base=(struct event_base *)ev_loop_new(EVFLAG_AUTO);
205 # ifdef HAVE_EVENT_BASE_NEW
206 b->eb->base = event_base_new();
208 b->eb->base = event_init();
218 /* avoid event_get_method call which causes crashes even when
219 * not printing, because its result is passed */
221 #if defined(HAVE_EV_LOOP) || defined(HAVE_EV_DEFAULT_LOOP)
223 #elif defined(USE_MINI_EVENT)
228 "%s uses %s method.",
230 #ifdef HAVE_EVENT_BASE_GET_METHOD
231 event_base_get_method(b->eb->base)
240 comm_base_create_event(struct event_base* base)
242 struct comm_base* b = (struct comm_base*)calloc(1,
243 sizeof(struct comm_base));
246 b->eb = (struct internal_base*)calloc(1, sizeof(struct internal_base));
257 comm_base_delete(struct comm_base* b)
261 if(b->eb->slow_accept_enabled) {
262 if(event_del(&b->eb->slow_accept) != 0) {
263 log_err("could not event_del slow_accept");
266 #ifdef USE_MINI_EVENT
267 event_base_free(b->eb->base);
268 #elif defined(HAVE_EVENT_BASE_FREE) && defined(HAVE_EVENT_BASE_ONCE)
269 /* only libevent 1.2+ has it, but in 1.2 it is broken -
270 assertion fails on signal handling ev that is not deleted
271 in libevent 1.3c (event_base_once appears) this is fixed. */
272 event_base_free(b->eb->base);
273 #endif /* HAVE_EVENT_BASE_FREE and HAVE_EVENT_BASE_ONCE */
280 comm_base_delete_no_base(struct comm_base* b)
284 if(b->eb->slow_accept_enabled) {
285 if(event_del(&b->eb->slow_accept) != 0) {
286 log_err("could not event_del slow_accept");
295 comm_base_timept(struct comm_base* b, time_t** tt, struct timeval** tv)
302 comm_base_dispatch(struct comm_base* b)
305 retval = event_base_dispatch(b->eb->base);
307 fatal_exit("event_dispatch returned error %d, "
308 "errno is %s", retval, strerror(errno));
312 void comm_base_exit(struct comm_base* b)
314 if(event_base_loopexit(b->eb->base, NULL) != 0) {
315 log_err("Could not loopexit");
319 void comm_base_set_slow_accept_handlers(struct comm_base* b,
320 void (*stop_acc)(void*), void (*start_acc)(void*), void* arg)
322 b->stop_accept = stop_acc;
323 b->start_accept = start_acc;
327 struct event_base* comm_base_internal(struct comm_base* b)
332 /** see if errno for udp has to be logged or not uses globals */
334 udp_send_errno_needs_log(struct sockaddr* addr, socklen_t addrlen)
336 /* do not log transient errors (unless high verbosity) */
337 #if defined(ENETUNREACH) || defined(EHOSTDOWN) || defined(EHOSTUNREACH) || defined(ENETDOWN)
351 if(verbosity < VERB_ALGO)
357 /* permission denied is gotten for every send if the
358 * network is disconnected (on some OS), squelch it */
359 if(errno == EPERM && verbosity < VERB_DETAIL)
361 /* squelch errors where people deploy AAAA ::ffff:bla for
362 * authority servers, which we try for intranets. */
363 if(errno == EINVAL && addr_is_ip4mapped(
364 (struct sockaddr_storage*)addr, addrlen) &&
365 verbosity < VERB_DETAIL)
367 /* SO_BROADCAST sockopt can give access to 255.255.255.255,
368 * but a dns cache does not need it. */
369 if(errno == EACCES && addr_is_broadcast(
370 (struct sockaddr_storage*)addr, addrlen) &&
371 verbosity < VERB_DETAIL)
/** TCP connect errors use the same squelch rules as UDP send errors */
int tcp_connect_errno_needs_log(struct sockaddr* addr, socklen_t addrlen)
{
	return udp_send_errno_needs_log(addr, addrlen);
}
381 /* send a UDP reply */
383 comm_point_send_udp_msg(struct comm_point *c, sldns_buffer* packet,
384 struct sockaddr* addr, socklen_t addrlen)
387 log_assert(c->fd != -1);
389 if(sldns_buffer_remaining(packet) == 0)
390 log_err("error: send empty UDP packet");
392 log_assert(addr && addrlen > 0);
393 sent = sendto(c->fd, (void*)sldns_buffer_begin(packet),
394 sldns_buffer_remaining(packet), 0,
397 if(!udp_send_errno_needs_log(addr, addrlen))
400 verbose(VERB_OPS, "sendto failed: %s", strerror(errno));
402 verbose(VERB_OPS, "sendto failed: %s",
403 wsa_strerror(WSAGetLastError()));
405 log_addr(VERB_OPS, "remote address is",
406 (struct sockaddr_storage*)addr, addrlen);
408 } else if((size_t)sent != sldns_buffer_remaining(packet)) {
409 log_err("sent %d in place of %d bytes",
410 (int)sent, (int)sldns_buffer_remaining(packet));
416 #if defined(AF_INET6) && defined(IPV6_PKTINFO) && (defined(HAVE_RECVMSG) || defined(HAVE_SENDMSG))
417 /** print debug ancillary info */
418 static void p_ancil(const char* str, struct comm_reply* r)
420 if(r->srctype != 4 && r->srctype != 6) {
421 log_info("%s: unknown srctype %d", str, r->srctype);
424 if(r->srctype == 6) {
426 if(inet_ntop(AF_INET6, &r->pktinfo.v6info.ipi6_addr,
427 buf, (socklen_t)sizeof(buf)) == 0) {
428 (void)strlcpy(buf, "(inet_ntop error)", sizeof(buf));
430 buf[sizeof(buf)-1]=0;
431 log_info("%s: %s %d", str, buf, r->pktinfo.v6info.ipi6_ifindex);
432 } else if(r->srctype == 4) {
434 char buf1[1024], buf2[1024];
435 if(inet_ntop(AF_INET, &r->pktinfo.v4info.ipi_addr,
436 buf1, (socklen_t)sizeof(buf1)) == 0) {
437 (void)strlcpy(buf1, "(inet_ntop error)", sizeof(buf1));
439 buf1[sizeof(buf1)-1]=0;
440 #ifdef HAVE_STRUCT_IN_PKTINFO_IPI_SPEC_DST
441 if(inet_ntop(AF_INET, &r->pktinfo.v4info.ipi_spec_dst,
442 buf2, (socklen_t)sizeof(buf2)) == 0) {
443 (void)strlcpy(buf2, "(inet_ntop error)", sizeof(buf2));
445 buf2[sizeof(buf2)-1]=0;
449 log_info("%s: %d %s %s", str, r->pktinfo.v4info.ipi_ifindex,
451 #elif defined(IP_RECVDSTADDR)
453 if(inet_ntop(AF_INET, &r->pktinfo.v4addr,
454 buf1, (socklen_t)sizeof(buf1)) == 0) {
455 (void)strlcpy(buf1, "(inet_ntop error)", sizeof(buf1));
457 buf1[sizeof(buf1)-1]=0;
458 log_info("%s: %s", str, buf1);
459 #endif /* IP_PKTINFO or PI_RECVDSTDADDR */
462 #endif /* AF_INET6 && IPV6_PKTINFO && HAVE_RECVMSG||HAVE_SENDMSG */
464 /** send a UDP reply over specified interface*/
466 comm_point_send_udp_msg_if(struct comm_point *c, sldns_buffer* packet,
467 struct sockaddr* addr, socklen_t addrlen, struct comm_reply* r)
469 #if defined(AF_INET6) && defined(IPV6_PKTINFO) && defined(HAVE_SENDMSG)
475 struct cmsghdr *cmsg;
476 #endif /* S_SPLINT_S */
478 log_assert(c->fd != -1);
480 if(sldns_buffer_remaining(packet) == 0)
481 log_err("error: send empty UDP packet");
483 log_assert(addr && addrlen > 0);
486 msg.msg_namelen = addrlen;
487 iov[0].iov_base = sldns_buffer_begin(packet);
488 iov[0].iov_len = sldns_buffer_remaining(packet);
491 msg.msg_control = control;
493 msg.msg_controllen = sizeof(control);
494 #endif /* S_SPLINT_S */
498 cmsg = CMSG_FIRSTHDR(&msg);
499 if(r->srctype == 4) {
502 msg.msg_controllen = CMSG_SPACE(sizeof(struct in_pktinfo));
503 log_assert(msg.msg_controllen <= sizeof(control));
504 cmsg->cmsg_level = IPPROTO_IP;
505 cmsg->cmsg_type = IP_PKTINFO;
506 memmove(CMSG_DATA(cmsg), &r->pktinfo.v4info,
507 sizeof(struct in_pktinfo));
508 /* unset the ifindex to not bypass the routing tables */
509 cmsg_data = CMSG_DATA(cmsg);
510 ((struct in_pktinfo *) cmsg_data)->ipi_ifindex = 0;
511 cmsg->cmsg_len = CMSG_LEN(sizeof(struct in_pktinfo));
512 #elif defined(IP_SENDSRCADDR)
513 msg.msg_controllen = CMSG_SPACE(sizeof(struct in_addr));
514 log_assert(msg.msg_controllen <= sizeof(control));
515 cmsg->cmsg_level = IPPROTO_IP;
516 cmsg->cmsg_type = IP_SENDSRCADDR;
517 memmove(CMSG_DATA(cmsg), &r->pktinfo.v4addr,
518 sizeof(struct in_addr));
519 cmsg->cmsg_len = CMSG_LEN(sizeof(struct in_addr));
521 verbose(VERB_ALGO, "no IP_PKTINFO or IP_SENDSRCADDR");
522 msg.msg_control = NULL;
523 #endif /* IP_PKTINFO or IP_SENDSRCADDR */
524 } else if(r->srctype == 6) {
526 msg.msg_controllen = CMSG_SPACE(sizeof(struct in6_pktinfo));
527 log_assert(msg.msg_controllen <= sizeof(control));
528 cmsg->cmsg_level = IPPROTO_IPV6;
529 cmsg->cmsg_type = IPV6_PKTINFO;
530 memmove(CMSG_DATA(cmsg), &r->pktinfo.v6info,
531 sizeof(struct in6_pktinfo));
532 /* unset the ifindex to not bypass the routing tables */
533 cmsg_data = CMSG_DATA(cmsg);
534 ((struct in6_pktinfo *) cmsg_data)->ipi6_ifindex = 0;
535 cmsg->cmsg_len = CMSG_LEN(sizeof(struct in6_pktinfo));
537 /* try to pass all 0 to use default route */
538 msg.msg_controllen = CMSG_SPACE(sizeof(struct in6_pktinfo));
539 log_assert(msg.msg_controllen <= sizeof(control));
540 cmsg->cmsg_level = IPPROTO_IPV6;
541 cmsg->cmsg_type = IPV6_PKTINFO;
542 memset(CMSG_DATA(cmsg), 0, sizeof(struct in6_pktinfo));
543 cmsg->cmsg_len = CMSG_LEN(sizeof(struct in6_pktinfo));
545 #endif /* S_SPLINT_S */
546 if(verbosity >= VERB_ALGO)
547 p_ancil("send_udp over interface", r);
548 sent = sendmsg(c->fd, &msg, 0);
550 if(!udp_send_errno_needs_log(addr, addrlen))
552 verbose(VERB_OPS, "sendmsg failed: %s", strerror(errno));
553 log_addr(VERB_OPS, "remote address is",
554 (struct sockaddr_storage*)addr, addrlen);
556 } else if((size_t)sent != sldns_buffer_remaining(packet)) {
557 log_err("sent %d in place of %d bytes",
558 (int)sent, (int)sldns_buffer_remaining(packet));
568 log_err("sendmsg: IPV6_PKTINFO not supported");
570 #endif /* AF_INET6 && IPV6_PKTINFO && HAVE_SENDMSG */
574 comm_point_udp_ancil_callback(int fd, short event, void* arg)
576 #if defined(AF_INET6) && defined(IPV6_PKTINFO) && defined(HAVE_RECVMSG)
577 struct comm_reply rep;
584 struct cmsghdr* cmsg;
585 #endif /* S_SPLINT_S */
587 rep.c = (struct comm_point*)arg;
588 log_assert(rep.c->type == comm_udp);
592 log_assert(rep.c && rep.c->buffer && rep.c->fd == fd);
593 comm_base_now(rep.c->ev->base);
594 for(i=0; i<NUM_UDP_PER_SELECT; i++) {
595 sldns_buffer_clear(rep.c->buffer);
596 rep.addrlen = (socklen_t)sizeof(rep.addr);
597 log_assert(fd != -1);
598 log_assert(sldns_buffer_remaining(rep.c->buffer) > 0);
599 msg.msg_name = &rep.addr;
600 msg.msg_namelen = (socklen_t)sizeof(rep.addr);
601 iov[0].iov_base = sldns_buffer_begin(rep.c->buffer);
602 iov[0].iov_len = sldns_buffer_remaining(rep.c->buffer);
605 msg.msg_control = ancil;
607 msg.msg_controllen = sizeof(ancil);
608 #endif /* S_SPLINT_S */
610 rcv = recvmsg(fd, &msg, 0);
612 if(errno != EAGAIN && errno != EINTR) {
613 log_err("recvmsg failed: %s", strerror(errno));
617 rep.addrlen = msg.msg_namelen;
618 sldns_buffer_skip(rep.c->buffer, rcv);
619 sldns_buffer_flip(rep.c->buffer);
622 for(cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
623 cmsg = CMSG_NXTHDR(&msg, cmsg)) {
624 if( cmsg->cmsg_level == IPPROTO_IPV6 &&
625 cmsg->cmsg_type == IPV6_PKTINFO) {
627 memmove(&rep.pktinfo.v6info, CMSG_DATA(cmsg),
628 sizeof(struct in6_pktinfo));
631 } else if( cmsg->cmsg_level == IPPROTO_IP &&
632 cmsg->cmsg_type == IP_PKTINFO) {
634 memmove(&rep.pktinfo.v4info, CMSG_DATA(cmsg),
635 sizeof(struct in_pktinfo));
637 #elif defined(IP_RECVDSTADDR)
638 } else if( cmsg->cmsg_level == IPPROTO_IP &&
639 cmsg->cmsg_type == IP_RECVDSTADDR) {
641 memmove(&rep.pktinfo.v4addr, CMSG_DATA(cmsg),
642 sizeof(struct in_addr));
644 #endif /* IP_PKTINFO or IP_RECVDSTADDR */
647 if(verbosity >= VERB_ALGO)
648 p_ancil("receive_udp on interface", &rep);
649 #endif /* S_SPLINT_S */
650 fptr_ok(fptr_whitelist_comm_point(rep.c->callback));
651 if((*rep.c->callback)(rep.c, rep.c->cb_arg, NETEVENT_NOERROR, &rep)) {
652 /* send back immediate reply */
653 (void)comm_point_send_udp_msg_if(rep.c, rep.c->buffer,
654 (struct sockaddr*)&rep.addr, rep.addrlen, &rep);
656 if(rep.c->fd == -1) /* commpoint closed */
663 fatal_exit("recvmsg: No support for IPV6_PKTINFO. "
664 "Please disable interface-automatic");
665 #endif /* AF_INET6 && IPV6_PKTINFO && HAVE_RECVMSG */
669 comm_point_udp_callback(int fd, short event, void* arg)
671 struct comm_reply rep;
675 rep.c = (struct comm_point*)arg;
676 log_assert(rep.c->type == comm_udp);
680 log_assert(rep.c && rep.c->buffer && rep.c->fd == fd);
681 comm_base_now(rep.c->ev->base);
682 for(i=0; i<NUM_UDP_PER_SELECT; i++) {
683 sldns_buffer_clear(rep.c->buffer);
684 rep.addrlen = (socklen_t)sizeof(rep.addr);
685 log_assert(fd != -1);
686 log_assert(sldns_buffer_remaining(rep.c->buffer) > 0);
687 rcv = recvfrom(fd, (void*)sldns_buffer_begin(rep.c->buffer),
688 sldns_buffer_remaining(rep.c->buffer), 0,
689 (struct sockaddr*)&rep.addr, &rep.addrlen);
692 if(errno != EAGAIN && errno != EINTR)
693 log_err("recvfrom %d failed: %s",
694 fd, strerror(errno));
696 if(WSAGetLastError() != WSAEINPROGRESS &&
697 WSAGetLastError() != WSAECONNRESET &&
698 WSAGetLastError()!= WSAEWOULDBLOCK)
699 log_err("recvfrom failed: %s",
700 wsa_strerror(WSAGetLastError()));
704 sldns_buffer_skip(rep.c->buffer, rcv);
705 sldns_buffer_flip(rep.c->buffer);
707 fptr_ok(fptr_whitelist_comm_point(rep.c->callback));
708 if((*rep.c->callback)(rep.c, rep.c->cb_arg, NETEVENT_NOERROR, &rep)) {
709 /* send back immediate reply */
710 (void)comm_point_send_udp_msg(rep.c, rep.c->buffer,
711 (struct sockaddr*)&rep.addr, rep.addrlen);
713 if(rep.c->fd != fd) /* commpoint closed to -1 or reused for
714 another UDP port. Note rep.c cannot be reused with TCP fd. */
719 /** Use a new tcp handler for new query fd, set to read query */
721 setup_tcp_handler(struct comm_point* c, int fd)
723 log_assert(c->type == comm_tcp);
724 log_assert(c->fd == -1);
725 sldns_buffer_clear(c->buffer);
726 c->tcp_is_reading = 1;
727 c->tcp_byte_count = 0;
728 comm_point_start_listening(c, fd, TCP_QUERY_TIMEOUT);
731 void comm_base_handle_slow_accept(int ATTR_UNUSED(fd),
732 short ATTR_UNUSED(event), void* arg)
734 struct comm_base* b = (struct comm_base*)arg;
735 /* timeout for the slow accept, re-enable accepts again */
736 if(b->start_accept) {
737 verbose(VERB_ALGO, "wait is over, slow accept disabled");
738 fptr_ok(fptr_whitelist_start_accept(b->start_accept));
739 (*b->start_accept)(b->cb_arg);
740 b->eb->slow_accept_enabled = 0;
744 int comm_point_perform_accept(struct comm_point* c,
745 struct sockaddr_storage* addr, socklen_t* addrlen)
748 *addrlen = (socklen_t)sizeof(*addr);
749 new_fd = accept(c->fd, (struct sockaddr*)addr, addrlen);
752 /* EINTR is signal interrupt. others are closed connection. */
753 if( errno == EINTR || errno == EAGAIN
755 || errno == EWOULDBLOCK
758 || errno == ECONNABORTED
765 #if defined(ENFILE) && defined(EMFILE)
766 if(errno == ENFILE || errno == EMFILE) {
767 /* out of file descriptors, likely outside of our
768 * control. stop accept() calls for some time */
769 if(c->ev->base->stop_accept) {
770 struct comm_base* b = c->ev->base;
772 verbose(VERB_ALGO, "out of file descriptors: "
774 b->eb->slow_accept_enabled = 1;
775 fptr_ok(fptr_whitelist_stop_accept(
777 (*b->stop_accept)(b->cb_arg);
778 /* set timeout, no mallocs */
779 tv.tv_sec = NETEVENT_SLOW_ACCEPT_TIME/1000;
780 tv.tv_usec = NETEVENT_SLOW_ACCEPT_TIME%1000;
781 event_set(&b->eb->slow_accept, -1, EV_TIMEOUT,
782 comm_base_handle_slow_accept, b);
783 if(event_base_set(b->eb->base,
784 &b->eb->slow_accept) != 0) {
785 /* we do not want to log here, because
786 * that would spam the logfiles.
787 * error: "event_base_set failed." */
789 if(event_add(&b->eb->slow_accept, &tv) != 0) {
790 /* we do not want to log here,
791 * error: "event_add failed." */
797 log_err_addr("accept failed", strerror(errno), addr, *addrlen);
798 #else /* USE_WINSOCK */
799 if(WSAGetLastError() == WSAEINPROGRESS ||
800 WSAGetLastError() == WSAECONNRESET)
802 if(WSAGetLastError() == WSAEWOULDBLOCK) {
803 winsock_tcp_wouldblock(&c->ev->ev, EV_READ);
806 log_err_addr("accept failed", wsa_strerror(WSAGetLastError()),
811 fd_set_nonblock(new_fd);
816 static long win_bio_cb(BIO *b, int oper, const char* ATTR_UNUSED(argp),
817 int ATTR_UNUSED(argi), long argl, long retvalue)
819 verbose(VERB_ALGO, "bio_cb %d, %s %s %s", oper,
820 (oper&BIO_CB_RETURN)?"return":"before",
821 (oper&BIO_CB_READ)?"read":((oper&BIO_CB_WRITE)?"write":"other"),
822 WSAGetLastError()==WSAEWOULDBLOCK?"wsawb":"");
823 /* on windows, check if previous operation caused EWOULDBLOCK */
824 if( (oper == (BIO_CB_READ|BIO_CB_RETURN) && argl == 0) ||
825 (oper == (BIO_CB_GETS|BIO_CB_RETURN) && argl == 0)) {
826 if(WSAGetLastError() == WSAEWOULDBLOCK)
827 winsock_tcp_wouldblock((struct event*)
828 BIO_get_callback_arg(b), EV_READ);
830 if( (oper == (BIO_CB_WRITE|BIO_CB_RETURN) && argl == 0) ||
831 (oper == (BIO_CB_PUTS|BIO_CB_RETURN) && argl == 0)) {
832 if(WSAGetLastError() == WSAEWOULDBLOCK)
833 winsock_tcp_wouldblock((struct event*)
834 BIO_get_callback_arg(b), EV_WRITE);
836 /* return original return value */
840 /** set win bio callbacks for nonblocking operations */
842 comm_point_tcp_win_bio_cb(struct comm_point* c, void* thessl)
844 SSL* ssl = (SSL*)thessl;
845 /* set them both just in case, but usually they are the same BIO */
846 BIO_set_callback(SSL_get_rbio(ssl), &win_bio_cb);
847 BIO_set_callback_arg(SSL_get_rbio(ssl), (char*)&c->ev->ev);
848 BIO_set_callback(SSL_get_wbio(ssl), &win_bio_cb);
849 BIO_set_callback_arg(SSL_get_wbio(ssl), (char*)&c->ev->ev);
854 comm_point_tcp_accept_callback(int fd, short event, void* arg)
856 struct comm_point* c = (struct comm_point*)arg, *c_hdl;
858 log_assert(c->type == comm_tcp_accept);
859 if(!(event & EV_READ)) {
860 log_info("ignoring tcp accept event %d", (int)event);
863 comm_base_now(c->ev->base);
864 /* find free tcp handler. */
866 log_warn("accepted too many tcp, connections full");
869 /* accept incoming connection. */
871 log_assert(fd != -1);
872 new_fd = comm_point_perform_accept(c, &c_hdl->repinfo.addr,
873 &c_hdl->repinfo.addrlen);
877 c_hdl->ssl = incoming_ssl_fd(c->ssl, new_fd);
880 comm_point_close(c_hdl);
883 c_hdl->ssl_shake_state = comm_ssl_shake_read;
885 comm_point_tcp_win_bio_cb(c_hdl, c_hdl->ssl);
889 /* grab the tcp handler buffers */
891 c->tcp_free = c_hdl->tcp_free;
893 /* stop accepting incoming queries for now. */
894 comm_point_stop_listening(c);
896 setup_tcp_handler(c_hdl, new_fd);
899 /** Make tcp handler free for next assignment */
901 reclaim_tcp_handler(struct comm_point* c)
903 log_assert(c->type == comm_tcp);
906 SSL_shutdown(c->ssl);
913 c->tcp_parent->cur_tcp_count--;
914 c->tcp_free = c->tcp_parent->tcp_free;
915 c->tcp_parent->tcp_free = c;
917 /* re-enable listening on accept socket */
918 comm_point_start_listening(c->tcp_parent, -1, -1);
923 /** do the callback when writing is done */
925 tcp_callback_writer(struct comm_point* c)
927 log_assert(c->type == comm_tcp);
928 sldns_buffer_clear(c->buffer);
929 if(c->tcp_do_toggle_rw)
930 c->tcp_is_reading = 1;
931 c->tcp_byte_count = 0;
932 /* switch from listening(write) to listening(read) */
933 comm_point_stop_listening(c);
934 comm_point_start_listening(c, -1, -1);
937 /** do the callback when reading is done */
939 tcp_callback_reader(struct comm_point* c)
941 log_assert(c->type == comm_tcp || c->type == comm_local);
942 sldns_buffer_flip(c->buffer);
943 if(c->tcp_do_toggle_rw)
944 c->tcp_is_reading = 0;
945 c->tcp_byte_count = 0;
946 if(c->type == comm_tcp)
947 comm_point_stop_listening(c);
948 fptr_ok(fptr_whitelist_comm_point(c->callback));
949 if( (*c->callback)(c, c->cb_arg, NETEVENT_NOERROR, &c->repinfo) ) {
950 comm_point_start_listening(c, -1, TCP_QUERY_TIMEOUT);
954 /** continue ssl handshake */
957 ssl_handshake(struct comm_point* c)
960 if(c->ssl_shake_state == comm_ssl_shake_hs_read) {
961 /* read condition satisfied back to writing */
962 comm_point_listen_for_rw(c, 1, 1);
963 c->ssl_shake_state = comm_ssl_shake_none;
966 if(c->ssl_shake_state == comm_ssl_shake_hs_write) {
967 /* write condition satisfied, back to reading */
968 comm_point_listen_for_rw(c, 1, 0);
969 c->ssl_shake_state = comm_ssl_shake_none;
974 r = SSL_do_handshake(c->ssl);
976 int want = SSL_get_error(c->ssl, r);
977 if(want == SSL_ERROR_WANT_READ) {
978 if(c->ssl_shake_state == comm_ssl_shake_read)
980 c->ssl_shake_state = comm_ssl_shake_read;
981 comm_point_listen_for_rw(c, 1, 0);
983 } else if(want == SSL_ERROR_WANT_WRITE) {
984 if(c->ssl_shake_state == comm_ssl_shake_write)
986 c->ssl_shake_state = comm_ssl_shake_write;
987 comm_point_listen_for_rw(c, 0, 1);
990 return 0; /* closed */
991 } else if(want == SSL_ERROR_SYSCALL) {
992 /* SYSCALL and errno==0 means closed uncleanly */
994 log_err("SSL_handshake syscall: %s",
998 log_crypto_err("ssl handshake failed");
999 log_addr(1, "ssl handshake failed", &c->repinfo.addr,
1000 c->repinfo.addrlen);
1004 /* this is where peer verification could take place */
1005 log_addr(VERB_ALGO, "SSL DNS connection", &c->repinfo.addr,
1006 c->repinfo.addrlen);
1008 /* setup listen rw correctly */
1009 if(c->tcp_is_reading) {
1010 if(c->ssl_shake_state != comm_ssl_shake_read)
1011 comm_point_listen_for_rw(c, 1, 0);
1013 comm_point_listen_for_rw(c, 1, 1);
1015 c->ssl_shake_state = comm_ssl_shake_none;
1018 #endif /* HAVE_SSL */
1020 /** ssl read callback on TCP */
1022 ssl_handle_read(struct comm_point* c)
1026 if(c->ssl_shake_state != comm_ssl_shake_none) {
1027 if(!ssl_handshake(c))
1029 if(c->ssl_shake_state != comm_ssl_shake_none)
1032 if(c->tcp_byte_count < sizeof(uint16_t)) {
1033 /* read length bytes */
1035 if((r=SSL_read(c->ssl, (void*)sldns_buffer_at(c->buffer,
1036 c->tcp_byte_count), (int)(sizeof(uint16_t) -
1037 c->tcp_byte_count))) <= 0) {
1038 int want = SSL_get_error(c->ssl, r);
1039 if(want == SSL_ERROR_ZERO_RETURN) {
1040 return 0; /* shutdown, closed */
1041 } else if(want == SSL_ERROR_WANT_READ) {
1042 return 1; /* read more later */
1043 } else if(want == SSL_ERROR_WANT_WRITE) {
1044 c->ssl_shake_state = comm_ssl_shake_hs_write;
1045 comm_point_listen_for_rw(c, 0, 1);
1047 } else if(want == SSL_ERROR_SYSCALL) {
1049 log_err("SSL_read syscall: %s",
1053 log_crypto_err("could not SSL_read");
1056 c->tcp_byte_count += r;
1057 if(c->tcp_byte_count != sizeof(uint16_t))
1059 if(sldns_buffer_read_u16_at(c->buffer, 0) >
1060 sldns_buffer_capacity(c->buffer)) {
1061 verbose(VERB_QUERY, "ssl: dropped larger than buffer");
1064 sldns_buffer_set_limit(c->buffer,
1065 sldns_buffer_read_u16_at(c->buffer, 0));
1066 if(sldns_buffer_limit(c->buffer) < LDNS_HEADER_SIZE) {
1067 verbose(VERB_QUERY, "ssl: dropped bogus too short.");
1070 verbose(VERB_ALGO, "Reading ssl tcp query of length %d",
1071 (int)sldns_buffer_limit(c->buffer));
1073 log_assert(sldns_buffer_remaining(c->buffer) > 0);
1075 r = SSL_read(c->ssl, (void*)sldns_buffer_current(c->buffer),
1076 (int)sldns_buffer_remaining(c->buffer));
1078 int want = SSL_get_error(c->ssl, r);
1079 if(want == SSL_ERROR_ZERO_RETURN) {
1080 return 0; /* shutdown, closed */
1081 } else if(want == SSL_ERROR_WANT_READ) {
1082 return 1; /* read more later */
1083 } else if(want == SSL_ERROR_WANT_WRITE) {
1084 c->ssl_shake_state = comm_ssl_shake_hs_write;
1085 comm_point_listen_for_rw(c, 0, 1);
1087 } else if(want == SSL_ERROR_SYSCALL) {
1089 log_err("SSL_read syscall: %s",
1093 log_crypto_err("could not SSL_read");
1096 sldns_buffer_skip(c->buffer, (ssize_t)r);
1097 if(sldns_buffer_remaining(c->buffer) <= 0) {
1098 tcp_callback_reader(c);
1104 #endif /* HAVE_SSL */
1107 /** ssl write callback on TCP */
1109 ssl_handle_write(struct comm_point* c)
1113 if(c->ssl_shake_state != comm_ssl_shake_none) {
1114 if(!ssl_handshake(c))
1116 if(c->ssl_shake_state != comm_ssl_shake_none)
1119 /* ignore return, if fails we may simply block */
1120 (void)SSL_set_mode(c->ssl, SSL_MODE_ENABLE_PARTIAL_WRITE);
1121 if(c->tcp_byte_count < sizeof(uint16_t)) {
1122 uint16_t len = htons(sldns_buffer_limit(c->buffer));
1124 r = SSL_write(c->ssl,
1125 (void*)(((uint8_t*)&len)+c->tcp_byte_count),
1126 (int)(sizeof(uint16_t)-c->tcp_byte_count));
1128 int want = SSL_get_error(c->ssl, r);
1129 if(want == SSL_ERROR_ZERO_RETURN) {
1130 return 0; /* closed */
1131 } else if(want == SSL_ERROR_WANT_READ) {
1132 c->ssl_shake_state = comm_ssl_shake_read;
1133 comm_point_listen_for_rw(c, 1, 0);
1134 return 1; /* wait for read condition */
1135 } else if(want == SSL_ERROR_WANT_WRITE) {
1136 return 1; /* write more later */
1137 } else if(want == SSL_ERROR_SYSCALL) {
1139 log_err("SSL_write syscall: %s",
1143 log_crypto_err("could not SSL_write");
1146 c->tcp_byte_count += r;
1147 if(c->tcp_byte_count < sizeof(uint16_t))
1149 sldns_buffer_set_position(c->buffer, c->tcp_byte_count -
1151 if(sldns_buffer_remaining(c->buffer) == 0) {
1152 tcp_callback_writer(c);
1156 log_assert(sldns_buffer_remaining(c->buffer) > 0);
1158 r = SSL_write(c->ssl, (void*)sldns_buffer_current(c->buffer),
1159 (int)sldns_buffer_remaining(c->buffer));
1161 int want = SSL_get_error(c->ssl, r);
1162 if(want == SSL_ERROR_ZERO_RETURN) {
1163 return 0; /* closed */
1164 } else if(want == SSL_ERROR_WANT_READ) {
1165 c->ssl_shake_state = comm_ssl_shake_read;
1166 comm_point_listen_for_rw(c, 1, 0);
1167 return 1; /* wait for read condition */
1168 } else if(want == SSL_ERROR_WANT_WRITE) {
1169 return 1; /* write more later */
1170 } else if(want == SSL_ERROR_SYSCALL) {
1172 log_err("SSL_write syscall: %s",
1176 log_crypto_err("could not SSL_write");
1179 sldns_buffer_skip(c->buffer, (ssize_t)r);
1181 if(sldns_buffer_remaining(c->buffer) == 0) {
1182 tcp_callback_writer(c);
1188 #endif /* HAVE_SSL */
1191 /** handle ssl tcp connection with dns contents */
1193 ssl_handle_it(struct comm_point* c)
1195 if(c->tcp_is_reading)
1196 return ssl_handle_read(c);
1197 return ssl_handle_write(c);
1200 /** Handle tcp reading callback.
1201 * @param fd: file descriptor of socket.
1202 * @param c: comm point to read from into buffer.
1203 * @param short_ok: if true, very short packets are OK (for comm_local).
1204 * @return: 0 on error
1207 comm_point_tcp_handle_read(int fd, struct comm_point* c, int short_ok)
1210 log_assert(c->type == comm_tcp || c->type == comm_local);
/* TLS connections are handled entirely by the ssl dispatcher. */
1212 return ssl_handle_it(c);
1213 if(!c->tcp_is_reading)
1216 log_assert(fd != -1);
/* Phase 1: read the 2-byte network-order length prefix of the DNS
 * message (RFC 1035 4.2.2); it may arrive one byte at a time. */
1217 if(c->tcp_byte_count < sizeof(uint16_t)) {
1218 /* read length bytes */
1219 r = recv(fd,(void*)sldns_buffer_at(c->buffer,c->tcp_byte_count),
1220 sizeof(uint16_t)-c->tcp_byte_count, 0);
/* transient errors: retry later without logging */
1225 if(errno == EINTR || errno == EAGAIN)
1228 if(errno == ECONNRESET && verbosity < 2)
1229 return 0; /* silence reset by peer */
1231 log_err_addr("read (in tcp s)", strerror(errno),
1232 &c->repinfo.addr, c->repinfo.addrlen);
1233 #else /* USE_WINSOCK */
1234 if(WSAGetLastError() == WSAECONNRESET)
1236 if(WSAGetLastError() == WSAEINPROGRESS)
/* winsock has no persistent-event EAGAIN; re-arm the event */
1238 if(WSAGetLastError() == WSAEWOULDBLOCK) {
1239 winsock_tcp_wouldblock(&c->ev->ev, EV_READ);
1242 log_err_addr("read (in tcp s)",
1243 wsa_strerror(WSAGetLastError()),
1244 &c->repinfo.addr, c->repinfo.addrlen);
1248 c->tcp_byte_count += r;
/* still have only part of the length prefix: wait for more */
1249 if(c->tcp_byte_count != sizeof(uint16_t))
/* reject messages larger than the preallocated buffer */
1251 if(sldns_buffer_read_u16_at(c->buffer, 0) >
1252 sldns_buffer_capacity(c->buffer)) {
1253 verbose(VERB_QUERY, "tcp: dropped larger than buffer");
/* limit the buffer to exactly the announced message size */
1256 sldns_buffer_set_limit(c->buffer,
1257 sldns_buffer_read_u16_at(c->buffer, 0));
/* unless short_ok (comm_local control channel), a message shorter
 * than a DNS header is bogus */
1259 sldns_buffer_limit(c->buffer) < LDNS_HEADER_SIZE) {
1260 verbose(VERB_QUERY, "tcp: dropped bogus too short.");
1263 verbose(VERB_ALGO, "Reading tcp query of length %d",
1264 (int)sldns_buffer_limit(c->buffer));
/* Phase 2: read the message body into the buffer */
1267 log_assert(sldns_buffer_remaining(c->buffer) > 0);
1268 r = recv(fd, (void*)sldns_buffer_current(c->buffer),
1269 sldns_buffer_remaining(c->buffer), 0);
1272 } else if(r == -1) {
1274 if(errno == EINTR || errno == EAGAIN)
1276 log_err_addr("read (in tcp r)", strerror(errno),
1277 &c->repinfo.addr, c->repinfo.addrlen);
1278 #else /* USE_WINSOCK */
1279 if(WSAGetLastError() == WSAECONNRESET)
1281 if(WSAGetLastError() == WSAEINPROGRESS)
1283 if(WSAGetLastError() == WSAEWOULDBLOCK) {
1284 winsock_tcp_wouldblock(&c->ev->ev, EV_READ);
1287 log_err_addr("read (in tcp r)",
1288 wsa_strerror(WSAGetLastError()),
1289 &c->repinfo.addr, c->repinfo.addrlen);
1293 sldns_buffer_skip(c->buffer, r);
/* complete message received: hand it to the reader callback */
1294 if(sldns_buffer_remaining(c->buffer) <= 0) {
1295 tcp_callback_reader(c);
1301 * Handle tcp writing callback.
1302 * @param fd: file descriptor of socket.
1303 * @param c: comm point to write buffer out of.
1304 * @return: 0 on error
1307 comm_point_tcp_handle_write(int fd, struct comm_point* c)
1310 log_assert(c->type == comm_tcp);
/* a plain-tcp point that is in reading mode has nothing to write;
 * ssl points fall through because the ssl layer may need to write
 * during a read (renegotiation). */
1311 if(c->tcp_is_reading && !c->ssl)
1313 log_assert(fd != -1);
/* before the first byte goes out, harvest a pending error from the
 * earlier nonblocking connect() on outgoing connections. */
1314 if(c->tcp_byte_count == 0 && c->tcp_check_nb_connect) {
1315 /* check for pending error from nonblocking connect */
1316 /* from Stevens, unix network programming, vol1, 3rd ed, p450*/
1318 socklen_t len = (socklen_t)sizeof(error);
1319 if(getsockopt(fd, SOL_SOCKET, SO_ERROR, (void*)&error,
1322 error = errno; /* on solaris errno is error */
1323 #else /* USE_WINSOCK */
1324 error = WSAGetLastError();
1328 #if defined(EINPROGRESS) && defined(EWOULDBLOCK)
1329 if(error == EINPROGRESS || error == EWOULDBLOCK)
1330 return 1; /* try again later */
1333 if(error != 0 && verbosity < 2)
1334 return 0; /* silence lots of chatter in the logs */
1335 else if(error != 0) {
1336 log_err_addr("tcp connect", strerror(error),
1337 &c->repinfo.addr, c->repinfo.addrlen);
1338 #else /* USE_WINSOCK */
1340 if(error == WSAEINPROGRESS)
1342 else if(error == WSAEWOULDBLOCK) {
1343 winsock_tcp_wouldblock(&c->ev->ev, EV_WRITE);
1345 } else if(error != 0 && verbosity < 2)
1347 else if(error != 0) {
1348 log_err_addr("tcp connect", wsa_strerror(error),
1349 &c->repinfo.addr, c->repinfo.addrlen);
1350 #endif /* USE_WINSOCK */
1355 return ssl_handle_it(c);
/* Phase 1: write the 2-byte length prefix; with writev the prefix
 * and the message go out in one syscall to avoid two packets. */
1357 if(c->tcp_byte_count < sizeof(uint16_t)) {
1358 uint16_t len = htons(sldns_buffer_limit(c->buffer));
1360 struct iovec iov[2];
1361 iov[0].iov_base = (uint8_t*)&len + c->tcp_byte_count;
1362 iov[0].iov_len = sizeof(uint16_t) - c->tcp_byte_count;
1363 iov[1].iov_base = sldns_buffer_begin(c->buffer);
1364 iov[1].iov_len = sldns_buffer_limit(c->buffer);
1365 log_assert(iov[0].iov_len > 0);
1366 log_assert(iov[1].iov_len > 0);
1367 r = writev(fd, iov, 2);
1368 #else /* HAVE_WRITEV */
1369 r = send(fd, (void*)(((uint8_t*)&len)+c->tcp_byte_count),
1370 sizeof(uint16_t)-c->tcp_byte_count, 0);
1371 #endif /* HAVE_WRITEV */
1375 if(errno == EPIPE && verbosity < 2)
1376 return 0; /* silence 'broken pipe' */
1378 if(errno == EINTR || errno == EAGAIN)
1381 log_err_addr("tcp writev", strerror(errno),
1382 &c->repinfo.addr, c->repinfo.addrlen);
1383 # else /* HAVE_WRITEV */
1384 log_err_addr("tcp send s", strerror(errno),
1385 &c->repinfo.addr, c->repinfo.addrlen);
1386 # endif /* HAVE_WRITEV */
1388 if(WSAGetLastError() == WSAENOTCONN)
1390 if(WSAGetLastError() == WSAEINPROGRESS)
1392 if(WSAGetLastError() == WSAEWOULDBLOCK) {
1393 winsock_tcp_wouldblock(&c->ev->ev, EV_WRITE);
1396 log_err_addr("tcp send s",
1397 wsa_strerror(WSAGetLastError()),
1398 &c->repinfo.addr, c->repinfo.addrlen);
1402 c->tcp_byte_count += r;
/* length prefix still not fully sent: wait for next writable event */
1403 if(c->tcp_byte_count < sizeof(uint16_t))
/* account for message bytes already sent by the writev above */
1405 sldns_buffer_set_position(c->buffer, c->tcp_byte_count -
1407 if(sldns_buffer_remaining(c->buffer) == 0) {
1408 tcp_callback_writer(c);
/* Phase 2: write the remainder of the message body */
1412 log_assert(sldns_buffer_remaining(c->buffer) > 0);
1413 r = send(fd, (void*)sldns_buffer_current(c->buffer),
1414 sldns_buffer_remaining(c->buffer), 0);
1417 if(errno == EINTR || errno == EAGAIN)
1419 log_err_addr("tcp send r", strerror(errno),
1420 &c->repinfo.addr, c->repinfo.addrlen);
1422 if(WSAGetLastError() == WSAEINPROGRESS)
1424 if(WSAGetLastError() == WSAEWOULDBLOCK) {
1425 winsock_tcp_wouldblock(&c->ev->ev, EV_WRITE);
1428 log_err_addr("tcp send r", wsa_strerror(WSAGetLastError()),
1429 &c->repinfo.addr, c->repinfo.addrlen);
1433 sldns_buffer_skip(c->buffer, r);
/* all bytes written: notify the writer callback */
1435 if(sldns_buffer_remaining(c->buffer) == 0) {
1436 tcp_callback_writer(c);
/* libevent callback for a tcp comm point: dispatch read/write/timeout
 * events to the tcp handlers, and on failure reclaim the handler and
 * inform the user callback with NETEVENT_CLOSED/NETEVENT_TIMEOUT
 * (unless tcp_do_close suppresses the notification). */
1443 comm_point_tcp_handle_callback(int fd, short event, void* arg)
1445 struct comm_point* c = (struct comm_point*)arg;
1446 log_assert(c->type == comm_tcp);
/* refresh the cached time-of-day for this event batch */
1447 comm_base_now(c->ev->base);
1450 if(!comm_point_tcp_handle_read(fd, c, 0)) {
1451 reclaim_tcp_handler(c);
1452 if(!c->tcp_do_close) {
1453 fptr_ok(fptr_whitelist_comm_point(
1455 (void)(*c->callback)(c, c->cb_arg,
1456 NETEVENT_CLOSED, NULL);
1461 if(event&EV_WRITE) {
1462 if(!comm_point_tcp_handle_write(fd, c)) {
1463 reclaim_tcp_handler(c);
1464 if(!c->tcp_do_close) {
1465 fptr_ok(fptr_whitelist_comm_point(
1467 (void)(*c->callback)(c, c->cb_arg,
1468 NETEVENT_CLOSED, NULL);
/* idle/slow connection: drop it to free the handler slot */
1473 if(event&EV_TIMEOUT) {
1474 verbose(VERB_QUERY, "tcp took too long, dropped");
1475 reclaim_tcp_handler(c);
1476 if(!c->tcp_do_close) {
1477 fptr_ok(fptr_whitelist_comm_point(c->callback));
1478 (void)(*c->callback)(c, c->cb_arg,
1479 NETEVENT_TIMEOUT, NULL);
1483 log_err("Ignored event %d for tcphdl.", event);
/* libevent callback for a comm_local (unix/pipe control) point:
 * reads with short_ok=1 so short control messages are accepted,
 * and reports read failure to the user callback as NETEVENT_CLOSED. */
1486 void comm_point_local_handle_callback(int fd, short event, void* arg)
1488 struct comm_point* c = (struct comm_point*)arg;
1489 log_assert(c->type == comm_local);
1490 comm_base_now(c->ev->base);
1493 if(!comm_point_tcp_handle_read(fd, c, 1)) {
1494 fptr_ok(fptr_whitelist_comm_point(c->callback));
1495 (void)(*c->callback)(c, c->cb_arg, NETEVENT_CLOSED,
1500 log_err("Ignored event %d for localhdl.", event);
/* libevent callback for a raw comm point: performs no I/O itself,
 * just forwards the event (NETEVENT_NOERROR or NETEVENT_TIMEOUT)
 * to the user callback. */
1503 void comm_point_raw_handle_callback(int ATTR_UNUSED(fd),
1504 short event, void* arg)
1506 struct comm_point* c = (struct comm_point*)arg;
1507 int err = NETEVENT_NOERROR;
1508 log_assert(c->type == comm_raw);
1509 comm_base_now(c->ev->base);
1511 if(event&EV_TIMEOUT)
1512 err = NETEVENT_TIMEOUT;
1513 fptr_ok(fptr_whitelist_comm_point_raw(c->callback));
1514 (void)(*c->callback)(c, c->cb_arg, err, NULL);
/* Create a UDP comm point on fd (shared buffer, caller owns fd and
 * buffer).  Registers a persistent EV_READ event on the base;
 * returns NULL on allocation or libevent failure (the point is then
 * deleted).  NOTE(review): field assignments for c->fd/c->buffer etc.
 * are elided from this view; only the tcp-field zeroing is visible. */
1518 comm_point_create_udp(struct comm_base *base, int fd, sldns_buffer* buffer,
1519 comm_point_callback_t* callback, void* callback_arg)
1521 struct comm_point* c = (struct comm_point*)calloc(1,
1522 sizeof(struct comm_point));
1526 c->ev = (struct internal_event*)calloc(1,
1527 sizeof(struct internal_event));
/* udp points use none of the tcp state; zero it explicitly */
1536 c->tcp_is_reading = 0;
1537 c->tcp_byte_count = 0;
1538 c->tcp_parent = NULL;
1539 c->max_tcp_count = 0;
1540 c->cur_tcp_count = 0;
1541 c->tcp_handlers = NULL;
1544 c->tcp_do_close = 0;
1545 c->do_not_close = 0;
1546 c->tcp_do_toggle_rw = 0;
1547 c->tcp_check_nb_connect = 0;
1549 c->callback = callback;
1550 c->cb_arg = callback_arg;
1551 evbits = EV_READ | EV_PERSIST;
1552 /* libevent stuff */
1553 event_set(&c->ev->ev, c->fd, evbits, comm_point_udp_callback, c);
1554 if(event_base_set(base->eb->base, &c->ev->ev) != 0) {
1555 log_err("could not baseset udp event");
1556 comm_point_delete(c);
/* fd may be -1 for a placeholder point; then no event is added yet */
1559 if(fd!=-1 && event_add(&c->ev->ev, c->timeout) != 0 ) {
1560 log_err("could not add udp event");
1561 comm_point_delete(c);
/* Create a UDP comm point that uses the ancillary-data callback
 * (comm_point_udp_ancil_callback, e.g. to learn the destination
 * interface of received packets).  Otherwise identical in structure
 * to comm_point_create_udp above; returns NULL on failure. */
1568 comm_point_create_udp_ancil(struct comm_base *base, int fd,
1569 sldns_buffer* buffer,
1570 comm_point_callback_t* callback, void* callback_arg)
1572 struct comm_point* c = (struct comm_point*)calloc(1,
1573 sizeof(struct comm_point));
1577 c->ev = (struct internal_event*)calloc(1,
1578 sizeof(struct internal_event));
/* udp points use none of the tcp state; zero it explicitly */
1587 c->tcp_is_reading = 0;
1588 c->tcp_byte_count = 0;
1589 c->tcp_parent = NULL;
1590 c->max_tcp_count = 0;
1591 c->cur_tcp_count = 0;
1592 c->tcp_handlers = NULL;
1595 c->tcp_do_close = 0;
1596 c->do_not_close = 0;
1598 c->tcp_do_toggle_rw = 0;
1599 c->tcp_check_nb_connect = 0;
1600 c->callback = callback;
1601 c->cb_arg = callback_arg;
1602 evbits = EV_READ | EV_PERSIST;
1603 /* libevent stuff */
1604 event_set(&c->ev->ev, c->fd, evbits, comm_point_udp_ancil_callback, c);
1605 if(event_base_set(base->eb->base, &c->ev->ev) != 0) {
1606 log_err("could not baseset udp event");
1607 comm_point_delete(c);
1610 if(fd!=-1 && event_add(&c->ev->ev, c->timeout) != 0 ) {
1611 log_err("could not add udp event");
1612 comm_point_delete(c);
/* Create one tcp handler child for a tcp-accept parent: owns its own
 * buffer and timeout, toggles between read and write mode
 * (tcp_do_toggle_rw=1), and is pushed onto the parent's free list so
 * the accept callback can hand it an accepted connection.  Returns
 * NULL on allocation/libevent failure (partial allocations freed). */
1618 static struct comm_point*
1619 comm_point_create_tcp_handler(struct comm_base *base,
1620 struct comm_point* parent, size_t bufsize,
1621 comm_point_callback_t* callback, void* callback_arg)
1623 struct comm_point* c = (struct comm_point*)calloc(1,
1624 sizeof(struct comm_point));
1628 c->ev = (struct internal_event*)calloc(1,
1629 sizeof(struct internal_event));
/* each handler gets a private message buffer */
1636 c->buffer = sldns_buffer_new(bufsize);
1642 c->timeout = (struct timeval*)malloc(sizeof(struct timeval));
1644 sldns_buffer_free(c->buffer);
1649 c->tcp_is_reading = 0;
1650 c->tcp_byte_count = 0;
1651 c->tcp_parent = parent;
1652 c->max_tcp_count = 0;
1653 c->cur_tcp_count = 0;
1654 c->tcp_handlers = NULL;
1657 c->tcp_do_close = 0;
1658 c->do_not_close = 0;
/* server-side handler: switch read<->write when a query completes */
1659 c->tcp_do_toggle_rw = 1;
1660 c->tcp_check_nb_connect = 0;
1662 c->callback = callback;
1663 c->cb_arg = callback_arg;
1664 /* add to parent free list */
1665 c->tcp_free = parent->tcp_free;
1666 parent->tcp_free = c;
1667 /* libevent stuff */
1668 evbits = EV_PERSIST | EV_READ | EV_TIMEOUT;
1669 event_set(&c->ev->ev, c->fd, evbits, comm_point_tcp_handle_callback, c);
1670 if(event_base_set(base->eb->base, &c->ev->ev) != 0)
1672 log_err("could not basetset tcphdl event");
/* unlink from the parent's free list before deleting */
1673 parent->tcp_free = c->tcp_free;
/* Create a tcp-accept comm point listening on fd, together with
 * 'num' preallocated tcp handler children (each with its own buffer
 * of bufsize).  Returns the accept point, or NULL on failure; on
 * failure comm_point_delete cleans up the already-created children. */
1682 comm_point_create_tcp(struct comm_base *base, int fd, int num, size_t bufsize,
1683 comm_point_callback_t* callback, void* callback_arg)
1685 struct comm_point* c = (struct comm_point*)calloc(1,
1686 sizeof(struct comm_point));
1689 /* first allocate the TCP accept listener */
1692 c->ev = (struct internal_event*)calloc(1,
1693 sizeof(struct internal_event));
1702 c->tcp_is_reading = 0;
1703 c->tcp_byte_count = 0;
1704 c->tcp_parent = NULL;
/* the accept point tracks how many children are in use */
1705 c->max_tcp_count = num;
1706 c->cur_tcp_count = 0;
1707 c->tcp_handlers = (struct comm_point**)calloc((size_t)num,
1708 sizeof(struct comm_point*));
1709 if(!c->tcp_handlers) {
1715 c->type = comm_tcp_accept;
1716 c->tcp_do_close = 0;
1717 c->do_not_close = 0;
1718 c->tcp_do_toggle_rw = 0;
1719 c->tcp_check_nb_connect = 0;
1722 evbits = EV_READ | EV_PERSIST;
1723 /* libevent stuff */
1724 event_set(&c->ev->ev, c->fd, evbits, comm_point_tcp_accept_callback, c);
1725 if(event_base_set(base->eb->base, &c->ev->ev) != 0 ||
1726 event_add(&c->ev->ev, c->timeout) != 0 )
1728 log_err("could not add tcpacc event");
1729 comm_point_delete(c);
1733 /* now prealloc the tcp handlers */
1734 for(i=0; i<num; i++) {
1735 c->tcp_handlers[i] = comm_point_create_tcp_handler(base,
1736 c, bufsize, callback, callback_arg);
1737 if(!c->tcp_handlers[i]) {
1738 comm_point_delete(c);
/* Create an outgoing tcp comm point (no fd yet; caller connects and
 * starts listening later).  tcp_check_nb_connect=1 makes the first
 * write check getsockopt(SO_ERROR) for the nonblocking connect result.
 * Registers a persistent EV_WRITE event; returns NULL on failure. */
1747 comm_point_create_tcp_out(struct comm_base *base, size_t bufsize,
1748 comm_point_callback_t* callback, void* callback_arg)
1750 struct comm_point* c = (struct comm_point*)calloc(1,
1751 sizeof(struct comm_point))
1755 c->ev = (struct internal_event*)calloc(1,
1756 sizeof(struct internal_event));
1763 c->buffer = sldns_buffer_new(bufsize);
1770 c->tcp_is_reading = 0;
1771 c->tcp_byte_count = 0;
1772 c->tcp_parent = NULL;
1773 c->max_tcp_count = 0;
1774 c->cur_tcp_count = 0;
1775 c->tcp_handlers = NULL;
1778 c->tcp_do_close = 0;
1779 c->do_not_close = 0;
/* client side: write the query first, then toggle to reading */
1780 c->tcp_do_toggle_rw = 1;
1781 c->tcp_check_nb_connect = 1;
1783 c->callback = callback;
1784 c->cb_arg = callback_arg;
1785 evbits = EV_PERSIST | EV_WRITE;
1786 event_set(&c->ev->ev, c->fd, evbits, comm_point_tcp_handle_callback, c);
1787 if(event_base_set(base->eb->base, &c->ev->ev) != 0)
1789 log_err("could not basetset tcpout event");
1790 sldns_buffer_free(c->buffer);
/* Create a comm_local point on fd (local control connection; uses the
 * tcp framing code with short packets allowed).  do_not_close=1: the
 * caller retains ownership of the fd.  Returns NULL on failure. */
1800 comm_point_create_local(struct comm_base *base, int fd, size_t bufsize,
1801 comm_point_callback_t* callback, void* callback_arg)
1803 struct comm_point* c = (struct comm_point*)calloc(1,
1804 sizeof(struct comm_point));
1808 c->ev = (struct internal_event*)calloc(1,
1809 sizeof(struct internal_event));
1816 c->buffer = sldns_buffer_new(bufsize);
/* local points only ever read requests */
1823 c->tcp_is_reading = 1;
1824 c->tcp_byte_count = 0;
1825 c->tcp_parent = NULL;
1826 c->max_tcp_count = 0;
1827 c->cur_tcp_count = 0;
1828 c->tcp_handlers = NULL;
1830 c->type = comm_local;
1831 c->tcp_do_close = 0;
1832 c->do_not_close = 1;
1833 c->tcp_do_toggle_rw = 0;
1834 c->tcp_check_nb_connect = 0;
1835 c->callback = callback;
1836 c->cb_arg = callback_arg;
1837 /* libevent stuff */
1838 evbits = EV_PERSIST | EV_READ;
1839 event_set(&c->ev->ev, c->fd, evbits, comm_point_local_handle_callback,
1841 if(event_base_set(base->eb->base, &c->ev->ev) != 0 ||
1842 event_add(&c->ev->ev, c->timeout) != 0 )
1844 log_err("could not add localhdl event");
/* Create a raw comm point on fd: no buffer, no protocol handling,
 * just event notification.  'writing' selects EV_WRITE vs EV_READ
 * interest.  do_not_close=1: caller keeps fd ownership.  Returns
 * NULL on failure. */
1853 comm_point_create_raw(struct comm_base* base, int fd, int writing,
1854 comm_point_callback_t* callback, void* callback_arg)
1856 struct comm_point* c = (struct comm_point*)calloc(1,
1857 sizeof(struct comm_point));
1861 c->ev = (struct internal_event*)calloc(1,
1862 sizeof(struct internal_event));
1871 c->tcp_is_reading = 0;
1872 c->tcp_byte_count = 0;
1873 c->tcp_parent = NULL;
1874 c->max_tcp_count = 0;
1875 c->cur_tcp_count = 0;
1876 c->tcp_handlers = NULL;
1879 c->tcp_do_close = 0;
1880 c->do_not_close = 1;
1881 c->tcp_do_toggle_rw = 0;
1882 c->tcp_check_nb_connect = 0;
1883 c->callback = callback;
1884 c->cb_arg = callback_arg;
1885 /* libevent stuff */
1887 evbits = EV_PERSIST | EV_WRITE;
1888 else evbits = EV_PERSIST | EV_READ;
1889 event_set(&c->ev->ev, c->fd, evbits, comm_point_raw_handle_callback,
1891 if(event_base_set(base->eb->base, &c->ev->ev) != 0 ||
1892 event_add(&c->ev->ev, c->timeout) != 0 )
1894 log_err("could not add rawhdl event");
/* Deregister the comm point's event and close its fd (unless
 * do_not_close).  The event is removed before the close so epoll/
 * kqueue state stays consistent. */
1903 comm_point_close(struct comm_point* c)
1908 if(event_del(&c->ev->ev) != 0) {
1909 log_err("could not event_del on close");
1911 /* close fd after removing from event lists, or epoll.. is messed up */
1912 if(c->fd != -1 && !c->do_not_close) {
1913 verbose(VERB_ALGO, "close fd %d", c->fd);
/* Free a comm point and everything it owns: shut down a tcp SSL
 * session, close the fd/event, recursively delete tcp handler
 * children, and free the buffer for tcp/local points. */
1924 comm_point_delete(struct comm_point* c)
1928 if(c->type == comm_tcp && c->ssl) {
1930 SSL_shutdown(c->ssl);
1934 comm_point_close(c);
1935 if(c->tcp_handlers) {
1937 for(i=0; i<c->max_tcp_count; i++)
1938 comm_point_delete(c->tcp_handlers[i]);
1939 free(c->tcp_handlers);
/* udp points share the caller's buffer; only tcp/local own theirs */
1942 if(c->type == comm_tcp || c->type == comm_local)
1943 sldns_buffer_free(c->buffer);
/* Send the reply that was assembled in repinfo->c->buffer back to the
 * client: for udp, send immediately (via the interface-aware path when
 * source-address info was recorded); for tcp, switch the handler into
 * write mode via comm_point_start_listening.  Also logs the response
 * to dnstap when configured. */
1949 comm_point_send_reply(struct comm_reply *repinfo)
1951 log_assert(repinfo && repinfo->c);
1952 if(repinfo->c->type == comm_udp) {
/* srctype set: reply must leave from the same local address */
1953 if(repinfo->srctype)
1954 comm_point_send_udp_msg_if(repinfo->c,
1955 repinfo->c->buffer, (struct sockaddr*)&repinfo->addr,
1956 repinfo->addrlen, repinfo);
1958 comm_point_send_udp_msg(repinfo->c, repinfo->c->buffer,
1959 (struct sockaddr*)&repinfo->addr, repinfo->addrlen);
1961 if(repinfo->c->dtenv != NULL &&
1962 repinfo->c->dtenv->log_client_response_messages)
1963 dt_msg_send_client_response(repinfo->c->dtenv,
1964 &repinfo->addr, repinfo->c->type, repinfo->c->buffer);
/* tcp handler: dnstap env lives on the accept parent */
1968 if(repinfo->c->tcp_parent->dtenv != NULL &&
1969 repinfo->c->tcp_parent->dtenv->log_client_response_messages)
1970 dt_msg_send_client_response(repinfo->c->tcp_parent->dtenv,
1971 &repinfo->addr, repinfo->c->type, repinfo->c->buffer);
/* re-arm the event for writing the reply, with a fresh timeout */
1973 comm_point_start_listening(repinfo->c, -1, TCP_QUERY_TIMEOUT);
/* Discard a pending reply: a udp point needs no cleanup (elided
 * early return presumed at 1984-1985 — the visible code shows udp is
 * handled before the tcp reclaim); a tcp handler is reclaimed so its
 * slot returns to the parent's free list. */
1978 comm_point_drop_reply(struct comm_reply* repinfo)
1982 log_assert(repinfo && repinfo->c);
1983 log_assert(repinfo->c->type != comm_tcp_accept);
1984 if(repinfo->c->type == comm_udp)
1986 reclaim_tcp_handler(repinfo->c);
/* Remove the comm point's event from the event base so no further
 * callbacks fire until comm_point_start_listening is called. */
1990 comm_point_stop_listening(struct comm_point* c)
1992 verbose(VERB_ALGO, "comm point stop listening %d", c->fd);
1993 if(event_del(&c->ev->ev) != 0) {
1994 log_err("event_del error to stoplisten");
/* (Re)register the comm point's event.
 * @param newfd: new fd to listen on, or -1 to keep the current fd.
 * @param sec: timeout in seconds; -1 for none, 0 disables the timeout
 *  on event_add (NULL timeval passed). */
1999 comm_point_start_listening(struct comm_point* c, int newfd, int sec)
2001 verbose(VERB_ALGO, "comm point start listening %d",
2002 c->fd==-1?newfd:c->fd);
2003 if(c->type == comm_tcp_accept && !c->tcp_free) {
2004 /* no use to start listening no free slots. */
2007 if(sec != -1 && sec != 0) {
/* lazily allocate the timeout struct on first use */
2009 c->timeout = (struct timeval*)malloc(sizeof(
2012 log_err("cpsl: malloc failed. No net read.");
2016 c->ev->ev.ev_events |= EV_TIMEOUT;
2017 #ifndef S_SPLINT_S /* splint fails on struct timeval. */
2018 c->timeout->tv_sec = sec;
2019 c->timeout->tv_usec = 0;
2020 #endif /* S_SPLINT_S */
/* tcp handlers are half-duplex: listen only for the current mode */
2022 if(c->type == comm_tcp) {
2023 c->ev->ev.ev_events &= ~(EV_READ|EV_WRITE);
2024 if(c->tcp_is_reading)
2025 c->ev->ev.ev_events |= EV_READ;
2026 else c->ev->ev.ev_events |= EV_WRITE;
2037 c->ev->ev.ev_fd = c->fd;
2039 if(event_add(&c->ev->ev, sec==0?NULL:c->timeout) != 0) {
2040 log_err("event_add failed. in cpsl.");
/* Change the read/write interest of an already-registered comm point:
 * delete the event, rewrite EV_READ/EV_WRITE per rd/wr, re-add with
 * the existing timeout. */
2044 void comm_point_listen_for_rw(struct comm_point* c, int rd, int wr)
2046 verbose(VERB_ALGO, "comm point listen_for_rw %d %d", c->fd, wr);
2047 if(event_del(&c->ev->ev) != 0) {
2048 log_err("event_del error to cplf");
2050 c->ev->ev.ev_events &= ~(EV_READ|EV_WRITE);
2051 if(rd) c->ev->ev.ev_events |= EV_READ;
2052 if(wr) c->ev->ev.ev_events |= EV_WRITE;
2053 if(event_add(&c->ev->ev, c->timeout) != 0) {
2054 log_err("event_add failed. in cplf.");
/* Estimate heap memory used by a comm point: the struct itself, its
 * internal event, optional timeout, the buffer for tcp/local points,
 * and (recursively) all tcp handler children of an accept point. */
2058 size_t comm_point_get_mem(struct comm_point* c)
2063 s = sizeof(*c) + sizeof(*c->ev);
2065 s += sizeof(*c->timeout);
2066 if(c->type == comm_tcp || c->type == comm_local)
2067 s += sizeof(*c->buffer) + sldns_buffer_capacity(c->buffer);
2068 if(c->type == comm_tcp_accept) {
2070 for(i=0; i<c->max_tcp_count; i++)
2071 s += comm_point_get_mem(c->tcp_handlers[i]);
/* Create a timer on the given base; cb(cb_arg) fires on expiry.
 * The timer starts disabled; arm it with comm_timer_set.
 * Returns NULL on allocation or event_base_set failure. */
2077 comm_timer_create(struct comm_base* base, void (*cb)(void*), void* cb_arg)
2079 struct comm_timer *tm = (struct comm_timer*)calloc(1,
2080 sizeof(struct comm_timer));
2083 tm->ev_timer = (struct internal_timer*)calloc(1,
2084 sizeof(struct internal_timer));
2086 log_err("malloc failed");
2090 tm->ev_timer->base = base;
2092 tm->cb_arg = cb_arg;
/* fd -1: pure timeout event, no descriptor */
2093 event_set(&tm->ev_timer->ev, -1, EV_TIMEOUT,
2094 comm_timer_callback, tm);
2095 if(event_base_set(base->eb->base, &tm->ev_timer->ev) != 0) {
2096 log_err("timer_create: event_base_set failed.");
/* Stop a pending timer (no-op callback-wise if already fired) and
 * mark it disabled. */
2105 comm_timer_disable(struct comm_timer* timer)
2109 evtimer_del(&timer->ev_timer->ev);
2110 timer->ev_timer->enabled = 0;
/* (Re)arm the timer to fire after the relative time tv.  An already
 * enabled timer is disabled first; the event is re-initialized and
 * re-added (libevent requires re-adding a one-shot timeout). */
2114 comm_timer_set(struct comm_timer* timer, struct timeval* tv)
2117 if(timer->ev_timer->enabled)
2118 comm_timer_disable(timer);
2119 event_set(&timer->ev_timer->ev, -1, EV_TIMEOUT,
2120 comm_timer_callback, timer);
2121 if(event_base_set(timer->ev_timer->base->eb->base,
2122 &timer->ev_timer->ev) != 0)
2123 log_err("comm_timer_set: set_base failed.");
2124 if(evtimer_add(&timer->ev_timer->ev, tv) != 0)
2125 log_err("comm_timer_set: evtimer_add failed.");
2126 timer->ev_timer->enabled = 1;
/* Disable and free the timer and its internal event structure. */
2130 comm_timer_delete(struct comm_timer* timer)
2134 comm_timer_disable(timer);
2135 free(timer->ev_timer);
/* libevent callback for timer expiry: mark the timer disabled
 * (one-shot), refresh time-of-day, and invoke the user callback. */
2140 comm_timer_callback(int ATTR_UNUSED(fd), short event, void* arg)
2142 struct comm_timer* tm = (struct comm_timer*)arg;
2143 if(!(event&EV_TIMEOUT))
2145 comm_base_now(tm->ev_timer->base);
/* clear enabled before the callback: it may re-set the timer */
2146 tm->ev_timer->enabled = 0;
2147 fptr_ok(fptr_whitelist_comm_timer(tm->callback));
2148 (*tm->callback)(tm->cb_arg);
/* Return nonzero if the timer is currently armed. */
2152 comm_timer_is_set(struct comm_timer* timer)
2154 return (int)timer->ev_timer->enabled;
/* Report memory used by a timer (struct plus internal timer). */
2158 comm_timer_get_mem(struct comm_timer* timer)
2160 return sizeof(*timer) + sizeof(struct internal_timer);
/* Create a signal handling structure on the base; callback(sig, arg)
 * runs when a bound signal arrives.  Signals are attached later with
 * comm_signal_bind.  Returns NULL on allocation failure. */
2164 comm_signal_create(struct comm_base* base,
2165 void (*callback)(int, void*), void* cb_arg)
2167 struct comm_signal* com = (struct comm_signal*)malloc(
2168 sizeof(struct comm_signal));
2170 log_err("malloc failed");
2174 com->callback = callback;
2175 com->cb_arg = cb_arg;
/* list of bound signals starts empty */
2176 com->ev_signal = NULL;
/* libevent callback for a bound signal: refresh time-of-day and
 * forward the signal number to the user callback. */
2181 comm_signal_callback(int sig, short event, void* arg)
2183 struct comm_signal* comsig = (struct comm_signal*)arg;
2184 if(!(event & EV_SIGNAL))
2186 comm_base_now(comsig->base);
2187 fptr_ok(fptr_whitelist_comm_signal(comsig->callback));
2188 (*comsig->callback)(sig, comsig->cb_arg);
/* Bind signal number sig to the comm_signal: allocate an internal
 * entry, register a persistent signal event with libevent, and link
 * the entry into the comm_signal's list so delete can unbind it.
 * NOTE(review): error-path frees/returns are elided from this view. */
2192 comm_signal_bind(struct comm_signal* comsig, int sig)
2194 struct internal_signal* entry = (struct internal_signal*)calloc(1,
2195 sizeof(struct internal_signal));
2197 log_err("malloc failed");
2201 /* add signal event */
2202 signal_set(&entry->ev, sig, comm_signal_callback, comsig);
2203 if(event_base_set(comsig->base->eb->base, &entry->ev) != 0) {
2204 log_err("Could not set signal base");
2208 if(signal_add(&entry->ev, NULL) != 0) {
2209 log_err("Could not add signal handler");
2213 /* link into list */
2214 entry->next = comsig->ev_signal;
2215 comsig->ev_signal = entry;
2220 comm_signal_delete(struct comm_signal* comsig)
2222 struct internal_signal* p, *np;
2225 p=comsig->ev_signal;