2 * util/netevent.c - event notification
4 * Copyright (c) 2007, NLnet Labs. All rights reserved.
6 * This software is open source.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * Redistributions of source code must retain the above copyright notice,
13 * this list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 * this list of conditions and the following disclaimer in the documentation
17 * and/or other materials provided with the distribution.
19 * Neither the name of the NLNET LABS nor the names of its contributors may
20 * be used to endorse or promote products derived from this software without
21 * specific prior written permission.
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
29 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
30 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
31 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
32 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 * This file contains event notification functions.
42 #include "util/netevent.h"
44 #include "util/net_help.h"
45 #include "util/fptr_wlist.h"
46 #include "sldns/pkthdr.h"
47 #include "sldns/sbuffer.h"
48 #include "dnstap/dnstap.h"
49 #ifdef HAVE_OPENSSL_SSL_H
50 #include <openssl/ssl.h>
52 #ifdef HAVE_OPENSSL_ERR_H
53 #include <openssl/err.h>
56 /* -------- Start of local definitions -------- */
57 /** if CMSG_ALIGN is not defined on this platform, a workaround */
60 # define CMSG_ALIGN(n) __CMSG_ALIGN(n)
61 # elif defined(CMSG_DATA_ALIGN)
62 # define CMSG_ALIGN _CMSG_DATA_ALIGN
/* fallback: round len up to a multiple of sizeof(long), the usual cmsg alignment */
64 # define CMSG_ALIGN(len) (((len)+sizeof(long)-1) & ~(sizeof(long)-1))
68 /** if CMSG_LEN is not defined on this platform, a workaround */
70 # define CMSG_LEN(len) (CMSG_ALIGN(sizeof(struct cmsghdr))+(len))
73 /** if CMSG_SPACE is not defined on this platform, a workaround */
75 # ifdef _CMSG_HDR_ALIGN
76 # define CMSG_SPACE(l) (CMSG_ALIGN(l)+_CMSG_HDR_ALIGN(sizeof(struct cmsghdr)))
78 # define CMSG_SPACE(l) (CMSG_ALIGN(l)+CMSG_ALIGN(sizeof(struct cmsghdr)))
82 /** The TCP reading or writing query timeout in seconds */
83 #define TCP_QUERY_TIMEOUT 120
85 #ifndef NONBLOCKING_IS_BROKEN
86 /** number of UDP reads to perform per read indication from select */
87 #define NUM_UDP_PER_SELECT 100
/* with broken nonblocking IO, only one recv per readiness indication */
89 #define NUM_UDP_PER_SELECT 1
92 /* We define libevent structures here to hide the libevent stuff. */
96 # include "util/winsock_event.h"
98 # include "util/mini_event.h"
99 # endif /* USE_WINSOCK */
100 #else /* USE_MINI_EVENT */
101 /* we use libevent */
105 # include "event2/event.h"
106 # include "event2/event_struct.h"
107 # include "event2/event_compat.h"
109 #endif /* USE_MINI_EVENT */
112 * The internal event structure for keeping libevent info for the event.
113 * Possibly other structures (list, tree) this is part of.
115 struct internal_event {
/** the comm base this event is registered with */
117 	struct comm_base* base;
118 	/** libevent event type, alloced here */
/*
 * Internal base: per-thread wrapper around the libevent event_base,
 * plus cached time values and the slow-accept timeout event.
 */
123 * Internal base structure, so that every thread has its own events.
125 struct internal_base {
126 	/** libevent event_base type. */
127 	struct event_base* base;
128 	/** seconds time pointer points here */
130 	/** timeval with current time */
132 	/** the event used for slow_accept timeouts */
133 	struct event slow_accept;
134 	/** true if slow_accept is enabled */
135 	int slow_accept_enabled;
139 * Internal timer structure, to store timer event in.
141 struct internal_timer {
/** the comm base that owns this timer */
143 	struct comm_base* base;
144 	/** libevent event type, alloced here */
146 	/** is timer enabled */
151 * Internal signal structure, to store signal event in.
153 struct internal_signal {
154 	/** libevent event type, alloced here */
156 	/** next in signal list */
157 	struct internal_signal* next;
/* forward declaration; defined later in this file */
160 /** create a tcp handler with a parent */
161 static struct comm_point* comm_point_create_tcp_handler(
162 	struct comm_base *base, struct comm_point* parent, size_t bufsize,
163 	comm_point_callback_t* callback, void* callback_arg);
167 #ifdef USE_MINI_EVENT
168 /** minievent updates the time when it blocks. */
169 #define comm_base_now(x) /* nothing to do */
170 #else /* !USE_MINI_EVENT */
171 /** fillup the time values in the event base */
/* Refreshes b->eb->now from gettimeofday() and mirrors the whole
 * seconds into b->eb->secs; logs (does not abort) on failure. */
173 comm_base_now(struct comm_base* b)
175 	if(gettimeofday(&b->eb->now, NULL) < 0) {
176 		log_err("gettimeofday: %s", strerror(errno));
178 	b->eb->secs = (time_t)b->eb->now.tv_sec;
180 #endif /* USE_MINI_EVENT */
/*
 * Create a new comm_base with its own event base.
 * Allocates the comm_base and internal_base with calloc, then picks the
 * event backend at compile time: mini-event (time-sharing), libev, or
 * libevent (event_base_new when available, else event_init).
 * NOTE(review): error paths (calloc/backend failure) are not visible in
 * this view — presumably they free and return NULL; confirm upstream.
 */
183 comm_base_create(int sigs)
185 	struct comm_base* b = (struct comm_base*)calloc(1,
186 		sizeof(struct comm_base));
189 	b->eb = (struct internal_base*)calloc(1, sizeof(struct internal_base));
194 #ifdef USE_MINI_EVENT
196 	/* use mini event time-sharing feature */
197 	b->eb->base = event_init(&b->eb->secs, &b->eb->now);
199 # if defined(HAVE_EV_LOOP) || defined(HAVE_EV_DEFAULT_LOOP)
202 	b->eb->base=(struct event_base *)ev_default_loop(EVFLAG_AUTO);
204 	b->eb->base=(struct event_base *)ev_loop_new(EVFLAG_AUTO);
207 # ifdef HAVE_EVENT_BASE_NEW
208 	b->eb->base = event_base_new();
210 	b->eb->base = event_init();
220 	/* avoid event_get_method call which causes crashes even when
221 	 * not printing, because its result is passed */
223 #if defined(HAVE_EV_LOOP) || defined(HAVE_EV_DEFAULT_LOOP)
225 #elif defined(USE_MINI_EVENT)
230 		"%s uses %s method.",
232 #ifdef HAVE_EVENT_BASE_GET_METHOD
233 		event_base_get_method(b->eb->base)
/* Create a comm_base that wraps a caller-supplied event_base instead of
 * creating its own (used when embedding into an external event loop). */
242 comm_base_create_event(struct event_base* base)
244 	struct comm_base* b = (struct comm_base*)calloc(1,
245 		sizeof(struct comm_base));
248 	b->eb = (struct internal_base*)calloc(1, sizeof(struct internal_base));
/* Destroy a comm_base: cancel the pending slow_accept timeout (if armed)
 * and free the event base where the installed libevent version makes
 * that safe. */
259 comm_base_delete(struct comm_base* b)
263 	if(b->eb->slow_accept_enabled) {
264 		if(event_del(&b->eb->slow_accept) != 0) {
265 			log_err("could not event_del slow_accept");
268 #ifdef USE_MINI_EVENT
269 	event_base_free(b->eb->base);
270 #elif defined(HAVE_EVENT_BASE_FREE) && defined(HAVE_EVENT_BASE_ONCE)
271 	/* only libevent 1.2+ has it, but in 1.2 it is broken -
272 	   assertion fails on signal handling ev that is not deleted
273 	   in libevent 1.3c (event_base_once appears) this is fixed. */
274 	event_base_free(b->eb->base);
275 #endif /* HAVE_EVENT_BASE_FREE and HAVE_EVENT_BASE_ONCE */
/* Like comm_base_delete but leaves the caller-owned event_base alive
 * (counterpart of comm_base_create_event); only the slow_accept timeout
 * is cancelled here. */
282 comm_base_delete_no_base(struct comm_base* b)
286 	if(b->eb->slow_accept_enabled) {
287 		if(event_del(&b->eb->slow_accept) != 0) {
288 			log_err("could not event_del slow_accept");
/* Export pointers to the base's cached time values (secs and timeval)
 * so callers can read the event loop's notion of "now" cheaply. */
297 comm_base_timept(struct comm_base* b, time_t** tt, struct timeval** tv)
/* Run the event loop; a negative/error return from event_base_dispatch
 * is fatal (the server cannot continue without its loop). */
304 comm_base_dispatch(struct comm_base* b)
307 	retval = event_base_dispatch(b->eb->base);
309 		fatal_exit("event_dispatch returned error %d, "
310 			"errno is %s", retval, strerror(errno));
/* Ask the event loop to exit at the next opportunity; failure is logged
 * but not fatal. */
314 void comm_base_exit(struct comm_base* b)
316 	if(event_base_loopexit(b->eb->base, NULL) != 0) {
317 		log_err("Could not loopexit");
/* Install the callbacks used to pause/resume accepting TCP connections
 * when file descriptors run out (see comm_point_perform_accept). */
321 void comm_base_set_slow_accept_handlers(struct comm_base* b,
322 	void (*stop_acc)(void*), void (*start_acc)(void*), void* arg)
324 	b->stop_accept = stop_acc;
325 	b->start_accept = start_acc;
/* Accessor: expose the underlying event_base for code that must talk to
 * libevent directly. */
329 struct event_base* comm_base_internal(struct comm_base* b)
334 /** see if errno for udp has to be logged or not uses globals */
/* Returns nonzero when the current errno after a UDP send deserves a log
 * line. Transient network errors, EPERM/EADDRNOTAVAIL while disconnected,
 * EINVAL on ip4-mapped targets and EACCES on broadcast targets are
 * squelched below the listed verbosity thresholds. Reads global errno
 * and verbosity. */
336 udp_send_errno_needs_log(struct sockaddr* addr, socklen_t addrlen)
338 	/* do not log transient errors (unless high verbosity) */
339 #if defined(ENETUNREACH) || defined(EHOSTDOWN) || defined(EHOSTUNREACH) || defined(ENETDOWN)
353 	if(verbosity < VERB_ALGO)
359 	/* permission denied is gotten for every send if the
360 	 * network is disconnected (on some OS), squelch it */
361 	if( ((errno == EPERM)
362 # ifdef EADDRNOTAVAIL
363 		/* 'Cannot assign requested address' also when disconnected */
364 		|| (errno == EADDRNOTAVAIL)
366 		) && verbosity < VERB_DETAIL)
368 	/* squelch errors where people deploy AAAA ::ffff:bla for
369 	 * authority servers, which we try for intranets. */
370 	if(errno == EINVAL && addr_is_ip4mapped(
371 		(struct sockaddr_storage*)addr, addrlen) &&
372 		verbosity < VERB_DETAIL)
374 	/* SO_BROADCAST sockopt can give access to 255.255.255.255,
375 	 * but a dns cache does not need it. */
376 	if(errno == EACCES && addr_is_broadcast(
377 		(struct sockaddr_storage*)addr, addrlen) &&
378 		verbosity < VERB_DETAIL)
/* TCP connect errors share the same squelching policy as UDP sends. */
383 int tcp_connect_errno_needs_log(struct sockaddr* addr, socklen_t addrlen)
385 	return udp_send_errno_needs_log(addr, addrlen);
388 /* send a UDP reply */
/* Send the buffer contents to addr over c->fd with sendto(). If the send
 * would block, the fd is temporarily switched to blocking, retried, and
 * restored to nonblocking. Failures are logged subject to
 * udp_send_errno_needs_log(); short sends are reported as errors. */
390 comm_point_send_udp_msg(struct comm_point *c, sldns_buffer* packet,
391 	struct sockaddr* addr, socklen_t addrlen)
394 	log_assert(c->fd != -1);
396 	if(sldns_buffer_remaining(packet) == 0)
397 		log_err("error: send empty UDP packet");
399 	log_assert(addr && addrlen > 0);
400 	sent = sendto(c->fd, (void*)sldns_buffer_begin(packet),
401 		sldns_buffer_remaining(packet), 0,
404 		/* try again and block, waiting for IO to complete,
405 		 * we want to send the answer, and we will wait for
406 		 * the ethernet interface buffer to have space. */
408 		if(errno == EAGAIN ||
410 			errno == EWOULDBLOCK ||
414 		if(WSAGetLastError() == WSAEINPROGRESS ||
415 			WSAGetLastError() == WSAENOBUFS ||
416 			WSAGetLastError() == WSAEWOULDBLOCK) {
/* blocking retry of the same sendto */
420 			sent = sendto(c->fd, (void*)sldns_buffer_begin(packet),
421 				sldns_buffer_remaining(packet), 0,
424 			fd_set_nonblock(c->fd);
429 		if(!udp_send_errno_needs_log(addr, addrlen))
432 		verbose(VERB_OPS, "sendto failed: %s", strerror(errno));
434 		verbose(VERB_OPS, "sendto failed: %s",
435 			wsa_strerror(WSAGetLastError()));
437 		log_addr(VERB_OPS, "remote address is",
438 			(struct sockaddr_storage*)addr, addrlen);
440 	} else if((size_t)sent != sldns_buffer_remaining(packet)) {
441 		log_err("sent %d in place of %d bytes",
442 			(int)sent, (int)sldns_buffer_remaining(packet));
448 #if defined(AF_INET6) && defined(IPV6_PKTINFO) && (defined(HAVE_RECVMSG) || defined(HAVE_SENDMSG))
449 /** print debug ancillary info */
/* Logs the pktinfo (source interface/address) stored in a comm_reply:
 * srctype 6 prints in6_pktinfo, srctype 4 prints in_pktinfo or the
 * IP_RECVDSTADDR address, depending on platform support. Debug only. */
450 static void p_ancil(const char* str, struct comm_reply* r)
452 	if(r->srctype != 4 && r->srctype != 6) {
453 		log_info("%s: unknown srctype %d", str, r->srctype);
456 	if(r->srctype == 6) {
458 		if(inet_ntop(AF_INET6, &r->pktinfo.v6info.ipi6_addr,
459 			buf, (socklen_t)sizeof(buf)) == 0) {
460 			(void)strlcpy(buf, "(inet_ntop error)", sizeof(buf));
/* ensure NUL termination even if inet_ntop filled the buffer */
462 		buf[sizeof(buf)-1]=0;
463 		log_info("%s: %s %d", str, buf, r->pktinfo.v6info.ipi6_ifindex);
464 	} else if(r->srctype == 4) {
466 		char buf1[1024], buf2[1024];
467 		if(inet_ntop(AF_INET, &r->pktinfo.v4info.ipi_addr,
468 			buf1, (socklen_t)sizeof(buf1)) == 0) {
469 			(void)strlcpy(buf1, "(inet_ntop error)", sizeof(buf1));
471 		buf1[sizeof(buf1)-1]=0;
472 #ifdef HAVE_STRUCT_IN_PKTINFO_IPI_SPEC_DST
473 		if(inet_ntop(AF_INET, &r->pktinfo.v4info.ipi_spec_dst,
474 			buf2, (socklen_t)sizeof(buf2)) == 0) {
475 			(void)strlcpy(buf2, "(inet_ntop error)", sizeof(buf2));
477 		buf2[sizeof(buf2)-1]=0;
481 		log_info("%s: %d %s %s", str, r->pktinfo.v4info.ipi_ifindex,
483 #elif defined(IP_RECVDSTADDR)
485 		if(inet_ntop(AF_INET, &r->pktinfo.v4addr,
486 			buf1, (socklen_t)sizeof(buf1)) == 0) {
487 			(void)strlcpy(buf1, "(inet_ntop error)", sizeof(buf1));
489 		buf1[sizeof(buf1)-1]=0;
490 		log_info("%s: %s", str, buf1);
491 #endif /* IP_PKTINFO or PI_RECVDSTDADDR */
496 /** send a UDP reply over specified interface*/
498 comm_point_send_udp_msg_if(struct comm_point *c, sldns_buffer* packet,
499 struct sockaddr* addr, socklen_t addrlen, struct comm_reply* r)
501 #if defined(AF_INET6) && defined(IPV6_PKTINFO) && defined(HAVE_SENDMSG)
507 struct cmsghdr *cmsg;
508 #endif /* S_SPLINT_S */
510 log_assert(c->fd != -1);
512 if(sldns_buffer_remaining(packet) == 0)
513 log_err("error: send empty UDP packet");
515 log_assert(addr && addrlen > 0);
518 msg.msg_namelen = addrlen;
519 iov[0].iov_base = sldns_buffer_begin(packet);
520 iov[0].iov_len = sldns_buffer_remaining(packet);
523 msg.msg_control = control;
525 msg.msg_controllen = sizeof(control);
526 #endif /* S_SPLINT_S */
530 cmsg = CMSG_FIRSTHDR(&msg);
531 if(r->srctype == 4) {
534 msg.msg_controllen = CMSG_SPACE(sizeof(struct in_pktinfo));
535 log_assert(msg.msg_controllen <= sizeof(control));
536 cmsg->cmsg_level = IPPROTO_IP;
537 cmsg->cmsg_type = IP_PKTINFO;
538 memmove(CMSG_DATA(cmsg), &r->pktinfo.v4info,
539 sizeof(struct in_pktinfo));
540 /* unset the ifindex to not bypass the routing tables */
541 cmsg_data = CMSG_DATA(cmsg);
542 ((struct in_pktinfo *) cmsg_data)->ipi_ifindex = 0;
543 cmsg->cmsg_len = CMSG_LEN(sizeof(struct in_pktinfo));
544 #elif defined(IP_SENDSRCADDR)
545 msg.msg_controllen = CMSG_SPACE(sizeof(struct in_addr));
546 log_assert(msg.msg_controllen <= sizeof(control));
547 cmsg->cmsg_level = IPPROTO_IP;
548 cmsg->cmsg_type = IP_SENDSRCADDR;
549 memmove(CMSG_DATA(cmsg), &r->pktinfo.v4addr,
550 sizeof(struct in_addr));
551 cmsg->cmsg_len = CMSG_LEN(sizeof(struct in_addr));
553 verbose(VERB_ALGO, "no IP_PKTINFO or IP_SENDSRCADDR");
554 msg.msg_control = NULL;
555 #endif /* IP_PKTINFO or IP_SENDSRCADDR */
556 } else if(r->srctype == 6) {
558 msg.msg_controllen = CMSG_SPACE(sizeof(struct in6_pktinfo));
559 log_assert(msg.msg_controllen <= sizeof(control));
560 cmsg->cmsg_level = IPPROTO_IPV6;
561 cmsg->cmsg_type = IPV6_PKTINFO;
562 memmove(CMSG_DATA(cmsg), &r->pktinfo.v6info,
563 sizeof(struct in6_pktinfo));
564 /* unset the ifindex to not bypass the routing tables */
565 cmsg_data = CMSG_DATA(cmsg);
566 ((struct in6_pktinfo *) cmsg_data)->ipi6_ifindex = 0;
567 cmsg->cmsg_len = CMSG_LEN(sizeof(struct in6_pktinfo));
569 /* try to pass all 0 to use default route */
570 msg.msg_controllen = CMSG_SPACE(sizeof(struct in6_pktinfo));
571 log_assert(msg.msg_controllen <= sizeof(control));
572 cmsg->cmsg_level = IPPROTO_IPV6;
573 cmsg->cmsg_type = IPV6_PKTINFO;
574 memset(CMSG_DATA(cmsg), 0, sizeof(struct in6_pktinfo));
575 cmsg->cmsg_len = CMSG_LEN(sizeof(struct in6_pktinfo));
577 #endif /* S_SPLINT_S */
578 if(verbosity >= VERB_ALGO)
579 p_ancil("send_udp over interface", r);
580 sent = sendmsg(c->fd, &msg, 0);
582 /* try again and block, waiting for IO to complete,
583 * we want to send the answer, and we will wait for
584 * the ethernet interface buffer to have space. */
586 if(errno == EAGAIN ||
588 errno == EWOULDBLOCK ||
592 if(WSAGetLastError() == WSAEINPROGRESS ||
593 WSAGetLastError() == WSAENOBUFS ||
594 WSAGetLastError() == WSAEWOULDBLOCK) {
598 sent = sendmsg(c->fd, &msg, 0);
600 fd_set_nonblock(c->fd);
605 if(!udp_send_errno_needs_log(addr, addrlen))
607 verbose(VERB_OPS, "sendmsg failed: %s", strerror(errno));
608 log_addr(VERB_OPS, "remote address is",
609 (struct sockaddr_storage*)addr, addrlen);
611 /* netbsd 7 has IP_PKTINFO for recv but not send */
612 if(errno == EINVAL && r->srctype == 4)
613 log_err("sendmsg: No support for sendmsg(IP_PKTINFO). "
614 "Please disable interface-automatic");
617 } else if((size_t)sent != sldns_buffer_remaining(packet)) {
618 log_err("sent %d in place of %d bytes",
619 (int)sent, (int)sldns_buffer_remaining(packet));
629 log_err("sendmsg: IPV6_PKTINFO not supported");
631 #endif /* AF_INET6 && IPV6_PKTINFO && HAVE_SENDMSG */
/*
 * Event callback: UDP socket readable, interface-automatic variant.
 * Reads up to NUM_UDP_PER_SELECT datagrams with recvmsg(), capturing
 * pktinfo ancillary data (destination interface/address) into rep, then
 * invokes the comm point callback; a nonzero callback return means "send
 * this buffer back now" via comm_point_send_udp_msg_if.
 */
635 comm_point_udp_ancil_callback(int fd, short event, void* arg)
637 #if defined(AF_INET6) && defined(IPV6_PKTINFO) && defined(HAVE_RECVMSG)
638 	struct comm_reply rep;
645 	struct cmsghdr* cmsg;
646 #endif /* S_SPLINT_S */
648 	rep.c = (struct comm_point*)arg;
649 	log_assert(rep.c->type == comm_udp);
653 	log_assert(rep.c && rep.c->buffer && rep.c->fd == fd);
654 	comm_base_now(rep.c->ev->base);
655 	for(i=0; i<NUM_UDP_PER_SELECT; i++) {
656 		sldns_buffer_clear(rep.c->buffer);
657 		rep.addrlen = (socklen_t)sizeof(rep.addr);
658 		log_assert(fd != -1);
659 		log_assert(sldns_buffer_remaining(rep.c->buffer) > 0);
660 		msg.msg_name = &rep.addr;
661 		msg.msg_namelen = (socklen_t)sizeof(rep.addr);
662 		iov[0].iov_base = sldns_buffer_begin(rep.c->buffer);
663 		iov[0].iov_len = sldns_buffer_remaining(rep.c->buffer);
666 		msg.msg_control = ancil;
668 		msg.msg_controllen = sizeof(ancil);
669 #endif /* S_SPLINT_S */
671 		rcv = recvmsg(fd, &msg, 0);
/* EAGAIN/EINTR simply end this read burst; other errors are logged */
673 			if(errno != EAGAIN && errno != EINTR) {
674 				log_err("recvmsg failed: %s", strerror(errno));
678 		rep.addrlen = msg.msg_namelen;
679 		sldns_buffer_skip(rep.c->buffer, rcv);
680 		sldns_buffer_flip(rep.c->buffer);
/* walk the control messages for source interface information */
683 		for(cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
684 			cmsg = CMSG_NXTHDR(&msg, cmsg)) {
685 			if( cmsg->cmsg_level == IPPROTO_IPV6 &&
686 				cmsg->cmsg_type == IPV6_PKTINFO) {
688 				memmove(&rep.pktinfo.v6info, CMSG_DATA(cmsg),
689 					sizeof(struct in6_pktinfo));
692 			} else if( cmsg->cmsg_level == IPPROTO_IP &&
693 				cmsg->cmsg_type == IP_PKTINFO) {
695 				memmove(&rep.pktinfo.v4info, CMSG_DATA(cmsg),
696 					sizeof(struct in_pktinfo));
698 #elif defined(IP_RECVDSTADDR)
699 			} else if( cmsg->cmsg_level == IPPROTO_IP &&
700 				cmsg->cmsg_type == IP_RECVDSTADDR) {
702 				memmove(&rep.pktinfo.v4addr, CMSG_DATA(cmsg),
703 					sizeof(struct in_addr));
705 #endif /* IP_PKTINFO or IP_RECVDSTADDR */
708 		if(verbosity >= VERB_ALGO)
709 			p_ancil("receive_udp on interface", &rep);
710 #endif /* S_SPLINT_S */
711 		fptr_ok(fptr_whitelist_comm_point(rep.c->callback));
712 		if((*rep.c->callback)(rep.c, rep.c->cb_arg, NETEVENT_NOERROR, &rep)) {
713 			/* send back immediate reply */
714 			(void)comm_point_send_udp_msg_if(rep.c, rep.c->buffer,
715 				(struct sockaddr*)&rep.addr, rep.addrlen, &rep);
/* stop the burst if the callback closed this comm point */
717 		if(rep.c->fd == -1) /* commpoint closed */
724 	fatal_exit("recvmsg: No support for IPV6_PKTINFO. "
725 		"Please disable interface-automatic");
726 #endif /* AF_INET6 && IPV6_PKTINFO && HAVE_RECVMSG */
/*
 * Event callback: UDP socket readable (plain variant, no pktinfo).
 * Reads up to NUM_UDP_PER_SELECT datagrams with recvfrom() and hands each
 * to the comm point callback; a nonzero callback return sends the buffer
 * straight back to the peer with comm_point_send_udp_msg.
 */
730 comm_point_udp_callback(int fd, short event, void* arg)
732 	struct comm_reply rep;
736 	rep.c = (struct comm_point*)arg;
737 	log_assert(rep.c->type == comm_udp);
741 	log_assert(rep.c && rep.c->buffer && rep.c->fd == fd);
742 	comm_base_now(rep.c->ev->base);
743 	for(i=0; i<NUM_UDP_PER_SELECT; i++) {
744 		sldns_buffer_clear(rep.c->buffer);
745 		rep.addrlen = (socklen_t)sizeof(rep.addr);
746 		log_assert(fd != -1);
747 		log_assert(sldns_buffer_remaining(rep.c->buffer) > 0);
748 		rcv = recvfrom(fd, (void*)sldns_buffer_begin(rep.c->buffer),
749 			sldns_buffer_remaining(rep.c->buffer), 0,
750 			(struct sockaddr*)&rep.addr, &rep.addrlen);
/* transient errors are silently ignored; the rest are logged */
753 			if(errno != EAGAIN && errno != EINTR)
754 				log_err("recvfrom %d failed: %s",
755 					fd, strerror(errno));
757 			if(WSAGetLastError() != WSAEINPROGRESS &&
758 				WSAGetLastError() != WSAECONNRESET &&
759 				WSAGetLastError()!= WSAEWOULDBLOCK)
760 				log_err("recvfrom failed: %s",
761 					wsa_strerror(WSAGetLastError()));
765 		sldns_buffer_skip(rep.c->buffer, rcv);
766 		sldns_buffer_flip(rep.c->buffer);
768 		fptr_ok(fptr_whitelist_comm_point(rep.c->callback));
769 		if((*rep.c->callback)(rep.c, rep.c->cb_arg, NETEVENT_NOERROR, &rep)) {
770 			/* send back immediate reply */
771 			(void)comm_point_send_udp_msg(rep.c, rep.c->buffer,
772 				(struct sockaddr*)&rep.addr, rep.addrlen);
774 		if(rep.c->fd != fd) /* commpoint closed to -1 or reused for
775 		another UDP port. Note rep.c cannot be reused with TCP fd. */
780 /** Use a new tcp handler for new query fd, set to read query */
/* Resets the handler's buffer and read-state counters, then starts
 * listening on fd with the TCP query timeout. The handler must be free
 * (fd == -1) when called. */
782 setup_tcp_handler(struct comm_point* c, int fd)
784 	log_assert(c->type == comm_tcp);
785 	log_assert(c->fd == -1);
786 	sldns_buffer_clear(c->buffer);
787 	c->tcp_is_reading = 1;
788 	c->tcp_byte_count = 0;
789 	comm_point_start_listening(c, fd, TCP_QUERY_TIMEOUT);
/* Timeout callback for the slow-accept backoff: the wait is over, so
 * re-enable accepting connections via the registered start_accept
 * handler and clear the enabled flag. */
792 void comm_base_handle_slow_accept(int ATTR_UNUSED(fd),
793 	short ATTR_UNUSED(event), void* arg)
795 	struct comm_base* b = (struct comm_base*)arg;
796 	/* timeout for the slow accept, re-enable accepts again */
797 	if(b->start_accept) {
798 		verbose(VERB_ALGO, "wait is over, slow accept disabled");
799 		fptr_ok(fptr_whitelist_start_accept(b->start_accept));
800 		(*b->start_accept)(b->cb_arg);
801 		b->eb->slow_accept_enabled = 0;
/*
 * Accept a new connection on c->fd, storing the peer address in
 * addr/addrlen and returning the new (nonblocking) fd. Transient errors
 * are tolerated; ENFILE/EMFILE (out of file descriptors) triggers the
 * slow-accept mechanism: accepting is paused and a timer re-enables it
 * after NETEVENT_SLOW_ACCEPT_TIME.
 * NOTE(review): tv_usec is set to NETEVENT_SLOW_ACCEPT_TIME%1000, which
 * for a millisecond constant would need *1000 to become microseconds —
 * confirm against upstream before changing.
 */
805 int comm_point_perform_accept(struct comm_point* c,
806 	struct sockaddr_storage* addr, socklen_t* addrlen)
809 	*addrlen = (socklen_t)sizeof(*addr);
810 	new_fd = accept(c->fd, (struct sockaddr*)addr, addrlen);
813 		/* EINTR is signal interrupt. others are closed connection. */
814 		if(	errno == EINTR || errno == EAGAIN
816 			|| errno == EWOULDBLOCK
819 			|| errno == ECONNABORTED
826 #if defined(ENFILE) && defined(EMFILE)
827 		if(errno == ENFILE || errno == EMFILE) {
828 			/* out of file descriptors, likely outside of our
829 			 * control. stop accept() calls for some time */
830 			if(c->ev->base->stop_accept) {
831 				struct comm_base* b = c->ev->base;
833 				verbose(VERB_ALGO, "out of file descriptors: "
835 				b->eb->slow_accept_enabled = 1;
836 				fptr_ok(fptr_whitelist_stop_accept(
838 				(*b->stop_accept)(b->cb_arg);
839 				/* set timeout, no mallocs */
840 				tv.tv_sec = NETEVENT_SLOW_ACCEPT_TIME/1000;
841 				tv.tv_usec = NETEVENT_SLOW_ACCEPT_TIME%1000;
842 				event_set(&b->eb->slow_accept, -1, EV_TIMEOUT,
843 					comm_base_handle_slow_accept, b);
844 				if(event_base_set(b->eb->base,
845 					&b->eb->slow_accept) != 0) {
846 					/* we do not want to log here, because
847 					 * that would spam the logfiles.
848 					 * error: "event_base_set failed." */
850 				if(event_add(&b->eb->slow_accept, &tv) != 0) {
851 					/* we do not want to log here,
852 					 * error: "event_add failed." */
858 		log_err_addr("accept failed", strerror(errno), addr, *addrlen);
859 #else /* USE_WINSOCK */
860 		if(WSAGetLastError() == WSAEINPROGRESS ||
861 			WSAGetLastError() == WSAECONNRESET)
863 		if(WSAGetLastError() == WSAEWOULDBLOCK) {
864 			winsock_tcp_wouldblock(&c->ev->ev, EV_READ);
867 		log_err_addr("accept failed", wsa_strerror(WSAGetLastError()),
/* the accepted socket must not inherit blocking mode */
872 	fd_set_nonblock(new_fd);
/*
 * OpenSSL BIO callback for Windows: after a read/gets or write/puts that
 * returned 0, check WSAGetLastError() for WSAEWOULDBLOCK and re-arm the
 * winsock event (stored as the BIO callback arg) for the corresponding
 * direction, so the event loop retries when the socket is ready.
 */
877 static long win_bio_cb(BIO *b, int oper, const char* ATTR_UNUSED(argp),
878 	int ATTR_UNUSED(argi), long argl, long retvalue)
880 	verbose(VERB_ALGO, "bio_cb %d, %s %s %s", oper,
881 		(oper&BIO_CB_RETURN)?"return":"before",
882 		(oper&BIO_CB_READ)?"read":((oper&BIO_CB_WRITE)?"write":"other"),
883 		WSAGetLastError()==WSAEWOULDBLOCK?"wsawb":"");
884 	/* on windows, check if previous operation caused EWOULDBLOCK */
885 	if( (oper == (BIO_CB_READ|BIO_CB_RETURN) && argl == 0) ||
886 		(oper == (BIO_CB_GETS|BIO_CB_RETURN) && argl == 0)) {
887 		if(WSAGetLastError() == WSAEWOULDBLOCK)
888 			winsock_tcp_wouldblock((struct event*)
889 				BIO_get_callback_arg(b), EV_READ);
891 	if( (oper == (BIO_CB_WRITE|BIO_CB_RETURN) && argl == 0) ||
892 		(oper == (BIO_CB_PUTS|BIO_CB_RETURN) && argl == 0)) {
893 		if(WSAGetLastError() == WSAEWOULDBLOCK)
894 			winsock_tcp_wouldblock((struct event*)
895 				BIO_get_callback_arg(b), EV_WRITE);
897 	/* return original return value */
901 /** set win bio callbacks for nonblocking operations */
/* Installs win_bio_cb on both the read and write BIO of the SSL object,
 * with the comm point's event as the callback argument. */
903 comm_point_tcp_win_bio_cb(struct comm_point* c, void* thessl)
905 	SSL* ssl = (SSL*)thessl;
906 	/* set them both just in case, but usually they are the same BIO */
907 	BIO_set_callback(SSL_get_rbio(ssl), &win_bio_cb);
908 	BIO_set_callback_arg(SSL_get_rbio(ssl), (char*)&c->ev->ev);
909 	BIO_set_callback(SSL_get_wbio(ssl), &win_bio_cb);
910 	BIO_set_callback_arg(SSL_get_wbio(ssl), (char*)&c->ev->ev);
/*
 * Event callback on the TCP accept socket: take a free tcp handler,
 * accept the connection into it, wrap it in SSL if configured, hand the
 * handler its buffers, and start it reading the query. The accept socket
 * stops listening while handlers are exhausted (re-enabled by
 * reclaim_tcp_handler).
 */
915 comm_point_tcp_accept_callback(int fd, short event, void* arg)
917 	struct comm_point* c = (struct comm_point*)arg, *c_hdl;
919 	log_assert(c->type == comm_tcp_accept);
920 	if(!(event & EV_READ)) {
921 		log_info("ignoring tcp accept event %d", (int)event);
924 	comm_base_now(c->ev->base);
925 	/* find free tcp handler. */
927 		log_warn("accepted too many tcp, connections full");
930 	/* accept incoming connection. */
932 	log_assert(fd != -1);
933 	new_fd = comm_point_perform_accept(c, &c_hdl->repinfo.addr,
934 		&c_hdl->repinfo.addrlen);
/* TLS-enabled listener: wrap the new fd in an SSL object */
938 		c_hdl->ssl = incoming_ssl_fd(c->ssl, new_fd);
941 			comm_point_close(c_hdl);
944 		c_hdl->ssl_shake_state = comm_ssl_shake_read;
946 		comm_point_tcp_win_bio_cb(c_hdl, c_hdl->ssl);
950 	/* grab the tcp handler buffers */
952 	c->tcp_free = c_hdl->tcp_free;
954 		/* stop accepting incoming queries for now. */
955 		comm_point_stop_listening(c);
957 	setup_tcp_handler(c_hdl, new_fd);
960 /** Make tcp handler free for next assignment */
/* Shuts down any SSL session, returns the handler to its parent's free
 * list (decrementing the active count), and re-enables listening on the
 * parent accept socket. */
962 reclaim_tcp_handler(struct comm_point* c)
964 	log_assert(c->type == comm_tcp);
967 		SSL_shutdown(c->ssl);
974 		c->tcp_parent->cur_tcp_count--;
975 		c->tcp_free = c->tcp_parent->tcp_free;
976 		c->tcp_parent->tcp_free = c;
978 			/* re-enable listening on accept socket */
979 			comm_point_start_listening(c->tcp_parent, -1, -1);
984 /** do the callback when writing is done */
/* After a full answer is written: clear the buffer, flip back to reading
 * (when rw toggling is on), reset the byte counter, and switch the event
 * registration from write to read interest. */
986 tcp_callback_writer(struct comm_point* c)
988 	log_assert(c->type == comm_tcp);
989 	sldns_buffer_clear(c->buffer);
990 	if(c->tcp_do_toggle_rw)
991 		c->tcp_is_reading = 1;
992 	c->tcp_byte_count = 0;
993 	/* switch from listening(write) to listening(read) */
994 	comm_point_stop_listening(c);
995 	comm_point_start_listening(c, -1, -1);
998 /** do the callback when reading is done */
/* A complete query has been read: flip the buffer for consumption, switch
 * to write mode (when rw toggling is on), and invoke the comm point
 * callback. A nonzero callback return means an answer is ready, so
 * listening restarts with the TCP query timeout to write it out. */
1000 tcp_callback_reader(struct comm_point* c)
1002 	log_assert(c->type == comm_tcp || c->type == comm_local);
1003 	sldns_buffer_flip(c->buffer);
1004 	if(c->tcp_do_toggle_rw)
1005 		c->tcp_is_reading = 0;
1006 	c->tcp_byte_count = 0;
1007 	if(c->type == comm_tcp)
1008 		comm_point_stop_listening(c);
1009 	fptr_ok(fptr_whitelist_comm_point(c->callback));
1010 	if( (*c->callback)(c, c->cb_arg, NETEVENT_NOERROR, &c->repinfo) ) {
1011 		comm_point_start_listening(c, -1, TCP_QUERY_TIMEOUT);
1015 /** continue ssl handshake */
/*
 * Drive SSL_do_handshake() to completion on a nonblocking socket.
 * The shake_state records which IO condition we were waiting for;
 * WANT_READ/WANT_WRITE switch the event registration accordingly and
 * return to the loop. Returns 0 on a closed/failed handshake. Once the
 * handshake finishes, listening interest is restored to match
 * tcp_is_reading and shake_state is cleared.
 */
1018 ssl_handshake(struct comm_point* c)
1021 	if(c->ssl_shake_state == comm_ssl_shake_hs_read) {
1022 		/* read condition satisfied back to writing */
1023 		comm_point_listen_for_rw(c, 1, 1);
1024 		c->ssl_shake_state = comm_ssl_shake_none;
1027 	if(c->ssl_shake_state == comm_ssl_shake_hs_write) {
1028 		/* write condition satisfied, back to reading */
1029 		comm_point_listen_for_rw(c, 1, 0);
1030 		c->ssl_shake_state = comm_ssl_shake_none;
1035 	r = SSL_do_handshake(c->ssl);
1037 		int want = SSL_get_error(c->ssl, r);
1038 		if(want == SSL_ERROR_WANT_READ) {
/* already waiting for read: nothing more to do now */
1039 			if(c->ssl_shake_state == comm_ssl_shake_read)
1041 			c->ssl_shake_state = comm_ssl_shake_read;
1042 			comm_point_listen_for_rw(c, 1, 0);
1044 		} else if(want == SSL_ERROR_WANT_WRITE) {
1045 			if(c->ssl_shake_state == comm_ssl_shake_write)
1047 			c->ssl_shake_state = comm_ssl_shake_write;
1048 			comm_point_listen_for_rw(c, 0, 1);
1051 			return 0; /* closed */
1052 		} else if(want == SSL_ERROR_SYSCALL) {
1053 			/* SYSCALL and errno==0 means closed uncleanly */
1055 				log_err("SSL_handshake syscall: %s",
1059 			log_crypto_err("ssl handshake failed");
1060 			log_addr(1, "ssl handshake failed", &c->repinfo.addr,
1061 				c->repinfo.addrlen);
1065 	/* this is where peer verification could take place */
1066 	log_addr(VERB_ALGO, "SSL DNS connection", &c->repinfo.addr,
1067 		c->repinfo.addrlen);
1069 	/* setup listen rw correctly */
1070 	if(c->tcp_is_reading) {
1071 		if(c->ssl_shake_state != comm_ssl_shake_read)
1072 			comm_point_listen_for_rw(c, 1, 0);
1074 		comm_point_listen_for_rw(c, 1, 1);
1076 	c->ssl_shake_state = comm_ssl_shake_none;
1079 #endif /* HAVE_SSL */
1081 /** ssl read callback on TCP */
/*
 * Read a length-prefixed DNS message from the TLS stream.
 * First completes any pending handshake, then reads the 2-byte length,
 * validates it (fits the buffer, at least a DNS header), and reads the
 * payload. WANT_READ returns 1 to wait; WANT_WRITE switches the event to
 * write interest; ZERO_RETURN/errors return 0 (connection done/failed).
 * Calls tcp_callback_reader once the full message is in the buffer.
 */
1083 ssl_handle_read(struct comm_point* c)
1087 	if(c->ssl_shake_state != comm_ssl_shake_none) {
1088 		if(!ssl_handshake(c))
1090 		if(c->ssl_shake_state != comm_ssl_shake_none)
1093 	if(c->tcp_byte_count < sizeof(uint16_t)) {
1094 		/* read length bytes */
1096 		if((r=SSL_read(c->ssl, (void*)sldns_buffer_at(c->buffer,
1097 			c->tcp_byte_count), (int)(sizeof(uint16_t) -
1098 			c->tcp_byte_count))) <= 0) {
1099 			int want = SSL_get_error(c->ssl, r);
1100 			if(want == SSL_ERROR_ZERO_RETURN) {
1101 				return 0; /* shutdown, closed */
1102 			} else if(want == SSL_ERROR_WANT_READ) {
1103 				return 1; /* read more later */
1104 			} else if(want == SSL_ERROR_WANT_WRITE) {
1105 				c->ssl_shake_state = comm_ssl_shake_hs_write;
1106 				comm_point_listen_for_rw(c, 0, 1);
1108 			} else if(want == SSL_ERROR_SYSCALL) {
1110 					log_err("SSL_read syscall: %s",
1114 			log_crypto_err("could not SSL_read");
1117 		c->tcp_byte_count += r;
/* wait until both length bytes have arrived */
1118 		if(c->tcp_byte_count != sizeof(uint16_t))
1120 		if(sldns_buffer_read_u16_at(c->buffer, 0) >
1121 			sldns_buffer_capacity(c->buffer)) {
1122 			verbose(VERB_QUERY, "ssl: dropped larger than buffer");
1125 		sldns_buffer_set_limit(c->buffer,
1126 			sldns_buffer_read_u16_at(c->buffer, 0));
1127 		if(sldns_buffer_limit(c->buffer) < LDNS_HEADER_SIZE) {
1128 			verbose(VERB_QUERY, "ssl: dropped bogus too short.");
1131 		verbose(VERB_ALGO, "Reading ssl tcp query of length %d",
1132 			(int)sldns_buffer_limit(c->buffer));
1134 	log_assert(sldns_buffer_remaining(c->buffer) > 0);
1136 	r = SSL_read(c->ssl, (void*)sldns_buffer_current(c->buffer),
1137 		(int)sldns_buffer_remaining(c->buffer));
1139 		int want = SSL_get_error(c->ssl, r);
1140 		if(want == SSL_ERROR_ZERO_RETURN) {
1141 			return 0; /* shutdown, closed */
1142 		} else if(want == SSL_ERROR_WANT_READ) {
1143 			return 1; /* read more later */
1144 		} else if(want == SSL_ERROR_WANT_WRITE) {
1145 			c->ssl_shake_state = comm_ssl_shake_hs_write;
1146 			comm_point_listen_for_rw(c, 0, 1);
1148 		} else if(want == SSL_ERROR_SYSCALL) {
1150 				log_err("SSL_read syscall: %s",
1154 		log_crypto_err("could not SSL_read");
1157 	sldns_buffer_skip(c->buffer, (ssize_t)r);
1158 	if(sldns_buffer_remaining(c->buffer) <= 0) {
1159 		tcp_callback_reader(c);
1165 #endif /* HAVE_SSL */
1168 /** ssl write callback on TCP */
/*
 * Write a length-prefixed DNS message to the TLS stream.
 * Completes any pending handshake first; enables partial writes so short
 * SSL_write results are acceptable. Writes the 2-byte network-order
 * length, then the payload from the buffer position. WANT_READ switches
 * to read interest and waits; WANT_WRITE waits for writability;
 * ZERO_RETURN/errors return 0. Calls tcp_callback_writer when the whole
 * message has been sent.
 */
1170 ssl_handle_write(struct comm_point* c)
1174 	if(c->ssl_shake_state != comm_ssl_shake_none) {
1175 		if(!ssl_handshake(c))
1177 		if(c->ssl_shake_state != comm_ssl_shake_none)
1180 	/* ignore return, if fails we may simply block */
1181 	(void)SSL_set_mode(c->ssl, SSL_MODE_ENABLE_PARTIAL_WRITE);
1182 	if(c->tcp_byte_count < sizeof(uint16_t)) {
1183 		uint16_t len = htons(sldns_buffer_limit(c->buffer));
1185 		r = SSL_write(c->ssl,
1186 			(void*)(((uint8_t*)&len)+c->tcp_byte_count),
1187 			(int)(sizeof(uint16_t)-c->tcp_byte_count));
1189 			int want = SSL_get_error(c->ssl, r);
1190 			if(want == SSL_ERROR_ZERO_RETURN) {
1191 				return 0; /* closed */
1192 			} else if(want == SSL_ERROR_WANT_READ) {
1193 				c->ssl_shake_state = comm_ssl_shake_read;
1194 				comm_point_listen_for_rw(c, 1, 0);
1195 				return 1; /* wait for read condition */
1196 			} else if(want == SSL_ERROR_WANT_WRITE) {
1197 				return 1; /* write more later */
1198 			} else if(want == SSL_ERROR_SYSCALL) {
1200 					log_err("SSL_write syscall: %s",
1204 			log_crypto_err("could not SSL_write");
1207 		c->tcp_byte_count += r;
1208 		if(c->tcp_byte_count < sizeof(uint16_t))
/* position the buffer at the first unsent payload byte */
1210 		sldns_buffer_set_position(c->buffer, c->tcp_byte_count -
1212 		if(sldns_buffer_remaining(c->buffer) == 0) {
1213 			tcp_callback_writer(c);
1217 	log_assert(sldns_buffer_remaining(c->buffer) > 0);
1219 	r = SSL_write(c->ssl, (void*)sldns_buffer_current(c->buffer),
1220 		(int)sldns_buffer_remaining(c->buffer));
1222 		int want = SSL_get_error(c->ssl, r);
1223 		if(want == SSL_ERROR_ZERO_RETURN) {
1224 			return 0; /* closed */
1225 		} else if(want == SSL_ERROR_WANT_READ) {
1226 			c->ssl_shake_state = comm_ssl_shake_read;
1227 			comm_point_listen_for_rw(c, 1, 0);
1228 			return 1; /* wait for read condition */
1229 		} else if(want == SSL_ERROR_WANT_WRITE) {
1230 			return 1; /* write more later */
1231 		} else if(want == SSL_ERROR_SYSCALL) {
1233 				log_err("SSL_write syscall: %s",
1237 		log_crypto_err("could not SSL_write");
1240 	sldns_buffer_skip(c->buffer, (ssize_t)r);
1242 	if(sldns_buffer_remaining(c->buffer) == 0) {
1243 		tcp_callback_writer(c);
1249 #endif /* HAVE_SSL */
1252 /** handle ssl tcp connection with dns contents */
/* Dispatch to the SSL read or write handler based on the comm point's
 * current direction flag. */
1254 ssl_handle_it(struct comm_point* c)
1256 	if(c->tcp_is_reading)
1257 		return ssl_handle_read(c);
1258 	return ssl_handle_write(c);
1261 /** Handle tcp reading callback.
1262 * @param fd: file descriptor of socket.
1263 * @param c: comm point to read from into buffer.
1264 * @param short_ok: if true, very short packets are OK (for comm_local).
1265 * @return: 0 on error
 * (NOTE(review): the rest of this doc comment appears elided in this
 * extract; presumably nonzero means success/try-again.)
1268 comm_point_tcp_handle_read(int fd, struct comm_point* c, int short_ok)
1271 log_assert(c->type == comm_tcp || c->type == comm_local);
/* SSL-enabled connections take the SSL read/write path entirely.
 * NOTE(review): the guard (presumably if(c->ssl)) is elided here. */
1273 return ssl_handle_it(c);
1274 if(!c->tcp_is_reading)
1277 log_assert(fd != -1);
/* Phase 1: accumulate the 2-byte TCP DNS length prefix into the start
 * of the buffer; tcp_byte_count tracks how much of it is read so far. */
1278 if(c->tcp_byte_count < sizeof(uint16_t)) {
1279 /* read length bytes */
1280 r = recv(fd,(void*)sldns_buffer_at(c->buffer,c->tcp_byte_count),
1281 sizeof(uint16_t)-c->tcp_byte_count, 0);
/* transient conditions: try again on a later event */
1286 if(errno == EINTR || errno == EAGAIN)
1289 if(errno == ECONNRESET && verbosity < 2)
1290 return 0; /* silence reset by peer */
1292 log_err_addr("read (in tcp s)", strerror(errno),
1293 &c->repinfo.addr, c->repinfo.addrlen);
1294 #else /* USE_WINSOCK */
1295 if(WSAGetLastError() == WSAECONNRESET)
1297 if(WSAGetLastError() == WSAEINPROGRESS)
/* winsock needs an explicit re-arm of the read event on wouldblock */
1299 if(WSAGetLastError() == WSAEWOULDBLOCK) {
1300 winsock_tcp_wouldblock(&c->ev->ev, EV_READ);
1303 log_err_addr("read (in tcp s)",
1304 wsa_strerror(WSAGetLastError()),
1305 &c->repinfo.addr, c->repinfo.addrlen);
1309 c->tcp_byte_count += r;
/* wait for another event until the full 2-byte length is in */
1310 if(c->tcp_byte_count != sizeof(uint16_t))
/* length prefix complete: validate against buffer capacity */
1312 if(sldns_buffer_read_u16_at(c->buffer, 0) >
1313 sldns_buffer_capacity(c->buffer)) {
1314 verbose(VERB_QUERY, "tcp: dropped larger than buffer");
1317 sldns_buffer_set_limit(c->buffer,
1318 sldns_buffer_read_u16_at(c->buffer, 0));
/* reject messages shorter than a DNS header (unless short_ok;
 * NOTE(review): the first half of this condition is elided here). */
1320 sldns_buffer_limit(c->buffer) < LDNS_HEADER_SIZE) {
1321 verbose(VERB_QUERY, "tcp: dropped bogus too short.");
1324 verbose(VERB_ALGO, "Reading tcp query of length %d",
1325 (int)sldns_buffer_limit(c->buffer));
/* Phase 2: read the message payload into the buffer */
1328 log_assert(sldns_buffer_remaining(c->buffer) > 0);
1329 r = recv(fd, (void*)sldns_buffer_current(c->buffer),
1330 sldns_buffer_remaining(c->buffer), 0);
1333 } else if(r == -1) {
1335 if(errno == EINTR || errno == EAGAIN)
1337 log_err_addr("read (in tcp r)", strerror(errno),
1338 &c->repinfo.addr, c->repinfo.addrlen);
1339 #else /* USE_WINSOCK */
1340 if(WSAGetLastError() == WSAECONNRESET)
1342 if(WSAGetLastError() == WSAEINPROGRESS)
1344 if(WSAGetLastError() == WSAEWOULDBLOCK) {
1345 winsock_tcp_wouldblock(&c->ev->ev, EV_READ);
1348 log_err_addr("read (in tcp r)",
1349 wsa_strerror(WSAGetLastError()),
1350 &c->repinfo.addr, c->repinfo.addrlen);
1354 sldns_buffer_skip(c->buffer, r);
/* whole message received: hand off to the reader callback */
1355 if(sldns_buffer_remaining(c->buffer) <= 0) {
1356 tcp_callback_reader(c);
1362 * Handle tcp writing callback.
1363 * @param fd: file descriptor of socket.
1364 * @param c: comm point to write buffer out of.
1365 * @return: 0 on error
 * (NOTE(review): rest of doc comment elided in this extract.)
1368 comm_point_tcp_handle_write(int fd, struct comm_point* c)
1371 log_assert(c->type == comm_tcp);
1372 if(c->tcp_is_reading && !c->ssl)
1374 log_assert(fd != -1);
/* First write after a nonblocking connect: fetch the deferred
 * connect(2) error with SO_ERROR before attempting to send. */
1375 if(c->tcp_byte_count == 0 && c->tcp_check_nb_connect) {
1376 /* check for pending error from nonblocking connect */
1377 /* from Stevens, unix network programming, vol1, 3rd ed, p450*/
1379 socklen_t len = (socklen_t)sizeof(error);
1380 if(getsockopt(fd, SOL_SOCKET, SO_ERROR, (void*)&error,
1383 error = errno; /* on solaris errno is error */
1384 #else /* USE_WINSOCK */
1385 error = WSAGetLastError();
1389 #if defined(EINPROGRESS) && defined(EWOULDBLOCK)
1390 if(error == EINPROGRESS || error == EWOULDBLOCK)
1391 return 1; /* try again later */
1394 if(error != 0 && verbosity < 2)
1395 return 0; /* silence lots of chatter in the logs */
1396 else if(error != 0) {
1397 log_err_addr("tcp connect", strerror(error),
1398 &c->repinfo.addr, c->repinfo.addrlen);
1399 #else /* USE_WINSOCK */
1401 if(error == WSAEINPROGRESS)
1403 else if(error == WSAEWOULDBLOCK) {
1404 winsock_tcp_wouldblock(&c->ev->ev, EV_WRITE);
1406 } else if(error != 0 && verbosity < 2)
1408 else if(error != 0) {
1409 log_err_addr("tcp connect", wsa_strerror(error),
1410 &c->repinfo.addr, c->repinfo.addrlen);
1411 #endif /* USE_WINSOCK */
/* SSL connections are written via the SSL handlers instead.
 * NOTE(review): the if(c->ssl)-style guard appears elided here. */
1416 return ssl_handle_it(c);
/* Phase 1: write the 2-byte length prefix; with writev the prefix and
 * the message body are submitted in one call to avoid two packets. */
1418 if(c->tcp_byte_count < sizeof(uint16_t)) {
1419 uint16_t len = htons(sldns_buffer_limit(c->buffer));
1421 struct iovec iov[2];
1422 iov[0].iov_base = (uint8_t*)&len + c->tcp_byte_count;
1423 iov[0].iov_len = sizeof(uint16_t) - c->tcp_byte_count;
1424 iov[1].iov_base = sldns_buffer_begin(c->buffer);
1425 iov[1].iov_len = sldns_buffer_limit(c->buffer);
1426 log_assert(iov[0].iov_len > 0);
1427 log_assert(iov[1].iov_len > 0);
1428 r = writev(fd, iov, 2);
1429 #else /* HAVE_WRITEV */
1430 r = send(fd, (void*)(((uint8_t*)&len)+c->tcp_byte_count),
1431 sizeof(uint16_t)-c->tcp_byte_count, 0);
1432 #endif /* HAVE_WRITEV */
1436 if(errno == EPIPE && verbosity < 2)
1437 return 0; /* silence 'broken pipe' */
1439 if(errno == EINTR || errno == EAGAIN)
1442 log_err_addr("tcp writev", strerror(errno),
1443 &c->repinfo.addr, c->repinfo.addrlen);
1444 # else /* HAVE_WRITEV */
1445 log_err_addr("tcp send s", strerror(errno),
1446 &c->repinfo.addr, c->repinfo.addrlen);
1447 # endif /* HAVE_WRITEV */
1449 if(WSAGetLastError() == WSAENOTCONN)
1451 if(WSAGetLastError() == WSAEINPROGRESS)
1453 if(WSAGetLastError() == WSAEWOULDBLOCK) {
1454 winsock_tcp_wouldblock(&c->ev->ev, EV_WRITE);
1457 log_err_addr("tcp send s",
1458 wsa_strerror(WSAGetLastError()),
1459 &c->repinfo.addr, c->repinfo.addrlen);
1463 c->tcp_byte_count += r;
1464 if(c->tcp_byte_count < sizeof(uint16_t))
/* position the buffer past whatever the writev already sent */
1466 sldns_buffer_set_position(c->buffer, c->tcp_byte_count -
1468 if(sldns_buffer_remaining(c->buffer) == 0) {
1469 tcp_callback_writer(c);
/* Phase 2: send the remaining message payload */
1473 log_assert(sldns_buffer_remaining(c->buffer) > 0);
1474 r = send(fd, (void*)sldns_buffer_current(c->buffer),
1475 sldns_buffer_remaining(c->buffer), 0);
1478 if(errno == EINTR || errno == EAGAIN)
1480 log_err_addr("tcp send r", strerror(errno),
1481 &c->repinfo.addr, c->repinfo.addrlen);
1483 if(WSAGetLastError() == WSAEINPROGRESS)
1485 if(WSAGetLastError() == WSAEWOULDBLOCK) {
1486 winsock_tcp_wouldblock(&c->ev->ev, EV_WRITE);
1489 log_err_addr("tcp send r", wsa_strerror(WSAGetLastError()),
1490 &c->repinfo.addr, c->repinfo.addrlen);
1494 sldns_buffer_skip(c->buffer, r);
/* whole message written: notify the writer callback */
1496 if(sldns_buffer_remaining(c->buffer) == 0) {
1497 tcp_callback_writer(c);
/* libevent callback for a TCP comm point handler: dispatches read,
 * write, and timeout events; on failure or timeout the handler is
 * reclaimed and, unless tcp_do_close, the user callback is notified. */
1504 comm_point_tcp_handle_callback(int fd, short event, void* arg)
1506 struct comm_point* c = (struct comm_point*)arg;
1507 log_assert(c->type == comm_tcp);
1508 comm_base_now(c->ev->base);
/* NOTE(review): the EV_READ guard for this branch appears elided in
 * this extract (presumably if(event&EV_READ)). */
1511 if(!comm_point_tcp_handle_read(fd, c, 0)) {
1512 reclaim_tcp_handler(c);
1513 if(!c->tcp_do_close) {
1514 fptr_ok(fptr_whitelist_comm_point(
1516 (void)(*c->callback)(c, c->cb_arg,
1517 NETEVENT_CLOSED, NULL);
1522 if(event&EV_WRITE) {
1523 if(!comm_point_tcp_handle_write(fd, c)) {
1524 reclaim_tcp_handler(c);
1525 if(!c->tcp_do_close) {
1526 fptr_ok(fptr_whitelist_comm_point(
1528 (void)(*c->callback)(c, c->cb_arg,
1529 NETEVENT_CLOSED, NULL);
1534 if(event&EV_TIMEOUT) {
1535 verbose(VERB_QUERY, "tcp took too long, dropped");
1536 reclaim_tcp_handler(c);
1537 if(!c->tcp_do_close) {
1538 fptr_ok(fptr_whitelist_comm_point(c->callback));
1539 (void)(*c->callback)(c, c->cb_arg,
1540 NETEVENT_TIMEOUT, NULL);
/* unknown event mask: log and ignore */
1544 log_err("Ignored event %d for tcphdl.", event);
/* libevent callback for a comm_local point: reads via the TCP read
 * path with short_ok=1, and reports NETEVENT_CLOSED to the user
 * callback if the read fails. */
1547 void comm_point_local_handle_callback(int fd, short event, void* arg)
1549 struct comm_point* c = (struct comm_point*)arg;
1550 log_assert(c->type == comm_local);
1551 comm_base_now(c->ev->base);
/* NOTE(review): the EV_READ guard appears elided in this extract. */
1554 if(!comm_point_tcp_handle_read(fd, c, 1)) {
1555 fptr_ok(fptr_whitelist_comm_point(c->callback));
1556 (void)(*c->callback)(c, c->cb_arg, NETEVENT_CLOSED,
1561 log_err("Ignored event %d for localhdl.", event);
/* libevent callback for a comm_raw point: passes the event straight to
 * the user callback, translating EV_TIMEOUT into NETEVENT_TIMEOUT. */
1564 void comm_point_raw_handle_callback(int ATTR_UNUSED(fd),
1565 short event, void* arg)
1567 struct comm_point* c = (struct comm_point*)arg;
1568 int err = NETEVENT_NOERROR;
1569 log_assert(c->type == comm_raw);
1570 comm_base_now(c->ev->base);
1572 if(event&EV_TIMEOUT)
1573 err = NETEVENT_TIMEOUT;
/* raw callbacks have their own whitelist entry */
1574 fptr_ok(fptr_whitelist_comm_point_raw(c->callback));
1575 (void)(*c->callback)(c, c->cb_arg, err, NULL);
/* Create a UDP comm point: allocates the comm_point and its internal
 * event, initializes the (unused) TCP fields to zero, and registers a
 * persistent read event that fires comm_point_udp_callback.
 * Returns the new comm point, or NULL on failure (allocation or
 * libevent registration; cleanup is via comm_point_delete). */
1579 comm_point_create_udp(struct comm_base *base, int fd, sldns_buffer* buffer,
1580 comm_point_callback_t* callback, void* callback_arg)
1582 struct comm_point* c = (struct comm_point*)calloc(1,
1583 sizeof(struct comm_point));
1587 c->ev = (struct internal_event*)calloc(1,
1588 sizeof(struct internal_event));
/* TCP bookkeeping is unused for UDP but kept zeroed for safety */
1597 c->tcp_is_reading = 0;
1598 c->tcp_byte_count = 0;
1599 c->tcp_parent = NULL;
1600 c->max_tcp_count = 0;
1601 c->cur_tcp_count = 0;
1602 c->tcp_handlers = NULL;
1605 c->tcp_do_close = 0;
1606 c->do_not_close = 0;
1607 c->tcp_do_toggle_rw = 0;
1608 c->tcp_check_nb_connect = 0;
1610 c->callback = callback;
1611 c->cb_arg = callback_arg;
1612 evbits = EV_READ | EV_PERSIST;
1613 /* libevent stuff */
1614 event_set(&c->ev->ev, c->fd, evbits, comm_point_udp_callback, c);
1615 if(event_base_set(base->eb->base, &c->ev->ev) != 0) {
1616 log_err("could not baseset udp event");
1617 comm_point_delete(c);
/* fd may be -1 for a not-yet-opened socket; then do not add yet */
1620 if(fd!=-1 && event_add(&c->ev->ev, c->timeout) != 0 ) {
1621 log_err("could not add udp event");
1622 comm_point_delete(c);
/* Create a UDP comm point that uses the ancillary-data callback
 * (comm_point_udp_ancil_callback) instead of the plain one; otherwise
 * identical in structure to comm_point_create_udp above.
 * Returns the new comm point or NULL on failure. */
1629 comm_point_create_udp_ancil(struct comm_base *base, int fd,
1630 sldns_buffer* buffer,
1631 comm_point_callback_t* callback, void* callback_arg)
1633 struct comm_point* c = (struct comm_point*)calloc(1,
1634 sizeof(struct comm_point));
1638 c->ev = (struct internal_event*)calloc(1,
1639 sizeof(struct internal_event));
/* TCP bookkeeping unused for UDP; kept zeroed */
1648 c->tcp_is_reading = 0;
1649 c->tcp_byte_count = 0;
1650 c->tcp_parent = NULL;
1651 c->max_tcp_count = 0;
1652 c->cur_tcp_count = 0;
1653 c->tcp_handlers = NULL;
1656 c->tcp_do_close = 0;
1657 c->do_not_close = 0;
1659 c->tcp_do_toggle_rw = 0;
1660 c->tcp_check_nb_connect = 0;
1661 c->callback = callback;
1662 c->cb_arg = callback_arg;
1663 evbits = EV_READ | EV_PERSIST;
1664 /* libevent stuff */
1665 event_set(&c->ev->ev, c->fd, evbits, comm_point_udp_ancil_callback, c);
1666 if(event_base_set(base->eb->base, &c->ev->ev) != 0) {
1667 log_err("could not baseset udp event");
1668 comm_point_delete(c);
1671 if(fd!=-1 && event_add(&c->ev->ev, c->timeout) != 0 ) {
1672 log_err("could not add udp event");
1673 comm_point_delete(c);
1679 static struct comm_point*
1680 comm_point_create_tcp_handler(struct comm_base *base,
1681 struct comm_point* parent, size_t bufsize,
1682 comm_point_callback_t* callback, void* callback_arg)
1684 struct comm_point* c = (struct comm_point*)calloc(1,
1685 sizeof(struct comm_point));
1689 c->ev = (struct internal_event*)calloc(1,
1690 sizeof(struct internal_event));
1697 c->buffer = sldns_buffer_new(bufsize);
1703 c->timeout = (struct timeval*)malloc(sizeof(struct timeval));
1705 sldns_buffer_free(c->buffer);
1710 c->tcp_is_reading = 0;
1711 c->tcp_byte_count = 0;
1712 c->tcp_parent = parent;
1713 c->max_tcp_count = 0;
1714 c->cur_tcp_count = 0;
1715 c->tcp_handlers = NULL;
1718 c->tcp_do_close = 0;
1719 c->do_not_close = 0;
1720 c->tcp_do_toggle_rw = 1;
1721 c->tcp_check_nb_connect = 0;
1723 c->callback = callback;
1724 c->cb_arg = callback_arg;
1725 /* add to parent free list */
1726 c->tcp_free = parent->tcp_free;
1727 parent->tcp_free = c;
1728 /* libevent stuff */
1729 evbits = EV_PERSIST | EV_READ | EV_TIMEOUT;
1730 event_set(&c->ev->ev, c->fd, evbits, comm_point_tcp_handle_callback, c);
1731 if(event_base_set(base->eb->base, &c->ev->ev) != 0)
1733 log_err("could not basetset tcphdl event");
1734 parent->tcp_free = c->tcp_free;
/* Create a TCP accept comm point with `num` preallocated handler comm
 * points. The accept point itself has no buffer; each handler (made by
 * comm_point_create_tcp_handler) gets its own. Returns NULL on
 * failure, after comm_point_delete cleanup. */
1743 comm_point_create_tcp(struct comm_base *base, int fd, int num, size_t bufsize,
1744 comm_point_callback_t* callback, void* callback_arg)
1746 struct comm_point* c = (struct comm_point*)calloc(1,
1747 sizeof(struct comm_point));
1750 /* first allocate the TCP accept listener */
1753 c->ev = (struct internal_event*)calloc(1,
1754 sizeof(struct internal_event));
1763 c->tcp_is_reading = 0;
1764 c->tcp_byte_count = 0;
1765 c->tcp_parent = NULL;
/* max_tcp_count bounds the handler array below */
1766 c->max_tcp_count = num;
1767 c->cur_tcp_count = 0;
1768 c->tcp_handlers = (struct comm_point**)calloc((size_t)num,
1769 sizeof(struct comm_point*));
1770 if(!c->tcp_handlers) {
1776 c->type = comm_tcp_accept;
1777 c->tcp_do_close = 0;
1778 c->do_not_close = 0;
1779 c->tcp_do_toggle_rw = 0;
1780 c->tcp_check_nb_connect = 0;
1783 evbits = EV_READ | EV_PERSIST;
1784 /* libevent stuff */
1785 event_set(&c->ev->ev, c->fd, evbits, comm_point_tcp_accept_callback, c);
1786 if(event_base_set(base->eb->base, &c->ev->ev) != 0 ||
1787 event_add(&c->ev->ev, c->timeout) != 0 )
1789 log_err("could not add tcpacc event");
1790 comm_point_delete(c);
1794 /* now prealloc the tcp handlers */
1795 for(i=0; i<num; i++) {
1796 c->tcp_handlers[i] = comm_point_create_tcp_handler(base,
1797 c, bufsize, callback, callback_arg);
1798 if(!c->tcp_handlers[i]) {
/* partial failure: delete tears down already-made handlers */
1799 comm_point_delete(c);
/* Create an outgoing TCP comm point: owns its buffer, starts life
 * waiting for the (nonblocking) connect to complete, hence a
 * persistent WRITE event and tcp_check_nb_connect=1.
 * Returns NULL on failure. */
1808 comm_point_create_tcp_out(struct comm_base *base, size_t bufsize,
1809 comm_point_callback_t* callback, void* callback_arg)
1811 struct comm_point* c = (struct comm_point*)calloc(1,
1812 sizeof(struct comm_point));
1816 c->ev = (struct internal_event*)calloc(1,
1817 sizeof(struct internal_event));
1824 c->buffer = sldns_buffer_new(bufsize);
1831 c->tcp_is_reading = 0;
1832 c->tcp_byte_count = 0;
1833 c->tcp_parent = NULL;
1834 c->max_tcp_count = 0;
1835 c->cur_tcp_count = 0;
1836 c->tcp_handlers = NULL;
1839 c->tcp_do_close = 0;
1840 c->do_not_close = 0;
/* client: writes query first, then toggles to read the answer */
1841 c->tcp_do_toggle_rw = 1;
1842 c->tcp_check_nb_connect = 1;
1844 c->callback = callback;
1845 c->cb_arg = callback_arg;
1846 evbits = EV_PERSIST | EV_WRITE;
1847 event_set(&c->ev->ev, c->fd, evbits, comm_point_tcp_handle_callback, c);
1848 if(event_base_set(base->eb->base, &c->ev->ev) != 0)
1850 log_err("could not basetset tcpout event");
1851 sldns_buffer_free(c->buffer);
/* Create a comm_local point over an existing fd (e.g. a unix/pipe fd):
 * owns a buffer, never closes the fd (do_not_close=1), and listens
 * persistently for reads via comm_point_local_handle_callback.
 * Returns NULL on failure. */
1861 comm_point_create_local(struct comm_base *base, int fd, size_t bufsize,
1862 comm_point_callback_t* callback, void* callback_arg)
1864 struct comm_point* c = (struct comm_point*)calloc(1,
1865 sizeof(struct comm_point));
1869 c->ev = (struct internal_event*)calloc(1,
1870 sizeof(struct internal_event));
1877 c->buffer = sldns_buffer_new(bufsize);
/* local points always read; no direction toggling */
1884 c->tcp_is_reading = 1;
1885 c->tcp_byte_count = 0;
1886 c->tcp_parent = NULL;
1887 c->max_tcp_count = 0;
1888 c->cur_tcp_count = 0;
1889 c->tcp_handlers = NULL;
1891 c->type = comm_local;
1892 c->tcp_do_close = 0;
/* caller retains ownership of the fd */
1893 c->do_not_close = 1;
1894 c->tcp_do_toggle_rw = 0;
1895 c->tcp_check_nb_connect = 0;
1896 c->callback = callback;
1897 c->cb_arg = callback_arg;
1898 /* libevent stuff */
1899 evbits = EV_PERSIST | EV_READ;
1900 event_set(&c->ev->ev, c->fd, evbits, comm_point_local_handle_callback,
1902 if(event_base_set(base->eb->base, &c->ev->ev) != 0 ||
1903 event_add(&c->ev->ev, c->timeout) != 0 )
1905 log_err("could not add localhdl event");
/* Create a comm_raw point over an existing fd: no buffer ownership,
 * never closes the fd; listens persistently for write events when
 * `writing` is set, otherwise for read events.
 * Returns NULL on failure. */
1914 comm_point_create_raw(struct comm_base* base, int fd, int writing,
1915 comm_point_callback_t* callback, void* callback_arg)
1917 struct comm_point* c = (struct comm_point*)calloc(1,
1918 sizeof(struct comm_point));
1922 c->ev = (struct internal_event*)calloc(1,
1923 sizeof(struct internal_event));
1932 c->tcp_is_reading = 0;
1933 c->tcp_byte_count = 0;
1934 c->tcp_parent = NULL;
1935 c->max_tcp_count = 0;
1936 c->cur_tcp_count = 0;
1937 c->tcp_handlers = NULL;
1940 c->tcp_do_close = 0;
/* caller retains ownership of the fd */
1941 c->do_not_close = 1;
1942 c->tcp_do_toggle_rw = 0;
1943 c->tcp_check_nb_connect = 0;
1944 c->callback = callback;
1945 c->cb_arg = callback_arg;
1946 /* libevent stuff */
/* NOTE(review): the if(writing) guard appears elided in this extract */
1948 evbits = EV_PERSIST | EV_WRITE;
1949 else evbits = EV_PERSIST | EV_READ;
1950 event_set(&c->ev->ev, c->fd, evbits, comm_point_raw_handle_callback,
1952 if(event_base_set(base->eb->base, &c->ev->ev) != 0 ||
1953 event_add(&c->ev->ev, c->timeout) != 0 )
1955 log_err("could not add rawhdl event");
/* Detach the comm point from the event loop, then close its fd unless
 * do_not_close is set (comm_local/comm_raw points, whose fds are owned
 * by the caller). The event must be removed before the close, or
 * epoll-style backends misbehave (see comment below). */
1964 comm_point_close(struct comm_point* c)
1969 if(event_del(&c->ev->ev) != 0) {
1970 log_err("could not event_del on close");
1972 /* close fd after removing from event lists, or epoll.. is messed up */
1973 if(c->fd != -1 && !c->do_not_close) {
1974 verbose(VERB_ALGO, "close fd %d", c->fd);
/* Destroy a comm point: shut down SSL if present, close the point,
 * recursively delete any preallocated TCP handlers, and free owned
 * buffers (only comm_tcp/comm_local own their buffer). */
1985 comm_point_delete(struct comm_point* c)
1989 if(c->type == comm_tcp && c->ssl) {
1991 SSL_shutdown(c->ssl);
1995 comm_point_close(c);
1996 if(c->tcp_handlers) {
1998 for(i=0; i<c->max_tcp_count; i++)
1999 comm_point_delete(c->tcp_handlers[i]);
2000 free(c->tcp_handlers);
/* UDP/raw points do not own c->buffer, so do not free it there */
2003 if(c->type == comm_tcp || c->type == comm_local)
2004 sldns_buffer_free(c->buffer);
/* Send the answer in repinfo->c->buffer back to the querier. UDP
 * replies go out immediately (with source-interface pinning when
 * repinfo->srctype is set); TCP replies restart the write-side event
 * with TCP_QUERY_TIMEOUT. Optionally logs the response via dnstap. */
2010 comm_point_send_reply(struct comm_reply *repinfo)
2012 log_assert(repinfo && repinfo->c);
2013 if(repinfo->c->type == comm_udp) {
2014 if(repinfo->srctype)
2015 comm_point_send_udp_msg_if(repinfo->c,
2016 repinfo->c->buffer, (struct sockaddr*)&repinfo->addr,
2017 repinfo->addrlen, repinfo);
2019 comm_point_send_udp_msg(repinfo->c, repinfo->c->buffer,
2020 (struct sockaddr*)&repinfo->addr, repinfo->addrlen);
/* dnstap logging of the client response, when enabled */
2022 if(repinfo->c->dtenv != NULL &&
2023 repinfo->c->dtenv->log_client_response_messages)
2024 dt_msg_send_client_response(repinfo->c->dtenv,
2025 &repinfo->addr, repinfo->c->type, repinfo->c->buffer);
/* TCP: dtenv hangs off the accepting parent comm point */
2029 if(repinfo->c->tcp_parent->dtenv != NULL &&
2030 repinfo->c->tcp_parent->dtenv->log_client_response_messages)
2031 dt_msg_send_client_response(repinfo->c->tcp_parent->dtenv,
2032 &repinfo->addr, repinfo->c->type, repinfo->c->buffer);
2034 comm_point_start_listening(repinfo->c, -1, TCP_QUERY_TIMEOUT);
/* Abandon a reply without sending: a no-op for UDP (nothing was
 * reserved), but a TCP handler must be reclaimed into the parent's
 * free list. */
2039 comm_point_drop_reply(struct comm_reply* repinfo)
2043 log_assert(repinfo && repinfo->c);
2044 log_assert(repinfo->c->type != comm_tcp_accept);
2045 if(repinfo->c->type == comm_udp)
2047 reclaim_tcp_handler(repinfo->c);
/* Remove the comm point's event from the event loop so no further
 * callbacks fire until comm_point_start_listening is called. */
2051 comm_point_stop_listening(struct comm_point* c)
2053 verbose(VERB_ALGO, "comm point stop listening %d", c->fd);
2054 if(event_del(&c->ev->ev) != 0) {
2055 log_err("event_del error to stoplisten");
/* (Re)arm the comm point's event. newfd of -1 keeps the current fd;
 * sec is a timeout in seconds (-1: leave timeout alone, 0: no
 * timeout). For comm_tcp, read vs write interest follows
 * tcp_is_reading; a tcp_accept point with no free handlers is not
 * re-armed. */
2060 comm_point_start_listening(struct comm_point* c, int newfd, int sec)
2062 verbose(VERB_ALGO, "comm point start listening %d",
2063 c->fd==-1?newfd:c->fd);
2064 if(c->type == comm_tcp_accept && !c->tcp_free) {
2065 /* no use to start listening no free slots. */
2068 if(sec != -1 && sec != 0) {
/* lazily allocate the timeout struct on first use.
 * NOTE(review): the if(!c->timeout) guard appears elided here. */
2070 c->timeout = (struct timeval*)malloc(sizeof(
2073 log_err("cpsl: malloc failed. No net read.");
2077 c->ev->ev.ev_events |= EV_TIMEOUT;
2078 #ifndef S_SPLINT_S /* splint fails on struct timeval. */
2079 c->timeout->tv_sec = sec;
2080 c->timeout->tv_usec = 0;
2081 #endif /* S_SPLINT_S */
2083 if(c->type == comm_tcp) {
2084 c->ev->ev.ev_events &= ~(EV_READ|EV_WRITE);
2085 if(c->tcp_is_reading)
2086 c->ev->ev.ev_events |= EV_READ;
2087 else c->ev->ev.ev_events |= EV_WRITE;
/* direct member poke into the libevent struct to install the fd */
2098 c->ev->ev.ev_fd = c->fd;
2100 if(event_add(&c->ev->ev, sec==0?NULL:c->timeout) != 0) {
2101 log_err("event_add failed. in cpsl.");
/* Re-register the comm point's event with exactly the requested
 * read (rd) and/or write (wr) interest, keeping the current timeout.
 * Used by the SSL handlers to wait for the condition OpenSSL needs. */
2105 void comm_point_listen_for_rw(struct comm_point* c, int rd, int wr)
2107 verbose(VERB_ALGO, "comm point listen_for_rw %d %d", c->fd, wr);
2108 if(event_del(&c->ev->ev) != 0) {
2109 log_err("event_del error to cplf");
2111 c->ev->ev.ev_events &= ~(EV_READ|EV_WRITE);
2112 if(rd) c->ev->ev.ev_events |= EV_READ;
2113 if(wr) c->ev->ev.ev_events |= EV_WRITE;
2114 if(event_add(&c->ev->ev, c->timeout) != 0) {
2115 log_err("event_add failed. in cplf.");
/* Account the heap memory owned by a comm point: the struct and its
 * event, the timeout if present, the buffer for buffer-owning types,
 * and (for accept points) all preallocated TCP handlers recursively. */
2119 size_t comm_point_get_mem(struct comm_point* c)
2124 s = sizeof(*c) + sizeof(*c->ev);
2126 s += sizeof(*c->timeout);
2127 if(c->type == comm_tcp || c->type == comm_local)
2128 s += sizeof(*c->buffer) + sldns_buffer_capacity(c->buffer);
2129 if(c->type == comm_tcp_accept) {
2131 for(i=0; i<c->max_tcp_count; i++)
2132 s += comm_point_get_mem(c->tcp_handlers[i]);
/* Create a comm timer bound to the given base: allocates the timer and
 * its internal_timer, and registers an EV_TIMEOUT event that fires
 * comm_timer_callback. Returns NULL on failure. */
2138 comm_timer_create(struct comm_base* base, void (*cb)(void*), void* cb_arg)
2140 struct comm_timer *tm = (struct comm_timer*)calloc(1,
2141 sizeof(struct comm_timer));
2144 tm->ev_timer = (struct internal_timer*)calloc(1,
2145 sizeof(struct internal_timer));
2147 log_err("malloc failed");
2151 tm->ev_timer->base = base;
2153 tm->cb_arg = cb_arg;
/* fd -1: pure timeout event, no file descriptor involved */
2154 event_set(&tm->ev_timer->ev, -1, EV_TIMEOUT,
2155 comm_timer_callback, tm);
2156 if(event_base_set(base->eb->base, &tm->ev_timer->ev) != 0) {
2157 log_err("timer_create: event_base_set failed.");
/* Cancel a pending timer and mark it disabled. */
2166 comm_timer_disable(struct comm_timer* timer)
2170 evtimer_del(&timer->ev_timer->ev);
2171 timer->ev_timer->enabled = 0;
/* (Re)arm the timer to fire after tv. An already-enabled timer is
 * disabled first; the event is re-initialized before adding because
 * libevent requires a fresh event_set for a reused timer event. */
2175 comm_timer_set(struct comm_timer* timer, struct timeval* tv)
2178 if(timer->ev_timer->enabled)
2179 comm_timer_disable(timer);
2180 event_set(&timer->ev_timer->ev, -1, EV_TIMEOUT,
2181 comm_timer_callback, timer);
2182 if(event_base_set(timer->ev_timer->base->eb->base,
2183 &timer->ev_timer->ev) != 0)
2184 log_err("comm_timer_set: set_base failed.");
2185 if(evtimer_add(&timer->ev_timer->ev, tv) != 0)
2186 log_err("comm_timer_set: evtimer_add failed.");
2187 timer->ev_timer->enabled = 1;
/* Destroy a timer: disable it, then free the internal timer struct.
 * NOTE(review): the free of the comm_timer itself appears elided in
 * this extract. */
2191 comm_timer_delete(struct comm_timer* timer)
2195 comm_timer_disable(timer);
2196 free(timer->ev_timer);
/* libevent callback when a comm timer expires: updates the base's
 * cached time, marks the timer no longer pending, and invokes the
 * user's callback. */
2201 comm_timer_callback(int ATTR_UNUSED(fd), short event, void* arg)
2203 struct comm_timer* tm = (struct comm_timer*)arg;
2204 if(!(event&EV_TIMEOUT))
2206 comm_base_now(tm->ev_timer->base);
/* one-shot: clear enabled before dispatch so cb may re-set it */
2207 tm->ev_timer->enabled = 0;
2208 fptr_ok(fptr_whitelist_comm_timer(tm->callback));
2209 (*tm->callback)(tm->cb_arg);
/* Return nonzero when the timer is currently armed. */
2213 comm_timer_is_set(struct comm_timer* timer)
2215 return (int)timer->ev_timer->enabled;
/* Account the heap memory owned by a comm timer (struct + internal). */
2219 comm_timer_get_mem(struct comm_timer* timer)
2221 return sizeof(*timer) + sizeof(struct internal_timer);
/* Create a signal handling structure for the base; individual signals
 * are attached later via comm_signal_bind. Returns NULL on malloc
 * failure. */
2225 comm_signal_create(struct comm_base* base,
2226 void (*callback)(int, void*), void* cb_arg)
2228 struct comm_signal* com = (struct comm_signal*)malloc(
2229 sizeof(struct comm_signal));
2231 log_err("malloc failed");
2235 com->callback = callback;
2236 com->cb_arg = cb_arg;
/* list of bound signals starts empty */
2237 com->ev_signal = NULL;
/* libevent callback when a bound signal fires: updates the cached time
 * and forwards the signal number to the user's callback. */
2242 comm_signal_callback(int sig, short event, void* arg)
2244 struct comm_signal* comsig = (struct comm_signal*)arg;
2245 if(!(event & EV_SIGNAL))
2247 comm_base_now(comsig->base);
2248 fptr_ok(fptr_whitelist_comm_signal(comsig->callback));
2249 (*comsig->callback)(sig, comsig->cb_arg);
/* Bind one signal number to the comm_signal handler: allocates a list
 * entry, registers a persistent signal event with libevent, and links
 * the entry onto comsig->ev_signal. Returns failure on malloc or
 * libevent errors (exact return values elided in this extract). */
2253 comm_signal_bind(struct comm_signal* comsig, int sig)
2255 struct internal_signal* entry = (struct internal_signal*)calloc(1,
2256 sizeof(struct internal_signal));
2258 log_err("malloc failed");
2262 /* add signal event */
2263 signal_set(&entry->ev, sig, comm_signal_callback, comsig);
2264 if(event_base_set(comsig->base->eb->base, &entry->ev) != 0) {
2265 log_err("Could not set signal base");
2269 if(signal_add(&entry->ev, NULL) != 0) {
2270 log_err("Could not add signal handler");
2274 /* link into list */
2275 entry->next = comsig->ev_signal;
2276 comsig->ev_signal = entry;
2281 comm_signal_delete(struct comm_signal* comsig)
2283 struct internal_signal* p, *np;
2286 p=comsig->ev_signal;