2 * unbound.c - unbound validating resolver public API implementation
4 * Copyright (c) 2007, NLnet Labs. All rights reserved.
6 * This software is open source.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * Redistributions of source code must retain the above copyright notice,
13 * this list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 * this list of conditions and the following disclaimer in the documentation
17 * and/or other materials provided with the distribution.
19 * Neither the name of the NLNET LABS nor the names of its contributors may
20 * be used to endorse or promote products derived from this software without
21 * specific prior written permission.
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
29 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
30 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
31 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
32 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 * This file contains functions to resolve DNS queries and
40 * validate the answers. Synchronously and asynchronously.
44 /* include the public api first, it should be able to stand alone */
45 #include "libunbound/unbound.h"
46 #include "libunbound/unbound-event.h"
49 #include "libunbound/context.h"
50 #include "libunbound/libworker.h"
51 #include "util/locks.h"
52 #include "util/config_file.h"
53 #include "util/alloc.h"
54 #include "util/module.h"
55 #include "util/regional.h"
57 #include "util/random.h"
58 #include "util/net_help.h"
59 #include "util/tube.h"
60 #include "util/ub_event.h"
61 #include "services/modstack.h"
62 #include "services/localzone.h"
63 #include "services/cache/infra.h"
64 #include "services/cache/rrset.h"
65 #include "services/authzone.h"
66 #include "sldns/sbuffer.h"
70 #ifdef HAVE_SYS_WAIT_H
77 #if defined(UB_ON_WINDOWS) && defined (HAVE_WINDOWS_H)
80 #endif /* UB_ON_WINDOWS */
82 /** create context functionality, but no pipes */
/* NOTE(review): this listing is sampled; rows carry original line numbers
 * and lines between them (error returns, closing braces) are elided.
 * Visible here: logging setup, winsock init (Windows), superalloc,
 * time^pid-seeded RNG, the three basic locks, module_env + library
 * config, EDNS known options, auth zones, module stack and query tree.
 * Each failing step frees what was built before it. */
83 static struct ub_ctx* ub_ctx_create_nopipe(void)
93 log_init(NULL, 0, NULL); /* logs to stderr */
94 log_ident_set("libunbound");
/* Windows-only: bring up winsock before any socket use */
96 if((r = WSAStartup(MAKEWORD(2,2), &wsa_data)) != 0) {
97 log_err("could not init winsock. WSAStartup: %s",
102 verbosity = 0; /* errors only */
104 ctx = (struct ub_ctx*)calloc(1, sizeof(*ctx));
109 alloc_init(&ctx->superalloc, NULL, 0);
/* seed mixes wall clock with pid; wiped with explicit_bzero after use
 * so the secret does not linger on the stack */
110 seed = (unsigned int)time(NULL) ^ (unsigned int)getpid();
111 if(!(ctx->seed_rnd = ub_initstate(seed, NULL))) {
112 explicit_bzero(&seed, sizeof(seed));
113 ub_randfree(ctx->seed_rnd);
118 explicit_bzero(&seed, sizeof(seed));
119 lock_basic_init(&ctx->qqpipe_lock);
120 lock_basic_init(&ctx->rrpipe_lock);
121 lock_basic_init(&ctx->cfglock);
122 ctx->env = (struct module_env*)calloc(1, sizeof(*ctx->env));
124 ub_randfree(ctx->seed_rnd);
129 ctx->env->cfg = config_create_forlib();
132 ub_randfree(ctx->seed_rnd);
137 /* init edns_known_options */
138 if(!edns_known_options_init(ctx->env)) {
139 config_delete(ctx->env->cfg);
141 ub_randfree(ctx->seed_rnd);
146 ctx->env->auth_zones = auth_zones_create();
147 if(!ctx->env->auth_zones) {
148 edns_known_options_delete(ctx->env);
149 config_delete(ctx->env->cfg);
151 ub_randfree(ctx->seed_rnd);
/* env wiring: shared allocator, no worker yet, validation off by default */
156 ctx->env->alloc = &ctx->superalloc;
157 ctx->env->worker = NULL;
158 ctx->env->need_to_validate = 0;
159 modstack_init(&ctx->mods);
160 rbtree_init(&ctx->queries, &context_query_cmp);
/* Body of ub_ctx_create() — the signature line is elided in this
 * sampled view.  Builds a nopipe context, then the query (qq) and
 * result (rr) tubes; on failure each path tears down, in reverse,
 * everything created so far. */
167 struct ub_ctx* ctx = ub_ctx_create_nopipe();
/* first tube: commands from the library user to the bg worker */
170 if((ctx->qq_pipe = tube_create()) == NULL) {
172 ub_randfree(ctx->seed_rnd);
173 config_delete(ctx->env->cfg);
174 modstack_desetup(&ctx->mods, ctx->env);
175 edns_known_options_delete(ctx->env);
/* second tube: results back from the bg worker; also deletes qq_pipe */
181 if((ctx->rr_pipe = tube_create()) == NULL) {
183 tube_delete(ctx->qq_pipe);
184 ub_randfree(ctx->seed_rnd);
185 config_delete(ctx->env->cfg);
186 modstack_desetup(&ctx->mods, ctx->env);
187 edns_known_options_delete(ctx->env);
/* Create a context driven by a caller-supplied ub_event_base; no pipes
 * are made and processing stays in-process (dothread=1). */
197 ub_ctx_create_ub_event(struct ub_event_base* ueb)
199 struct ub_ctx* ctx = ub_ctx_create_nopipe();
202 /* no pipes, but we have the locks to make sure everything works */
204 ctx->dothread = 1; /* the processing is in the same process,
205 makes ub_cancel and ub_ctx_delete do the right thing */
206 ctx->event_base = ueb;
/* Like ub_ctx_create_ub_event() but takes a libevent event_base and
 * wraps it via ub_libevent_event_base(); error path elided here frees
 * the context when the wrap fails. */
211 ub_ctx_create_event(struct event_base* eb)
213 struct ub_ctx* ctx = ub_ctx_create_nopipe();
216 /* no pipes, but we have the locks to make sure everything works */
218 ctx->dothread = 1; /* the processing is in the same process,
219 makes ub_cancel and ub_ctx_delete do the right thing */
220 ctx->event_base = ub_libevent_event_base(eb);
221 if (!ctx->event_base) {
/* rbtree postorder-traversal callback: delete one outstanding query. */
230 delq(rbnode_type* n, void* ATTR_UNUSED(arg))
232 struct ctx_query* q = (struct ctx_query*)n;
233 context_query_delete(q);
236 /** stop the bg thread */
/* Sends UB_LIBCMD_QUIT over the qq pipe, drains the rr pipe until the
 * quit confirm arrives, then joins the bg thread (or waitpid()s the bg
 * process on non-Windows).  cfglock is released before pipe I/O and
 * retaken to inspect dothread — elided lines between the rows shown. */
237 static void ub_stop_bg(struct ub_ctx* ctx)
239 /* stop the bg thread */
240 lock_basic_lock(&ctx->cfglock);
241 if(ctx->created_bg) {
244 uint32_t cmd = UB_LIBCMD_QUIT;
245 lock_basic_unlock(&ctx->cfglock);
246 lock_basic_lock(&ctx->qqpipe_lock);
247 (void)tube_write_msg(ctx->qq_pipe, (uint8_t*)&cmd,
248 (uint32_t)sizeof(cmd), 0);
249 lock_basic_unlock(&ctx->qqpipe_lock);
250 lock_basic_lock(&ctx->rrpipe_lock);
251 while(tube_read_msg(ctx->rr_pipe, &msg, &len, 0)) {
252 /* discard all results except a quit confirm */
253 if(context_serial_getcmd(msg, len) == UB_LIBCMD_QUIT) {
259 lock_basic_unlock(&ctx->rrpipe_lock);
261 /* if bg worker is a thread, wait for it to exit, so that all
262 * resources are really gone. */
263 lock_basic_lock(&ctx->cfglock);
265 lock_basic_unlock(&ctx->cfglock);
266 ub_thread_join(ctx->bg_tid);
268 lock_basic_unlock(&ctx->cfglock);
269 #ifndef UB_ON_WINDOWS
/* forked-process worker: reap it so no zombie remains */
270 if(waitpid(ctx->bg_pid, NULL, 0) == -1) {
272 log_err("waitpid: %s", strerror(errno));
278 lock_basic_unlock(&ctx->cfglock);
/* Tear down a context: detect a killed bg thread (pthread_kill(...,0)
 * probe), delete the event worker, desetup modules, then free locks,
 * tubes, caches, config, EDNS options, auth zones, RNG state, the
 * allocator and all queued queries.  Locks are deliberately not taken
 * here (see comment at 290).  Many lines elided in this view. */
283 ub_ctx_delete(struct ub_ctx* ctx)
285 struct alloc_cache* a, *na;
289 /* see if bg thread is created and if threads have been killed */
290 /* no locks, because those may be held by terminated threads */
291 /* for processes the read pipe is closed and we see that on read */
293 if(ctx->created_bg && ctx->dothread) {
294 if(pthread_kill(ctx->bg_tid, 0) == ESRCH) {
295 /* thread has been killed */
299 #endif /* HAVE_PTHREAD */
302 libworker_delete_event(ctx->event_worker);
304 modstack_desetup(&ctx->mods, ctx->env);
/* re-parent per-thread alloc caches onto the superalloc before free */
308 a->super = &ctx->superalloc;
313 local_zones_delete(ctx->local_zones);
314 lock_basic_destroy(&ctx->qqpipe_lock);
315 lock_basic_destroy(&ctx->rrpipe_lock);
316 lock_basic_destroy(&ctx->cfglock);
317 tube_delete(ctx->qq_pipe);
318 tube_delete(ctx->rr_pipe);
320 slabhash_delete(ctx->env->msg_cache);
321 rrset_cache_delete(ctx->env->rrset_cache);
322 infra_delete(ctx->env->infra_cache);
323 config_delete(ctx->env->cfg);
324 edns_known_options_delete(ctx->env);
325 auth_zones_delete(ctx->env->auth_zones);
328 ub_randfree(ctx->seed_rnd);
329 alloc_clear(&ctx->superalloc);
/* free every still-outstanding query in the rbtree */
330 traverse_postorder(&ctx->queries, delq, NULL);
/* Set one config option under cfglock; refused with UB_AFTERFINAL once
 * the context is finalized, UB_SYNTAX-style failure path elided. */
338 ub_ctx_set_option(struct ub_ctx* ctx, const char* opt, const char* val)
340 lock_basic_lock(&ctx->cfglock);
342 lock_basic_unlock(&ctx->cfglock);
343 return UB_AFTERFINAL;
345 if(!config_set_option(ctx->env->cfg, opt, val)) {
346 lock_basic_unlock(&ctx->cfglock);
349 lock_basic_unlock(&ctx->cfglock);
/* Read one option into a malloced string *str; maps the collate codes
 * (0/1/2) onto the public UB_* error constants. */
354 ub_ctx_get_option(struct ub_ctx* ctx, const char* opt, char** str)
357 lock_basic_lock(&ctx->cfglock);
358 r = config_get_option_collate(ctx->env->cfg, opt, str);
359 lock_basic_unlock(&ctx->cfglock);
360 if(r == 0) r = UB_NOERROR;
361 else if(r == 1) r = UB_SYNTAX;
362 else if(r == 2) r = UB_NOMEM;
/* Read a whole unbound config file into the context; refused after
 * finalize (UB_AFTERFINAL); read-failure path elided in this view. */
367 ub_ctx_config(struct ub_ctx* ctx, const char* fname)
369 lock_basic_lock(&ctx->cfglock);
371 lock_basic_unlock(&ctx->cfglock);
372 return UB_AFTERFINAL;
374 if(!config_read(ctx->env->cfg, fname, NULL)) {
375 lock_basic_unlock(&ctx->cfglock);
378 lock_basic_unlock(&ctx->cfglock);
/* Add a trust anchor string: dup it, then insert the dup into the
 * trust_anchor_list under cfglock (insert failure frees the dup —
 * lines elided).  Refused after finalize. */
383 ub_ctx_add_ta(struct ub_ctx* ctx, const char* ta)
385 char* dup = strdup(ta);
386 if(!dup) return UB_NOMEM;
387 lock_basic_lock(&ctx->cfglock);
389 lock_basic_unlock(&ctx->cfglock);
391 return UB_AFTERFINAL;
393 if(!cfg_strlist_insert(&ctx->env->cfg->trust_anchor_list, dup)) {
394 lock_basic_unlock(&ctx->cfglock);
397 lock_basic_unlock(&ctx->cfglock);
/* Same pattern as ub_ctx_add_ta(), but the dup'd filename goes into
 * trust_anchor_file_list. */
402 ub_ctx_add_ta_file(struct ub_ctx* ctx, const char* fname)
404 char* dup = strdup(fname);
405 if(!dup) return UB_NOMEM;
406 lock_basic_lock(&ctx->cfglock);
408 lock_basic_unlock(&ctx->cfglock);
410 return UB_AFTERFINAL;
412 if(!cfg_strlist_insert(&ctx->env->cfg->trust_anchor_file_list, dup)) {
413 lock_basic_unlock(&ctx->cfglock);
416 lock_basic_unlock(&ctx->cfglock);
/* Same pattern again: register an RFC5011 auto-trust-anchor file in
 * auto_trust_anchor_file_list. */
420 int ub_ctx_add_ta_autr(struct ub_ctx* ctx, const char* fname)
422 char* dup = strdup(fname);
423 if(!dup) return UB_NOMEM;
424 lock_basic_lock(&ctx->cfglock);
426 lock_basic_unlock(&ctx->cfglock);
428 return UB_AFTERFINAL;
430 if(!cfg_strlist_insert(&ctx->env->cfg->auto_trust_anchor_file_list,
432 lock_basic_unlock(&ctx->cfglock);
435 lock_basic_unlock(&ctx->cfglock);
/* Same pattern: register a BIND-style trusted-keys file in
 * trusted_keys_file_list. */
440 ub_ctx_trustedkeys(struct ub_ctx* ctx, const char* fname)
442 char* dup = strdup(fname);
443 if(!dup) return UB_NOMEM;
444 lock_basic_lock(&ctx->cfglock);
446 lock_basic_unlock(&ctx->cfglock);
448 return UB_AFTERFINAL;
450 if(!cfg_strlist_insert(&ctx->env->cfg->trusted_keys_file_list, dup)) {
451 lock_basic_unlock(&ctx->cfglock);
454 lock_basic_unlock(&ctx->cfglock);
/* Set verbosity in the config under cfglock (the global `verbosity`
 * update at elided line 462 is not visible here). */
459 ub_ctx_debuglevel(struct ub_ctx* ctx, int d)
461 lock_basic_lock(&ctx->cfglock);
463 ctx->env->cfg->verbosity = d;
464 lock_basic_unlock(&ctx->cfglock);
/* Redirect logging to the caller-owned FILE* and remember the override
 * so later config application does not reset it. */
468 int ub_ctx_debugout(struct ub_ctx* ctx, void* out)
470 lock_basic_lock(&ctx->cfglock);
471 log_file((FILE*)out);
472 ctx->logfile_override = 1;
474 lock_basic_unlock(&ctx->cfglock);
/* Choose thread vs fork for async processing.  With THREADS_DISABLED a
 * thread request is rejected (return elided).  Refused after finalize. */
479 ub_ctx_async(struct ub_ctx* ctx, int dothread)
481 #ifdef THREADS_DISABLED
482 if(dothread) /* cannot do threading */
485 lock_basic_lock(&ctx->cfglock);
487 lock_basic_unlock(&ctx->cfglock);
488 return UB_AFTERFINAL;
490 ctx->dothread = dothread;
491 lock_basic_unlock(&ctx->cfglock);
/* Non-blocking readability check on the result pipe. */
496 ub_poll(struct ub_ctx* ctx)
498 /* no need to hold lock while testing for readability. */
499 return tube_poll(ctx->rr_pipe);
/* Expose the result pipe's read fd so the caller can select() on it. */
503 ub_fd(struct ub_ctx* ctx)
505 return tube_read_fd(ctx->rr_pipe);
508 /** process answer from bg worker */
/* Deserializes one bg-worker answer under cfglock, builds the
 * ub_result (parsing the wire message into it via a temporary sldns
 * buffer and regional), hands back the query's callback + arg to the
 * caller (invoked without locks held), and removes the query from the
 * tree.  Several branches (cancelled queries, alloc failures) are
 * elided in this sampled view. */
510 process_answer_detail(struct ub_ctx* ctx, uint8_t* msg, uint32_t len,
511 ub_callback_type* cb, void** cbarg, int* err,
512 struct ub_result** res)
515 if(context_serial_getcmd(msg, len) != UB_LIBCMD_ANSWER) {
516 log_err("error: bad data from bg worker %d",
517 (int)context_serial_getcmd(msg, len));
521 lock_basic_lock(&ctx->cfglock);
522 q = context_deserialize_answer(ctx, msg, len, err);
524 lock_basic_unlock(&ctx->cfglock);
525 /* probably simply the lookup that failed, i.e.
526 * response returned before cancel was sent out, so noerror */
529 log_assert(q->async);
531 /* grab cb while locked */
541 ub_resolve_free(q->res);
543 /* parse the message, extract rcode, fill result */
544 sldns_buffer* buf = sldns_buffer_new(q->msg_len);
545 struct regional* region = regional_create();
/* default rcode until the packet parses successfully */
547 (*res)->rcode = LDNS_RCODE_SERVFAIL;
549 sldns_buffer_clear(buf);
550 sldns_buffer_write(buf, q->msg, q->msg_len);
551 sldns_buffer_flip(buf);
552 libworker_enter_result(*res, buf, region,
/* the result takes ownership of the raw answer packet */
555 (*res)->answer_packet = q->msg;
556 (*res)->answer_len = (int)q->msg_len;
558 sldns_buffer_free(buf);
559 regional_destroy(region);
562 /* delete the q from list */
563 (void)rbtree_delete(&ctx->queries, q->node.key);
565 context_query_delete(q);
566 lock_basic_unlock(&ctx->cfglock);
569 ub_resolve_free(*res);
573 /** process answer from bg worker */
/* Thin wrapper: extract cb/cbarg/err/res via process_answer_detail(),
 * then invoke the user callback with no locks held. */
575 process_answer(struct ub_ctx* ctx, uint8_t* msg, uint32_t len)
580 struct ub_result* res;
583 r = process_answer_detail(ctx, msg, len, &cb, &cbarg, &err, &res);
585 /* no locks held while calling callback, so that library is
588 (*cb)(cbarg, err, res);
/* Drain pending answers: read a message from the rr pipe under its
 * lock, release the lock, then dispatch it via process_answer(). */
594 ub_process(struct ub_ctx* ctx)
601 lock_basic_lock(&ctx->rrpipe_lock);
602 r = tube_read_msg(ctx->rr_pipe, &msg, &len, 1);
603 lock_basic_unlock(&ctx->rrpipe_lock);
608 if(!process_answer(ctx, msg, len)) {
/* Block until all outstanding async queries are done.  Unlike
 * ub_process() this keeps rrpipe_lock held across tube_wait() and the
 * read, releasing it only to run the user callback.  Exits when
 * num_async (checked under cfglock) drops to zero.  Loop-control lines
 * are elided in this sampled view. */
618 ub_wait(struct ub_ctx* ctx)
623 struct ub_result* res;
627 /* this is basically the same loop as _process(), but with changes.
628 * holds the rrpipe lock and waits with tube_wait */
630 lock_basic_lock(&ctx->rrpipe_lock);
631 lock_basic_lock(&ctx->cfglock);
632 if(ctx->num_async == 0) {
633 lock_basic_unlock(&ctx->cfglock);
634 lock_basic_unlock(&ctx->rrpipe_lock);
637 lock_basic_unlock(&ctx->cfglock);
639 /* keep rrpipe locked, while
640 * o waiting for pipe readable
642 * o possibly decrementing num_async
643 * do callback without lock
645 r = tube_wait(ctx->rr_pipe);
647 r = tube_read_msg(ctx->rr_pipe, &msg, &len, 1);
649 lock_basic_unlock(&ctx->rrpipe_lock);
653 lock_basic_unlock(&ctx->rrpipe_lock);
656 r = process_answer_detail(ctx, msg, len,
657 &cb, &cbarg, &err, &res);
658 lock_basic_unlock(&ctx->rrpipe_lock);
/* user callback runs lock-free, as promised above */
663 (*cb)(cbarg, err, res);
665 lock_basic_unlock(&ctx->rrpipe_lock);
/* Synchronous lookup: finalize the config on first use, create a
 * ctx_query, then run libworker_fg() in the calling thread ("become a
 * resolver thread for a bit").  On both failure and success paths the
 * query is removed from the tree and deleted; the raw answer packet is
 * moved into q->res before *result is handed out (handoff elided). */
672 ub_resolve(struct ub_ctx* ctx, const char* name, int rrtype,
673 int rrclass, struct ub_result** result)
679 lock_basic_lock(&ctx->cfglock);
680 if(!ctx->finalized) {
681 r = context_finalize(ctx);
683 lock_basic_unlock(&ctx->cfglock);
687 /* create new ctx_query and attempt to add to the list */
688 lock_basic_unlock(&ctx->cfglock);
689 q = context_new(ctx, name, rrtype, rrclass, NULL, NULL, NULL);
692 /* become a resolver thread for a bit */
694 r = libworker_fg(ctx, q);
696 lock_basic_lock(&ctx->cfglock);
697 (void)rbtree_delete(&ctx->queries, q->node.key);
698 context_query_delete(q);
699 lock_basic_unlock(&ctx->cfglock);
/* move ownership of the wire answer into the result */
702 q->res->answer_packet = q->msg;
703 q->res->answer_len = (int)q->msg_len;
708 lock_basic_lock(&ctx->cfglock);
709 (void)rbtree_delete(&ctx->queries, q->node.key);
710 context_query_delete(q);
711 lock_basic_unlock(&ctx->cfglock);
/* Event-driven async lookup for the *_event/_ub_event contexts:
 * finalize on first use, lazily create the event libworker, stamp the
 * comm base time (so cache answers get a fresh timestamp), make the
 * ctx_query with the event callback, and attach it to the mesh. */
716 ub_resolve_event(struct ub_ctx* ctx, const char* name, int rrtype,
717 int rrclass, void* mydata, ub_event_callback_type callback,
725 lock_basic_lock(&ctx->cfglock);
726 if(!ctx->finalized) {
727 int r = context_finalize(ctx);
729 lock_basic_unlock(&ctx->cfglock);
733 lock_basic_unlock(&ctx->cfglock);
734 if(!ctx->event_worker) {
735 ctx->event_worker = libworker_create_event(ctx,
737 if(!ctx->event_worker) {
742 /* set time in case answer comes from cache */
743 ub_comm_base_now(ctx->event_worker->base);
745 /* create new ctx_query and attempt to add to the list */
746 q = context_new(ctx, name, rrtype, rrclass, NULL, callback, mydata);
751 if((r=libworker_attach_mesh(ctx, q, async_id)) != 0)
/* Pipe-based async lookup: finalize on first use, lazily start the bg
 * worker (thread or fork via libworker_bg(), chosen by ctx->dothread),
 * serialize the new query and write it to the qq pipe.  The async_id
 * out-parameter gets the query number while still under cfglock. */
758 ub_resolve_async(struct ub_ctx* ctx, const char* name, int rrtype,
759 int rrclass, void* mydata, ub_callback_type callback, int* async_id)
767 lock_basic_lock(&ctx->cfglock);
768 if(!ctx->finalized) {
769 int r = context_finalize(ctx);
771 lock_basic_unlock(&ctx->cfglock);
775 if(!ctx->created_bg) {
/* unlock around libworker_bg(): it may fork/spawn and take time */
778 lock_basic_unlock(&ctx->cfglock);
779 r = libworker_bg(ctx);
781 lock_basic_lock(&ctx->cfglock);
783 lock_basic_unlock(&ctx->cfglock);
787 lock_basic_unlock(&ctx->cfglock);
790 /* create new ctx_query and attempt to add to the list */
791 q = context_new(ctx, name, rrtype, rrclass, callback, NULL, mydata);
795 /* write over pipe to background worker */
796 lock_basic_lock(&ctx->cfglock);
797 msg = context_serialize_new_query(q, &len);
799 (void)rbtree_delete(&ctx->queries, q->node.key);
801 context_query_delete(q);
802 lock_basic_unlock(&ctx->cfglock);
806 *async_id = q->querynum;
807 lock_basic_unlock(&ctx->cfglock);
809 lock_basic_lock(&ctx->qqpipe_lock);
810 if(!tube_write_msg(ctx->qq_pipe, msg, len, 0)) {
811 lock_basic_unlock(&ctx->qqpipe_lock);
815 lock_basic_unlock(&ctx->qqpipe_lock);
/* Cancel an outstanding async query by id.  If the query is unknown it
 * is a no-op (answer may already be in flight).  For the forked worker
 * the query is removed and a serialized cancel command is written to
 * the qq pipe; the threaded-worker branch is elided in this view. */
821 ub_cancel(struct ub_ctx* ctx, int async_id)
826 lock_basic_lock(&ctx->cfglock);
827 q = (struct ctx_query*)rbtree_search(&ctx->queries, &async_id);
828 if(!q || !q->async) {
829 /* it is not there, so nothing to do */
830 lock_basic_unlock(&ctx->cfglock);
833 log_assert(q->async);
837 if(!ctx->dothread) { /* if forked */
838 (void)rbtree_delete(&ctx->queries, q->node.key);
840 msg = context_serialize_cancel(q, &len);
841 context_query_delete(q);
842 lock_basic_unlock(&ctx->cfglock);
846 /* send cancel to background worker */
847 lock_basic_lock(&ctx->qqpipe_lock);
848 if(!tube_write_msg(ctx->qq_pipe, msg, len, 0)) {
849 lock_basic_unlock(&ctx->qqpipe_lock);
853 lock_basic_unlock(&ctx->qqpipe_lock);
856 lock_basic_unlock(&ctx->cfglock);
/* Free a ub_result and everything it owns: canonname (only when it is
 * a separate allocation from qname), each NULL-terminated data entry,
 * the raw answer packet and the why_bogus string. */
862 ub_resolve_free(struct ub_result* result)
867 if(result->canonname != result->qname)
868 free(result->canonname);
870 for(p = result->data; *p; p++)
874 free(result->answer_packet);
875 free(result->why_bogus);
/* Switch body of ub_strerror() — the signature and switch() lines are
 * elided in this view.  Maps each public UB_* error code to a static
 * human-readable string. */
883 case UB_NOERROR: return "no error";
884 case UB_SOCKET: return "socket io error";
885 case UB_NOMEM: return "out of memory";
886 case UB_SYNTAX: return "syntax error";
887 case UB_SERVFAIL: return "server failure";
888 case UB_FORKFAIL: return "could not fork";
889 case UB_INITFAIL: return "initialization failure";
890 case UB_AFTERFINAL: return "setting change after finalize";
891 case UB_PIPE: return "error in pipe communication with async";
892 case UB_READFILE: return "error reading file";
893 case UB_NOID: return "error async_id does not exist";
894 default: return "unknown error";
/* Set (or, with the elided NULL-addr branch, clear) the root ('.')
 * forwarder.  Validates the address syntax with extstrtoaddr() outside
 * the lock, then either reuses the existing root stub at the head of
 * cfg->forwards or allocates a new one and pushes it to the front,
 * finally appending the dup'd address to its addrs list. */
899 ub_ctx_set_fwd(struct ub_ctx* ctx, const char* addr)
901 struct sockaddr_storage storage;
903 struct config_stub* s;
905 lock_basic_lock(&ctx->cfglock);
907 lock_basic_unlock(&ctx->cfglock);
909 return UB_AFTERFINAL;
912 /* disable fwd mode - the root stub should be first. */
913 if(ctx->env->cfg->forwards &&
914 strcmp(ctx->env->cfg->forwards->name, ".") == 0) {
915 s = ctx->env->cfg->forwards;
916 ctx->env->cfg->forwards = s->next;
920 lock_basic_unlock(&ctx->cfglock);
923 lock_basic_unlock(&ctx->cfglock);
925 /* check syntax for addr */
926 if(!extstrtoaddr(addr, &storage, &stlen)) {
931 /* it parses, add root stub in front of list */
932 lock_basic_lock(&ctx->cfglock);
933 if(!ctx->env->cfg->forwards ||
934 strcmp(ctx->env->cfg->forwards->name, ".") != 0) {
935 s = calloc(1, sizeof(*s));
937 lock_basic_unlock(&ctx->cfglock);
941 s->name = strdup(".");
944 lock_basic_unlock(&ctx->cfglock);
948 s->next = ctx->env->cfg->forwards;
949 ctx->env->cfg->forwards = s;
951 log_assert(ctx->env->cfg->forwards);
952 s = ctx->env->cfg->forwards;
956 lock_basic_unlock(&ctx->cfglock);
960 if(!cfg_strlist_insert(&s->addrs, dupl)) {
961 lock_basic_unlock(&ctx->cfglock);
965 lock_basic_unlock(&ctx->cfglock);
/* Add, update or delete a stub zone.  Zone name and (optional) address
 * are syntax-checked before taking cfglock.  addr==NULL means delete:
 * found → unlink+free, not found → no-op.  Otherwise the stub is found
 * or freshly created at the head of cfg->stubs, its isprime flag set,
 * and the dup'd address appended.  Some cleanup lines are elided. */
969 int ub_ctx_set_stub(struct ub_ctx* ctx, const char* zone, const char* addr,
973 struct config_stub **prev, *elem;
975 /* check syntax for zone name */
980 if(!parse_dname(zone, &nm, &nmlen, &nmlabs)) {
989 /* check syntax for addr (if not NULL) */
991 struct sockaddr_storage storage;
993 if(!extstrtoaddr(addr, &storage, &stlen)) {
999 lock_basic_lock(&ctx->cfglock);
1000 if(ctx->finalized) {
1001 lock_basic_unlock(&ctx->cfglock);
1003 return UB_AFTERFINAL;
1006 /* arguments all right, now find or add the stub */
1007 prev = &ctx->env->cfg->stubs;
1008 elem = cfg_stub_find(&prev, zone);
1009 if(!elem && !addr) {
1010 /* not found and we want to delete, nothing to do */
1011 lock_basic_unlock(&ctx->cfglock);
1013 } else if(elem && !addr) {
1014 /* found, and we want to delete */
1016 config_delstub(elem);
1017 lock_basic_unlock(&ctx->cfglock);
1020 /* not found, create the stub entry */
1021 elem=(struct config_stub*)calloc(1, sizeof(struct config_stub));
1022 if(elem) elem->name = strdup(zone);
1023 if(!elem || !elem->name) {
1025 lock_basic_unlock(&ctx->cfglock);
1029 elem->next = ctx->env->cfg->stubs;
1030 ctx->env->cfg->stubs = elem;
1033 /* add the address to the list and set settings */
1034 elem->isprime = isprime;
1037 lock_basic_unlock(&ctx->cfglock);
1041 if(!cfg_strlist_insert(&elem->addrs, a)) {
1042 lock_basic_unlock(&ctx->cfglock);
1046 lock_basic_unlock(&ctx->cfglock);
/* Load forwarders from a resolv.conf-style file (default
 * /etc/resolv.conf on POSIX).  On Windows with HAVE_WINDOWS_H it uses
 * GetNetworkParams() instead, feeding each DNS server address to
 * ub_ctx_set_fwd().  Otherwise it parses "nameserver <addr>" lines,
 * terminating each address token by hand.  If no fname and no servers,
 * falls back to 127.0.0.1 per resolv.conf(5). */
1051 ub_ctx_resolvconf(struct ub_ctx* ctx, const char* fname)
1060 #if !defined(UB_ON_WINDOWS) || !defined(HAVE_WINDOWS_H)
1061 fname = "/etc/resolv.conf";
1064 ULONG buflen = sizeof(*info);
1065 IP_ADDR_STRING *ptr;
1067 info = (FIXED_INFO *) malloc(sizeof (FIXED_INFO));
/* grow the buffer if the first call reports overflow */
1071 if (GetNetworkParams(info, &buflen) == ERROR_BUFFER_OVERFLOW) {
1073 info = (FIXED_INFO *) malloc(buflen);
1078 if (GetNetworkParams(info, &buflen) == NO_ERROR) {
1080 ptr = &(info->DnsServerList);
1083 if((retval=ub_ctx_set_fwd(ctx,
1084 ptr->IpAddress.String))!=0) {
1097 #endif /* WINDOWS */
1099 in = fopen(fname, "r");
1101 /* error in errno! perror(fname) */
1104 while(fgets(buf, (int)sizeof(buf), in)) {
1105 buf[sizeof(buf)-1] = 0;
1107 while(*parse == ' ' || *parse == '\t')
1109 if(strncmp(parse, "nameserver", 10) == 0) {
1111 parse += 10; /* skip 'nameserver' */
1112 /* skip whitespace */
1113 while(*parse == ' ' || *parse == '\t')
1116 /* skip [0-9a-fA-F.:]*, i.e. IP4 and IP6 address */
1117 while(isxdigit((unsigned char)*parse) || *parse=='.' || *parse==':')
1119 /* terminate after the address, remove newline */
1122 if((r = ub_ctx_set_fwd(ctx, addr)) != UB_NOERROR) {
1130 /* from resolv.conf(5) if none given, use localhost */
1131 return ub_ctx_set_fwd(ctx, "127.0.0.1");
/* Load /etc/hosts-style entries as local data.  On Windows it recurses
 * on the two well-known hosts locations under %WINDIR%.  Each line is
 * parsed as <addr> <name>... ; every name yields a "name AAAA|A addr"
 * local-data string inserted into cfg->local_data under cfglock.
 * Refused after finalize.  Several continue/cleanup lines elided. */
1137 ub_ctx_hosts(struct ub_ctx* ctx, const char* fname)
1140 char buf[1024], ldata[1024];
1141 char* parse, *addr, *name, *ins;
1142 lock_basic_lock(&ctx->cfglock);
1143 if(ctx->finalized) {
1144 lock_basic_unlock(&ctx->cfglock);
1146 return UB_AFTERFINAL;
1148 lock_basic_unlock(&ctx->cfglock);
1150 #if defined(UB_ON_WINDOWS) && defined(HAVE_WINDOWS_H)
1152 * If this is Windows NT/XP/2K it's in
1153 * %WINDIR%\system32\drivers\etc\hosts.
1154 * If this is Windows 95/98/Me it's in %WINDIR%\hosts.
1156 name = getenv("WINDIR");
1159 snprintf(buf, sizeof(buf), "%s%s", name,
1160 "\\system32\\drivers\\etc\\hosts");
1161 if((retval=ub_ctx_hosts(ctx, buf)) !=0 ) {
1162 snprintf(buf, sizeof(buf), "%s%s", name,
1164 retval=ub_ctx_hosts(ctx, buf);
1170 fname = "/etc/hosts";
1173 in = fopen(fname, "r");
1175 /* error in errno! perror(fname) */
1178 while(fgets(buf, (int)sizeof(buf), in)) {
1179 buf[sizeof(buf)-1] = 0;
1181 while(*parse == ' ' || *parse == '\t')
1184 continue; /* skip comment */
1185 /* format: <addr> spaces <name> spaces <name> ... */
1188 while(isxdigit((unsigned char)*parse) || *parse == '.' || *parse == ':')
1192 if(*parse == '\n' || *parse == 0)
1195 continue; /* ignore macOSX fe80::1%lo0 localhost */
1196 if(*parse != ' ' && *parse != '\t') {
1197 /* must have whitespace after address */
1202 *parse++ = 0; /* end delimiter for addr ... */
1203 /* go to names and add them */
1205 while(*parse == ' ' || *parse == '\t' || *parse=='\n'
1208 if(*parse == 0 || *parse == '#')
1210 /* skip name, allows (too) many printable characters */
1212 while('!' <= *parse && *parse <= '~')
1215 *parse++ = 0; /* end delimiter for name */
1216 snprintf(ldata, sizeof(ldata), "%s %s %s",
1217 name, str_is_ip6(addr)?"AAAA":"A", addr);
1218 ins = strdup(ldata);
1225 lock_basic_lock(&ctx->cfglock);
1226 if(!cfg_strlist_insert(&ctx->env->cfg->local_data,
1228 lock_basic_unlock(&ctx->cfglock);
1233 lock_basic_unlock(&ctx->cfglock);
1240 /** finalize the context, if not already finalized */
/* Idempotent helper used by the local-zone/data API below: runs
 * context_finalize() at most once, under cfglock. */
1241 static int ub_ctx_finalize(struct ub_ctx* ctx)
1244 lock_basic_lock(&ctx->cfglock);
1245 if (!ctx->finalized) {
1246 res = context_finalize(ctx);
1248 lock_basic_unlock(&ctx->cfglock);
1252 /* Print local zones and RR data */
/* Requires a finalized context so ctx->local_zones exists. */
1253 int ub_ctx_print_local_zones(struct ub_ctx* ctx)
1255 int res = ub_ctx_finalize(ctx);
1256 if (res) return res;
1258 local_zones_print(ctx->local_zones);
1263 /* Add a new zone */
1264 int ub_ctx_zone_add(struct ub_ctx* ctx, const char *zone_name,
1265 const char *zone_type)
1267 enum localzone_type t;
1268 struct local_zone* z;
1273 int res = ub_ctx_finalize(ctx);
1274 if (res) return res;
1276 if(!local_zone_str2type(zone_type, &t)) {
1280 if(!parse_dname(zone_name, &nm, &nmlen, &nmlabs)) {
1284 lock_rw_wrlock(&ctx->local_zones->lock);
1285 if((z=local_zones_find(ctx->local_zones, nm, nmlen, nmlabs,
1286 LDNS_RR_CLASS_IN))) {
1287 /* already present in tree */
1288 lock_rw_wrlock(&z->lock);
1289 z->type = t; /* update type anyway */
1290 lock_rw_unlock(&z->lock);
1291 lock_rw_unlock(&ctx->local_zones->lock);
1295 if(!local_zones_add_zone(ctx->local_zones, nm, nmlen, nmlabs,
1296 LDNS_RR_CLASS_IN, t)) {
1297 lock_rw_unlock(&ctx->local_zones->lock);
1300 lock_rw_unlock(&ctx->local_zones->lock);
/* Remove a local zone by name (IN class); absent zones are a no-op.
 * Block starts at line 1264's sibling; signature at 1305. */
1305 int ub_ctx_zone_remove(struct ub_ctx* ctx, const char *zone_name)
1307 struct local_zone* z;
1312 int res = ub_ctx_finalize(ctx);
1313 if (res) return res;
1315 if(!parse_dname(zone_name, &nm, &nmlen, &nmlabs)) {
1319 lock_rw_wrlock(&ctx->local_zones->lock);
1320 if((z=local_zones_find(ctx->local_zones, nm, nmlen, nmlabs,
1321 LDNS_RR_CLASS_IN))) {
1322 /* present in tree */
1323 local_zones_del_zone(ctx->local_zones, z);
1325 lock_rw_unlock(&ctx->local_zones->lock);
1330 /* Add new RR data */
1331 int ub_ctx_data_add(struct ub_ctx* ctx, const char *data)
1333 int res = ub_ctx_finalize(ctx);
1334 if (res) return res;
1336 res = local_zones_add_RR(ctx->local_zones, data);
1337 return (!res) ? UB_NOMEM : UB_NOERROR;
1340 /* Remove RR data */
1341 int ub_ctx_data_remove(struct ub_ctx* ctx, const char *data)
1346 int res = ub_ctx_finalize(ctx);
1347 if (res) return res;
1349 if(!parse_dname(data, &nm, &nmlen, &nmlabs))
1352 local_zones_del_data(ctx->local_zones, nm, nmlen, nmlabs,
/* Return the compiled-in package version string. */
1359 const char* ub_version(void)
1361 return PACKAGE_VERSION;
/* Swap in a new libevent base for an event-driven context: validate
 * arguments, short-circuit when the base is unchanged, then under
 * cfglock delete the old event worker and install the wrapped base.
 * Returns UB_INITFAIL when the wrap fails, UB_NOERROR otherwise. */
1365 ub_ctx_set_event(struct ub_ctx* ctx, struct event_base* base) {
1366 struct ub_event_base* new_base;
1368 if (!ctx || !ctx->event_base || !base) {
/* same base already installed: nothing to do (return elided) */
1371 if (ub_libevent_get_event_base(ctx->event_base) == base) {
1376 lock_basic_lock(&ctx->cfglock);
1377 /* destroy the current worker - safe to pass in NULL */
1378 libworker_delete_event(ctx->event_worker);
1379 ctx->event_worker = NULL;
1380 new_base = ub_libevent_event_base(base);
1382 ctx->event_base = new_base;
1383 ctx->created_bg = 0;
1385 lock_basic_unlock(&ctx->cfglock);
1386 return new_base ? UB_NOERROR : UB_INITFAIL;