2 * unbound.c - unbound validating resolver public API implementation
4 * Copyright (c) 2007, NLnet Labs. All rights reserved.
6 * This software is open source.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * Redistributions of source code must retain the above copyright notice,
13 * this list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 * this list of conditions and the following disclaimer in the documentation
17 * and/or other materials provided with the distribution.
19 * Neither the name of the NLNET LABS nor the names of its contributors may
20 * be used to endorse or promote products derived from this software without
21 * specific prior written permission.
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
29 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
30 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
31 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
32 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 * This file contains functions to resolve DNS queries and
40 * validate the answers. Synchronously and asynchronously.
44 /* include the public api first, it should be able to stand alone */
45 #include "libunbound/unbound.h"
46 #include "libunbound/unbound-event.h"
49 #include "libunbound/context.h"
50 #include "libunbound/libworker.h"
51 #include "util/locks.h"
52 #include "util/config_file.h"
53 #include "util/alloc.h"
54 #include "util/module.h"
55 #include "util/regional.h"
57 #include "util/random.h"
58 #include "util/net_help.h"
59 #include "util/tube.h"
60 #include "services/modstack.h"
61 #include "services/localzone.h"
62 #include "services/cache/infra.h"
63 #include "services/cache/rrset.h"
64 #include "sldns/sbuffer.h"
68 #ifdef HAVE_SYS_WAIT_H
75 #if defined(UB_ON_WINDOWS) && defined (HAVE_WINDOWS_H)
78 #endif /* UB_ON_WINDOWS */
80 /** create context functionality, but no pipes */
81 static struct ub_ctx* ub_ctx_create_nopipe(void)
90 log_init(NULL, 0, NULL); /* logs to stderr */
91 log_ident_set("libunbound");
93 if((r = WSAStartup(MAKEWORD(2,2), &wsa_data)) != 0) {
94 log_err("could not init winsock. WSAStartup: %s",
99 verbosity = 0; /* errors only */
101 ctx = (struct ub_ctx*)calloc(1, sizeof(*ctx));
106 alloc_init(&ctx->superalloc, NULL, 0);
107 seed = (unsigned int)time(NULL) ^ (unsigned int)getpid();
108 if(!(ctx->seed_rnd = ub_initstate(seed, NULL))) {
110 ub_randfree(ctx->seed_rnd);
116 lock_basic_init(&ctx->qqpipe_lock);
117 lock_basic_init(&ctx->rrpipe_lock);
118 lock_basic_init(&ctx->cfglock);
119 ctx->env = (struct module_env*)calloc(1, sizeof(*ctx->env));
121 ub_randfree(ctx->seed_rnd);
126 ctx->env->cfg = config_create_forlib();
129 ub_randfree(ctx->seed_rnd);
134 ctx->env->alloc = &ctx->superalloc;
135 ctx->env->worker = NULL;
136 ctx->env->need_to_validate = 0;
137 modstack_init(&ctx->mods);
138 rbtree_init(&ctx->queries, &context_query_cmp);
145 struct ub_ctx* ctx = ub_ctx_create_nopipe();
148 if((ctx->qq_pipe = tube_create()) == NULL) {
150 ub_randfree(ctx->seed_rnd);
151 config_delete(ctx->env->cfg);
152 modstack_desetup(&ctx->mods, ctx->env);
158 if((ctx->rr_pipe = tube_create()) == NULL) {
160 tube_delete(ctx->qq_pipe);
161 ub_randfree(ctx->seed_rnd);
162 config_delete(ctx->env->cfg);
163 modstack_desetup(&ctx->mods, ctx->env);
173 ub_ctx_create_event(struct event_base* eb)
175 struct ub_ctx* ctx = ub_ctx_create_nopipe();
178 /* no pipes, but we have the locks to make sure everything works */
180 ctx->dothread = 1; /* the processing is in the same process,
181 makes ub_cancel and ub_ctx_delete do the right thing */
182 ctx->event_base = eb;
188 delq(rbnode_t* n, void* ATTR_UNUSED(arg))
190 struct ctx_query* q = (struct ctx_query*)n;
191 context_query_delete(q);
194 /** stop the bg thread */
195 static void ub_stop_bg(struct ub_ctx* ctx)
197 /* stop the bg thread */
198 lock_basic_lock(&ctx->cfglock);
199 if(ctx->created_bg) {
202 uint32_t cmd = UB_LIBCMD_QUIT;
203 lock_basic_unlock(&ctx->cfglock);
204 lock_basic_lock(&ctx->qqpipe_lock);
205 (void)tube_write_msg(ctx->qq_pipe, (uint8_t*)&cmd,
206 (uint32_t)sizeof(cmd), 0);
207 lock_basic_unlock(&ctx->qqpipe_lock);
208 lock_basic_lock(&ctx->rrpipe_lock);
209 while(tube_read_msg(ctx->rr_pipe, &msg, &len, 0)) {
210 /* discard all results except a quit confirm */
211 if(context_serial_getcmd(msg, len) == UB_LIBCMD_QUIT) {
217 lock_basic_unlock(&ctx->rrpipe_lock);
219 /* if bg worker is a thread, wait for it to exit, so that all
220 * resources are really gone. */
221 lock_basic_lock(&ctx->cfglock);
223 lock_basic_unlock(&ctx->cfglock);
224 ub_thread_join(ctx->bg_tid);
226 lock_basic_unlock(&ctx->cfglock);
227 #ifndef UB_ON_WINDOWS
228 if(waitpid(ctx->bg_pid, NULL, 0) == -1) {
230 log_err("waitpid: %s", strerror(errno));
236 lock_basic_unlock(&ctx->cfglock);
241 ub_ctx_delete(struct ub_ctx* ctx)
243 struct alloc_cache* a, *na;
247 /* see if bg thread is created and if threads have been killed */
248 /* no locks, because those may be held by terminated threads */
249 /* for processes the read pipe is closed and we see that on read */
251 if(ctx->created_bg && ctx->dothread) {
252 if(pthread_kill(ctx->bg_tid, 0) == ESRCH) {
253 /* thread has been killed */
257 #endif /* HAVE_PTHREAD */
260 libworker_delete_event(ctx->event_worker);
262 modstack_desetup(&ctx->mods, ctx->env);
266 a->super = &ctx->superalloc;
271 local_zones_delete(ctx->local_zones);
272 lock_basic_destroy(&ctx->qqpipe_lock);
273 lock_basic_destroy(&ctx->rrpipe_lock);
274 lock_basic_destroy(&ctx->cfglock);
275 tube_delete(ctx->qq_pipe);
276 tube_delete(ctx->rr_pipe);
278 slabhash_delete(ctx->env->msg_cache);
279 rrset_cache_delete(ctx->env->rrset_cache);
280 infra_delete(ctx->env->infra_cache);
281 config_delete(ctx->env->cfg);
284 ub_randfree(ctx->seed_rnd);
285 alloc_clear(&ctx->superalloc);
286 traverse_postorder(&ctx->queries, delq, NULL);
294 ub_ctx_set_option(struct ub_ctx* ctx, const char* opt, const char* val)
296 lock_basic_lock(&ctx->cfglock);
298 lock_basic_unlock(&ctx->cfglock);
299 return UB_AFTERFINAL;
301 if(!config_set_option(ctx->env->cfg, opt, val)) {
302 lock_basic_unlock(&ctx->cfglock);
305 lock_basic_unlock(&ctx->cfglock);
310 ub_ctx_get_option(struct ub_ctx* ctx, const char* opt, char** str)
313 lock_basic_lock(&ctx->cfglock);
314 r = config_get_option_collate(ctx->env->cfg, opt, str);
315 lock_basic_unlock(&ctx->cfglock);
316 if(r == 0) r = UB_NOERROR;
317 else if(r == 1) r = UB_SYNTAX;
318 else if(r == 2) r = UB_NOMEM;
323 ub_ctx_config(struct ub_ctx* ctx, const char* fname)
325 lock_basic_lock(&ctx->cfglock);
327 lock_basic_unlock(&ctx->cfglock);
328 return UB_AFTERFINAL;
330 if(!config_read(ctx->env->cfg, fname, NULL)) {
331 lock_basic_unlock(&ctx->cfglock);
334 lock_basic_unlock(&ctx->cfglock);
339 ub_ctx_add_ta(struct ub_ctx* ctx, const char* ta)
341 char* dup = strdup(ta);
342 if(!dup) return UB_NOMEM;
343 lock_basic_lock(&ctx->cfglock);
345 lock_basic_unlock(&ctx->cfglock);
347 return UB_AFTERFINAL;
349 if(!cfg_strlist_insert(&ctx->env->cfg->trust_anchor_list, dup)) {
350 lock_basic_unlock(&ctx->cfglock);
354 lock_basic_unlock(&ctx->cfglock);
359 ub_ctx_add_ta_file(struct ub_ctx* ctx, const char* fname)
361 char* dup = strdup(fname);
362 if(!dup) return UB_NOMEM;
363 lock_basic_lock(&ctx->cfglock);
365 lock_basic_unlock(&ctx->cfglock);
367 return UB_AFTERFINAL;
369 if(!cfg_strlist_insert(&ctx->env->cfg->trust_anchor_file_list, dup)) {
370 lock_basic_unlock(&ctx->cfglock);
374 lock_basic_unlock(&ctx->cfglock);
378 int ub_ctx_add_ta_autr(struct ub_ctx* ctx, const char* fname)
380 char* dup = strdup(fname);
381 if(!dup) return UB_NOMEM;
382 lock_basic_lock(&ctx->cfglock);
384 lock_basic_unlock(&ctx->cfglock);
386 return UB_AFTERFINAL;
388 if(!cfg_strlist_insert(&ctx->env->cfg->auto_trust_anchor_file_list,
390 lock_basic_unlock(&ctx->cfglock);
394 lock_basic_unlock(&ctx->cfglock);
399 ub_ctx_trustedkeys(struct ub_ctx* ctx, const char* fname)
401 char* dup = strdup(fname);
402 if(!dup) return UB_NOMEM;
403 lock_basic_lock(&ctx->cfglock);
405 lock_basic_unlock(&ctx->cfglock);
407 return UB_AFTERFINAL;
409 if(!cfg_strlist_insert(&ctx->env->cfg->trusted_keys_file_list, dup)) {
410 lock_basic_unlock(&ctx->cfglock);
414 lock_basic_unlock(&ctx->cfglock);
419 ub_ctx_debuglevel(struct ub_ctx* ctx, int d)
421 lock_basic_lock(&ctx->cfglock);
423 ctx->env->cfg->verbosity = d;
424 lock_basic_unlock(&ctx->cfglock);
428 int ub_ctx_debugout(struct ub_ctx* ctx, void* out)
430 lock_basic_lock(&ctx->cfglock);
431 log_file((FILE*)out);
432 ctx->logfile_override = 1;
434 lock_basic_unlock(&ctx->cfglock);
439 ub_ctx_async(struct ub_ctx* ctx, int dothread)
441 #ifdef THREADS_DISABLED
442 if(dothread) /* cannot do threading */
445 lock_basic_lock(&ctx->cfglock);
447 lock_basic_unlock(&ctx->cfglock);
448 return UB_AFTERFINAL;
450 ctx->dothread = dothread;
451 lock_basic_unlock(&ctx->cfglock);
456 ub_poll(struct ub_ctx* ctx)
458 /* no need to hold lock while testing for readability. */
459 return tube_poll(ctx->rr_pipe);
463 ub_fd(struct ub_ctx* ctx)
465 return tube_read_fd(ctx->rr_pipe);
468 /** process answer from bg worker */
470 process_answer_detail(struct ub_ctx* ctx, uint8_t* msg, uint32_t len,
471 ub_callback_t* cb, void** cbarg, int* err,
472 struct ub_result** res)
475 if(context_serial_getcmd(msg, len) != UB_LIBCMD_ANSWER) {
476 log_err("error: bad data from bg worker %d",
477 (int)context_serial_getcmd(msg, len));
481 lock_basic_lock(&ctx->cfglock);
482 q = context_deserialize_answer(ctx, msg, len, err);
484 lock_basic_unlock(&ctx->cfglock);
485 /* probably simply the lookup that failed, i.e.
486 * response returned before cancel was sent out, so noerror */
489 log_assert(q->async);
491 /* grab cb while locked */
501 ub_resolve_free(q->res);
503 /* parse the message, extract rcode, fill result */
504 sldns_buffer* buf = sldns_buffer_new(q->msg_len);
505 struct regional* region = regional_create();
507 (*res)->rcode = LDNS_RCODE_SERVFAIL;
509 sldns_buffer_clear(buf);
510 sldns_buffer_write(buf, q->msg, q->msg_len);
511 sldns_buffer_flip(buf);
512 libworker_enter_result(*res, buf, region,
515 (*res)->answer_packet = q->msg;
516 (*res)->answer_len = (int)q->msg_len;
518 sldns_buffer_free(buf);
519 regional_destroy(region);
522 /* delete the q from list */
523 (void)rbtree_delete(&ctx->queries, q->node.key);
525 context_query_delete(q);
526 lock_basic_unlock(&ctx->cfglock);
529 ub_resolve_free(*res);
533 /** process answer from bg worker */
535 process_answer(struct ub_ctx* ctx, uint8_t* msg, uint32_t len)
540 struct ub_result* res;
543 r = process_answer_detail(ctx, msg, len, &cb, &cbarg, &err, &res);
545 /* no locks held while calling callback, so that library is
548 (*cb)(cbarg, err, res);
554 ub_process(struct ub_ctx* ctx)
561 lock_basic_lock(&ctx->rrpipe_lock);
562 r = tube_read_msg(ctx->rr_pipe, &msg, &len, 1);
563 lock_basic_unlock(&ctx->rrpipe_lock);
568 if(!process_answer(ctx, msg, len)) {
578 ub_wait(struct ub_ctx* ctx)
583 struct ub_result* res;
587 /* this is basically the same loop as _process(), but with changes.
588 * holds the rrpipe lock and waits with tube_wait */
590 lock_basic_lock(&ctx->rrpipe_lock);
591 lock_basic_lock(&ctx->cfglock);
592 if(ctx->num_async == 0) {
593 lock_basic_unlock(&ctx->cfglock);
594 lock_basic_unlock(&ctx->rrpipe_lock);
597 lock_basic_unlock(&ctx->cfglock);
599 /* keep rrpipe locked, while
600 * o waiting for pipe readable
602 * o possibly decrementing num_async
603 * do callback without lock
605 r = tube_wait(ctx->rr_pipe);
607 r = tube_read_msg(ctx->rr_pipe, &msg, &len, 1);
609 lock_basic_unlock(&ctx->rrpipe_lock);
613 lock_basic_unlock(&ctx->rrpipe_lock);
616 r = process_answer_detail(ctx, msg, len,
617 &cb, &cbarg, &err, &res);
618 lock_basic_unlock(&ctx->rrpipe_lock);
623 (*cb)(cbarg, err, res);
625 lock_basic_unlock(&ctx->rrpipe_lock);
632 ub_resolve(struct ub_ctx* ctx, const char* name, int rrtype,
633 int rrclass, struct ub_result** result)
639 lock_basic_lock(&ctx->cfglock);
640 if(!ctx->finalized) {
641 r = context_finalize(ctx);
643 lock_basic_unlock(&ctx->cfglock);
647 /* create new ctx_query and attempt to add to the list */
648 lock_basic_unlock(&ctx->cfglock);
649 q = context_new(ctx, name, rrtype, rrclass, NULL, NULL);
652 /* become a resolver thread for a bit */
654 r = libworker_fg(ctx, q);
656 lock_basic_lock(&ctx->cfglock);
657 (void)rbtree_delete(&ctx->queries, q->node.key);
658 context_query_delete(q);
659 lock_basic_unlock(&ctx->cfglock);
662 q->res->answer_packet = q->msg;
663 q->res->answer_len = (int)q->msg_len;
668 lock_basic_lock(&ctx->cfglock);
669 (void)rbtree_delete(&ctx->queries, q->node.key);
670 context_query_delete(q);
671 lock_basic_unlock(&ctx->cfglock);
676 ub_resolve_event(struct ub_ctx* ctx, const char* name, int rrtype,
677 int rrclass, void* mydata, ub_event_callback_t callback, int* async_id)
684 lock_basic_lock(&ctx->cfglock);
685 if(!ctx->finalized) {
686 int r = context_finalize(ctx);
688 lock_basic_unlock(&ctx->cfglock);
692 lock_basic_unlock(&ctx->cfglock);
693 if(!ctx->event_worker) {
694 ctx->event_worker = libworker_create_event(ctx,
696 if(!ctx->event_worker) {
701 /* create new ctx_query and attempt to add to the list */
702 q = context_new(ctx, name, rrtype, rrclass, (ub_callback_t)callback,
708 if((r=libworker_attach_mesh(ctx, q, async_id)) != 0)
715 ub_resolve_async(struct ub_ctx* ctx, const char* name, int rrtype,
716 int rrclass, void* mydata, ub_callback_t callback, int* async_id)
724 lock_basic_lock(&ctx->cfglock);
725 if(!ctx->finalized) {
726 int r = context_finalize(ctx);
728 lock_basic_unlock(&ctx->cfglock);
732 if(!ctx->created_bg) {
735 lock_basic_unlock(&ctx->cfglock);
736 r = libworker_bg(ctx);
738 lock_basic_lock(&ctx->cfglock);
740 lock_basic_unlock(&ctx->cfglock);
744 lock_basic_unlock(&ctx->cfglock);
747 /* create new ctx_query and attempt to add to the list */
748 q = context_new(ctx, name, rrtype, rrclass, callback, mydata);
752 /* write over pipe to background worker */
753 lock_basic_lock(&ctx->cfglock);
754 msg = context_serialize_new_query(q, &len);
756 (void)rbtree_delete(&ctx->queries, q->node.key);
758 context_query_delete(q);
759 lock_basic_unlock(&ctx->cfglock);
763 *async_id = q->querynum;
764 lock_basic_unlock(&ctx->cfglock);
766 lock_basic_lock(&ctx->qqpipe_lock);
767 if(!tube_write_msg(ctx->qq_pipe, msg, len, 0)) {
768 lock_basic_unlock(&ctx->qqpipe_lock);
772 lock_basic_unlock(&ctx->qqpipe_lock);
778 ub_cancel(struct ub_ctx* ctx, int async_id)
783 lock_basic_lock(&ctx->cfglock);
784 q = (struct ctx_query*)rbtree_search(&ctx->queries, &async_id);
785 if(!q || !q->async) {
786 /* it is not there, so nothing to do */
787 lock_basic_unlock(&ctx->cfglock);
790 log_assert(q->async);
794 if(!ctx->dothread) { /* if forked */
795 (void)rbtree_delete(&ctx->queries, q->node.key);
797 msg = context_serialize_cancel(q, &len);
798 context_query_delete(q);
799 lock_basic_unlock(&ctx->cfglock);
803 /* send cancel to background worker */
804 lock_basic_lock(&ctx->qqpipe_lock);
805 if(!tube_write_msg(ctx->qq_pipe, msg, len, 0)) {
806 lock_basic_unlock(&ctx->qqpipe_lock);
810 lock_basic_unlock(&ctx->qqpipe_lock);
813 lock_basic_unlock(&ctx->cfglock);
819 ub_resolve_free(struct ub_result* result)
824 if(result->canonname != result->qname)
825 free(result->canonname);
827 for(p = result->data; *p; p++)
831 free(result->answer_packet);
832 free(result->why_bogus);
840 case UB_NOERROR: return "no error";
841 case UB_SOCKET: return "socket io error";
842 case UB_NOMEM: return "out of memory";
843 case UB_SYNTAX: return "syntax error";
844 case UB_SERVFAIL: return "server failure";
845 case UB_FORKFAIL: return "could not fork";
846 case UB_INITFAIL: return "initialization failure";
847 case UB_AFTERFINAL: return "setting change after finalize";
848 case UB_PIPE: return "error in pipe communication with async";
849 case UB_READFILE: return "error reading file";
850 case UB_NOID: return "error async_id does not exist";
851 default: return "unknown error";
856 ub_ctx_set_fwd(struct ub_ctx* ctx, const char* addr)
858 struct sockaddr_storage storage;
860 struct config_stub* s;
862 lock_basic_lock(&ctx->cfglock);
864 lock_basic_unlock(&ctx->cfglock);
866 return UB_AFTERFINAL;
869 /* disable fwd mode - the root stub should be first. */
870 if(ctx->env->cfg->forwards &&
871 strcmp(ctx->env->cfg->forwards->name, ".") == 0) {
872 s = ctx->env->cfg->forwards;
873 ctx->env->cfg->forwards = s->next;
877 lock_basic_unlock(&ctx->cfglock);
880 lock_basic_unlock(&ctx->cfglock);
882 /* check syntax for addr */
883 if(!extstrtoaddr(addr, &storage, &stlen)) {
888 /* it parses, add root stub in front of list */
889 lock_basic_lock(&ctx->cfglock);
890 if(!ctx->env->cfg->forwards ||
891 strcmp(ctx->env->cfg->forwards->name, ".") != 0) {
892 s = calloc(1, sizeof(*s));
894 lock_basic_unlock(&ctx->cfglock);
898 s->name = strdup(".");
901 lock_basic_unlock(&ctx->cfglock);
905 s->next = ctx->env->cfg->forwards;
906 ctx->env->cfg->forwards = s;
908 log_assert(ctx->env->cfg->forwards);
909 s = ctx->env->cfg->forwards;
913 lock_basic_unlock(&ctx->cfglock);
917 if(!cfg_strlist_insert(&s->addrs, dupl)) {
919 lock_basic_unlock(&ctx->cfglock);
923 lock_basic_unlock(&ctx->cfglock);
928 ub_ctx_resolvconf(struct ub_ctx* ctx, const char* fname)
937 #if !defined(UB_ON_WINDOWS) || !defined(HAVE_WINDOWS_H)
938 fname = "/etc/resolv.conf";
941 ULONG buflen = sizeof(*info);
944 info = (FIXED_INFO *) malloc(sizeof (FIXED_INFO));
948 if (GetNetworkParams(info, &buflen) == ERROR_BUFFER_OVERFLOW) {
950 info = (FIXED_INFO *) malloc(buflen);
955 if (GetNetworkParams(info, &buflen) == NO_ERROR) {
957 ptr = &(info->DnsServerList);
960 if((retval=ub_ctx_set_fwd(ctx,
961 ptr->IpAddress.String))!=0) {
976 in = fopen(fname, "r");
978 /* error in errno! perror(fname) */
981 while(fgets(buf, (int)sizeof(buf), in)) {
982 buf[sizeof(buf)-1] = 0;
984 while(*parse == ' ' || *parse == '\t')
986 if(strncmp(parse, "nameserver", 10) == 0) {
988 parse += 10; /* skip 'nameserver' */
989 /* skip whitespace */
990 while(*parse == ' ' || *parse == '\t')
993 /* skip [0-9a-fA-F.:]*, i.e. IP4 and IP6 address */
994 while(isxdigit((unsigned char)*parse) || *parse=='.' || *parse==':')
996 /* terminate after the address, remove newline */
999 if((r = ub_ctx_set_fwd(ctx, addr)) != UB_NOERROR) {
1007 /* from resolv.conf(5) if none given, use localhost */
1008 return ub_ctx_set_fwd(ctx, "127.0.0.1");
1014 ub_ctx_hosts(struct ub_ctx* ctx, const char* fname)
1017 char buf[1024], ldata[1024];
1018 char* parse, *addr, *name, *ins;
1019 lock_basic_lock(&ctx->cfglock);
1020 if(ctx->finalized) {
1021 lock_basic_unlock(&ctx->cfglock);
1023 return UB_AFTERFINAL;
1025 lock_basic_unlock(&ctx->cfglock);
1027 #if defined(UB_ON_WINDOWS) && defined(HAVE_WINDOWS_H)
1029 * If this is Windows NT/XP/2K it's in
1030 * %WINDIR%\system32\drivers\etc\hosts.
1031 * If this is Windows 95/98/Me it's in %WINDIR%\hosts.
1033 name = getenv("WINDIR");
1036 snprintf(buf, sizeof(buf), "%s%s", name,
1037 "\\system32\\drivers\\etc\\hosts");
1038 if((retval=ub_ctx_hosts(ctx, buf)) !=0 ) {
1039 snprintf(buf, sizeof(buf), "%s%s", name,
1041 retval=ub_ctx_hosts(ctx, buf);
1047 fname = "/etc/hosts";
1050 in = fopen(fname, "r");
1052 /* error in errno! perror(fname) */
1055 while(fgets(buf, (int)sizeof(buf), in)) {
1056 buf[sizeof(buf)-1] = 0;
1058 while(*parse == ' ' || *parse == '\t')
1061 continue; /* skip comment */
1062 /* format: <addr> spaces <name> spaces <name> ... */
1065 while(isxdigit((unsigned char)*parse) || *parse == '.' || *parse == ':')
1069 if(*parse == '\n' || *parse == 0)
1072 continue; /* ignore macOSX fe80::1%lo0 localhost */
1073 if(*parse != ' ' && *parse != '\t') {
1074 /* must have whitespace after address */
1079 *parse++ = 0; /* end delimiter for addr ... */
1080 /* go to names and add them */
1082 while(*parse == ' ' || *parse == '\t' || *parse=='\n'
1085 if(*parse == 0 || *parse == '#')
1087 /* skip name, allows (too) many printable characters */
1089 while('!' <= *parse && *parse <= '~')
1092 *parse++ = 0; /* end delimiter for name */
1093 snprintf(ldata, sizeof(ldata), "%s %s %s",
1094 name, str_is_ip6(addr)?"AAAA":"A", addr);
1095 ins = strdup(ldata);
1102 lock_basic_lock(&ctx->cfglock);
1103 if(!cfg_strlist_insert(&ctx->env->cfg->local_data,
1105 lock_basic_unlock(&ctx->cfglock);
1111 lock_basic_unlock(&ctx->cfglock);
1118 /** finalize the context, if not already finalized */
1119 static int ub_ctx_finalize(struct ub_ctx* ctx)
1122 lock_basic_lock(&ctx->cfglock);
1123 if (!ctx->finalized) {
1124 res = context_finalize(ctx);
1126 lock_basic_unlock(&ctx->cfglock);
1130 /* Print local zones and RR data */
1131 int ub_ctx_print_local_zones(struct ub_ctx* ctx)
1133 int res = ub_ctx_finalize(ctx);
1134 if (res) return res;
1136 local_zones_print(ctx->local_zones);
1141 /* Add a new zone */
1142 int ub_ctx_zone_add(struct ub_ctx* ctx, const char *zone_name,
1143 const char *zone_type)
1145 enum localzone_type t;
1146 struct local_zone* z;
1151 int res = ub_ctx_finalize(ctx);
1152 if (res) return res;
1154 if(!local_zone_str2type(zone_type, &t)) {
1158 if(!parse_dname(zone_name, &nm, &nmlen, &nmlabs)) {
1162 lock_rw_wrlock(&ctx->local_zones->lock);
1163 if((z=local_zones_find(ctx->local_zones, nm, nmlen, nmlabs,
1164 LDNS_RR_CLASS_IN))) {
1165 /* already present in tree */
1166 lock_rw_wrlock(&z->lock);
1167 z->type = t; /* update type anyway */
1168 lock_rw_unlock(&z->lock);
1169 lock_rw_unlock(&ctx->local_zones->lock);
1173 if(!local_zones_add_zone(ctx->local_zones, nm, nmlen, nmlabs,
1174 LDNS_RR_CLASS_IN, t)) {
1175 lock_rw_unlock(&ctx->local_zones->lock);
1178 lock_rw_unlock(&ctx->local_zones->lock);
1183 int ub_ctx_zone_remove(struct ub_ctx* ctx, const char *zone_name)
1185 struct local_zone* z;
1190 int res = ub_ctx_finalize(ctx);
1191 if (res) return res;
1193 if(!parse_dname(zone_name, &nm, &nmlen, &nmlabs)) {
1197 lock_rw_wrlock(&ctx->local_zones->lock);
1198 if((z=local_zones_find(ctx->local_zones, nm, nmlen, nmlabs,
1199 LDNS_RR_CLASS_IN))) {
1200 /* present in tree */
1201 local_zones_del_zone(ctx->local_zones, z);
1203 lock_rw_unlock(&ctx->local_zones->lock);
1208 /* Add new RR data */
1209 int ub_ctx_data_add(struct ub_ctx* ctx, const char *data)
1211 int res = ub_ctx_finalize(ctx);
1212 if (res) return res;
1214 res = local_zones_add_RR(ctx->local_zones, data);
1215 return (!res) ? UB_NOMEM : UB_NOERROR;
1218 /* Remove RR data */
1219 int ub_ctx_data_remove(struct ub_ctx* ctx, const char *data)
1224 int res = ub_ctx_finalize(ctx);
1225 if (res) return res;
1227 if(!parse_dname(data, &nm, &nmlen, &nmlabs))
1230 local_zones_del_data(ctx->local_zones, nm, nmlen, nmlabs,
1237 const char* ub_version(void)
1239 return PACKAGE_VERSION;
1243 ub_ctx_set_event(struct ub_ctx* ctx, struct event_base* base) {
1244 if (!ctx || !ctx->event_base || !base) {
1247 if (ctx->event_base == base) {
1252 lock_basic_lock(&ctx->cfglock);
1253 /* destroy the current worker - safe to pass in NULL */
1254 libworker_delete_event(ctx->event_worker);
1255 ctx->event_worker = NULL;
1256 ctx->event_base = base;
1257 ctx->created_bg = 0;
1259 lock_basic_unlock(&ctx->cfglock);