2 * unbound.c - unbound validating resolver public API implementation
4 * Copyright (c) 2007, NLnet Labs. All rights reserved.
6 * This software is open source.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * Redistributions of source code must retain the above copyright notice,
13 * this list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 * this list of conditions and the following disclaimer in the documentation
17 * and/or other materials provided with the distribution.
19 * Neither the name of the NLNET LABS nor the names of its contributors may
20 * be used to endorse or promote products derived from this software without
21 * specific prior written permission.
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
29 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
30 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
31 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
32 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 * This file contains functions to resolve DNS queries and
40 * validate the answers. Synchronously and asynchronously.
44 /* include the public api first, it should be able to stand alone */
45 #include "libunbound/unbound.h"
46 #include "libunbound/unbound-event.h"
49 #include "libunbound/context.h"
50 #include "libunbound/libworker.h"
51 #include "util/locks.h"
52 #include "util/config_file.h"
53 #include "util/alloc.h"
54 #include "util/module.h"
55 #include "util/regional.h"
57 #include "util/random.h"
58 #include "util/net_help.h"
59 #include "util/tube.h"
60 #include "services/modstack.h"
61 #include "services/localzone.h"
62 #include "services/cache/infra.h"
63 #include "services/cache/rrset.h"
64 #include "sldns/sbuffer.h"
68 #ifdef HAVE_SYS_WAIT_H
72 #if defined(UB_ON_WINDOWS) && defined (HAVE_WINDOWS_H)
75 #endif /* UB_ON_WINDOWS */
77 /** create context functionality, but no pipes */
/* Allocates a zeroed ub_ctx, initializes logging, allocator, RNG seed
 * state, locks, module env and config. Does not create the qq/rr tubes.
 * NOTE(review): sampled listing — error paths, returns and closing
 * braces between the numbered lines below are not shown. */
78 static struct ub_ctx* ub_ctx_create_nopipe(void)
87 	log_init(NULL, 0, NULL); /* logs to stderr */
88 	log_ident_set("libunbound");
/* Windows-only: winsock must be started before any socket use. */
90 	if((r = WSAStartup(MAKEWORD(2,2), &wsa_data)) != 0) {
91 		log_err("could not init winsock. WSAStartup: %s",
96 	verbosity = 0; /* errors only */
98 	ctx = (struct ub_ctx*)calloc(1, sizeof(*ctx));
103 	alloc_init(&ctx->superalloc, NULL, 0);
/* time ^ pid gives a per-process seed; entropy quality is not a goal here. */
104 	seed = (unsigned int)time(NULL) ^ (unsigned int)getpid();
105 	if(!(ctx->seed_rnd = ub_initstate(seed, NULL))) {
/* cleanup on init failure (surrounding error-path lines missing here) */
107 		ub_randfree(ctx->seed_rnd);
113 	lock_basic_init(&ctx->qqpipe_lock);
114 	lock_basic_init(&ctx->rrpipe_lock);
115 	lock_basic_init(&ctx->cfglock);
116 	ctx->env = (struct module_env*)calloc(1, sizeof(*ctx->env));
118 		ub_randfree(ctx->seed_rnd);
123 	ctx->env->cfg = config_create_forlib();
126 		ub_randfree(ctx->seed_rnd);
131 	ctx->env->alloc = &ctx->superalloc;
132 	ctx->env->worker = NULL;
133 	ctx->env->need_to_validate = 0;
134 	modstack_init(&ctx->mods);
135 	rbtree_init(&ctx->queries, &context_query_cmp);
/* NOTE(review): per the original numbering these lines belong to the body
 * of ub_ctx_create() (public constructor); its signature line was sampled
 * out of this listing. It builds the nopipe context, then creates the
 * query (qq) and result (rr) tubes, unwinding fully on failure. */
142 	struct ub_ctx* ctx = ub_ctx_create_nopipe();
145 	if((ctx->qq_pipe = tube_create()) == NULL) {
/* failure: tear down everything nopipe-create set up */
147 		ub_randfree(ctx->seed_rnd);
148 		config_delete(ctx->env->cfg);
149 		modstack_desetup(&ctx->mods, ctx->env);
155 	if((ctx->rr_pipe = tube_create()) == NULL) {
/* failure: also delete the qq pipe created just above */
157 		tube_delete(ctx->qq_pipe);
158 		ub_randfree(ctx->seed_rnd);
159 		config_delete(ctx->env->cfg);
160 		modstack_desetup(&ctx->mods, ctx->env);
/* Create a context that uses a caller-provided libevent event_base
 * instead of pipes/background worker. */
170 ub_ctx_create_event(struct event_base* eb)
172 	struct ub_ctx* ctx = ub_ctx_create_nopipe();
175 	/* no pipes, but we have the locks to make sure everything works */
177 	ctx->dothread = 1; /* the processing is in the same process,
178 		makes ub_cancel and ub_ctx_delete do the right thing */
179 	ctx->event_base = eb;
/* rbtree traverse helper: delete one queued ctx_query node.
 * arg is unused; used by traverse_postorder in ub_ctx_delete. */
185 delq(rbnode_t* n, void* ATTR_UNUSED(arg))
187 	struct ctx_query* q = (struct ctx_query*)n;
188 	context_query_delete(q);
191 /** stop the bg thread */
/* Sends UB_LIBCMD_QUIT over the qq pipe, drains the rr pipe until the
 * quit confirm arrives, then joins the thread (or waitpid()s the forked
 * process on non-Windows). NOTE(review): sampled listing — some
 * conditionals/braces between the numbered lines are not shown. */
192 static void ub_stop_bg(struct ub_ctx* ctx)
194 	/* stop the bg thread */
195 	lock_basic_lock(&ctx->cfglock);
196 	if(ctx->created_bg) {
199 		uint32_t cmd = UB_LIBCMD_QUIT;
/* cfglock released before pipe I/O to avoid holding it across writes */
200 		lock_basic_unlock(&ctx->cfglock);
201 		lock_basic_lock(&ctx->qqpipe_lock);
202 		(void)tube_write_msg(ctx->qq_pipe, (uint8_t*)&cmd,
203 			(uint32_t)sizeof(cmd), 0);
204 		lock_basic_unlock(&ctx->qqpipe_lock);
205 		lock_basic_lock(&ctx->rrpipe_lock);
206 		while(tube_read_msg(ctx->rr_pipe, &msg, &len, 0)) {
207 			/* discard all results except a quit confirm */
208 			if(context_serial_getcmd(msg, len) == UB_LIBCMD_QUIT) {
214 		lock_basic_unlock(&ctx->rrpipe_lock);
216 	/* if bg worker is a thread, wait for it to exit, so that all
217 	 * resources are really gone. */
218 	lock_basic_lock(&ctx->cfglock);
220 		lock_basic_unlock(&ctx->cfglock);
221 		ub_thread_join(ctx->bg_tid);
223 		lock_basic_unlock(&ctx->cfglock);
224 #ifndef UB_ON_WINDOWS
/* forked-process case: reap the child so no zombie remains */
225 		if(waitpid(ctx->bg_pid, NULL, 0) == -1) {
227 				log_err("waitpid: %s", strerror(errno));
233 	lock_basic_unlock(&ctx->cfglock);
/* Destroy a context: stop the background worker, free caches, config,
 * locks, tubes, RNG state and any outstanding queries.
 * NOTE(review): sampled listing — intermediate lines missing. */
238 ub_ctx_delete(struct ub_ctx* ctx)
240 	struct alloc_cache* a, *na;
244 	/* see if bg thread is created and if threads have been killed */
245 	/* no locks, because those may be held by terminated threads */
246 	/* for processes the read pipe is closed and we see that on read */
248 	if(ctx->created_bg && ctx->dothread) {
/* pthread_kill(sig 0) only probes existence; ESRCH = already gone */
249 		if(pthread_kill(ctx->bg_tid, 0) == ESRCH) {
250 			/* thread has been killed */
254 #endif /* HAVE_PTHREAD */
257 	libworker_delete_event(ctx->event_worker);
259 	modstack_desetup(&ctx->mods, ctx->env);
/* re-parent leftover per-thread alloc caches onto the superalloc */
263 		a->super = &ctx->superalloc;
268 	local_zones_delete(ctx->local_zones);
269 	lock_basic_destroy(&ctx->qqpipe_lock);
270 	lock_basic_destroy(&ctx->rrpipe_lock);
271 	lock_basic_destroy(&ctx->cfglock);
272 	tube_delete(ctx->qq_pipe);
273 	tube_delete(ctx->rr_pipe);
275 		slabhash_delete(ctx->env->msg_cache);
276 		rrset_cache_delete(ctx->env->rrset_cache);
277 		infra_delete(ctx->env->infra_cache);
278 		config_delete(ctx->env->cfg);
281 	ub_randfree(ctx->seed_rnd);
282 	alloc_clear(&ctx->superalloc);
/* postorder so children are freed before parents in the query tree */
283 	traverse_postorder(&ctx->queries, delq, NULL);
/* Set a single config option (opt, val strings) under cfglock.
 * Fails with UB_AFTERFINAL once the context has been finalized. */
291 ub_ctx_set_option(struct ub_ctx* ctx, const char* opt, const char* val)
293 	lock_basic_lock(&ctx->cfglock);
295 		lock_basic_unlock(&ctx->cfglock);
296 		return UB_AFTERFINAL;
298 	if(!config_set_option(ctx->env->cfg, opt, val)) {
299 		lock_basic_unlock(&ctx->cfglock);
302 	lock_basic_unlock(&ctx->cfglock);
/* Read back a config option into a newly allocated string (*str).
 * Maps config_get_option_collate's 0/1/2 codes to UB_* error codes. */
307 ub_ctx_get_option(struct ub_ctx* ctx, const char* opt, char** str)
310 	lock_basic_lock(&ctx->cfglock);
311 	r = config_get_option_collate(ctx->env->cfg, opt, str);
312 	lock_basic_unlock(&ctx->cfglock);
313 	if(r == 0) r = UB_NOERROR;
314 	else if(r == 1) r = UB_SYNTAX;
315 	else if(r == 2) r = UB_NOMEM;
/* Read a whole unbound config file into the context's config.
 * Disallowed after finalize (UB_AFTERFINAL). */
320 ub_ctx_config(struct ub_ctx* ctx, const char* fname)
322 	lock_basic_lock(&ctx->cfglock);
324 		lock_basic_unlock(&ctx->cfglock);
325 		return UB_AFTERFINAL;
327 	if(!config_read(ctx->env->cfg, fname, NULL)) {
328 		lock_basic_unlock(&ctx->cfglock);
331 	lock_basic_unlock(&ctx->cfglock);
/* Add a trust anchor given as a string (zone-file RR format).
 * Duplicates the string; ownership of dup passes to the cfg list on
 * success. NOTE(review): sampled listing — the free(dup) on the
 * AFTERFINAL/insert-failure paths is not visible here. */
336 ub_ctx_add_ta(struct ub_ctx* ctx, const char* ta)
338 	char* dup = strdup(ta);
339 	if(!dup) return UB_NOMEM;
340 	lock_basic_lock(&ctx->cfglock);
342 		lock_basic_unlock(&ctx->cfglock);
344 		return UB_AFTERFINAL;
346 	if(!cfg_strlist_insert(&ctx->env->cfg->trust_anchor_list, dup)) {
347 		lock_basic_unlock(&ctx->cfglock);
351 	lock_basic_unlock(&ctx->cfglock);
/* Add a file containing DS/DNSKEY trust anchors. Same duplicate-and-
 * insert pattern as ub_ctx_add_ta, targeting trust_anchor_file_list. */
356 ub_ctx_add_ta_file(struct ub_ctx* ctx, const char* fname)
358 	char* dup = strdup(fname);
359 	if(!dup) return UB_NOMEM;
360 	lock_basic_lock(&ctx->cfglock);
362 		lock_basic_unlock(&ctx->cfglock);
364 		return UB_AFTERFINAL;
366 	if(!cfg_strlist_insert(&ctx->env->cfg->trust_anchor_file_list, dup)) {
367 		lock_basic_unlock(&ctx->cfglock);
371 	lock_basic_unlock(&ctx->cfglock);
/* Add an RFC 5011 auto-updating trust-anchor file
 * (auto_trust_anchor_file_list). Same pattern as the other add_ta_*. */
375 int ub_ctx_add_ta_autr(struct ub_ctx* ctx, const char* fname)
377 	char* dup = strdup(fname);
378 	if(!dup) return UB_NOMEM;
379 	lock_basic_lock(&ctx->cfglock);
381 		lock_basic_unlock(&ctx->cfglock);
383 		return UB_AFTERFINAL;
385 	if(!cfg_strlist_insert(&ctx->env->cfg->auto_trust_anchor_file_list,
387 		lock_basic_unlock(&ctx->cfglock);
391 	lock_basic_unlock(&ctx->cfglock);
/* Add a BIND-style trusted-keys{} file (trusted_keys_file_list).
 * Same duplicate-and-insert pattern as the add_ta_* functions. */
396 ub_ctx_trustedkeys(struct ub_ctx* ctx, const char* fname)
398 	char* dup = strdup(fname);
399 	if(!dup) return UB_NOMEM;
400 	lock_basic_lock(&ctx->cfglock);
402 		lock_basic_unlock(&ctx->cfglock);
404 		return UB_AFTERFINAL;
406 	if(!cfg_strlist_insert(&ctx->env->cfg->trusted_keys_file_list, dup)) {
407 		lock_basic_unlock(&ctx->cfglock);
411 	lock_basic_unlock(&ctx->cfglock);
/* Set the verbosity/debug level d in the config, under cfglock.
 * NOTE(review): the line applying d to the global verbosity (original
 * line 419) is not visible in this sampled listing. */
416 ub_ctx_debuglevel(struct ub_ctx* ctx, int d)
418 	lock_basic_lock(&ctx->cfglock);
420 	ctx->env->cfg->verbosity = d;
421 	lock_basic_unlock(&ctx->cfglock);
/* Redirect library logging to the given FILE* (out) and remember that
 * the application overrode the log destination. */
425 int ub_ctx_debugout(struct ub_ctx* ctx, void* out)
427 	lock_basic_lock(&ctx->cfglock);
428 	log_file((FILE*)out);
429 	ctx->logfile_override = 1;
431 	lock_basic_unlock(&ctx->cfglock);
/* Choose threaded (dothread!=0) vs forked background processing.
 * Rejected when threads are compiled out, or after finalize. */
436 ub_ctx_async(struct ub_ctx* ctx, int dothread)
438 #ifdef THREADS_DISABLED
439 	if(dothread) /* cannot do threading */
442 	lock_basic_lock(&ctx->cfglock);
444 		lock_basic_unlock(&ctx->cfglock);
445 		return UB_AFTERFINAL;
447 	ctx->dothread = dothread;
448 	lock_basic_unlock(&ctx->cfglock);
/* Nonblocking check: is a result ready on the rr pipe? */
453 ub_poll(struct ub_ctx* ctx)
455 	/* no need to hold lock while testing for readability. */
456 	return tube_poll(ctx->rr_pipe);
/* Expose the rr pipe's read fd so callers can select()/poll() on it. */
460 ub_fd(struct ub_ctx* ctx)
462 	return tube_read_fd(ctx->rr_pipe);
465 /** process answer from bg worker */
/* Deserializes one answer message (msg/len) from the bg worker,
 * locates the matching ctx_query, fills *res with the parsed result,
 * and hands back the user's callback+arg so the caller can invoke it
 * without holding locks. NOTE(review): sampled listing — several
 * conditionals and returns between the numbered lines are not shown. */
467 process_answer_detail(struct ub_ctx* ctx, uint8_t* msg, uint32_t len,
468 	ub_callback_t* cb, void** cbarg, int* err,
469 	struct ub_result** res)
472 	if(context_serial_getcmd(msg, len) != UB_LIBCMD_ANSWER) {
473 		log_err("error: bad data from bg worker %d",
474 			(int)context_serial_getcmd(msg, len));
478 	lock_basic_lock(&ctx->cfglock);
479 	q = context_deserialize_answer(ctx, msg, len, err);
481 		lock_basic_unlock(&ctx->cfglock);
482 		/* probably simply the lookup that failed, i.e.
483 		 * response returned before cancel was sent out, so noerror */
486 	log_assert(q->async);
488 	/* grab cb while locked */
498 		ub_resolve_free(q->res);
500 		/* parse the message, extract rcode, fill result */
501 		sldns_buffer* buf = sldns_buffer_new(q->msg_len);
502 		struct regional* region = regional_create();
/* default to SERVFAIL; overwritten by libworker_enter_result below */
504 		(*res)->rcode = LDNS_RCODE_SERVFAIL;
506 			sldns_buffer_clear(buf);
507 			sldns_buffer_write(buf, q->msg, q->msg_len);
508 			sldns_buffer_flip(buf);
509 			libworker_enter_result(*res, buf, region,
/* result takes ownership of the raw answer packet */
512 		(*res)->answer_packet = q->msg;
513 		(*res)->answer_len = (int)q->msg_len;
515 		sldns_buffer_free(buf);
516 		regional_destroy(region);
519 	/* delete the q from list */
520 	(void)rbtree_delete(&ctx->queries, q->node.key);
522 	context_query_delete(q);
523 	lock_basic_unlock(&ctx->cfglock);
526 		ub_resolve_free(*res);
530 /** process answer from bg worker */
/* Thin wrapper: extract cb/cbarg/err/res via process_answer_detail,
 * then invoke the user callback with no library locks held. */
532 process_answer(struct ub_ctx* ctx, uint8_t* msg, uint32_t len)
537 	struct ub_result* res;
540 	r = process_answer_detail(ctx, msg, len, &cb, &cbarg, &err, &res);
542 	/* no locks held while calling callback, so that library is
545 		(*cb)(cbarg, err, res);
/* Drain and dispatch all currently available answers from the rr pipe.
 * NOTE(review): sampled listing — loop structure/returns not shown. */
551 ub_process(struct ub_ctx* ctx)
558 		lock_basic_lock(&ctx->rrpipe_lock);
559 		r = tube_read_msg(ctx->rr_pipe, &msg, &len, 1);
560 		lock_basic_unlock(&ctx->rrpipe_lock);
565 		if(!process_answer(ctx, msg, len)) {
/* Block until all outstanding async queries have completed, invoking
 * their callbacks. Holds rrpipe_lock across tube_wait/read, releases it
 * around each user callback. NOTE(review): sampled listing. */
575 ub_wait(struct ub_ctx* ctx)
580 	struct ub_result* res;
584 	/* this is basically the same loop as _process(), but with changes.
585 	 * holds the rrpipe lock and waits with tube_wait */
587 	lock_basic_lock(&ctx->rrpipe_lock);
588 	lock_basic_lock(&ctx->cfglock);
/* nothing outstanding: done immediately */
589 	if(ctx->num_async == 0) {
590 		lock_basic_unlock(&ctx->cfglock);
591 		lock_basic_unlock(&ctx->rrpipe_lock);
594 	lock_basic_unlock(&ctx->cfglock);
596 	/* keep rrpipe locked, while
597 	 * o waiting for pipe readable
599 	 * o possibly decrementing num_async
600 	 * do callback without lock
602 	r = tube_wait(ctx->rr_pipe);
604 		r = tube_read_msg(ctx->rr_pipe, &msg, &len, 1);
606 			lock_basic_unlock(&ctx->rrpipe_lock);
610 			lock_basic_unlock(&ctx->rrpipe_lock);
613 		r = process_answer_detail(ctx, msg, len,
614 			&cb, &cbarg, &err, &res);
/* release before the user callback so the library can be re-entered */
615 		lock_basic_unlock(&ctx->rrpipe_lock);
620 			(*cb)(cbarg, err, res);
622 	lock_basic_unlock(&ctx->rrpipe_lock);
/* Synchronous resolve: finalize config if needed, create a ctx_query,
 * run the resolution in-process via libworker_fg, and return the
 * result in *result. Cleans the query out of ctx->queries afterwards.
 * NOTE(review): sampled listing — error returns between lines missing. */
629 ub_resolve(struct ub_ctx* ctx, const char* name, int rrtype,
630 	int rrclass, struct ub_result** result)
636 	lock_basic_lock(&ctx->cfglock);
637 	if(!ctx->finalized) {
638 		r = context_finalize(ctx);
640 			lock_basic_unlock(&ctx->cfglock);
644 	/* create new ctx_query and attempt to add to the list */
645 	lock_basic_unlock(&ctx->cfglock);
/* no callback/cbarg: this is the synchronous path */
646 	q = context_new(ctx, name, rrtype, rrclass, NULL, NULL);
649 	/* become a resolver thread for a bit */
651 	r = libworker_fg(ctx, q);
653 		lock_basic_lock(&ctx->cfglock);
654 		(void)rbtree_delete(&ctx->queries, q->node.key);
655 		context_query_delete(q);
656 		lock_basic_unlock(&ctx->cfglock);
/* hand the raw answer packet over to the result */
659 	q->res->answer_packet = q->msg;
660 	q->res->answer_len = (int)q->msg_len;
665 	lock_basic_lock(&ctx->cfglock);
666 	(void)rbtree_delete(&ctx->queries, q->node.key);
667 	context_query_delete(q);
668 	lock_basic_unlock(&ctx->cfglock);
/* Event-driven async resolve: lazily creates the event worker, makes a
 * ctx_query carrying the user's callback, and attaches it to the mesh.
 * async_id receives the cancellation handle. */
673 ub_resolve_event(struct ub_ctx* ctx, const char* name, int rrtype,
674 	int rrclass, void* mydata, ub_event_callback_t callback, int* async_id)
681 	lock_basic_lock(&ctx->cfglock);
682 	if(!ctx->finalized) {
683 		int r = context_finalize(ctx);
685 			lock_basic_unlock(&ctx->cfglock);
689 	lock_basic_unlock(&ctx->cfglock);
690 	if(!ctx->event_worker) {
691 		ctx->event_worker = libworker_create_event(ctx,
693 		if(!ctx->event_worker) {
698 	/* create new ctx_query and attempt to add to the list */
/* event callback is stored via a cast into the generic cb slot */
699 	q = context_new(ctx, name, rrtype, rrclass, (ub_callback_t)callback,
705 	if((r=libworker_attach_mesh(ctx, q, async_id)) != 0)
/* Pipe-based async resolve: ensure the bg worker exists (spawning it on
 * first use), create a ctx_query, serialize it and write it to the qq
 * pipe. *async_id gets the query number for ub_cancel.
 * NOTE(review): sampled listing — error returns between lines missing. */
712 ub_resolve_async(struct ub_ctx* ctx, const char* name, int rrtype,
713 	int rrclass, void* mydata, ub_callback_t callback, int* async_id)
721 	lock_basic_lock(&ctx->cfglock);
722 	if(!ctx->finalized) {
723 		int r = context_finalize(ctx);
725 			lock_basic_unlock(&ctx->cfglock);
/* first async query: spawn the background worker (thread or fork) */
729 	if(!ctx->created_bg) {
732 		lock_basic_unlock(&ctx->cfglock);
733 		r = libworker_bg(ctx);
735 			lock_basic_lock(&ctx->cfglock);
737 			lock_basic_unlock(&ctx->cfglock);
741 		lock_basic_unlock(&ctx->cfglock);
744 	/* create new ctx_query and attempt to add to the list */
745 	q = context_new(ctx, name, rrtype, rrclass, callback, mydata);
749 	/* write over pipe to background worker */
750 	lock_basic_lock(&ctx->cfglock);
751 	msg = context_serialize_new_query(q, &len);
/* serialization failed: remove and delete the freshly added query */
753 		(void)rbtree_delete(&ctx->queries, q->node.key);
755 		context_query_delete(q);
756 		lock_basic_unlock(&ctx->cfglock);
760 		*async_id = q->querynum;
761 	lock_basic_unlock(&ctx->cfglock);
763 	lock_basic_lock(&ctx->qqpipe_lock);
764 	if(!tube_write_msg(ctx->qq_pipe, msg, len, 0)) {
765 		lock_basic_unlock(&ctx->qqpipe_lock);
769 	lock_basic_unlock(&ctx->qqpipe_lock);
/* Cancel an outstanding async query by its async_id. If the worker is a
 * forked process, a serialized cancel message is sent over the qq pipe;
 * missing ids are not an error (the answer may already be in flight). */
775 ub_cancel(struct ub_ctx* ctx, int async_id)
780 	lock_basic_lock(&ctx->cfglock);
781 	q = (struct ctx_query*)rbtree_search(&ctx->queries, &async_id);
782 	if(!q || !q->async) {
783 		/* it is not there, so nothing to do */
784 		lock_basic_unlock(&ctx->cfglock);
787 	log_assert(q->async);
791 	if(!ctx->dothread) { /* if forked */
792 		(void)rbtree_delete(&ctx->queries, q->node.key);
794 		msg = context_serialize_cancel(q, &len);
795 		context_query_delete(q);
796 		lock_basic_unlock(&ctx->cfglock);
800 		/* send cancel to background worker */
801 		lock_basic_lock(&ctx->qqpipe_lock);
802 		if(!tube_write_msg(ctx->qq_pipe, msg, len, 0)) {
803 			lock_basic_unlock(&ctx->qqpipe_lock);
807 		lock_basic_unlock(&ctx->qqpipe_lock);
810 		lock_basic_unlock(&ctx->cfglock);
/* Free a ub_result and everything it owns: canonname (unless it aliases
 * qname), the data array entries, the answer packet, and why_bogus. */
816 ub_resolve_free(struct ub_result* result)
/* canonname may point at qname; only free when it is a separate alloc */
821 	if(result->canonname != result->qname)
822 		free(result->canonname);
824 		for(p = result->data; *p; p++)
828 	free(result->answer_packet);
829 	free(result->why_bogus);
/* NOTE(review): per the original numbering these are the switch cases of
 * ub_strerror(int err); the signature/switch lines were sampled out.
 * Maps each UB_* error code to a static human-readable string. */
837 	case UB_NOERROR: return "no error";
838 	case UB_SOCKET: return "socket io error";
839 	case UB_NOMEM: return "out of memory";
840 	case UB_SYNTAX: return "syntax error";
841 	case UB_SERVFAIL: return "server failure";
842 	case UB_FORKFAIL: return "could not fork";
843 	case UB_INITFAIL: return "initialization failure";
844 	case UB_AFTERFINAL: return "setting change after finalize";
845 	case UB_PIPE: return "error in pipe communication with async";
846 	case UB_READFILE: return "error reading file";
847 	case UB_NOID: return "error async_id does not exist";
848 	default: return "unknown error";
/* Set (or, with missing addr, clear) a forwarder for the root zone.
 * Validates addr syntax, then inserts it into a "." config_stub at the
 * head of cfg->forwards, creating that stub if absent.
 * NOTE(review): sampled listing — frees/returns on some paths missing. */
853 ub_ctx_set_fwd(struct ub_ctx* ctx, const char* addr)
855 	struct sockaddr_storage storage;
857 	struct config_stub* s;
859 	lock_basic_lock(&ctx->cfglock);
861 		lock_basic_unlock(&ctx->cfglock);
863 		return UB_AFTERFINAL;
866 		/* disable fwd mode - the root stub should be first. */
867 		if(ctx->env->cfg->forwards &&
868 			strcmp(ctx->env->cfg->forwards->name, ".") == 0) {
/* unlink the root stub from the forwards list */
869 			s = ctx->env->cfg->forwards;
870 			ctx->env->cfg->forwards = s->next;
874 		lock_basic_unlock(&ctx->cfglock);
877 	lock_basic_unlock(&ctx->cfglock);
879 	/* check syntax for addr */
880 	if(!extstrtoaddr(addr, &storage, &stlen)) {
885 	/* it parses, add root stub in front of list */
886 	lock_basic_lock(&ctx->cfglock);
887 	if(!ctx->env->cfg->forwards ||
888 		strcmp(ctx->env->cfg->forwards->name, ".") != 0) {
889 		s = calloc(1, sizeof(*s));
891 			lock_basic_unlock(&ctx->cfglock);
895 		s->name = strdup(".");
898 			lock_basic_unlock(&ctx->cfglock);
/* push new root stub onto the front of the list */
902 		s->next = ctx->env->cfg->forwards;
903 		ctx->env->cfg->forwards = s;
905 		log_assert(ctx->env->cfg->forwards);
906 		s = ctx->env->cfg->forwards;
910 		lock_basic_unlock(&ctx->cfglock);
914 	if(!cfg_strlist_insert(&s->addrs, dupl)) {
916 		lock_basic_unlock(&ctx->cfglock);
920 	lock_basic_unlock(&ctx->cfglock);
/* Configure forwarders from a resolv.conf-style file (default
 * /etc/resolv.conf on POSIX; GetNetworkParams DNS list on Windows).
 * Falls back to 127.0.0.1 when no nameserver lines are found.
 * NOTE(review): sampled listing — several lines between these missing. */
925 ub_ctx_resolvconf(struct ub_ctx* ctx, const char* fname)
934 #if !defined(UB_ON_WINDOWS) || !defined(HAVE_WINDOWS_H)
935 		fname = "/etc/resolv.conf";
938 		ULONG buflen = sizeof(*info);
941 		info = (FIXED_INFO *) malloc(sizeof (FIXED_INFO));
/* first call sizes the buffer; retry with the reported length */
945 		if (GetNetworkParams(info, &buflen) == ERROR_BUFFER_OVERFLOW) {
947 			info = (FIXED_INFO *) malloc(buflen);
952 		if (GetNetworkParams(info, &buflen) == NO_ERROR) {
954 			ptr = &(info->DnsServerList);
957 				if((retval=ub_ctx_set_fwd(ctx,
958 					ptr->IpAddress.String))!=0) {
973 	in = fopen(fname, "r");
975 		/* error in errno! perror(fname) */
978 	while(fgets(buf, (int)sizeof(buf), in)) {
979 		buf[sizeof(buf)-1] = 0;
981 		while(*parse == ' ' || *parse == '\t')
983 		if(strncmp(parse, "nameserver", 10) == 0) {
985 			parse += 10; /* skip 'nameserver' */
986 			/* skip whitespace */
987 			while(*parse == ' ' || *parse == '\t')
990 			/* skip [0-9a-fA-F.:]*, i.e. IP4 and IP6 address */
991 			while(isxdigit((unsigned char)*parse) || *parse=='.' || *parse==':')
993 			/* terminate after the address, remove newline */
996 			if((r = ub_ctx_set_fwd(ctx, addr)) != UB_NOERROR) {
1004 	/* from resolv.conf(5) if none given, use localhost */
1005 	return ub_ctx_set_fwd(ctx, "127.0.0.1");
/* Load /etc/hosts-style entries (addr name name...) into the config's
 * local_data as A/AAAA records. On Windows, probes the %WINDIR% hosts
 * locations recursively. Disallowed after finalize.
 * NOTE(review): sampled listing — some lines between these missing. */
1011 ub_ctx_hosts(struct ub_ctx* ctx, const char* fname)
1014 	char buf[1024], ldata[1024];
1015 	char* parse, *addr, *name, *ins;
1016 	lock_basic_lock(&ctx->cfglock);
1017 	if(ctx->finalized) {
1018 		lock_basic_unlock(&ctx->cfglock);
1020 		return UB_AFTERFINAL;
1022 	lock_basic_unlock(&ctx->cfglock);
1024 #if defined(UB_ON_WINDOWS) && defined(HAVE_WINDOWS_H)
1026 		 * If this is Windows NT/XP/2K it's in
1027 		 * %WINDIR%\system32\drivers\etc\hosts.
1028 		 * If this is Windows 95/98/Me it's in %WINDIR%\hosts.
1030 		name = getenv("WINDIR");
1033 			snprintf(buf, sizeof(buf), "%s%s", name,
1034 				"\\system32\\drivers\\etc\\hosts");
/* recurse with the concrete path; try the 9x location on failure */
1035 			if((retval=ub_ctx_hosts(ctx, buf)) !=0 ) {
1036 				snprintf(buf, sizeof(buf), "%s%s", name,
1038 				retval=ub_ctx_hosts(ctx, buf);
1044 		fname = "/etc/hosts";
1047 	in = fopen(fname, "r");
1049 		/* error in errno! perror(fname) */
1052 	while(fgets(buf, (int)sizeof(buf), in)) {
1053 		buf[sizeof(buf)-1] = 0;
1055 		while(*parse == ' ' || *parse == '\t')
1058 			continue; /* skip comment */
1059 		/* format: <addr> spaces <name> spaces <name> ... */
1062 		while(isxdigit((unsigned char)*parse) || *parse == '.' || *parse == ':')
1066 		if(*parse == '\n' || *parse == 0)
1069 			continue; /* ignore macOSX fe80::1%lo0 localhost */
1070 		if(*parse != ' ' && *parse != '\t') {
1071 			/* must have whitespace after address */
1076 		*parse++ = 0; /* end delimiter for addr ... */
1077 		/* go to names and add them */
1079 			while(*parse == ' ' || *parse == '\t' || *parse=='\n'
1082 			if(*parse == 0 || *parse == '#')
1084 			/* skip name, allows (too) many printable characters */
1086 			while('!' <= *parse && *parse <= '~')
1089 			*parse++ = 0; /* end delimiter for name */
/* build a local-data RR string, e.g. "host A 1.2.3.4" */
1090 			snprintf(ldata, sizeof(ldata), "%s %s %s",
1091 				name, str_is_ip6(addr)?"AAAA":"A", addr);
1092 			ins = strdup(ldata);
1099 			lock_basic_lock(&ctx->cfglock);
1100 			if(!cfg_strlist_insert(&ctx->env->cfg->local_data,
1102 				lock_basic_unlock(&ctx->cfglock);
1108 			lock_basic_unlock(&ctx->cfglock);
1115 /** finalize the context, if not already finalized */
/* Idempotent: finalizes under cfglock only if needed; returns the
 * context_finalize result (res presumably 0 on success). */
1116 static int ub_ctx_finalize(struct ub_ctx* ctx)
1119 	lock_basic_lock(&ctx->cfglock);
1120 	if (!ctx->finalized) {
1121 		res = context_finalize(ctx);
1123 	lock_basic_unlock(&ctx->cfglock);
1127 /* Print local zones and RR data */
/* Finalizes the context first (local_zones exist only after finalize),
 * then dumps them via local_zones_print. */
1128 int ub_ctx_print_local_zones(struct ub_ctx* ctx)
1130 	int res = ub_ctx_finalize(ctx);
1131 	if (res) return res;
1133 	local_zones_print(ctx->local_zones);
1138 /* Add a new zone */
/* Adds (or retypes, if already present) a local zone named zone_name of
 * localzone type zone_type (class IN). Takes the local_zones write lock
 * and the per-zone lock while updating an existing zone. */
1139 int ub_ctx_zone_add(struct ub_ctx* ctx, const char *zone_name,
1140 	const char *zone_type)
1142 	enum localzone_type t;
1143 	struct local_zone* z;
1148 	int res = ub_ctx_finalize(ctx);
1149 	if (res) return res;
1151 	if(!local_zone_str2type(zone_type, &t)) {
1155 	if(!parse_dname(zone_name, &nm, &nmlen, &nmlabs)) {
1159 	lock_rw_wrlock(&ctx->local_zones->lock);
1160 	if((z=local_zones_find(ctx->local_zones, nm, nmlen, nmlabs,
1161 		LDNS_RR_CLASS_IN))) {
1162 		/* already present in tree */
1163 		lock_rw_wrlock(&z->lock);
1164 		z->type = t; /* update type anyway */
1165 		lock_rw_unlock(&z->lock);
1166 		lock_rw_unlock(&ctx->local_zones->lock);
1170 	if(!local_zones_add_zone(ctx->local_zones, nm, nmlen, nmlabs,
1171 		LDNS_RR_CLASS_IN, t)) {
1172 		lock_rw_unlock(&ctx->local_zones->lock);
1175 	lock_rw_unlock(&ctx->local_zones->lock);
/* Remove a local zone by name (class IN); absent zones are not an
 * error. Holds the local_zones write lock across find+delete. */
1180 int ub_ctx_zone_remove(struct ub_ctx* ctx, const char *zone_name)
1182 	struct local_zone* z;
1187 	int res = ub_ctx_finalize(ctx);
1188 	if (res) return res;
1190 	if(!parse_dname(zone_name, &nm, &nmlen, &nmlabs)) {
1194 	lock_rw_wrlock(&ctx->local_zones->lock);
1195 	if((z=local_zones_find(ctx->local_zones, nm, nmlen, nmlabs,
1196 		LDNS_RR_CLASS_IN))) {
1197 		/* present in tree */
1198 		local_zones_del_zone(ctx->local_zones, z);
1200 	lock_rw_unlock(&ctx->local_zones->lock);
1205 /* Add new RR data */
/* data is a zone-file-format RR string passed to local_zones_add_RR;
 * failure is reported as UB_NOMEM. */
1206 int ub_ctx_data_add(struct ub_ctx* ctx, const char *data)
1208 	int res = ub_ctx_finalize(ctx);
1209 	if (res) return res;
1211 	res = local_zones_add_RR(ctx->local_zones, data);
1212 	return (!res) ? UB_NOMEM : UB_NOERROR;
1215 /* Remove RR data */
/* Parses the owner name out of data and deletes the matching local
 * data; absent names are not an error. */
1216 int ub_ctx_data_remove(struct ub_ctx* ctx, const char *data)
1221 	int res = ub_ctx_finalize(ctx);
1222 	if (res) return res;
1224 	if(!parse_dname(data, &nm, &nmlen, &nmlabs))
1227 	local_zones_del_data(ctx->local_zones, nm, nmlen, nmlabs,
/* Return the compile-time package version string. */
1234 const char* ub_version(void)
1236 	return PACKAGE_VERSION;
/* Swap the context onto a different libevent event_base: only valid for
 * contexts created with ub_ctx_create_event (event_base already set).
 * Destroys the current event worker; a new one is created lazily on the
 * next ub_resolve_event call. Setting the same base is a no-op. */
1240 ub_ctx_set_event(struct ub_ctx* ctx, struct event_base* base) {
1241 	if (!ctx || !ctx->event_base || !base) {
1244 	if (ctx->event_base == base) {
1249 	lock_basic_lock(&ctx->cfglock);
1250 	/* destroy the current worker - safe to pass in NULL */
1251 	libworker_delete_event(ctx->event_worker);
1252 	ctx->event_worker = NULL;
1253 	ctx->event_base = base;
/* ensure the bg-worker path is not used with an event base */
1254 	ctx->created_bg = 0;
1256 	lock_basic_unlock(&ctx->cfglock);