FreeBSD/FreeBSD.git: services/mesh.c (vendor import of Unbound 1.9.0)
1 /*
2  * services/mesh.c - deal with mesh of query states and handle events for that.
3  *
4  * Copyright (c) 2007, NLnet Labs. All rights reserved.
5  *
6  * This software is open source.
7  * 
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 
12  * Redistributions of source code must retain the above copyright notice,
13  * this list of conditions and the following disclaimer.
14  * 
15  * Redistributions in binary form must reproduce the above copyright notice,
16  * this list of conditions and the following disclaimer in the documentation
17  * and/or other materials provided with the distribution.
18  * 
19  * Neither the name of the NLNET LABS nor the names of its contributors may
20  * be used to endorse or promote products derived from this software without
21  * specific prior written permission.
22  * 
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
26  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27  * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
29  * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
30  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
31  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
32  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34  */
35
36 /**
37  * \file
38  *
39  * This file contains functions to assist in dealing with a mesh of
40  * query states. This mesh is supposed to be thread-specific.
41  * It consists of query states (per qname, qtype, qclass), the connections
42  * between query states and their super- and sub-query states, and the
43  * replies to be sent back to clients.
44  */
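
/*
 * The main entry points below are mesh_new_client() for queries arriving
 * from clients, mesh_new_callback() for internal/library callers that want
 * a callback instead of a network reply, and mesh_new_prefetch() for
 * cache-prefetch work.  mesh_run() (further down in this file) then drives
 * the module chain for a mesh state until it is done and replies are sent.
 */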
45 #include "config.h"
46 #include "services/mesh.h"
47 #include "services/outbound_list.h"
48 #include "services/cache/dns.h"
49 #include "util/log.h"
50 #include "util/net_help.h"
51 #include "util/module.h"
52 #include "util/regional.h"
53 #include "util/data/msgencode.h"
54 #include "util/timehist.h"
55 #include "util/fptr_wlist.h"
56 #include "util/alloc.h"
57 #include "util/config_file.h"
58 #include "util/edns.h"
59 #include "sldns/sbuffer.h"
60 #include "sldns/wire2str.h"
61 #include "services/localzone.h"
62 #include "util/data/dname.h"
63 #include "respip/respip.h"
64 #include "services/listen_dnsport.h"
65
66 /** subtract timers so that the values do not overflow or become negative */
67 static void
68 timeval_subtract(struct timeval* d, const struct timeval* end, const struct timeval* start)
69 {
70 #ifndef S_SPLINT_S
71         time_t end_usec = end->tv_usec;
72         d->tv_sec = end->tv_sec - start->tv_sec;
73         if(end_usec < start->tv_usec) {
74                 end_usec += 1000000;
75                 d->tv_sec--;
76         }
77         d->tv_usec = end_usec - start->tv_usec;
78 #endif
79 }
80
81 /** add timers so that the values do not overflow or become negative */
82 static void
83 timeval_add(struct timeval* d, const struct timeval* add)
84 {
85 #ifndef S_SPLINT_S
86         d->tv_sec += add->tv_sec;
87         d->tv_usec += add->tv_usec;
88         if(d->tv_usec >= 1000000 ) {
89                 d->tv_usec -= 1000000;
90                 d->tv_sec++;
91         }
92 #endif
93 }
94
95 /** divide sum of timers to get average */
96 static void
97 timeval_divide(struct timeval* avg, const struct timeval* sum, size_t d)
98 {
99 #ifndef S_SPLINT_S
100         size_t leftover;
101         if(d == 0) {
102                 avg->tv_sec = 0;
103                 avg->tv_usec = 0;
104                 return;
105         }
106         avg->tv_sec = sum->tv_sec / d;
107         avg->tv_usec = sum->tv_usec / d;
108         /* handle fraction from seconds divide */
109         leftover = sum->tv_sec - avg->tv_sec*d;
110         avg->tv_usec += (leftover*1000000)/d;
111 #endif
112 }
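
/* For example (a sketch of the arithmetic above): dividing a sum of
 * 5 sec 400000 usec by d=4 gives avg->tv_sec = 5/4 = 1 and avg->tv_usec =
 * 400000/4 + (1*1000000)/4 = 350000, i.e. an average of 1.35 seconds. */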
113
114 /** histogram compare of time values */
115 static int
116 timeval_smaller(const struct timeval* x, const struct timeval* y)
117 {
118 #ifndef S_SPLINT_S
119         if(x->tv_sec < y->tv_sec)
120                 return 1;
121         else if(x->tv_sec == y->tv_sec) {
122                 if(x->tv_usec <= y->tv_usec)
123                         return 1;
124                 else    return 0;
125         }
126         else    return 0;
127 #endif
128 }
129
130 /*
131  * Compare two response-ip client info entries for the purpose of mesh state
132  * compare.  It returns 0 if ci_a and ci_b are considered equal; otherwise
133  * 1 or -1 (they mean 'ci_a is larger/smaller than ci_b', respectively, but
134  * in practice it should be only used to mean they are different).
135  * We cannot share the mesh state for two queries if different response-ip
136  * actions can apply in the end, even if those queries are otherwise identical.
137  * For this purpose we compare tag lists and tag action lists; they should be
138  * identical to share the same state.
139  * For tag data, we don't look into the data content, as it can be
140  * expensive; unless tag data are not defined for both or they point to the
141  * exact same data in memory (i.e., they come from the same ACL entry), we
142  * consider these data different.
143  * Likewise, if the client info is associated with views, we don't look into
144  * the views.  They are considered different unless they are exactly the
145  * same view object, even if the views differ only in their names.
146  */
147 static int
148 client_info_compare(const struct respip_client_info* ci_a,
149         const struct respip_client_info* ci_b)
150 {
151         int cmp;
152
153         if(!ci_a && !ci_b)
154                 return 0;
155         if(ci_a && !ci_b)
156                 return -1;
157         if(!ci_a && ci_b)
158                 return 1;
159         if(ci_a->taglen != ci_b->taglen)
160                 return (ci_a->taglen < ci_b->taglen) ? -1 : 1;
161         cmp = memcmp(ci_a->taglist, ci_b->taglist, ci_a->taglen);
162         if(cmp != 0)
163                 return cmp;
164         if(ci_a->tag_actions_size != ci_b->tag_actions_size)
165                 return (ci_a->tag_actions_size < ci_b->tag_actions_size) ?
166                         -1 : 1;
167         cmp = memcmp(ci_a->tag_actions, ci_b->tag_actions,
168                 ci_a->tag_actions_size);
169         if(cmp != 0)
170                 return cmp;
171         if(ci_a->tag_datas != ci_b->tag_datas)
172                 return ci_a->tag_datas < ci_b->tag_datas ? -1 : 1;
173         if(ci_a->view != ci_b->view)
174                 return ci_a->view < ci_b->view ? -1 : 1;
175         /* For the unbound daemon these should be non-NULL and identical,
176          * but we check that just in case. */
177         if(ci_a->respip_set != ci_b->respip_set)
178                 return ci_a->respip_set < ci_b->respip_set ? -1 : 1;
179         return 0;
180 }
181
182 int
183 mesh_state_compare(const void* ap, const void* bp)
184 {
185         struct mesh_state* a = (struct mesh_state*)ap;
186         struct mesh_state* b = (struct mesh_state*)bp;
187         int cmp;
188
189         if(a->unique < b->unique)
190                 return -1;
191         if(a->unique > b->unique)
192                 return 1;
193
194         if(a->s.is_priming && !b->s.is_priming)
195                 return -1;
196         if(!a->s.is_priming && b->s.is_priming)
197                 return 1;
198
199         if(a->s.is_valrec && !b->s.is_valrec)
200                 return -1;
201         if(!a->s.is_valrec && b->s.is_valrec)
202                 return 1;
203
204         if((a->s.query_flags&BIT_RD) && !(b->s.query_flags&BIT_RD))
205                 return -1;
206         if(!(a->s.query_flags&BIT_RD) && (b->s.query_flags&BIT_RD))
207                 return 1;
208
209         if((a->s.query_flags&BIT_CD) && !(b->s.query_flags&BIT_CD))
210                 return -1;
211         if(!(a->s.query_flags&BIT_CD) && (b->s.query_flags&BIT_CD))
212                 return 1;
213
214         cmp = query_info_compare(&a->s.qinfo, &b->s.qinfo);
215         if(cmp != 0)
216                 return cmp;
217         return client_info_compare(a->s.client_info, b->s.client_info);
218 }
219
220 int
221 mesh_state_ref_compare(const void* ap, const void* bp)
222 {
223         struct mesh_state_ref* a = (struct mesh_state_ref*)ap;
224         struct mesh_state_ref* b = (struct mesh_state_ref*)bp;
225         return mesh_state_compare(a->s, b->s);
226 }
227
228 struct mesh_area* 
229 mesh_create(struct module_stack* stack, struct module_env* env)
230 {
231         struct mesh_area* mesh = calloc(1, sizeof(struct mesh_area));
232         if(!mesh) {
233                 log_err("mesh area alloc: out of memory");
234                 return NULL;
235         }
236         mesh->histogram = timehist_setup();
237         mesh->qbuf_bak = sldns_buffer_new(env->cfg->msg_buffer_size);
238         if(!mesh->histogram || !mesh->qbuf_bak) {
239                 free(mesh);
240                 log_err("mesh area alloc: out of memory");
241                 return NULL;
242         }
243         mesh->mods = *stack;
244         mesh->env = env;
245         rbtree_init(&mesh->run, &mesh_state_compare);
246         rbtree_init(&mesh->all, &mesh_state_compare);
247         mesh->num_reply_addrs = 0;
248         mesh->num_reply_states = 0;
249         mesh->num_detached_states = 0;
250         mesh->num_forever_states = 0;
251         mesh->stats_jostled = 0;
252         mesh->stats_dropped = 0;
253         mesh->max_reply_states = env->cfg->num_queries_per_thread;
254         mesh->max_forever_states = (mesh->max_reply_states+1)/2;
255 #ifndef S_SPLINT_S
256         mesh->jostle_max.tv_sec = (time_t)(env->cfg->jostle_time / 1000);
257         mesh->jostle_max.tv_usec = (time_t)((env->cfg->jostle_time % 1000)
258                 *1000);
259 #endif
260         return mesh;
261 }
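
/*
 * Illustrative setup/teardown sketch; the surrounding worker code is an
 * assumption and not part of this file:
 *
 *	env->mesh = mesh_create(&daemon_mods, env);
 *	if(!env->mesh)
 *		fatal_exit("out of memory");
 *	...
 *	mesh_delete(env->mesh);
 */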
262
263 /** helper for mesh delete: delete the mesh states */
264 static void
265 mesh_delete_helper(rbnode_type* n)
266 {
267         struct mesh_state* mstate = (struct mesh_state*)n->key;
268         /* perform a full delete, not only the 'cleanup' routine,
269          * because other callbacks expect a clean state in the mesh
270          * for 're-entrant' calls */
271         mesh_state_delete(&mstate->s);
272         /* but because these delete the items from the tree, postorder
273          * traversal and rbtree rebalancing do not work together */
274 }
275
276 void 
277 mesh_delete(struct mesh_area* mesh)
278 {
279         if(!mesh)
280                 return;
281         /* free all query states */
282         while(mesh->all.count)
283                 mesh_delete_helper(mesh->all.root);
284         timehist_delete(mesh->histogram);
285         sldns_buffer_free(mesh->qbuf_bak);
286         free(mesh);
287 }
288
289 void
290 mesh_delete_all(struct mesh_area* mesh)
291 {
292         /* free all query states */
293         while(mesh->all.count)
294                 mesh_delete_helper(mesh->all.root);
295         mesh->stats_dropped += mesh->num_reply_addrs;
296         /* clear mesh area references */
297         rbtree_init(&mesh->run, &mesh_state_compare);
298         rbtree_init(&mesh->all, &mesh_state_compare);
299         mesh->num_reply_addrs = 0;
300         mesh->num_reply_states = 0;
301         mesh->num_detached_states = 0;
302         mesh->num_forever_states = 0;
303         mesh->forever_first = NULL;
304         mesh->forever_last = NULL;
305         mesh->jostle_first = NULL;
306         mesh->jostle_last = NULL;
307 }
308
309 int mesh_make_new_space(struct mesh_area* mesh, sldns_buffer* qbuf)
310 {
311         struct mesh_state* m = mesh->jostle_first;
312         /* free space is available */
313         if(mesh->num_reply_states < mesh->max_reply_states)
314                 return 1;
315         /* try to kick out a jostle-list item */
316         if(m && m->reply_list && m->list_select == mesh_jostle_list) {
317                 /* how old is it? */
318                 struct timeval age;
319                 timeval_subtract(&age, mesh->env->now_tv, 
320                         &m->reply_list->start_time);
321                 if(timeval_smaller(&mesh->jostle_max, &age)) {
322                         /* it's a goner */
323                         log_nametypeclass(VERB_ALGO, "query jostled out to "
324                                 "make space for a new one",
325                                 m->s.qinfo.qname, m->s.qinfo.qtype,
326                                 m->s.qinfo.qclass);
327                         /* backup the query */
328                         if(qbuf) sldns_buffer_copy(mesh->qbuf_bak, qbuf);
329                         /* notify supers */
330                         if(m->super_set.count > 0) {
331                                 verbose(VERB_ALGO, "notify supers of failure");
332                                 m->s.return_msg = NULL;
333                                 m->s.return_rcode = LDNS_RCODE_SERVFAIL;
334                                 mesh_walk_supers(mesh, m);
335                         }
336                         mesh->stats_jostled ++;
337                         mesh_state_delete(&m->s);
338                         /* restore the query - note that the qinfo ptr to
339                          * the querybuffer is then correct again. */
340                         if(qbuf) sldns_buffer_copy(qbuf, mesh->qbuf_bak);
341                         return 1;
342                 }
343         }
344         /* no space for new item */
345         return 0;
346 }
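
/* Example of the jostle policy above (a sketch; the config values are
 * assumptions): with jostle-timeout at 200 msec, a new query that finds
 * all num-queries-per-thread slots in use may evict a jostle-list query
 * that has already run for more than 200 msec; otherwise the new query
 * is dropped. */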
347
348 void mesh_new_client(struct mesh_area* mesh, struct query_info* qinfo,
349         struct respip_client_info* cinfo, uint16_t qflags,
350         struct edns_data* edns, struct comm_reply* rep, uint16_t qid)
351 {
352         struct mesh_state* s = NULL;
353         int unique = unique_mesh_state(edns->opt_list, mesh->env);
354         int was_detached = 0;
355         int was_noreply = 0;
356         int added = 0;
357         if(!unique)
358                 s = mesh_area_find(mesh, cinfo, qinfo, qflags&(BIT_RD|BIT_CD), 0, 0);
359         /* does this create a new reply state? */
360         if(!s || s->list_select == mesh_no_list) {
361                 if(!mesh_make_new_space(mesh, rep->c->buffer)) {
362                         verbose(VERB_ALGO, "Too many queries. dropping "
363                                 "incoming query.");
364                         comm_point_drop_reply(rep);
365                         mesh->stats_dropped ++;
366                         return;
367                 }
368                 /* for this new reply state, the reply address is free,
369                  * so the limit of reply addresses does not stop reply states*/
370         } else {
371                 /* protect our memory usage from storing reply addresses */
372                 if(mesh->num_reply_addrs > mesh->max_reply_states*16) {
373                         verbose(VERB_ALGO, "Too many requests queued. "
374                                 "dropping incoming query.");
375                         mesh->stats_dropped++;
376                         comm_point_drop_reply(rep);
377                         return;
378                 }
379         }
380         /* see if it already exists, if not, create one */
381         if(!s) {
382 #ifdef UNBOUND_DEBUG
383                 struct rbnode_type* n;
384 #endif
385                 s = mesh_state_create(mesh->env, qinfo, cinfo,
386                         qflags&(BIT_RD|BIT_CD), 0, 0);
387                 if(!s) {
388                         log_err("mesh_state_create: out of memory; SERVFAIL");
389                         if(!inplace_cb_reply_servfail_call(mesh->env, qinfo, NULL, NULL,
390                                 LDNS_RCODE_SERVFAIL, edns, rep, mesh->env->scratch))
391                                         edns->opt_list = NULL;
392                         error_encode(rep->c->buffer, LDNS_RCODE_SERVFAIL,
393                                 qinfo, qid, qflags, edns);
394                         comm_point_send_reply(rep);
395                         return;
396                 }
397                 if(unique)
398                         mesh_state_make_unique(s);
399                 /* copy the edns options we got from the front */
400                 if(edns->opt_list) {
401                         s->s.edns_opts_front_in = edns_opt_copy_region(edns->opt_list,
402                                 s->s.region);
403                         if(!s->s.edns_opts_front_in) {
404                                 log_err("mesh_state_create: out of memory; SERVFAIL");
405                                 if(!inplace_cb_reply_servfail_call(mesh->env, qinfo, NULL,
406                                         NULL, LDNS_RCODE_SERVFAIL, edns, rep, mesh->env->scratch))
407                                                 edns->opt_list = NULL;
408                                 error_encode(rep->c->buffer, LDNS_RCODE_SERVFAIL,
409                                         qinfo, qid, qflags, edns);
410                                 comm_point_send_reply(rep);
411                                 return;
412                         }
413                 }
414
415 #ifdef UNBOUND_DEBUG
416                 n =
417 #else
418                 (void)
419 #endif
420                 rbtree_insert(&mesh->all, &s->node);
421                 log_assert(n != NULL);
422                 /* set detached (it is now) */
423                 mesh->num_detached_states++;
424                 added = 1;
425         }
426         if(!s->reply_list && !s->cb_list && s->super_set.count == 0)
427                 was_detached = 1;
428         if(!s->reply_list && !s->cb_list)
429                 was_noreply = 1;
430         /* add reply to s */
431         if(!mesh_state_add_reply(s, edns, rep, qid, qflags, qinfo)) {
432                         log_err("mesh_new_client: out of memory; SERVFAIL");
433                 servfail_mem:
434                         if(!inplace_cb_reply_servfail_call(mesh->env, qinfo, &s->s,
435                                 NULL, LDNS_RCODE_SERVFAIL, edns, rep, mesh->env->scratch))
436                                         edns->opt_list = NULL;
437                         error_encode(rep->c->buffer, LDNS_RCODE_SERVFAIL,
438                                 qinfo, qid, qflags, edns);
439                         comm_point_send_reply(rep);
440                         if(added)
441                                 mesh_state_delete(&s->s);
442                         return;
443         }
444         if(rep->c->tcp_req_info) {
445                 if(!tcp_req_info_add_meshstate(rep->c->tcp_req_info, mesh, s)) {
446                         log_err("mesh_new_client: out of memory add tcpreqinfo");
447                         goto servfail_mem;
448                 }
449         }
450         /* update statistics */
451         if(was_detached) {
452                 log_assert(mesh->num_detached_states > 0);
453                 mesh->num_detached_states--;
454         }
455         if(was_noreply) {
456                 mesh->num_reply_states ++;
457         }
458         mesh->num_reply_addrs++;
459         if(s->list_select == mesh_no_list) {
460                 /* move to either the forever or the jostle_list */
461                 if(mesh->num_forever_states < mesh->max_forever_states) {
462                         mesh->num_forever_states ++;
463                         mesh_list_insert(s, &mesh->forever_first, 
464                                 &mesh->forever_last);
465                         s->list_select = mesh_forever_list;
466                 } else {
467                         mesh_list_insert(s, &mesh->jostle_first, 
468                                 &mesh->jostle_last);
469                         s->list_select = mesh_jostle_list;
470                 }
471         }
472         if(added)
473                 mesh_run(mesh, s, module_event_new, NULL);
474 }
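
/*
 * Illustrative caller sketch (the variable names are assumptions): after a
 * front end has parsed the request into 'qinfo' and 'edns', it hands the
 * query to the mesh and does not send anything itself afterwards, since
 * mesh_new_client() replies or drops on error:
 *
 *	mesh_new_client(env->mesh, &qinfo, cinfo, qflags, &edns, repinfo, qid);
 */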
475
476 int 
477 mesh_new_callback(struct mesh_area* mesh, struct query_info* qinfo,
478         uint16_t qflags, struct edns_data* edns, sldns_buffer* buf, 
479         uint16_t qid, mesh_cb_func_type cb, void* cb_arg)
480 {
481         struct mesh_state* s = NULL;
482         int unique = unique_mesh_state(edns->opt_list, mesh->env);
483         int was_detached = 0;
484         int was_noreply = 0;
485         int added = 0;
486         if(!unique)
487                 s = mesh_area_find(mesh, NULL, qinfo, qflags&(BIT_RD|BIT_CD), 0, 0);
488
489         /* there are no limits on the number of callbacks */
490
491         /* see if it already exists, if not, create one */
492         if(!s) {
493 #ifdef UNBOUND_DEBUG
494                 struct rbnode_type* n;
495 #endif
496                 s = mesh_state_create(mesh->env, qinfo, NULL,
497                         qflags&(BIT_RD|BIT_CD), 0, 0);
498                 if(!s) {
499                         return 0;
500                 }
501                 if(unique)
502                         mesh_state_make_unique(s);
503                 if(edns->opt_list) {
504                         s->s.edns_opts_front_in = edns_opt_copy_region(edns->opt_list,
505                                 s->s.region);
506                         if(!s->s.edns_opts_front_in) {
507                                 return 0;
508                         }
509                 }
510 #ifdef UNBOUND_DEBUG
511                 n =
512 #else
513                 (void)
514 #endif
515                 rbtree_insert(&mesh->all, &s->node);
516                 log_assert(n != NULL);
517                 /* set detached (it is now) */
518                 mesh->num_detached_states++;
519                 added = 1;
520         }
521         if(!s->reply_list && !s->cb_list && s->super_set.count == 0)
522                 was_detached = 1;
523         if(!s->reply_list && !s->cb_list)
524                 was_noreply = 1;
525         /* add reply to s */
526         if(!mesh_state_add_cb(s, edns, buf, cb, cb_arg, qid, qflags)) {
527                         if(added)
528                                 mesh_state_delete(&s->s);
529                         return 0;
530         }
531         /* update statistics */
532         if(was_detached) {
533                 log_assert(mesh->num_detached_states > 0);
534                 mesh->num_detached_states--;
535         }
536         if(was_noreply) {
537                 mesh->num_reply_states ++;
538         }
539         mesh->num_reply_addrs++;
540         if(added)
541                 mesh_run(mesh, s, module_event_new, NULL);
542         return 1;
543 }
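
/*
 * Illustrative callback-use sketch (the callback and its argument are
 * assumptions; note that real callbacks must also be present in the
 * fptr_wlist whitelist checked in mesh_state_add_cb()):
 *
 *	static void my_done(void* arg, int rcode, sldns_buffer* buf,
 *		enum sec_status s, char* why_bogus, int was_ratelimited)
 *	{ ... inspect rcode and buf ... }
 *
 *	if(!mesh_new_callback(mesh, &qinfo, qflags, &edns, buf, qid,
 *		&my_done, my_arg))
 *		log_err("out of memory");
 */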
544
545 static void mesh_schedule_prefetch(struct mesh_area* mesh,
546         struct query_info* qinfo, uint16_t qflags, time_t leeway, int run);
547
548 void mesh_new_prefetch(struct mesh_area* mesh, struct query_info* qinfo,
549         uint16_t qflags, time_t leeway)
550 {
551         mesh_schedule_prefetch(mesh, qinfo, qflags, leeway, 1);
552 }
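
/* A caller (an assumption; for example on a cache hit that is close to
 * expiry) can use this to refresh an entry in the background: the new
 * state is forced to ignore the cache and carries the given leeway in
 * its prefetch_leeway for the resolving modules to use. */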
553
554 /* Internal backend routine of mesh_new_prefetch().  It takes one additional
555  * parameter, 'run', which controls whether to run the prefetch state
556  * immediately.  When this function is called internally 'run' could be
557  * 0 (false), in which case the new state is only made runnable so it
558  * will not be run recursively on top of the current state. */
559 static void mesh_schedule_prefetch(struct mesh_area* mesh,
560         struct query_info* qinfo, uint16_t qflags, time_t leeway, int run)
561 {
562         struct mesh_state* s = mesh_area_find(mesh, NULL, qinfo,
563                 qflags&(BIT_RD|BIT_CD), 0, 0);
564 #ifdef UNBOUND_DEBUG
565         struct rbnode_type* n;
566 #endif
567         /* already exists, and for a different purpose perhaps.
568          * if mesh_no_list, keep it that way. */
569         if(s) {
570                 /* make it ignore the cache from now on */
571                 if(!s->s.blacklist)
572                         sock_list_insert(&s->s.blacklist, NULL, 0, s->s.region);
573                 if(s->s.prefetch_leeway < leeway)
574                         s->s.prefetch_leeway = leeway;
575                 return;
576         }
577         if(!mesh_make_new_space(mesh, NULL)) {
578                 verbose(VERB_ALGO, "Too many queries. dropped prefetch.");
579                 mesh->stats_dropped ++;
580                 return;
581         }
582
583         s = mesh_state_create(mesh->env, qinfo, NULL,
584                 qflags&(BIT_RD|BIT_CD), 0, 0);
585         if(!s) {
586                 log_err("prefetch mesh_state_create: out of memory");
587                 return;
588         }
589 #ifdef UNBOUND_DEBUG
590         n =
591 #else
592         (void)
593 #endif
594         rbtree_insert(&mesh->all, &s->node);
595         log_assert(n != NULL);
596         /* set detached (it is now) */
597         mesh->num_detached_states++;
598         /* make it ignore the cache */
599         sock_list_insert(&s->s.blacklist, NULL, 0, s->s.region);
600         s->s.prefetch_leeway = leeway;
601
602         if(s->list_select == mesh_no_list) {
603                 /* move to either the forever or the jostle_list */
604                 if(mesh->num_forever_states < mesh->max_forever_states) {
605                         mesh->num_forever_states ++;
606                         mesh_list_insert(s, &mesh->forever_first, 
607                                 &mesh->forever_last);
608                         s->list_select = mesh_forever_list;
609                 } else {
610                         mesh_list_insert(s, &mesh->jostle_first, 
611                                 &mesh->jostle_last);
612                         s->list_select = mesh_jostle_list;
613                 }
614         }
615
616         if(!run) {
617 #ifdef UNBOUND_DEBUG
618                 n =
619 #else
620                 (void)
621 #endif
622                 rbtree_insert(&mesh->run, &s->run_node);
623                 log_assert(n != NULL);
624                 return;
625         }
626
627         mesh_run(mesh, s, module_event_new, NULL);
628 }
629
630 void mesh_report_reply(struct mesh_area* mesh, struct outbound_entry* e,
631         struct comm_reply* reply, int what)
632 {
633         enum module_ev event = module_event_reply;
634         e->qstate->reply = reply;
635         if(what != NETEVENT_NOERROR) {
636                 event = module_event_noreply;
637                 if(what == NETEVENT_CAPSFAIL)
638                         event = module_event_capsfail;
639         }
640         mesh_run(mesh, e->qstate->mesh_info, event, e);
641 }
642
643 struct mesh_state*
644 mesh_state_create(struct module_env* env, struct query_info* qinfo,
645         struct respip_client_info* cinfo, uint16_t qflags, int prime,
646         int valrec)
647 {
648         struct regional* region = alloc_reg_obtain(env->alloc);
649         struct mesh_state* mstate;
650         int i;
651         if(!region)
652                 return NULL;
653         mstate = (struct mesh_state*)regional_alloc(region, 
654                 sizeof(struct mesh_state));
655         if(!mstate) {
656                 alloc_reg_release(env->alloc, region);
657                 return NULL;
658         }
659         memset(mstate, 0, sizeof(*mstate));
660         mstate->node = *RBTREE_NULL;
661         mstate->run_node = *RBTREE_NULL;
662         mstate->node.key = mstate;
663         mstate->run_node.key = mstate;
664         mstate->reply_list = NULL;
665         mstate->list_select = mesh_no_list;
666         mstate->replies_sent = 0;
667         rbtree_init(&mstate->super_set, &mesh_state_ref_compare);
668         rbtree_init(&mstate->sub_set, &mesh_state_ref_compare);
669         mstate->num_activated = 0;
670         mstate->unique = NULL;
671         /* init module qstate */
672         mstate->s.qinfo.qtype = qinfo->qtype;
673         mstate->s.qinfo.qclass = qinfo->qclass;
674         mstate->s.qinfo.local_alias = NULL;
675         mstate->s.qinfo.qname_len = qinfo->qname_len;
676         mstate->s.qinfo.qname = regional_alloc_init(region, qinfo->qname,
677                 qinfo->qname_len);
678         if(!mstate->s.qinfo.qname) {
679                 alloc_reg_release(env->alloc, region);
680                 return NULL;
681         }
682         if(cinfo) {
683                 mstate->s.client_info = regional_alloc_init(region, cinfo,
684                         sizeof(*cinfo));
685                 if(!mstate->s.client_info) {
686                         alloc_reg_release(env->alloc, region);
687                         return NULL;
688                 }
689         }
690         /* remove all weird bits from qflags */
691         mstate->s.query_flags = (qflags & (BIT_RD|BIT_CD));
692         mstate->s.is_priming = prime;
693         mstate->s.is_valrec = valrec;
694         mstate->s.reply = NULL;
695         mstate->s.region = region;
696         mstate->s.curmod = 0;
697         mstate->s.return_msg = 0;
698         mstate->s.return_rcode = LDNS_RCODE_NOERROR;
699         mstate->s.env = env;
700         mstate->s.mesh_info = mstate;
701         mstate->s.prefetch_leeway = 0;
702         mstate->s.no_cache_lookup = 0;
703         mstate->s.no_cache_store = 0;
704         mstate->s.need_refetch = 0;
705         mstate->s.was_ratelimited = 0;
706
707         /* init modules */
708         for(i=0; i<env->mesh->mods.num; i++) {
709                 mstate->s.minfo[i] = NULL;
710                 mstate->s.ext_state[i] = module_state_initial;
711         }
712         /* init edns option lists */
713         mstate->s.edns_opts_front_in = NULL;
714         mstate->s.edns_opts_back_out = NULL;
715         mstate->s.edns_opts_back_in = NULL;
716         mstate->s.edns_opts_front_out = NULL;
717
718         return mstate;
719 }
720
721 int
722 mesh_state_is_unique(struct mesh_state* mstate)
723 {
724         return mstate->unique != NULL;
725 }
726
727 void
728 mesh_state_make_unique(struct mesh_state* mstate)
729 {
730         mstate->unique = mstate;
731 }
732
733 void 
734 mesh_state_cleanup(struct mesh_state* mstate)
735 {
736         struct mesh_area* mesh;
737         int i;
738         if(!mstate)
739                 return;
740         mesh = mstate->s.env->mesh;
741         /* drop unsent replies */
742         if(!mstate->replies_sent) {
743                 struct mesh_reply* rep = mstate->reply_list;
744                 struct mesh_cb* cb;
745                 /* the mesh states linked in tcp_req_info are removed, but
746                  * the reply_list is now NULL, so the remove-from-empty-list
747                  * takes no time and does not do the mesh accounting */
748                 mstate->reply_list = NULL;
749                 for(; rep; rep=rep->next) {
750                         comm_point_drop_reply(&rep->query_reply);
751                         mesh->num_reply_addrs--;
752                 }
753                 while((cb = mstate->cb_list)!=NULL) {
754                         mstate->cb_list = cb->next;
755                         fptr_ok(fptr_whitelist_mesh_cb(cb->cb));
756                         (*cb->cb)(cb->cb_arg, LDNS_RCODE_SERVFAIL, NULL,
757                                 sec_status_unchecked, NULL, 0);
758                         mesh->num_reply_addrs--;
759                 }
760         }
761
762         /* de-init modules */
763         for(i=0; i<mesh->mods.num; i++) {
764                 fptr_ok(fptr_whitelist_mod_clear(mesh->mods.mod[i]->clear));
765                 (*mesh->mods.mod[i]->clear)(&mstate->s, i);
766                 mstate->s.minfo[i] = NULL;
767                 mstate->s.ext_state[i] = module_finished;
768         }
769         alloc_reg_release(mstate->s.env->alloc, mstate->s.region);
770 }
771
772 void 
773 mesh_state_delete(struct module_qstate* qstate)
774 {
775         struct mesh_area* mesh;
776         struct mesh_state_ref* super, ref;
777         struct mesh_state* mstate;
778         if(!qstate)
779                 return;
780         mstate = qstate->mesh_info;
781         mesh = mstate->s.env->mesh;
782         mesh_detach_subs(&mstate->s);
783         if(mstate->list_select == mesh_forever_list) {
784                 mesh->num_forever_states --;
785                 mesh_list_remove(mstate, &mesh->forever_first, 
786                         &mesh->forever_last);
787         } else if(mstate->list_select == mesh_jostle_list) {
788                 mesh_list_remove(mstate, &mesh->jostle_first, 
789                         &mesh->jostle_last);
790         }
791         if(!mstate->reply_list && !mstate->cb_list
792                 && mstate->super_set.count == 0) {
793                 log_assert(mesh->num_detached_states > 0);
794                 mesh->num_detached_states--;
795         }
796         if(mstate->reply_list || mstate->cb_list) {
797                 log_assert(mesh->num_reply_states > 0);
798                 mesh->num_reply_states--;
799         }
800         ref.node.key = &ref;
801         ref.s = mstate;
802         RBTREE_FOR(super, struct mesh_state_ref*, &mstate->super_set) {
803                 (void)rbtree_delete(&super->s->sub_set, &ref);
804         }
805         (void)rbtree_delete(&mesh->run, mstate);
806         (void)rbtree_delete(&mesh->all, mstate);
807         mesh_state_cleanup(mstate);
808 }
809
810 /** helper recursive rbtree find routine */
811 static int
812 find_in_subsub(struct mesh_state* m, struct mesh_state* tofind, size_t *c)
813 {
814         struct mesh_state_ref* r;
815         if((*c)++ > MESH_MAX_SUBSUB)
816                 return 1;
817         RBTREE_FOR(r, struct mesh_state_ref*, &m->sub_set) {
818                 if(r->s == tofind || find_in_subsub(r->s, tofind, c))
819                         return 1;
820         }
821         return 0;
822 }
823
824 /** find cycle for already looked up mesh_state */
825 static int 
826 mesh_detect_cycle_found(struct module_qstate* qstate, struct mesh_state* dep_m)
827 {
828         struct mesh_state* cyc_m = qstate->mesh_info;
829         size_t counter = 0;
830         if(!dep_m)
831                 return 0;
832         if(dep_m == cyc_m || find_in_subsub(dep_m, cyc_m, &counter)) {
833                 if(counter > MESH_MAX_SUBSUB)
834                         return 2;
835                 return 1;
836         }
837         return 0;
838 }
839
840 void mesh_detach_subs(struct module_qstate* qstate)
841 {
842         struct mesh_area* mesh = qstate->env->mesh;
843         struct mesh_state_ref* ref, lookup;
844 #ifdef UNBOUND_DEBUG
845         struct rbnode_type* n;
846 #endif
847         lookup.node.key = &lookup;
848         lookup.s = qstate->mesh_info;
849         RBTREE_FOR(ref, struct mesh_state_ref*, &qstate->mesh_info->sub_set) {
850 #ifdef UNBOUND_DEBUG
851                 n =
852 #else
853                 (void)
854 #endif
855                 rbtree_delete(&ref->s->super_set, &lookup);
856                 log_assert(n != NULL); /* must have been present */
857                 if(!ref->s->reply_list && !ref->s->cb_list
858                         && ref->s->super_set.count == 0) {
859                         mesh->num_detached_states++;
860                         log_assert(mesh->num_detached_states + 
861                                 mesh->num_reply_states <= mesh->all.count);
862                 }
863         }
864         rbtree_init(&qstate->mesh_info->sub_set, &mesh_state_ref_compare);
865 }
866
867 int mesh_add_sub(struct module_qstate* qstate, struct query_info* qinfo,
868         uint16_t qflags, int prime, int valrec, struct module_qstate** newq,
869         struct mesh_state** sub)
870 {
871         /* find it, if not, create it */
872         struct mesh_area* mesh = qstate->env->mesh;
873         *sub = mesh_area_find(mesh, NULL, qinfo, qflags,
874                 prime, valrec);
875         if(mesh_detect_cycle_found(qstate, *sub)) {
876                 verbose(VERB_ALGO, "attach failed, cycle detected");
877                 return 0;
878         }
879         if(!*sub) {
880 #ifdef UNBOUND_DEBUG
881                 struct rbnode_type* n;
882 #endif
883                 /* create a new one */
884                 *sub = mesh_state_create(qstate->env, qinfo, NULL, qflags, prime,
885                         valrec);
886                 if(!*sub) {
887                         log_err("mesh_attach_sub: out of memory");
888                         return 0;
889                 }
890 #ifdef UNBOUND_DEBUG
891                 n =
892 #else
893                 (void)
894 #endif
895                 rbtree_insert(&mesh->all, &(*sub)->node);
896                 log_assert(n != NULL);
897                 /* set detached (it is now) */
898                 mesh->num_detached_states++;
899                 /* set new query state to run */
900 #ifdef UNBOUND_DEBUG
901                 n =
902 #else
903                 (void)
904 #endif
905                 rbtree_insert(&mesh->run, &(*sub)->run_node);
906                 log_assert(n != NULL);
907                 *newq = &(*sub)->s;
908         } else
909                 *newq = NULL;
910         return 1;
911 }
912
913 int mesh_attach_sub(struct module_qstate* qstate, struct query_info* qinfo,
914         uint16_t qflags, int prime, int valrec, struct module_qstate** newq)
915 {
916         struct mesh_area* mesh = qstate->env->mesh;
917         struct mesh_state* sub = NULL;
918         int was_detached;
919         if(!mesh_add_sub(qstate, qinfo, qflags, prime, valrec, newq, &sub))
920                 return 0;
921         was_detached = (sub->super_set.count == 0);
922         if(!mesh_state_attachment(qstate->mesh_info, sub))
923                 return 0;
924         /* if it was a duplicate attachment, the count was not zero before */
925         if(!sub->reply_list && !sub->cb_list && was_detached && 
926                 sub->super_set.count == 1) {
927                 /* it used to be detached, before this one got added */
928                 log_assert(mesh->num_detached_states > 0);
929                 mesh->num_detached_states--;
930         }
931         /* *newq will be run when inited after the current module stops */
932         return 1;
933 }
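
/*
 * Illustrative sketch of how a module spawns a subquery (an assumption for
 * illustration; modules normally go through the env->attach_sub function
 * pointer, which points at mesh_attach_sub, rather than calling it
 * directly):
 *
 *	struct module_qstate* newq = NULL;
 *	if(!(*qstate->env->attach_sub)(qstate, &sub_qinfo, BIT_RD, 0, 0, &newq))
 *		return 0;
 *
 * On failure a cycle was detected or memory ran out; if newq is non-NULL
 * afterwards, the mesh inits and runs the new subquery when the current
 * module step finishes.
 */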
934
935 int mesh_state_attachment(struct mesh_state* super, struct mesh_state* sub)
936 {
937 #ifdef UNBOUND_DEBUG
938         struct rbnode_type* n;
939 #endif
940         struct mesh_state_ref* subref; /* points to sub, inserted in super */
941         struct mesh_state_ref* superref; /* points to super, inserted in sub */
942         if( !(subref = regional_alloc(super->s.region,
943                 sizeof(struct mesh_state_ref))) ||
944                 !(superref = regional_alloc(sub->s.region,
945                 sizeof(struct mesh_state_ref))) ) {
946                 log_err("mesh_state_attachment: out of memory");
947                 return 0;
948         }
949         superref->node.key = superref;
950         superref->s = super;
951         subref->node.key = subref;
952         subref->s = sub;
953         if(!rbtree_insert(&sub->super_set, &superref->node)) {
954                 /* this should not happen, iterator and validator do not
955                  * attach subqueries that are identical. */
956                 /* already attached, we are done, nothing to do.
957                  * since superref and subref are already allocated in the
958                  * region, we cannot free them */
959                 return 1;
960         }
961 #ifdef UNBOUND_DEBUG
962         n =
963 #else
964         (void)
965 #endif
966         rbtree_insert(&super->sub_set, &subref->node);
967         log_assert(n != NULL); /* we checked the if statement above; the reverse
968           administration should not fail now, unless they are out of sync */
969         return 1;
970 }
971
972 /**
973  * callback results to mesh cb entry
974  * @param m: mesh state to send it for.
975  * @param rcode: if not 0, error code.
976  * @param rep: reply to send (or NULL if rcode is set).
977  * @param r: callback entry
978  */
979 static void
980 mesh_do_callback(struct mesh_state* m, int rcode, struct reply_info* rep,
981         struct mesh_cb* r)
982 {
983         int secure;
984         char* reason = NULL;
985         int was_ratelimited = m->s.was_ratelimited;
986         /* bogus messages are not made into servfail, sec_status passed
987          * to the callback function */
988         if(rep && rep->security == sec_status_secure)
989                 secure = 1;
990         else    secure = 0;
991         if(!rep && rcode == LDNS_RCODE_NOERROR)
992                 rcode = LDNS_RCODE_SERVFAIL;
993         if(!rcode && (rep->security == sec_status_bogus ||
994                 rep->security == sec_status_secure_sentinel_fail)) {
995                 if(!(reason = errinf_to_str_bogus(&m->s)))
996                         rcode = LDNS_RCODE_SERVFAIL;
997         }
998         /* send the reply */
999         if(rcode) {
1000                 if(rcode == LDNS_RCODE_SERVFAIL) {
1001                         if(!inplace_cb_reply_servfail_call(m->s.env, &m->s.qinfo, &m->s,
1002                                 rep, rcode, &r->edns, NULL, m->s.region))
1003                                         r->edns.opt_list = NULL;
1004                 } else {
1005                         if(!inplace_cb_reply_call(m->s.env, &m->s.qinfo, &m->s, rep, rcode,
1006                                 &r->edns, NULL, m->s.region))
1007                                         r->edns.opt_list = NULL;
1008                 }
1009                 fptr_ok(fptr_whitelist_mesh_cb(r->cb));
1010                 (*r->cb)(r->cb_arg, rcode, r->buf, sec_status_unchecked, NULL,
1011                         was_ratelimited);
1012         } else {
1013                 size_t udp_size = r->edns.udp_size;
1014                 sldns_buffer_clear(r->buf);
1015                 r->edns.edns_version = EDNS_ADVERTISED_VERSION;
1016                 r->edns.udp_size = EDNS_ADVERTISED_SIZE;
1017                 r->edns.ext_rcode = 0;
1018                 r->edns.bits &= EDNS_DO;
1019
1020                 if(!inplace_cb_reply_call(m->s.env, &m->s.qinfo, &m->s, rep,
1021                         LDNS_RCODE_NOERROR, &r->edns, NULL, m->s.region) ||
1022                         !reply_info_answer_encode(&m->s.qinfo, rep, r->qid, 
1023                         r->qflags, r->buf, 0, 1, 
1024                         m->s.env->scratch, udp_size, &r->edns, 
1025                         (int)(r->edns.bits & EDNS_DO), secure)) 
1026                 {
1027                         fptr_ok(fptr_whitelist_mesh_cb(r->cb));
1028                         (*r->cb)(r->cb_arg, LDNS_RCODE_SERVFAIL, r->buf,
1029                                 sec_status_unchecked, NULL, 0);
1030                 } else {
1031                         fptr_ok(fptr_whitelist_mesh_cb(r->cb));
1032                         (*r->cb)(r->cb_arg, LDNS_RCODE_NOERROR, r->buf,
1033                                 rep->security, reason, was_ratelimited);
1034                 }
1035         }
1036         free(reason);
1037         m->s.env->mesh->num_reply_addrs--;
1038 }
1039
1040 /**
1041  * Send reply to mesh reply entry
1042  * @param m: mesh state to send it for.
1043  * @param rcode: if not 0, error code.
1044  * @param rep: reply to send (or NULL if rcode is set).
1045  * @param r: reply entry
1046  * @param r_buffer: buffer to use for reply entry.
1047  * @param prev: previous reply, already has its answer encoded in buffer.
1048  * @param prev_buffer: buffer for previous reply.
1049  */
1050 static void
1051 mesh_send_reply(struct mesh_state* m, int rcode, struct reply_info* rep,
1052         struct mesh_reply* r, struct sldns_buffer* r_buffer,
1053         struct mesh_reply* prev, struct sldns_buffer* prev_buffer)
1054 {
1055         struct timeval end_time;
1056         struct timeval duration;
1057         int secure;
1058         /* Copy the client's EDNS for later restore, to make sure the edns
1059          * compare is with the correct edns options. */
1060         struct edns_data edns_bak = r->edns;
1061         /* examine security status */
1062         if(m->s.env->need_to_validate && (!(r->qflags&BIT_CD) ||
1063                 m->s.env->cfg->ignore_cd) && rep && 
1064                 (rep->security <= sec_status_bogus ||
1065                 rep->security == sec_status_secure_sentinel_fail)) {
1066                 rcode = LDNS_RCODE_SERVFAIL;
1067                 if(m->s.env->cfg->stat_extended) 
1068                         m->s.env->mesh->ans_bogus++;
1069         }
1070         if(rep && rep->security == sec_status_secure)
1071                 secure = 1;
1072         else    secure = 0;
1073         if(!rep && rcode == LDNS_RCODE_NOERROR)
1074                 rcode = LDNS_RCODE_SERVFAIL;
1075         /* send the reply */
1076         /* We don't reuse the encoded answer if either the previous or current
1077          * response has a local alias.  We could compare the alias records
1078          * and still reuse the previous answer if they are the same, but that
1079          * would be complicated and error prone for the relatively minor case.
1080          * So we err on the side of safety. */
1081         if(prev && prev_buffer && prev->qflags == r->qflags && 
1082                 !prev->local_alias && !r->local_alias &&
1083                 prev->edns.edns_present == r->edns.edns_present && 
1084                 prev->edns.bits == r->edns.bits && 
1085                 prev->edns.udp_size == r->edns.udp_size &&
1086                 edns_opt_list_compare(prev->edns.opt_list, r->edns.opt_list)
1087                 == 0) {
1088                 /* if the previous reply is identical to this one, fix ID */
1089                 if(prev_buffer != r_buffer)
1090                         sldns_buffer_copy(r_buffer, prev_buffer);
1091                 sldns_buffer_write_at(r_buffer, 0, &r->qid, sizeof(uint16_t));
1092                 sldns_buffer_write_at(r_buffer, 12, r->qname,
1093                         m->s.qinfo.qname_len);
1094                 comm_point_send_reply(&r->query_reply);
1095         } else if(rcode) {
1096                 m->s.qinfo.qname = r->qname;
1097                 m->s.qinfo.local_alias = r->local_alias;
1098                 if(rcode == LDNS_RCODE_SERVFAIL) {
1099                         if(!inplace_cb_reply_servfail_call(m->s.env, &m->s.qinfo, &m->s,
1100                                 rep, rcode, &r->edns, NULL, m->s.region))
1101                                         r->edns.opt_list = NULL;
1102                 } else { 
1103                         if(!inplace_cb_reply_call(m->s.env, &m->s.qinfo, &m->s, rep, rcode,
1104                                 &r->edns, NULL, m->s.region))
1105                                         r->edns.opt_list = NULL;
1106                 }
1107                 error_encode(r_buffer, rcode, &m->s.qinfo, r->qid,
1108                         r->qflags, &r->edns);
1109                 comm_point_send_reply(&r->query_reply);
1110         } else {
1111                 size_t udp_size = r->edns.udp_size;
1112                 r->edns.edns_version = EDNS_ADVERTISED_VERSION;
1113                 r->edns.udp_size = EDNS_ADVERTISED_SIZE;
1114                 r->edns.ext_rcode = 0;
1115                 r->edns.bits &= EDNS_DO;
1116                 m->s.qinfo.qname = r->qname;
1117                 m->s.qinfo.local_alias = r->local_alias;
1118                 if(!inplace_cb_reply_call(m->s.env, &m->s.qinfo, &m->s, rep,
1119                         LDNS_RCODE_NOERROR, &r->edns, NULL, m->s.region) ||
1120                         !apply_edns_options(&r->edns, &edns_bak,
1121                                 m->s.env->cfg, r->query_reply.c,
1122                                 m->s.region) ||
1123                         !reply_info_answer_encode(&m->s.qinfo, rep, r->qid, 
1124                         r->qflags, r_buffer, 0, 1, m->s.env->scratch,
1125                         udp_size, &r->edns, (int)(r->edns.bits & EDNS_DO),
1126                         secure)) 
1127                 {
1128                         if(!inplace_cb_reply_servfail_call(m->s.env, &m->s.qinfo, &m->s,
1129                         rep, LDNS_RCODE_SERVFAIL, &r->edns, NULL, m->s.region))
1130                                 r->edns.opt_list = NULL;
1131                         error_encode(r_buffer, LDNS_RCODE_SERVFAIL,
1132                                 &m->s.qinfo, r->qid, r->qflags, &r->edns);
1133                 }
1134                 r->edns = edns_bak;
1135                 comm_point_send_reply(&r->query_reply);
1136         }
1137         /* account */
1138         m->s.env->mesh->num_reply_addrs--;
1139         end_time = *m->s.env->now_tv;
1140         timeval_subtract(&duration, &end_time, &r->start_time);
1141         verbose(VERB_ALGO, "query took " ARG_LL "d.%6.6d sec",
1142                 (long long)duration.tv_sec, (int)duration.tv_usec);
1143         m->s.env->mesh->replies_sent++;
1144         timeval_add(&m->s.env->mesh->replies_sum_wait, &duration);
1145         timehist_insert(m->s.env->mesh->histogram, &duration);
1146         if(m->s.env->cfg->stat_extended) {
1147                 uint16_t rc = FLAGS_GET_RCODE(sldns_buffer_read_u16_at(
1148                         r_buffer, 2));
1149                 if(secure) m->s.env->mesh->ans_secure++;
1150                 m->s.env->mesh->ans_rcode[ rc ] ++;
1151                 if(rc == 0 && LDNS_ANCOUNT(sldns_buffer_begin(r_buffer)) == 0)
1152                         m->s.env->mesh->ans_nodata++;
1153         }
1154         /* Log reply sent */
1155         if(m->s.env->cfg->log_replies) {
1156                 log_reply_info(0, &m->s.qinfo, &r->query_reply.addr,
1157                         r->query_reply.addrlen, duration, 0, r_buffer);
1158         }
1159 }
1160
1161 void mesh_query_done(struct mesh_state* mstate)
1162 {
1163         struct mesh_reply* r;
1164         struct mesh_reply* prev = NULL;
1165         struct sldns_buffer* prev_buffer = NULL;
1166         struct mesh_cb* c;
1167         struct reply_info* rep = (mstate->s.return_msg?
1168                 mstate->s.return_msg->rep:NULL);
1169         if((mstate->s.return_rcode == LDNS_RCODE_SERVFAIL ||
1170                 (rep && FLAGS_GET_RCODE(rep->flags) == LDNS_RCODE_SERVFAIL))
1171                 && mstate->s.env->cfg->log_servfail
1172                 && !mstate->s.env->cfg->val_log_squelch) {
1173                 char* err = errinf_to_str_servfail(&mstate->s);
1174                 if(err)
1175                         log_err("%s", err);
1176                 free(err);
1177         }
1178         for(r = mstate->reply_list; r; r = r->next) {
1179                 /* if a response-ip address block has been stored, the
1180                  * information should be logged for each client. */
1181                 if(mstate->s.respip_action_info &&
1182                         mstate->s.respip_action_info->addrinfo) {
1183                         respip_inform_print(mstate->s.respip_action_info->addrinfo,
1184                                 r->qname, mstate->s.qinfo.qtype,
1185                                 mstate->s.qinfo.qclass, r->local_alias,
1186                                 &r->query_reply);
1187                 }
1188
1189                 /* if this query is determined to be dropped during the
1190                  * mesh processing, this is the point to take that action. */
1191                 if(mstate->s.is_drop)
1192                         comm_point_drop_reply(&r->query_reply);
1193                 else {
1194                         struct sldns_buffer* r_buffer = r->query_reply.c->buffer;
1195                         if(r->query_reply.c->tcp_req_info)
1196                                 r_buffer = r->query_reply.c->tcp_req_info->spool_buffer;
1197                         mesh_send_reply(mstate, mstate->s.return_rcode, rep,
1198                                 r, r_buffer, prev, prev_buffer);
1199                         if(r->query_reply.c->tcp_req_info)
1200                                 tcp_req_info_remove_mesh_state(r->query_reply.c->tcp_req_info, mstate);
1201                         prev = r;
1202                         prev_buffer = r_buffer;
1203                 }
1204         }
1205         mstate->replies_sent = 1;
1206         while((c = mstate->cb_list) != NULL) {
1207                 /* take this cb off the list; so that the list can be
1208                  * changed, eg. by adds from the callback routine */
1209                 if(!mstate->reply_list && mstate->cb_list && !c->next) {
1210                         /* was a reply state, not anymore */
1211                         mstate->s.env->mesh->num_reply_states--;
1212                 }
1213                 mstate->cb_list = c->next;
1214                 if(!mstate->reply_list && !mstate->cb_list &&
1215                         mstate->super_set.count == 0)
1216                         mstate->s.env->mesh->num_detached_states++;
1217                 mesh_do_callback(mstate, mstate->s.return_rcode, rep, c);
1218         }
1219 }
1220
1221 void mesh_walk_supers(struct mesh_area* mesh, struct mesh_state* mstate)
1222 {
1223         struct mesh_state_ref* ref;
1224         RBTREE_FOR(ref, struct mesh_state_ref*, &mstate->super_set)
1225         {
1226                 /* make super runnable */
1227                 (void)rbtree_insert(&mesh->run, &ref->s->run_node);
1228                 /* callback the function to inform super of result */
1229                 fptr_ok(fptr_whitelist_mod_inform_super(
1230                         mesh->mods.mod[ref->s->s.curmod]->inform_super));
1231                 (*mesh->mods.mod[ref->s->s.curmod]->inform_super)(&mstate->s, 
1232                         ref->s->s.curmod, &ref->s->s);
1233                 /* copy state that is always relevant to super */
1234                 copy_state_to_super(&mstate->s, ref->s->s.curmod, &ref->s->s);
1235         }
1236 }
1237
1238 struct mesh_state* mesh_area_find(struct mesh_area* mesh,
1239         struct respip_client_info* cinfo, struct query_info* qinfo,
1240         uint16_t qflags, int prime, int valrec)
1241 {
1242         struct mesh_state key;
1243         struct mesh_state* result;
1244
1245         key.node.key = &key;
1246         key.s.is_priming = prime;
1247         key.s.is_valrec = valrec;
1248         key.s.qinfo = *qinfo;
1249         key.s.query_flags = qflags;
1250         /* We are searching for a similar mesh state when we DO want to
1251          * aggregate the state.  Thus unique is set to NULL (the default
1252          * when we desire aggregation). */
1253         key.unique = NULL;
1254         key.s.client_info = cinfo;
1255         
1256         result = (struct mesh_state*)rbtree_search(&mesh->all, &key);
1257         return result;
1258 }
1259
1260 int mesh_state_add_cb(struct mesh_state* s, struct edns_data* edns,
1261         sldns_buffer* buf, mesh_cb_func_type cb, void* cb_arg,
1262         uint16_t qid, uint16_t qflags)
1263 {
1264         struct mesh_cb* r = regional_alloc(s->s.region, 
1265                 sizeof(struct mesh_cb));
1266         if(!r)
1267                 return 0;
1268         r->buf = buf;
1269         log_assert(fptr_whitelist_mesh_cb(cb)); /* early failure if missing */
1270         r->cb = cb;
1271         r->cb_arg = cb_arg;
1272         r->edns = *edns;
1273         if(edns->opt_list) {
1274                 r->edns.opt_list = edns_opt_copy_region(edns->opt_list,
1275                         s->s.region);
1276                 if(!r->edns.opt_list)
1277                         return 0;
1278         }
1279         r->qid = qid;
1280         r->qflags = qflags;
1281         r->next = s->cb_list;
1282         s->cb_list = r;
1283         return 1;
1284
1285 }
1286
1287 int mesh_state_add_reply(struct mesh_state* s, struct edns_data* edns,
1288         struct comm_reply* rep, uint16_t qid, uint16_t qflags,
1289         const struct query_info* qinfo)
1290 {
1291         struct mesh_reply* r = regional_alloc(s->s.region, 
1292                 sizeof(struct mesh_reply));
1293         if(!r)
1294                 return 0;
1295         r->query_reply = *rep;
1296         r->edns = *edns;
1297         if(edns->opt_list) {
1298                 r->edns.opt_list = edns_opt_copy_region(edns->opt_list,
1299                         s->s.region);
1300                 if(!r->edns.opt_list)
1301                         return 0;
1302         }
1303         r->qid = qid;
1304         r->qflags = qflags;
1305         r->start_time = *s->s.env->now_tv;
1306         r->next = s->reply_list;
1307         r->qname = regional_alloc_init(s->s.region, qinfo->qname,
1308                 s->s.qinfo.qname_len);
1309         if(!r->qname)
1310                 return 0;
1311
1312         /* Data related to local alias stored in 'qinfo' (if any) is ephemeral
1313          * and can be different for different original queries (even if the
1314          * replaced query name is the same).  So we need to make a deep copy
1315          * and store the copy for each reply info. */
1316         if(qinfo->local_alias) {
1317                 struct packed_rrset_data* d;
1318                 struct packed_rrset_data* dsrc;
1319                 r->local_alias = regional_alloc_zero(s->s.region,
1320                         sizeof(*qinfo->local_alias));
1321                 if(!r->local_alias)
1322                         return 0;
1323                 r->local_alias->rrset = regional_alloc_init(s->s.region,
1324                         qinfo->local_alias->rrset,
1325                         sizeof(*qinfo->local_alias->rrset));
1326                 if(!r->local_alias->rrset)
1327                         return 0;
1328                 dsrc = qinfo->local_alias->rrset->entry.data;
1329
1330                 /* In the current implementation, a local alias must be
1331                  * a single CNAME RR (see worker_handle_request()). */
1332                 log_assert(!qinfo->local_alias->next && dsrc->count == 1 &&
1333                         qinfo->local_alias->rrset->rk.type ==
1334                         htons(LDNS_RR_TYPE_CNAME));
1335                 /* Technically, we should make a local copy for the owner
1336                  * name of the RRset, but in the case of the first (and
1337                  * currently only) local alias RRset, the owner name should
1338                  * point to the qname of the corresponding query, which should
1339                  * be valid throughout the lifetime of this mesh_reply.  So
1340                  * we can skip copying. */
1341                 log_assert(qinfo->local_alias->rrset->rk.dname ==
1342                         sldns_buffer_at(rep->c->buffer, LDNS_HEADER_SIZE));
1343
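                /* Descriptive note (not in the original source): the copy
                 * below is carved out of one allocation: the fixed
                 * packed_rrset_data header, immediately followed by room for
                 * exactly one rr_len (size_t), one rr_data pointer (uint8_t*)
                 * and one rr_ttl (time_t), matching the single CNAME RR
                 * asserted above. */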
1344                 d = regional_alloc_init(s->s.region, dsrc,
1345                         sizeof(struct packed_rrset_data)
1346                         + sizeof(size_t) + sizeof(uint8_t*) + sizeof(time_t));
1347                 if(!d)
1348                         return 0;
1349                 r->local_alias->rrset->entry.data = d;
1350                 d->rr_len = (size_t*)((uint8_t*)d +
1351                         sizeof(struct packed_rrset_data));
1352                 d->rr_data = (uint8_t**)&(d->rr_len[1]);
1353                 d->rr_ttl = (time_t*)&(d->rr_data[1]);
1354                 d->rr_len[0] = dsrc->rr_len[0];
1355                 d->rr_ttl[0] = dsrc->rr_ttl[0];
1356                 d->rr_data[0] = regional_alloc_init(s->s.region,
1357                         dsrc->rr_data[0], d->rr_len[0]);
1358                 if(!d->rr_data[0])
1359                         return 0;
1360         } else
1361                 r->local_alias = NULL;
1362
1363         s->reply_list = r;
1364         return 1;
1365 }
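/* Note (descriptive, not in the original source): each client waiting on the
 * same (qname, qtype, qclass) gets its own mesh_reply entry on the state's
 * reply_list, carrying its own address, query id, flags and edns data, so
 * that one recursion can answer all of them when it completes. */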
1366
1367 /* Extract the query info and flags from 'mstate' into '*qinfop' and '*qflags'.
1368  * Since this is only used for the internal refetch of an otherwise-expired
1369  * answer, we simply ignore the rare case where memory allocation fails. */
1370 static void
1371 mesh_copy_qinfo(struct mesh_state* mstate, struct query_info** qinfop,
1372         uint16_t* qflags)
1373 {
1374         struct regional* region = mstate->s.env->scratch;
1375         struct query_info* qinfo;
1376
1377         qinfo = regional_alloc_init(region, &mstate->s.qinfo, sizeof(*qinfo));
1378         if(!qinfo)
1379                 return;
1380         qinfo->qname = regional_alloc_init(region, qinfo->qname,
1381                 qinfo->qname_len);
1382         if(!qinfo->qname)
1383                 return;
1384         *qinfop = qinfo;
1385         *qflags = mstate->s.query_flags;
1386 }
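/* Note (descriptive, not in the original source): the copy made above is
 * allocated in the thread's scratch region, so it stays valid only until that
 * region is cleared again; this is sufficient because mesh_continue() hands
 * it to mesh_schedule_prefetch() right after deleting the old state. */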
1387
1388 /**
1389  * Continue processing the mesh state at another module.
1390  * Handles module to modules transfer of control.
1391  * Handles module finished.
1392  * @param mesh: the mesh area.
1393  * @param mstate: currently active mesh state.
1394  *      Deleted if finished, calls _done and _supers to 
1395  *      send replies to clients and inform other mesh states.
1396  *      This in turn may create additional runnable mesh states.
1397  * @param s: state at which the current module exited.
1398  * @param ev: the event sent to the module.
1399  *      returned is the event to send to the next module.
1400  * @return true if continue processing at the new module.
1401  *      false if not continued processing is needed.
1402  */
1403 static int
1404 mesh_continue(struct mesh_area* mesh, struct mesh_state* mstate,
1405         enum module_ext_state s, enum module_ev* ev)
1406 {
1407         mstate->num_activated++;
1408         if(mstate->num_activated > MESH_MAX_ACTIVATION) {
1409                 /* module is looping. Stop it. */
1410                 log_err("internal error: looping module (%s) stopped",
1411                         mesh->mods.mod[mstate->s.curmod]->name);
1412                 log_query_info(0, "pass error for qstate",
1413                         &mstate->s.qinfo);
1414                 s = module_error;
1415         }
1416         if(s == module_wait_module || s == module_restart_next) {
1417                 /* start next module */
1418                 mstate->s.curmod++;
1419                 if(mesh->mods.num == mstate->s.curmod) {
1420                         log_err("Cannot pass to next module; at last module");
1421                         log_query_info(VERB_QUERY, "pass error for qstate",
1422                                 &mstate->s.qinfo);
1423                         mstate->s.curmod--;
1424                         return mesh_continue(mesh, mstate, module_error, ev);
1425                 }
1426                 if(s == module_restart_next) {
1427                         int curmod = mstate->s.curmod;
1428                         for(; mstate->s.curmod < mesh->mods.num; 
1429                                 mstate->s.curmod++) {
1430                                 fptr_ok(fptr_whitelist_mod_clear(
1431                                         mesh->mods.mod[mstate->s.curmod]->clear));
1432                                 (*mesh->mods.mod[mstate->s.curmod]->clear)
1433                                         (&mstate->s, mstate->s.curmod);
1434                                 mstate->s.minfo[mstate->s.curmod] = NULL;
1435                         }
1436                         mstate->s.curmod = curmod;
1437                 }
1438                 *ev = module_event_pass;
1439                 return 1;
1440         }
1441         if(s == module_wait_subquery && mstate->sub_set.count == 0) {
1442                 log_err("module cannot wait for subquery, subquery list empty");
1443                 log_query_info(VERB_QUERY, "pass error for qstate",
1444                         &mstate->s.qinfo);
1445                 s = module_error;
1446         }
1447         if(s == module_error && mstate->s.return_rcode == LDNS_RCODE_NOERROR) {
1448                 /* module reported an error but set no error rcode; use SERVFAIL (handled below) */
1449                 mstate->s.return_rcode = LDNS_RCODE_SERVFAIL;
1450         }
1451         if(s == module_error) {
1452                 mesh_query_done(mstate);
1453                 mesh_walk_supers(mesh, mstate);
1454                 mesh_state_delete(&mstate->s);
1455                 return 0;
1456         }
1457         if(s == module_finished) {
1458                 if(mstate->s.curmod == 0) {
1459                         struct query_info* qinfo = NULL;
1460                         uint16_t qflags;
1461
1462                         mesh_query_done(mstate);
1463                         mesh_walk_supers(mesh, mstate);
1464
1465                         /* If the answer to the query needs to be refetched
1466                          * from an external DNS server, we'll need to schedule
1467                          * a prefetch after removing the current state, so
1468                          * we need to make a copy of the query info here. */
1469                         if(mstate->s.need_refetch)
1470                                 mesh_copy_qinfo(mstate, &qinfo, &qflags);
1471
1472                         mesh_state_delete(&mstate->s);
1473                         if(qinfo) {
1474                                 mesh_schedule_prefetch(mesh, qinfo, qflags,
1475                                         0, 1);
1476                         }
1477                         return 0;
1478                 }
1479                 /* pass control back to the previous module */
1480                 mstate->s.curmod --;
1481                 *ev = module_event_moddone;
1482                 return 1;
1483         }
1484         return 0;
1485 }
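/* Summary of the transitions handled above (descriptive note, not in the
 * original source):
 *  - module_wait_module / module_restart_next: advance curmod and re-enter
 *    the next module with module_event_pass; restart_next additionally
 *    clears the per-module state of the following modules first.
 *  - module_wait_subquery with an empty subquery set, or module_error: the
 *    state ends in error (SERVFAIL unless an error rcode was already set),
 *    replies are sent, supers are informed and the state is deleted.
 *  - module_finished at module 0: replies are sent, supers are informed, the
 *    state is deleted and, if needed, a prefetch is scheduled.
 *  - module_finished at a later module: control returns to the previous
 *    module with module_event_moddone. */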
1486
1487 void mesh_run(struct mesh_area* mesh, struct mesh_state* mstate,
1488         enum module_ev ev, struct outbound_entry* e)
1489 {
1490         enum module_ext_state s;
1491         verbose(VERB_ALGO, "mesh_run: start");
1492         while(mstate) {
1493                 /* run the module */
1494                 fptr_ok(fptr_whitelist_mod_operate(
1495                         mesh->mods.mod[mstate->s.curmod]->operate));
1496                 (*mesh->mods.mod[mstate->s.curmod]->operate)
1497                         (&mstate->s, ev, mstate->s.curmod, e);
1498
1499                 /* examine results */
1500                 mstate->s.reply = NULL;
1501                 regional_free_all(mstate->s.env->scratch);
1502                 s = mstate->s.ext_state[mstate->s.curmod];
1503                 verbose(VERB_ALGO, "mesh_run: %s module exit state is %s", 
1504                         mesh->mods.mod[mstate->s.curmod]->name, strextstate(s));
1505                 e = NULL;
1506                 if(mesh_continue(mesh, mstate, s, &ev))
1507                         continue;
1508
1509                 /* run more modules */
1510                 ev = module_event_pass;
1511                 if(mesh->run.count > 0) {
1512                         /* pop an arbitrary element (the root) off the runnable tree */
1513                         mstate = (struct mesh_state*)mesh->run.root->key;
1514                         (void)rbtree_delete(&mesh->run, mstate);
1515                 } else mstate = NULL;
1516         }
1517         if(verbosity >= VERB_ALGO) {
1518                 mesh_stats(mesh, "mesh_run: end");
1519                 mesh_log_list(mesh);
1520         }
1521 }
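/* Note (descriptive, not in the original source): mesh_run() is the
 * per-thread driver loop.  It calls the current module's operate function,
 * lets mesh_continue() interpret the returned ext_state, and when the
 * current state needs no more work it pops the next runnable state off the
 * mesh->run tree until that tree is empty.  The outbound_entry is only
 * meaningful for the first activation; later iterations pass NULL. */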
1522
1523 void 
1524 mesh_log_list(struct mesh_area* mesh)
1525 {
1526         char buf[30];
1527         struct mesh_state* m;
1528         int num = 0;
1529         RBTREE_FOR(m, struct mesh_state*, &mesh->all) {
1530                 snprintf(buf, sizeof(buf), "%d%s%s%s%s%s%s mod%d %s%s", 
1531                         num++, (m->s.is_priming)?"p":"",  /* prime */
1532                         (m->s.is_valrec)?"v":"",  /* valrec */
1533                         (m->s.query_flags&BIT_RD)?"RD":"",
1534                         (m->s.query_flags&BIT_CD)?"CD":"",
1535                         (m->super_set.count==0)?"d":"", /* detached */
1536                         (m->sub_set.count!=0)?"c":"",  /* children */
1537                         m->s.curmod, (m->reply_list)?"rep":"", /* has reply */
1538                         (m->cb_list)?"cb":"" /* callbacks */
1539                         ); 
1540                 log_query_info(VERB_ALGO, buf, &m->s.qinfo);
1541         }
1542 }
1543
1544 void 
1545 mesh_stats(struct mesh_area* mesh, const char* str)
1546 {
1547         verbose(VERB_DETAIL, "%s %u recursion states (%u with reply, "
1548                 "%u detached), %u waiting replies, %u recursion replies "
1549                 "sent, %u replies dropped, %u states jostled out",
1550                 str, (unsigned)mesh->all.count, 
1551                 (unsigned)mesh->num_reply_states,
1552                 (unsigned)mesh->num_detached_states,
1553                 (unsigned)mesh->num_reply_addrs,
1554                 (unsigned)mesh->replies_sent,
1555                 (unsigned)mesh->stats_dropped,
1556                 (unsigned)mesh->stats_jostled);
1557         if(mesh->replies_sent > 0) {
1558                 struct timeval avg;
1559                 timeval_divide(&avg, &mesh->replies_sum_wait, 
1560                         mesh->replies_sent);
1561                 log_info("average recursion processing time "
1562                         ARG_LL "d.%6.6d sec",
1563                         (long long)avg.tv_sec, (int)avg.tv_usec);
1564                 log_info("histogram of recursion processing times");
1565                 timehist_log(mesh->histogram, "recursions");
1566         }
1567 }
1568
1569 void 
1570 mesh_stats_clear(struct mesh_area* mesh)
1571 {
1572         if(!mesh)
1573                 return;
1574         mesh->replies_sent = 0;
1575         mesh->replies_sum_wait.tv_sec = 0;
1576         mesh->replies_sum_wait.tv_usec = 0;
1577         mesh->stats_jostled = 0;
1578         mesh->stats_dropped = 0;
1579         timehist_clear(mesh->histogram);
1580         mesh->ans_secure = 0;
1581         mesh->ans_bogus = 0;
1582         memset(&mesh->ans_rcode[0], 0, sizeof(size_t)*16);
1583         mesh->ans_nodata = 0;
1584 }
1585
1586 size_t 
1587 mesh_get_mem(struct mesh_area* mesh)
1588 {
1589         struct mesh_state* m;
1590         size_t s = sizeof(*mesh) + sizeof(struct timehist) +
1591                 sizeof(struct th_buck)*mesh->histogram->num +
1592                 sizeof(sldns_buffer) + sldns_buffer_capacity(mesh->qbuf_bak);
1593         RBTREE_FOR(m, struct mesh_state*, &mesh->all) {
1594                 /* everything, including m itself, is allocated in the qstate region */
1595                 s += regional_get_mem(m->s.region);
1596         }
1597         return s;
1598 }
1599
1600 int 
1601 mesh_detect_cycle(struct module_qstate* qstate, struct query_info* qinfo,
1602         uint16_t flags, int prime, int valrec)
1603 {
1604         struct mesh_area* mesh = qstate->env->mesh;
1605         struct mesh_state* dep_m = NULL;
1606         if(!mesh_state_is_unique(qstate->mesh_info))
1607                 dep_m = mesh_area_find(mesh, NULL, qinfo, flags, prime, valrec);
1608         return mesh_detect_cycle_found(qstate, dep_m);
1609 }
1610
1611 void mesh_list_insert(struct mesh_state* m, struct mesh_state** fp,
1612         struct mesh_state** lp)
1613 {
1614         /* insert as last element */
1615         m->prev = *lp;
1616         m->next = NULL;
1617         if(*lp)
1618                 (*lp)->next = m;
1619         else    *fp = m;
1620         *lp = m;
1621 }
1622
1623 void mesh_list_remove(struct mesh_state* m, struct mesh_state** fp,
1624         struct mesh_state** lp)
1625 {
1626         if(m->next)
1627                 m->next->prev = m->prev;
1628         else    *lp = m->prev;
1629         if(m->prev)
1630                 m->prev->next = m->next;
1631         else    *fp = m->next;
1632 }
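/* Note (descriptive, not in the original source): these two helpers keep an
 * intrusive doubly-linked list through the prev/next members of
 * struct mesh_state; fp and lp point at the list's head and tail pointers,
 * for example the jostle and forever lists kept in the mesh area. */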
1633
1634 void mesh_state_remove_reply(struct mesh_area* mesh, struct mesh_state* m,
1635         struct comm_point* cp)
1636 {
1637         struct mesh_reply* n, *prev = NULL;
1638         n = m->reply_list;
1639         /* mesh_cleanup sets the reply_list to NULL beforehand, so that
1640          * the accounting is not done twice */
1641         if(!n) return; /* nothing to remove, also no accounting needed */
1642         while(n) {
1643                 if(n->query_reply.c == cp) {
1644                         /* unlink it */
1645                         if(prev) prev->next = n->next;
1646                         else m->reply_list = n->next;
1647                         /* remove it; it is allocated in the m region, so no free is needed */
1648                         mesh->num_reply_addrs--;
1649
1650                         /* prev stays the same */
1651                         n = n->next;
1652                         continue;
1653                 }
1654                 prev = n;
1655                 n = n->next;
1656         }
1657         /* it was not detached (because it had a reply list); it may be detached now */
1658         if(!m->reply_list && !m->cb_list
1659                 && m->super_set.count == 0) {
1660                 mesh->num_detached_states++;
1661         }
1662         /* if no replies remain in mstate, it is no longer a reply_state */
1663         if(!m->reply_list && !m->cb_list) {
1664                 log_assert(mesh->num_reply_states > 0);
1665                 mesh->num_reply_states--;
1666         }
1667 }