1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1983, 1988, 1993
5  *      The Regents of the University of California.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of the University nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31
32 #include "defs.h"
33 static struct rt_spare *rts_better(struct rt_entry *);
34 static struct rt_spare rts_empty = {0,0,0,HOPCNT_INFINITY,0,0,0};
35 static void  set_need_flash(void);
36 #ifdef _HAVE_SIN_LEN
37 static void masktrim(struct sockaddr_in *ap);
38 #else
39 static void masktrim(struct sockaddr_in_new *ap);
40 #endif
41 static void rtbad(struct rt_entry *);
42
43
44 struct radix_node_head *rhead;          /* root of the radix tree */
45
46 int     need_flash = 1;                 /* flash update needed;
47                                          * starts at 1 to suppress the
48                                          * first flash */
49
50 struct timeval age_timer;               /* next check of old routes */
51 struct timeval need_kern = {            /* need to update kernel table */
52         EPOCH+MIN_WAITTIME-1, 0
53 };
54
55 int     stopint;
56
57 int     total_routes;
58
59 /* zap any old routes through this gateway */
60 static naddr age_bad_gate;
61
62
63 /* It is desirable to "aggregate" routes, to combine differing routes of
64  * the same metric and next hop into a common route with a smaller netmask
65  * or to suppress redundant routes, routes that add no information to
66  * routes with smaller netmasks.
67  *
68  * A route is redundant if and only if any and all routes with smaller
69  * but matching netmasks and nets are the same.  Since routes are
70  * kept sorted in the radix tree, redundant routes always come second.
71  *
72  * There are two kinds of aggregations.  First, two routes of the same bit
73  * mask and differing only in the least significant bit of the network
74  * number can be combined into a single route with a coarser mask.
75  *
76  * Second, a route can be suppressed in favor of another route with a more
77  * coarse mask provided no incompatible routes with intermediate masks
78  * are present.  The second kind of aggregation involves suppressing routes.
79  * A route must not be suppressed if an incompatible route exists with
80  * an intermediate mask, since the suppressed route would be covered
81  * by the intermediate.
82  *
83  * This code relies on the radix tree walk encountering routes
84  * sorted first by address, with the smallest address first.
85  */
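/* For illustration only (the addresses are arbitrary): 10.1.2.0/25 and
 * 10.1.2.128/25 with the same next hop and metric can be combined into
 * 10.1.2.0/24 (the first kind), and that 10.1.2.0/24 may in turn be
 * suppressed by a compatible 10.1.0.0/16 route, provided no conflicting
 * route with a /17../23 mask covers it (the second kind).
 */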
86
87 static struct ag_info ag_slots[NUM_AG_SLOTS], *ag_avail, *ag_corsest, *ag_finest;
88
89 /* #define DEBUG_AG */
90 #ifdef DEBUG_AG
91 #define CHECK_AG() {int acnt = 0; struct ag_info *cag;          \
92         for (cag = ag_avail; cag != NULL; cag = cag->ag_fine)   \
93                 acnt++;                                         \
94         for (cag = ag_corsest; cag != NULL; cag = cag->ag_fine) \
95                 acnt++;                                         \
96         if (acnt != NUM_AG_SLOTS) {                             \
97                 (void)fflush(stderr);                           \
98                 abort();                                        \
99         }                                                       \
100 }
101 #else
102 #define CHECK_AG()
103 #endif
104
105
106 /* Output the contents of an aggregation table slot.
107  *      This function must always be immediately followed by the deletion
108  *      of the target slot.
109  */
110 static void
111 ag_out(struct ag_info *ag,
112          void (*out)(struct ag_info *))
113 {
114         struct ag_info *ag_cors;
115         naddr bit;
116
117
118         /* Forget it if this route should not be output for split-horizon. */
119         if (ag->ag_state & AGS_SPLIT_HZ)
120                 return;
121
122         /* If we output both the even and odd twins, then the immediate parent,
123          * if it is present, is redundant, unless the parent manages to
124          * aggregate into something coarser.
125          * On successive calls, this code detects the even and odd twins,
126          * and marks the parent.
127          *
128          * Note that the order in which the radix tree code emits routes
129          * ensures that the twins are seen before the parent is emitted.
130          */
131         ag_cors = ag->ag_cors;
132         if (ag_cors != NULL
133             && ag_cors->ag_mask == ag->ag_mask<<1
134             && ag_cors->ag_dst_h == (ag->ag_dst_h & ag_cors->ag_mask)) {
135                 ag_cors->ag_state |= ((ag_cors->ag_dst_h == ag->ag_dst_h)
136                                       ? AGS_REDUN0
137                                       : AGS_REDUN1);
138         }
139
140         /* Skip it if this route is itself redundant.
141          *
142          * It is ok to change the contents of the slot here, since it is
143          * always deleted next.
144          */
145         if (ag->ag_state & AGS_REDUN0) {
146                 if (ag->ag_state & AGS_REDUN1)
147                         return;         /* quit if fully redundant */
148                 /* make it finer if it is half-redundant */
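                /* For example, with ag_mask 0xffffff00 (/24), bit is 0x80,
                 * so the slot is narrowed to the odd /25 half; only the
                 * even half was found redundant.
                 */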
149                 bit = (-ag->ag_mask) >> 1;
150                 ag->ag_dst_h |= bit;
151                 ag->ag_mask |= bit;
152
153         } else if (ag->ag_state & AGS_REDUN1) {
154                 /* make it finer if it is half-redundant */
155                 bit = (-ag->ag_mask) >> 1;
156                 ag->ag_mask |= bit;
157         }
158         out(ag);
159 }
160
161
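/* Return an aggregation slot to the free list, unlinking it from the
 * coarse/fine chain and updating ag_corsest/ag_finest as needed.
 */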
162 static void
163 ag_del(struct ag_info *ag)
164 {
165         CHECK_AG();
166
167         if (ag->ag_cors == NULL)
168                 ag_corsest = ag->ag_fine;
169         else
170                 ag->ag_cors->ag_fine = ag->ag_fine;
171
172         if (ag->ag_fine == NULL)
173                 ag_finest = ag->ag_cors;
174         else
175                 ag->ag_fine->ag_cors = ag->ag_cors;
176
177         ag->ag_fine = ag_avail;
178         ag_avail = ag;
179
180         CHECK_AG();
181 }
182
183
184 /* Flush routes waiting for aggregation.
185  *      This must not suppress a route unless it is known that among all
186  *      routes with coarser masks that match it, the one with the longest
187  *      mask is appropriate.  This is ensured by scanning the routes
188  *      in lexical order, and with the most restrictive mask first
189  *      among routes to the same destination.
190  */
191 void
192 ag_flush(naddr lim_dst_h,               /* flush routes to here */
193          naddr lim_mask,                /* matching this mask */
194          void (*out)(struct ag_info *))
195 {
196         struct ag_info *ag, *ag_cors;
197         naddr dst_h;
198
199
200         for (ag = ag_finest;
201              ag != NULL && ag->ag_mask >= lim_mask;
202              ag = ag_cors) {
203                 ag_cors = ag->ag_cors;
204
205                 /* work on only the specified routes */
206                 dst_h = ag->ag_dst_h;
207                 if ((dst_h & lim_mask) != lim_dst_h)
208                         continue;
209
210                 if (!(ag->ag_state & AGS_SUPPRESS))
211                         ag_out(ag, out);
212
213                 else for ( ; ; ag_cors = ag_cors->ag_cors) {
214                         /* Look for a route that can suppress the
215                          * current route */
216                         if (ag_cors == NULL) {
217                                 /* failed, so output it and look for
218                                  * another route to work on
219                                  */
220                                 ag_out(ag, out);
221                                 break;
222                         }
223
224                         if ((dst_h & ag_cors->ag_mask) == ag_cors->ag_dst_h) {
225                                 /* We found a route with a coarser mask that
226                                  * aggregates the current target.
227                                  *
228                                  * If it has a different next hop, it
229                                  * cannot replace the target, so output
230                                  * the target.
231                                  */
232                                 if (ag->ag_gate != ag_cors->ag_gate
233                                     && !(ag->ag_state & AGS_FINE_GATE)
234                                     && !(ag_cors->ag_state & AGS_CORS_GATE)) {
235                                         ag_out(ag, out);
236                                         break;
237                                 }
238
239                                 /* If the coarse route has a good enough
240                                  * metric, it suppresses the target.
241                                  * If the suppressed target was redundant,
242                                  * then mark the suppressor redundant.
243                                  */
244                                 if (ag_cors->ag_pref <= ag->ag_pref) {
245                                     if (AG_IS_REDUN(ag->ag_state)
246                                         && ag_cors->ag_mask==ag->ag_mask<<1) {
247                                         if (ag_cors->ag_dst_h == dst_h)
248                                             ag_cors->ag_state |= AGS_REDUN0;
249                                         else
250                                             ag_cors->ag_state |= AGS_REDUN1;
251                                     }
252                                     if (ag->ag_tag != ag_cors->ag_tag)
253                                             ag_cors->ag_tag = 0;
254                                     if (ag->ag_nhop != ag_cors->ag_nhop)
255                                             ag_cors->ag_nhop = 0;
256                                     break;
257                                 }
258                         }
259                 }
260
261                 /* That route has either been output or suppressed */
262                 ag_cors = ag->ag_cors;
263                 ag_del(ag);
264         }
265
266         CHECK_AG();
267 }
268
269
270 /* Try to aggregate a route with previous routes.
271  */
272 void
273 ag_check(naddr  dst,
274          naddr  mask,
275          naddr  gate,
276          naddr  nhop,
277          char   metric,
278          char   pref,
279          u_int  new_seqno,
280          u_short tag,
281          u_short state,
282          void (*out)(struct ag_info *)) /* output using this */
283 {
284         struct ag_info *ag, *nag, *ag_cors;
285         naddr xaddr;
286         int x;
287
288         dst = ntohl(dst);
289
290         /* Punt non-contiguous subnet masks.
291          *
292          * (X & -X) is the lowest set bit of X, so (X + (X & -X)) wraps
293          * to 0 exactly when the set bits of X run contiguously down from
294          * the most significant bit, i.e. when X is a netmask ~0 << n.  */
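        /* Example, in 32-bit naddr arithmetic: 0xffffff00 & -0xffffff00 is
         * 0x00000100 and their sum wraps to 0, so a /24 mask passes, while
         * 0xffff00ff & -0xffff00ff is 0x00000001 and 0xffff00ff + 1 is
         * 0xffff0100 != 0, so that non-contiguous mask is punted.
         */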
295         if ((mask & -mask) + mask != 0) {
296                 struct ag_info nc_ag;
297
298                 nc_ag.ag_dst_h = dst;
299                 nc_ag.ag_mask = mask;
300                 nc_ag.ag_gate = gate;
301                 nc_ag.ag_nhop = nhop;
302                 nc_ag.ag_metric = metric;
303                 nc_ag.ag_pref = pref;
304                 nc_ag.ag_tag = tag;
305                 nc_ag.ag_state = state;
306                 nc_ag.ag_seqno = new_seqno;
307                 out(&nc_ag);
308                 return;
309         }
310
311         /* Search for the right slot in the aggregation table.
312          */
313         ag_cors = NULL;
314         ag = ag_corsest;
315         while (ag != NULL) {
316                 if (ag->ag_mask >= mask)
317                         break;
318
319                 /* Suppress old routes (i.e. combine with compatible routes
320                  * with coarser masks) as we look for the right slot in the
321                  * aggregation table for the new route.
322                  * A route to an address less than the current destination
323                  * will not be affected by the current route or any route
324                  * seen hereafter.  That means it is safe to suppress it.
325                  * This check keeps poor routes (e.g. with large hop counts)
326                  * from preventing suppression of finer routes.
327                  */
328                 if (ag_cors != NULL
329                     && ag->ag_dst_h < dst
330                     && (ag->ag_state & AGS_SUPPRESS)
331                     && ag_cors->ag_pref <= ag->ag_pref
332                     && (ag->ag_dst_h & ag_cors->ag_mask) == ag_cors->ag_dst_h
333                     && (ag_cors->ag_gate == ag->ag_gate
334                         || (ag->ag_state & AGS_FINE_GATE)
335                         || (ag_cors->ag_state & AGS_CORS_GATE))) {
336                         /*  If the suppressed target was redundant,
337                          * then mark the suppressor redundant.
338                          */
339                         if (AG_IS_REDUN(ag->ag_state)
340                             && ag_cors->ag_mask == ag->ag_mask<<1) {
341                                 if (ag_cors->ag_dst_h == dst)
342                                         ag_cors->ag_state |= AGS_REDUN0;
343                                 else
344                                         ag_cors->ag_state |= AGS_REDUN1;
345                         }
346                         if (ag->ag_tag != ag_cors->ag_tag)
347                                 ag_cors->ag_tag = 0;
348                         if (ag->ag_nhop != ag_cors->ag_nhop)
349                                 ag_cors->ag_nhop = 0;
350                         ag_del(ag);
351                         CHECK_AG();
352                 } else {
353                         ag_cors = ag;
354                 }
355                 ag = ag_cors->ag_fine;
356         }
357
358         /* If we find the even/odd twin of the new route, and if the
359          * masks and so forth are equal, we can aggregate them.
360          * We can probably promote one of the pair.
361          *
362          * Since the routes are encountered in lexical order,
363          * the new route must be odd.  However, the second or later
364          * times around this loop, it could be the even twin promoted
365          * from the even/odd pair of twins of the finer route.
366          */
367         while (ag != NULL
368                && ag->ag_mask == mask
369                && ((ag->ag_dst_h ^ dst) & (mask<<1)) == 0) {
370
371                 /* Here we know the target route and the route in the current
372                  * slot have the same netmasks and differ by at most the
373                  * last bit.  They are either for the same destination, or
374                  * for an even/odd pair of destinations.
375                  */
376                 if (ag->ag_dst_h == dst) {
377                         /* We have two routes to the same destination.
378                          * Routes are encountered in lexical order, so a
379                          * route is never promoted until the parent route is
380                          * already present.  So we know that the new route is
381                          * a promoted (or aggregated) pair and the route
382                          * already in the slot is the explicit route.
383                          *
384                          * Prefer the best route if their metrics differ,
385                          * or the aggregated one if not, following a sort
386                          * of longest-match rule.
387                          */
388                         if (pref <= ag->ag_pref) {
389                                 ag->ag_gate = gate;
390                                 ag->ag_nhop = nhop;
391                                 ag->ag_tag = tag;
392                                 ag->ag_metric = metric;
393                                 ag->ag_pref = pref;
394                                 if (ag->ag_seqno < new_seqno)
395                                         ag->ag_seqno = new_seqno;
396                                 x = ag->ag_state;
397                                 ag->ag_state = state;
398                                 state = x;
399                         }
400
401                         /* Some bits are set if they are set on either route,
402                          * except when the route is for an interface.
403                          */
404                         if (!(ag->ag_state & AGS_IF))
405                                 ag->ag_state |= (state & (AGS_AGGREGATE_EITHER
406                                                         | AGS_REDUN0
407                                                         | AGS_REDUN1));
408                         return;
409                 }
410
411                 /* If one of the routes can be promoted and the other can
412                  * be suppressed, it may be possible to combine them or
413                  * worthwhile to promote one.
414                  *
415                  * Any route that can be promoted is always
416                  * marked to be eligible to be suppressed.
417                  */
418                 if (!((state & AGS_AGGREGATE)
419                       && (ag->ag_state & AGS_SUPPRESS))
420                     && !((ag->ag_state & AGS_AGGREGATE)
421                          && (state & AGS_SUPPRESS)))
422                         break;
423
424                 /* A pair of even/odd twin routes can be combined
425                  * if either is redundant, or if they are via the
426                  * same gateway and have the same metric.
427                  */
428                 if (AG_IS_REDUN(ag->ag_state)
429                     || AG_IS_REDUN(state)
430                     || (ag->ag_gate == gate
431                         && ag->ag_pref == pref
432                         && (state & ag->ag_state & AGS_AGGREGATE) != 0)) {
433
434                         /* We have both the even and odd pairs.
435                          * Since the routes are encountered in order,
436                          * the route in the slot must be the even twin.
437                          *
438                          * Combine and promote (aggregate) the pair of routes.
439                          */
440                         if (new_seqno < ag->ag_seqno)
441                                 new_seqno = ag->ag_seqno;
442                         if (!AG_IS_REDUN(state))
443                                 state &= ~AGS_REDUN1;
444                         if (AG_IS_REDUN(ag->ag_state))
445                                 state |= AGS_REDUN0;
446                         else
447                                 state &= ~AGS_REDUN0;
448                         state |= (ag->ag_state & AGS_AGGREGATE_EITHER);
449                         if (ag->ag_tag != tag)
450                                 tag = 0;
451                         if (ag->ag_nhop != nhop)
452                                 nhop = 0;
453
454                         /* Get rid of the even twin that was already
455                          * in the slot.
456                          */
457                         ag_del(ag);
458
459                 } else if (ag->ag_pref >= pref
460                            && (ag->ag_state & AGS_AGGREGATE)) {
461                         /* If we cannot combine the pair, maybe the route
462                          * with the worse metric can be promoted.
463                          *
464                          * Promote the old, even twin, by giving its slot
465                          * in the table to the new, odd twin.
466                          */
467                         ag->ag_dst_h = dst;
468
469                         xaddr = ag->ag_gate;
470                         ag->ag_gate = gate;
471                         gate = xaddr;
472
473                         xaddr = ag->ag_nhop;
474                         ag->ag_nhop = nhop;
475                         nhop = xaddr;
476
477                         x = ag->ag_tag;
478                         ag->ag_tag = tag;
479                         tag = x;
480
481                         /* The promoted route is even-redundant only if the
482                          * even twin was fully redundant.  It is not
483                          * odd-redundant because the odd-twin will still be
484                          * in the table.
485                          */
486                         x = ag->ag_state;
487                         if (!AG_IS_REDUN(x))
488                                 x &= ~AGS_REDUN0;
489                         x &= ~AGS_REDUN1;
490                         ag->ag_state = state;
491                         state = x;
492
493                         x = ag->ag_metric;
494                         ag->ag_metric = metric;
495                         metric = x;
496
497                         x = ag->ag_pref;
498                         ag->ag_pref = pref;
499                         pref = x;
500
501                         /* take the newest sequence number */
502                         if (new_seqno <= ag->ag_seqno)
503                                 new_seqno = ag->ag_seqno;
504                         else
505                                 ag->ag_seqno = new_seqno;
506
507                 } else {
508                         if (!(state & AGS_AGGREGATE))
509                                 break;  /* cannot promote either twin */
510
511                         /* Promote the new, odd twin by shaving its
512                          * mask and address.
513                          * The promoted route is odd-redundant only if the
514                          * odd twin was fully redundant.  It is not
515                          * even-redundant because the even twin is still in
516                          * the table.
517                          */
518                         if (!AG_IS_REDUN(state))
519                                 state &= ~AGS_REDUN1;
520                         state &= ~AGS_REDUN0;
521                         if (new_seqno < ag->ag_seqno)
522                                 new_seqno = ag->ag_seqno;
523                         else
524                                 ag->ag_seqno = new_seqno;
525                 }
526
527                 mask <<= 1;
528                 dst &= mask;
529
530                 if (ag_cors == NULL) {
531                         ag = ag_corsest;
532                         break;
533                 }
534                 ag = ag_cors;
535                 ag_cors = ag->ag_cors;
536         }
537
538         /* When we can no longer promote and combine routes,
539          * flush the old route in the target slot.  Also flush
540          * any finer routes that we know will never be aggregated by
541          * the new route.
542          *
543          * In case we moved toward coarser masks,
544          * get back where we belong
545          */
546         if (ag != NULL
547             && ag->ag_mask < mask) {
548                 ag_cors = ag;
549                 ag = ag->ag_fine;
550         }
551
552         /* Empty the target slot
553          */
554         if (ag != NULL && ag->ag_mask == mask) {
555                 ag_flush(ag->ag_dst_h, ag->ag_mask, out);
556                 ag = (ag_cors == NULL) ? ag_corsest : ag_cors->ag_fine;
557         }
558
559 #ifdef DEBUG_AG
560         (void)fflush(stderr);
561         if (ag == NULL && ag_cors != ag_finest)
562                 abort();
563         if (ag_cors == NULL && ag != ag_corsest)
564                 abort();
565         if (ag != NULL && ag->ag_cors != ag_cors)
566                 abort();
567         if (ag_cors != NULL && ag_cors->ag_fine != ag)
568                 abort();
569         CHECK_AG();
570 #endif
571
572         /* Save the new route on the end of the table.
573          */
574         nag = ag_avail;
575         ag_avail = nag->ag_fine;
576
577         nag->ag_dst_h = dst;
578         nag->ag_mask = mask;
579         nag->ag_gate = gate;
580         nag->ag_nhop = nhop;
581         nag->ag_metric = metric;
582         nag->ag_pref = pref;
583         nag->ag_tag = tag;
584         nag->ag_state = state;
585         nag->ag_seqno = new_seqno;
586
587         nag->ag_fine = ag;
588         if (ag != NULL)
589                 ag->ag_cors = nag;
590         else
591                 ag_finest = nag;
592         nag->ag_cors = ag_cors;
593         if (ag_cors == NULL)
594                 ag_corsest = nag;
595         else
596                 ag_cors->ag_fine = nag;
597         CHECK_AG();
598 }
599
600 static const char *
601 rtm_type_name(u_char type)
602 {
603         static const char * const rtm_types[] = {
604                 "RTM_ADD",
605                 "RTM_DELETE",
606                 "RTM_CHANGE",
607                 "RTM_GET",
608                 "RTM_LOSING",
609                 "RTM_REDIRECT",
610                 "RTM_MISS",
611                 "RTM_LOCK",
612                 "RTM_OLDADD",
613                 "RTM_OLDDEL",
614                 "RTM_RESOLVE",
615                 "RTM_NEWADDR",
616                 "RTM_DELADDR",
617 #ifdef RTM_OIFINFO
618                 "RTM_OIFINFO",
619 #endif
620                 "RTM_IFINFO",
621                 "RTM_NEWMADDR",
622                 "RTM_DELMADDR"
623         };
624 #define NEW_RTM_PAT "RTM type %#x"
625         static char name0[sizeof(NEW_RTM_PAT)+2];
626
627
628         if (type > sizeof(rtm_types)/sizeof(rtm_types[0])
629             || type == 0) {
630                 snprintf(name0, sizeof(name0), NEW_RTM_PAT, type);
631                 return name0;
632         } else {
633                 return rtm_types[type-1];
634         }
635 #undef NEW_RTM_PAT
636 }
637
638
639 /* Trim a mask in a sockaddr
640  *      Produce a length of 0 for an address of 0.
641  *      Otherwise produce the index of the first zero byte.
642  */
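/* For example, assuming the usual sockaddr_in layout (sin_addr at offset 4),
 * a mask of 255.255.240.0 is trimmed to sin_len 7, just past the 0xf0 byte.
 */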
643 static void
644 #ifdef _HAVE_SIN_LEN
645 masktrim(struct sockaddr_in *ap)
646 #else
647 masktrim(struct sockaddr_in_new *ap)
648 #endif
649 {
650         char *cp;
651
652         if (ap->sin_addr.s_addr == 0) {
653                 ap->sin_len = 0;
654                 return;
655         }
656         cp = (char *)(&ap->sin_addr.s_addr+1);
657         while (*--cp == 0)
658                 continue;
659         ap->sin_len = cp - (char*)ap + 1;
660 }
661
662
663 /* Tell the kernel to add, delete or change a route
664  */
665 static void
666 rtioctl(int action,                     /* RTM_DELETE, etc */
667         naddr dst,
668         naddr gate,
669         naddr mask,
670         int metric,
671         int flags)
672 {
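        /* Build a single routing-socket message: a header followed by the
         * destination, gateway and (for network routes) netmask sockaddrs,
         * and write it to rt_sock.
         */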
673         struct {
674                 struct rt_msghdr w_rtm;
675                 struct sockaddr_in w_dst;
676                 struct sockaddr_in w_gate;
677 #ifdef _HAVE_SA_LEN
678                 struct sockaddr_in w_mask;
679 #else
680                 struct sockaddr_in_new w_mask;
681 #endif
682         } w;
683         long cc;
684 #   define PAT " %-10s %s metric=%d flags=%#x"
685 #   define ARGS rtm_type_name(action), rtname(dst,mask,gate), metric, flags
686
687 again:
688         memset(&w, 0, sizeof(w));
689         w.w_rtm.rtm_msglen = sizeof(w);
690         w.w_rtm.rtm_version = RTM_VERSION;
691         w.w_rtm.rtm_type = action;
692         w.w_rtm.rtm_flags = flags;
693         w.w_rtm.rtm_seq = ++rt_sock_seqno;
694         w.w_rtm.rtm_addrs = RTA_DST|RTA_GATEWAY;
695         if (metric != 0 || action == RTM_CHANGE) {
696                 w.w_rtm.rtm_rmx.rmx_hopcount = metric;
697                 w.w_rtm.rtm_inits |= RTV_HOPCOUNT;
698         }
699         w.w_dst.sin_family = AF_INET;
700         w.w_dst.sin_addr.s_addr = dst;
701         w.w_gate.sin_family = AF_INET;
702         w.w_gate.sin_addr.s_addr = gate;
703 #ifdef _HAVE_SA_LEN
704         w.w_dst.sin_len = sizeof(w.w_dst);
705         w.w_gate.sin_len = sizeof(w.w_gate);
706 #endif
707         if (mask == HOST_MASK) {
708                 w.w_rtm.rtm_flags |= RTF_HOST;
709                 w.w_rtm.rtm_msglen -= sizeof(w.w_mask);
710         } else {
711                 w.w_rtm.rtm_addrs |= RTA_NETMASK;
712                 w.w_mask.sin_addr.s_addr = htonl(mask);
713 #ifdef _HAVE_SA_LEN
714                 masktrim(&w.w_mask);
715                 if (w.w_mask.sin_len == 0)
716                         w.w_mask.sin_len = sizeof(long);
717                 w.w_rtm.rtm_msglen -= (sizeof(w.w_mask) - w.w_mask.sin_len);
718 #endif
719         }
720
721 #ifndef NO_INSTALL
722         cc = write(rt_sock, &w, w.w_rtm.rtm_msglen);
723         if (cc < 0) {
724                 if (errno == ESRCH
725                     && (action == RTM_CHANGE || action == RTM_DELETE)) {
726                         trace_act("route disappeared before" PAT, ARGS);
727                         if (action == RTM_CHANGE) {
728                                 action = RTM_ADD;
729                                 goto again;
730                         }
731                         return;
732                 }
733                 msglog("write(rt_sock)" PAT ": %s", ARGS, strerror(errno));
734                 return;
735         } else if (cc != w.w_rtm.rtm_msglen) {
736                 msglog("write(rt_sock) wrote %ld instead of %d for" PAT,
737                        cc, w.w_rtm.rtm_msglen, ARGS);
738                 return;
739         }
740 #endif
741         if (TRACEKERNEL)
742                 trace_misc("write kernel" PAT, ARGS);
743 #undef PAT
744 #undef ARGS
745 }
746
747
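/* khash_bins is the daemon's image of the kernel forwarding table, hashed
 * by destination and mask.  The KS_* bits record work that still needs to
 * be pushed to, or removed from, the kernel.
 */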
748 #define KHASH_SIZE 71                   /* should be prime */
749 #define KHASH(a,m) khash_bins[((a) ^ (m)) % KHASH_SIZE]
750 static struct khash {
751         struct khash *k_next;
752         naddr   k_dst;
753         naddr   k_mask;
754         naddr   k_gate;
755         short   k_metric;
756         u_short k_state;
757 #define     KS_NEW      0x001
758 #define     KS_DELETE   0x002           /* need to delete the route */
759 #define     KS_ADD      0x004           /* add to the kernel */
760 #define     KS_CHANGE   0x008           /* tell kernel to change the route */
761 #define     KS_DEL_ADD  0x010           /* delete & add to change the kernel */
762 #define     KS_STATIC   0x020           /* Static flag in kernel */
763 #define     KS_GATEWAY  0x040           /* G flag in kernel */
764 #define     KS_DYNAMIC  0x080           /* result of redirect */
765 #define     KS_DELETED  0x100           /* already deleted from kernel */
766 #define     KS_CHECK    0x200
767         time_t  k_keep;
768 #define     K_KEEP_LIM  30
769         time_t  k_redirect_time;        /* when redirected route 1st seen */
770 } *khash_bins[KHASH_SIZE];
771
772
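/* Look up the kernel-image entry for (dst, mask); optionally return the
 * pointer that links to it so the caller can insert or unlink entries.
 */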
773 static struct khash*
774 kern_find(naddr dst, naddr mask, struct khash ***ppk)
775 {
776         struct khash *k, **pk;
777
778         for (pk = &KHASH(dst,mask); (k = *pk) != NULL; pk = &k->k_next) {
779                 if (k->k_dst == dst && k->k_mask == mask)
780                         break;
781         }
782         if (ppk != NULL)
783                 *ppk = pk;
784         return k;
785 }
786
787
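/* Find the kernel-image entry for (dst, mask), creating a fresh KS_NEW
 * entry if none exists yet.
 */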
788 static struct khash*
789 kern_add(naddr dst, naddr mask)
790 {
791         struct khash *k, **pk;
792
793         k = kern_find(dst, mask, &pk);
794         if (k != NULL)
795                 return k;
796
797         k = (struct khash *)rtmalloc(sizeof(*k), "kern_add");
798
799         memset(k, 0, sizeof(*k));
800         k->k_dst = dst;
801         k->k_mask = mask;
802         k->k_state = KS_NEW;
803         k->k_keep = now.tv_sec;
804         *pk = k;
805
806         return k;
807 }
808
809
810 /* If a kernel route has a non-zero metric, check that it is still in the
811  *      daemon table, and not deleted by interfaces coming and going.
812  */
813 static void
814 kern_check_static(struct khash *k,
815                   struct interface *ifp)
816 {
817         struct rt_entry *rt;
818         struct rt_spare new;
819
820         if (k->k_metric == 0)
821                 return;
822
823         memset(&new, 0, sizeof(new));
824         new.rts_ifp = ifp;
825         new.rts_gate = k->k_gate;
826         new.rts_router = (ifp != NULL) ? ifp->int_addr : loopaddr;
827         new.rts_metric = k->k_metric;
828         new.rts_time = now.tv_sec;
829
830         rt = rtget(k->k_dst, k->k_mask);
831         if (rt != NULL) {
832                 if (!(rt->rt_state & RS_STATIC))
833                         rtchange(rt, rt->rt_state | RS_STATIC, &new, 0);
834         } else {
835                 rtadd(k->k_dst, k->k_mask, RS_STATIC, &new);
836         }
837 }
838
839
840 /* operate on a kernel entry
841  */
842 static void
843 kern_ioctl(struct khash *k,
844            int action,                  /* RTM_DELETE, etc */
845            int flags)
846
847 {
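        /* Track whether the route is currently present in the kernel:
         * a redundant delete is skipped, and a change to a route that has
         * already been deleted is turned into an add.
         */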
848         switch (action) {
849         case RTM_DELETE:
850                 k->k_state &= ~KS_DYNAMIC;
851                 if (k->k_state & KS_DELETED)
852                         return;
853                 k->k_state |= KS_DELETED;
854                 break;
855         case RTM_ADD:
856                 k->k_state &= ~KS_DELETED;
857                 break;
858         case RTM_CHANGE:
859                 if (k->k_state & KS_DELETED) {
860                         action = RTM_ADD;
861                         k->k_state &= ~KS_DELETED;
862                 }
863                 break;
864         }
865
866         rtioctl(action, k->k_dst, k->k_gate, k->k_mask, k->k_metric, flags);
867 }
868
869
870 /* add a route the kernel told us about
871  */
872 static void
873 rtm_add(struct rt_msghdr *rtm,
874         struct rt_addrinfo *info,
875         time_t keep)
876 {
877         struct khash *k;
878         struct interface *ifp;
879         naddr mask;
880
881
882         if (rtm->rtm_flags & RTF_HOST) {
883                 mask = HOST_MASK;
884         } else if (INFO_MASK(info) != 0) {
885                 mask = ntohl(S_ADDR(INFO_MASK(info)));
886         } else {
887                 msglog("ignore %s without mask", rtm_type_name(rtm->rtm_type));
888                 return;
889         }
890
891         k = kern_add(S_ADDR(INFO_DST(info)), mask);
892         if (k->k_state & KS_NEW)
893                 k->k_keep = now.tv_sec+keep;
894         if (INFO_GATE(info) == 0) {
895                 trace_act("note %s without gateway",
896                           rtm_type_name(rtm->rtm_type));
897                 k->k_metric = HOPCNT_INFINITY;
898         } else if (INFO_GATE(info)->sa_family != AF_INET) {
899                 trace_act("note %s with gateway AF=%d",
900                           rtm_type_name(rtm->rtm_type),
901                           INFO_GATE(info)->sa_family);
902                 k->k_metric = HOPCNT_INFINITY;
903         } else {
904                 k->k_gate = S_ADDR(INFO_GATE(info));
905                 k->k_metric = rtm->rtm_rmx.rmx_hopcount;
906                 if (k->k_metric < 0)
907                         k->k_metric = 0;
908                 else if (k->k_metric > HOPCNT_INFINITY-1)
909                         k->k_metric = HOPCNT_INFINITY-1;
910         }
911         k->k_state &= ~(KS_DELETE | KS_ADD | KS_CHANGE | KS_DEL_ADD
912                         | KS_DELETED | KS_GATEWAY | KS_STATIC
913                         | KS_NEW | KS_CHECK);
914         if (rtm->rtm_flags & RTF_GATEWAY)
915                 k->k_state |= KS_GATEWAY;
916         if (rtm->rtm_flags & RTF_STATIC)
917                 k->k_state |= KS_STATIC;
918
919         if (0 != (rtm->rtm_flags & (RTF_DYNAMIC | RTF_MODIFIED))) {
920                 if (INFO_AUTHOR(info) != 0
921                     && INFO_AUTHOR(info)->sa_family == AF_INET)
922                         ifp = iflookup(S_ADDR(INFO_AUTHOR(info)));
923                 else
924                         ifp = NULL;
925                 if (supplier
926                     && (ifp == NULL || !(ifp->int_state & IS_REDIRECT_OK))) {
927                         /* Routers are not supposed to listen to redirects,
928                          * so delete it if it came via an unknown interface
929                          * or the interface does not have special permission.
930                          */
931                         k->k_state &= ~KS_DYNAMIC;
932                         k->k_state |= KS_DELETE;
933                         LIM_SEC(need_kern, 0);
934                         trace_act("mark for deletion redirected %s --> %s"
935                                   " via %s",
936                                   addrname(k->k_dst, k->k_mask, 0),
937                                   naddr_ntoa(k->k_gate),
938                                   ifp ? ifp->int_name : "unknown interface");
939                 } else {
940                         k->k_state |= KS_DYNAMIC;
941                         k->k_redirect_time = now.tv_sec;
942                         trace_act("accept redirected %s --> %s via %s",
943                                   addrname(k->k_dst, k->k_mask, 0),
944                                   naddr_ntoa(k->k_gate),
945                                   ifp ? ifp->int_name : "unknown interface");
946                 }
947                 return;
948         }
949
950         /* If it is not a static route, quit until the next comparison
951          * between the kernel and daemon tables, when it will be deleted.
952          */
953         if (!(k->k_state & KS_STATIC)) {
954                 k->k_state |= KS_DELETE;
955                 LIM_SEC(need_kern, k->k_keep);
956                 return;
957         }
958
959         /* Put static routes with real metrics into the daemon table so
960          * they can be advertised.
961          *
962          * Find the interface toward the gateway.
963          */
964         ifp = iflookup(k->k_gate);
965         if (ifp == NULL)
966                 msglog("static route %s --> %s impossibly lacks ifp",
967                        addrname(S_ADDR(INFO_DST(info)), mask, 0),
968                        naddr_ntoa(k->k_gate));
969
970         kern_check_static(k, ifp);
971 }
972
973
974 /* deal with packet loss
975  */
976 static void
977 rtm_lose(struct rt_msghdr *rtm,
978          struct rt_addrinfo *info)
979 {
980         if (INFO_GATE(info) == 0
981             || INFO_GATE(info)->sa_family != AF_INET) {
982                 trace_act("ignore %s without gateway",
983                           rtm_type_name(rtm->rtm_type));
984                 return;
985         }
986
987         if (rdisc_ok)
988                 rdisc_age(S_ADDR(INFO_GATE(info)));
989         age(S_ADDR(INFO_GATE(info)));
990 }
991
992
993 /* Make the gateway slot of an info structure point to something
994  * useful.  If it is not already useful, but it specifies an interface,
995  * then fill in the sockaddr_in provided and point it there.
996  */
997 static int
998 get_info_gate(struct sockaddr **sap,
999               struct sockaddr_in *rsin)
1000 {
1001         struct sockaddr_dl *sdl = (struct sockaddr_dl *)*sap;
1002         struct interface *ifp;
1003
1004         if (sdl == NULL)
1005                 return 0;
1006         if ((sdl)->sdl_family == AF_INET)
1007                 return 1;
1008         if ((sdl)->sdl_family != AF_LINK)
1009                 return 0;
1010
1011         ifp = ifwithindex(sdl->sdl_index, 1);
1012         if (ifp == NULL)
1013                 return 0;
1014
1015         rsin->sin_addr.s_addr = ifp->int_addr;
1016 #ifdef _HAVE_SA_LEN
1017         rsin->sin_len = sizeof(*rsin);
1018 #endif
1019         rsin->sin_family = AF_INET;
1020         *sap = (struct sockaddr*)rsin;
1021
1022         return 1;
1023 }
1024
1025
1026 /* Clean the kernel table by copying it to the daemon image.
1027  * Eventually the daemon will delete any extra routes.
1028  */
1029 void
1030 flush_kern(void)
1031 {
1032         static char *sysctl_buf;
1033         static size_t sysctl_buf_size = 0;
1034         size_t needed;
1035         int mib[6];
1036         char *next, *lim;
1037         struct rt_msghdr *rtm;
1038         struct sockaddr_in gate_sin;
1039         struct rt_addrinfo info;
1040         int i;
1041         struct khash *k;
1042
1043
1044         for (i = 0; i < KHASH_SIZE; i++) {
1045                 for (k = khash_bins[i]; k != NULL; k = k->k_next) {
1046                         k->k_state |= KS_CHECK;
1047                 }
1048         }
1049
1050         mib[0] = CTL_NET;
1051         mib[1] = PF_ROUTE;
1052         mib[2] = 0;             /* protocol */
1053         mib[3] = 0;             /* wildcard address family */
1054         mib[4] = NET_RT_DUMP;
1055         mib[5] = 0;             /* no flags */
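        /* Ask the kernel for a size estimate, then retry with a larger
         * buffer if the dump no longer fits by the time it is fetched.
         */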
1056         for (;;) {
1057                 if ((needed = sysctl_buf_size) != 0) {
1058                         if (sysctl(mib, 6, sysctl_buf,&needed, 0, 0) >= 0)
1059                                 break;
1060                         if (errno != ENOMEM && errno != EFAULT)
1061                                 BADERR(1,"flush_kern: sysctl(RT_DUMP)");
1062                         free(sysctl_buf);
1063                         needed = 0;
1064                 }
1065                 if (sysctl(mib, 6, 0, &needed, 0, 0) < 0)
1066                         BADERR(1,"flush_kern: sysctl(RT_DUMP) estimate");
1067                 /* Kludge around the habit of some systems, such as
1068                  * BSD/OS 3.1, to not admit how many routes are in the
1069                  * kernel, or at least to be quite wrong.
1070                  */
1071                 needed += 50*(sizeof(*rtm)+5*sizeof(struct sockaddr));
1072                 sysctl_buf = rtmalloc(sysctl_buf_size = needed,
1073                                       "flush_kern sysctl(RT_DUMP)");
1074         }
1075
1076         lim = sysctl_buf + needed;
1077         for (next = sysctl_buf; next < lim; next += rtm->rtm_msglen) {
1078                 rtm = (struct rt_msghdr *)next;
1079                 if (rtm->rtm_msglen == 0) {
1080                         msglog("zero length kernel route at "
1081                                " %#lx in buffer %#lx before %#lx",
1082                                (u_long)rtm, (u_long)sysctl_buf, (u_long)lim);
1083                         break;
1084                 }
1085
1086                 rt_xaddrs(&info,
1087                           (struct sockaddr *)(rtm+1),
1088                           (struct sockaddr *)(next + rtm->rtm_msglen),
1089                           rtm->rtm_addrs);
1090
1091                 if (INFO_DST(&info) == 0
1092                     || INFO_DST(&info)->sa_family != AF_INET)
1093                         continue;
1094
1095 #if defined(RTF_LLINFO)
1096                 /* ignore ARP table entries on systems with a merged route
1097                  * and ARP table.
1098                  */
1099                 if (rtm->rtm_flags & RTF_LLINFO)
1100                         continue;
1101 #endif
1102 #if defined(RTF_WASCLONED) && defined(__FreeBSD__)
1103                 /* ignore cloned routes
1104                  */
1105                 if (rtm->rtm_flags & RTF_WASCLONED)
1106                         continue;
1107 #endif
1108
1109                 /* ignore multicast addresses
1110                  */
1111                 if (IN_MULTICAST(ntohl(S_ADDR(INFO_DST(&info)))))
1112                         continue;
1113
1114                 if (!get_info_gate(&INFO_GATE(&info), &gate_sin))
1115                         continue;
1116
1117                 /* Note static routes and interface routes, and also
1118                  * preload the image of the kernel table so that
1119                  * we can later clean it, as well as avoid making
1120                  * unneeded changes.  Keep the old kernel routes for a
1121                  * few seconds to allow a RIP or router-discovery
1122                  * response to be heard.
1123                  */
1124                 rtm_add(rtm,&info,MIN_WAITTIME);
1125         }
1126
1127         for (i = 0; i < KHASH_SIZE; i++) {
1128                 for (k = khash_bins[i]; k != NULL; k = k->k_next) {
1129                         if (k->k_state & KS_CHECK) {
1130                                 msglog("%s --> %s disappeared from kernel",
1131                                        addrname(k->k_dst, k->k_mask, 0),
1132                                        naddr_ntoa(k->k_gate));
1133                                 del_static(k->k_dst, k->k_mask, k->k_gate, 1);
1134                         }
1135                 }
1136         }
1137 }
1138
1139
1140 /* Listen to announcements from the kernel
1141  */
1142 void
1143 read_rt(void)
1144 {
1145         long cc;
1146         struct interface *ifp;
1147         struct sockaddr_in gate_sin;
1148         naddr mask, gate;
1149         union {
1150                 struct {
1151                         struct rt_msghdr rtm;
1152                         struct sockaddr addrs[RTAX_MAX];
1153                 } r;
1154                 struct if_msghdr ifm;
1155         } m;
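        /* m receives either a routing message with its sockaddrs or an
         * interface message; both formats share the initial length, version
         * and type fields used to dispatch below.
         */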
1156         char str[100], *strp;
1157         struct rt_addrinfo info;
1158
1159
1160         for (;;) {
1161                 cc = read(rt_sock, &m, sizeof(m));
1162                 if (cc <= 0) {
1163                         if (cc < 0 && errno != EWOULDBLOCK)
1164                                 LOGERR("read(rt_sock)");
1165                         return;
1166                 }
1167
1168                 if (m.r.rtm.rtm_version != RTM_VERSION) {
1169                         msglog("bogus routing message version %d",
1170                                m.r.rtm.rtm_version);
1171                         continue;
1172                 }
1173
1174                 /* Ignore our own results.
1175                  */
1176                 if (m.r.rtm.rtm_type <= RTM_CHANGE
1177                     && m.r.rtm.rtm_pid == mypid) {
1178                         static int complained = 0;
1179                         if (!complained) {
1180                                 msglog("receiving our own change messages");
1181                                 complained = 1;
1182                         }
1183                         continue;
1184                 }
1185
1186                 if (m.r.rtm.rtm_type == RTM_IFINFO
1187                     || m.r.rtm.rtm_type == RTM_NEWADDR
1188                     || m.r.rtm.rtm_type == RTM_DELADDR) {
1189                         ifp = ifwithindex(m.ifm.ifm_index,
1190                                           m.r.rtm.rtm_type != RTM_DELADDR);
1191                         if (ifp == NULL)
1192                                 trace_act("note %s with flags %#x"
1193                                           " for unknown interface index #%d",
1194                                           rtm_type_name(m.r.rtm.rtm_type),
1195                                           m.ifm.ifm_flags,
1196                                           m.ifm.ifm_index);
1197                         else
1198                                 trace_act("note %s with flags %#x for %s",
1199                                           rtm_type_name(m.r.rtm.rtm_type),
1200                                           m.ifm.ifm_flags,
1201                                           ifp->int_name);
1202
1203                         /* After being informed of a change to an interface,
1204                          * check them all now if the check would otherwise
1205                          * be a long time from now, if the interface is
1206                          * not known, or if the interface has been turned
1207                          * off or on.
1208                          */
1209                         if (ifinit_timer.tv_sec-now.tv_sec>=CHECK_BAD_INTERVAL
1210                             || ifp == NULL
1211                             || ((ifp->int_if_flags ^ m.ifm.ifm_flags)
1212                                 & IFF_UP) != 0)
1213                                 ifinit_timer.tv_sec = now.tv_sec;
1214                         continue;
1215                 }
1216 #ifdef RTM_OIFINFO
1217                 if (m.r.rtm.rtm_type == RTM_OIFINFO)
1218                         continue;       /* ignore compat message */
1219 #endif
1220
1221                 strlcpy(str, rtm_type_name(m.r.rtm.rtm_type), sizeof(str));
1222                 strp = &str[strlen(str)];
1223                 if (m.r.rtm.rtm_type <= RTM_CHANGE)
1224                         strp += sprintf(strp," from pid %d",m.r.rtm.rtm_pid);
1225
1226                 /*
1227                  * Only messages that use the struct rt_msghdr format are
1228                  * allowed beyond this point.
1229                  */
1230                 if (m.r.rtm.rtm_type > RTM_RESOLVE) {
1231                         trace_act("ignore %s", str);
1232                         continue;
1233                 }
1234                 
1235                 rt_xaddrs(&info, m.r.addrs, &m.r.addrs[RTAX_MAX],
1236                           m.r.rtm.rtm_addrs);
1237
1238                 if (INFO_DST(&info) == 0) {
1239                         trace_act("ignore %s without dst", str);
1240                         continue;
1241                 }
1242
1243                 if (INFO_DST(&info)->sa_family != AF_INET) {
1244                         trace_act("ignore %s for AF %d", str,
1245                                   INFO_DST(&info)->sa_family);
1246                         continue;
1247                 }
1248
1249                 mask = ((INFO_MASK(&info) != 0)
1250                         ? ntohl(S_ADDR(INFO_MASK(&info)))
1251                         : (m.r.rtm.rtm_flags & RTF_HOST)
1252                         ? HOST_MASK
1253                         : std_mask(S_ADDR(INFO_DST(&info))));
1254
1255                 strp += sprintf(strp, ": %s",
1256                                 addrname(S_ADDR(INFO_DST(&info)), mask, 0));
1257
1258                 if (IN_MULTICAST(ntohl(S_ADDR(INFO_DST(&info))))) {
1259                         trace_act("ignore multicast %s", str);
1260                         continue;
1261                 }
1262
1263 #if defined(RTF_LLINFO) 
1264                 if (m.r.rtm.rtm_flags & RTF_LLINFO) {
1265                         trace_act("ignore ARP %s", str);
1266                         continue;
1267                 }
1268 #endif
1269                 
1270 #if defined(RTF_WASCLONED) && defined(__FreeBSD__)
1271                 if (m.r.rtm.rtm_flags & RTF_WASCLONED) {
1272                         trace_act("ignore cloned %s", str);
1273                         continue;
1274                 }
1275 #endif
1276
1277                 if (get_info_gate(&INFO_GATE(&info), &gate_sin)) {
1278                         gate = S_ADDR(INFO_GATE(&info));
1279                         strp += sprintf(strp, " --> %s", naddr_ntoa(gate));
1280                 } else {
1281                         gate = 0;
1282                 }
1283
1284                 if (INFO_AUTHOR(&info) != 0)
1285                         strp += sprintf(strp, " by authority of %s",
1286                                         saddr_ntoa(INFO_AUTHOR(&info)));
1287
1288                 switch (m.r.rtm.rtm_type) {
1289                 case RTM_ADD:
1290                 case RTM_CHANGE:
1291                 case RTM_REDIRECT:
1292                         if (m.r.rtm.rtm_errno != 0) {
1293                                 trace_act("ignore %s with \"%s\" error",
1294                                           str, strerror(m.r.rtm.rtm_errno));
1295                         } else {
1296                                 trace_act("%s", str);
1297                                 rtm_add(&m.r.rtm,&info,0);
1298                         }
1299                         break;
1300
1301                 case RTM_DELETE:
1302                         if (m.r.rtm.rtm_errno != 0
1303                             && m.r.rtm.rtm_errno != ESRCH) {
1304                                 trace_act("ignore %s with \"%s\" error",
1305                                           str, strerror(m.r.rtm.rtm_errno));
1306                         } else {
1307                                 trace_act("%s", str);
1308                                 del_static(S_ADDR(INFO_DST(&info)), mask,
1309                                            gate, 1);
1310                         }
1311                         break;
1312
1313                 case RTM_LOSING:
1314                         trace_act("%s", str);
1315                         rtm_lose(&m.r.rtm,&info);
1316                         break;
1317
1318                 default:
1319                         trace_act("ignore %s", str);
1320                         break;
1321                 }
1322         }
1323 }
1324
1325
1326 /* after aggregating, note routes that belong in the kernel
1327  */
1328 static void
1329 kern_out(struct ag_info *ag)
1330 {
1331         struct khash *k;
1332
1333
1334         /* Do not install bad routes if they are not already present.
1335          * This includes routes that had RS_NET_SYN for interfaces that
1336          * recently died.
1337          */
1338         if (ag->ag_metric == HOPCNT_INFINITY) {
1339                 k = kern_find(htonl(ag->ag_dst_h), ag->ag_mask, 0);
1340                 if (k == NULL)
1341                         return;
1342         } else {
1343                 k = kern_add(htonl(ag->ag_dst_h), ag->ag_mask);
1344         }
1345
1346         if (k->k_state & KS_NEW) {
1347                 /* will need to add new entry to the kernel table */
1348                 k->k_state = KS_ADD;
1349                 if (ag->ag_state & AGS_GATEWAY)
1350                         k->k_state |= KS_GATEWAY;
1351                 k->k_gate = ag->ag_gate;
1352                 k->k_metric = ag->ag_metric;
1353                 return;
1354         }
1355
1356         if (k->k_state & KS_STATIC)
1357                 return;
1358
1359         /* modify existing kernel entry if necessary */
1360         if (k->k_gate != ag->ag_gate
1361             || k->k_metric != ag->ag_metric) {
1362                 /* Must delete bad interface routes etc. to change them. */
1363                 if (k->k_metric == HOPCNT_INFINITY)
1364                         k->k_state |= KS_DEL_ADD;
1365                 k->k_gate = ag->ag_gate;
1366                 k->k_metric = ag->ag_metric;
1367                 k->k_state |= KS_CHANGE;
1368         }
1369
1370         /* If the daemon thinks the route should exist, forget
1371          * about any redirections.
1372          * If the daemon thinks the route should exist, eventually
1373          * override manual intervention by the operator.
1374          */
1375         if ((k->k_state & (KS_DYNAMIC | KS_DELETED)) != 0) {
1376                 k->k_state &= ~KS_DYNAMIC;
1377                 k->k_state |= (KS_ADD | KS_DEL_ADD);
1378         }
1379
1380         if ((k->k_state & KS_GATEWAY)
1381             && !(ag->ag_state & AGS_GATEWAY)) {
1382                 k->k_state &= ~KS_GATEWAY;
1383                 k->k_state |= (KS_ADD | KS_DEL_ADD);
1384         } else if (!(k->k_state & KS_GATEWAY)
1385                    && (ag->ag_state & AGS_GATEWAY)) {
1386                 k->k_state |= KS_GATEWAY;
1387                 k->k_state |= (KS_ADD | KS_DEL_ADD);
1388         }
1389
1390         /* Deleting-and-adding is necessary to change aspects of a route.
1391          * Just delete instead of deleting and then adding a bad route.
1392          * Otherwise, we want to keep the route in the kernel.
1393          */
1394         if (k->k_metric == HOPCNT_INFINITY
1395             && (k->k_state & KS_DEL_ADD))
1396                 k->k_state |= KS_DELETE;
1397         else
1398                 k->k_state &= ~KS_DELETE;
1400 }
1401
1402
1403 /* ARGSUSED */
1404 static int
1405 walk_kern(struct radix_node *rn,
1406           struct walkarg *argp UNUSED)
1407 {
1408 #define RT ((struct rt_entry *)rn)
1409         char metric, pref;
1410         u_int ags = 0;
1411
1412
1413         /* Do not install synthetic routes */
1414         if (RT->rt_state & RS_NET_SYN)
1415                 return 0;
1416
1417         if (!(RT->rt_state & RS_IF)) {
1418                 /* This is an ordinary route, not for an interface.
1419                  */
1420
1421                 /* aggregate ordinary good routes without regard to
1422                  * their metric
1423                  */
1424                 pref = 1;
1425                 ags |= (AGS_GATEWAY | AGS_SUPPRESS | AGS_AGGREGATE);
1426
1427                 /* Do not install host routes directly to hosts, to avoid
1428                  * interfering with ARP entries in the kernel table.
1429                  */
1430                 if (RT_ISHOST(RT)
1431                     && ntohl(RT->rt_dst) == RT->rt_gate)
1432                         return 0;
1433
1434         } else {
1435                 /* This is an interface route.
1436                  * Do not install routes for "external" remote interfaces.
1437                  */
1438                 if (RT->rt_ifp != 0 && (RT->rt_ifp->int_state & IS_EXTERNAL))
1439                         return 0;
1440
1441                 /* Interfaces should override received routes.
1442                  */
1443                 pref = 0;
1444                 ags |= (AGS_IF | AGS_CORS_GATE);
1445
1446                 /* If it is not an interface, or an alias for an interface,
1447                  * it must be a "gateway."
1448                  *
1449                  * If it is a "remote" interface, it is also a "gateway" to
1450                  * the kernel if it is not an alias.
1451                  */
1452                 if (RT->rt_ifp == 0
1453                     || (RT->rt_ifp->int_state & IS_REMOTE))
1454                         ags |= (AGS_GATEWAY | AGS_SUPPRESS | AGS_AGGREGATE);
1455         }
1456
1457         /* If RIP is off and IRDP is on, let the route to the discovered
1458          * router suppress any RIP routes.  Eventually the RIP routes
1459          * will time out and be deleted, reaching the steady state
1460          * more quickly.
1461          */
1462         if ((RT->rt_state & RS_RDISC) && rip_sock < 0)
1463                 ags |= AGS_CORS_GATE;
1464
1465         metric = RT->rt_metric;
1466         if (metric == HOPCNT_INFINITY) {
1467                 /* The route is dead, so try hard to aggregate. */
1468                 pref = HOPCNT_INFINITY;
1469                 ags |= (AGS_FINE_GATE | AGS_SUPPRESS);
1470                 ags &= ~(AGS_IF | AGS_CORS_GATE);
1471         }
1472
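             /* Feed the route to the aggregation code; kern_out() sees the result. */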
1473         ag_check(RT->rt_dst, RT->rt_mask, RT->rt_gate, 0,
1474                  metric,pref, 0, 0, ags, kern_out);
1475         return 0;
1476 #undef RT
1477 }
1478
1479
1480 /* Update the kernel table to match the daemon table.
1481  */
1482 static void
1483 fix_kern(void)
1484 {
1485         int i;
1486         struct khash *k, **pk;
1487
1488
1489         need_kern = age_timer;
1490
1491         /* Walk daemon table, updating the copy of the kernel table.
1492          */
1493         (void)rn_walktree(rhead, walk_kern, 0);
1494         ag_flush(0,0,kern_out);
1495
1496         for (i = 0; i < KHASH_SIZE; i++) {
1497                 for (pk = &khash_bins[i]; (k = *pk) != NULL; ) {
1498                         /* Do not touch static routes */
1499                         if (k->k_state & KS_STATIC) {
1500                                 kern_check_static(k,0);
1501                                 pk = &k->k_next;
1502                                 continue;
1503                         }
1504
1505                         /* check hold on routes deleted by the operator */
1506                         if (k->k_keep > now.tv_sec) {
1507                                 /* ensure we check when the hold is over */
1508                                 LIM_SEC(need_kern, k->k_keep);
1509                                 /* mark for the next cycle */
1510                                 k->k_state |= KS_DELETE;
1511                                 pk = &k->k_next;
1512                                 continue;
1513                         }
1514
1515                         if ((k->k_state & KS_DELETE)
1516                             && !(k->k_state & KS_DYNAMIC)) {
1517                                 kern_ioctl(k, RTM_DELETE, 0);
1518                                 *pk = k->k_next;
1519                                 free(k);
1520                                 continue;
1521                         }
1522
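                             /* Some changes require deleting the old
                              * route before re-installing it.
                              */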
1523                         if (k->k_state & KS_DEL_ADD)
1524                                 kern_ioctl(k, RTM_DELETE, 0);
1525
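                             /* Install or modify the route, with RTF_GATEWAY
                              * for routes that go through a gateway.
                              */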
1526                         if (k->k_state & KS_ADD) {
1527                                 kern_ioctl(k, RTM_ADD,
1528                                            ((0 != (k->k_state & (KS_GATEWAY
1529                                                         | KS_DYNAMIC)))
1530                                             ? RTF_GATEWAY : 0));
1531                         } else if (k->k_state & KS_CHANGE) {
1532                                 kern_ioctl(k,  RTM_CHANGE,
1533                                            ((0 != (k->k_state & (KS_GATEWAY
1534                                                         | KS_DYNAMIC)))
1535                                             ? RTF_GATEWAY : 0));
1536                         }
1537                         k->k_state &= ~(KS_ADD|KS_CHANGE|KS_DEL_ADD);
1538
1539                         /* Mark this route to be deleted in the next cycle.
1540                          * This deletes routes that disappear from the
1541                          * daemon table, since the normal aging code
1542                          * will clear the bit for routes that have not
1543                          * disappeared from the daemon table.
1544                          */
1545                         k->k_state |= KS_DELETE;
1546                         pk = &k->k_next;
1547                 }
1548         }
1549 }
1550
1551
1552 /* Delete a static route in the image of the kernel table.
1553  */
1554 void
1555 del_static(naddr dst,
1556            naddr mask,
1557            naddr gate,
1558            int gone)
1559 {
1560         struct khash *k;
1561         struct rt_entry *rt;
1562
1563         /* Just mark it in the table to be deleted next time the kernel
1564          * table is updated.
1565          * If it has already been deleted, mark it as such, and set its
1566          * keep-timer so that it will not be deleted again for a while.
1567          * This lets the operator delete a route added by the daemon
1568          * and add a replacement.
1569          */
1570         k = kern_find(dst, mask, 0);
1571         if (k != NULL && (gate == 0 || k->k_gate == gate)) {
1572                 k->k_state &= ~(KS_STATIC | KS_DYNAMIC | KS_CHECK);
1573                 k->k_state |= KS_DELETE;
1574                 if (gone) {
1575                         k->k_state |= KS_DELETED;
1576                         k->k_keep = now.tv_sec + K_KEEP_LIM;
1577                 }
1578         }
1579
1580         rt = rtget(dst, mask);
1581         if (rt != NULL && (rt->rt_state & RS_STATIC))
1582                 rtbad(rt);
1583 }
1584
1585
1586 /* Delete all routes generated from ICMP Redirects that use a given gateway,
1587  * as well as old redirected routes.
1588  */
1589 void
1590 del_redirects(naddr bad_gate,
1591               time_t old)
1592 {
1593         int i;
1594         struct khash *k;
1595
1596
1597         for (i = 0; i < KHASH_SIZE; i++) {
1598                 for (k = khash_bins[i]; k != NULL; k = k->k_next) {
1599                         if (!(k->k_state & KS_DYNAMIC)
1600                             || (k->k_state & KS_STATIC))
1601                                 continue;
1602
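                             /* A supplier drops every redirect; others keep
                              * fresh redirects not using the bad gateway.
                              */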
1603                         if (k->k_gate != bad_gate
1604                             && k->k_redirect_time > old
1605                             && !supplier)
1606                                 continue;
1607
1608                         k->k_state |= KS_DELETE;
1609                         k->k_state &= ~KS_DYNAMIC;
1610                         need_kern.tv_sec = now.tv_sec;
1611                         trace_act("mark redirected %s --> %s for deletion",
1612                                   addrname(k->k_dst, k->k_mask, 0),
1613                                   naddr_ntoa(k->k_gate));
1614                 }
1615         }
1616 }
1617
1618
1619 /* Start the daemon tables.
1620  */
1621 extern int max_keylen;
1622
1623 void
1624 rtinit(void)
1625 {
1626         int i;
1627         struct ag_info *ag;
1628
1629         /* Initialize the radix trees */
1630         max_keylen = sizeof(struct sockaddr_in);
1631         rn_init();
1632         rn_inithead(&rhead, 32);
1633
1634         /* mark all of the slots in the table free */
1635         ag_avail = ag_slots;
1636         for (ag = ag_slots, i = 1; i < NUM_AG_SLOTS; i++) {
1637                 ag->ag_fine = ag+1;
1638                 ag++;
1639         }
1640 }
1641
1642
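     /* Scratch sockaddrs used for radix-tree lookups, additions, and deletions. */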
1643 #ifdef _HAVE_SIN_LEN
1644 static struct sockaddr_in dst_sock = {sizeof(dst_sock), AF_INET, 0, {0}, {0}};
1645 static struct sockaddr_in mask_sock = {sizeof(mask_sock), AF_INET, 0, {0}, {0}};
1646 #else
1647 static struct sockaddr_in_new dst_sock = {_SIN_ADDR_SIZE, AF_INET};
1648 static struct sockaddr_in_new mask_sock = {_SIN_ADDR_SIZE, AF_INET};
1649 #endif
1650
1651
1652 static void
1653 set_need_flash(void)
1654 {
1655         if (!need_flash) {
1656                 need_flash = 1;
1657                 /* Do not send the flash update immediately.  Wait a little
1658                  * while to hear from other routers.
1659                  */
1660                 no_flash.tv_sec = now.tv_sec + MIN_WAITTIME;
1661         }
1662 }
1663
1664
1665 /* Get a particular routing table entry
1666  */
1667 struct rt_entry *
1668 rtget(naddr dst, naddr mask)
1669 {
1670         struct rt_entry *rt;
1671
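             /* Build the keys and search the radix tree for an exact match. */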
1672         dst_sock.sin_addr.s_addr = dst;
1673         mask_sock.sin_addr.s_addr = htonl(mask);
1674         masktrim(&mask_sock);
1675         rt = (struct rt_entry *)rhead->rnh_lookup(&dst_sock,&mask_sock,rhead);
1676         if (!rt
1677             || rt->rt_dst != dst
1678             || rt->rt_mask != mask)
1679                 return 0;
1680
1681         return rt;
1682 }
1683
1684
1685 /* Find a route to dst as the kernel would.
1686  */
1687 struct rt_entry *
1688 rtfind(naddr dst)
1689 {
1690         dst_sock.sin_addr.s_addr = dst;
1691         return (struct rt_entry *)rhead->rnh_matchaddr(&dst_sock, rhead);
1692 }
1693
1694
1695 /* add a route to the table
1696  */
1697 void
1698 rtadd(naddr     dst,
1699       naddr     mask,
1700       u_int     state,                  /* rt_state for the entry */
1701       struct    rt_spare *new)
1702 {
1703         struct rt_entry *rt;
1704         naddr smask;
1705         int i;
1706         struct rt_spare *rts;
1707
1708         rt = (struct rt_entry *)rtmalloc(sizeof (*rt), "rtadd");
1709         memset(rt, 0, sizeof(*rt));
1710         for (rts = rt->rt_spares, i = NUM_SPARES; i != 0; i--, rts++)
1711                 rts->rts_metric = HOPCNT_INFINITY;
1712
1713         rt->rt_nodes->rn_key = (caddr_t)&rt->rt_dst_sock;
1714         rt->rt_dst = dst;
1715         rt->rt_dst_sock.sin_family = AF_INET;
1716 #ifdef _HAVE_SIN_LEN
1717         rt->rt_dst_sock.sin_len = dst_sock.sin_len;
1718 #endif
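             /* A mask longer than the destination's natural (classful)
              * mask marks this as a subnet route.
              */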
1719         if (mask != HOST_MASK) {
1720                 smask = std_mask(dst);
1721                 if ((smask & ~mask) == 0 && mask > smask)
1722                         state |= RS_SUBNET;
1723         }
1724         mask_sock.sin_addr.s_addr = htonl(mask);
1725         masktrim(&mask_sock);
1726         rt->rt_mask = mask;
1727         rt->rt_state = state;
1728         rt->rt_spares[0] = *new;
1729         rt->rt_time = now.tv_sec;
1730         rt->rt_poison_metric = HOPCNT_INFINITY;
1731         rt->rt_seqno = update_seqno;
1732
1733         if (++total_routes == MAX_ROUTES)
1734                 msglog("have maximum (%d) routes", total_routes);
1735         if (TRACEACTIONS)
1736                 trace_add_del("Add", rt);
1737
1738         need_kern.tv_sec = now.tv_sec;
1739         set_need_flash();
1740
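             /* Link the new entry into the radix tree; on failure, log and discard it. */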
1741         if (0 == rhead->rnh_addaddr(&rt->rt_dst_sock, &mask_sock,
1742                                     rhead, rt->rt_nodes)) {
1743                 msglog("rnh_addaddr() failed for %s mask=%#lx",
1744                        naddr_ntoa(dst), (u_long)mask);
1745                 free(rt);
1746         }
1747 }
1748
1749
1750 /* notice a changed route
1751  */
1752 void
1753 rtchange(struct rt_entry *rt,
1754          u_int  state,                  /* new state bits */
1755          struct rt_spare *new,
1756          char   *label)
1757 {
1758         if (rt->rt_metric != new->rts_metric) {
1759                 /* Fix the kernel immediately if it seems the route
1760                  * has gone bad, since there may be a working route that
1761                  * aggregates this route.
1762                  */
1763                 if (new->rts_metric == HOPCNT_INFINITY) {
1764                         need_kern.tv_sec = now.tv_sec;
1765                         if (new->rts_time >= now.tv_sec - EXPIRE_TIME)
1766                                 new->rts_time = now.tv_sec - EXPIRE_TIME;
1767                 }
1768                 rt->rt_seqno = update_seqno;
1769                 set_need_flash();
1770         }
1771
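             /* A changed gateway also requires a kernel update and a flash update. */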
1772         if (rt->rt_gate != new->rts_gate) {
1773                 need_kern.tv_sec = now.tv_sec;
1774                 rt->rt_seqno = update_seqno;
1775                 set_need_flash();
1776         }
1777
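             /* RS_SUBNET describes the destination itself, so always carry it over. */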
1778         state |= (rt->rt_state & RS_SUBNET);
1779
1780         /* Keep various things from deciding ageless routes are stale.
1781          */
1782         if (!AGE_RT(state, new->rts_ifp))
1783                 new->rts_time = now.tv_sec;
1784
1785         if (TRACEACTIONS)
1786                 trace_change(rt, state, new,
1787                              label ? label : "Chg   ");
1788
1789         rt->rt_state = state;
1790         rt->rt_spares[0] = *new;
1791 }
1792
1793
1794 /* check for a better route among the spares
1795  */
1796 static struct rt_spare *
1797 rts_better(struct rt_entry *rt)
1798 {
1799         struct rt_spare *rts, *rts1;
1800         int i;
1801
1802         /* find the best alternative among the spares */
1803         rts = rt->rt_spares+1;
1804         for (i = NUM_SPARES, rts1 = rts+1; i > 2; i--, rts1++) {
1805                 if (BETTER_LINK(rt,rts1,rts))
1806                         rts = rts1;
1807         }
1808
1809         return rts;
1810 }
1811
1812
1813 /* switch to a backup route
1814  */
1815 void
1816 rtswitch(struct rt_entry *rt,
1817          struct rt_spare *rts)
1818 {
1819         struct rt_spare swap;
1820         char label[10];
1821
1822
1823         /* Do not change permanent routes */
1824         if (0 != (rt->rt_state & (RS_MHOME | RS_STATIC | RS_RDISC
1825                                   | RS_NET_SYN | RS_IF)))
1826                 return;
1827
1828         /* find the best alternative among the spares */
1829         if (rts == NULL)
1830                 rts = rts_better(rt);
1831
1832         /* Do not bother if it is not worthwhile.
1833          */
1834         if (!BETTER_LINK(rt, rts, rt->rt_spares))
1835                 return;
1836
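             /* Promote the chosen spare and keep the old primary as a
              * spare unless it is dead.
              */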
1837         swap = rt->rt_spares[0];
1838         (void)sprintf(label, "Use #%d", (int)(rts - rt->rt_spares));
1839         rtchange(rt, rt->rt_state & ~(RS_NET_SYN | RS_RDISC), rts, label);
1840         if (swap.rts_metric == HOPCNT_INFINITY) {
1841                 *rts = rts_empty;
1842         } else {
1843                 *rts = swap;
1844         }
1845 }
1846
1847
1848 void
1849 rtdelete(struct rt_entry *rt)
1850 {
1851         struct khash *k;
1852
1853
1854         if (TRACEACTIONS)
1855                 trace_add_del("Del", rt);
1856
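             /* Have fix_kern() remove the matching route from the kernel table. */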
1857         k = kern_find(rt->rt_dst, rt->rt_mask, 0);
1858         if (k != NULL) {
1859                 k->k_state |= KS_DELETE;
1860                 need_kern.tv_sec = now.tv_sec;
1861         }
1862
1863         dst_sock.sin_addr.s_addr = rt->rt_dst;
1864         mask_sock.sin_addr.s_addr = htonl(rt->rt_mask);
1865         masktrim(&mask_sock);
1866         if (rt != (struct rt_entry *)rhead->rnh_deladdr(&dst_sock, &mask_sock,
1867                                                         rhead)) {
1868                 msglog("rnh_deladdr() failed");
1869         } else {
1870                 free(rt);
1871                 total_routes--;
1872         }
1873 }
1874
1875
1876 void
1877 rts_delete(struct rt_entry *rt,
1878            struct rt_spare *rts)
1879 {
1880         trace_upslot(rt, rts, &rts_empty);
1881         *rts = rts_empty;
1882 }
1883
1884
1885 /* Get rid of a bad route, and try to switch to a replacement.
1886  */
1887 static void
1888 rtbad(struct rt_entry *rt)
1889 {
1890         struct rt_spare new;
1891
1892         /* Poison the route */
1893         new = rt->rt_spares[0];
1894         new.rts_metric = HOPCNT_INFINITY;
1895         rtchange(rt, rt->rt_state & ~(RS_IF | RS_LOCAL | RS_STATIC), &new, 0);
1896         rtswitch(rt, 0);
1897 }
1898
1899
1900 /* Junk a RS_NET_SYN or RS_LOCAL route,
1901  *      unless it is needed by another interface.
1902  */
1903 void
1904 rtbad_sub(struct rt_entry *rt)
1905 {
1906         struct interface *ifp, *ifp1;
1907         struct intnet *intnetp;
1908         u_int state;
1909
1910
1911         ifp1 = NULL;
1912         state = 0;
1913
1914         if (rt->rt_state & RS_LOCAL) {
1915                 /* Is this the route through loopback for the interface?
1916                  * If so, see if it is used by any other interfaces, such
1917                  * as a point-to-point interface with the same local address.
1918                  */
1919                 LIST_FOREACH(ifp, &ifnet, int_list) {
1920                         /* Retain it if another interface needs it.
1921                          */
1922                         if (ifp->int_addr == rt->rt_ifp->int_addr) {
1923                                 state |= RS_LOCAL;
1924                                 ifp1 = ifp;
1925                                 break;
1926                         }
1927                 }
1928
1929         }
1930
1931         if (!(state & RS_LOCAL)) {
1932                 /* Retain RIPv1 logical network route if there is another
1933                  * interface that justifies it.
1934                  */
1935                 if (rt->rt_state & RS_NET_SYN) {
1936                         LIST_FOREACH(ifp, &ifnet, int_list) {
1937                                 if ((ifp->int_state & IS_NEED_NET_SYN)
1938                                     && rt->rt_mask == ifp->int_std_mask
1939                                     && rt->rt_dst == ifp->int_std_addr) {
1940                                         state |= RS_NET_SYN;
1941                                         ifp1 = ifp;
1942                                         break;
1943                                 }
1944                         }
1945                 }
1946
1947                 /* or if there is an authority route that needs it. */
1948                 for (intnetp = intnets;
1949                      intnetp != NULL;
1950                      intnetp = intnetp->intnet_next) {
1951                         if (intnetp->intnet_addr == rt->rt_dst
1952                             && intnetp->intnet_mask == rt->rt_mask) {
1953                                 state |= (RS_NET_SYN | RS_NET_INT);
1954                                 break;
1955                         }
1956                 }
1957         }
1958
1959         if (ifp1 != NULL || (state & RS_NET_SYN)) {
1960                 struct rt_spare new = rt->rt_spares[0];
1961                 new.rts_ifp = ifp1;
1962                 rtchange(rt, ((rt->rt_state & ~(RS_NET_SYN|RS_LOCAL)) | state),
1963                          &new, 0);
1964         } else {
1965                 rtbad(rt);
1966         }
1967 }
1968
1969
1970 /* Called while walking the table looking for sick interfaces
1971  * or after a time change.
1972  */
1973 /* ARGSUSED */
1974 int
1975 walk_bad(struct radix_node *rn,
1976          struct walkarg *argp UNUSED)
1977 {
1978 #define RT ((struct rt_entry *)rn)
1979         struct rt_spare *rts;
1980         int i;
1981
1982
1983         /* fix any spare routes through the interface
1984          */
1985         rts = RT->rt_spares;
1986         for (i = NUM_SPARES; i != 1; i--) {
1987                 rts++;
1988                 if (rts->rts_metric < HOPCNT_INFINITY
1989                     && (rts->rts_ifp == NULL
1990                         || (rts->rts_ifp->int_state & IS_BROKE)))
1991                         rts_delete(RT, rts);
1992         }
1993
1994         /* Deal with the main route
1995          */
1996         /* finished if it has been handled before or if its interface is ok
1997          */
1998         if (RT->rt_ifp == 0 || !(RT->rt_ifp->int_state & IS_BROKE))
1999                 return 0;
2000
2001         /* Bad routes for other than interfaces are easy.
2002          */
2003         if (0 == (RT->rt_state & (RS_IF | RS_NET_SYN | RS_LOCAL))) {
2004                 rtbad(RT);
2005                 return 0;
2006         }
2007
2008         rtbad_sub(RT);
2009         return 0;
2010 #undef RT
2011 }
2012
2013
2014 /* Check the age of an individual route.
2015  */
2016 /* ARGSUSED */
2017 static int
2018 walk_age(struct radix_node *rn,
2019            struct walkarg *argp UNUSED)
2020 {
2021 #define RT ((struct rt_entry *)rn)
2022         struct interface *ifp;
2023         struct rt_spare *rts;
2024         int i;
2025
2026
2027         /* age all of the spare routes, including the primary route
2028          * currently in use
2029          */
2030         rts = RT->rt_spares;
2031         for (i = NUM_SPARES; i != 0; i--, rts++) {
2032
2033                 ifp = rts->rts_ifp;
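                     /* The first slot holds the route currently in use. */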
2034                 if (i == NUM_SPARES) {
2035                         if (!AGE_RT(RT->rt_state, ifp)) {
2036                                 /* Keep various things from deciding ageless
2037                                  * routes are stale
2038                                  */
2039                                 rts->rts_time = now.tv_sec;
2040                                 continue;
2041                         }
2042
2043                         /* forget RIP routes after RIP has been turned off.
2044                          */
2045                         if (rip_sock < 0) {
2046                                 rtdelete(RT);
2047                                 return 0;
2048                         }
2049                 }
2050
2051                 /* age failing routes
2052                  */
2053                 if (age_bad_gate == rts->rts_gate
2054                     && rts->rts_time >= now_stale) {
2055                         rts->rts_time -= SUPPLY_INTERVAL;
2056                 }
2057
2058                 /* trash the spare routes when they go bad */
2059                 if (rts->rts_metric < HOPCNT_INFINITY
2060                     && now_garbage > rts->rts_time
2061                     && i != NUM_SPARES)
2062                         rts_delete(RT, rts);
2063         }
2064
2065
2066         /* finished if the active route is still fresh */
2067         if (now_stale <= RT->rt_time)
2068                 return 0;
2069
2070         /* try to switch to an alternative */
2071         rtswitch(RT, 0);
2072
2073         /* Delete a dead route after it has been publicly mourned. */
2074         if (now_garbage > RT->rt_time) {
2075                 rtdelete(RT);
2076                 return 0;
2077         }
2078
2079         /* Start poisoning a bad route before deleting it. */
2080         if (now.tv_sec - RT->rt_time > EXPIRE_TIME) {
2081                 struct rt_spare new = RT->rt_spares[0];
2082                 new.rts_metric = HOPCNT_INFINITY;
2083                 rtchange(RT, RT->rt_state, &new, 0);
2084         }
2085         return 0;
2086 }
2087
2088
2089 /* Watch for dead routes and interfaces.
2090  */
2091 void
2092 age(naddr bad_gate)
2093 {
2094         struct interface *ifp;
2095         int need_query = 0;
2096
2097         /* If not listening to RIP, there is no need to age the routes in
2098          * the table.
2099          */
2100         age_timer.tv_sec = (now.tv_sec
2101                             + ((rip_sock < 0) ? NEVER : SUPPLY_INTERVAL));
2102
2103         /* Check for dead IS_REMOTE interfaces by timing their
2104          * transmissions.
2105          */
2106         LIST_FOREACH(ifp, &ifnet, int_list) {
2107                 if (!(ifp->int_state & IS_REMOTE))
2108                         continue;
2109
2110                 /* ignore unreachable remote interfaces */
2111                 if (!check_remote(ifp))
2112                         continue;
2113
2114                 /* Restore remote interface that has become reachable
2115                  */
2116                 if (ifp->int_state & IS_BROKE)
2117                         if_ok(ifp, "remote ");
2118
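                     /* Declare a remote interface sick if it has been silent too long. */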
2119                 if (ifp->int_act_time != NEVER
2120                     && now.tv_sec - ifp->int_act_time > EXPIRE_TIME) {
2121                         msglog("remote interface %s to %s timed out after"
2122                                " %ld:%ld",
2123                                ifp->int_name,
2124                                naddr_ntoa(ifp->int_dstaddr),
2125                                (long)(now.tv_sec - ifp->int_act_time)/60,
2126                                (long)(now.tv_sec - ifp->int_act_time)%60);
2127                         if_sick(ifp);
2128                 }
2129
2130                 /* If we have not heard from the other router
2131                  * recently, ask it.
2132                  */
2133                 if (now.tv_sec >= ifp->int_query_time) {
2134                         ifp->int_query_time = NEVER;
2135                         need_query = 1;
2136                 }
2137         }
2138
2139         /* Age routes. */
2140         age_bad_gate = bad_gate;
2141         (void)rn_walktree(rhead, walk_age, 0);
2142
2143         /* delete old redirected routes to keep the kernel table small
2144          * and prevent blackholes
2145          */
2146         del_redirects(bad_gate, now.tv_sec-STALE_TIME);
2147
2148         /* Update the kernel routing table. */
2149         fix_kern();
2150
2151         /* poke reticent remote gateways */
2152         if (need_query)
2153                 rip_query();
2154 }