2 * The mrouted program is covered by the license in the accompanying file
3 * named "LICENSE". Use of the mrouted program represents acceptance of
4 * the terms and conditions listed in that file.
6 * The mrouted program is COPYRIGHT 1989 by The Board of Trustees of
7 * Leland Stanford Junior University.
11 static const char rcsid[] =
/* Externals defined in other mrouted modules: cache ageing interval,
 * maximum downstream prune lifetime, and the unicast routing table. */
17 extern int cache_lifetime;
18 extern int max_prune_lifetime;
19 extern struct rtentry *routing_table;
24 * dither cache lifetime to obtain a value between x and 2*x
/* Two alternative CACHE_LIFETIME definitions follow; presumably selected
 * by a platform #ifdef (lrand48 vs random) not visible in this listing
 * -- TODO confirm against the full source. */
27 #define CACHE_LIFETIME(x) ((x) + (lrand48() % (x)))
29 #define CACHE_LIFETIME(x) ((x) + (random() % (x)))
/* CHK_GS: multi-line macro; its continuation lines are missing here.
 * File-scope cache state follows. */
32 #define CHK_GS(x, y) { \
47 struct gtable *kernel_table; /* ptr to list of kernel grp entries*/
48 static struct gtable *kernel_no_route; /* list of grp entries w/o routes */
49 struct gtable *gtp; /* pointer for kernel rt entries */
50 unsigned int kroutes; /* current number of cache entries */
52 /****************************************************************************
53 Functions that are local to prune.c
54 ****************************************************************************/
/* Forward declarations for file-local helpers.  __P() wraps parameter
 * lists for pre-ANSI (K&R) compiler compatibility. */
55 static void prun_add_ttls __P((struct gtable *gt));
56 static int pruning_neighbor __P((vifi_t vifi, u_int32 addr));
57 static int can_mtrace __P((vifi_t vifi, u_int32 addr));
58 static struct ptable * find_prune_entry __P((u_int32 vr, struct ptable *pt));
59 static void expire_prune __P((vifi_t vifi, struct gtable *gt));
60 static void send_prune __P((struct gtable *gt));
61 static void send_graft __P((struct gtable *gt));
62 static void send_graft_ack __P((u_int32 src, u_int32 dst,
63 u_int32 origin, u_int32 grp));
64 static void update_kernel __P((struct gtable *g));
65 static char * scaletime __P((u_long t));
/* prun_add_ttls: for every vif in gt's membership mask, copy that vif's
 * TTL threshold into gt->gt_ttls; clear it otherwise.  The function
 * signature and brace lines are missing from this fragmentary view. */
68 * Updates the ttl values for each vif.
77 for (vifi = 0, v = uvifs; vifi < numvifs; ++vifi, ++v) {
78 if (VIFM_ISSET(vifi, gt->gt_grpmems))
79 gt->gt_ttls[vifi] = v->uv_threshold;
81 gt->gt_ttls[vifi] = 0;
86 * checks for scoped multicast addresses
/* GET_SCOPE: for administratively-scoped groups (0xef000000 == 239/8),
 * set the scope bit in (gt)->gt_scope for each vif whose ACL matches
 * the group, via scoped_addr().  Some macro lines are missing here. */
88 #define GET_SCOPE(gt) { \
90 if ((ntohl((gt)->gt_mcastgrp) & 0xff000000) == 0xef000000) \
91 for (_i = 0; _i < numvifs; _i++) \
92 if (scoped_addr(_i, (gt)->gt_mcastgrp)) \
93 VIFM_SET(_i, (gt)->gt_scope); \
/* scoped_addr: walk vifi's boundary ACL and test whether addr falls in
 * any (addr,mask) entry.  Return statements are missing from this view;
 * presumably returns nonzero on a match -- TODO confirm. */
97 scoped_addr(vifi, addr)
103 for (acl = uvifs[vifi].uv_acl; acl; acl = acl->acl_next)
104 if ((addr & acl->acl_mask) == acl->acl_addr)
/* grplst_mem: scan vifi's group-membership list for mcastgrp.
 * Return statements missing from this view; presumably TRUE when a
 * listener is found -- TODO confirm. */
111 * Determine if mcastgrp has a listener on vifi
114 grplst_mem(vifi, mcastgrp)
118 register struct listaddr *g;
119 register struct uvif *v;
123 for (g = v->uv_groups; g != NULL; g = g->al_next)
124 if (mcastgrp == g->al_addr)
131 * Finds the group entry with the specified source and netmask.
132 * If netmask is 0, it uses the route's netmask.
134 * Returns TRUE if found a match, and the global variable gtp is left
135 * pointing to entry before the found entry.
136 * Returns FALSE if no exact match found, gtp is left pointing to before
137 * the entry in question belongs, or is NULL if it belongs at the
/* kernel_table is ordered by group (descending ntohl here) then by
 * origin/mask; the two conditions below are the match test and the
 * keep-searching ordering test respectively. */
141 find_src_grp(src, mask, grp)
151 if (grp == gt->gt_mcastgrp &&
152 (mask ? (gt->gt_route->rt_origin == src &&
153 gt->gt_route->rt_originmask == mask) :
154 ((src & gt->gt_route->rt_originmask) ==
155 gt->gt_route->rt_origin)))
157 if (ntohl(grp) > ntohl(gt->gt_mcastgrp) ||
158 (grp == gt->gt_mcastgrp &&
159 (ntohl(mask) < ntohl(gt->gt_route->rt_originmask) ||
160 (mask == gt->gt_route->rt_originmask &&
161 (ntohl(src) > ntohl(gt->gt_route->rt_origin)))))) {
171 * Check if the neighbor supports pruning
/* pruning_neighbor: a neighbor is prune-capable if it advertises
 * NF_PRUNE, or (legacy) reports a version in [3.0, 3.4]. */
174 pruning_neighbor(vifi, addr)
178 struct listaddr *n = neighbor_info(vifi, addr);
184 if (n->al_flags & NF_PRUNE)
188 * Versions from 3.0 to 3.4 relied on the version number to identify
189 * that they could handle pruning.
192 return (vers >= 0x0300 && vers <= 0x0304);
196 * Can the neighbor in question handle multicast traceroute?
/* can_mtrace: mirrors pruning_neighbor(): NF_MTRACE flag, or legacy
 * version check restricted to [3.3, 3.4]. */
199 can_mtrace(vifi, addr)
203 struct listaddr *n = neighbor_info(vifi, addr);
209 if (n->al_flags & NF_MTRACE)
213 * Versions 3.3 and 3.4 relied on the version number to identify
214 * that they could handle traceroute.
217 return (vers >= 0x0303 && vers <= 0x0304);
221 * Returns the prune entry of the router, or NULL if none exists
/* Linear search of the prune list `pt` for router address `vr`;
 * loop/return lines are missing from this view. */
223 static struct ptable *
224 find_prune_entry(vr, pt)
229 if (pt->pt_router == vr)
238 * Send a prune message to the dominant router for
241 * Record an entry that a prune was sent for this group
255 /* Don't process any prunes if router is not pruning */
259 /* Can't process a prune if we don't have an associated route */
260 if (gt->gt_route == NULL)
263 /* Don't send a prune to a non-pruning router */
264 if (!pruning_neighbor(gt->gt_route->rt_parent, gt->gt_route->rt_gateway))
268 * sends a prune message to the router upstream.
/* Source = our address on the upstream (parent) vif; destination = the
 * upstream gateway for this route. */
270 src = uvifs[gt->gt_route->rt_parent].uv_lcl_addr;
271 dst = gt->gt_route->rt_gateway;
273 p = send_buf + MIN_IP_HEADER_LEN + IGMP_MINLEN;
277 * determine prune lifetime
/* Prune lifetime = min(entry timer, all downstream prune timers), so we
 * never prune upstream longer than any downstream prune we hold. */
279 gt->gt_prsent_timer = gt->gt_timer;
280 for (pt = gt->gt_pruntbl; pt; pt = pt->pt_next)
281 if (pt->pt_timer < gt->gt_prsent_timer)
282 gt->gt_prsent_timer = pt->pt_timer;
285 * If we have a graft pending, cancel graft retransmission
/* Marshal the DVMRP prune payload byte-by-byte: origin, group, then the
 * lifetime in network byte order. */
289 for (i = 0; i < 4; i++)
290 *p++ = ((char *)&(gt->gt_route->rt_origin))[i];
291 for (i = 0; i < 4; i++)
292 *p++ = ((char *)&(gt->gt_mcastgrp))[i];
293 tmp = htonl(gt->gt_prsent_timer);
294 for (i = 0; i < 4; i++)
295 *p++ = ((char *)&(tmp))[i];
298 send_igmp(src, dst, IGMP_DVMRP, DVMRP_PRUNE,
299 htonl(MROUTED_LEVEL), datalen);
301 log(LOG_DEBUG, 0, "sent prune for (%s %s)/%d on vif %d to %s",
302 inet_fmts(gt->gt_route->rt_origin, gt->gt_route->rt_originmask, s1),
303 inet_fmt(gt->gt_mcastgrp, s2),
304 gt->gt_prsent_timer, gt->gt_route->rt_parent,
305 inet_fmt(gt->gt_route->rt_gateway, s3));
309 * a prune was sent upstream
310 * so, a graft has to be sent to annul the prune
311 * set up a graft timer so that if an ack is not
312 * heard within that time, another graft request
325 /* Can't send a graft without an associated route */
326 if (gt->gt_route == NULL)
/* Same addressing as send_prune(): our address on the parent vif to the
 * upstream gateway. */
329 src = uvifs[gt->gt_route->rt_parent].uv_lcl_addr;
330 dst = gt->gt_route->rt_gateway;
332 p = send_buf + MIN_IP_HEADER_LEN + IGMP_MINLEN;
/* Graft payload: origin then group, copied byte-by-byte. */
335 for (i = 0; i < 4; i++)
336 *p++ = ((char *)&(gt->gt_route->rt_origin))[i];
337 for (i = 0; i < 4; i++)
338 *p++ = ((char *)&(gt->gt_mcastgrp))[i];
342 send_igmp(src, dst, IGMP_DVMRP, DVMRP_GRAFT,
343 htonl(MROUTED_LEVEL), datalen);
345 log(LOG_DEBUG, 0, "sent graft for (%s %s) to %s on vif %d",
346 inet_fmts(gt->gt_route->rt_origin, gt->gt_route->rt_originmask, s1),
347 inet_fmt(gt->gt_mcastgrp, s2),
348 inet_fmt(gt->gt_route->rt_gateway, s3), gt->gt_route->rt_parent);
352 * Send an ack that a graft was received
/* send_graft_ack: unicast DVMRP_GRAFT_ACK from src to dst echoing the
 * (origin, grp) pair from the graft being acknowledged. */
355 send_graft_ack(src, dst, origin, grp)
365 p = send_buf + MIN_IP_HEADER_LEN + IGMP_MINLEN;
368 for (i = 0; i < 4; i++)
369 *p++ = ((char *)&(origin))[i];
370 for (i = 0; i < 4; i++)
371 *p++ = ((char *)&(grp))[i];
374 send_igmp(src, dst, IGMP_DVMRP, DVMRP_GRAFT_ACK,
375 htonl(MROUTED_LEVEL), datalen);
377 log(LOG_DEBUG, 0, "sent graft ack for (%s, %s) to %s",
378 inet_fmt(origin, s1), inet_fmt(grp, s2), inet_fmt(dst, s3));
382 * Update the kernel cache with all the routes hanging off the group entry
/* Re-install every (source, group) pair of g into the kernel cache. */
390 for (st = g->gt_srctbl; st; st = st->st_next)
391 k_add_rg(st->st_origin, g);
394 /****************************************************************************
395 Functions that are used externally
396 ****************************************************************************/
399 #include <sys/types.h>
403 * Find a specific group entry in the group table
/* kernel_table is kept sorted by group, so the first comparison is an
 * early-exit; return lines missing from this view. */
411 for (gt = kernel_table; gt; gt = gt->gt_gnext) {
412 if (ntohl(grp) < ntohl(gt->gt_mcastgrp))
414 if (gt->gt_mcastgrp == grp)
421 * Given a group entry and source, find the corresponding source table
/* Multiple gtable entries can share one group (different routes), so
 * walk forward while the group matches, scanning each source list. */
425 find_grp_src(gt, src)
430 u_long grp = gt->gt_mcastgrp;
431 struct gtable *gtcurr;
433 for (gtcurr = gt; gtcurr->gt_mcastgrp == grp; gtcurr = gtcurr->gt_gnext) {
434 for (st = gtcurr->gt_srctbl; st; st = st->st_next)
435 if (st->st_origin == src)
442 * Find next entry > specification
/* SNMP-style lexicographic successor over (group, source, mask). */
445 next_grp_src_mask(gtpp, stpp, grp, src, mask)
446 struct gtable **gtpp; /* ordered by group */
447 struct stable **stpp; /* ordered by source */
452 struct gtable *gt, *gbest = NULL;
453 struct stable *st, *sbest = NULL;
455 /* Find first group entry >= grp spec */
456 (*gtpp) = kernel_table;
457 while ((*gtpp) && ntohl((*gtpp)->gt_mcastgrp) < ntohl(grp))
458 (*gtpp)=(*gtpp)->gt_gnext;
460 return 0; /* no more groups */
462 for (gt = kernel_table; gt; gt=gt->gt_gnext) {
463 /* Since grps are ordered, we can stop when group changes from gbest */
464 if (gbest && gbest->gt_mcastgrp != gt->gt_mcastgrp)
466 for (st = gt->gt_srctbl; st; st=st->st_next) {
468 /* Among those entries > spec, find "lowest" one */
/* NOTE(review): the third disjunct compares ntohl(st->st_origin) to
 * `src` without ntohl() -- inconsistent byte order versus the other
 * comparisons here; looks like a latent bug.  Verify upstream. */
469 if (((ntohl(gt->gt_mcastgrp)> ntohl(grp))
470 || (ntohl(gt->gt_mcastgrp)==ntohl(grp)
471 && ntohl(st->st_origin)> ntohl(src))
472 || (ntohl(gt->gt_mcastgrp)==ntohl(grp)
473 && ntohl(st->st_origin)==src && 0xFFFFFFFF>ntohl(mask)))
475 || (ntohl(gt->gt_mcastgrp)< ntohl(gbest->gt_mcastgrp))
476 || (ntohl(gt->gt_mcastgrp)==ntohl(gbest->gt_mcastgrp)
477 && ntohl(st->st_origin)< ntohl(sbest->st_origin)))) {
489 * Ensure that sg contains current information for the given group,source.
490 * This is fetched from the kernel as a unit so that counts for the entry
491 * are consistent, i.e. packet and byte counts for the same entry are
492 * read at the same time.
495 refresh_sg(sg, gt, st)
496 struct sioc_sg_req *sg;
/* lastq caches the timer quantum of the last fetch so repeated calls
 * within one quantum for the same (src,grp) skip the ioctl. */
500 static int lastq = -1;
502 if (quantum != lastq || sg->src.s_addr!=st->st_origin
503 || sg->grp.s_addr!=gt->gt_mcastgrp) {
505 sg->src.s_addr = st->st_origin;
506 sg->grp.s_addr = gt->gt_mcastgrp;
/* NOTE(review): SIOCGETSGCNT return value is ignored here; on failure
 * sg retains stale counts.  Confirm this is acceptable for SNMP use. */
507 ioctl(udp_socket, SIOCGETSGCNT, (char *)sg);
512 * Return pointer to a specific route entry. This must be a separate
513 * function from find_route() which modifies rtp.
/* Exact-match linear scan of the routing table on (origin, mask);
 * return lines missing from this view. */
516 snmp_find_route(src, mask)
517 register u_long src, mask;
519 register struct rtentry *rt;
521 for (rt = routing_table; rt; rt = rt->rt_next) {
522 if (src == rt->rt_origin && mask == rt->rt_originmask)
529 * Find next route entry > specification
/* Full scan keeping the smallest entry strictly greater than (src,mask)
 * in (origin, mask) lexicographic order -- the list itself need not be
 * sorted for this to be correct. */
532 next_route(rtpp, src, mask)
533 struct rtentry **rtpp;
537 struct rtentry *rt, *rbest = NULL;
539 /* Among all entries > spec, find "lowest" one in order */
540 for (rt = routing_table; rt; rt=rt->rt_next) {
541 if ((ntohl(rt->rt_origin) > ntohl(src)
542 || (ntohl(rt->rt_origin) == ntohl(src)
543 && ntohl(rt->rt_originmask) > ntohl(mask)))
544 && (!rbest || (ntohl(rt->rt_origin) < ntohl(rbest->rt_origin))
545 || (ntohl(rt->rt_origin) == ntohl(rbest->rt_origin)
546 && ntohl(rt->rt_originmask) < ntohl(rbest->rt_originmask))))
554 * Given a routing table entry, and a vifi, find the next vifi/entry
/* Start from the exact (src,mask) route or its successor, then advance
 * *vifi to the next child vif; if the route has none left, move to the
 * next route and retry (do/while braces missing from this view). */
557 next_route_child(rtpp, src, mask, vifi)
558 struct rtentry **rtpp;
561 vifi_t *vifi; /* vif at which to start looking */
565 /* Get (S,M) entry */
566 if (!((*rtpp) = snmp_find_route(src,mask)))
567 if (!next_route(rtpp, src, mask))
570 /* Continue until we get one with a valid next vif */
572 for (; (*rtpp)->rt_children && *vifi<numvifs; (*vifi)++)
573 if (VIFM_ISSET(*vifi, (*rtpp)->rt_children))
576 } while( next_route(rtpp, (*rtpp)->rt_origin, (*rtpp)->rt_originmask) );
582 * Given a routing table entry, and a vifi, find the next entry
583 * equal to or greater than those
/* (G,S,M) analogue of next_route_child(): locate or advance past the
 * requested group/source, then hunt for the next child vif; mask
 * 0xFFFFFFFF forces strict advance to the next (group,source). */
586 next_child(gtpp, stpp, grp, src, mask, vifi)
587 struct gtable **gtpp;
588 struct stable **stpp;
592 vifi_t *vifi; /* vif at which to start looking */
596 /* Get (G,S,M) entry */
598 || !((*gtpp) = find_grp(grp))
599 || !((*stpp) = find_grp_src((*gtpp),src)))
600 if (!next_grp_src_mask(gtpp, stpp, grp, src, mask))
603 /* Continue until we get one with a valid next vif */
605 for (; (*gtpp)->gt_route->rt_children && *vifi<numvifs; (*vifi)++)
606 if (VIFM_ISSET(*vifi, (*gtpp)->gt_route->rt_children))
609 } while (next_grp_src_mask(gtpp, stpp, (*gtpp)->gt_mcastgrp,
610 (*stpp)->st_origin, 0xFFFFFFFF) );
617 * Initialize the kernel table structure
/* Only part of the initializer is visible; kernel_table, gtp, kroutes
 * presumably reset on the missing lines -- TODO confirm. */
623 kernel_no_route = NULL;
628 * Add a new table entry for (origin, mcastgrp)
631 add_table_entry(origin, mcastgrp)
636 struct gtable *gt,**gtnp,*prev_gt;
637 struct stable *st,**stnp;
641 md_log(MD_MISS, origin, mcastgrp);
/* Resolve the route for this source; NULL sends the entry to the
 * no-route list instead of a route's group list. */
644 r = determine_route(origin);
648 * Look for it on the no_route table; if it is found then
649 * it will be detected as a duplicate below.
651 for (gt = kernel_no_route; gt; gt = gt->gt_next)
652 if (mcastgrp == gt->gt_mcastgrp &&
653 gt->gt_srctbl && gt->gt_srctbl->st_origin == origin)
655 gtnp = &kernel_no_route;
657 gtnp = &r->rt_groups;
/* Find the sorted insertion point in the chosen group list. */
658 while ((gt = *gtnp) != NULL) {
659 if (gt->gt_mcastgrp >= mcastgrp)
666 if (gt == NULL || gt->gt_mcastgrp != mcastgrp) {
667 gt = (struct gtable *)malloc(sizeof(struct gtable));
669 log(LOG_ERR, 0, "ran out of memory");
671 gt->gt_mcastgrp = mcastgrp;
672 gt->gt_timer = CACHE_LIFETIME(cache_lifetime);
676 gt->gt_prsent_timer = 0;
678 gt->gt_srctbl = NULL;
679 gt->gt_pruntbl = NULL;
682 gt->gt_rsrr_cache = NULL;
686 /* obtain the multicast group membership list */
687 for (i = 0; i < numvifs; i++) {
688 if (VIFM_ISSET(i, r->rt_children) &&
689 !(VIFM_ISSET(i, r->rt_leaves)))
690 VIFM_SET(i, gt->gt_grpmems);
692 if (VIFM_ISSET(i, r->rt_leaves) && grplst_mem(i, mcastgrp))
693 VIFM_SET(i, gt->gt_grpmems);
/* Strip scoped vifs from the membership mask. */
696 if (VIFM_ISSET(r->rt_parent, gt->gt_scope))
698 gt->gt_grpmems &= ~gt->gt_scope;
710 gt->gt_next->gt_prev = gt;
711 gt->gt_prev = prev_gt;
/* Link the new entry into the global group-ordered kernel_table. */
714 if (find_src_grp(r->rt_origin, r->rt_originmask, gt->gt_mcastgrp)) {
717 g = gtp ? gtp->gt_gnext : kernel_table;
718 log(LOG_WARNING, 0, "Entry for (%s %s) (rt:%x) exists (rt:%x)",
719 inet_fmts(r->rt_origin, r->rt_originmask, s1),
720 inet_fmt(g->gt_mcastgrp, s2),
724 gt->gt_gnext = gtp->gt_gnext;
728 gt->gt_gnext = kernel_table;
733 gt->gt_gnext->gt_gprev = gt;
736 gt->gt_gnext = gt->gt_gprev = NULL;
/* NOTE(review): the next line is mangled -- "&gt" was collapsed to ">"
 * by HTML-entity decoding; it should read "stnp = &gt->gt_srctbl;". */
740 stnp = >->gt_srctbl;
741 while ((st = *stnp) != NULL) {
742 if (ntohl(st->st_origin) >= ntohl(origin))
747 if (st == NULL || st->st_origin != origin) {
748 st = (struct stable *)malloc(sizeof(struct stable));
750 log(LOG_ERR, 0, "ran out of memory");
752 st->st_origin = origin;
758 md_log(MD_DUPE, origin, mcastgrp);
760 log(LOG_WARNING, 0, "kernel entry already exists for (%s %s)",
761 inet_fmt(origin, s1), inet_fmt(mcastgrp, s2));
762 /* XXX Doing this should cause no harm, and may ensure
763 * kernel<>mrouted synchronization */
764 k_add_rg(origin, gt);
769 k_add_rg(origin, gt);
771 log(LOG_DEBUG, 0, "add cache entry (%s %s) gm:%x, parent-vif:%d",
772 inet_fmt(origin, s1),
773 inet_fmt(mcastgrp, s2),
774 gt->gt_grpmems, r ? r->rt_parent : -1);
776 /* If there are no leaf vifs
777 * which have this group, then
778 * mark this src-grp as a prune candidate.
780 if (!gt->gt_prsent_timer && !gt->gt_grpmems && r && r->rt_gateway)
785 * An mrouter has gone down and come up on an interface
786 * Forward on that interface immediately
789 reset_neighbor_state(vifi, addr)
795 struct ptable *pt, **ptnp;
798 for (g = kernel_table; g; g = g->gt_gnext) {
802 * If neighbor was the parent, remove the prune sent state
803 * and all of the source cache info so that prunes get
806 if (vifi == r->rt_parent) {
807 if (addr == r->rt_gateway) {
808 log(LOG_DEBUG, 0, "reset_neighbor_state parent reset (%s %s)",
809 inet_fmts(r->rt_origin, r->rt_originmask, s1),
810 inet_fmt(g->gt_mcastgrp, s2));
812 g->gt_prsent_timer = 0;
/* Tear down every kernel (S,G) for this entry so traffic re-triggers
 * cache misses and fresh state. */
814 while ((st = g->gt_srctbl)) {
815 g->gt_srctbl = st->st_next;
816 k_del_rg(st->st_origin, g);
823 * Neighbor was not the parent, send grafts to join the groups
825 if (g->gt_prsent_timer) {
828 g->gt_prsent_timer = 0;
832 * Remove any prunes that this router has sent us.
834 ptnp = &g->gt_pruntbl;
835 while ((pt = *ptnp) != NULL) {
836 if (pt->pt_vifi == vifi && pt->pt_router == addr) {
844 * And see if we want to forward again.
846 if (!VIFM_ISSET(vifi, g->gt_grpmems)) {
847 if (VIFM_ISSET(vifi, r->rt_children) &&
848 !(VIFM_ISSET(vifi, r->rt_leaves)))
849 VIFM_SET(vifi, g->gt_grpmems);
851 if (VIFM_ISSET(vifi, r->rt_leaves) &&
852 grplst_mem(vifi, g->gt_mcastgrp))
853 VIFM_SET(vifi, g->gt_grpmems);
855 g->gt_grpmems &= ~g->gt_scope;
858 /* Update kernel state */
861 /* Send route change notification to reservation protocol. */
862 rsrr_cache_send(g,1);
865 log(LOG_DEBUG, 0, "reset member state (%s %s) gm:%x",
866 inet_fmts(r->rt_origin, r->rt_originmask, s1),
867 inet_fmt(g->gt_mcastgrp, s2), g->gt_grpmems);
874 * Delete table entry from the kernel
875 * del_flag determines how many entries to delete
878 del_table_entry(r, mcastgrp, del_flag)
883 struct gtable *g, *prev_g;
884 struct stable *st, *prev_st;
885 struct ptable *pt, *prev_pt;
/* DEL_ALL_ROUTES: drop every group entry hanging off route r. */
887 if (del_flag == DEL_ALL_ROUTES) {
890 log(LOG_DEBUG, 0, "del_table_entry deleting (%s %s)",
891 inet_fmts(r->rt_origin, r->rt_originmask, s1),
892 inet_fmt(g->gt_mcastgrp, s2));
895 if (k_del_rg(st->st_origin, g) < 0) {
896 log(LOG_WARNING, errno,
897 "del_table_entry trying to delete (%s, %s)",
898 inet_fmt(st->st_origin, s1),
899 inet_fmt(g->gt_mcastgrp, s2));
914 g->gt_pruntbl = NULL;
/* Unlink g from the doubly-linked global kernel_table. */
917 g->gt_gnext->gt_gprev = g->gt_gprev;
919 g->gt_gprev->gt_gnext = g->gt_gnext;
921 kernel_table = g->gt_gnext;
924 /* Send route change notification to reservation protocol. */
925 rsrr_cache_send(g,0);
936 * Dummy routine - someday this may be needed, so it is just there
/* DEL_RTE_GROUP: drop only the entry matching mcastgrp. */
938 if (del_flag == DEL_RTE_GROUP) {
939 prev_g = (struct gtable *)&r->rt_groups;
940 for (g = r->rt_groups; g; g = g->gt_next) {
941 if (g->gt_mcastgrp == mcastgrp) {
942 log(LOG_DEBUG, 0, "del_table_entry deleting (%s %s)",
943 inet_fmts(r->rt_origin, r->rt_originmask, s1),
944 inet_fmt(g->gt_mcastgrp, s2));
947 if (k_del_rg(st->st_origin, g) < 0) {
948 log(LOG_WARNING, errno,
949 "del_table_entry trying to delete (%s, %s)",
950 inet_fmt(st->st_origin, s1),
951 inet_fmt(g->gt_mcastgrp, s2));
966 g->gt_pruntbl = NULL;
969 g->gt_gnext->gt_gprev = g->gt_gprev;
971 g->gt_gprev->gt_gnext = g->gt_gnext;
973 kernel_table = g->gt_gnext;
975 if (prev_g != (struct gtable *)&r->rt_groups)
976 g->gt_next->gt_prev = prev_g;
978 g->gt_next->gt_prev = NULL;
979 prev_g->gt_next = g->gt_next;
982 /* Send route change notification to reservation protocol. */
983 rsrr_cache_send(g,0);
996 * update kernel table entry when a route entry changes
999 update_table_entry(r)
1003 struct ptable *pt, *prev_pt;
1006 for (g = r->rt_groups; g; g = g->gt_next) {
/* Discard all downstream prune state; the topology change invalidates
 * it (freeing loop lines partly missing from this view). */
1009 prev_pt = pt->pt_next;
1013 g->gt_pruntbl = NULL;
1017 /* obtain the multicast group membership list */
1018 for (i = 0; i < numvifs; i++) {
1019 if (VIFM_ISSET(i, r->rt_children) &&
1020 !(VIFM_ISSET(i, r->rt_leaves)))
1021 VIFM_SET(i, g->gt_grpmems);
1023 if (VIFM_ISSET(i, r->rt_leaves) && grplst_mem(i, g->gt_mcastgrp))
1024 VIFM_SET(i, g->gt_grpmems);
1026 if (VIFM_ISSET(r->rt_parent, g->gt_scope))
1028 g->gt_grpmems &= ~g->gt_scope;
1030 log(LOG_DEBUG, 0, "updating cache entries (%s %s) gm:%x",
1031 inet_fmts(r->rt_origin, r->rt_originmask, s1),
1032 inet_fmt(g->gt_mcastgrp, s2),
/* A prune was sent upstream but we now have members: graft. */
1035 if (g->gt_grpmems && g->gt_prsent_timer) {
1038 g->gt_prsent_timer = 0;
1041 /* update ttls and add entry into kernel */
1045 /* Send route change notification to reservation protocol. */
1046 rsrr_cache_send(g,1);
1049 /* Check if we want to prune this group */
1050 if (!g->gt_prsent_timer && g->gt_grpmems == 0 && r->rt_gateway) {
1051 g->gt_timer = CACHE_LIFETIME(cache_lifetime);
1058 * set the forwarding flag for all mcastgrps on this vifi
/* Called on IGMP join: turn forwarding back on for vifi in every
 * matching group entry (subject to scoping). */
1061 update_lclgrp(vifi, mcastgrp)
1068 log(LOG_DEBUG, 0, "group %s joined on vif %d",
1069 inet_fmt(mcastgrp, s1), vifi);
1071 for (g = kernel_table; g; g = g->gt_gnext) {
1072 if (ntohl(mcastgrp) < ntohl(g->gt_mcastgrp))
1076 if (g->gt_mcastgrp == mcastgrp &&
1077 VIFM_ISSET(vifi, r->rt_children)) {
1079 VIFM_SET(vifi, g->gt_grpmems);
1080 g->gt_grpmems &= ~g->gt_scope;
1081 if (g->gt_grpmems == 0)
1085 log(LOG_DEBUG, 0, "update lclgrp (%s %s) gm:%x",
1086 inet_fmts(r->rt_origin, r->rt_originmask, s1),
1087 inet_fmt(g->gt_mcastgrp, s2), g->gt_grpmems);
1091 /* Send route change notification to reservation protocol. */
1092 rsrr_cache_send(g,1);
1099 * reset forwarding flag for all mcastgrps on this vifi
/* Called on IGMP leave: stop forwarding on vifi only if no un-pruned
 * router neighbor still needs the traffic. */
1102 delete_lclgrp(vifi, mcastgrp)
1109 log(LOG_DEBUG, 0, "group %s left on vif %d",
1110 inet_fmt(mcastgrp, s1), vifi);
1112 for (g = kernel_table; g; g = g->gt_gnext) {
1113 if (ntohl(mcastgrp) < ntohl(g->gt_mcastgrp))
1116 if (g->gt_mcastgrp == mcastgrp) {
1117 int stop_sending = 1;
1121 * If this is not a leaf, then we have router neighbors on this
1122 * vif. Only turn off forwarding if they have all pruned.
1124 if (!VIFM_ISSET(vifi, r->rt_leaves)) {
1125 struct listaddr *vr;
1127 for (vr = uvifs[vifi].uv_neighbors; vr; vr = vr->al_next)
1128 if (find_prune_entry(vr->al_addr, g->gt_pruntbl) == NULL) {
1135 VIFM_CLR(vifi, g->gt_grpmems);
1136 log(LOG_DEBUG, 0, "delete lclgrp (%s %s) gm:%x",
1137 inet_fmts(r->rt_origin, r->rt_originmask, s1),
1138 inet_fmt(g->gt_mcastgrp, s2), g->gt_grpmems);
1143 /* Send route change notification to reservation protocol. */
1144 rsrr_cache_send(g,1);
1148 * If there are no more members of this particular group,
1149 * send prune upstream
1151 if (!g->gt_prsent_timer && g->gt_grpmems == 0 && r->rt_gateway)
1159 * Takes the prune message received and then strips it to
1160 * determine the (src, grp) pair to be pruned.
1162 * Adds the router to the (src, grp) entry then.
1164 * Determines if further packets have to be sent down that vif
1166 * Determines if a corresponding prune message has to be generated
1169 accept_prune(src, dst, p, datalen)
1184 struct listaddr *vr;
1186 /* Don't process any prunes if router is not pruning */
1190 if ((vifi = find_vif(src, dst)) == NO_VIF) {
1192 "ignoring prune report from non-neighbor %s",
1197 /* Check if enough data is present */
1201 "non-decipherable prune from %s",
/* Unmarshal source, group, and timer (timer arrives network order). */
1206 for (i = 0; i< 4; i++)
1207 ((char *)&prun_src)[i] = *p++;
1208 for (i = 0; i< 4; i++)
1209 ((char *)&prun_grp)[i] = *p++;
1210 for (i = 0; i< 4; i++)
1211 ((char *)&prun_tmr)[i] = *p++;
1212 prun_tmr = ntohl(prun_tmr);
1214 log(LOG_DEBUG, 0, "%s on vif %d prunes (%s %s)/%d",
1215 inet_fmt(src, s1), vifi,
1216 inet_fmt(prun_src, s2), inet_fmt(prun_grp, s3), prun_tmr);
1219 * Find the subnet for the prune
1221 if (find_src_grp(prun_src, 0, prun_grp)) {
1222 g = gtp ? gtp->gt_gnext : kernel_table;
/* Sanity checks: sender must be a child on this route and the group
 * must not be scoped on this vif. */
1225 if (!VIFM_ISSET(vifi, r->rt_children)) {
1226 log(LOG_WARNING, 0, "prune received from non-child %s for (%s %s)",
1227 inet_fmt(src, s1), inet_fmt(prun_src, s2),
1228 inet_fmt(prun_grp, s3));
1231 if (VIFM_ISSET(vifi, g->gt_scope)) {
1232 log(LOG_WARNING, 0, "prune received from %s on scoped grp (%s %s)",
1233 inet_fmt(src, s1), inet_fmt(prun_src, s2),
1234 inet_fmt(prun_grp, s3));
1237 if ((pt = find_prune_entry(src, g->gt_pruntbl)) != NULL) {
1238 log(LOG_DEBUG, 0, "%s %d from %s for (%s %s)/%d %s %d %s %x",
1239 "duplicate prune received on vif",
1240 vifi, inet_fmt(src, s1), inet_fmt(prun_src, s2),
1241 inet_fmt(prun_grp, s3), prun_tmr,
1242 "old timer:", pt->pt_timer, "cur gm:", g->gt_grpmems);
1243 pt->pt_timer = prun_tmr;
1245 /* allocate space for the prune structure */
1246 pt = (struct ptable *)(malloc(sizeof(struct ptable)));
1248 log(LOG_ERR, 0, "pt: ran out of memory");
1251 pt->pt_router = src;
1252 pt->pt_timer = prun_tmr;
1254 pt->pt_next = g->gt_pruntbl;
1258 /* Refresh the group's lifetime */
/* Keep the entry alive at least as long as the prune it records. */
1259 g->gt_timer = CACHE_LIFETIME(cache_lifetime);
1260 if (g->gt_timer < prun_tmr)
1261 g->gt_timer = prun_tmr;
1264 * check if any more packets need to be sent on the
1265 * vif which sent this message
1268 for (vr = uvifs[vifi].uv_neighbors; vr; vr = vr->al_next)
1269 if (find_prune_entry(vr->al_addr, g->gt_pruntbl) == NULL) {
/* Only stop forwarding if ALL neighbors pruned AND no local member. */
1274 if (stop_sending && !grplst_mem(vifi, prun_grp)) {
1275 VIFM_CLR(vifi, g->gt_grpmems);
1276 log(LOG_DEBUG, 0, "prune (%s %s), stop sending on vif %d, gm:%x",
1277 inet_fmts(r->rt_origin, r->rt_originmask, s1),
1278 inet_fmt(g->gt_mcastgrp, s2), vifi, g->gt_grpmems);
1283 /* Send route change notification to reservation protocol. */
1284 rsrr_cache_send(g,1);
1289 * check if all the child routers have expressed no interest
1290 * in this group and if this group does not exist in the
1292 * Send a prune message then upstream
1294 if (!g->gt_prsent_timer && g->gt_grpmems == 0 && r->rt_gateway) {
1299 * There is no kernel entry for this group. Therefore, we can
1300 * simply ignore the prune, as we are not forwarding this traffic
1303 log(LOG_DEBUG, 0, "%s (%s %s)/%d from %s",
1304 "prune message received with no kernel entry for",
1305 inet_fmt(prun_src, s1), inet_fmt(prun_grp, s2),
1306 prun_tmr, inet_fmt(src, s3));
1312 * Checks if this mcastgrp is present in the kernel table
1313 * If so and if a prune was sent, it sends a graft upwards
1316 chkgrp_graft(vifi, mcastgrp)
1323 for (g = kernel_table; g; g = g->gt_gnext) {
1324 if (ntohl(mcastgrp) < ntohl(g->gt_mcastgrp))
1328 if (g->gt_mcastgrp == mcastgrp && VIFM_ISSET(vifi, r->rt_children))
1329 if (g->gt_prsent_timer) {
1330 VIFM_SET(vifi, g->gt_grpmems);
1333 * If the vif that was joined was a scoped vif,
1334 * ignore it ; don't graft back
1336 g->gt_grpmems &= ~g->gt_scope;
1337 if (g->gt_grpmems == 0)
1340 /* set the flag for graft retransmission */
1343 /* send graft upwards */
1346 /* reset the prune timer and update cache timer*/
1347 g->gt_prsent_timer = 0;
1348 g->gt_timer = max_prune_lifetime;
1350 log(LOG_DEBUG, 0, "chkgrp graft (%s %s) gm:%x",
1351 inet_fmts(r->rt_origin, r->rt_originmask, s1),
1352 inet_fmt(g->gt_mcastgrp, s2), g->gt_grpmems);
1357 /* Send route change notification to reservation protocol. */
1358 rsrr_cache_send(g,1);
1364 /* determine the multicast group and src
1366 * if it does, then determine if a prune was sent
1368 * if prune sent upstream, send graft upstream and send
1371 * if no prune sent upstream, change the forwarding bit
1372 * for this interface and send ack downstream.
1374 * if no entry exists for this group send ack downstream.
1377 accept_graft(src, dst, p, datalen)
1389 struct ptable *pt, **ptnp;
1391 if ((vifi = find_vif(src, dst)) == NO_VIF) {
1393 "ignoring graft from non-neighbor %s",
1400 "received non-decipherable graft from %s",
/* Unmarshal the grafted (source, group) pair. */
1405 for (i = 0; i< 4; i++)
1406 ((char *)&graft_src)[i] = *p++;
1407 for (i = 0; i< 4; i++)
1408 ((char *)&graft_grp)[i] = *p++;
1410 log(LOG_DEBUG, 0, "%s on vif %d grafts (%s %s)",
1411 inet_fmt(src, s1), vifi,
1412 inet_fmt(graft_src, s2), inet_fmt(graft_grp, s3));
1415 * Find the subnet for the graft
1417 if (find_src_grp(graft_src, 0, graft_grp)) {
1418 g = gtp ? gtp->gt_gnext : kernel_table;
1421 if (VIFM_ISSET(vifi, g->gt_scope)) {
1422 log(LOG_WARNING, 0, "graft received from %s on scoped grp (%s %s)",
1423 inet_fmt(src, s1), inet_fmt(graft_src, s2),
1424 inet_fmt(graft_grp, s3));
/* Remove this router's prune entry and resume forwarding on its vif. */
1428 ptnp = &g->gt_pruntbl;
1429 while ((pt = *ptnp) != NULL) {
1430 if ((pt->pt_vifi == vifi) && (pt->pt_router == src)) {
1431 *ptnp = pt->pt_next;
1434 VIFM_SET(vifi, g->gt_grpmems);
1435 log(LOG_DEBUG, 0, "accept graft (%s %s) gm:%x",
1436 inet_fmts(r->rt_origin, r->rt_originmask, s1),
1437 inet_fmt(g->gt_mcastgrp, s2), g->gt_grpmems);
1442 /* Send route change notification to reservation protocol. */
1443 rsrr_cache_send(g,1);
1447 ptnp = &pt->pt_next;
1451 /* send ack downstream */
1452 send_graft_ack(dst, src, graft_src, graft_grp);
1453 g->gt_timer = max_prune_lifetime;
/* If we had pruned upstream, propagate the graft upward too. */
1455 if (g->gt_prsent_timer) {
1456 /* set the flag for graft retransmission */
1459 /* send graft upwards */
1462 /* reset the prune sent timer */
1463 g->gt_prsent_timer = 0;
1467 * We have no state for the source and group in question.
1468 * We can simply acknowledge the graft, since we know
1469 * that we have no prune state, and grafts are requests
1470 * to remove prune state.
1472 send_graft_ack(dst, src, graft_src, graft_grp);
1473 log(LOG_DEBUG, 0, "%s (%s %s) from %s",
1474 "graft received with no kernel entry for",
1475 inet_fmt(graft_src, s1), inet_fmt(graft_grp, s2),
1482 * find out which group is involved first of all
1483 * then determine if a graft was sent.
1484 * if no graft sent, ignore the message
1485 * if graft was sent and the ack is from the right
1486 * source, remove the graft timer so that we don't
1487 * have send a graft again
1490 accept_g_ack(src, dst, p, datalen)
1502 if ((vifi = find_vif(src, dst)) == NO_VIF) {
1504 "ignoring graft ack from non-neighbor %s",
/* Payload is exactly two 32-bit addresses; anything longer is bogus. */
1509 if (datalen < 0 || datalen > 8) {
1511 "received non-decipherable graft ack from %s",
1516 for (i = 0; i< 4; i++)
1517 ((char *)&grft_src)[i] = *p++;
1518 for (i = 0; i< 4; i++)
1519 ((char *)&grft_grp)[i] = *p++;
1521 log(LOG_DEBUG, 0, "%s on vif %d acks graft (%s, %s)",
1522 inet_fmt(src, s1), vifi,
1523 inet_fmt(grft_src, s2), inet_fmt(grft_grp, s3));
1526 * Find the subnet for the graft ack
1528 if (find_src_grp(grft_src, 0, grft_grp)) {
1529 g = gtp ? gtp->gt_gnext : kernel_table;
1532 log(LOG_WARNING, 0, "%s (%s, %s) from %s",
1533 "rcvd graft ack with no kernel entry for",
1534 inet_fmt(grft_src, s1), inet_fmt(grft_grp, s2),
1542 * free all prune entries and kernel routes
1543 * normally, this should inform the kernel that all of its routes
1544 * are going away, but this is only called by restart(), which is
1545 * about to call MRT_DONE which does that anyway.
1550 register struct rtentry *r;
1551 register struct gtable *g, *prev_g;
1552 register struct stable *s, *prev_s;
1553 register struct ptable *p, *prev_p;
/* Free each route's group/source/prune lists (freeing loops missing
 * from this view), then do the same for the no-route list. */
1555 for (r = routing_table; r; r = r->rt_next) {
1576 r->rt_groups = NULL;
1578 kernel_table = NULL;
1580 g = kernel_no_route;
1589 kernel_no_route = NULL;
1593 * When a new route is created, search
1594 * a) The less-specific part of the routing table
1595 * b) The route-less kernel table
1596 * for sources that the new route might want to handle.
1598 * "Inheriting" these sources might be cleanest, but simply deleting
1599 * them is easier, and letting the kernel re-request them.
1605 register struct rtentry *rp;
1606 register struct gtable *gt, **gtnp;
1607 register struct stable *st, **stnp;
/* Pass (a): any less-specific route covering rt's origin loses the
 * sources that now fall under rt. */
1609 for (rp = rt->rt_next; rp; rp = rp->rt_next) {
1610 if ((rt->rt_origin & rp->rt_originmask) == rp->rt_origin) {
1611 log(LOG_DEBUG, 0, "Route for %s stealing sources from %s",
1612 inet_fmts(rt->rt_origin, rt->rt_originmask, s1),
1613 inet_fmts(rp->rt_origin, rp->rt_originmask, s2));
1614 for (gt = rp->rt_groups; gt; gt = gt->gt_next) {
/* NOTE(review): mangled by HTML-entity decoding; should read
 * "stnp = &gt->gt_srctbl;". */
1615 stnp = >->gt_srctbl;
1616 while ((st = *stnp) != NULL) {
1617 if ((st->st_origin & rt->rt_originmask) == rt->rt_origin) {
1618 log(LOG_DEBUG, 0, "%s stealing (%s %s) from %s",
1619 inet_fmts(rt->rt_origin, rt->rt_originmask, s1),
1620 inet_fmt(st->st_origin, s3),
1621 inet_fmt(gt->gt_mcastgrp, s4),
1622 inet_fmts(rp->rt_origin, rp->rt_originmask, s2));
1623 if (k_del_rg(st->st_origin, gt) < 0) {
1624 log(LOG_WARNING, errno, "%s (%s, %s)",
1625 "steal_sources trying to delete",
1626 inet_fmt(st->st_origin, s1),
1627 inet_fmt(gt->gt_mcastgrp, s2));
1629 *stnp = st->st_next;
1633 stnp = &st->st_next;
/* Pass (b): reclaim matching entries from the no-route list. */
1640 gtnp = &kernel_no_route;
1641 while ((gt = *gtnp) != NULL) {
1642 if (gt->gt_srctbl && ((gt->gt_srctbl->st_origin & rt->rt_originmask)
1643 == rt->rt_origin)) {
1644 log(LOG_DEBUG, 0, "%s stealing (%s %s) from %s",
1645 inet_fmts(rt->rt_origin, rt->rt_originmask, s1),
1646 inet_fmt(gt->gt_srctbl->st_origin, s3),
1647 inet_fmt(gt->gt_mcastgrp, s4),
1649 if (k_del_rg(gt->gt_srctbl->st_origin, gt) < 0) {
1650 log(LOG_WARNING, errno, "%s (%s %s)",
1651 "steal_sources trying to delete",
1652 inet_fmt(gt->gt_srctbl->st_origin, s1),
1653 inet_fmt(gt->gt_mcastgrp, s2));
1656 free(gt->gt_srctbl);
1657 *gtnp = gt->gt_next;
1659 gt->gt_next->gt_prev = gt->gt_prev;
/* NOTE(review): same HTML-entity mangling; should read
 * "gtnp = &gt->gt_next;". */
1662 gtnp = >->gt_next;
1668 * Advance the timers on all the cache entries.
1669 * If there are any entries whose timers have expired,
1670 * remove these entries from the kernel cache.
/* (This function continues past the end of the visible listing.) */
1676 struct gtable *gt, **gtnptr;
1677 struct stable *st, **stnp;
1678 struct ptable *pt, **ptnp;
1679 struct sioc_sg_req sg_req;
1681 log(LOG_DEBUG, 0, "ageing entries");
1683 gtnptr = &kernel_table;
1684 while ((gt = *gtnptr) != NULL) {
1687 /* advance the timer for the kernel entry */
1688 gt->gt_timer -= ROUTE_MAX_REPORT_DELAY;
1690 /* decrement prune timer if need be */
1691 if (gt->gt_prsent_timer > 0) {
1692 gt->gt_prsent_timer -= ROUTE_MAX_REPORT_DELAY;
1693 if (gt->gt_prsent_timer <= 0) {
1694 log(LOG_DEBUG, 0, "upstream prune tmo (%s %s)",
1695 inet_fmts(r->rt_origin, r->rt_originmask, s1),
1696 inet_fmt(gt->gt_mcastgrp, s2));
/* -1 marks "expired" distinctly from 0 ("no prune outstanding"). */
1697 gt->gt_prsent_timer = -1;
1701 /* retransmit graft if graft sent flag is still set */
1702 if (gt->gt_grftsnt) {
1704 CHK_GS(gt->gt_grftsnt++, y);
1712 * If a prune expires, forward again on that vif.
/* NOTE(review): mangled by HTML-entity decoding; should read
 * "ptnp = &gt->gt_pruntbl;". */
1714 ptnp = >->gt_pruntbl;
1715 while ((pt = *ptnp) != NULL) {
1716 if ((pt->pt_timer -= ROUTE_MAX_REPORT_DELAY) <= 0) {
1717 log(LOG_DEBUG, 0, "expire prune (%s %s) from %s on vif %d",
1718 inet_fmts(r->rt_origin, r->rt_originmask, s1),
1719 inet_fmt(gt->gt_mcastgrp, s2),
1720 inet_fmt(pt->pt_router, s3),
1723 expire_prune(pt->pt_vifi, gt);
1725 /* remove the router's prune entry and await new one */
1726 *ptnp = pt->pt_next;
1729 ptnp = &pt->pt_next;
1734 * If the cache entry has expired, delete source table entries for
1735 * silent sources. If there are no source entries left, and there
1736 * are no downstream prunes, then the entry is deleted.
1737 * Otherwise, the cache entry's timer is refreshed.
1739 if (gt->gt_timer <= 0) {
1740 /* Check for traffic before deleting source entries */
1741 sg_req.grp.s_addr = gt->gt_mcastgrp;
/* NOTE(review): same mangling; should read "stnp = &gt->gt_srctbl;". */
1742 stnp = >->gt_srctbl;
1743 while ((st = *stnp) != NULL) {
1744 sg_req.src.s_addr = st->st_origin;
1745 if (ioctl(udp_socket, SIOCGETSGCNT, (char *)&sg_req) < 0) {
1746 log(LOG_WARNING, errno, "%s (%s %s)",
1747 "age_table_entry: SIOCGETSGCNT failing for",
1748 inet_fmt(st->st_origin, s1),
1749 inet_fmt(gt->gt_mcastgrp, s2));
1750 /* Make sure it gets deleted below */
1751 sg_req.pktcnt = st->st_pktcnt;
/* Unchanged packet count since last check => source went silent. */
1753 if (sg_req.pktcnt == st->st_pktcnt) {
1754 *stnp = st->st_next;
1755 log(LOG_DEBUG, 0, "age_table_entry deleting (%s %s)",
1756 inet_fmt(st->st_origin, s1),
1757 inet_fmt(gt->gt_mcastgrp, s2));
1758 if (k_del_rg(st->st_origin, gt) < 0) {
1759 log(LOG_WARNING, errno,
1760 "age_table_entry trying to delete (%s %s)",
1761 inet_fmt(st->st_origin, s1),
1762 inet_fmt(gt->gt_mcastgrp, s2));
1767 st->st_pktcnt = sg_req.pktcnt;
1768 stnp = &st->st_next;
1773 * Retain the group entry if we have downstream prunes or if
1774 * there is at least one source in the list that still has
1775 * traffic, or if our upstream prune timer is running.
1777 if (gt->gt_pruntbl != NULL || gt->gt_srctbl != NULL ||
1778 gt->gt_prsent_timer > 0) {
1779 gt->gt_timer = CACHE_LIFETIME(cache_lifetime);
1780 if (gt->gt_prsent_timer == -1)
1781 if (gt->gt_grpmems == 0)
1784 gt->gt_prsent_timer = 0;
1785 gtnptr = >->gt_gnext;
1789 log(LOG_DEBUG, 0, "timeout cache entry (%s, %s)",
1790 inet_fmts(r->rt_origin, r->rt_originmask, s1),
1791 inet_fmt(gt->gt_mcastgrp, s2));
1794 gt->gt_prev->gt_next = gt->gt_next;
1796 gt->gt_route->rt_groups = gt->gt_next;
1798 gt->gt_next->gt_prev = gt->gt_prev;
1801 gt->gt_gprev->gt_gnext = gt->gt_gnext;
1802 gtnptr = >->gt_gprev->gt_gnext;
1804 kernel_table = gt->gt_gnext;
1805 gtnptr = &kernel_table;
1808 gt->gt_gnext->gt_gprev = gt->gt_gprev;
1811 /* Send route change notification to reservation protocol. */
1812 rsrr_cache_send(gt,0);
1813 rsrr_cache_clean(gt);
1817 if (gt->gt_prsent_timer == -1)
1818 if (gt->gt_grpmems == 0)
1821 gt->gt_prsent_timer = 0;
1822 gtnptr = >->gt_gnext;
1827 * When traversing the no_route table, the decision is much easier.
1828 * Just delete it if it has timed out.
1830 gtnptr = &kernel_no_route;
1831 while ((gt = *gtnptr) != NULL) {
1832 /* advance the timer for the kernel entry */
1833 gt->gt_timer -= ROUTE_MAX_REPORT_DELAY;
1835 if (gt->gt_timer < 0) {
1836 if (gt->gt_srctbl) {
1837 if (k_del_rg(gt->gt_srctbl->st_origin, gt) < 0) {
1838 log(LOG_WARNING, errno, "%s (%s %s)",
1839 "age_table_entry trying to delete no-route",
1840 inet_fmt(gt->gt_srctbl->st_origin, s1),
1841 inet_fmt(gt->gt_mcastgrp, s2));
1843 free(gt->gt_srctbl);
1845 *gtnptr = gt->gt_next;
1847 gt->gt_next->gt_prev = gt->gt_prev;
1851 gtnptr = >->gt_next;
1857 * Modify the kernel to forward packets when one or multiple prunes that
1858 * were received on the vif given by vifi, for the group given by gt,
1862 expire_prune(vifi, gt)
1867 * No need to send a graft, any prunes that we sent
1868 * will expire before any prunes that we have received.
1870 if (gt->gt_prsent_timer > 0) {
1871 log(LOG_DEBUG, 0, "prune expired with %d left on %s",
1872 gt->gt_prsent_timer, "prsent_timer");
1873 gt->gt_prsent_timer = 0;
1876 /* modify the kernel entry to forward packets */
1877 if (!VIFM_ISSET(vifi, gt->gt_grpmems)) {
1878 struct rtentry *rt = gt->gt_route;
1879 VIFM_SET(vifi, gt->gt_grpmems);
1880 log(LOG_DEBUG, 0, "forw again (%s %s) gm:%x vif:%d",
1881 inet_fmts(rt->rt_origin, rt->rt_originmask, s1),
1882 inet_fmt(gt->gt_mcastgrp, s2), gt->gt_grpmems, vifi);
1887 /* Send route change notification to reservation protocol. */
1888 rsrr_cache_send(gt,1);
/*
 * Scale a time value (in seconds) into a 4-character string:
 * seconds, minutes, hours, days or weeks, whichever unit keeps the
 * count within three digits.  Returns "*** " if even weeks overflow
 * three digits.
 *
 * Alternates between two static buffers so that two results can be
 * used in a single *printf() call; a third call clobbers the first
 * result.  Not thread-safe (fine for this single-threaded daemon).
 */
static char *
scaletime(t)
    unsigned long t;	/* same type as the u_long in the prototype */
{
    static char buf1[5];
    static char buf2[5];
    static char *buf = buf1;
    char s;
    char *p;

    p = buf;
    /* flip to the other buffer for the next call */
    buf = (buf == buf1) ? buf2 : buf1;

    if (t < 120) {
	s = 's';
    } else if (t < 3600) {
	t /= 60;
	s = 'm';
    } else if (t < 86400) {
	t /= 3600;
	s = 'h';
    } else if (t < 864000) {
	t /= 86400;
	s = 'd';
    } else {
	t /= 604800;
	s = 'w';
    }
    if (t > 999)
	return "*** ";

    sprintf(p, "%3d%c", (int)t, s);

    return p;
}
1934 * Print the contents of the cache table on file 'fp2'.
1940 register struct rtentry *r;
1941 register struct gtable *gt;
1942 register struct stable *st;
1943 register struct ptable *pt;
1946 register time_t thyme = time(0);
1949 "Multicast Routing Cache Table (%d entries)\n%s", kroutes,
1950 " Origin Mcast-group CTmr Age Ptmr IVif Forwvifs\n");
1952 for (gt = kernel_no_route; gt; gt = gt->gt_next) {
1953 if (gt->gt_srctbl) {
1954 fprintf(fp2, " %-18s %-15s %-4s %-4s - -1\n",
1955 inet_fmts(gt->gt_srctbl->st_origin, 0xffffffff, s1),
1956 inet_fmt(gt->gt_mcastgrp, s2), scaletime(gt->gt_timer),
1957 scaletime(thyme - gt->gt_ctime));
1958 fprintf(fp2, ">%s\n", inet_fmt(gt->gt_srctbl->st_origin, s1));
1962 for (gt = kernel_table; gt; gt = gt->gt_gnext) {
1964 fprintf(fp2, " %-18s %-15s",
1965 inet_fmts(r->rt_origin, r->rt_originmask, s1),
1966 inet_fmt(gt->gt_mcastgrp, s2));
1968 fprintf(fp2, " %-4s", scaletime(gt->gt_timer));
1970 fprintf(fp2, " %-4s %-4s ", scaletime(thyme - gt->gt_ctime),
1971 gt->gt_prsent_timer ? scaletime(gt->gt_prsent_timer) :
1974 fprintf(fp2, "%2u%c%c ", r->rt_parent,
1975 gt->gt_prsent_timer ? 'P' : ' ',
1976 VIFM_ISSET(r->rt_parent, gt->gt_scope) ? 'B' : ' ');
1978 for (i = 0; i < numvifs; ++i) {
1979 if (VIFM_ISSET(i, gt->gt_grpmems))
1980 fprintf(fp2, " %u ", i);
1981 else if (VIFM_ISSET(i, r->rt_children) &&
1982 !VIFM_ISSET(i, r->rt_leaves))
1983 fprintf(fp2, " %u%c", i,
1984 VIFM_ISSET(i, gt->gt_scope) ? 'b' : 'p');
1987 if (gt->gt_pruntbl) {
1990 for (pt = gt->gt_pruntbl; pt; pt = pt->pt_next) {
1991 fprintf(fp2, "%c%s:%d/%d", c, inet_fmt(pt->pt_router, s1),
1992 pt->pt_vifi, pt->pt_timer);
1995 fprintf(fp2, ")\n");
1997 for (st = gt->gt_srctbl; st; st = st->st_next) {
1998 fprintf(fp2, ">%s\n", inet_fmt(st->st_origin, s1));
2004 * Traceroute function which returns traceroute replies to the requesting
2005 * router. Also forwards the request to downstream routers.
2008 accept_mtrace(src, dst, group, data, no, datalen)
2013 u_int no; /* promoted u_char */
2019 struct tr_query *qry;
2020 struct tr_resp *resp;
2024 int errcode = TR_NO_ERR;
2027 struct sioc_vif_req v_req;
2028 struct sioc_sg_req sg_req;
2030 /* Remember qid across invocations */
2031 static u_int32 oqid = 0;
2033 /* timestamp the request/response */
2034 gettimeofday(&tp, 0);
2037 * Check if it is a query or a response
2039 if (datalen == QLEN) {
2041 log(LOG_DEBUG, 0, "Initial traceroute query rcvd from %s to %s",
2042 inet_fmt(src, s1), inet_fmt(dst, s2));
2044 else if ((datalen - QLEN) % RLEN == 0) {
2046 log(LOG_DEBUG, 0, "In-transit traceroute query rcvd from %s to %s",
2047 inet_fmt(src, s1), inet_fmt(dst, s2));
2048 if (IN_MULTICAST(ntohl(dst))) {
2049 log(LOG_DEBUG, 0, "Dropping multicast response");
2054 log(LOG_WARNING, 0, "%s from %s to %s",
2055 "Non decipherable traceroute request recieved",
2056 inet_fmt(src, s1), inet_fmt(dst, s2));
2060 qry = (struct tr_query *)data;
2063 * if it is a packet with all reports filled, drop it
2065 if ((rcount = (datalen - QLEN)/RLEN) == no) {
2066 log(LOG_DEBUG, 0, "packet with all reports filled in");
2070 log(LOG_DEBUG, 0, "s: %s g: %s d: %s ", inet_fmt(qry->tr_src, s1),
2071 inet_fmt(group, s2), inet_fmt(qry->tr_dst, s3));
2072 log(LOG_DEBUG, 0, "rttl: %d rd: %s", qry->tr_rttl,
2073 inet_fmt(qry->tr_raddr, s1));
2074 log(LOG_DEBUG, 0, "rcount:%d, qid:%06x", rcount, qry->tr_qid);
2076 /* determine the routing table entry for this traceroute */
2077 rt = determine_route(qry->tr_src);
2079 log(LOG_DEBUG, 0, "rt parent vif: %d rtr: %s metric: %d",
2080 rt->rt_parent, inet_fmt(rt->rt_gateway, s1), rt->rt_metric);
2081 log(LOG_DEBUG, 0, "rt origin %s",
2082 inet_fmts(rt->rt_origin, rt->rt_originmask, s1));
2084 log(LOG_DEBUG, 0, "...no route");
2087 * Query type packet - check if rte exists
2088 * Check if the query destination is a vif connected to me.
2089 * and if so, whether I should start response back
2091 if (type == QUERY) {
2092 if (oqid == qry->tr_qid) {
2094 * If the multicast router is a member of the group being
2095 * queried, and the query is multicasted, then the router can
2096 * recieve multiple copies of the same query. If we have already
2097 * replied to this traceroute, just ignore it this time.
2099 * This is not a total solution, but since if this fails you
2100 * only get N copies, N <= the number of interfaces on the router,
2103 log(LOG_DEBUG, 0, "ignoring duplicate traceroute packet");
2108 log(LOG_DEBUG, 0, "Mcast traceroute: no route entry %s",
2109 inet_fmt(qry->tr_src, s1));
2110 if (IN_MULTICAST(ntohl(dst)))
2113 vifi = find_vif(qry->tr_dst, 0);
2115 if (vifi == NO_VIF) {
2116 /* The traceroute destination is not on one of my subnet vifs. */
2117 log(LOG_DEBUG, 0, "Destination %s not an interface",
2118 inet_fmt(qry->tr_dst, s1));
2119 if (IN_MULTICAST(ntohl(dst)))
2121 errcode = TR_WRONG_IF;
2122 } else if (rt != NULL && !VIFM_ISSET(vifi, rt->rt_children)) {
2123 log(LOG_DEBUG, 0, "Destination %s not on forwarding tree for src %s",
2124 inet_fmt(qry->tr_dst, s1), inet_fmt(qry->tr_src, s2));
2125 if (IN_MULTICAST(ntohl(dst)))
2127 errcode = TR_WRONG_IF;
2132 * determine which interface the packet came in on
2133 * RESP packets travel hop-by-hop so this either traversed
2134 * a tunnel or came from a directly attached mrouter.
2136 if ((vifi = find_vif(src, dst)) == NO_VIF) {
2137 log(LOG_DEBUG, 0, "Wrong interface for packet");
2138 errcode = TR_WRONG_IF;
2142 /* Now that we've decided to send a response, save the qid */
2145 log(LOG_DEBUG, 0, "Sending traceroute response");
2147 /* copy the packet to the sending buffer */
2148 p = send_buf + MIN_IP_HEADER_LEN + IGMP_MINLEN;
2150 bcopy(data, p, datalen);
2155 * If there is no room to insert our reply, coopt the previous hop
2156 * error indication to relay this fact.
2158 if (p + sizeof(struct tr_resp) > send_buf + RECV_BUF_SIZE) {
2159 resp = (struct tr_resp *)p - 1;
2160 resp->tr_rflags = TR_NO_SPACE;
2166 * fill in initial response fields
2168 resp = (struct tr_resp *)p;
2169 bzero(resp, sizeof(struct tr_resp));
2172 resp->tr_qarr = htonl(((tp.tv_sec + JAN_1970) << 16) +
2173 ((tp.tv_usec << 10) / 15625));
2175 resp->tr_rproto = PROTO_DVMRP;
2176 if (errcode != TR_NO_ERR) {
2177 resp->tr_rflags = errcode;
2178 rt = NULL; /* hack to enforce send straight to requestor */
2181 resp->tr_outaddr = uvifs[vifi].uv_lcl_addr;
2182 resp->tr_fttl = uvifs[vifi].uv_threshold;
2183 resp->tr_rflags = TR_NO_ERR;
2186 * obtain # of packets out on interface
2189 if (ioctl(udp_socket, SIOCGETVIFCNT, (char *)&v_req) >= 0)
2190 resp->tr_vifout = htonl(v_req.ocount);
2193 * fill in scoping & pruning information
2196 for (gt = rt->rt_groups; gt; gt = gt->gt_next) {
2197 if (gt->gt_mcastgrp >= group)
2203 if (gt && gt->gt_mcastgrp == group) {
2204 sg_req.src.s_addr = qry->tr_src;
2205 sg_req.grp.s_addr = group;
2206 if (ioctl(udp_socket, SIOCGETSGCNT, (char *)&sg_req) >= 0)
2207 resp->tr_pktcnt = htonl(sg_req.pktcnt);
2209 if (VIFM_ISSET(vifi, gt->gt_scope))
2210 resp->tr_rflags = TR_SCOPED;
2211 else if (gt->gt_prsent_timer)
2212 resp->tr_rflags = TR_PRUNED;
2213 else if (!VIFM_ISSET(vifi, gt->gt_grpmems))
2214 if (VIFM_ISSET(vifi, rt->rt_children) &&
2215 !VIFM_ISSET(vifi, rt->rt_leaves))
2216 resp->tr_rflags = TR_OPRUNED;
2218 resp->tr_rflags = TR_NO_FWD;
2220 if (scoped_addr(vifi, group))
2221 resp->tr_rflags = TR_SCOPED;
2222 else if (rt && !VIFM_ISSET(vifi, rt->rt_children))
2223 resp->tr_rflags = TR_NO_FWD;
2227 * if no rte exists, set NO_RTE error
2230 src = dst; /* the dst address of resp. pkt */
2231 resp->tr_inaddr = 0;
2232 resp->tr_rflags = TR_NO_RTE;
2233 resp->tr_rmtaddr = 0;
2235 /* get # of packets in on interface */
2236 v_req.vifi = rt->rt_parent;
2237 if (ioctl(udp_socket, SIOCGETVIFCNT, (char *)&v_req) >= 0)
2238 resp->tr_vifin = htonl(v_req.icount);
2240 MASK_TO_VAL(rt->rt_originmask, resp->tr_smask);
2241 src = uvifs[rt->rt_parent].uv_lcl_addr;
2242 resp->tr_inaddr = src;
2243 resp->tr_rmtaddr = rt->rt_gateway;
2244 if (!VIFM_ISSET(vifi, rt->rt_children)) {
2245 log(LOG_DEBUG, 0, "Destination %s not on forwarding tree for src %s",
2246 inet_fmt(qry->tr_dst, s1), inet_fmt(qry->tr_src, s2));
2247 resp->tr_rflags = TR_WRONG_IF;
2249 if (rt->rt_metric >= UNREACHABLE) {
2250 resp->tr_rflags = TR_NO_RTE;
2251 /* Hack to send reply directly */
2258 * if metric is 1 or no. of reports is 1, send response to requestor
2259 * else send to upstream router. If the upstream router can't handle
2260 * mtrace, set an error code and send to requestor anyway.
2262 log(LOG_DEBUG, 0, "rcount:%d, no:%d", rcount, no);
2264 if ((rcount + 1 == no) || (rt == NULL) || (rt->rt_metric == 1)) {
2265 resptype = IGMP_MTRACE_RESP;
2266 dst = qry->tr_raddr;
2268 if (!can_mtrace(rt->rt_parent, rt->rt_gateway)) {
2269 dst = qry->tr_raddr;
2270 resp->tr_rflags = TR_OLD_ROUTER;
2271 resptype = IGMP_MTRACE_RESP;
2273 dst = rt->rt_gateway;
2274 resptype = IGMP_MTRACE;
2277 if (IN_MULTICAST(ntohl(dst))) {
2279 * Send the reply on a known multicast capable vif.
2280 * If we don't have one, we can't source any multicasts anyway.
2282 if (phys_vif != -1) {
2283 log(LOG_DEBUG, 0, "Sending reply to %s from %s",
2284 inet_fmt(dst, s1), inet_fmt(uvifs[phys_vif].uv_lcl_addr, s2));
2285 k_set_ttl(qry->tr_rttl);
2286 send_igmp(uvifs[phys_vif].uv_lcl_addr, dst,
2287 resptype, no, group,
2291 log(LOG_INFO, 0, "No enabled phyints -- %s",
2292 "dropping traceroute reply");
2294 log(LOG_DEBUG, 0, "Sending %s to %s from %s",
2295 resptype == IGMP_MTRACE_RESP ? "reply" : "request on",
2296 inet_fmt(dst, s1), inet_fmt(src, s2));
2299 resptype, no, group,