2 * Copyright (c) 2007-2009 Bruce Simpson.
3 * Copyright (c) 1988 Stephen Deering.
4 * Copyright (c) 1992, 1993
5 * The Regents of the University of California. All rights reserved.
7 * This code is derived from software contributed to Berkeley by
8 * Stephen Deering of Stanford University.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * @(#)igmp.c 8.1 (Berkeley) 7/19/93
38 * Internet Group Management Protocol (IGMP) routines.
39 * [RFC1112, RFC2236, RFC3376]
41 * Written by Steve Deering, Stanford, May 1988.
42 * Modified by Rosen Sharma, Stanford, Aug 1994.
43 * Modified by Bill Fenner, Xerox PARC, Feb 1995.
44 * Modified to fully comply to IGMPv2 by Bill Fenner, Oct 1995.
45 * Significantly rewritten for IGMPv3, VIMAGE, and SMP by Bruce Simpson.
47 * MULTICAST Revision: 3.5.1.4
50 #include <sys/cdefs.h>
51 __FBSDID("$FreeBSD$");
54 #include "opt_route.h"
56 #include <sys/param.h>
57 #include <sys/systm.h>
58 #include <sys/module.h>
59 #include <sys/malloc.h>
61 #include <sys/socket.h>
62 #include <sys/protosw.h>
63 #include <sys/kernel.h>
64 #include <sys/sysctl.h>
65 #include <sys/vimage.h>
67 #include <sys/condvar.h>
70 #include <net/netisr.h>
71 #include <net/route.h>
74 #include <netinet/in.h>
75 #include <netinet/in_var.h>
76 #include <netinet/in_systm.h>
77 #include <netinet/ip.h>
78 #include <netinet/ip_var.h>
79 #include <netinet/ip_options.h>
80 #include <netinet/igmp.h>
81 #include <netinet/igmp_var.h>
82 #include <netinet/vinet.h>
84 #include <machine/in_cksum.h>
86 #include <security/mac/mac_framework.h>
89 #define KTR_IGMPV3 KTR_SUBSYS
92 static struct igmp_ifinfo *
93 igi_alloc_locked(struct ifnet *);
94 static void igi_delete_locked(const struct ifnet *);
95 static void igmp_dispatch_queue(struct ifqueue *, int, const int);
96 static void igmp_fasttimo_vnet(void);
97 static void igmp_final_leave(struct in_multi *, struct igmp_ifinfo *);
98 static int igmp_handle_state_change(struct in_multi *,
99 struct igmp_ifinfo *);
100 static int igmp_initial_join(struct in_multi *, struct igmp_ifinfo *);
101 static int igmp_input_v1_query(struct ifnet *, const struct ip *);
102 static int igmp_input_v2_query(struct ifnet *, const struct ip *,
103 const struct igmp *);
104 static int igmp_input_v3_query(struct ifnet *, const struct ip *,
105 /*const*/ struct igmpv3 *);
106 static int igmp_input_v3_group_query(struct in_multi *,
107 struct igmp_ifinfo *, int, /*const*/ struct igmpv3 *);
108 static int igmp_input_v1_report(struct ifnet *, /*const*/ struct ip *,
109 /*const*/ struct igmp *);
110 static int igmp_input_v2_report(struct ifnet *, /*const*/ struct ip *,
111 /*const*/ struct igmp *);
112 static void igmp_intr(struct mbuf *);
113 static int igmp_isgroupreported(const struct in_addr);
117 static char * igmp_rec_type_to_str(const int);
119 static void igmp_set_version(struct igmp_ifinfo *, const int);
120 static void igmp_slowtimo_vnet(void);
121 static void igmp_sysinit(void);
122 static int igmp_v1v2_queue_report(struct in_multi *, const int);
123 static void igmp_v1v2_process_group_timer(struct in_multi *, const int);
124 static void igmp_v1v2_process_querier_timers(struct igmp_ifinfo *);
125 static void igmp_v2_update_group(struct in_multi *, const int);
126 static void igmp_v3_cancel_link_timers(struct igmp_ifinfo *);
127 static void igmp_v3_dispatch_general_query(struct igmp_ifinfo *);
129 igmp_v3_encap_report(struct ifnet *, struct mbuf *);
130 static int igmp_v3_enqueue_group_record(struct ifqueue *,
131 struct in_multi *, const int, const int, const int);
132 static int igmp_v3_enqueue_filter_change(struct ifqueue *,
134 static void igmp_v3_process_group_timers(struct igmp_ifinfo *,
135 struct ifqueue *, struct ifqueue *, struct in_multi *,
137 static int igmp_v3_merge_state_changes(struct in_multi *,
139 static void igmp_v3_suppress_group_record(struct in_multi *);
140 static int sysctl_igmp_default_version(SYSCTL_HANDLER_ARGS);
141 static int sysctl_igmp_gsr(SYSCTL_HANDLER_ARGS);
142 static int sysctl_igmp_ifinfo(SYSCTL_HANDLER_ARGS);
145 static vnet_attach_fn vnet_igmp_iattach;
146 static vnet_detach_fn vnet_igmp_idetach;
148 static int vnet_igmp_iattach(const void *);
149 static int vnet_igmp_idetach(const void *);
153 * System-wide globals.
155 * Unlocked access to these is OK, except for the global IGMP output
156 * queue. The IGMP subsystem lock ends up being system-wide for the moment,
157 * because all VIMAGEs have to share a global output queue, as netisrs
158 * themselves are not virtualized.
161 * * The permitted lock order is: IN_MULTI_LOCK, IGMP_LOCK, IF_ADDR_LOCK.
162 * Any may be taken independently; if any are held at the same
163 * time, the above lock order must be followed.
164 * * All output is delegated to the netisr to handle IFF_NEEDSGIANT.
165 * Most of the time, direct dispatch will be fine.
166 * * IN_MULTI_LOCK covers in_multi.
167 * * IGMP_LOCK covers igmp_ifinfo and any global variables in this file,
168 * including the output queue.
169 * * IF_ADDR_LOCK covers if_multiaddrs, which is used for a variety of
170 * per-link state iterators.
171 * * igmp_ifinfo is valid as long as PF_INET is attached to the interface,
172 * therefore it is not refcounted.
173 * We allow unlocked reads of igmp_ifinfo when accessed via in_multi.
176 * * IGMP acquires its own reference every time an in_multi is passed to
177 * it and the group is being joined for the first time.
178 * * IGMP releases its reference(s) on in_multi in a deferred way,
179 * because the operations which process the release run as part of
180 * a loop whose control variables are directly affected by the release
181 * (that, and not recursing on the IF_ADDR_LOCK).
183 * VIMAGE: Each in_multi corresponds to an ifp, and each ifp corresponds
184 * to a vnet in ifp->if_vnet.
186 * SMPng: XXX We may potentially race operations on ifma_protospec.
187 * The problem is that we currently lack a clean way of taking the
188 * IF_ADDR_LOCK() between the ifnet and in layers w/o recursing,
189 * as anything which modifies ifma needs to be covered by that lock.
190 * So check for ifma_protospec being NULL before proceeding.
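/*
 * Illustrative sketch, not part of the original file: taking all three
 * locks in the documented order, and the ifma_protospec NULL check used
 * when iterating if_multiaddrs (macro names as used elsewhere in this
 * file; treat this as a sketch rather than buildable code).
 */
#if 0
	IN_MULTI_LOCK();
	IGMP_LOCK();
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_INET ||
		    ifma->ifma_protospec == NULL)
			continue;
		inm = (struct in_multi *)ifma->ifma_protospec;
		/* ... per-group work ... */
	}
	IF_ADDR_UNLOCK(ifp);
	IGMP_UNLOCK();
	IN_MULTI_UNLOCK();
#endif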
194 SYSCTL_INT(_debug, OID_AUTO, mpsafe_igmp, CTLFLAG_RDTUN, &mpsafe_igmp, 0,
195 "Enable SMP-safe IGMPv3");
197 struct mbuf *m_raopt; /* Router Alert option */
198 MALLOC_DEFINE(M_IGMP, "igmp", "igmp state");
201 * Global netisr output queue.
202 * This is only used as a last resort if we cannot directly dispatch.
203 * As IN_MULTI_LOCK is no longer in the bottom half of IP, we can do
204 * this, providing mpsafe_igmp is set. If it is not, we take Giant,
205 * and queueing is forced.
207 struct ifqueue igmpoq;
210 * VIMAGE-wide globals.
212 * The IGMPv3 timers themselves need to run per-image, however,
213 * protosw timers run globally (see tcp).
214 * An ifnet can only be in one vimage at a time, and the loopback
215 * ifnet, loif, is itself virtualized.
216 * It would otherwise be possible to seriously hose IGMP state,
217 * and create inconsistencies in upstream multicast routing, if you have
218 * multiple VIMAGEs running on the same link joining different multicast
219 * groups, UNLESS the "primary IP address" is different. This is because
220 * IGMP for IPv4 does not force link-local addresses to be used for each
221 * node, unlike MLD for IPv6.
222 * Obviously the IGMPv3 per-interface state has per-vimage granularity
225 * FUTURE: Stop using IFP_TO_IA/INADDR_ANY, and use source address selection
226 * policy to control the address used by IGMP on the link.
228 #ifdef VIMAGE_GLOBALS
229 int interface_timers_running; /* IGMPv3 general query response */
230 int state_change_timers_running; /* IGMPv3 state-change retransmit */
231 int current_state_timers_running; /* IGMPv1/v2 host report;
232 * IGMPv3 g/sg query response */
234 LIST_HEAD(, igmp_ifinfo) igi_head;
235 struct igmpstat igmpstat;
236 struct timeval igmp_gsrdelay;
238 int igmp_recvifkludge;
244 int igmp_default_version;
245 #endif /* VIMAGE_GLOBALS */
248 * Virtualized sysctls.
250 SYSCTL_V_STRUCT(V_NET, vnet_inet, _net_inet_igmp, IGMPCTL_STATS, stats,
251 CTLFLAG_RW, igmpstat, igmpstat, "");
252 SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_igmp, OID_AUTO, recvifkludge,
253 CTLFLAG_RW, igmp_recvifkludge, 0,
254 "Rewrite IGMPv1/v2 reports from 0.0.0.0 to contain subnet address");
255 SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_igmp, OID_AUTO, sendra,
256 CTLFLAG_RW, igmp_sendra, 0,
257 "Send IP Router Alert option in IGMPv2/v3 messages");
258 SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_igmp, OID_AUTO, sendlocal,
259 CTLFLAG_RW, igmp_sendlocal, 0,
260 "Send IGMP membership reports for 224.0.0.0/24 groups");
261 SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_igmp, OID_AUTO, v1enable,
262 CTLFLAG_RW, igmp_v1enable, 0,
263 "Enable backwards compatibility with IGMPv1");
264 SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_igmp, OID_AUTO, v2enable,
265 CTLFLAG_RW, igmp_v2enable, 0,
266 "Enable backwards compatibility with IGMPv2");
267 SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_igmp, OID_AUTO, legacysupp,
268 CTLFLAG_RW, igmp_legacysupp, 0,
269 "Allow v1/v2 reports to suppress v3 group responses");
270 SYSCTL_V_PROC(V_NET, vnet_inet, _net_inet_igmp, OID_AUTO, default_version,
271 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, igmp_default_version, 0,
272 sysctl_igmp_default_version, "I",
273 "Default version of IGMP to run on each interface");
274 SYSCTL_V_PROC(V_NET, vnet_inet, _net_inet_igmp, OID_AUTO, gsrdelay,
275 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, igmp_gsrdelay.tv_sec, 0,
276 sysctl_igmp_gsr, "I",
277 "Rate limit for IGMPv3 Group-and-Source queries in seconds");
280 * Non-virtualized sysctls.
282 SYSCTL_NODE(_net_inet_igmp, OID_AUTO, ifinfo, CTLFLAG_RD | CTLFLAG_MPSAFE,
283 sysctl_igmp_ifinfo, "Per-interface IGMPv3 state");
286 igmp_save_context(struct mbuf *m, struct ifnet *ifp)
290 m->m_pkthdr.header = ifp->if_vnet;
292 m->m_pkthdr.flowid = ifp->if_index;
296 igmp_scrub_context(struct mbuf *m)
299 m->m_pkthdr.header = NULL;
300 m->m_pkthdr.flowid = 0;
304 static __inline char *
305 inet_ntoa_haddr(in_addr_t haddr)
309 ia.s_addr = htonl(haddr);
310 return (inet_ntoa(ia));
315 * Restore context from a queued IGMP output chain.
316 * Return saved ifindex.
318 * VIMAGE: The assertion is there to make sure that we
319 * actually called CURVNET_SET() with what's in the mbuf chain.
321 static __inline uint32_t
322 igmp_restore_context(struct mbuf *m)
326 #if defined(VIMAGE) && defined(INVARIANTS)
327 KASSERT(curvnet == (m->m_pkthdr.header),
328 ("%s: called when curvnet was not restored", __func__));
331 return (m->m_pkthdr.flowid);
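/*
 * Usage sketch (assumption: the mbuf was queued by this file's output
 * path, so the context stored by igmp_save_context() is still intact):
 * how a dequeued chain's transmit context is recovered before sending.
 */
#if 0
	struct mbuf *m;			/* dequeued IGMP message */
	uint32_t ifindex;

	CURVNET_SET((struct vnet *)(m->m_pkthdr.header));
	ifindex = igmp_restore_context(m);
	/* ... resolve ifindex to an ifnet and hand the chain to ip_output() ... */
	igmp_scrub_context(m);
	CURVNET_RESTORE();
#endif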
335 * Retrieve or set default IGMP version.
337 * VIMAGE: Assume curvnet set by caller.
338 * SMPng: NOTE: Serialized by IGMP lock.
341 sysctl_igmp_default_version(SYSCTL_HANDLER_ARGS)
346 error = sysctl_wire_old_buffer(req, sizeof(int));
352 new = V_igmp_default_version;
354 error = sysctl_handle_int(oidp, &new, 0, req);
355 if (error || !req->newptr)
358 if (new < IGMP_VERSION_1 || new > IGMP_VERSION_3) {
363 CTR2(KTR_IGMPV3, "change igmp_default_version from %d to %d",
364 V_igmp_default_version, new);
366 V_igmp_default_version = new;
374 * Retrieve or set threshold between group-source queries in seconds.
376 * VIMAGE: Assume curvnet set by caller.
377 * SMPng: NOTE: Serialized by IGMP lock.
380 sysctl_igmp_gsr(SYSCTL_HANDLER_ARGS)
385 error = sysctl_wire_old_buffer(req, sizeof(int));
391 i = V_igmp_gsrdelay.tv_sec;
393 error = sysctl_handle_int(oidp, &i, 0, req);
394 if (error || !req->newptr)
397 if (i < -1 || i >= 60) {
402 CTR2(KTR_IGMPV3, "change igmp_gsrdelay from %d to %d",
403 V_igmp_gsrdelay.tv_sec, i);
404 V_igmp_gsrdelay.tv_sec = i;
412 * Expose struct igmp_ifinfo to userland, keyed by ifindex.
413 * For use by ifmcstat(8).
415 * SMPng: NOTE: Does an unlocked ifindex space read.
416 * VIMAGE: Assume curvnet set by caller. The node handler itself
417 * is not directly virtualized.
420 sysctl_igmp_ifinfo(SYSCTL_HANDLER_ARGS)
422 INIT_VNET_NET(curvnet);
427 struct igmp_ifinfo *igi;
432 if (req->newptr != NULL)
438 error = sysctl_wire_old_buffer(req, sizeof(struct igmp_ifinfo));
445 if (name[0] <= 0 || name[0] > V_if_index) {
452 ifp = ifnet_byindex(name[0]);
456 LIST_FOREACH(igi, &V_igi_head, igi_link) {
457 if (ifp == igi->igi_ifp) {
458 error = SYSCTL_OUT(req, igi,
459 sizeof(struct igmp_ifinfo));
471 * Dispatch an entire queue of pending packet chains
473 * VIMAGE: Assumes the vnet pointer has been set.
476 igmp_dispatch_queue(struct ifqueue *ifq, int limit, const int loop)
484 CTR3(KTR_IGMPV3, "%s: dispatch %p from %p", __func__, ifq, m);
486 m->m_flags |= M_IGMP_LOOP;
487 netisr_dispatch(NETISR_IGMP, m);
494 * Filter outgoing IGMP report state by group.
496 * Reports are ALWAYS suppressed for ALL-HOSTS (224.0.0.1).
497 * If the net.inet.igmp.sendlocal sysctl is 0, then IGMP reports are
498 * disabled for all groups in the 224.0.0.0/24 link-local scope. However,
499 * this may break certain IGMP snooping switches which rely on the old
502 * Return zero if the given group is one for which IGMP reports
503 * should be suppressed, or non-zero if reports should be issued.
506 igmp_isgroupreported(const struct in_addr addr)
509 if (in_allhosts(addr) ||
510 ((!V_igmp_sendlocal && IN_LOCAL_GROUP(ntohl(addr.s_addr)))))
517 * Construct a Router Alert option to use in outgoing packets.
525 MGET(m, M_DONTWAIT, MT_DATA);
526 p = mtod(m, struct ipoption *);
527 p->ipopt_dst.s_addr = INADDR_ANY;
528 p->ipopt_list[0] = IPOPT_RA; /* Router Alert Option */
529 p->ipopt_list[1] = 0x04; /* 4 bytes long */
530 p->ipopt_list[2] = IPOPT_EOL; /* End of IP option list */
531 p->ipopt_list[3] = 0x00; /* pad byte */
532 m->m_len = sizeof(p->ipopt_dst) + p->ipopt_list[1];
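/*
 * Illustrative sketch (layout per RFC 2113; not a quote of the kernel
 * headers): the 4-byte Router Alert option built above as it appears on
 * the wire, following the 4-byte ipopt_dst placeholder.
 */
#if 0
	static const uint8_t ra_opt_wire[4] = {
		0x94,	/* IPOPT_RA: copied flag set, option number 20 */
		0x04,	/* option length, including type and length bytes */
		0x00,	/* 16-bit value 0: "router shall examine packet" */
		0x00
	};
#endif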
538 * Attach IGMP when PF_INET is attached to an interface.
540 * VIMAGE: Currently we set the vnet pointer, although it is
541 * likely that it was already set by our caller.
544 igmp_domifattach(struct ifnet *ifp)
546 struct igmp_ifinfo *igi;
548 CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)",
549 __func__, ifp, ifp->if_xname);
551 CURVNET_SET(ifp->if_vnet);
554 igi = igi_alloc_locked(ifp);
555 if (!(ifp->if_flags & IFF_MULTICAST))
556 igi->igi_flags |= IGIF_SILENT;
565 * VIMAGE: assume curvnet set by caller.
567 static struct igmp_ifinfo *
568 igi_alloc_locked(/*const*/ struct ifnet *ifp)
570 struct igmp_ifinfo *igi;
574 igi = malloc(sizeof(struct igmp_ifinfo), M_IGMP, M_NOWAIT|M_ZERO);
579 igi->igi_version = V_igmp_default_version;
581 igi->igi_rv = IGMP_RV_INIT;
582 igi->igi_qi = IGMP_QI_INIT;
583 igi->igi_qri = IGMP_QRI_INIT;
584 igi->igi_uri = IGMP_URI_INIT;
586 SLIST_INIT(&igi->igi_relinmhead);
589 * Responses to general queries are subject to bounds.
591 IFQ_SET_MAXLEN(&igi->igi_gq, IGMP_MAX_RESPONSE_PACKETS);
593 LIST_INSERT_HEAD(&V_igi_head, igi, igi_link);
595 CTR2(KTR_IGMPV3, "allocate igmp_ifinfo for ifp %p(%s)",
605 * NOTE: Some finalization tasks need to run before the protocol domain
606 * is detached, but also before the link layer does its cleanup.
608 * SMPNG: igmp_ifdetach() needs to take IF_ADDR_LOCK().
609 * XXX This is also bitten by unlocked ifma_protospec access.
611 * VIMAGE: curvnet should have been set by caller, but let's not assume
615 igmp_ifdetach(struct ifnet *ifp)
617 struct igmp_ifinfo *igi;
618 struct ifmultiaddr *ifma;
619 struct in_multi *inm, *tinm;
621 CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)", __func__, ifp,
624 CURVNET_SET(ifp->if_vnet);
628 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
629 if (igi->igi_version == IGMP_VERSION_3) {
631 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
632 if (ifma->ifma_addr->sa_family != AF_INET ||
633 ifma->ifma_protospec == NULL)
636 KASSERT(ifma->ifma_protospec != NULL,
637 ("%s: ifma_protospec is NULL", __func__));
639 inm = (struct in_multi *)ifma->ifma_protospec;
640 if (inm->inm_state == IGMP_LEAVING_MEMBER) {
641 SLIST_INSERT_HEAD(&igi->igi_relinmhead,
644 inm_clear_recorded(inm);
648 * Free the in_multi reference(s) for this IGMP lifecycle.
650 SLIST_FOREACH_SAFE(inm, &igi->igi_relinmhead, inm_nrele,
652 SLIST_REMOVE_HEAD(&igi->igi_relinmhead, inm_nrele);
653 inm_release_locked(inm);
661 * Plug the potential race which may occur when a VIMAGE
662 * is detached and we are forced to queue pending IGMP output for
663 * output netisr processing due to !mpsafe_igmp. In this case it
664 * is possible that igmp_intr() is about to see mbuf chains with
665 * invalid cached curvnet pointers.
666 * This is a rare condition, so just blow them all away.
667 * FUTURE: This may in fact not be needed, because IFF_NEEDSGIANT
668 * is being removed in 8.x and the netisr may then be eliminated;
669 * it is needed only if VIMAGE and IFF_NEEDSGIANT need to co-exist
675 drops = igmpoq.ifq_len;
678 if (bootverbose && drops) {
679 printf("%s: dropped %d pending IGMP output packets\n",
689 * Hook for domifdetach.
691 * VIMAGE: curvnet should have been set by caller, but let's not assume
695 igmp_domifdetach(struct ifnet *ifp)
697 struct igmp_ifinfo *igi;
699 CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)",
700 __func__, ifp, ifp->if_xname);
702 CURVNET_SET(ifp->if_vnet);
705 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
706 igi_delete_locked(ifp);
713 igi_delete_locked(const struct ifnet *ifp)
715 struct igmp_ifinfo *igi, *tigi;
717 CTR3(KTR_IGMPV3, "%s: freeing igmp_ifinfo for ifp %p(%s)",
718 __func__, ifp, ifp->if_xname);
722 LIST_FOREACH_SAFE(igi, &V_igi_head, igi_link, tigi) {
723 if (igi->igi_ifp == ifp) {
725 * Free deferred General Query responses.
727 _IF_DRAIN(&igi->igi_gq);
729 LIST_REMOVE(igi, igi_link);
731 KASSERT(SLIST_EMPTY(&igi->igi_relinmhead),
732 ("%s: there are dangling in_multi references",
741 panic("%s: igmp_ifinfo not found for ifp %p\n", __func__, ifp);
746 * Process a received IGMPv1 query.
747 * Return non-zero if the message should be dropped.
749 * VIMAGE: The curvnet pointer is derived from the input ifp.
752 igmp_input_v1_query(struct ifnet *ifp, const struct ip *ip)
754 INIT_VNET_INET(ifp->if_vnet);
755 struct ifmultiaddr *ifma;
756 struct igmp_ifinfo *igi;
757 struct in_multi *inm;
760 * IGMPv1 General Queries SHOULD always be addressed to 224.0.0.1.
761 * igmp_group is always ignored. Do not drop it as a userland
762 * daemon may wish to see it.
764 if (!in_allhosts(ip->ip_dst)) {
765 ++V_igmpstat.igps_rcv_badqueries;
769 ++V_igmpstat.igps_rcv_gen_queries;
772 * Switch to IGMPv1 host compatibility mode.
777 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
778 KASSERT(igi != NULL, ("%s: no igmp_ifinfo for ifp %p", __func__, ifp));
780 if (igi->igi_flags & IGIF_LOOPBACK) {
781 CTR2(KTR_IGMPV3, "ignore v1 query on IGIF_LOOPBACK ifp %p(%s)",
786 igmp_set_version(igi, IGMP_VERSION_1);
788 CTR2(KTR_IGMPV3, "process v1 query on ifp %p(%s)", ifp, ifp->if_xname);
791 * Start the timers in all of our group records
792 * for the interface on which the query arrived,
793 * except those which are already running.
796 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
797 if (ifma->ifma_addr->sa_family != AF_INET ||
798 ifma->ifma_protospec == NULL)
800 inm = (struct in_multi *)ifma->ifma_protospec;
801 if (inm->inm_timer != 0)
803 switch (inm->inm_state) {
804 case IGMP_NOT_MEMBER:
805 case IGMP_SILENT_MEMBER:
807 case IGMP_G_QUERY_PENDING_MEMBER:
808 case IGMP_SG_QUERY_PENDING_MEMBER:
809 case IGMP_REPORTING_MEMBER:
810 case IGMP_IDLE_MEMBER:
811 case IGMP_LAZY_MEMBER:
812 case IGMP_SLEEPING_MEMBER:
813 case IGMP_AWAKENING_MEMBER:
814 inm->inm_state = IGMP_REPORTING_MEMBER;
815 inm->inm_timer = IGMP_RANDOM_DELAY(
816 IGMP_V1V2_MAX_RI * PR_FASTHZ);
817 V_current_state_timers_running = 1;
819 case IGMP_LEAVING_MEMBER:
833 * Process a received IGMPv2 general or group-specific query.
836 igmp_input_v2_query(struct ifnet *ifp, const struct ip *ip,
837 const struct igmp *igmp)
839 struct ifmultiaddr *ifma;
840 struct igmp_ifinfo *igi;
841 struct in_multi *inm;
845 * Perform lazy allocation of IGMP link info if required,
846 * and switch to IGMPv2 host compatibility mode.
851 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
852 KASSERT(igi != NULL, ("%s: no igmp_ifinfo for ifp %p", __func__, ifp));
854 if (igi->igi_flags & IGIF_LOOPBACK) {
855 CTR2(KTR_IGMPV3, "ignore v2 query on IGIF_LOOPBACK ifp %p(%s)",
860 igmp_set_version(igi, IGMP_VERSION_2);
862 timer = igmp->igmp_code * PR_FASTHZ / IGMP_TIMER_SCALE;
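	/*
	 * Worked example (assumes PR_FASTHZ is 5 ticks/s and IGMP_TIMER_SCALE
	 * is 10, i.e. igmp_code is in tenths of a second): a Max Resp Time of
	 * 10.0s arrives as igmp_code 100, giving 100 * 5 / 10 = 50 fast ticks.
	 */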
866 if (!in_nullhost(igmp->igmp_group)) {
868 * IGMPv2 Group-Specific Query.
869 * If this is a group-specific IGMPv2 query, we need only
870 * look up the single group to process it.
872 inm = inm_lookup(ifp, igmp->igmp_group);
874 CTR3(KTR_IGMPV3, "process v2 query %s on ifp %p(%s)",
875 inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname);
876 igmp_v2_update_group(inm, timer);
878 ++V_igmpstat.igps_rcv_group_queries;
881 * IGMPv2 General Query.
882 * If this was not sent to the all-hosts group, ignore it.
884 if (in_allhosts(ip->ip_dst)) {
886 * For each reporting group joined on this
887 * interface, kick the report timer.
890 "process v2 general query on ifp %p(%s)",
894 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
895 if (ifma->ifma_addr->sa_family != AF_INET ||
896 ifma->ifma_protospec == NULL)
898 inm = (struct in_multi *)ifma->ifma_protospec;
899 igmp_v2_update_group(inm, timer);
903 ++V_igmpstat.igps_rcv_gen_queries;
914 * Update the report timer on a group in response to an IGMPv2 query.
916 * If we are becoming the reporting member for this group, start the timer.
917 * If we already are the reporting member for this group, and timer is
918 * below the threshold, reset it.
920 * We may be updating the group for the first time since we switched
921 * to IGMPv3. If we are, then we must clear any recorded source lists,
922 * and transition to REPORTING state; the group timer is overloaded
923 * for group and group-source query responses.
925 * Unlike IGMPv3, the delay per group should be jittered
926 * to avoid bursts of IGMPv2 reports.
929 igmp_v2_update_group(struct in_multi *inm, const int timer)
932 CTR4(KTR_IGMPV3, "%s: %s/%s timer=%d", __func__,
933 inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname, timer);
935 IN_MULTI_LOCK_ASSERT();
937 switch (inm->inm_state) {
938 case IGMP_NOT_MEMBER:
939 case IGMP_SILENT_MEMBER:
941 case IGMP_REPORTING_MEMBER:
942 if (inm->inm_timer != 0 &&
943 inm->inm_timer <= timer) {
944 CTR1(KTR_IGMPV3, "%s: REPORTING and timer running, "
945 "skipping.", __func__);
949 case IGMP_SG_QUERY_PENDING_MEMBER:
950 case IGMP_G_QUERY_PENDING_MEMBER:
951 case IGMP_IDLE_MEMBER:
952 case IGMP_LAZY_MEMBER:
953 case IGMP_AWAKENING_MEMBER:
954 CTR1(KTR_IGMPV3, "%s: ->REPORTING", __func__);
955 inm->inm_state = IGMP_REPORTING_MEMBER;
956 inm->inm_timer = IGMP_RANDOM_DELAY(timer);
957 V_current_state_timers_running = 1;
959 case IGMP_SLEEPING_MEMBER:
960 CTR1(KTR_IGMPV3, "%s: ->AWAKENING", __func__);
961 inm->inm_state = IGMP_AWAKENING_MEMBER;
963 case IGMP_LEAVING_MEMBER:
969 * Process a received IGMPv3 general, group-specific or
970 * group-and-source-specific query.
971 * Assumes m has already been pulled up to the full IGMP message length.
972 * Return 0 if successful, otherwise an appropriate error code is returned.
975 igmp_input_v3_query(struct ifnet *ifp, const struct ip *ip,
976 /*const*/ struct igmpv3 *igmpv3)
978 struct igmp_ifinfo *igi;
979 struct in_multi *inm;
980 uint32_t maxresp, nsrc, qqi;
984 CTR2(KTR_IGMPV3, "process v3 query on ifp %p(%s)", ifp, ifp->if_xname);
986 maxresp = igmpv3->igmp_code; /* in 1/10ths of a second */
987 if (maxresp >= 128) {
988 maxresp = IGMP_MANT(igmpv3->igmp_code) <<
989 (IGMP_EXP(igmpv3->igmp_code) + 3);
993 * Robustness must never be less than 2 for on-wire IGMPv3.
994 * FIXME: Check if ifp has IGIF_LOOPBACK set, as we make
995 * an exception for interfaces whose IGMPv3 state changes
996 * are redirected to loopback (e.g. MANET).
998 qrv = IGMP_QRV(igmpv3->igmp_misc);
1000 CTR3(KTR_IGMPV3, "%s: clamping qrv %d to %d", __func__,
1005 qqi = igmpv3->igmp_qqi;
1007 qqi = IGMP_MANT(igmpv3->igmp_qqi) <<
1008 (IGMP_EXP(igmpv3->igmp_qqi) + 3);
1011 timer = maxresp * PR_FASTHZ / IGMP_TIMER_SCALE;
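	/*
	 * Illustrative sketch (assumption: mirrors RFC 3376 section 4.1.1,
	 * not a quote of the IGMP_MANT/IGMP_EXP macros): decoding a
	 * floating-point coded field such as Max Resp Code or QQIC.
	 */
#if 0
	static uint32_t
	igmp_decode_exp_field(uint8_t code)
	{
		if (code < 128)
			return (code);
		/* 1 | exp(3 bits) | mant(4 bits) -> (mant | 0x10) << (exp + 3) */
		return ((uint32_t)((code & 0x0f) | 0x10) <<
		    (((code >> 4) & 0x07) + 3));
	}
	/* e.g. 0x8a -> (0x0a | 0x10) << 3 = 208 tenths, i.e. 20.8 seconds. */
#endif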
1015 nsrc = ntohs(igmpv3->igmp_numsrc);
1020 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
1021 KASSERT(igi != NULL, ("%s: no igmp_ifinfo for ifp %p", __func__, ifp));
1023 if (igi->igi_flags & IGIF_LOOPBACK) {
1024 CTR2(KTR_IGMPV3, "ignore v3 query on IGIF_LOOPBACK ifp %p(%s)",
1025 ifp, ifp->if_xname);
1029 igmp_set_version(igi, IGMP_VERSION_3);
1033 igi->igi_qri = maxresp;
1035 CTR4(KTR_IGMPV3, "%s: qrv %d qi %d qri %d", __func__, qrv, qqi,
1038 if (in_nullhost(igmpv3->igmp_group)) {
1040 * IGMPv3 General Query.
1041 * Schedule a current-state report on this ifp for
1042 * all groups, possibly containing source lists.
1044 ++V_igmpstat.igps_rcv_gen_queries;
1046 if (!in_allhosts(ip->ip_dst) || nsrc > 0) {
1048 * General Queries SHOULD be directed to 224.0.0.1.
1049 * A general query with a source list has undefined
1050 * behaviour; discard it.
1052 ++V_igmpstat.igps_rcv_badqueries;
1056 CTR2(KTR_IGMPV3, "process v3 general query on ifp %p(%s)",
1057 ifp, ifp->if_xname);
1060 * If there is a pending General Query response
1061 * scheduled earlier than the selected delay, do
1062 * not schedule any other reports.
1063 * Otherwise, reset the interface timer.
1065 if (igi->igi_v3_timer == 0 || igi->igi_v3_timer >= timer) {
1066 igi->igi_v3_timer = IGMP_RANDOM_DELAY(timer);
1067 V_interface_timers_running = 1;
1071 * IGMPv3 Group-specific or Group-and-source-specific Query.
1073 * Group-source-specific queries are throttled on
1074 * a per-group basis to defeat denial-of-service attempts.
1075 * Queries for groups we are not a member of on this
1076 * link are simply ignored.
1078 inm = inm_lookup(ifp, igmpv3->igmp_group);
1082 ++V_igmpstat.igps_rcv_gsr_queries;
1083 if (!ratecheck(&inm->inm_lastgsrtv,
1084 &V_igmp_gsrdelay)) {
1085 CTR1(KTR_IGMPV3, "%s: GS query throttled.",
1087 ++V_igmpstat.igps_drop_gsr_queries;
1091 ++V_igmpstat.igps_rcv_group_queries;
1093 CTR3(KTR_IGMPV3, "process v3 %s query on ifp %p(%s)",
1094 inet_ntoa(igmpv3->igmp_group), ifp, ifp->if_xname);
1096 * If there is a pending General Query response
1097 * scheduled sooner than the selected delay, no
1098 * further report need be scheduled.
1099 * Otherwise, prepare to respond to the
1100 * group-specific or group-and-source query.
1102 if (igi->igi_v3_timer == 0 || igi->igi_v3_timer >= timer)
1103 igmp_input_v3_group_query(inm, igi, timer, igmpv3);
1114 * Process a received IGMPv3 group-specific or group-and-source-specific query.
1116 * Return <0 if any error occurred. Currently this is ignored.
1119 igmp_input_v3_group_query(struct in_multi *inm, struct igmp_ifinfo *igi,
1120 int timer, /*const*/ struct igmpv3 *igmpv3)
1125 IN_MULTI_LOCK_ASSERT();
1130 switch (inm->inm_state) {
1131 case IGMP_NOT_MEMBER:
1132 case IGMP_SILENT_MEMBER:
1133 case IGMP_SLEEPING_MEMBER:
1134 case IGMP_LAZY_MEMBER:
1135 case IGMP_AWAKENING_MEMBER:
1136 case IGMP_IDLE_MEMBER:
1137 case IGMP_LEAVING_MEMBER:
1140 case IGMP_REPORTING_MEMBER:
1141 case IGMP_G_QUERY_PENDING_MEMBER:
1142 case IGMP_SG_QUERY_PENDING_MEMBER:
1146 nsrc = ntohs(igmpv3->igmp_numsrc);
1149 * Deal with group-specific queries upfront.
1150 * If any group query is already pending, purge any recorded
1151 * source-list state if it exists, and schedule a query response
1152 * for this group-specific query.
1155 if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER ||
1156 inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER) {
1157 inm_clear_recorded(inm);
1158 timer = min(inm->inm_timer, timer);
1160 inm->inm_state = IGMP_G_QUERY_PENDING_MEMBER;
1161 inm->inm_timer = IGMP_RANDOM_DELAY(timer);
1162 V_current_state_timers_running = 1;
1167 * Deal with the case where a group-and-source-specific query has
1168 * been received but a group-specific query is already pending.
1170 if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER) {
1171 timer = min(inm->inm_timer, timer);
1172 inm->inm_timer = IGMP_RANDOM_DELAY(timer);
1173 V_current_state_timers_running = 1;
1178 * Finally, deal with the case where a group-and-source-specific
1179 * query has been received, where a response to a previous g-s-r
1180 * query exists, or none exists.
1181 * In this case, we need to parse the source-list which the Querier
1182 * has provided us with and check if we have any source list filter
1183 * entries at T1 for these sources. If we do not, there is no need
1184 * to schedule a report and the query may be dropped.
1185 * If we do, we must record them and schedule a current-state
1186 * report for those sources.
1187 * FIXME: Handling source lists larger than 1 mbuf requires that
1188 * we pass the mbuf chain pointer down to this function, and use
1189 * m_getptr() to walk the chain.
1191 if (inm->inm_nsrc > 0) {
1192 const struct in_addr *ap;
1195 ap = (const struct in_addr *)(igmpv3 + 1);
1197 for (i = 0; i < nsrc; i++, ap++) {
1198 retval = inm_record_source(inm, ap->s_addr);
1201 nrecorded += retval;
1203 if (nrecorded > 0) {
1205 "%s: schedule response to SG query", __func__);
1206 inm->inm_state = IGMP_SG_QUERY_PENDING_MEMBER;
1207 inm->inm_timer = IGMP_RANDOM_DELAY(timer);
1208 V_current_state_timers_running = 1;
1216 * Process a received IGMPv1 host membership report.
1218 * NOTE: 0.0.0.0 workaround breaks const correctness.
1221 igmp_input_v1_report(struct ifnet *ifp, /*const*/ struct ip *ip,
1222 /*const*/ struct igmp *igmp)
1224 struct in_ifaddr *ia;
1225 struct in_multi *inm;
1227 ++V_igmpstat.igps_rcv_reports;
1229 if (ifp->if_flags & IFF_LOOPBACK)
1232 if (!IN_MULTICAST(ntohl(igmp->igmp_group.s_addr)) ||
1233 !in_hosteq(igmp->igmp_group, ip->ip_dst)) {
1234 ++V_igmpstat.igps_rcv_badreports;
1239 * RFC 3376, Section 4.2.13, 9.2, 9.3:
1240 * Booting clients may use the source address 0.0.0.0. Some
1241 * IGMP daemons may not know how to use IP_RECVIF to determine
1242 * the interface upon which this message was received.
1243 * Replace 0.0.0.0 with the subnet address if told to do so.
1245 if (V_igmp_recvifkludge && in_nullhost(ip->ip_src)) {
1248 ip->ip_src.s_addr = htonl(ia->ia_subnet);
1251 CTR3(KTR_IGMPV3, "process v1 report %s on ifp %p(%s)",
1252 inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname);
1255 * IGMPv1 report suppression.
1256 * If we are a member of this group, and our membership should be
1257 * reported, stop our group timer and transition to the 'lazy' state.
1260 inm = inm_lookup(ifp, igmp->igmp_group);
1262 struct igmp_ifinfo *igi;
1266 KASSERT(igi != NULL,
1267 ("%s: no igi for ifp %p", __func__, ifp));
1271 ++V_igmpstat.igps_rcv_ourreports;
1274 * If we are in IGMPv3 host mode, do not allow the
1275 * other host's IGMPv1 report to suppress our reports
1276 * unless explicitly configured to do so.
1278 if (igi->igi_version == IGMP_VERSION_3) {
1279 if (V_igmp_legacysupp)
1280 igmp_v3_suppress_group_record(inm);
1286 switch (inm->inm_state) {
1287 case IGMP_NOT_MEMBER:
1288 case IGMP_SILENT_MEMBER:
1290 case IGMP_IDLE_MEMBER:
1291 case IGMP_LAZY_MEMBER:
1292 case IGMP_AWAKENING_MEMBER:
1294 "report suppressed for %s on ifp %p(%s)",
1295 inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname);
1296 case IGMP_SLEEPING_MEMBER:
1297 inm->inm_state = IGMP_SLEEPING_MEMBER;
1299 case IGMP_REPORTING_MEMBER:
1301 "report suppressed for %s on ifp %p(%s)",
1302 inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname);
1303 if (igi->igi_version == IGMP_VERSION_1)
1304 inm->inm_state = IGMP_LAZY_MEMBER;
1305 else if (igi->igi_version == IGMP_VERSION_2)
1306 inm->inm_state = IGMP_SLEEPING_MEMBER;
1308 case IGMP_G_QUERY_PENDING_MEMBER:
1309 case IGMP_SG_QUERY_PENDING_MEMBER:
1310 case IGMP_LEAVING_MEMBER:
1322 * Process a received IGMPv2 host membership report.
1324 * NOTE: 0.0.0.0 workaround breaks const correctness.
1327 igmp_input_v2_report(struct ifnet *ifp, /*const*/ struct ip *ip,
1328 /*const*/ struct igmp *igmp)
1330 struct in_ifaddr *ia;
1331 struct in_multi *inm;
1334 * Make sure we don't hear our own membership report. Fast
1335 * leave requires knowing that we are the only member of a group.
1339 if (ia != NULL && in_hosteq(ip->ip_src, IA_SIN(ia)->sin_addr))
1342 ++V_igmpstat.igps_rcv_reports;
1344 if (ifp->if_flags & IFF_LOOPBACK)
1347 if (!IN_MULTICAST(ntohl(igmp->igmp_group.s_addr)) ||
1348 !in_hosteq(igmp->igmp_group, ip->ip_dst)) {
1349 ++V_igmpstat.igps_rcv_badreports;
1354 * RFC 3376, Section 4.2.13, 9.2, 9.3:
1355 * Booting clients may use the source address 0.0.0.0. Some
1356 * IGMP daemons may not know how to use IP_RECVIF to determine
1357 * the interface upon which this message was received.
1358 * Replace 0.0.0.0 with the subnet address if told to do so.
1360 if (V_igmp_recvifkludge && in_nullhost(ip->ip_src)) {
1362 ip->ip_src.s_addr = htonl(ia->ia_subnet);
1365 CTR3(KTR_IGMPV3, "process v2 report %s on ifp %p(%s)",
1366 inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname);
1369 * IGMPv2 report suppression.
1370 * If we are a member of this group, and our membership should be
1371 * reported, and our group timer is pending or about to be reset,
1372 * stop our group timer by transitioning to the 'lazy' state.
1375 inm = inm_lookup(ifp, igmp->igmp_group);
1377 struct igmp_ifinfo *igi;
1380 KASSERT(igi != NULL, ("%s: no igi for ifp %p", __func__, ifp));
1382 ++V_igmpstat.igps_rcv_ourreports;
1385 * If we are in IGMPv3 host mode, do not allow the
1386 * other host's IGMPv2 report to suppress our reports
1387 * unless explicitly configured to do so.
1389 if (igi->igi_version == IGMP_VERSION_3) {
1390 if (V_igmp_legacysupp)
1391 igmp_v3_suppress_group_record(inm);
1397 switch (inm->inm_state) {
1398 case IGMP_NOT_MEMBER:
1399 case IGMP_SILENT_MEMBER:
1400 case IGMP_SLEEPING_MEMBER:
1402 case IGMP_REPORTING_MEMBER:
1403 case IGMP_IDLE_MEMBER:
1404 case IGMP_AWAKENING_MEMBER:
1406 "report suppressed for %s on ifp %p(%s)",
1407 inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname);
1408 case IGMP_LAZY_MEMBER:
1409 inm->inm_state = IGMP_LAZY_MEMBER;
1411 case IGMP_G_QUERY_PENDING_MEMBER:
1412 case IGMP_SG_QUERY_PENDING_MEMBER:
1413 case IGMP_LEAVING_MEMBER:
1425 igmp_input(struct mbuf *m, int off)
1435 CTR3(KTR_IGMPV3, "%s: called w/mbuf (%p,%d)", __func__, m, off);
1437 ifp = m->m_pkthdr.rcvif;
1438 INIT_VNET_INET(ifp->if_vnet);
1440 ++V_igmpstat.igps_rcv_total;
1442 ip = mtod(m, struct ip *);
1444 igmplen = ip->ip_len;
1449 if (igmplen < IGMP_MINLEN) {
1450 ++V_igmpstat.igps_rcv_tooshort;
1456 * Always pullup to the minimum size for v1/v2 or v3
1457 * to amortize calls to m_pullup().
1460 if (igmplen >= IGMP_V3_QUERY_MINLEN)
1461 minlen += IGMP_V3_QUERY_MINLEN;
1463 minlen += IGMP_MINLEN;
1464 if ((m->m_flags & M_EXT || m->m_len < minlen) &&
1465 (m = m_pullup(m, minlen)) == 0) {
1466 ++V_igmpstat.igps_rcv_tooshort;
1469 ip = mtod(m, struct ip *);
1471 if (ip->ip_ttl != 1) {
1472 ++V_igmpstat.igps_rcv_badttl;
1478 * Validate checksum.
1480 m->m_data += iphlen;
1482 igmp = mtod(m, struct igmp *);
1483 if (in_cksum(m, igmplen)) {
1484 ++V_igmpstat.igps_rcv_badsum;
1488 m->m_data -= iphlen;
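	/*
	 * Illustrative sketch (standalone, not the kernel's in_cksum()): the
	 * 16-bit ones'-complement sum being verified above. A received message
	 * is valid when the sum over the whole IGMP message, including its
	 * checksum field, folds to 0xffff, i.e. this routine returns 0.
	 */
#if 0
	static uint16_t
	igmp_cksum_sketch(const uint8_t *buf, int len)
	{
		uint32_t sum = 0;

		for (; len > 1; len -= 2, buf += 2)
			sum += (uint32_t)buf[0] << 8 | buf[1];
		if (len == 1)
			sum += (uint32_t)buf[0] << 8;
		while (sum >> 16)
			sum = (sum & 0xffff) + (sum >> 16);
		return (~sum & 0xffff);
	}
#endif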
1491 switch (igmp->igmp_type) {
1492 case IGMP_HOST_MEMBERSHIP_QUERY:
1493 if (igmplen == IGMP_MINLEN) {
1494 if (igmp->igmp_code == 0)
1495 queryver = IGMP_VERSION_1;
1497 queryver = IGMP_VERSION_2;
1498 } else if (igmplen >= IGMP_V3_QUERY_MINLEN) {
1499 queryver = IGMP_VERSION_3;
1501 ++V_igmpstat.igps_rcv_tooshort;
1507 case IGMP_VERSION_1:
1508 ++V_igmpstat.igps_rcv_v1v2_queries;
1509 if (!V_igmp_v1enable)
1511 if (igmp_input_v1_query(ifp, ip) != 0) {
1517 case IGMP_VERSION_2:
1518 ++V_igmpstat.igps_rcv_v1v2_queries;
1519 if (!V_igmp_v2enable)
1521 if (igmp_input_v2_query(ifp, ip, igmp) != 0) {
1527 case IGMP_VERSION_3: {
1528 struct igmpv3 *igmpv3;
1533 ++V_igmpstat.igps_rcv_v3_queries;
1534 igmpv3 = (struct igmpv3 *)igmp;
1536 * Validate length based on source count.
1538 nsrc = ntohs(igmpv3->igmp_numsrc);
1539 srclen = sizeof(struct in_addr) * nsrc;
1540 if (nsrc * sizeof(in_addr_t) > srclen) {
1541 ++V_igmpstat.igps_rcv_tooshort;
1545 * m_pullup() may modify m, so pullup in
1548 igmpv3len = iphlen + IGMP_V3_QUERY_MINLEN +
1550 if ((m->m_flags & M_EXT ||
1551 m->m_len < igmpv3len) &&
1552 (m = m_pullup(m, igmpv3len)) == NULL) {
1553 ++V_igmpstat.igps_rcv_tooshort;
1556 igmpv3 = (struct igmpv3 *)(mtod(m, uint8_t *)
1558 if (igmp_input_v3_query(ifp, ip, igmpv3) != 0) {
1567 case IGMP_v1_HOST_MEMBERSHIP_REPORT:
1568 if (!V_igmp_v1enable)
1570 if (igmp_input_v1_report(ifp, ip, igmp) != 0) {
1576 case IGMP_v2_HOST_MEMBERSHIP_REPORT:
1577 if (!V_igmp_v2enable)
1579 if (!ip_checkrouteralert(m))
1580 ++V_igmpstat.igps_rcv_nora;
1581 if (igmp_input_v2_report(ifp, ip, igmp) != 0) {
1587 case IGMP_v3_HOST_MEMBERSHIP_REPORT:
1589 * Hosts do not need to process IGMPv3 membership reports,
1590 * as report suppression is no longer required.
1592 if (!ip_checkrouteralert(m))
1593 ++V_igmpstat.igps_rcv_nora;
1601 * Pass all valid IGMP packets up to any process(es) listening on a raw IGMP socket.
1609 * Fast timeout handler (global).
1610 * VIMAGE: Timeout handlers are expected to service all vimages.
1616 VNET_ITERATOR_DECL(vnet_iter);
1619 VNET_FOREACH(vnet_iter) {
1620 CURVNET_SET(vnet_iter);
1621 INIT_VNET_INET(vnet_iter);
1622 igmp_fasttimo_vnet();
1625 VNET_LIST_RUNLOCK();
1628 igmp_fasttimo_vnet();
1633 * Fast timeout handler (per-vnet).
1634 * Sends are shuffled off to a netisr to deal with Giant.
1636 * VIMAGE: Assume caller has set up our curvnet.
1639 igmp_fasttimo_vnet(void)
1641 struct ifqueue scq; /* State-change packets */
1642 struct ifqueue qrq; /* Query response packets */
1644 struct igmp_ifinfo *igi;
1645 struct ifmultiaddr *ifma, *tifma;
1646 struct in_multi *inm;
1647 int loop, uri_fasthz;
1653 * Quick check to see if any work needs to be done, in order to
1654 * minimize the overhead of fasttimo processing.
1655 * SMPng: XXX Unlocked reads.
1657 if (!V_current_state_timers_running &&
1658 !V_interface_timers_running &&
1659 !V_state_change_timers_running)
1669 * IGMPv3 General Query response timer processing.
1671 if (V_interface_timers_running) {
1672 CTR1(KTR_IGMPV3, "%s: interface timers running", __func__);
1674 V_interface_timers_running = 0;
1675 LIST_FOREACH(igi, &V_igi_head, igi_link) {
1676 if (igi->igi_v3_timer == 0) {
1678 } else if (--igi->igi_v3_timer == 0) {
1679 igmp_v3_dispatch_general_query(igi);
1681 V_interface_timers_running = 1;
1686 if (!V_current_state_timers_running &&
1687 !V_state_change_timers_running)
1690 V_current_state_timers_running = 0;
1691 V_state_change_timers_running = 0;
1693 CTR1(KTR_IGMPV3, "%s: state change timers running", __func__);
1696 * IGMPv1/v2/v3 host report and state-change timer processing.
1697 * Note: Processing a v3 group timer may remove a node.
1699 LIST_FOREACH(igi, &V_igi_head, igi_link) {
1702 if (igi->igi_version == IGMP_VERSION_3) {
1703 loop = (igi->igi_flags & IGIF_LOOPBACK) ? 1 : 0;
1704 uri_fasthz = IGMP_RANDOM_DELAY(igi->igi_uri *
1707 memset(&qrq, 0, sizeof(struct ifqueue));
1708 IFQ_SET_MAXLEN(&qrq, IGMP_MAX_G_GS_PACKETS);
1710 memset(&scq, 0, sizeof(struct ifqueue));
1711 IFQ_SET_MAXLEN(&scq, IGMP_MAX_STATE_CHANGE_PACKETS);
1715 TAILQ_FOREACH_SAFE(ifma, &ifp->if_multiaddrs, ifma_link,
1717 if (ifma->ifma_addr->sa_family != AF_INET ||
1718 ifma->ifma_protospec == NULL)
1720 inm = (struct in_multi *)ifma->ifma_protospec;
1721 switch (igi->igi_version) {
1722 case IGMP_VERSION_1:
1723 case IGMP_VERSION_2:
1724 igmp_v1v2_process_group_timer(inm,
1727 case IGMP_VERSION_3:
1728 igmp_v3_process_group_timers(igi, &qrq,
1729 &scq, inm, uri_fasthz);
1733 IF_ADDR_UNLOCK(ifp);
1735 if (igi->igi_version == IGMP_VERSION_3) {
1736 struct in_multi *tinm;
1738 igmp_dispatch_queue(&qrq, 0, loop);
1739 igmp_dispatch_queue(&scq, 0, loop);
1742 * Free the in_multi reference(s) for this
1745 SLIST_FOREACH_SAFE(inm, &igi->igi_relinmhead,
1747 SLIST_REMOVE_HEAD(&igi->igi_relinmhead,
1749 inm_release_locked(inm);
1762 * Update host report group timer for IGMPv1/v2.
1763 * Will update the global pending timer flags.
1766 igmp_v1v2_process_group_timer(struct in_multi *inm, const int version)
1768 int report_timer_expired;
1770 IN_MULTI_LOCK_ASSERT();
1773 if (inm->inm_timer == 0) {
1774 report_timer_expired = 0;
1775 } else if (--inm->inm_timer == 0) {
1776 report_timer_expired = 1;
1778 V_current_state_timers_running = 1;
1782 switch (inm->inm_state) {
1783 case IGMP_NOT_MEMBER:
1784 case IGMP_SILENT_MEMBER:
1785 case IGMP_IDLE_MEMBER:
1786 case IGMP_LAZY_MEMBER:
1787 case IGMP_SLEEPING_MEMBER:
1788 case IGMP_AWAKENING_MEMBER:
1790 case IGMP_REPORTING_MEMBER:
1791 if (report_timer_expired) {
1792 inm->inm_state = IGMP_IDLE_MEMBER;
1793 (void)igmp_v1v2_queue_report(inm,
1794 (version == IGMP_VERSION_2) ?
1795 IGMP_v2_HOST_MEMBERSHIP_REPORT :
1796 IGMP_v1_HOST_MEMBERSHIP_REPORT);
1799 case IGMP_G_QUERY_PENDING_MEMBER:
1800 case IGMP_SG_QUERY_PENDING_MEMBER:
1801 case IGMP_LEAVING_MEMBER:
1807 * Update a group's timers for IGMPv3.
1808 * Will update the global pending timer flags.
1809 * Note: Unlocked read from igi.
1812 igmp_v3_process_group_timers(struct igmp_ifinfo *igi,
1813 struct ifqueue *qrq, struct ifqueue *scq,
1814 struct in_multi *inm, const int uri_fasthz)
1816 int query_response_timer_expired;
1817 int state_change_retransmit_timer_expired;
1819 IN_MULTI_LOCK_ASSERT();
1822 query_response_timer_expired = 0;
1823 state_change_retransmit_timer_expired = 0;
1826 * During a transition from v1/v2 compatibility mode back to v3,
1827 * a group record in REPORTING state may still have its group
1828 * timer active. This is a no-op in this function; it is easier
1829 * to deal with it here than to complicate the slow-timeout path.
1831 if (inm->inm_timer == 0) {
1832 query_response_timer_expired = 0;
1833 } else if (--inm->inm_timer == 0) {
1834 query_response_timer_expired = 1;
1836 V_current_state_timers_running = 1;
1839 if (inm->inm_sctimer == 0) {
1840 state_change_retransmit_timer_expired = 0;
1841 } else if (--inm->inm_sctimer == 0) {
1842 state_change_retransmit_timer_expired = 1;
1844 V_state_change_timers_running = 1;
1847 /* We are in fasttimo, so be quick about it. */
1848 if (!state_change_retransmit_timer_expired &&
1849 !query_response_timer_expired)
1852 switch (inm->inm_state) {
1853 case IGMP_NOT_MEMBER:
1854 case IGMP_SILENT_MEMBER:
1855 case IGMP_SLEEPING_MEMBER:
1856 case IGMP_LAZY_MEMBER:
1857 case IGMP_AWAKENING_MEMBER:
1858 case IGMP_IDLE_MEMBER:
1860 case IGMP_G_QUERY_PENDING_MEMBER:
1861 case IGMP_SG_QUERY_PENDING_MEMBER:
1863 * Respond to a previously pending Group-Specific
1864 * or Group-and-Source-Specific query by enqueueing
1865 * the appropriate Current-State report for
1866 * immediate transmission.
1868 if (query_response_timer_expired) {
1871 retval = igmp_v3_enqueue_group_record(qrq, inm, 0, 1,
1872 (inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER));
1873 CTR2(KTR_IGMPV3, "%s: enqueue record = %d",
1875 inm->inm_state = IGMP_REPORTING_MEMBER;
1876 /* XXX Clear recorded sources for next time. */
1877 inm_clear_recorded(inm);
1880 case IGMP_REPORTING_MEMBER:
1881 case IGMP_LEAVING_MEMBER:
1882 if (state_change_retransmit_timer_expired) {
1884 * State-change retransmission timer fired.
1885 * If there are any further pending retransmissions,
1886 * set the global pending state-change flag, and
1889 if (--inm->inm_scrv > 0) {
1890 inm->inm_sctimer = uri_fasthz;
1891 V_state_change_timers_running = 1;
1894 * Retransmit the previously computed state-change
1895 * report. If there are no further pending
1896 * retransmissions, the mbuf queue will be consumed.
1897 * Update T0 state to T1 as we have now sent
1900 (void)igmp_v3_merge_state_changes(inm, scq);
1903 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for %s/%s", __func__,
1904 inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname);
1907 * If we are leaving the group for good, make sure
1908 * we release IGMP's reference to it.
1909 * This release must be deferred using a SLIST,
1910 * as we are called from a loop which traverses
1911 * the in_ifmultiaddr TAILQ.
1913 if (inm->inm_state == IGMP_LEAVING_MEMBER &&
1914 inm->inm_scrv == 0) {
1915 inm->inm_state = IGMP_NOT_MEMBER;
1916 SLIST_INSERT_HEAD(&igi->igi_relinmhead,
1926 * Suppress a group's pending response to a group or source/group query.
1928 * Do NOT suppress state changes. This leads to IGMPv3 inconsistency.
1929 * Do NOT update ST1/ST0 as this operation merely suppresses
1930 * the currently pending group record.
1931 * Do NOT suppress the response to a general query. It is possible but
1932 * it would require adding another state or flag.
1935 igmp_v3_suppress_group_record(struct in_multi *inm)
1938 IN_MULTI_LOCK_ASSERT();
1940 KASSERT(inm->inm_igi->igi_version == IGMP_VERSION_3,
1941 ("%s: not IGMPv3 mode on link", __func__));
1943 if (inm->inm_state != IGMP_G_QUERY_PENDING_MEMBER &&
1944 inm->inm_state != IGMP_SG_QUERY_PENDING_MEMBER)
1947 if (inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER)
1948 inm_clear_recorded(inm);
1951 inm->inm_state = IGMP_REPORTING_MEMBER;
1955 * Switch to a different IGMP version on the given interface,
1956 * as per Section 7.2.1.
1959 igmp_set_version(struct igmp_ifinfo *igi, const int version)
1964 CTR4(KTR_IGMPV3, "%s: switching to v%d on ifp %p(%s)", __func__,
1965 version, igi->igi_ifp, igi->igi_ifp->if_xname);
1967 if (version == IGMP_VERSION_1 || version == IGMP_VERSION_2) {
1968 int old_version_timer;
1970 * Compute the "Older Version Querier Present" timer as per
1973 old_version_timer = igi->igi_rv * igi->igi_qi + igi->igi_qri;
1974 old_version_timer *= PR_SLOWHZ;
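	/*
	 * Worked example (assumes the RFC 3376 section 8.12 defaults carried
	 * in igi: Robustness 2, Query Interval 125s, Query Response Interval
	 * 10s, and PR_SLOWHZ of 2 ticks/s): the timer becomes
	 * 2 * 125 + 10 = 260 seconds, i.e. 520 slow-timeout ticks.
	 */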
1976 if (version == IGMP_VERSION_1) {
1977 igi->igi_v1_timer = old_version_timer;
1978 igi->igi_v2_timer = 0;
1979 } else if (version == IGMP_VERSION_2) {
1980 igi->igi_v1_timer = 0;
1981 igi->igi_v2_timer = old_version_timer;
1985 if (igi->igi_v1_timer == 0 && igi->igi_v2_timer > 0) {
1986 if (igi->igi_version != IGMP_VERSION_2) {
1987 igi->igi_version = IGMP_VERSION_2;
1988 igmp_v3_cancel_link_timers(igi);
1990 } else if (igi->igi_v1_timer > 0) {
1991 if (igi->igi_version != IGMP_VERSION_1) {
1992 igi->igi_version = IGMP_VERSION_1;
1993 igmp_v3_cancel_link_timers(igi);
1999 * Cancel pending IGMPv3 timers for the given link and all groups
2000 * joined on it; state-change, general-query, and group-query timers.
2003 igmp_v3_cancel_link_timers(struct igmp_ifinfo *igi)
2005 struct ifmultiaddr *ifma;
2007 struct in_multi *inm;
2009 CTR3(KTR_IGMPV3, "%s: cancel v3 timers on ifp %p(%s)", __func__,
2010 igi->igi_ifp, igi->igi_ifp->if_xname);
2012 IN_MULTI_LOCK_ASSERT();
2016 * Fast-track this potentially expensive operation
2017 * by checking all the global 'timer pending' flags.
2019 if (!V_interface_timers_running &&
2020 !V_state_change_timers_running &&
2021 !V_current_state_timers_running)
2024 igi->igi_v3_timer = 0;
2029 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2030 if (ifma->ifma_addr->sa_family != AF_INET)
2032 inm = (struct in_multi *)ifma->ifma_protospec;
2033 switch (inm->inm_state) {
2034 case IGMP_NOT_MEMBER:
2035 case IGMP_SILENT_MEMBER:
2036 case IGMP_IDLE_MEMBER:
2037 case IGMP_LAZY_MEMBER:
2038 case IGMP_SLEEPING_MEMBER:
2039 case IGMP_AWAKENING_MEMBER:
2041 case IGMP_LEAVING_MEMBER:
2043 * If we are leaving the group and switching
2044 * IGMP version, we need to release the final
2045 * reference held for issuing the INCLUDE {}.
2047 * SMPNG: Must drop and re-acquire IF_ADDR_LOCK
2048 * around inm_release_locked(), as it is not
2049 * a recursive mutex.
2051 IF_ADDR_UNLOCK(ifp);
2052 inm_release_locked(inm);
2055 case IGMP_G_QUERY_PENDING_MEMBER:
2056 case IGMP_SG_QUERY_PENDING_MEMBER:
2057 inm_clear_recorded(inm);
2059 case IGMP_REPORTING_MEMBER:
2060 inm->inm_sctimer = 0;
2062 inm->inm_state = IGMP_REPORTING_MEMBER;
2064 * Free any pending IGMPv3 state-change records.
2066 _IF_DRAIN(&inm->inm_scq);
2070 IF_ADDR_UNLOCK(ifp);
2074 * Update the Older Version Querier Present timers for a link.
2075 * See Section 7.2.1 of RFC 3376.
2078 igmp_v1v2_process_querier_timers(struct igmp_ifinfo *igi)
2083 if (igi->igi_v1_timer == 0 && igi->igi_v2_timer == 0) {
2085 * IGMPv1 and IGMPv2 Querier Present timers expired.
2089 if (igi->igi_version != IGMP_VERSION_3) {
2091 "%s: transition from v%d -> v%d on %p(%s)",
2092 __func__, igi->igi_version, IGMP_VERSION_3,
2093 igi->igi_ifp, igi->igi_ifp->if_xname);
2094 igi->igi_version = IGMP_VERSION_3;
2096 } else if (igi->igi_v1_timer == 0 && igi->igi_v2_timer > 0) {
2098 * IGMPv1 Querier Present timer expired,
2099 * IGMPv2 Querier Present timer running.
2100 * If IGMPv2 was disabled since last timeout, revert to IGMPv3.
2102 * If IGMPv2 is enabled, revert to IGMPv2.
2104 if (!V_igmp_v2enable) {
2106 "%s: transition from v%d -> v%d on %p(%s)",
2107 __func__, igi->igi_version, IGMP_VERSION_3,
2108 igi->igi_ifp, igi->igi_ifp->if_xname);
2109 igi->igi_v2_timer = 0;
2110 igi->igi_version = IGMP_VERSION_3;
2112 --igi->igi_v2_timer;
2113 if (igi->igi_version != IGMP_VERSION_2) {
2115 "%s: transition from v%d -> v%d on %p(%s)",
2116 __func__, igi->igi_version, IGMP_VERSION_2,
2117 igi->igi_ifp, igi->igi_ifp->if_xname);
2118 igi->igi_version = IGMP_VERSION_2;
2121 } else if (igi->igi_v1_timer > 0) {
2123 * IGMPv1 Querier Present timer running.
2124 * Stop IGMPv2 timer if running.
2126 * If IGMPv1 was disabled since last timeout, revert to IGMPv3.
2128 * If IGMPv1 is enabled, reset IGMPv2 timer if running.
2130 if (!V_igmp_v1enable) {
2132 "%s: transition from v%d -> v%d on %p(%s)",
2133 __func__, igi->igi_version, IGMP_VERSION_3,
2134 igi->igi_ifp, igi->igi_ifp->if_xname);
2135 igi->igi_v1_timer = 0;
2136 igi->igi_version = IGMP_VERSION_3;
2138 --igi->igi_v1_timer;
2140 if (igi->igi_v2_timer > 0) {
2142 "%s: cancel v2 timer on %p(%s)",
2143 __func__, igi->igi_ifp, igi->igi_ifp->if_xname);
2144 igi->igi_v2_timer = 0;
2150 * Global slowtimo handler.
2151 * VIMAGE: Timeout handlers are expected to service all vimages.
2157 VNET_ITERATOR_DECL(vnet_iter);
2160 VNET_FOREACH(vnet_iter) {
2161 CURVNET_SET(vnet_iter);
2162 INIT_VNET_INET(vnet_iter);
2163 igmp_slowtimo_vnet();
2166 VNET_LIST_RUNLOCK();
2168 igmp_slowtimo_vnet();
2173 * Per-vnet slowtimo handler.
2176 igmp_slowtimo_vnet(void)
2178 struct igmp_ifinfo *igi;
2182 LIST_FOREACH(igi, &V_igi_head, igi_link) {
2183 igmp_v1v2_process_querier_timers(igi);
2190 * Dispatch an IGMPv1/v2 host report or leave message.
2191 * These are always small enough to fit inside a single mbuf.
2194 igmp_v1v2_queue_report(struct in_multi *inm, const int type)
2201 IN_MULTI_LOCK_ASSERT();
2205 /* XXX are these needed ? */
2206 INIT_VNET_NET(ifp->if_vnet);
2207 INIT_VNET_INET(ifp->if_vnet);
2209 MGETHDR(m, M_DONTWAIT, MT_DATA);
2212 MH_ALIGN(m, sizeof(struct ip) + sizeof(struct igmp));
2214 m->m_pkthdr.len = sizeof(struct ip) + sizeof(struct igmp);
2216 m->m_data += sizeof(struct ip);
2217 m->m_len = sizeof(struct igmp);
2219 igmp = mtod(m, struct igmp *);
2220 igmp->igmp_type = type;
2221 igmp->igmp_code = 0;
2222 igmp->igmp_group = inm->inm_addr;
2223 igmp->igmp_cksum = 0;
2224 igmp->igmp_cksum = in_cksum(m, sizeof(struct igmp));
2226 m->m_data -= sizeof(struct ip);
2227 m->m_len += sizeof(struct ip);
2229 ip = mtod(m, struct ip *);
2231 ip->ip_len = sizeof(struct ip) + sizeof(struct igmp);
2233 ip->ip_p = IPPROTO_IGMP;
2234 ip->ip_src.s_addr = INADDR_ANY;
2236 if (type == IGMP_HOST_LEAVE_MESSAGE)
2237 ip->ip_dst.s_addr = htonl(INADDR_ALLRTRS_GROUP);
2239 ip->ip_dst = inm->inm_addr;
2241 igmp_save_context(m, ifp);
2243 m->m_flags |= M_IGMPV2;
2244 if (inm->inm_igi->igi_flags & IGIF_LOOPBACK)
2245 m->m_flags |= M_IGMP_LOOP;
2247 CTR2(KTR_IGMPV3, "%s: netisr_dispatch(NETISR_IGMP, %p)", __func__, m);
2248 netisr_dispatch(NETISR_IGMP, m);
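/*
 * Illustrative sketch (hypothetical group address; layout per RFC 2236):
 * the 8-byte IGMPv2 Membership Report built above, for group 239.1.1.1,
 * as it appears on the wire after the IP header (checksum shown unfilled).
 */
#if 0
	static const uint8_t igmpv2_report_wire[8] = {
		0x16,			/* type: IGMPv2 Membership Report */
		0x00,			/* max resp code: unused in reports */
		0x00, 0x00,		/* checksum, filled by in_cksum() */
		0xef, 0x01, 0x01, 0x01	/* group address 239.1.1.1 */
	};
#endif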
2254 * Process a state change from the upper layer for the given IPv4 group.
2256 * Each socket holds a reference on the in_multi in its own ip_moptions.
2257 * The socket layer will have made the necessary updates to the group
2258 * state; it is now up to IGMP to issue a state change report if there
2259 * has been any change between T0 (when the last state-change was issued) and T1.
2262 * We use the IGMPv3 state machine at group level. The IGMP module
2263 * however makes the decision as to which IGMP protocol version to speak.
2264 * A state change *from* INCLUDE {} always means an initial join.
2265 * A state change *to* INCLUDE {} always means a final leave.
2267 * FUTURE: If IGIF_V3LITE is enabled for this interface, then we can
2268 * save ourselves a bunch of work; any exclusive mode groups need not
2269 * compute source filter lists.
2271 * VIMAGE: curvnet should have been set by caller, as this routine
2272 * is called from the socket option handlers.
2275 igmp_change_state(struct in_multi *inm)
2277 struct igmp_ifinfo *igi;
2281 IN_MULTI_LOCK_ASSERT();
2286 * Try to detect if the upper layer just asked us to change state
2287 * for an interface which has now gone away.
2289 KASSERT(inm->inm_ifma != NULL, ("%s: no ifma", __func__));
2290 ifp = inm->inm_ifma->ifma_ifp;
2293 * Sanity check that netinet's notion of ifp is the
2296 KASSERT(inm->inm_ifp == ifp, ("%s: bad ifp", __func__));
2301 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
2302 KASSERT(igi != NULL, ("%s: no igmp_ifinfo for ifp %p", __func__, ifp));
2305 * If we detect a state transition to or from MCAST_UNDEFINED
2306 * for this group, then we are starting or finishing an IGMP
2307 * life cycle for this group.
2309 if (inm->inm_st[1].iss_fmode != inm->inm_st[0].iss_fmode) {
2310 CTR3(KTR_IGMPV3, "%s: inm transition %d -> %d", __func__,
2311 inm->inm_st[0].iss_fmode, inm->inm_st[1].iss_fmode);
2312 if (inm->inm_st[0].iss_fmode == MCAST_UNDEFINED) {
2313 CTR1(KTR_IGMPV3, "%s: initial join", __func__);
2314 error = igmp_initial_join(inm, igi);
2316 } else if (inm->inm_st[1].iss_fmode == MCAST_UNDEFINED) {
2317 CTR1(KTR_IGMPV3, "%s: final leave", __func__);
2318 igmp_final_leave(inm, igi);
2322 CTR1(KTR_IGMPV3, "%s: filter set change", __func__);
2325 error = igmp_handle_state_change(inm, igi);
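/*
 * Usage sketch (userland, hypothetical addresses): a socket join is what
 * ultimately drives igmp_change_state() with a T0 filter mode of
 * MCAST_UNDEFINED, i.e. the initial-join path above.
 */
#if 0
	struct ip_mreq mreq;

	mreq.imr_multiaddr.s_addr = inet_addr("239.1.1.1");
	mreq.imr_interface.s_addr = htonl(INADDR_ANY);
	(void)setsockopt(s, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq,
	    sizeof(mreq));
#endif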
2333 * Perform the initial join for an IGMP group.
2335 * When joining a group:
2336 * If the group should have its IGMP traffic suppressed, do nothing.
2337 * IGMPv1 starts sending IGMPv1 host membership reports.
2338 * IGMPv2 starts sending IGMPv2 host membership reports.
2339 * IGMPv3 will schedule an IGMPv3 state-change report containing the
2340 * initial state of the membership.
2343 igmp_initial_join(struct in_multi *inm, struct igmp_ifinfo *igi)
2346 struct ifqueue *ifq;
2347 int error, retval, syncstates;
2349 CTR4(KTR_IGMPV3, "%s: initial join %s on ifp %p(%s)",
2350 __func__, inet_ntoa(inm->inm_addr), inm->inm_ifp,
2351 inm->inm_ifp->if_xname);
2358 IN_MULTI_LOCK_ASSERT();
2361 KASSERT(igi && igi->igi_ifp == ifp, ("%s: inconsistent ifp", __func__));
2364 * Groups joined on loopback or marked as 'not reported',
2365 * e.g. 224.0.0.1, enter the IGMP_SILENT_MEMBER state and
2366 * are never reported in any IGMP protocol exchanges.
2367 * All other groups enter the appropriate IGMP state machine
2368 * for the version in use on this link.
2369 * A link marked as IGIF_SILENT causes IGMP to be completely
2370 * disabled for the link.
2372 if ((ifp->if_flags & IFF_LOOPBACK) ||
2373 (igi->igi_flags & IGIF_SILENT) ||
2374 !igmp_isgroupreported(inm->inm_addr)) {
2376 "%s: not kicking state machine for silent group", __func__);
2377 inm->inm_state = IGMP_SILENT_MEMBER;
2381 * Deal with overlapping in_multi lifecycle.
2382 * If this group was LEAVING, then make sure
2383 * we drop the reference we picked up to keep the
2384 * group around for the final INCLUDE {} enqueue.
2386 if (igi->igi_version == IGMP_VERSION_3 &&
2387 inm->inm_state == IGMP_LEAVING_MEMBER)
2388 inm_release_locked(inm);
2390 inm->inm_state = IGMP_REPORTING_MEMBER;
2392 switch (igi->igi_version) {
2393 case IGMP_VERSION_1:
2394 case IGMP_VERSION_2:
2395 inm->inm_state = IGMP_IDLE_MEMBER;
2396 error = igmp_v1v2_queue_report(inm,
2397 (igi->igi_version == IGMP_VERSION_2) ?
2398 IGMP_v2_HOST_MEMBERSHIP_REPORT :
2399 IGMP_v1_HOST_MEMBERSHIP_REPORT);
2401 inm->inm_timer = IGMP_RANDOM_DELAY(
2402 IGMP_V1V2_MAX_RI * PR_FASTHZ);
2403 V_current_state_timers_running = 1;
2407 case IGMP_VERSION_3:
2409 * Defer update of T0 to T1, until the first copy
2410 * of the state change has been transmitted.
2415 * Immediately enqueue a State-Change Report for
2416 * this interface, freeing any previous reports.
2417 * Don't kick the timers if there is nothing to do,
2418 * or if an error occurred.
2420 ifq = &inm->inm_scq;
2422 retval = igmp_v3_enqueue_group_record(ifq, inm, 1,
2424 CTR2(KTR_IGMPV3, "%s: enqueue record = %d",
2427 error = -retval;
2432 * Schedule transmission of pending state-change
2433 * report up to RV times for this link. The timer
2434 * will fire at the next igmp_fasttimo (~200ms),
2435 * giving us an opportunity to merge the reports.
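 * For example, with the RFC 3376 default Robustness Variable of 2,
 * the initial report is followed by one retransmission; loopback
 * links are limited to a single transmission.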
2437 if (igi->igi_flags & IGIF_LOOPBACK) {
2440 KASSERT(igi->igi_rv > 1,
2441 ("%s: invalid robustness %d", __func__,
2443 inm->inm_scrv = igi->igi_rv;
2445 inm->inm_sctimer = 1;
2446 V_state_change_timers_running = 1;
2454 * Only update the T0 state if state change is atomic,
2455 * i.e. we don't need to wait for a timer to fire before we
2456 * can consider the state change to have been communicated.
2460 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for %s/%s", __func__,
2461 inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname);
2468 * Issue an intermediate state change during the IGMP life-cycle.
2471 igmp_handle_state_change(struct in_multi *inm, struct igmp_ifinfo *igi)
2476 CTR4(KTR_IGMPV3, "%s: state change for %s on ifp %p(%s)",
2477 __func__, inet_ntoa(inm->inm_addr), inm->inm_ifp,
2478 inm->inm_ifp->if_xname);
2482 IN_MULTI_LOCK_ASSERT();
2485 KASSERT(igi && igi->igi_ifp == ifp, ("%s: inconsistent ifp", __func__));
2487 if ((ifp->if_flags & IFF_LOOPBACK) ||
2488 (igi->igi_flags & IGIF_SILENT) ||
2489 !igmp_isgroupreported(inm->inm_addr) ||
2490 (igi->igi_version != IGMP_VERSION_3)) {
2491 if (!igmp_isgroupreported(inm->inm_addr)) {
2493 "%s: not kicking state machine for silent group", __func__);
2495 CTR1(KTR_IGMPV3, "%s: nothing to do", __func__);
2497 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for %s/%s", __func__,
2498 inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname);
2502 _IF_DRAIN(&inm->inm_scq);
2504 retval = igmp_v3_enqueue_group_record(&inm->inm_scq, inm, 1, 0, 0);
2505 CTR2(KTR_IGMPV3, "%s: enqueue record = %d", __func__, retval);
2510 * If record(s) were enqueued, start the state-change
2511 * report timer for this group.
2513 inm->inm_scrv = ((igi->igi_flags & IGIF_LOOPBACK) ? 1 : igi->igi_rv);
2514 inm->inm_sctimer = 1;
2515 V_state_change_timers_running = 1;
2521 * Perform the final leave for an IGMP group.
2523 * When leaving a group:
2524 * IGMPv1 does nothing.
2525 * IGMPv2 sends a host leave message, if and only if we are the reporter.
2526 * IGMPv3 enqueues a state-change report containing a transition
2527 * to INCLUDE {} for immediate transmission.
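 *
 * For example, on an IGMPv3 link a leave enqueues a
 * CHANGE_TO_INCLUDE_MODE record with an empty source list, arms the
 * state-change timer, and holds an extra reference on the group in
 * IGMP_LEAVING_MEMBER state until the pending retransmissions have
 * been sent.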
2530 igmp_final_leave(struct in_multi *inm, struct igmp_ifinfo *igi)
2536 CTR4(KTR_IGMPV3, "%s: final leave %s on ifp %p(%s)",
2537 __func__, inet_ntoa(inm->inm_addr), inm->inm_ifp,
2538 inm->inm_ifp->if_xname);
2540 IN_MULTI_LOCK_ASSERT();
2543 switch (inm->inm_state) {
2544 case IGMP_NOT_MEMBER:
2545 case IGMP_SILENT_MEMBER:
2546 case IGMP_LEAVING_MEMBER:
2547 /* Already leaving or left; do nothing. */
2549 "%s: not kicking state machine for silent group", __func__);
2551 case IGMP_REPORTING_MEMBER:
2552 case IGMP_IDLE_MEMBER:
2553 case IGMP_G_QUERY_PENDING_MEMBER:
2554 case IGMP_SG_QUERY_PENDING_MEMBER:
2555 if (igi->igi_version == IGMP_VERSION_2) {
2557 if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER ||
2558 inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER)
2559 panic("%s: IGMPv3 state reached, not IGMPv3 mode",
2562 igmp_v1v2_queue_report(inm, IGMP_HOST_LEAVE_MESSAGE);
2563 inm->inm_state = IGMP_NOT_MEMBER;
2564 } else if (igi->igi_version == IGMP_VERSION_3) {
2566 * Stop group timer and all pending reports.
2567 * Immediately enqueue a state-change report
2568 * TO_IN {} to be sent on the next fast timeout,
2569 * giving us an opportunity to merge reports.
2571 _IF_DRAIN(&inm->inm_scq);
2573 if (igi->igi_flags & IGIF_LOOPBACK) {
2576 inm->inm_scrv = igi->igi_rv;
2578 CTR4(KTR_IGMPV3, "%s: Leaving %s/%s with %d "
2579 "pending retransmissions.", __func__,
2580 inet_ntoa(inm->inm_addr),
2581 inm->inm_ifp->if_xname, inm->inm_scrv);
2582 if (inm->inm_scrv == 0) {
2583 inm->inm_state = IGMP_NOT_MEMBER;
2584 inm->inm_sctimer = 0;
2588 inm_acquire_locked(inm);
2590 retval = igmp_v3_enqueue_group_record(
2591 &inm->inm_scq, inm, 1, 0, 0);
2592 KASSERT(retval != 0,
2593 ("%s: enqueue record = %d", __func__,
2596 inm->inm_state = IGMP_LEAVING_MEMBER;
2597 inm->inm_sctimer = 1;
2598 V_state_change_timers_running = 1;
2604 case IGMP_LAZY_MEMBER:
2605 case IGMP_SLEEPING_MEMBER:
2606 case IGMP_AWAKENING_MEMBER:
2607 /* Our reports are suppressed; do nothing. */
2613 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for %s/%s", __func__,
2614 inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname);
2615 inm->inm_st[1].iss_fmode = MCAST_UNDEFINED;
2616 CTR3(KTR_IGMPV3, "%s: T1 now MCAST_UNDEFINED for %s/%s",
2617 __func__, inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname);
2622 * Enqueue an IGMPv3 group record to the given output queue.
2624 * XXX This function could do with having the allocation code
2625 * split out, and the multiple-tree-walks coalesced into a single
2626 * routine as has been done in igmp_v3_enqueue_filter_change().
2628 * If is_state_change is zero, a current-state record is appended.
2629 * If is_state_change is non-zero, a state-change report is appended.
2631 * If is_group_query is non-zero, an mbuf packet chain is allocated.
2632 * If is_group_query is zero, and if there is a packet with free space
2633 * at the tail of the queue, the record will be appended to it,
2634 * provided there is enough free space.
2635 * Otherwise a new mbuf packet chain is allocated.
2637 * If is_source_query is non-zero, each source is checked to see if
2638 * it was recorded for a Group-Source query, and will be omitted if
2639 * it is not both in-mode and recorded.
2641 * The function will attempt to allocate leading space in the packet
2642 * for the IP/IGMP header to be prepended without fragmenting the chain.
2644 * If successful the size of all data appended to the queue is returned,
2645 * otherwise an error code less than zero is returned, or zero if
2646 * no record(s) were appended.
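 *
 * For reference, each record appended here uses the RFC 3376,
 * Section 4.2.4 group record layout: a one-byte record type, a
 * one-byte aux data length (zero for the types generated here), a
 * 16-bit source count, the 4-byte group address, and then the source
 * addresses. struct igmp_grouprec covers the fixed 8-byte header;
 * the sources are appended with m_append() afterwards.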
2649 igmp_v3_enqueue_group_record(struct ifqueue *ifq, struct in_multi *inm,
2650 const int is_state_change, const int is_group_query,
2651 const int is_source_query)
2653 struct igmp_grouprec ig;
2654 struct igmp_grouprec *pig;
2656 struct ip_msource *ims, *nims;
2657 struct mbuf *m0, *m, *md;
2658 int error, is_filter_list_change;
2659 int minrec0len, m0srcs, msrcs, nbytes, off;
2660 int record_has_sources;
2666 IN_MULTI_LOCK_ASSERT();
2670 is_filter_list_change = 0;
2677 record_has_sources = 1;
2679 type = IGMP_DO_NOTHING;
2680 mode = inm->inm_st[1].iss_fmode;
2683 * If we did not transition out of ASM mode during t0->t1,
2684 * and there are no source nodes to process, we can skip
2685 * the generation of source records.
2687 if (inm->inm_st[0].iss_asm > 0 && inm->inm_st[1].iss_asm > 0 &&
2689 record_has_sources = 0;
2691 if (is_state_change) {
2693 * Queue a state change record.
2694 * If the mode did not change, and there are non-ASM
2695 * listeners or source filters present,
2696 * we potentially need to issue two records for the group.
2697 * If we are transitioning to MCAST_UNDEFINED, we need
2698 * not send any sources.
2699 * If there are ASM listeners, and there was no filter
2700 * mode transition of any kind, do nothing.
2702 if (mode != inm->inm_st[0].iss_fmode) {
2703 if (mode == MCAST_EXCLUDE) {
2704 CTR1(KTR_IGMPV3, "%s: change to EXCLUDE",
2706 type = IGMP_CHANGE_TO_EXCLUDE_MODE;
2708 CTR1(KTR_IGMPV3, "%s: change to INCLUDE",
2710 type = IGMP_CHANGE_TO_INCLUDE_MODE;
2711 if (mode == MCAST_UNDEFINED)
2712 record_has_sources = 0;
2715 if (record_has_sources) {
2716 is_filter_list_change = 1;
2718 type = IGMP_DO_NOTHING;
2723 * Queue a current state record.
2725 if (mode == MCAST_EXCLUDE) {
2726 type = IGMP_MODE_IS_EXCLUDE;
2727 } else if (mode == MCAST_INCLUDE) {
2728 type = IGMP_MODE_IS_INCLUDE;
2729 KASSERT(inm->inm_st[1].iss_asm == 0,
2730 ("%s: inm %p is INCLUDE but ASM count is %d",
2731 __func__, inm, inm->inm_st[1].iss_asm));
2736 * Generate the filter list changes using a separate function.
2738 if (is_filter_list_change)
2739 return (igmp_v3_enqueue_filter_change(ifq, inm));
2741 if (type == IGMP_DO_NOTHING) {
2742 CTR3(KTR_IGMPV3, "%s: nothing to do for %s/%s",
2743 __func__, inet_ntoa(inm->inm_addr),
2744 inm->inm_ifp->if_xname);
2749 * If any sources are present, we must be able to fit at least
2750 * one in the trailing space of the tail packet's mbuf,
2753 minrec0len = sizeof(struct igmp_grouprec);
2754 if (record_has_sources)
2755 minrec0len += sizeof(in_addr_t);
2757 CTR4(KTR_IGMPV3, "%s: queueing %s for %s/%s", __func__,
2758 igmp_rec_type_to_str(type), inet_ntoa(inm->inm_addr),
2759 inm->inm_ifp->if_xname);
2762 * Check if we have a packet in the tail of the queue for this
2763 * group into which the first group record for this group will fit.
2764 * Otherwise allocate a new packet.
2765 * Always allocate leading space for IP+RA_OPT+IGMP+REPORT.
2766 * Note: Group records for G/GSR query responses MUST be sent
2767 * in their own packet.
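 * As a rough sizing example, assuming IGMP_LEADINGSPACE accounts for
 * the 20-byte IP header, 4-byte Router Alert option and 8-byte
 * IGMPv3 report header, a fresh packet on a 1500-byte MTU link can
 * carry about (1500 - 32 - 8) / 4 = 365 sources in its first record.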
2770 if (!is_group_query &&
2772 (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <= IGMP_V3_REPORT_MAXRECS) &&
2773 (m0->m_pkthdr.len + minrec0len) <
2774 (ifp->if_mtu - IGMP_LEADINGSPACE)) {
2775 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
2776 sizeof(struct igmp_grouprec)) / sizeof(in_addr_t);
2778 CTR1(KTR_IGMPV3, "%s: use existing packet", __func__);
2780 if (_IF_QFULL(ifq)) {
2781 CTR1(KTR_IGMPV3, "%s: outbound queue full", __func__);
2785 m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE -
2786 sizeof(struct igmp_grouprec)) / sizeof(in_addr_t);
2787 if (!is_state_change && !is_group_query)
2788 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2790 m = m_gethdr(M_DONTWAIT, MT_DATA);
2792 MH_ALIGN(m, IGMP_LEADINGSPACE);
2796 m->m_data += IGMP_LEADINGSPACE;
2798 igmp_save_context(m, ifp);
2800 CTR1(KTR_IGMPV3, "%s: allocated first packet", __func__);
2804 * Append group record.
2805 * If we have sources, we don't know how many yet.
2810 ig.ig_group = inm->inm_addr;
2811 if (!m_append(m, sizeof(struct igmp_grouprec), (void *)&ig)) {
2814 CTR1(KTR_IGMPV3, "%s: m_append() failed.", __func__);
2817 nbytes += sizeof(struct igmp_grouprec);
2820 * Append as many sources as will fit in the first packet.
2821 * If we are appending to a new packet, the chain allocation
2822 * may potentially use clusters; use m_getptr() in this case.
2823 * If we are appending to an existing packet, we need to obtain
2824 * a pointer to the group record after m_append(), in case a new
2825 * mbuf was allocated.
2826 * Only append sources which are in-mode at t1. If we are
2827 * transitioning to MCAST_UNDEFINED state on the group, do not
2828 * include source entries.
2829 * Only report recorded sources in our filter set when responding
2830 * to a group-source query.
2832 if (record_has_sources) {
2835 pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) +
2836 md->m_len - nbytes);
2838 md = m_getptr(m, 0, &off);
2839 pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) +
2843 RB_FOREACH_SAFE(ims, ip_msource_tree, &inm->inm_srcs, nims) {
2844 CTR2(KTR_IGMPV3, "%s: visit node %s", __func__,
2845 inet_ntoa_haddr(ims->ims_haddr));
2846 now = ims_get_mode(inm, ims, 1);
2847 CTR2(KTR_IGMPV3, "%s: node is %d", __func__, now);
2848 if ((now != mode) ||
2849 (now == mode && mode == MCAST_UNDEFINED)) {
2850 CTR1(KTR_IGMPV3, "%s: skip node", __func__);
2853 if (is_source_query && ims->ims_stp == 0) {
2854 CTR1(KTR_IGMPV3, "%s: skip unrecorded node",
2858 CTR1(KTR_IGMPV3, "%s: append node", __func__);
2859 naddr = htonl(ims->ims_haddr);
2860 if (!m_append(m, sizeof(in_addr_t), (void *)&naddr)) {
2863 CTR1(KTR_IGMPV3, "%s: m_append() failed.",
2867 nbytes += sizeof(in_addr_t);
2869 if (msrcs == m0srcs)
2872 CTR2(KTR_IGMPV3, "%s: msrcs is %d this packet", __func__,
2874 pig->ig_numsrc = htons(msrcs);
2875 nbytes += (msrcs * sizeof(in_addr_t));
2878 if (is_source_query && msrcs == 0) {
2879 CTR1(KTR_IGMPV3, "%s: no recorded sources to report", __func__);
2886 * We are good to go with the first packet.
2889 CTR1(KTR_IGMPV3, "%s: enqueueing first packet", __func__);
2890 m->m_pkthdr.PH_vt.vt_nrecs = 1;
2891 _IF_ENQUEUE(ifq, m);
2893 m->m_pkthdr.PH_vt.vt_nrecs++;
2896 * No further work needed if no source list in packet(s).
2898 if (!record_has_sources)
2902 * Whilst sources remain to be announced, we need to allocate
2903 * a new packet and fill out as many sources as will fit.
2904 * Always try for a cluster first.
2906 while (nims != NULL) {
2907 if (_IF_QFULL(ifq)) {
2908 CTR1(KTR_IGMPV3, "%s: outbound queue full", __func__);
2911 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2913 m = m_gethdr(M_DONTWAIT, MT_DATA);
2915 MH_ALIGN(m, IGMP_LEADINGSPACE);
2919 igmp_save_context(m, ifp);
2920 m->m_data += IGMP_LEADINGSPACE;
2921 md = m_getptr(m, 0, &off);
2922 pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) + off);
2923 CTR1(KTR_IGMPV3, "%s: allocated next packet", __func__);
2925 if (!m_append(m, sizeof(struct igmp_grouprec), (void *)&ig)) {
2928 CTR1(KTR_IGMPV3, "%s: m_append() failed.", __func__);
2931 m->m_pkthdr.PH_vt.vt_nrecs = 1;
2932 nbytes += sizeof(struct igmp_grouprec);
2934 m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE -
2935 sizeof(struct igmp_grouprec)) / sizeof(in_addr_t);
2938 RB_FOREACH_FROM(ims, ip_msource_tree, nims) {
2939 CTR2(KTR_IGMPV3, "%s: visit node %s", __func__,
2940 inet_ntoa_haddr(ims->ims_haddr));
2941 now = ims_get_mode(inm, ims, 1);
2942 if ((now != mode) ||
2943 (now == mode && mode == MCAST_UNDEFINED)) {
2944 CTR1(KTR_IGMPV3, "%s: skip node", __func__);
2947 if (is_source_query && ims->ims_stp == 0) {
2948 CTR1(KTR_IGMPV3, "%s: skip unrecorded node",
2952 CTR1(KTR_IGMPV3, "%s: append node", __func__);
2953 naddr = htonl(ims->ims_haddr);
2954 if (!m_append(m, sizeof(in_addr_t), (void *)&naddr)) {
2957 CTR1(KTR_IGMPV3, "%s: m_append() failed.",
2962 if (msrcs == m0srcs)
2965 pig->ig_numsrc = htons(msrcs);
2966 nbytes += (msrcs * sizeof(in_addr_t));
2968 CTR1(KTR_IGMPV3, "%s: enqueueing next packet", __func__);
2969 _IF_ENQUEUE(ifq, m);
2976 * Type used to mark record pass completion.
2977 * We exploit the fact that we can cast to this easily from the
2978 * current filter modes on each ip_msource node.
2981 REC_NONE = 0x00, /* MCAST_UNDEFINED */
2982 REC_ALLOW = 0x01, /* MCAST_INCLUDE */
2983 REC_BLOCK = 0x02, /* MCAST_EXCLUDE */
2984 REC_FULL = REC_ALLOW | REC_BLOCK
2988 * Enqueue an IGMPv3 filter list change to the given output queue.
2990 * Source list filter state is held in an RB-tree. When the filter list
2991 * for a group is changed without changing its mode, we need to compute
2992 * the deltas between T0 and T1 for each source in the filter set,
2993 * and enqueue the appropriate ALLOW_NEW/BLOCK_OLD records.
2995 * As we may potentially queue two record types, and the entire R-B tree
2996 * needs to be walked at once, we break this out into its own function
2997 * so we can generate a tightly packed queue of packets.
2999 * XXX This could be written to only use one tree walk, although that makes
3000 * serializing into the mbuf chains a bit harder. For now we do two walks
3001 * which makes things easier on us, and it may or may not be harder on
3004 * If successful the size of all data appended to the queue is returned,
3005 * otherwise an error code less than zero is returned, or zero if
3006 * no record(s) were appended.
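 *
 * For example, a change on an INCLUDE-mode group from INCLUDE {A, B}
 * at t0 to INCLUDE {B, C} at t1 yields an ALLOW_NEW_SOURCES record
 * listing C and a BLOCK_OLD_SOURCES record listing A; B is unchanged
 * and is not reported.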
3009 igmp_v3_enqueue_filter_change(struct ifqueue *ifq, struct in_multi *inm)
3011 static const int MINRECLEN =
3012 sizeof(struct igmp_grouprec) + sizeof(in_addr_t);
3014 struct igmp_grouprec ig;
3015 struct igmp_grouprec *pig;
3016 struct ip_msource *ims, *nims;
3017 struct mbuf *m, *m0, *md;
3019 int m0srcs, nbytes, off, rsrcs, schanged;
3021 uint8_t mode, now, then;
3022 rectype_t crt, drt, nrt;
3024 IN_MULTI_LOCK_ASSERT();
3026 if (inm->inm_nsrc == 0 ||
3027 (inm->inm_st[0].iss_asm > 0 && inm->inm_st[1].iss_asm > 0))
3030 ifp = inm->inm_ifp; /* interface */
3031 mode = inm->inm_st[1].iss_fmode; /* filter mode at t1 */
3032 crt = REC_NONE; /* current group record type */
3033 drt = REC_NONE; /* mask of completed group record types */
3034 nrt = REC_NONE; /* record type for current node */
3035 m0srcs = 0; /* # source which will fit in current mbuf chain */
3036 nbytes = 0; /* # of bytes appended to group's state-change queue */
3037 rsrcs = 0; /* # sources encoded in current record */
3038 schanged = 0; /* # nodes encoded in overall filter change */
3039 nallow = 0; /* # of source entries in ALLOW_NEW */
3040 nblock = 0; /* # of source entries in BLOCK_OLD */
3041 nims = NULL; /* next tree node pointer */
3044 * For each possible filter record mode.
3045 * The first kind of source we encounter tells us which
3046 * is the first kind of record we start appending.
3047 * If a node transitioned to UNDEFINED at t1, its mode is treated
3048 * as the inverse of the group's filter mode.
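 * E.g. on an EXCLUDE-mode group, a node which becomes UNDEFINED at
 * t1 was previously blocked and is therefore reported as
 * ALLOW_NEW_SOURCES; on an INCLUDE-mode group the same transition
 * is reported as BLOCK_OLD_SOURCES.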
3050 while (drt != REC_FULL) {
3054 (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <=
3055 IGMP_V3_REPORT_MAXRECS) &&
3056 (m0->m_pkthdr.len + MINRECLEN) <
3057 (ifp->if_mtu - IGMP_LEADINGSPACE)) {
3059 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
3060 sizeof(struct igmp_grouprec)) /
3063 "%s: use previous packet", __func__);
3065 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
3067 m = m_gethdr(M_DONTWAIT, MT_DATA);
3069 MH_ALIGN(m, IGMP_LEADINGSPACE);
3073 "%s: m_get*() failed", __func__);
3076 m->m_pkthdr.PH_vt.vt_nrecs = 0;
3077 igmp_save_context(m, ifp);
3078 m->m_data += IGMP_LEADINGSPACE;
3079 m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE -
3080 sizeof(struct igmp_grouprec)) /
3083 "%s: allocated new packet", __func__);
3086 * Append the IGMP group record header to the
3087 * current packet's data area.
3088 * Recalculate pointer to free space for next
3089 * group record, in case m_append() allocated
3090 * a new mbuf or cluster.
3092 memset(&ig, 0, sizeof(ig));
3093 ig.ig_group = inm->inm_addr;
3094 if (!m_append(m, sizeof(ig), (void *)&ig)) {
3098 "%s: m_append() failed", __func__);
3101 nbytes += sizeof(struct igmp_grouprec);
3104 pig = (struct igmp_grouprec *)(mtod(md,
3105 uint8_t *) + md->m_len - nbytes);
3107 md = m_getptr(m, 0, &off);
3108 pig = (struct igmp_grouprec *)(mtod(md,
3112 * Begin walking the tree for this record type
3113 * pass, or continue from where we left off
3114 * previously if we had to allocate a new packet.
3115 * Only report deltas in-mode at t1.
3116 * We need not report included sources as allowed
3117 * if we are in inclusive mode on the group;
3118 * the converse, however, is not true.
3122 nims = RB_MIN(ip_msource_tree, &inm->inm_srcs);
3123 RB_FOREACH_FROM(ims, ip_msource_tree, nims) {
3124 CTR2(KTR_IGMPV3, "%s: visit node %s",
3125 __func__, inet_ntoa_haddr(ims->ims_haddr));
3126 now = ims_get_mode(inm, ims, 1);
3127 then = ims_get_mode(inm, ims, 0);
3128 CTR3(KTR_IGMPV3, "%s: mode: t0 %d, t1 %d",
3129 __func__, then, now);
3132 "%s: skip unchanged", __func__);
3135 if (mode == MCAST_EXCLUDE &&
3136 now == MCAST_INCLUDE) {
3138 "%s: skip IN src on EX group",
3142 nrt = (rectype_t)now;
3143 if (nrt == REC_NONE)
3144 nrt = (rectype_t)(~mode & REC_FULL);
3145 if (schanged++ == 0) {
3147 } else if (crt != nrt)
3149 naddr = htonl(ims->ims_haddr);
3150 if (!m_append(m, sizeof(in_addr_t),
3155 "%s: m_append() failed", __func__);
3158 nallow += !!(crt == REC_ALLOW);
3159 nblock += !!(crt == REC_BLOCK);
3160 if (++rsrcs == m0srcs)
3164 * If we did not append any tree nodes on this
3165 * pass, back out of allocations.
3168 nbytes -= sizeof(struct igmp_grouprec);
3171 "%s: m_free(m)", __func__);
3175 "%s: m_adj(m, -ig)", __func__);
3176 m_adj(m, -((int)sizeof(
3177 struct igmp_grouprec)));
3181 nbytes += (rsrcs * sizeof(in_addr_t));
3182 if (crt == REC_ALLOW)
3183 pig->ig_type = IGMP_ALLOW_NEW_SOURCES;
3184 else if (crt == REC_BLOCK)
3185 pig->ig_type = IGMP_BLOCK_OLD_SOURCES;
3186 pig->ig_numsrc = htons(rsrcs);
3188 * Count the new group record, and enqueue this
3189 * packet if it wasn't already queued.
3191 m->m_pkthdr.PH_vt.vt_nrecs++;
3193 _IF_ENQUEUE(ifq, m);
3194 } while (nims != NULL);
3196 crt = (~crt & REC_FULL);
3199 CTR3(KTR_IGMPV3, "%s: queued %d ALLOW_NEW, %d BLOCK_OLD", __func__,
3206 igmp_v3_merge_state_changes(struct in_multi *inm, struct ifqueue *ifscq)
3209 struct mbuf *m; /* pending state-change */
3210 struct mbuf *m0; /* copy of pending state-change */
3211 struct mbuf *mt; /* last state-change in packet */
3212 int docopy, domerge;
3219 IN_MULTI_LOCK_ASSERT();
3223 * If there are further pending retransmissions, make a writable
3224 * copy of each queued state-change message before merging.
3226 if (inm->inm_scrv > 0)
3231 if (gq->ifq_head == NULL) {
3232 CTR2(KTR_IGMPV3, "%s: WARNING: queue for inm %p is empty",
3240 * Only merge the report into the current packet if
3241 * there is sufficient space to do so; an IGMPv3 report
3242 * packet may only contain 65,535 group records.
3243 * Always use a simple mbuf chain concatenation to do this,
3244 * as large state changes for single groups may have
3245 * allocated clusters.
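 * (The 65,535 cap derives from the 16-bit group record count in the
 * IGMPv3 report header; see igmp_v3_encap_report().)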
3248 mt = ifscq->ifq_tail;
3250 recslen = m_length(m, NULL);
3252 if ((mt->m_pkthdr.PH_vt.vt_nrecs +
3253 m->m_pkthdr.PH_vt.vt_nrecs <=
3254 IGMP_V3_REPORT_MAXRECS) &&
3255 (mt->m_pkthdr.len + recslen <=
3256 (inm->inm_ifp->if_mtu - IGMP_LEADINGSPACE)))
3260 if (!domerge && _IF_QFULL(gq)) {
3262 "%s: outbound queue full, skipping whole packet %p",
3272 CTR2(KTR_IGMPV3, "%s: dequeueing %p", __func__, m);
3273 _IF_DEQUEUE(gq, m0);
3276 CTR2(KTR_IGMPV3, "%s: copying %p", __func__, m);
3277 m0 = m_dup(m, M_NOWAIT);
3280 m0->m_nextpkt = NULL;
3285 CTR3(KTR_IGMPV3, "%s: queueing %p to ifscq %p",
3286 __func__, m0, ifscq);
3287 _IF_ENQUEUE(ifscq, m0);
3289 struct mbuf *mtl; /* last mbuf of packet mt */
3291 CTR3(KTR_IGMPV3, "%s: merging %p with ifscq tail %p",
3295 m0->m_flags &= ~M_PKTHDR;
3296 mt->m_pkthdr.len += recslen;
3297 mt->m_pkthdr.PH_vt.vt_nrecs +=
3298 m0->m_pkthdr.PH_vt.vt_nrecs;
3308 * Respond to a pending IGMPv3 General Query.
3311 igmp_v3_dispatch_general_query(struct igmp_ifinfo *igi)
3313 struct ifmultiaddr *ifma, *tifma;
3315 struct in_multi *inm;
3318 IN_MULTI_LOCK_ASSERT();
3321 KASSERT(igi->igi_version == IGMP_VERSION_3,
3322 ("%s: called when version %d", __func__, igi->igi_version));
3327 TAILQ_FOREACH_SAFE(ifma, &ifp->if_multiaddrs, ifma_link, tifma) {
3328 if (ifma->ifma_addr->sa_family != AF_INET ||
3329 ifma->ifma_protospec == NULL)
3332 inm = (struct in_multi *)ifma->ifma_protospec;
3333 KASSERT(ifp == inm->inm_ifp,
3334 ("%s: inconsistent ifp", __func__));
3336 switch (inm->inm_state) {
3337 case IGMP_NOT_MEMBER:
3338 case IGMP_SILENT_MEMBER:
3340 case IGMP_REPORTING_MEMBER:
3341 case IGMP_IDLE_MEMBER:
3342 case IGMP_LAZY_MEMBER:
3343 case IGMP_SLEEPING_MEMBER:
3344 case IGMP_AWAKENING_MEMBER:
3345 inm->inm_state = IGMP_REPORTING_MEMBER;
3346 retval = igmp_v3_enqueue_group_record(&igi->igi_gq,
3348 CTR2(KTR_IGMPV3, "%s: enqueue record = %d",
3351 case IGMP_G_QUERY_PENDING_MEMBER:
3352 case IGMP_SG_QUERY_PENDING_MEMBER:
3353 case IGMP_LEAVING_MEMBER:
3357 IF_ADDR_UNLOCK(ifp);
3359 loop = (igi->igi_flags & IGIF_LOOPBACK) ? 1 : 0;
3360 igmp_dispatch_queue(&igi->igi_gq, IGMP_MAX_RESPONSE_BURST, loop);
3363 * Slew transmission of bursts over 500ms intervals.
3365 if (igi->igi_gq.ifq_head != NULL) {
3366 igi->igi_v3_timer = 1 + IGMP_RANDOM_DELAY(
3367 IGMP_RESPONSE_BURST_INTERVAL);
3368 V_interface_timers_running = 1;
3373 * Transmit the next pending IGMP message in the output queue.
3375 * We get called from netisr_processqueue(). A mutex private to igmpoq
3376 * will be acquired and released around this routine.
3378 * VIMAGE: Needs to store/restore vnet pointer on a per-mbuf-chain basis.
3379 * MRT: Nothing needs to be done, as IGMP traffic is always local to
3380 * a link and uses a link-scope multicast address.
3383 igmp_intr(struct mbuf *m)
3385 struct ip_moptions imo;
3387 struct mbuf *ipopts, *m0;
3391 CTR2(KTR_IGMPV3, "%s: transmit %p", __func__, m);
3394 * Restore VNET image pointer from enqueued mbuf chain
3395 * before doing anything else. Whilst we use interface
3396 * indexes to guard against interface detach, they are
3397 * unique to each VIMAGE and must be retrieved.
3399 CURVNET_SET(m->m_pkthdr.header);
3400 ifindex = igmp_restore_context(m);
3403 * Check if the ifnet still exists. This limits the scope of
3404 * any race in the absence of a global ifp lock, at low cost
3405 * (an array lookup).
3407 ifp = ifnet_byindex(ifindex);
3409 CTR3(KTR_IGMPV3, "%s: dropped %p as ifindex %u went away.",
3410 __func__, m, ifindex);
3412 V_ipstat.ips_noroute++;
3416 ipopts = V_igmp_sendra ? m_raopt : NULL;
3418 imo.imo_multicast_ttl = 1;
3419 imo.imo_multicast_vif = -1;
3420 imo.imo_multicast_loop = (V_ip_mrouter != NULL);
3423 * If the user requested that IGMP traffic be explicitly
3424 * redirected to the loopback interface (e.g. they are running a
3425 * MANET interface and the routing protocol needs to see the
3426 * updates), handle this now.
3428 if (m->m_flags & M_IGMP_LOOP)
3429 imo.imo_multicast_ifp = V_loif;
3431 imo.imo_multicast_ifp = ifp;
3433 if (m->m_flags & M_IGMPV2) {
3436 m0 = igmp_v3_encap_report(ifp, m);
3438 CTR2(KTR_IGMPV3, "%s: dropped %p", __func__, m);
3440 V_ipstat.ips_odropped++;
3445 igmp_scrub_context(m0);
3446 m->m_flags &= ~(M_PROTOFLAGS);
3447 m0->m_pkthdr.rcvif = V_loif;
3449 mac_netinet_igmp_send(ifp, m0);
3451 error = ip_output(m0, ipopts, NULL, 0, &imo, NULL);
3453 CTR3(KTR_IGMPV3, "%s: ip_output(%p) = %d", __func__, m0, error);
3457 ++V_igmpstat.igps_snd_reports;
3461 * We must restore the existing vnet pointer before
3462 * continuing as we are run from netisr context.
3468 * Encapsulate an IGMPv3 report.
3470 * The internal mbuf flag M_IGMPV3_HDR is used to indicate that the mbuf
3471 * chain has already had its IP/IGMPv3 header prepended. In this case
3472 * the function will not attempt to prepend; the lengths and checksums
3473 * will however be re-computed.
3475 * Returns a pointer to the new mbuf chain head, or NULL if the
3476 * allocation failed.
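 *
 * For reference, the header prepended here is the RFC 3376,
 * Section 4.2 report format: type 0x22, a reserved byte, a 16-bit
 * checksum, another reserved 16-bit field, and a 16-bit group record
 * count, followed by the group records themselves; ir_numgrps is
 * filled in from the per-packet record count kept in vt_nrecs.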
3478 static struct mbuf *
3479 igmp_v3_encap_report(struct ifnet *ifp, struct mbuf *m)
3481 INIT_VNET_NET(curvnet);
3482 INIT_VNET_INET(curvnet);
3483 struct igmp_report *igmp;
3485 int hdrlen, igmpreclen;
3487 KASSERT((m->m_flags & M_PKTHDR),
3488 ("%s: mbuf chain %p is !M_PKTHDR", __func__, m));
3490 igmpreclen = m_length(m, NULL);
3491 hdrlen = sizeof(struct ip) + sizeof(struct igmp_report);
3493 if (m->m_flags & M_IGMPV3_HDR) {
3494 igmpreclen -= hdrlen;
3496 M_PREPEND(m, hdrlen, M_DONTWAIT);
3499 m->m_flags |= M_IGMPV3_HDR;
3502 CTR2(KTR_IGMPV3, "%s: igmpreclen is %d", __func__, igmpreclen);
3504 m->m_data += sizeof(struct ip);
3505 m->m_len -= sizeof(struct ip);
3507 igmp = mtod(m, struct igmp_report *);
3508 igmp->ir_type = IGMP_v3_HOST_MEMBERSHIP_REPORT;
3511 igmp->ir_numgrps = htons(m->m_pkthdr.PH_vt.vt_nrecs);
3513 igmp->ir_cksum = in_cksum(m, sizeof(struct igmp_report) + igmpreclen);
3514 m->m_pkthdr.PH_vt.vt_nrecs = 0;
3516 m->m_data -= sizeof(struct ip);
3517 m->m_len += sizeof(struct ip);
3519 ip = mtod(m, struct ip *);
3520 ip->ip_tos = IPTOS_PREC_INTERNETCONTROL;
3521 ip->ip_len = hdrlen + igmpreclen;
3523 ip->ip_p = IPPROTO_IGMP;
3526 ip->ip_src.s_addr = INADDR_ANY;
3528 if (m->m_flags & M_IGMP_LOOP) {
3529 struct in_ifaddr *ia;
3533 ip->ip_src = ia->ia_addr.sin_addr;
3536 ip->ip_dst.s_addr = htonl(INADDR_ALLRPTS_GROUP);
3543 igmp_rec_type_to_str(const int type)
3547 case IGMP_CHANGE_TO_EXCLUDE_MODE:
3550 case IGMP_CHANGE_TO_INCLUDE_MODE:
3553 case IGMP_MODE_IS_EXCLUDE:
3556 case IGMP_MODE_IS_INCLUDE:
3559 case IGMP_ALLOW_NEW_SOURCES:
3562 case IGMP_BLOCK_OLD_SOURCES:
3576 CTR1(KTR_IGMPV3, "%s: initializing", __func__);
3579 TUNABLE_INT_FETCH("debug.mpsafeigmp", &mpsafe_igmp);
3581 mtx_init(&igmpoq.ifq_mtx, "igmpoq_mtx", NULL, MTX_DEF);
3582 IFQ_SET_MAXLEN(&igmpoq, IFQ_MAXLEN);
3584 m_raopt = igmp_ra_alloc();
3586 #if __FreeBSD_version < 800000
3587 netisr_register(NETISR_IGMP, igmp_intr, &igmpoq,
3588 mpsafe_igmp ? NETISR_MPSAFE : 0);
3590 netisr_register(NETISR_IGMP, igmp_intr, &igmpoq,
3591 mpsafe_igmp ? 0 : NETISR_FORCEQUEUE);
3596 igmp_sysuninit(void)
3599 CTR1(KTR_IGMPV3, "%s: tearing down", __func__);
3601 netisr_unregister(NETISR_IGMP);
3602 mtx_destroy(&igmpoq.ifq_mtx);
3607 IGMP_LOCK_DESTROY();
3611 * Initialize an IGMPv3 instance.
3612 * VIMAGE: Assumes curvnet set by caller and called per vimage.
3615 vnet_igmp_iattach(const void *unused __unused)
3617 INIT_VNET_INET(curvnet);
3619 CTR1(KTR_IGMPV3, "%s: initializing", __func__);
3621 LIST_INIT(&V_igi_head);
3623 V_current_state_timers_running = 0;
3624 V_state_change_timers_running = 0;
3625 V_interface_timers_running = 0;
3628 * Initialize sysctls to default values.
3630 V_igmp_recvifkludge = 1;
3632 V_igmp_sendlocal = 1;
3633 V_igmp_v1enable = 1;
3634 V_igmp_v2enable = 1;
3635 V_igmp_legacysupp = 0;
3636 V_igmp_default_version = IGMP_VERSION_3;
3637 V_igmp_gsrdelay.tv_sec = 10;
3638 V_igmp_gsrdelay.tv_usec = 0;
3640 memset(&V_igmpstat, 0, sizeof(struct igmpstat));
3641 V_igmpstat.igps_version = IGPS_VERSION_3;
3642 V_igmpstat.igps_len = sizeof(struct igmpstat);
3648 vnet_igmp_idetach(const void *unused __unused)
3650 INIT_VNET_INET(curvnet);
3652 CTR1(KTR_IGMPV3, "%s: tearing down", __func__);
3654 KASSERT(LIST_EMPTY(&V_igi_head),
3655 ("%s: igi list not empty; ifnets not detached?", __func__));
3661 static struct vnet_symmap vnet_igmp_symmap[] = {
3662 VNET_SYMMAP(igmp, igi_head),
3663 VNET_SYMMAP(igmp, igmpstat),
3666 VNET_MOD_DECLARE(IGMP, igmp, vnet_igmp_iattach, vnet_igmp_idetach,
3671 igmp_modevent(module_t mod, int type, void *unused __unused)
3678 vnet_mod_register(&vnet_igmp_modinfo);
3680 (void)vnet_igmp_iattach(NULL);
3686 * TODO: Allow module unload if any VIMAGE instances
3687 * are using this module.
3691 (void)vnet_igmp_idetach(NULL);
3696 return (EOPNOTSUPP);
3701 static moduledata_t igmp_mod = {
3706 DECLARE_MODULE(igmp, igmp_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);