2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 2007-2009 Bruce Simpson.
5 * Copyright (c) 1988 Stephen Deering.
6 * Copyright (c) 1992, 1993
7 * The Regents of the University of California. All rights reserved.
9 * This code is derived from software contributed to Berkeley by
10 * Stephen Deering of Stanford University.
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * @(#)igmp.c 8.1 (Berkeley) 7/19/93
40 * Internet Group Management Protocol (IGMP) routines.
41 * [RFC1112, RFC2236, RFC3376]
43 * Written by Steve Deering, Stanford, May 1988.
44 * Modified by Rosen Sharma, Stanford, Aug 1994.
45 * Modified by Bill Fenner, Xerox PARC, Feb 1995.
46 * Modified to fully comply to IGMPv2 by Bill Fenner, Oct 1995.
47 * Significantly rewritten for IGMPv3, VIMAGE, and SMP by Bruce Simpson.
49 * MULTICAST Revision: 3.5.1.4
52 #include <sys/cdefs.h>
53 __FBSDID("$FreeBSD$");
57 #include <sys/param.h>
58 #include <sys/systm.h>
59 #include <sys/module.h>
60 #include <sys/malloc.h>
62 #include <sys/socket.h>
63 #include <sys/protosw.h>
64 #include <sys/kernel.h>
66 #include <sys/rmlock.h>
67 #include <sys/sysctl.h>
69 #include <sys/condvar.h>
76 #include <net/if_var.h>
77 #include <net/netisr.h>
80 #include <netinet/in.h>
81 #include <netinet/in_var.h>
82 #include <netinet/in_systm.h>
83 #include <netinet/ip.h>
84 #include <netinet/ip_var.h>
85 #include <netinet/ip_options.h>
86 #include <netinet/igmp.h>
87 #include <netinet/igmp_var.h>
89 #include <machine/in_cksum.h>
91 #include <security/mac/mac_framework.h>
94 #define KTR_IGMPV3 KTR_INET
97 static struct igmp_ifsoftc *
98 igi_alloc_locked(struct ifnet *);
99 static void igi_delete_locked(const struct ifnet *);
100 static void igmp_dispatch_queue(struct mbufq *, int, const int);
101 static void igmp_fasttimo_vnet(void);
102 static void igmp_final_leave(struct in_multi *, struct igmp_ifsoftc *);
103 static int igmp_handle_state_change(struct in_multi *,
104 struct igmp_ifsoftc *);
105 static int igmp_initial_join(struct in_multi *, struct igmp_ifsoftc *);
106 static int igmp_input_v1_query(struct ifnet *, const struct ip *,
107 const struct igmp *);
108 static int igmp_input_v2_query(struct ifnet *, const struct ip *,
109 const struct igmp *);
110 static int igmp_input_v3_query(struct ifnet *, const struct ip *,
111 /*const*/ struct igmpv3 *);
112 static int igmp_input_v3_group_query(struct in_multi *,
113 struct igmp_ifsoftc *, int, /*const*/ struct igmpv3 *);
114 static int igmp_input_v1_report(struct ifnet *, /*const*/ struct ip *,
115 /*const*/ struct igmp *);
116 static int igmp_input_v2_report(struct ifnet *, /*const*/ struct ip *,
117 /*const*/ struct igmp *);
118 static void igmp_intr(struct mbuf *);
119 static int igmp_isgroupreported(const struct in_addr);
123 static char * igmp_rec_type_to_str(const int);
125 static void igmp_set_version(struct igmp_ifsoftc *, const int);
126 static void igmp_slowtimo_vnet(void);
127 static int igmp_v1v2_queue_report(struct in_multi *, const int);
128 static void igmp_v1v2_process_group_timer(struct in_multi *, const int);
129 static void igmp_v1v2_process_querier_timers(struct igmp_ifsoftc *);
130 static void igmp_v2_update_group(struct in_multi *, const int);
131 static void igmp_v3_cancel_link_timers(struct igmp_ifsoftc *);
132 static void igmp_v3_dispatch_general_query(struct igmp_ifsoftc *);
134 igmp_v3_encap_report(struct ifnet *, struct mbuf *);
135 static int igmp_v3_enqueue_group_record(struct mbufq *,
136 struct in_multi *, const int, const int, const int);
137 static int igmp_v3_enqueue_filter_change(struct mbufq *,
139 static void igmp_v3_process_group_timers(struct in_multi_head *,
140 struct mbufq *, struct mbufq *, struct in_multi *,
142 static int igmp_v3_merge_state_changes(struct in_multi *,
144 static void igmp_v3_suppress_group_record(struct in_multi *);
145 static int sysctl_igmp_default_version(SYSCTL_HANDLER_ARGS);
146 static int sysctl_igmp_gsr(SYSCTL_HANDLER_ARGS);
147 static int sysctl_igmp_ifinfo(SYSCTL_HANDLER_ARGS);
149 static const struct netisr_handler igmp_nh = {
151 .nh_handler = igmp_intr,
152 .nh_proto = NETISR_IGMP,
153 .nh_policy = NETISR_POLICY_SOURCE,
157 * System-wide globals.
159 * Unlocked access to these is OK, except for the global IGMP output
160 * queue. The IGMP subsystem lock ends up being system-wide for the moment,
161 * because all VIMAGEs have to share a global output queue, as netisrs
162 * themselves are not virtualized.
165 * * The permitted lock order is: IN_MULTI_LIST_LOCK, IGMP_LOCK, IF_ADDR_LOCK.
166 * Any may be taken independently; if any are held at the same
167 * time, the above lock order must be followed.
168 * * All output is delegated to the netisr.
169 * Now that Giant has been eliminated, the netisr may be inlined.
170 * * IN_MULTI_LIST_LOCK covers in_multi.
171 * * IGMP_LOCK covers igmp_ifsoftc and any global variables in this file,
172 * including the output queue.
173 * * IF_ADDR_LOCK covers if_multiaddrs, which is used for a variety of
174 * per-link state iterators.
175 * * igmp_ifsoftc is valid as long as PF_INET is attached to the interface,
176 * therefore it is not refcounted.
177 * We allow unlocked reads of igmp_ifsoftc when accessed via in_multi.
180 * * IGMP acquires its own reference every time an in_multi is passed to
181 * it and the group is being joined for the first time.
182 * * IGMP releases its reference(s) on in_multi in a deferred way,
183 * because the operations which process the release run as part of
184 * a loop whose control variables are directly affected by the release
185 * (that, and not recursing on the IF_ADDR_LOCK).
187 * VIMAGE: Each in_multi corresponds to an ifp, and each ifp corresponds
188 * to a vnet in ifp->if_vnet.
190 * SMPng: XXX We may potentially race operations on ifma_protospec.
191 * The problem is that we currently lack a clean way of taking the
192 * IF_ADDR_LOCK() between the ifnet and in layers w/o recursing,
193 * as anything which modifies ifma needs to be covered by that lock.
194 * So check for ifma_protospec being NULL before proceeding.
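 *
 * Illustrative sketch of the documented lock order (not a verbatim
 * fragment of this file): a path needing both the membership list and
 * the IGMP per-interface state would acquire the locks as
 *
 *	IN_MULTI_LIST_LOCK();
 *	IGMP_LOCK();
 *	...
 *	IGMP_UNLOCK();
 *	IN_MULTI_LIST_UNLOCK();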
198 struct mbuf *m_raopt; /* Router Alert option */
199 static MALLOC_DEFINE(M_IGMP, "igmp", "igmp state");
202 * VIMAGE-wide globals.
204 * The IGMPv3 timers themselves need to run per-image; however,
205 * protosw timers run globally (see tcp).
206 * An ifnet can only be in one vimage at a time, and the loopback
207 * ifnet, loif, is itself virtualized.
208 * It would otherwise be possible to seriously hose IGMP state,
209 * and create inconsistencies in upstream multicast routing, if you have
210 * multiple VIMAGEs running on the same link joining different multicast
211 * groups, UNLESS the "primary IP address" is different. This is because
212 * IGMP for IPv4 does not force link-local addresses to be used for each
213 * node, unlike MLD for IPv6.
214 * Obviously the IGMPv3 per-interface state has per-vimage granularity
217 * FUTURE: Stop using IFP_TO_IA/INADDR_ANY, and use source address selection
218 * policy to control the address used by IGMP on the link.
220 VNET_DEFINE_STATIC(int, interface_timers_running); /* IGMPv3 general
222 VNET_DEFINE_STATIC(int, state_change_timers_running); /* IGMPv3 state-change
224 VNET_DEFINE_STATIC(int, current_state_timers_running); /* IGMPv1/v2 host
225 * report; IGMPv3 g/sg
228 #define V_interface_timers_running VNET(interface_timers_running)
229 #define V_state_change_timers_running VNET(state_change_timers_running)
230 #define V_current_state_timers_running VNET(current_state_timers_running)
232 VNET_DEFINE_STATIC(LIST_HEAD(, igmp_ifsoftc), igi_head) =
233 LIST_HEAD_INITIALIZER(igi_head);
234 VNET_DEFINE_STATIC(struct igmpstat, igmpstat) = {
235 .igps_version = IGPS_VERSION_3,
236 .igps_len = sizeof(struct igmpstat),
238 VNET_DEFINE_STATIC(struct timeval, igmp_gsrdelay) = {10, 0};
240 #define V_igi_head VNET(igi_head)
241 #define V_igmpstat VNET(igmpstat)
242 #define V_igmp_gsrdelay VNET(igmp_gsrdelay)
244 VNET_DEFINE_STATIC(int, igmp_recvifkludge) = 1;
245 VNET_DEFINE_STATIC(int, igmp_sendra) = 1;
246 VNET_DEFINE_STATIC(int, igmp_sendlocal) = 1;
247 VNET_DEFINE_STATIC(int, igmp_v1enable) = 1;
248 VNET_DEFINE_STATIC(int, igmp_v2enable) = 1;
249 VNET_DEFINE_STATIC(int, igmp_legacysupp);
250 VNET_DEFINE_STATIC(int, igmp_default_version) = IGMP_VERSION_3;
252 #define V_igmp_recvifkludge VNET(igmp_recvifkludge)
253 #define V_igmp_sendra VNET(igmp_sendra)
254 #define V_igmp_sendlocal VNET(igmp_sendlocal)
255 #define V_igmp_v1enable VNET(igmp_v1enable)
256 #define V_igmp_v2enable VNET(igmp_v2enable)
257 #define V_igmp_legacysupp VNET(igmp_legacysupp)
258 #define V_igmp_default_version VNET(igmp_default_version)
261 * Virtualized sysctls.
263 SYSCTL_STRUCT(_net_inet_igmp, IGMPCTL_STATS, stats, CTLFLAG_VNET | CTLFLAG_RW,
264 &VNET_NAME(igmpstat), igmpstat, "");
265 SYSCTL_INT(_net_inet_igmp, OID_AUTO, recvifkludge, CTLFLAG_VNET | CTLFLAG_RW,
266 &VNET_NAME(igmp_recvifkludge), 0,
267 "Rewrite IGMPv1/v2 reports from 0.0.0.0 to contain subnet address");
268 SYSCTL_INT(_net_inet_igmp, OID_AUTO, sendra, CTLFLAG_VNET | CTLFLAG_RW,
269 &VNET_NAME(igmp_sendra), 0,
270 "Send IP Router Alert option in IGMPv2/v3 messages");
271 SYSCTL_INT(_net_inet_igmp, OID_AUTO, sendlocal, CTLFLAG_VNET | CTLFLAG_RW,
272 &VNET_NAME(igmp_sendlocal), 0,
273 "Send IGMP membership reports for 224.0.0.0/24 groups");
274 SYSCTL_INT(_net_inet_igmp, OID_AUTO, v1enable, CTLFLAG_VNET | CTLFLAG_RW,
275 &VNET_NAME(igmp_v1enable), 0,
276 "Enable backwards compatibility with IGMPv1");
277 SYSCTL_INT(_net_inet_igmp, OID_AUTO, v2enable, CTLFLAG_VNET | CTLFLAG_RW,
278 &VNET_NAME(igmp_v2enable), 0,
279 "Enable backwards compatibility with IGMPv2");
280 SYSCTL_INT(_net_inet_igmp, OID_AUTO, legacysupp, CTLFLAG_VNET | CTLFLAG_RW,
281 &VNET_NAME(igmp_legacysupp), 0,
282 "Allow v1/v2 reports to suppress v3 group responses");
283 SYSCTL_PROC(_net_inet_igmp, OID_AUTO, default_version,
284 CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
285 &VNET_NAME(igmp_default_version), 0, sysctl_igmp_default_version, "I",
286 "Default version of IGMP to run on each interface");
287 SYSCTL_PROC(_net_inet_igmp, OID_AUTO, gsrdelay,
288 CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
289 &VNET_NAME(igmp_gsrdelay.tv_sec), 0, sysctl_igmp_gsr, "I",
290 "Rate limit for IGMPv3 Group-and-Source queries in seconds");
293 * Non-virtualized sysctls.
295 static SYSCTL_NODE(_net_inet_igmp, OID_AUTO, ifinfo,
296 CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_igmp_ifinfo,
297 "Per-interface IGMPv3 state");
300 igmp_save_context(struct mbuf *m, struct ifnet *ifp)
304 m->m_pkthdr.PH_loc.ptr = ifp->if_vnet;
306 m->m_pkthdr.flowid = ifp->if_index;
310 igmp_scrub_context(struct mbuf *m)
313 m->m_pkthdr.PH_loc.ptr = NULL;
314 m->m_pkthdr.flowid = 0;
318 * Restore context from a queued IGMP output chain.
319 * Return saved ifindex.
321 * VIMAGE: The assertion is there to make sure that we
322 * actually called CURVNET_SET() with what's in the mbuf chain.
324 static __inline uint32_t
325 igmp_restore_context(struct mbuf *m)
329 #if defined(VIMAGE) && defined(INVARIANTS)
330 KASSERT(curvnet == (m->m_pkthdr.PH_loc.ptr),
331 ("%s: called when curvnet was not restored", __func__));
334 return (m->m_pkthdr.flowid);
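/*
 * Illustrative: igmp_save_context() above stashes the vnet pointer in
 * PH_loc.ptr and the ifindex in flowid before a chain is queued; once
 * curvnet has been re-established, the dispatch path can recover the
 * ifnet from the index returned here (e.g. via ifnet_byindex()).
 */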
338 * Retrieve or set default IGMP version.
340 * VIMAGE: Assume curvnet set by caller.
341 * SMPng: NOTE: Serialized by IGMP lock.
344 sysctl_igmp_default_version(SYSCTL_HANDLER_ARGS)
349 error = sysctl_wire_old_buffer(req, sizeof(int));
355 new = V_igmp_default_version;
357 error = sysctl_handle_int(oidp, &new, 0, req);
358 if (error || !req->newptr)
361 if (new < IGMP_VERSION_1 || new > IGMP_VERSION_3) {
366 CTR2(KTR_IGMPV3, "change igmp_default_version from %d to %d",
367 V_igmp_default_version, new);
369 V_igmp_default_version = new;
377 * Retrieve or set threshold between group-source queries in seconds.
379 * VIMAGE: Assume curvnet set by caller.
380 * SMPng: NOTE: Serialized by IGMP lock.
383 sysctl_igmp_gsr(SYSCTL_HANDLER_ARGS)
388 error = sysctl_wire_old_buffer(req, sizeof(int));
394 i = V_igmp_gsrdelay.tv_sec;
396 error = sysctl_handle_int(oidp, &i, 0, req);
397 if (error || !req->newptr)
400 if (i < -1 || i >= 60) {
405 CTR2(KTR_IGMPV3, "change igmp_gsrdelay from %d to %d",
406 V_igmp_gsrdelay.tv_sec, i);
407 V_igmp_gsrdelay.tv_sec = i;
415 * Expose struct igmp_ifsoftc to userland, keyed by ifindex.
416 * For use by ifmcstat(8).
418 * SMPng: NOTE: Does an unlocked ifindex space read.
419 * VIMAGE: Assume curvnet set by caller. The node handler itself
420 * is not directly virtualized.
423 sysctl_igmp_ifinfo(SYSCTL_HANDLER_ARGS)
429 struct igmp_ifsoftc *igi;
434 if (req->newptr != NULL)
440 error = sysctl_wire_old_buffer(req, sizeof(struct igmp_ifinfo));
444 IN_MULTI_LIST_LOCK();
447 if (name[0] <= 0 || name[0] > V_if_index) {
454 ifp = ifnet_byindex(name[0]);
458 LIST_FOREACH(igi, &V_igi_head, igi_link) {
459 if (ifp == igi->igi_ifp) {
460 struct igmp_ifinfo info;
462 info.igi_version = igi->igi_version;
463 info.igi_v1_timer = igi->igi_v1_timer;
464 info.igi_v2_timer = igi->igi_v2_timer;
465 info.igi_v3_timer = igi->igi_v3_timer;
466 info.igi_flags = igi->igi_flags;
467 info.igi_rv = igi->igi_rv;
468 info.igi_qi = igi->igi_qi;
469 info.igi_qri = igi->igi_qri;
470 info.igi_uri = igi->igi_uri;
471 error = SYSCTL_OUT(req, &info, sizeof(info));
478 IN_MULTI_LIST_UNLOCK();
483 * Dispatch an entire queue of pending packet chains
485 * VIMAGE: Assumes the vnet pointer has been set.
488 igmp_dispatch_queue(struct mbufq *mq, int limit, const int loop)
492 while ((m = mbufq_dequeue(mq)) != NULL) {
493 CTR3(KTR_IGMPV3, "%s: dispatch %p from %p", __func__, m, mq);
495 m->m_flags |= M_IGMP_LOOP;
496 netisr_dispatch(NETISR_IGMP, m);
503 * Filter outgoing IGMP report state by group.
505 * Reports are ALWAYS suppressed for ALL-HOSTS (224.0.0.1).
506 * If the net.inet.igmp.sendlocal sysctl is 0, then IGMP reports are
507 * disabled for all groups in the 224.0.0.0/24 link-local scope. However,
508 * this may break certain IGMP snooping switches which rely on the old
511 * Return zero if the given group is one for which IGMP reports
512 * should be suppressed, or non-zero if reports should be issued.
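 *
 * Illustrative: with net.inet.igmp.sendlocal set to 0, a report for
 * 224.0.0.251 (inside 224.0.0.0/24) is suppressed while 239.1.1.1 is
 * still reported; 224.0.0.1 is never reported regardless of the knob.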
515 igmp_isgroupreported(const struct in_addr addr)
518 if (in_allhosts(addr) ||
519 ((!V_igmp_sendlocal && IN_LOCAL_GROUP(ntohl(addr.s_addr)))))
526 * Construct a Router Alert option to use in outgoing packets.
534 m = m_get(M_WAITOK, MT_DATA);
535 p = mtod(m, struct ipoption *);
536 p->ipopt_dst.s_addr = INADDR_ANY;
537 p->ipopt_list[0] = (char)IPOPT_RA; /* Router Alert Option */
538 p->ipopt_list[1] = 0x04; /* 4 bytes long */
539 p->ipopt_list[2] = IPOPT_EOL; /* End of IP option list */
540 p->ipopt_list[3] = 0x00; /* pad byte */
541 m->m_len = sizeof(p->ipopt_dst) + p->ipopt_list[1];
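/*
 * Illustrative: the four option bytes built above are 0x94 0x04 0x00
 * 0x00 on the wire, i.e. the RFC 2113 Router Alert option (copied bit
 * set, option number 20, length 4, value 0 meaning "examine packet").
 */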
547 * Attach IGMP when PF_INET is attached to an interface.
549 struct igmp_ifsoftc *
550 igmp_domifattach(struct ifnet *ifp)
552 struct igmp_ifsoftc *igi;
554 CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)",
555 __func__, ifp, ifp->if_xname);
559 igi = igi_alloc_locked(ifp);
560 if (!(ifp->if_flags & IFF_MULTICAST))
561 igi->igi_flags |= IGIF_SILENT;
569 * VIMAGE: assume curvnet set by caller.
571 static struct igmp_ifsoftc *
572 igi_alloc_locked(/*const*/ struct ifnet *ifp)
574 struct igmp_ifsoftc *igi;
578 igi = malloc(sizeof(struct igmp_ifsoftc), M_IGMP, M_NOWAIT|M_ZERO);
583 igi->igi_version = V_igmp_default_version;
585 igi->igi_rv = IGMP_RV_INIT;
586 igi->igi_qi = IGMP_QI_INIT;
587 igi->igi_qri = IGMP_QRI_INIT;
588 igi->igi_uri = IGMP_URI_INIT;
589 mbufq_init(&igi->igi_gq, IGMP_MAX_RESPONSE_PACKETS);
591 LIST_INSERT_HEAD(&V_igi_head, igi, igi_link);
593 CTR2(KTR_IGMPV3, "allocate igmp_ifsoftc for ifp %p(%s)",
603 * NOTE: Some finalization tasks need to run before the protocol domain
604 * is detached, but also before the link layer does its cleanup.
606 * SMPNG: igmp_ifdetach() needs to take IF_ADDR_LOCK().
607 * XXX This is also bitten by unlocked ifma_protospec access.
610 igmp_ifdetach(struct ifnet *ifp)
612 struct igmp_ifsoftc *igi;
613 struct ifmultiaddr *ifma, *next;
614 struct in_multi *inm;
615 struct in_multi_head inm_free_tmp;
616 CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)", __func__, ifp,
619 SLIST_INIT(&inm_free_tmp);
622 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
623 if (igi->igi_version == IGMP_VERSION_3) {
626 CK_STAILQ_FOREACH_SAFE(ifma, &ifp->if_multiaddrs, ifma_link, next) {
627 if (ifma->ifma_addr->sa_family != AF_INET ||
628 ifma->ifma_protospec == NULL)
630 inm = (struct in_multi *)ifma->ifma_protospec;
631 if (inm->inm_state == IGMP_LEAVING_MEMBER)
632 inm_rele_locked(&inm_free_tmp, inm);
633 inm_clear_recorded(inm);
634 if (__predict_false(ifma_restart)) {
635 ifma_restart = false;
639 IF_ADDR_WUNLOCK(ifp);
640 inm_release_list_deferred(&inm_free_tmp);
647 * Hook for domifdetach.
650 igmp_domifdetach(struct ifnet *ifp)
653 CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)",
654 __func__, ifp, ifp->if_xname);
657 igi_delete_locked(ifp);
662 igi_delete_locked(const struct ifnet *ifp)
664 struct igmp_ifsoftc *igi, *tigi;
666 CTR3(KTR_IGMPV3, "%s: freeing igmp_ifsoftc for ifp %p(%s)",
667 __func__, ifp, ifp->if_xname);
671 LIST_FOREACH_SAFE(igi, &V_igi_head, igi_link, tigi) {
672 if (igi->igi_ifp == ifp) {
674 * Free deferred General Query responses.
676 mbufq_drain(&igi->igi_gq);
678 LIST_REMOVE(igi, igi_link);
686 * Process a received IGMPv1 query.
687 * Return non-zero if the message should be dropped.
689 * VIMAGE: The curvnet pointer is derived from the input ifp.
692 igmp_input_v1_query(struct ifnet *ifp, const struct ip *ip,
693 const struct igmp *igmp)
695 struct epoch_tracker et;
696 struct ifmultiaddr *ifma;
697 struct igmp_ifsoftc *igi;
698 struct in_multi *inm;
701 * IGMPv1 Host Membership Queries SHOULD always be addressed to
702 * 224.0.0.1. They are always treated as General Queries.
703 * igmp_group is always ignored. Do not drop it as a userland
704 * daemon may wish to see it.
705 * XXX SMPng: unlocked increments in igmpstat assumed atomic.
707 if (!in_allhosts(ip->ip_dst) || !in_nullhost(igmp->igmp_group)) {
708 IGMPSTAT_INC(igps_rcv_badqueries);
711 IGMPSTAT_INC(igps_rcv_gen_queries);
713 IN_MULTI_LIST_LOCK();
716 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
717 KASSERT(igi != NULL, ("%s: no igmp_ifsoftc for ifp %p", __func__, ifp));
719 if (igi->igi_flags & IGIF_LOOPBACK) {
720 CTR2(KTR_IGMPV3, "ignore v1 query on IGIF_LOOPBACK ifp %p(%s)",
726 * Switch to IGMPv1 host compatibility mode.
728 igmp_set_version(igi, IGMP_VERSION_1);
730 CTR2(KTR_IGMPV3, "process v1 query on ifp %p(%s)", ifp, ifp->if_xname);
733 * Start the timers in all of our group records
734 * for the interface on which the query arrived,
735 * except those which are already running.
738 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
739 if (ifma->ifma_addr->sa_family != AF_INET ||
740 ifma->ifma_protospec == NULL)
742 inm = (struct in_multi *)ifma->ifma_protospec;
743 if (inm->inm_timer != 0)
745 switch (inm->inm_state) {
746 case IGMP_NOT_MEMBER:
747 case IGMP_SILENT_MEMBER:
749 case IGMP_G_QUERY_PENDING_MEMBER:
750 case IGMP_SG_QUERY_PENDING_MEMBER:
751 case IGMP_REPORTING_MEMBER:
752 case IGMP_IDLE_MEMBER:
753 case IGMP_LAZY_MEMBER:
754 case IGMP_SLEEPING_MEMBER:
755 case IGMP_AWAKENING_MEMBER:
756 inm->inm_state = IGMP_REPORTING_MEMBER;
757 inm->inm_timer = IGMP_RANDOM_DELAY(
758 IGMP_V1V2_MAX_RI * PR_FASTHZ);
759 V_current_state_timers_running = 1;
761 case IGMP_LEAVING_MEMBER:
769 IN_MULTI_LIST_UNLOCK();
775 * Process a received IGMPv2 general or group-specific query.
778 igmp_input_v2_query(struct ifnet *ifp, const struct ip *ip,
779 const struct igmp *igmp)
781 struct epoch_tracker et;
782 struct ifmultiaddr *ifma;
783 struct igmp_ifsoftc *igi;
784 struct in_multi *inm;
785 int is_general_query;
788 is_general_query = 0;
791 * Validate address fields upfront.
792 * XXX SMPng: unlocked increments in igmpstat assumed atomic.
794 if (in_nullhost(igmp->igmp_group)) {
796 * IGMPv2 General Query.
797 * If this was not sent to the all-hosts group, ignore it.
799 if (!in_allhosts(ip->ip_dst))
801 IGMPSTAT_INC(igps_rcv_gen_queries);
802 is_general_query = 1;
804 /* IGMPv2 Group-Specific Query. */
805 IGMPSTAT_INC(igps_rcv_group_queries);
808 IN_MULTI_LIST_LOCK();
811 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
812 KASSERT(igi != NULL, ("%s: no igmp_ifsoftc for ifp %p", __func__, ifp));
814 if (igi->igi_flags & IGIF_LOOPBACK) {
815 CTR2(KTR_IGMPV3, "ignore v2 query on IGIF_LOOPBACK ifp %p(%s)",
821 * Ignore v2 query if in v1 Compatibility Mode.
823 if (igi->igi_version == IGMP_VERSION_1)
826 igmp_set_version(igi, IGMP_VERSION_2);
828 timer = igmp->igmp_code * PR_FASTHZ / IGMP_TIMER_SCALE;
832 if (is_general_query) {
834 * For each reporting group joined on this
835 * interface, kick the report timer.
837 CTR2(KTR_IGMPV3, "process v2 general query on ifp %p(%s)",
840 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
841 if (ifma->ifma_addr->sa_family != AF_INET ||
842 ifma->ifma_protospec == NULL)
844 inm = (struct in_multi *)ifma->ifma_protospec;
845 igmp_v2_update_group(inm, timer);
850 * Group-specific IGMPv2 query, we need only
851 * look up the single group to process it.
853 inm = inm_lookup(ifp, igmp->igmp_group);
856 "process v2 query 0x%08x on ifp %p(%s)",
857 ntohl(igmp->igmp_group.s_addr), ifp, ifp->if_xname);
858 igmp_v2_update_group(inm, timer);
864 IN_MULTI_LIST_UNLOCK();
870 * Update the report timer on a group in response to an IGMPv2 query.
872 * If we are becoming the reporting member for this group, start the timer.
873 * If we already are the reporting member for this group, and timer is
874 * below the threshold, reset it.
876 * We may be updating the group for the first time since we switched
877 * to IGMPv3. If we are, then we must clear any recorded source lists,
878 * and transition to REPORTING state; the group timer is overloaded
879 * for group and group-source query responses.
881 * Unlike IGMPv3, the delay per group should be jittered
882 * to avoid bursts of IGMPv2 reports.
885 igmp_v2_update_group(struct in_multi *inm, const int timer)
888 CTR4(KTR_IGMPV3, "%s: 0x%08x/%s timer=%d", __func__,
889 ntohl(inm->inm_addr.s_addr), inm->inm_ifp->if_xname, timer);
891 IN_MULTI_LIST_LOCK_ASSERT();
893 switch (inm->inm_state) {
894 case IGMP_NOT_MEMBER:
895 case IGMP_SILENT_MEMBER:
897 case IGMP_REPORTING_MEMBER:
898 if (inm->inm_timer != 0 &&
899 inm->inm_timer <= timer) {
900 CTR1(KTR_IGMPV3, "%s: REPORTING and timer running, "
901 "skipping.", __func__);
905 case IGMP_SG_QUERY_PENDING_MEMBER:
906 case IGMP_G_QUERY_PENDING_MEMBER:
907 case IGMP_IDLE_MEMBER:
908 case IGMP_LAZY_MEMBER:
909 case IGMP_AWAKENING_MEMBER:
910 CTR1(KTR_IGMPV3, "%s: ->REPORTING", __func__);
911 inm->inm_state = IGMP_REPORTING_MEMBER;
912 inm->inm_timer = IGMP_RANDOM_DELAY(timer);
913 V_current_state_timers_running = 1;
915 case IGMP_SLEEPING_MEMBER:
916 CTR1(KTR_IGMPV3, "%s: ->AWAKENING", __func__);
917 inm->inm_state = IGMP_AWAKENING_MEMBER;
919 case IGMP_LEAVING_MEMBER:
925 * Process a received IGMPv3 general, group-specific or
926 * group-and-source-specific query.
927 * Assumes m has already been pulled up to the full IGMP message length.
928 * Return 0 if successful, otherwise an appropriate error code is returned.
931 igmp_input_v3_query(struct ifnet *ifp, const struct ip *ip,
932 /*const*/ struct igmpv3 *igmpv3)
934 struct igmp_ifsoftc *igi;
935 struct in_multi *inm;
936 int is_general_query;
937 uint32_t maxresp, nsrc, qqi;
941 is_general_query = 0;
943 CTR2(KTR_IGMPV3, "process v3 query on ifp %p(%s)", ifp, ifp->if_xname);
945 maxresp = igmpv3->igmp_code; /* in 1/10ths of a second */
946 if (maxresp >= 128) {
947 maxresp = IGMP_MANT(igmpv3->igmp_code) <<
948 (IGMP_EXP(igmpv3->igmp_code) + 3);
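/*
 * Worked example (illustrative, assuming IGMP_MANT() folds in the
 * implicit leading bit): per RFC 3376 section 4.1.1, a Max Resp Code
 * of 0x8A has exp = 0 and mant = 0xA, so maxresp becomes
 * (0x10 | 0xA) << (0 + 3) = 208 tenths of a second, i.e. 20.8s.
 */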
952 * Robustness must never be less than 2 for on-wire IGMPv3.
953 * FUTURE: Check if ifp has IGIF_LOOPBACK set, as we will make
954 * an exception for interfaces whose IGMPv3 state changes
955 * are redirected to loopback (e.g. MANET).
957 qrv = IGMP_QRV(igmpv3->igmp_misc);
959 CTR3(KTR_IGMPV3, "%s: clamping qrv %d to %d", __func__,
964 qqi = igmpv3->igmp_qqi;
966 qqi = IGMP_MANT(igmpv3->igmp_qqi) <<
967 (IGMP_EXP(igmpv3->igmp_qqi) + 3);
970 timer = maxresp * PR_FASTHZ / IGMP_TIMER_SCALE;
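/*
 * Illustrative: assuming the usual PR_FASTHZ of 5 and IGMP_TIMER_SCALE
 * of 10, a maxresp of 100 (10.0 seconds) becomes a timer of 50 fast
 * timeout ticks.
 */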
974 nsrc = ntohs(igmpv3->igmp_numsrc);
977 * Validate address fields and versions upfront before
978 * accepting v3 query.
979 * XXX SMPng: Unlocked access to igmpstat counters here.
981 if (in_nullhost(igmpv3->igmp_group)) {
983 * IGMPv3 General Query.
985 * General Queries SHOULD be directed to 224.0.0.1.
986 * A general query with a source list has undefined
987 * behaviour; discard it.
989 IGMPSTAT_INC(igps_rcv_gen_queries);
990 if (!in_allhosts(ip->ip_dst) || nsrc > 0) {
991 IGMPSTAT_INC(igps_rcv_badqueries);
994 is_general_query = 1;
996 /* Group or group-source specific query. */
998 IGMPSTAT_INC(igps_rcv_group_queries);
1000 IGMPSTAT_INC(igps_rcv_gsr_queries);
1003 IN_MULTI_LIST_LOCK();
1006 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
1007 KASSERT(igi != NULL, ("%s: no igmp_ifsoftc for ifp %p", __func__, ifp));
1009 if (igi->igi_flags & IGIF_LOOPBACK) {
1010 CTR2(KTR_IGMPV3, "ignore v3 query on IGIF_LOOPBACK ifp %p(%s)",
1011 ifp, ifp->if_xname);
1016 * Discard the v3 query if we're in Compatibility Mode.
1017 * The RFC is not obviously worded that hosts need to stay in
1018 * compatibility mode until the Old Version Querier Present
1021 if (igi->igi_version != IGMP_VERSION_3) {
1022 CTR3(KTR_IGMPV3, "ignore v3 query in v%d mode on ifp %p(%s)",
1023 igi->igi_version, ifp, ifp->if_xname);
1027 igmp_set_version(igi, IGMP_VERSION_3);
1030 igi->igi_qri = maxresp;
1032 CTR4(KTR_IGMPV3, "%s: qrv %d qi %d qri %d", __func__, qrv, qqi,
1035 if (is_general_query) {
1037 * Schedule a current-state report on this ifp for
1038 * all groups, possibly containing source lists.
1039 * If there is a pending General Query response
1040 * scheduled earlier than the selected delay, do
1041 * not schedule any other reports.
1042 * Otherwise, reset the interface timer.
1044 CTR2(KTR_IGMPV3, "process v3 general query on ifp %p(%s)",
1045 ifp, ifp->if_xname);
1046 if (igi->igi_v3_timer == 0 || igi->igi_v3_timer >= timer) {
1047 igi->igi_v3_timer = IGMP_RANDOM_DELAY(timer);
1048 V_interface_timers_running = 1;
1052 * Group-source-specific queries are throttled on
1053 * a per-group basis to defeat denial-of-service attempts.
1054 * Queries for groups we are not a member of on this
1055 * link are simply ignored.
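 *
 * Illustrative: with the default net.inet.igmp.gsrdelay of 10 seconds
 * (igmp_gsrdelay above), ratecheck() admits at most one group or
 * group-and-source query per group every 10 seconds.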
1057 inm = inm_lookup(ifp, igmpv3->igmp_group);
1061 if (!ratecheck(&inm->inm_lastgsrtv,
1062 &V_igmp_gsrdelay)) {
1063 CTR1(KTR_IGMPV3, "%s: GS query throttled.",
1065 IGMPSTAT_INC(igps_drop_gsr_queries);
1069 CTR3(KTR_IGMPV3, "process v3 0x%08x query on ifp %p(%s)",
1070 ntohl(igmpv3->igmp_group.s_addr), ifp, ifp->if_xname);
1072 * If there is a pending General Query response
1073 * scheduled sooner than the selected delay, no
1074 * further report need be scheduled.
1075 * Otherwise, prepare to respond to the
1076 * group-specific or group-and-source query.
1078 if (igi->igi_v3_timer == 0 || igi->igi_v3_timer >= timer)
1079 igmp_input_v3_group_query(inm, igi, timer, igmpv3);
1084 IN_MULTI_LIST_UNLOCK();
1090 * Process a received IGMPv3 group-specific or group-and-source-specific
1092 * Return <0 if any error occurred. Currently this is ignored.
1095 igmp_input_v3_group_query(struct in_multi *inm, struct igmp_ifsoftc *igi,
1096 int timer, /*const*/ struct igmpv3 *igmpv3)
1101 IN_MULTI_LIST_LOCK_ASSERT();
1106 switch (inm->inm_state) {
1107 case IGMP_NOT_MEMBER:
1108 case IGMP_SILENT_MEMBER:
1109 case IGMP_SLEEPING_MEMBER:
1110 case IGMP_LAZY_MEMBER:
1111 case IGMP_AWAKENING_MEMBER:
1112 case IGMP_IDLE_MEMBER:
1113 case IGMP_LEAVING_MEMBER:
1116 case IGMP_REPORTING_MEMBER:
1117 case IGMP_G_QUERY_PENDING_MEMBER:
1118 case IGMP_SG_QUERY_PENDING_MEMBER:
1122 nsrc = ntohs(igmpv3->igmp_numsrc);
1125 * Deal with group-specific queries upfront.
1126 * If any group query is already pending, purge any recorded
1127 * source-list state if it exists, and schedule a query response
1128 * for this group-specific query.
1131 if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER ||
1132 inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER) {
1133 inm_clear_recorded(inm);
1134 timer = min(inm->inm_timer, timer);
1136 inm->inm_state = IGMP_G_QUERY_PENDING_MEMBER;
1137 inm->inm_timer = IGMP_RANDOM_DELAY(timer);
1138 V_current_state_timers_running = 1;
1143 * Deal with the case where a group-and-source-specific query has
1144 * been received but a group-specific query is already pending.
1146 if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER) {
1147 timer = min(inm->inm_timer, timer);
1148 inm->inm_timer = IGMP_RANDOM_DELAY(timer);
1149 V_current_state_timers_running = 1;
1154 * Finally, deal with the case where a group-and-source-specific
1155 * query has been received, where a response to a previous g-s-r
1156 * query exists, or none exists.
1157 * In this case, we need to parse the source-list which the Querier
1158 * has provided us with and check if we have any source list filter
1159 * entries at T1 for these sources. If we do not, there is no need
1160 * to schedule a report and the query may be dropped.
1161 * If we do, we must record them and schedule a current-state
1162 * report for those sources.
1163 * FIXME: Handling source lists larger than 1 mbuf requires that
1164 * we pass the mbuf chain pointer down to this function, and use
1165 * m_getptr() to walk the chain.
1167 if (inm->inm_nsrc > 0) {
1168 const struct in_addr *ap;
1171 ap = (const struct in_addr *)(igmpv3 + 1);
1173 for (i = 0; i < nsrc; i++, ap++) {
1174 retval = inm_record_source(inm, ap->s_addr);
1177 nrecorded += retval;
1179 if (nrecorded > 0) {
1181 "%s: schedule response to SG query", __func__);
1182 inm->inm_state = IGMP_SG_QUERY_PENDING_MEMBER;
1183 inm->inm_timer = IGMP_RANDOM_DELAY(timer);
1184 V_current_state_timers_running = 1;
1192 * Process a received IGMPv1 host membership report.
1194 * NOTE: 0.0.0.0 workaround breaks const correctness.
1197 igmp_input_v1_report(struct ifnet *ifp, /*const*/ struct ip *ip,
1198 /*const*/ struct igmp *igmp)
1200 struct rm_priotracker in_ifa_tracker;
1201 struct in_ifaddr *ia;
1202 struct in_multi *inm;
1204 IGMPSTAT_INC(igps_rcv_reports);
1206 if (ifp->if_flags & IFF_LOOPBACK)
1209 if (!IN_MULTICAST(ntohl(igmp->igmp_group.s_addr)) ||
1210 !in_hosteq(igmp->igmp_group, ip->ip_dst)) {
1211 IGMPSTAT_INC(igps_rcv_badreports);
1216 * RFC 3376, Section 4.2.13, 9.2, 9.3:
1217 * Booting clients may use the source address 0.0.0.0. Some
1218 * IGMP daemons may not know how to use IP_RECVIF to determine
1219 * the interface upon which this message was received.
1220 * Replace 0.0.0.0 with the subnet address if told to do so.
1222 if (V_igmp_recvifkludge && in_nullhost(ip->ip_src)) {
1223 struct epoch_tracker et;
1225 NET_EPOCH_ENTER(et);
1226 IFP_TO_IA(ifp, ia, &in_ifa_tracker);
1228 ip->ip_src.s_addr = htonl(ia->ia_subnet);
1232 CTR3(KTR_IGMPV3, "process v1 report 0x%08x on ifp %p(%s)",
1233 ntohl(igmp->igmp_group.s_addr), ifp, ifp->if_xname);
1236 * IGMPv1 report suppression.
1237 * If we are a member of this group, and our membership should be
1238 * reported, stop our group timer and transition to the 'lazy' state.
1240 IN_MULTI_LIST_LOCK();
1241 inm = inm_lookup(ifp, igmp->igmp_group);
1243 struct igmp_ifsoftc *igi;
1247 KASSERT(igi != NULL,
1248 ("%s: no igi for ifp %p", __func__, ifp));
1252 IGMPSTAT_INC(igps_rcv_ourreports);
1255 * If we are in IGMPv3 host mode, do not allow the
1256 * other host's IGMPv1 report to suppress our reports
1257 * unless explicitly configured to do so.
1259 if (igi->igi_version == IGMP_VERSION_3) {
1260 if (V_igmp_legacysupp)
1261 igmp_v3_suppress_group_record(inm);
1267 switch (inm->inm_state) {
1268 case IGMP_NOT_MEMBER:
1269 case IGMP_SILENT_MEMBER:
1271 case IGMP_IDLE_MEMBER:
1272 case IGMP_LAZY_MEMBER:
1273 case IGMP_AWAKENING_MEMBER:
1275 "report suppressed for 0x%08x on ifp %p(%s)",
1276 ntohl(igmp->igmp_group.s_addr), ifp,
1278 case IGMP_SLEEPING_MEMBER:
1279 inm->inm_state = IGMP_SLEEPING_MEMBER;
1281 case IGMP_REPORTING_MEMBER:
1283 "report suppressed for 0x%08x on ifp %p(%s)",
1284 ntohl(igmp->igmp_group.s_addr), ifp,
1286 if (igi->igi_version == IGMP_VERSION_1)
1287 inm->inm_state = IGMP_LAZY_MEMBER;
1288 else if (igi->igi_version == IGMP_VERSION_2)
1289 inm->inm_state = IGMP_SLEEPING_MEMBER;
1291 case IGMP_G_QUERY_PENDING_MEMBER:
1292 case IGMP_SG_QUERY_PENDING_MEMBER:
1293 case IGMP_LEAVING_MEMBER:
1299 IN_MULTI_LIST_UNLOCK();
1305 * Process a received IGMPv2 host membership report.
1307 * NOTE: 0.0.0.0 workaround breaks const correctness.
1310 igmp_input_v2_report(struct ifnet *ifp, /*const*/ struct ip *ip,
1311 /*const*/ struct igmp *igmp)
1313 struct rm_priotracker in_ifa_tracker;
1314 struct epoch_tracker et;
1315 struct in_ifaddr *ia;
1316 struct in_multi *inm;
1319 * Make sure we don't hear our own membership report. Fast
1320 * leave requires knowing that we are the only member of a
1323 NET_EPOCH_ENTER(et);
1324 IFP_TO_IA(ifp, ia, &in_ifa_tracker);
1325 if (ia != NULL && in_hosteq(ip->ip_src, IA_SIN(ia)->sin_addr)) {
1330 IGMPSTAT_INC(igps_rcv_reports);
1332 if (ifp->if_flags & IFF_LOOPBACK) {
1337 if (!IN_MULTICAST(ntohl(igmp->igmp_group.s_addr)) ||
1338 !in_hosteq(igmp->igmp_group, ip->ip_dst)) {
1340 IGMPSTAT_INC(igps_rcv_badreports);
1345 * RFC 3376, Section 4.2.13, 9.2, 9.3:
1346 * Booting clients may use the source address 0.0.0.0. Some
1347 * IGMP daemons may not know how to use IP_RECVIF to determine
1348 * the interface upon which this message was received.
1349 * Replace 0.0.0.0 with the subnet address if told to do so.
1351 if (V_igmp_recvifkludge && in_nullhost(ip->ip_src)) {
1353 ip->ip_src.s_addr = htonl(ia->ia_subnet);
1357 CTR3(KTR_IGMPV3, "process v2 report 0x%08x on ifp %p(%s)",
1358 ntohl(igmp->igmp_group.s_addr), ifp, ifp->if_xname);
1361 * IGMPv2 report suppression.
1362 * If we are a member of this group, and our membership should be
1363 * reported, and our group timer is pending or about to be reset,
1364 * stop our group timer by transitioning to the 'lazy' state.
1366 IN_MULTI_LIST_LOCK();
1367 inm = inm_lookup(ifp, igmp->igmp_group);
1369 struct igmp_ifsoftc *igi;
1372 KASSERT(igi != NULL, ("%s: no igi for ifp %p", __func__, ifp));
1374 IGMPSTAT_INC(igps_rcv_ourreports);
1377 * If we are in IGMPv3 host mode, do not allow the
1378 * other host's IGMPv2 report to suppress our reports
1379 * unless explicitly configured to do so.
1381 if (igi->igi_version == IGMP_VERSION_3) {
1382 if (V_igmp_legacysupp)
1383 igmp_v3_suppress_group_record(inm);
1389 switch (inm->inm_state) {
1390 case IGMP_NOT_MEMBER:
1391 case IGMP_SILENT_MEMBER:
1392 case IGMP_SLEEPING_MEMBER:
1394 case IGMP_REPORTING_MEMBER:
1395 case IGMP_IDLE_MEMBER:
1396 case IGMP_AWAKENING_MEMBER:
1398 "report suppressed for 0x%08x on ifp %p(%s)",
1399 ntohl(igmp->igmp_group.s_addr), ifp, ifp->if_xname);
1400 case IGMP_LAZY_MEMBER:
1401 inm->inm_state = IGMP_LAZY_MEMBER;
1403 case IGMP_G_QUERY_PENDING_MEMBER:
1404 case IGMP_SG_QUERY_PENDING_MEMBER:
1405 case IGMP_LEAVING_MEMBER:
1411 IN_MULTI_LIST_UNLOCK();
1417 igmp_input(struct mbuf **mp, int *offp, int proto)
1428 CTR3(KTR_IGMPV3, "%s: called w/mbuf (%p,%d)", __func__, *mp, *offp);
1431 ifp = m->m_pkthdr.rcvif;
1434 IGMPSTAT_INC(igps_rcv_total);
1436 ip = mtod(m, struct ip *);
1438 igmplen = ntohs(ip->ip_len) - iphlen;
1443 if (igmplen < IGMP_MINLEN) {
1444 IGMPSTAT_INC(igps_rcv_tooshort);
1446 return (IPPROTO_DONE);
1450 * Always pullup to the minimum size for v1/v2 or v3
1451 * to amortize calls to m_pullup().
1454 if (igmplen >= IGMP_V3_QUERY_MINLEN)
1455 minlen += IGMP_V3_QUERY_MINLEN;
1457 minlen += IGMP_MINLEN;
1458 if ((!M_WRITABLE(m) || m->m_len < minlen) &&
1459 (m = m_pullup(m, minlen)) == NULL) {
1460 IGMPSTAT_INC(igps_rcv_tooshort);
1461 return (IPPROTO_DONE);
1463 ip = mtod(m, struct ip *);
1466 * Validate checksum.
1468 m->m_data += iphlen;
1470 igmp = mtod(m, struct igmp *);
1471 if (in_cksum(m, igmplen)) {
1472 IGMPSTAT_INC(igps_rcv_badsum);
1474 return (IPPROTO_DONE);
1476 m->m_data -= iphlen;
1480 * IGMP control traffic is link-scope, and must have a TTL of 1.
1481 * DVMRP traffic (e.g. mrinfo, mtrace) is an exception;
1482 * probe packets may come from beyond the LAN.
1484 if (igmp->igmp_type != IGMP_DVMRP && ip->ip_ttl != 1) {
1485 IGMPSTAT_INC(igps_rcv_badttl);
1487 return (IPPROTO_DONE);
1490 switch (igmp->igmp_type) {
1491 case IGMP_HOST_MEMBERSHIP_QUERY:
1492 if (igmplen == IGMP_MINLEN) {
1493 if (igmp->igmp_code == 0)
1494 queryver = IGMP_VERSION_1;
1496 queryver = IGMP_VERSION_2;
1497 } else if (igmplen >= IGMP_V3_QUERY_MINLEN) {
1498 queryver = IGMP_VERSION_3;
1500 IGMPSTAT_INC(igps_rcv_tooshort);
1502 return (IPPROTO_DONE);
1506 case IGMP_VERSION_1:
1507 IGMPSTAT_INC(igps_rcv_v1v2_queries);
1508 if (!V_igmp_v1enable)
1510 if (igmp_input_v1_query(ifp, ip, igmp) != 0) {
1512 return (IPPROTO_DONE);
1516 case IGMP_VERSION_2:
1517 IGMPSTAT_INC(igps_rcv_v1v2_queries);
1518 if (!V_igmp_v2enable)
1520 if (igmp_input_v2_query(ifp, ip, igmp) != 0) {
1522 return (IPPROTO_DONE);
1526 case IGMP_VERSION_3: {
1527 struct igmpv3 *igmpv3;
1531 IGMPSTAT_INC(igps_rcv_v3_queries);
1532 igmpv3 = (struct igmpv3 *)igmp;
1534 * Validate length based on source count.
1536 nsrc = ntohs(igmpv3->igmp_numsrc);
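/*
 * Illustrative: the bound below follows from the 16-bit IP total
 * length field; a source count whose addresses could not fit in a
 * maximally sized IP datagram marks the query as malformed.
 */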
1537 if (nsrc * sizeof(in_addr_t) >
1538 UINT16_MAX - iphlen - IGMP_V3_QUERY_MINLEN) {
1539 IGMPSTAT_INC(igps_rcv_tooshort);
1540 return (IPPROTO_DONE);
1543 * m_pullup() may modify m, so pullup in
1546 igmpv3len = iphlen + IGMP_V3_QUERY_MINLEN +
1547 sizeof(struct in_addr) * nsrc;
1548 if ((!M_WRITABLE(m) ||
1549 m->m_len < igmpv3len) &&
1550 (m = m_pullup(m, igmpv3len)) == NULL) {
1551 IGMPSTAT_INC(igps_rcv_tooshort);
1552 return (IPPROTO_DONE);
1554 igmpv3 = (struct igmpv3 *)(mtod(m, uint8_t *)
1556 if (igmp_input_v3_query(ifp, ip, igmpv3) != 0) {
1558 return (IPPROTO_DONE);
1565 case IGMP_v1_HOST_MEMBERSHIP_REPORT:
1566 if (!V_igmp_v1enable)
1568 if (igmp_input_v1_report(ifp, ip, igmp) != 0) {
1570 return (IPPROTO_DONE);
1574 case IGMP_v2_HOST_MEMBERSHIP_REPORT:
1575 if (!V_igmp_v2enable)
1577 if (!ip_checkrouteralert(m))
1578 IGMPSTAT_INC(igps_rcv_nora);
1579 if (igmp_input_v2_report(ifp, ip, igmp) != 0) {
1581 return (IPPROTO_DONE);
1585 case IGMP_v3_HOST_MEMBERSHIP_REPORT:
1587 * Hosts do not need to process IGMPv3 membership reports,
1588 * as report suppression is no longer required.
1590 if (!ip_checkrouteralert(m))
1591 IGMPSTAT_INC(igps_rcv_nora);
1599 * Pass all valid IGMP packets up to any process(es) listening on a
1603 return (rip_input(mp, offp, proto));
1608 * Fast timeout handler (global).
1609 * VIMAGE: Timeout handlers are expected to service all vimages.
1614 VNET_ITERATOR_DECL(vnet_iter);
1616 VNET_LIST_RLOCK_NOSLEEP();
1617 VNET_FOREACH(vnet_iter) {
1618 CURVNET_SET(vnet_iter);
1619 igmp_fasttimo_vnet();
1622 VNET_LIST_RUNLOCK_NOSLEEP();
1626 * Fast timeout handler (per-vnet).
1627 * Sends are shuffled off to a netisr to deal with Giant.
1629 * VIMAGE: Assume caller has set up our curvnet.
1632 igmp_fasttimo_vnet(void)
1634 struct mbufq scq; /* State-change packets */
1635 struct mbufq qrq; /* Query response packets */
1637 struct igmp_ifsoftc *igi;
1638 struct ifmultiaddr *ifma, *next;
1639 struct in_multi *inm;
1640 struct in_multi_head inm_free_tmp;
1641 int loop, uri_fasthz;
1647 * Quick check to see if any work needs to be done, in order to
1648 * minimize the overhead of fasttimo processing.
1649 * SMPng: XXX Unlocked reads.
1651 if (!V_current_state_timers_running &&
1652 !V_interface_timers_running &&
1653 !V_state_change_timers_running)
1656 SLIST_INIT(&inm_free_tmp);
1657 IN_MULTI_LIST_LOCK();
1661 * IGMPv3 General Query response timer processing.
1663 if (V_interface_timers_running) {
1664 CTR1(KTR_IGMPV3, "%s: interface timers running", __func__);
1666 V_interface_timers_running = 0;
1667 LIST_FOREACH(igi, &V_igi_head, igi_link) {
1668 if (igi->igi_v3_timer == 0) {
1670 } else if (--igi->igi_v3_timer == 0) {
1671 igmp_v3_dispatch_general_query(igi);
1673 V_interface_timers_running = 1;
1678 if (!V_current_state_timers_running &&
1679 !V_state_change_timers_running)
1682 V_current_state_timers_running = 0;
1683 V_state_change_timers_running = 0;
1685 CTR1(KTR_IGMPV3, "%s: state change timers running", __func__);
1688 * IGMPv1/v2/v3 host report and state-change timer processing.
1689 * Note: Processing a v3 group timer may remove a node.
1691 LIST_FOREACH(igi, &V_igi_head, igi_link) {
1694 if (igi->igi_version == IGMP_VERSION_3) {
1695 loop = (igi->igi_flags & IGIF_LOOPBACK) ? 1 : 0;
1696 uri_fasthz = IGMP_RANDOM_DELAY(igi->igi_uri *
1698 mbufq_init(&qrq, IGMP_MAX_G_GS_PACKETS);
1699 mbufq_init(&scq, IGMP_MAX_STATE_CHANGE_PACKETS);
1704 CK_STAILQ_FOREACH_SAFE(ifma, &ifp->if_multiaddrs, ifma_link, next) {
1705 if (ifma->ifma_addr->sa_family != AF_INET ||
1706 ifma->ifma_protospec == NULL)
1708 inm = (struct in_multi *)ifma->ifma_protospec;
1709 switch (igi->igi_version) {
1710 case IGMP_VERSION_1:
1711 case IGMP_VERSION_2:
1712 igmp_v1v2_process_group_timer(inm,
1715 case IGMP_VERSION_3:
1716 igmp_v3_process_group_timers(&inm_free_tmp, &qrq,
1717 &scq, inm, uri_fasthz);
1720 if (__predict_false(ifma_restart)) {
1721 ifma_restart = false;
1725 IF_ADDR_WUNLOCK(ifp);
1727 if (igi->igi_version == IGMP_VERSION_3) {
1728 igmp_dispatch_queue(&qrq, 0, loop);
1729 igmp_dispatch_queue(&scq, 0, loop);
1732 * Free the in_multi reference(s) for this
1735 inm_release_list_deferred(&inm_free_tmp);
1741 IN_MULTI_LIST_UNLOCK();
1745 * Update host report group timer for IGMPv1/v2.
1746 * Will update the global pending timer flags.
1749 igmp_v1v2_process_group_timer(struct in_multi *inm, const int version)
1751 int report_timer_expired;
1753 IN_MULTI_LIST_LOCK_ASSERT();
1756 if (inm->inm_timer == 0) {
1757 report_timer_expired = 0;
1758 } else if (--inm->inm_timer == 0) {
1759 report_timer_expired = 1;
1761 V_current_state_timers_running = 1;
1765 switch (inm->inm_state) {
1766 case IGMP_NOT_MEMBER:
1767 case IGMP_SILENT_MEMBER:
1768 case IGMP_IDLE_MEMBER:
1769 case IGMP_LAZY_MEMBER:
1770 case IGMP_SLEEPING_MEMBER:
1771 case IGMP_AWAKENING_MEMBER:
1773 case IGMP_REPORTING_MEMBER:
1774 if (report_timer_expired) {
1775 inm->inm_state = IGMP_IDLE_MEMBER;
1776 (void)igmp_v1v2_queue_report(inm,
1777 (version == IGMP_VERSION_2) ?
1778 IGMP_v2_HOST_MEMBERSHIP_REPORT :
1779 IGMP_v1_HOST_MEMBERSHIP_REPORT);
1782 case IGMP_G_QUERY_PENDING_MEMBER:
1783 case IGMP_SG_QUERY_PENDING_MEMBER:
1784 case IGMP_LEAVING_MEMBER:
1790 * Update a group's timers for IGMPv3.
1791 * Will update the global pending timer flags.
1792 * Note: Unlocked read from igi.
1795 igmp_v3_process_group_timers(struct in_multi_head *inmh,
1796 struct mbufq *qrq, struct mbufq *scq,
1797 struct in_multi *inm, const int uri_fasthz)
1799 int query_response_timer_expired;
1800 int state_change_retransmit_timer_expired;
1802 IN_MULTI_LIST_LOCK_ASSERT();
1805 query_response_timer_expired = 0;
1806 state_change_retransmit_timer_expired = 0;
1809 * During a transition from v1/v2 compatibility mode back to v3,
1810 * a group record in REPORTING state may still have its group
1811 * timer active. This is a no-op in this function; it is easier
1812 * to deal with it here than to complicate the slow-timeout path.
1814 if (inm->inm_timer == 0) {
1815 query_response_timer_expired = 0;
1816 } else if (--inm->inm_timer == 0) {
1817 query_response_timer_expired = 1;
1819 V_current_state_timers_running = 1;
1822 if (inm->inm_sctimer == 0) {
1823 state_change_retransmit_timer_expired = 0;
1824 } else if (--inm->inm_sctimer == 0) {
1825 state_change_retransmit_timer_expired = 1;
1827 V_state_change_timers_running = 1;
1830 /* We are in fasttimo, so be quick about it. */
1831 if (!state_change_retransmit_timer_expired &&
1832 !query_response_timer_expired)
1835 switch (inm->inm_state) {
1836 case IGMP_NOT_MEMBER:
1837 case IGMP_SILENT_MEMBER:
1838 case IGMP_SLEEPING_MEMBER:
1839 case IGMP_LAZY_MEMBER:
1840 case IGMP_AWAKENING_MEMBER:
1841 case IGMP_IDLE_MEMBER:
1843 case IGMP_G_QUERY_PENDING_MEMBER:
1844 case IGMP_SG_QUERY_PENDING_MEMBER:
1846 * Respond to a previously pending Group-Specific
1847 * or Group-and-Source-Specific query by enqueueing
1848 * the appropriate Current-State report for
1849 * immediate transmission.
1851 if (query_response_timer_expired) {
1852 int retval __unused;
1854 retval = igmp_v3_enqueue_group_record(qrq, inm, 0, 1,
1855 (inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER));
1856 CTR2(KTR_IGMPV3, "%s: enqueue record = %d",
1858 inm->inm_state = IGMP_REPORTING_MEMBER;
1859 /* XXX Clear recorded sources for next time. */
1860 inm_clear_recorded(inm);
1863 case IGMP_REPORTING_MEMBER:
1864 case IGMP_LEAVING_MEMBER:
1865 if (state_change_retransmit_timer_expired) {
1867 * State-change retransmission timer fired.
1868 * If there are any further pending retransmissions,
1869 * set the global pending state-change flag, and
1872 if (--inm->inm_scrv > 0) {
1873 inm->inm_sctimer = uri_fasthz;
1874 V_state_change_timers_running = 1;
1877 * Retransmit the previously computed state-change
1878 * report. If there are no further pending
1879 * retransmissions, the mbuf queue will be consumed.
1880 * Update T0 state to T1 as we have now sent
1883 (void)igmp_v3_merge_state_changes(inm, scq);
1886 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for 0x%08x/%s", __func__,
1887 ntohl(inm->inm_addr.s_addr),
1888 inm->inm_ifp->if_xname);
1891 * If we are leaving the group for good, make sure
1892 * we release IGMP's reference to it.
1893 * This release must be deferred using a SLIST,
1894 * as we are called from a loop which traverses
1895 * the in_ifmultiaddr TAILQ.
1897 if (inm->inm_state == IGMP_LEAVING_MEMBER &&
1898 inm->inm_scrv == 0) {
1899 inm->inm_state = IGMP_NOT_MEMBER;
1900 inm_rele_locked(inmh, inm);
1909 * Suppress a group's pending response to a group or source/group query.
1911 * Do NOT suppress state changes. This leads to IGMPv3 inconsistency.
1912 * Do NOT update ST1/ST0 as this operation merely suppresses
1913 * the currently pending group record.
1914 * Do NOT suppress the response to a general query. It is possible but
1915 * it would require adding another state or flag.
1918 igmp_v3_suppress_group_record(struct in_multi *inm)
1921 IN_MULTI_LIST_LOCK_ASSERT();
1923 KASSERT(inm->inm_igi->igi_version == IGMP_VERSION_3,
1924 ("%s: not IGMPv3 mode on link", __func__));
1926 if (inm->inm_state != IGMP_G_QUERY_PENDING_MEMBER &&
1927 inm->inm_state != IGMP_SG_QUERY_PENDING_MEMBER)
1930 if (inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER)
1931 inm_clear_recorded(inm);
1934 inm->inm_state = IGMP_REPORTING_MEMBER;
1938 * Switch to a different IGMP version on the given interface,
1939 * as per Section 7.2.1.
1942 igmp_set_version(struct igmp_ifsoftc *igi, const int version)
1944 int old_version_timer;
1948 CTR4(KTR_IGMPV3, "%s: switching to v%d on ifp %p(%s)", __func__,
1949 version, igi->igi_ifp, igi->igi_ifp->if_xname);
1951 if (version == IGMP_VERSION_1 || version == IGMP_VERSION_2) {
1953 * Compute the "Older Version Querier Present" timer as per
1956 old_version_timer = igi->igi_rv * igi->igi_qi + igi->igi_qri;
1957 old_version_timer *= PR_SLOWHZ;
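/*
 * Worked example (illustrative): with the defaults carried in igi
 * (Robustness Variable 2, Query Interval 125s, Query Response
 * Interval 10s), the timeout is 2 * 125 + 10 = 260 seconds, or 520
 * ticks assuming the usual PR_SLOWHZ of 2 ticks per second.
 */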
1959 if (version == IGMP_VERSION_1) {
1960 igi->igi_v1_timer = old_version_timer;
1961 igi->igi_v2_timer = 0;
1962 } else if (version == IGMP_VERSION_2) {
1963 igi->igi_v1_timer = 0;
1964 igi->igi_v2_timer = old_version_timer;
1968 if (igi->igi_v1_timer == 0 && igi->igi_v2_timer > 0) {
1969 if (igi->igi_version != IGMP_VERSION_2) {
1970 igi->igi_version = IGMP_VERSION_2;
1971 igmp_v3_cancel_link_timers(igi);
1973 } else if (igi->igi_v1_timer > 0) {
1974 if (igi->igi_version != IGMP_VERSION_1) {
1975 igi->igi_version = IGMP_VERSION_1;
1976 igmp_v3_cancel_link_timers(igi);
1982 * Cancel pending IGMPv3 timers for the given link and all groups
1983 * joined on it; state-change, general-query, and group-query timers.
1985 * Only ever called on a transition from v3 to Compatibility mode. Kill
1986 * the timers stone dead (this may be expensive for large N groups), they
1987 * will be restarted if Compatibility Mode deems that they must be due to
1991 igmp_v3_cancel_link_timers(struct igmp_ifsoftc *igi)
1993 struct ifmultiaddr *ifma;
1995 struct in_multi *inm;
1996 struct in_multi_head inm_free_tmp;
1997 struct epoch_tracker et;
1999 CTR3(KTR_IGMPV3, "%s: cancel v3 timers on ifp %p(%s)", __func__,
2000 igi->igi_ifp, igi->igi_ifp->if_xname);
2002 IN_MULTI_LIST_LOCK_ASSERT();
2004 SLIST_INIT(&inm_free_tmp);
2007 * Stop the v3 General Query Response on this link stone dead.
2008 * If fasttimo is woken up due to V_interface_timers_running,
2009 * the flag will be cleared if there are no pending link timers.
2011 igi->igi_v3_timer = 0;
2014 * Now clear the current-state and state-change report timers
2015 * for all memberships scoped to this link.
2018 NET_EPOCH_ENTER(et);
2019 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2020 if (ifma->ifma_addr->sa_family != AF_INET ||
2021 ifma->ifma_protospec == NULL)
2023 inm = (struct in_multi *)ifma->ifma_protospec;
2024 switch (inm->inm_state) {
2025 case IGMP_NOT_MEMBER:
2026 case IGMP_SILENT_MEMBER:
2027 case IGMP_IDLE_MEMBER:
2028 case IGMP_LAZY_MEMBER:
2029 case IGMP_SLEEPING_MEMBER:
2030 case IGMP_AWAKENING_MEMBER:
2032 * These states are either not relevant in v3 mode,
2033 * or are unreported. Do nothing.
2036 case IGMP_LEAVING_MEMBER:
2038 * If we are leaving the group and switching to
2039 * compatibility mode, we need to release the final
2040 * reference held for issuing the INCLUDE {}, and
2041 * transition to REPORTING to ensure the host leave
2042 * message is sent upstream to the old querier --
2043 * transition to NOT would lose the leave and race.
2045 inm_rele_locked(&inm_free_tmp, inm);
2047 case IGMP_G_QUERY_PENDING_MEMBER:
2048 case IGMP_SG_QUERY_PENDING_MEMBER:
2049 inm_clear_recorded(inm);
2051 case IGMP_REPORTING_MEMBER:
2052 inm->inm_state = IGMP_REPORTING_MEMBER;
2056 * Always clear state-change and group report timers.
2057 * Free any pending IGMPv3 state-change records.
2059 inm->inm_sctimer = 0;
2061 mbufq_drain(&inm->inm_scq);
2065 inm_release_list_deferred(&inm_free_tmp);
2069 * Update the Older Version Querier Present timers for a link.
2070 * See Section 7.2.1 of RFC 3376.
2073 igmp_v1v2_process_querier_timers(struct igmp_ifsoftc *igi)
2078 if (igi->igi_v1_timer == 0 && igi->igi_v2_timer == 0) {
2080 * IGMPv1 and IGMPv2 Querier Present timers expired.
2084 if (igi->igi_version != IGMP_VERSION_3) {
2086 "%s: transition from v%d -> v%d on %p(%s)",
2087 __func__, igi->igi_version, IGMP_VERSION_3,
2088 igi->igi_ifp, igi->igi_ifp->if_xname);
2089 igi->igi_version = IGMP_VERSION_3;
2091 } else if (igi->igi_v1_timer == 0 && igi->igi_v2_timer > 0) {
2093 * IGMPv1 Querier Present timer expired,
2094 * IGMPv2 Querier Present timer running.
2095 * If IGMPv2 was disabled since last timeout,
2097 * If IGMPv2 is enabled, revert to IGMPv2.
2099 if (!V_igmp_v2enable) {
2101 "%s: transition from v%d -> v%d on %p(%s)",
2102 __func__, igi->igi_version, IGMP_VERSION_3,
2103 igi->igi_ifp, igi->igi_ifp->if_xname);
2104 igi->igi_v2_timer = 0;
2105 igi->igi_version = IGMP_VERSION_3;
2107 --igi->igi_v2_timer;
2108 if (igi->igi_version != IGMP_VERSION_2) {
2110 "%s: transition from v%d -> v%d on %p(%s)",
2111 __func__, igi->igi_version, IGMP_VERSION_2,
2112 igi->igi_ifp, igi->igi_ifp->if_xname);
2113 igi->igi_version = IGMP_VERSION_2;
2114 igmp_v3_cancel_link_timers(igi);
2117 } else if (igi->igi_v1_timer > 0) {
2119 * IGMPv1 Querier Present timer running.
2120 * Stop IGMPv2 timer if running.
2122 * If IGMPv1 was disabled since last timeout,
2124 * If IGMPv1 is enabled, reset IGMPv2 timer if running.
2126 if (!V_igmp_v1enable) {
2128 "%s: transition from v%d -> v%d on %p(%s)",
2129 __func__, igi->igi_version, IGMP_VERSION_3,
2130 igi->igi_ifp, igi->igi_ifp->if_xname);
2131 igi->igi_v1_timer = 0;
2132 igi->igi_version = IGMP_VERSION_3;
2134 --igi->igi_v1_timer;
2136 if (igi->igi_v2_timer > 0) {
2138 "%s: cancel v2 timer on %p(%s)",
2139 __func__, igi->igi_ifp, igi->igi_ifp->if_xname);
2140 igi->igi_v2_timer = 0;
2146 * Global slowtimo handler.
2147 * VIMAGE: Timeout handlers are expected to service all vimages.
2152 VNET_ITERATOR_DECL(vnet_iter);
2154 VNET_LIST_RLOCK_NOSLEEP();
2155 VNET_FOREACH(vnet_iter) {
2156 CURVNET_SET(vnet_iter);
2157 igmp_slowtimo_vnet();
2160 VNET_LIST_RUNLOCK_NOSLEEP();
2164 * Per-vnet slowtimo handler.
2167 igmp_slowtimo_vnet(void)
2169 struct igmp_ifsoftc *igi;
2173 LIST_FOREACH(igi, &V_igi_head, igi_link) {
2174 igmp_v1v2_process_querier_timers(igi);
2181 * Dispatch an IGMPv1/v2 host report or leave message.
2182 * These are always small enough to fit inside a single mbuf.
2185 igmp_v1v2_queue_report(struct in_multi *inm, const int type)
2192 IN_MULTI_LIST_LOCK_ASSERT();
2197 m = m_gethdr(M_NOWAIT, MT_DATA);
2200 M_ALIGN(m, sizeof(struct ip) + sizeof(struct igmp));
2202 m->m_pkthdr.len = sizeof(struct ip) + sizeof(struct igmp);
2204 m->m_data += sizeof(struct ip);
2205 m->m_len = sizeof(struct igmp);
2207 igmp = mtod(m, struct igmp *);
2208 igmp->igmp_type = type;
2209 igmp->igmp_code = 0;
2210 igmp->igmp_group = inm->inm_addr;
2211 igmp->igmp_cksum = 0;
2212 igmp->igmp_cksum = in_cksum(m, sizeof(struct igmp));
2214 m->m_data -= sizeof(struct ip);
2215 m->m_len += sizeof(struct ip);
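/*
 * Illustrative: the IP header is prepended simply by rewinding m_data
 * and growing m_len over the space reserved by M_ALIGN() above, so no
 * separate M_PREPEND() allocation is needed.
 */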
2217 ip = mtod(m, struct ip *);
2219 ip->ip_len = htons(sizeof(struct ip) + sizeof(struct igmp));
2221 ip->ip_p = IPPROTO_IGMP;
2222 ip->ip_src.s_addr = INADDR_ANY;
2224 if (type == IGMP_HOST_LEAVE_MESSAGE)
2225 ip->ip_dst.s_addr = htonl(INADDR_ALLRTRS_GROUP);
2227 ip->ip_dst = inm->inm_addr;
2229 igmp_save_context(m, ifp);
2231 m->m_flags |= M_IGMPV2;
2232 if (inm->inm_igi->igi_flags & IGIF_LOOPBACK)
2233 m->m_flags |= M_IGMP_LOOP;
2235 CTR2(KTR_IGMPV3, "%s: netisr_dispatch(NETISR_IGMP, %p)", __func__, m);
2236 netisr_dispatch(NETISR_IGMP, m);
2242 * Process a state change from the upper layer for the given IPv4 group.
2244 * Each socket holds a reference on the in_multi in its own ip_moptions.
2245 * The socket layer will have made the necessary updates to the group
2246 * state; it is now up to IGMP to issue a state change report if there
2247 * has been any change between T0 (when the last state-change was issued)
2250 * We use the IGMPv3 state machine at group level. The IGMP module
2251 * however makes the decision as to which IGMP protocol version to speak.
2252 * A state change *from* INCLUDE {} always means an initial join.
2253 * A state change *to* INCLUDE {} always means a final leave.
2255 * FUTURE: If IGIF_V3LITE is enabled for this interface, then we can
2256 * save ourselves a bunch of work; any exclusive mode groups need not
2257 * compute source filter lists.
2259 * VIMAGE: curvnet should have been set by caller, as this routine
2260 * is called from the socket option handlers.
2263 igmp_change_state(struct in_multi *inm)
2265 struct igmp_ifsoftc *igi;
2270 IN_MULTI_LOCK_ASSERT();
2272 * Try to detect if the upper layer just asked us to change state
2273 * for an interface which has now gone away.
2275 KASSERT(inm->inm_ifma != NULL, ("%s: no ifma", __func__));
2276 ifp = inm->inm_ifma->ifma_ifp;
2278 * Sanity check that netinet's notion of ifp is the
2281 KASSERT(inm->inm_ifp == ifp, ("%s: bad ifp", __func__));
2285 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
2286 KASSERT(igi != NULL, ("%s: no igmp_ifsoftc for ifp %p", __func__, ifp));
2289 * If we detect a state transition to or from MCAST_UNDEFINED
2290 * for this group, then we are starting or finishing an IGMP
2291 * life cycle for this group.
2293 if (inm->inm_st[1].iss_fmode != inm->inm_st[0].iss_fmode) {
2294 CTR3(KTR_IGMPV3, "%s: inm transition %d -> %d", __func__,
2295 inm->inm_st[0].iss_fmode, inm->inm_st[1].iss_fmode);
2296 if (inm->inm_st[0].iss_fmode == MCAST_UNDEFINED) {
2297 CTR1(KTR_IGMPV3, "%s: initial join", __func__);
2298 error = igmp_initial_join(inm, igi);
2300 } else if (inm->inm_st[1].iss_fmode == MCAST_UNDEFINED) {
2301 CTR1(KTR_IGMPV3, "%s: final leave", __func__);
2302 igmp_final_leave(inm, igi);
2306 CTR1(KTR_IGMPV3, "%s: filter set change", __func__);
2309 error = igmp_handle_state_change(inm, igi);
2317 * Perform the initial join for an IGMP group.
2319 * When joining a group:
2320 * If the group should have its IGMP traffic suppressed, do nothing.
2321 * IGMPv1 starts sending IGMPv1 host membership reports.
2322 * IGMPv2 starts sending IGMPv2 host membership reports.
2323 * IGMPv3 will schedule an IGMPv3 state-change report containing the
2324 * initial state of the membership.
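 *
 * Timer note (illustrative, assuming PR_FASTHZ counts fast timeouts per
 * second as elsewhere in this file): the unsolicited IGMPv1/v2 report is
 * re-armed with a random delay bounded by the maximum response interval,
 *
 *	inm->inm_timer = IGMP_RANDOM_DELAY(IGMP_V1V2_MAX_RI * PR_FASTHZ);
 *
 * i.e. at most IGMP_V1V2_MAX_RI seconds expressed in fast timeout ticks.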
2327 igmp_initial_join(struct in_multi *inm, struct igmp_ifsoftc *igi)
2331 int error, retval, syncstates;
2333 CTR4(KTR_IGMPV3, "%s: initial join 0x%08x on ifp %p(%s)", __func__,
2334 ntohl(inm->inm_addr.s_addr), inm->inm_ifp, inm->inm_ifp->if_xname);
2341 IN_MULTI_LOCK_ASSERT();
2344 KASSERT(igi && igi->igi_ifp == ifp, ("%s: inconsistent ifp", __func__));
2347 * Groups joined on loopback or marked as 'not reported',
2348 * e.g. 224.0.0.1, enter the IGMP_SILENT_MEMBER state and
2349 * are never reported in any IGMP protocol exchanges.
2350 * All other groups enter the appropriate IGMP state machine
2351 * for the version in use on this link.
2352 * A link marked as IGIF_SILENT causes IGMP to be completely
2353 * disabled for the link.
2355 if ((ifp->if_flags & IFF_LOOPBACK) ||
2356 (igi->igi_flags & IGIF_SILENT) ||
2357 !igmp_isgroupreported(inm->inm_addr)) {
2359 "%s: not kicking state machine for silent group", __func__);
2360 inm->inm_state = IGMP_SILENT_MEMBER;
2364 * Deal with overlapping in_multi lifecycle.
2365 * If this group was LEAVING, then make sure
2366 * we drop the reference we picked up to keep the
2367 * group around for the final INCLUDE {} enqueue.
2369 if (igi->igi_version == IGMP_VERSION_3 &&
2370 inm->inm_state == IGMP_LEAVING_MEMBER) {
2371 MPASS(inm->inm_refcount > 1);
2372 inm_rele_locked(NULL, inm);
2374 inm->inm_state = IGMP_REPORTING_MEMBER;
2376 switch (igi->igi_version) {
2377 case IGMP_VERSION_1:
2378 case IGMP_VERSION_2:
2379 inm->inm_state = IGMP_IDLE_MEMBER;
2380 error = igmp_v1v2_queue_report(inm,
2381 (igi->igi_version == IGMP_VERSION_2) ?
2382 IGMP_v2_HOST_MEMBERSHIP_REPORT :
2383 IGMP_v1_HOST_MEMBERSHIP_REPORT);
2385 inm->inm_timer = IGMP_RANDOM_DELAY(
2386 IGMP_V1V2_MAX_RI * PR_FASTHZ);
2387 V_current_state_timers_running = 1;
2391 case IGMP_VERSION_3:
2393 * Defer update of T0 to T1, until the first copy
2394 * of the state change has been transmitted.
2399 * Immediately enqueue a State-Change Report for
2400 * this interface, freeing any previous reports.
2401 * Don't kick the timers if there is nothing to do,
2402 * or if an error occurred.
2406 retval = igmp_v3_enqueue_group_record(mq, inm, 1,
2408 CTR2(KTR_IGMPV3, "%s: enqueue record = %d",
2411 error = retval * -1;
2416 * Schedule transmission of pending state-change
2417 * report up to RV times for this link. The timer
2418 * will fire at the next igmp_fasttimo (~200ms),
2419 * giving us an opportunity to merge the reports.
2421 if (igi->igi_flags & IGIF_LOOPBACK) {
2424 KASSERT(igi->igi_rv > 1,
2425 ("%s: invalid robustness %d", __func__,
2427 inm->inm_scrv = igi->igi_rv;
2429 inm->inm_sctimer = 1;
2430 V_state_change_timers_running = 1;
2438 * Only update the T0 state if state change is atomic,
2439 * i.e. we don't need to wait for a timer to fire before we
2440 * can consider the state change to have been communicated.
2444 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for 0x%08x/%s", __func__,
2445 ntohl(inm->inm_addr.s_addr), inm->inm_ifp->if_xname);
2452 * Issue an intermediate state change during the IGMP life-cycle.
2455 igmp_handle_state_change(struct in_multi *inm, struct igmp_ifsoftc *igi)
2460 CTR4(KTR_IGMPV3, "%s: state change for 0x%08x on ifp %p(%s)", __func__,
2461 ntohl(inm->inm_addr.s_addr), inm->inm_ifp, inm->inm_ifp->if_xname);
2465 IN_MULTI_LIST_LOCK_ASSERT();
2468 KASSERT(igi && igi->igi_ifp == ifp, ("%s: inconsistent ifp", __func__));
2470 if ((ifp->if_flags & IFF_LOOPBACK) ||
2471 (igi->igi_flags & IGIF_SILENT) ||
2472 !igmp_isgroupreported(inm->inm_addr) ||
2473 (igi->igi_version != IGMP_VERSION_3)) {
2474 if (!igmp_isgroupreported(inm->inm_addr)) {
2476 "%s: not kicking state machine for silent group", __func__);
2478 CTR1(KTR_IGMPV3, "%s: nothing to do", __func__);
2480 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for 0x%08x/%s", __func__,
2481 ntohl(inm->inm_addr.s_addr), inm->inm_ifp->if_xname);
2485 mbufq_drain(&inm->inm_scq);
2487 retval = igmp_v3_enqueue_group_record(&inm->inm_scq, inm, 1, 0, 0);
2488 CTR2(KTR_IGMPV3, "%s: enqueue record = %d", __func__, retval);
2493 * If record(s) were enqueued, start the state-change
2494 * report timer for this group.
2496 inm->inm_scrv = ((igi->igi_flags & IGIF_LOOPBACK) ? 1 : igi->igi_rv);
2497 inm->inm_sctimer = 1;
2498 V_state_change_timers_running = 1;
2504 * Perform the final leave for an IGMP group.
2506 * When leaving a group:
2507 * IGMPv1 does nothing.
2508 * IGMPv2 sends a host leave message, if and only if we are the reporter.
2509 * IGMPv3 enqueues a state-change report containing a transition
2510 * to INCLUDE {} for immediate transmission.
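 *
 * Outline of the IGMPv3 branch below (a sketch, not a substitute for the
 * code): pending per-group reports are drained, a TO_IN {} record is
 * enqueued, and the retransmission count is taken from the link's
 * robustness variable (1 on loopback links):
 *
 *	mbufq_drain(&inm->inm_scq);
 *	inm->inm_scrv = (igi->igi_flags & IGIF_LOOPBACK) ? 1 : igi->igi_rv;
 *	(void)igmp_v3_enqueue_group_record(&inm->inm_scq, inm, 1, 0, 0);
 *	inm->inm_state = IGMP_LEAVING_MEMBER;
 *	inm->inm_sctimer = 1;
 *
 * A reference on inm is also held until the final retransmission is done.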
2513 igmp_final_leave(struct in_multi *inm, struct igmp_ifsoftc *igi)
2519 CTR4(KTR_IGMPV3, "%s: final leave 0x%08x on ifp %p(%s)",
2520 __func__, ntohl(inm->inm_addr.s_addr), inm->inm_ifp,
2521 inm->inm_ifp->if_xname);
2523 IN_MULTI_LIST_LOCK_ASSERT();
2526 switch (inm->inm_state) {
2527 case IGMP_NOT_MEMBER:
2528 case IGMP_SILENT_MEMBER:
2529 case IGMP_LEAVING_MEMBER:
2530 /* Already leaving or left; do nothing. */
2532 "%s: not kicking state machine for silent group", __func__);
2534 case IGMP_REPORTING_MEMBER:
2535 case IGMP_IDLE_MEMBER:
2536 case IGMP_G_QUERY_PENDING_MEMBER:
2537 case IGMP_SG_QUERY_PENDING_MEMBER:
2538 if (igi->igi_version == IGMP_VERSION_2) {
2540 if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER ||
2541 inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER)
2542 panic("%s: IGMPv3 state reached, not IGMPv3 mode",
2545 igmp_v1v2_queue_report(inm, IGMP_HOST_LEAVE_MESSAGE);
2546 inm->inm_state = IGMP_NOT_MEMBER;
2547 } else if (igi->igi_version == IGMP_VERSION_3) {
2549 * Stop group timer and all pending reports.
2550 * Immediately enqueue a state-change report
2551 * TO_IN {} to be sent on the next fast timeout,
2552 * giving us an opportunity to merge reports.
2554 mbufq_drain(&inm->inm_scq);
2556 if (igi->igi_flags & IGIF_LOOPBACK) {
2559 inm->inm_scrv = igi->igi_rv;
2561 CTR4(KTR_IGMPV3, "%s: Leaving 0x%08x/%s with %d "
2562 "pending retransmissions.", __func__,
2563 ntohl(inm->inm_addr.s_addr),
2564 inm->inm_ifp->if_xname, inm->inm_scrv);
2565 if (inm->inm_scrv == 0) {
2566 inm->inm_state = IGMP_NOT_MEMBER;
2567 inm->inm_sctimer = 0;
2569 int retval __unused;
2571 inm_acquire_locked(inm);
2573 retval = igmp_v3_enqueue_group_record(
2574 &inm->inm_scq, inm, 1, 0, 0);
2575 KASSERT(retval != 0,
2576 ("%s: enqueue record = %d", __func__,
2579 inm->inm_state = IGMP_LEAVING_MEMBER;
2580 inm->inm_sctimer = 1;
2581 V_state_change_timers_running = 1;
2587 case IGMP_LAZY_MEMBER:
2588 case IGMP_SLEEPING_MEMBER:
2589 case IGMP_AWAKENING_MEMBER:
2590 /* Our reports are suppressed; do nothing. */
2596 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for 0x%08x/%s", __func__,
2597 ntohl(inm->inm_addr.s_addr), inm->inm_ifp->if_xname);
2598 inm->inm_st[1].iss_fmode = MCAST_UNDEFINED;
2599 CTR3(KTR_IGMPV3, "%s: T1 now MCAST_UNDEFINED for 0x%08x/%s",
2600 __func__, ntohl(inm->inm_addr.s_addr),
2601 inm->inm_ifp->if_xname);
2606 * Enqueue an IGMPv3 group record to the given output queue.
2608 * XXX This function could do with having the allocation code
2609 * split out, and the multiple-tree-walks coalesced into a single
2610 * routine as has been done in igmp_v3_enqueue_filter_change().
2612 * If is_state_change is zero, a current-state record is appended.
2613 * If is_state_change is non-zero, a state-change report is appended.
2615 * If is_group_query is non-zero, an mbuf packet chain is allocated.
2616 * If is_group_query is zero, and if there is a packet with free space
2617 * at the tail of the queue, it will be appended to, provided there
2618 * is enough free space.
2619 * Otherwise a new mbuf packet chain is allocated.
2621 * If is_source_query is non-zero, each source is checked to see if
2622 * it was recorded for a Group-Source query, and will be omitted if
2623 * it is not both in-mode and recorded.
2625 * The function will attempt to allocate leading space in the packet
2626 * for the IP/IGMP header to be prepended without fragmenting the chain.
2628 * If successful the size of all data appended to the queue is returned,
2629 * otherwise an error code less than zero is returned, or zero if
2630 * no record(s) were appended.
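 *
 * Usage sketch (illustrative): callers in this file invoke it along the
 * lines of
 *
 *	retval = igmp_v3_enqueue_group_record(&inm->inm_scq, inm, 1, 0, 0);
 *
 * for a state-change record on the group's own queue, and (with the
 * state-change flag clear) against igi->igi_gq when answering a General
 * Query; the exact flags for that path appear in
 * igmp_v3_dispatch_general_query() below.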
2633 igmp_v3_enqueue_group_record(struct mbufq *mq, struct in_multi *inm,
2634 const int is_state_change, const int is_group_query,
2635 const int is_source_query)
2637 struct igmp_grouprec ig;
2638 struct igmp_grouprec *pig;
2640 struct ip_msource *ims, *nims;
2641 struct mbuf *m0, *m, *md;
2642 int is_filter_list_change;
2643 int minrec0len, m0srcs, msrcs, nbytes, off;
2644 int record_has_sources;
2650 IN_MULTI_LIST_LOCK_ASSERT();
2653 is_filter_list_change = 0;
2660 record_has_sources = 1;
2662 type = IGMP_DO_NOTHING;
2663 mode = inm->inm_st[1].iss_fmode;
2666 * If we did not transition out of ASM mode during t0->t1,
2667 * and there are no source nodes to process, we can skip
2668 * the generation of source records.
2670 if (inm->inm_st[0].iss_asm > 0 && inm->inm_st[1].iss_asm > 0 &&
2672 record_has_sources = 0;
2674 if (is_state_change) {
2676 * Queue a state change record.
2677 * If the mode did not change, and there are non-ASM
2678 * listeners or source filters present,
2679 * we potentially need to issue two records for the group.
2680 * If we are transitioning to MCAST_UNDEFINED, we need
2681 * not send any sources.
2682 * If there are ASM listeners, and there was no filter
2683 * mode transition of any kind, do nothing.
2685 if (mode != inm->inm_st[0].iss_fmode) {
2686 if (mode == MCAST_EXCLUDE) {
2687 CTR1(KTR_IGMPV3, "%s: change to EXCLUDE",
2689 type = IGMP_CHANGE_TO_EXCLUDE_MODE;
2691 CTR1(KTR_IGMPV3, "%s: change to INCLUDE",
2693 type = IGMP_CHANGE_TO_INCLUDE_MODE;
2694 if (mode == MCAST_UNDEFINED)
2695 record_has_sources = 0;
2698 if (record_has_sources) {
2699 is_filter_list_change = 1;
2701 type = IGMP_DO_NOTHING;
2706 * Queue a current state record.
2708 if (mode == MCAST_EXCLUDE) {
2709 type = IGMP_MODE_IS_EXCLUDE;
2710 } else if (mode == MCAST_INCLUDE) {
2711 type = IGMP_MODE_IS_INCLUDE;
2712 KASSERT(inm->inm_st[1].iss_asm == 0,
2713 ("%s: inm %p is INCLUDE but ASM count is %d",
2714 __func__, inm, inm->inm_st[1].iss_asm));
2719 * Generate the filter list changes using a separate function.
2721 if (is_filter_list_change)
2722 return (igmp_v3_enqueue_filter_change(mq, inm));
2724 if (type == IGMP_DO_NOTHING) {
2725 CTR3(KTR_IGMPV3, "%s: nothing to do for 0x%08x/%s", __func__,
2726 ntohl(inm->inm_addr.s_addr), inm->inm_ifp->if_xname);
2731 * If any sources are present, we must be able to fit at least
2732 * one in the trailing space of the tail packet's mbuf,
2735 minrec0len = sizeof(struct igmp_grouprec);
2736 if (record_has_sources)
2737 minrec0len += sizeof(in_addr_t);
2739 CTR4(KTR_IGMPV3, "%s: queueing %s for 0x%08x/%s", __func__,
2740 igmp_rec_type_to_str(type), ntohl(inm->inm_addr.s_addr),
2741 inm->inm_ifp->if_xname);
2744 * Check if we have a packet in the tail of the queue for this
2745 * group into which the first group record for this group will fit.
2746 * Otherwise allocate a new packet.
2747 * Always allocate leading space for IP+RA_OPT+IGMP+REPORT.
2748 * Note: Group records for G/GSR query responses MUST be sent
2749 * in their own packet.
2751 m0 = mbufq_last(mq);
2752 if (!is_group_query &&
2754 (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <= IGMP_V3_REPORT_MAXRECS) &&
2755 (m0->m_pkthdr.len + minrec0len) <
2756 (ifp->if_mtu - IGMP_LEADINGSPACE)) {
2757 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
2758 sizeof(struct igmp_grouprec)) / sizeof(in_addr_t);
2760 CTR1(KTR_IGMPV3, "%s: use existing packet", __func__);
2762 if (mbufq_full(mq)) {
2763 CTR1(KTR_IGMPV3, "%s: outbound queue full", __func__);
2767 m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE -
2768 sizeof(struct igmp_grouprec)) / sizeof(in_addr_t);
2769 if (!is_state_change && !is_group_query) {
2770 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2772 m->m_data += IGMP_LEADINGSPACE;
2775 m = m_gethdr(M_NOWAIT, MT_DATA);
2777 M_ALIGN(m, IGMP_LEADINGSPACE);
2782 igmp_save_context(m, ifp);
2784 CTR1(KTR_IGMPV3, "%s: allocated first packet", __func__);
2788 * Append group record.
2789 * If we have sources, we don't know how many yet.
2794 ig.ig_group = inm->inm_addr;
2795 if (!m_append(m, sizeof(struct igmp_grouprec), (void *)&ig)) {
2798 CTR1(KTR_IGMPV3, "%s: m_append() failed.", __func__);
2801 nbytes += sizeof(struct igmp_grouprec);
2804 * Append as many sources as will fit in the first packet.
2805 * If we are appending to a new packet, the chain allocation
2806 * may potentially use clusters; use m_getptr() in this case.
2807 * If we are appending to an existing packet, we need to obtain
2808 * a pointer to the group record after m_append(), in case a new
2809 * mbuf was allocated.
2810 * Only append sources which are in-mode at t1. If we are
2811 * transitioning to MCAST_UNDEFINED state on the group, do not
2812 * include source entries.
2813 * Only report recorded sources in our filter set when responding
2814 * to a group-source query.
2816 if (record_has_sources) {
2819 pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) +
2820 md->m_len - nbytes);
2822 md = m_getptr(m, 0, &off);
2823 pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) +
2827 RB_FOREACH_SAFE(ims, ip_msource_tree, &inm->inm_srcs, nims) {
2828 CTR2(KTR_IGMPV3, "%s: visit node 0x%08x", __func__,
2830 now = ims_get_mode(inm, ims, 1);
2831 CTR2(KTR_IGMPV3, "%s: node is %d", __func__, now);
2832 if ((now != mode) ||
2833 (now == mode && mode == MCAST_UNDEFINED)) {
2834 CTR1(KTR_IGMPV3, "%s: skip node", __func__);
2837 if (is_source_query && ims->ims_stp == 0) {
2838 CTR1(KTR_IGMPV3, "%s: skip unrecorded node",
2842 CTR1(KTR_IGMPV3, "%s: append node", __func__);
2843 naddr = htonl(ims->ims_haddr);
2844 if (!m_append(m, sizeof(in_addr_t), (void *)&naddr)) {
2847 CTR1(KTR_IGMPV3, "%s: m_append() failed.",
2851 nbytes += sizeof(in_addr_t);
2853 if (msrcs == m0srcs)
2856 CTR2(KTR_IGMPV3, "%s: msrcs is %d this packet", __func__,
2858 pig->ig_numsrc = htons(msrcs);
2859 nbytes += (msrcs * sizeof(in_addr_t));
2862 if (is_source_query && msrcs == 0) {
2863 CTR1(KTR_IGMPV3, "%s: no recorded sources to report", __func__);
2870 * We are good to go with first packet.
2873 CTR1(KTR_IGMPV3, "%s: enqueueing first packet", __func__);
2874 m->m_pkthdr.PH_vt.vt_nrecs = 1;
2875 mbufq_enqueue(mq, m);
2877 m->m_pkthdr.PH_vt.vt_nrecs++;
2880 * No further work needed if no source list in packet(s).
2882 if (!record_has_sources)
2886 * Whilst sources remain to be announced, we need to allocate
2887 * a new packet and fill out as many sources as will fit.
2888 * Always try for a cluster first.
2890 while (nims != NULL) {
2891 if (mbufq_full(mq)) {
2892 CTR1(KTR_IGMPV3, "%s: outbound queue full", __func__);
2895 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2897 m->m_data += IGMP_LEADINGSPACE;
2899 m = m_gethdr(M_NOWAIT, MT_DATA);
2901 M_ALIGN(m, IGMP_LEADINGSPACE);
2905 igmp_save_context(m, ifp);
2906 md = m_getptr(m, 0, &off);
2907 pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) + off);
2908 CTR1(KTR_IGMPV3, "%s: allocated next packet", __func__);
2910 if (!m_append(m, sizeof(struct igmp_grouprec), (void *)&ig)) {
2913 CTR1(KTR_IGMPV3, "%s: m_append() failed.", __func__);
2916 m->m_pkthdr.PH_vt.vt_nrecs = 1;
2917 nbytes += sizeof(struct igmp_grouprec);
2919 m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE -
2920 sizeof(struct igmp_grouprec)) / sizeof(in_addr_t);
2923 RB_FOREACH_FROM(ims, ip_msource_tree, nims) {
2924 CTR2(KTR_IGMPV3, "%s: visit node 0x%08x", __func__,
2926 now = ims_get_mode(inm, ims, 1);
2927 if ((now != mode) ||
2928 (now == mode && mode == MCAST_UNDEFINED)) {
2929 CTR1(KTR_IGMPV3, "%s: skip node", __func__);
2932 if (is_source_query && ims->ims_stp == 0) {
2933 CTR1(KTR_IGMPV3, "%s: skip unrecorded node",
2937 CTR1(KTR_IGMPV3, "%s: append node", __func__);
2938 naddr = htonl(ims->ims_haddr);
2939 if (!m_append(m, sizeof(in_addr_t), (void *)&naddr)) {
2942 CTR1(KTR_IGMPV3, "%s: m_append() failed.",
2947 if (msrcs == m0srcs)
2950 pig->ig_numsrc = htons(msrcs);
2951 nbytes += (msrcs * sizeof(in_addr_t));
2953 CTR1(KTR_IGMPV3, "%s: enqueueing next packet", __func__);
2954 mbufq_enqueue(mq, m);
2961 * Type used to mark record pass completion.
2962 * We exploit the fact we can cast to this easily from the
2963 * current filter modes on each ip_msource node.
2966 REC_NONE = 0x00, /* MCAST_UNDEFINED */
2967 REC_ALLOW = 0x01, /* MCAST_INCLUDE */
2968 REC_BLOCK = 0x02, /* MCAST_EXCLUDE */
2969 REC_FULL = REC_ALLOW | REC_BLOCK
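
/*
 * Usage sketch (illustrative, not part of the original source): because
 * these values line up with the MCAST_* filter modes, a node's mode at t1
 * can be cast directly, and an UNDEFINED node is folded into the opposite
 * of the group's filter mode:
 *
 *	nrt = (rectype_t)now;
 *	if (nrt == REC_NONE)
 *		nrt = (rectype_t)(~mode & REC_FULL);
 */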
2973 * Enqueue an IGMPv3 filter list change to the given output queue.
2975 * Source list filter state is held in an RB-tree. When the filter list
2976 * for a group is changed without changing its mode, we need to compute
2977 * the deltas between T0 and T1 for each source in the filter set,
2978 * and enqueue the appropriate ALLOW_NEW/BLOCK_OLD records.
2980 * As we may potentially queue two record types, and the entire R-B tree
2981 * needs to be walked at once, we break this out into its own function
2982 * so we can generate a tightly packed queue of packets.
2984 * XXX This could be written to only use one tree walk, although that makes
2985 * serializing into the mbuf chains a bit harder. For now we do two walks
2986 * which makes things easier on us, and it may or may not be harder on
2989 * If successful the size of all data appended to the queue is returned,
2990 * otherwise an error code less than zero is returned, or zero if
2991 * no record(s) were appended.
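 *
 * Per-source delta rule applied below (shown as a sketch): each
 * ip_msource contributes at most one delta per pass,
 *
 *	then = ims_get_mode(inm, ims, 0);
 *	now  = ims_get_mode(inm, ims, 1);
 *	if (now == then)
 *		continue;	(unchanged: no delta)
 *	if (mode == MCAST_EXCLUDE && now == MCAST_INCLUDE)
 *		continue;	(IN source on an EX group: skip)
 *
 * otherwise the source is counted under ALLOW_NEW (now is INCLUDE) or
 * BLOCK_OLD (now is EXCLUDE); a source UNDEFINED at t1 is treated as the
 * inverse of the group's filter mode.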
2994 igmp_v3_enqueue_filter_change(struct mbufq *mq, struct in_multi *inm)
2996 static const int MINRECLEN =
2997 sizeof(struct igmp_grouprec) + sizeof(in_addr_t);
2999 struct igmp_grouprec ig;
3000 struct igmp_grouprec *pig;
3001 struct ip_msource *ims, *nims;
3002 struct mbuf *m, *m0, *md;
3004 int m0srcs, nbytes, npbytes, off, rsrcs, schanged;
3006 uint8_t mode, now, then;
3007 rectype_t crt, drt, nrt;
3009 IN_MULTI_LIST_LOCK_ASSERT();
3011 if (inm->inm_nsrc == 0 ||
3012 (inm->inm_st[0].iss_asm > 0 && inm->inm_st[1].iss_asm > 0))
3015 ifp = inm->inm_ifp; /* interface */
3016 mode = inm->inm_st[1].iss_fmode; /* filter mode at t1 */
3017 crt = REC_NONE; /* current group record type */
3018 drt = REC_NONE; /* mask of completed group record types */
3019 nrt = REC_NONE; /* record type for current node */
3020 m0srcs = 0; /* # source which will fit in current mbuf chain */
3021 nbytes = 0; /* # of bytes appended to group's state-change queue */
3022 npbytes = 0; /* # of bytes appended this packet */
3023 rsrcs = 0; /* # sources encoded in current record */
3024 schanged = 0; /* # nodes encoded in overall filter change */
3025 nallow = 0; /* # of source entries in ALLOW_NEW */
3026 nblock = 0; /* # of source entries in BLOCK_OLD */
3027 nims = NULL; /* next tree node pointer */
3030 * For each possible filter record mode.
3031 * The first kind of source we encounter tells us which
3032 * is the first kind of record we start appending.
3033 * If a node transitioned to UNDEFINED at t1, its mode is treated
3034 * as the inverse of the group's filter mode.
3036 while (drt != REC_FULL) {
3038 m0 = mbufq_last(mq);
3040 (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <=
3041 IGMP_V3_REPORT_MAXRECS) &&
3042 (m0->m_pkthdr.len + MINRECLEN) <
3043 (ifp->if_mtu - IGMP_LEADINGSPACE)) {
3045 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
3046 sizeof(struct igmp_grouprec)) /
3049 "%s: use previous packet", __func__);
3051 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
3053 m->m_data += IGMP_LEADINGSPACE;
3055 m = m_gethdr(M_NOWAIT, MT_DATA);
3057 M_ALIGN(m, IGMP_LEADINGSPACE);
3061 "%s: m_get*() failed", __func__);
3064 m->m_pkthdr.PH_vt.vt_nrecs = 0;
3065 igmp_save_context(m, ifp);
3066 m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE -
3067 sizeof(struct igmp_grouprec)) /
3071 "%s: allocated new packet", __func__);
3074 * Append the IGMP group record header to the
3075 * current packet's data area.
3076 * Recalculate pointer to free space for next
3077 * group record, in case m_append() allocated
3078 * a new mbuf or cluster.
3080 memset(&ig, 0, sizeof(ig));
3081 ig.ig_group = inm->inm_addr;
3082 if (!m_append(m, sizeof(ig), (void *)&ig)) {
3086 "%s: m_append() failed", __func__);
3089 npbytes += sizeof(struct igmp_grouprec);
3091 /* new packet; offset in chain */
3092 md = m_getptr(m, npbytes -
3093 sizeof(struct igmp_grouprec), &off);
3094 pig = (struct igmp_grouprec *)(mtod(md,
3097 /* current packet; offset from last append */
3099 pig = (struct igmp_grouprec *)(mtod(md,
3100 uint8_t *) + md->m_len -
3101 sizeof(struct igmp_grouprec));
3104 * Begin walking the tree for this record type
3105 * pass, or continue from where we left off
3106 * previously if we had to allocate a new packet.
3107 * Only report deltas in-mode at t1.
3108 * We need not report included sources as allowed
3109 * if we are in inclusive mode on the group,
3110 * however the converse is not true.
3114 nims = RB_MIN(ip_msource_tree, &inm->inm_srcs);
3115 RB_FOREACH_FROM(ims, ip_msource_tree, nims) {
3116 CTR2(KTR_IGMPV3, "%s: visit node 0x%08x",
3117 __func__, ims->ims_haddr);
3118 now = ims_get_mode(inm, ims, 1);
3119 then = ims_get_mode(inm, ims, 0);
3120 CTR3(KTR_IGMPV3, "%s: mode: t0 %d, t1 %d",
3121 __func__, then, now);
3124 "%s: skip unchanged", __func__);
3127 if (mode == MCAST_EXCLUDE &&
3128 now == MCAST_INCLUDE) {
3130 "%s: skip IN src on EX group",
3134 nrt = (rectype_t)now;
3135 if (nrt == REC_NONE)
3136 nrt = (rectype_t)(~mode & REC_FULL);
3137 if (schanged++ == 0) {
3139 } else if (crt != nrt)
3141 naddr = htonl(ims->ims_haddr);
3142 if (!m_append(m, sizeof(in_addr_t),
3147 "%s: m_append() failed", __func__);
3150 nallow += !!(crt == REC_ALLOW);
3151 nblock += !!(crt == REC_BLOCK);
3152 if (++rsrcs == m0srcs)
3156 * If we did not append any tree nodes on this
3157 * pass, back out of allocations.
3160 npbytes -= sizeof(struct igmp_grouprec);
3163 "%s: m_free(m)", __func__);
3167 "%s: m_adj(m, -ig)", __func__);
3168 m_adj(m, -((int)sizeof(
3169 struct igmp_grouprec)));
3173 npbytes += (rsrcs * sizeof(in_addr_t));
3174 if (crt == REC_ALLOW)
3175 pig->ig_type = IGMP_ALLOW_NEW_SOURCES;
3176 else if (crt == REC_BLOCK)
3177 pig->ig_type = IGMP_BLOCK_OLD_SOURCES;
3178 pig->ig_numsrc = htons(rsrcs);
3180 * Count the new group record, and enqueue this
3181 * packet if it wasn't already queued.
3183 m->m_pkthdr.PH_vt.vt_nrecs++;
3185 mbufq_enqueue(mq, m);
3187 } while (nims != NULL);
3189 crt = (~crt & REC_FULL);
3192 CTR3(KTR_IGMPV3, "%s: queued %d ALLOW_NEW, %d BLOCK_OLD", __func__,
3199 igmp_v3_merge_state_changes(struct in_multi *inm, struct mbufq *scq)
3202 struct mbuf *m; /* pending state-change */
3203 struct mbuf *m0; /* copy of pending state-change */
3204 struct mbuf *mt; /* last state-change in packet */
3205 int docopy, domerge;
3212 IN_MULTI_LIST_LOCK_ASSERT();
3216 * If there are further pending retransmissions, make a writable
3217 * copy of each queued state-change message before merging.
3219 if (inm->inm_scrv > 0)
3224 if (mbufq_first(gq) == NULL) {
3225 CTR2(KTR_IGMPV3, "%s: WARNING: queue for inm %p is empty",
3230 m = mbufq_first(gq);
3233 * Only merge the report into the current packet if
3234 * there is sufficient space to do so; an IGMPv3 report
3235 * packet may only contain 65,535 group records.
3236 * Always use a simple mbuf chain concatenation to do this,
3237 * as large state changes for single groups may have
3238 * allocated clusters.
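 *
 * Merge test sketch (mirrors the conditions just below): a pending report
 * m may be merged into the tail packet mt only if both
 *
 *	mt->m_pkthdr.PH_vt.vt_nrecs + m->m_pkthdr.PH_vt.vt_nrecs
 *	    <= IGMP_V3_REPORT_MAXRECS, and
 *	mt->m_pkthdr.len + m_length(m, NULL)
 *	    <= inm->inm_ifp->if_mtu - IGMP_LEADINGSPACE
 *
 * hold; otherwise the report is queued (or copied) as its own packet.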
3241 mt = mbufq_last(scq);
3243 recslen = m_length(m, NULL);
3245 if ((mt->m_pkthdr.PH_vt.vt_nrecs +
3246 m->m_pkthdr.PH_vt.vt_nrecs <=
3247 IGMP_V3_REPORT_MAXRECS) &&
3248 (mt->m_pkthdr.len + recslen <=
3249 (inm->inm_ifp->if_mtu - IGMP_LEADINGSPACE)))
3253 if (!domerge && mbufq_full(gq)) {
3255 "%s: outbound queue full, skipping whole packet %p",
3265 CTR2(KTR_IGMPV3, "%s: dequeueing %p", __func__, m);
3266 m0 = mbufq_dequeue(gq);
3269 CTR2(KTR_IGMPV3, "%s: copying %p", __func__, m);
3270 m0 = m_dup(m, M_NOWAIT);
3273 m0->m_nextpkt = NULL;
3278 CTR3(KTR_IGMPV3, "%s: queueing %p to scq %p)",
3280 mbufq_enqueue(scq, m0);
3282 struct mbuf *mtl; /* last mbuf of packet mt */
3284 CTR3(KTR_IGMPV3, "%s: merging %p with scq tail %p)",
3288 m0->m_flags &= ~M_PKTHDR;
3289 mt->m_pkthdr.len += recslen;
3290 mt->m_pkthdr.PH_vt.vt_nrecs +=
3291 m0->m_pkthdr.PH_vt.vt_nrecs;
3301 * Respond to a pending IGMPv3 General Query.
3304 igmp_v3_dispatch_general_query(struct igmp_ifsoftc *igi)
3306 struct epoch_tracker et;
3307 struct ifmultiaddr *ifma;
3309 struct in_multi *inm;
3310 int retval __unused, loop;
3312 IN_MULTI_LIST_LOCK_ASSERT();
3315 KASSERT(igi->igi_version == IGMP_VERSION_3,
3316 ("%s: called when version %d", __func__, igi->igi_version));
3319 * Check that there are some packets queued. If so, send them first.
3320 * For a large number of groups the reply to a general query can take
3321 * many packets; we should finish sending them before starting to
3322 * queue the new reply.
3324 if (mbufq_len(&igi->igi_gq) != 0)
3329 NET_EPOCH_ENTER(et);
3330 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3331 if (ifma->ifma_addr->sa_family != AF_INET ||
3332 ifma->ifma_protospec == NULL)
3335 inm = (struct in_multi *)ifma->ifma_protospec;
3336 KASSERT(ifp == inm->inm_ifp,
3337 ("%s: inconsistent ifp", __func__));
3339 switch (inm->inm_state) {
3340 case IGMP_NOT_MEMBER:
3341 case IGMP_SILENT_MEMBER:
3343 case IGMP_REPORTING_MEMBER:
3344 case IGMP_IDLE_MEMBER:
3345 case IGMP_LAZY_MEMBER:
3346 case IGMP_SLEEPING_MEMBER:
3347 case IGMP_AWAKENING_MEMBER:
3348 inm->inm_state = IGMP_REPORTING_MEMBER;
3349 retval = igmp_v3_enqueue_group_record(&igi->igi_gq,
3351 CTR2(KTR_IGMPV3, "%s: enqueue record = %d",
3354 case IGMP_G_QUERY_PENDING_MEMBER:
3355 case IGMP_SG_QUERY_PENDING_MEMBER:
3356 case IGMP_LEAVING_MEMBER:
3363 loop = (igi->igi_flags & IGIF_LOOPBACK) ? 1 : 0;
3364 igmp_dispatch_queue(&igi->igi_gq, IGMP_MAX_RESPONSE_BURST, loop);
3367 * Slew transmission of bursts over 500ms intervals.
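 * (Illustrative note: the burst interval is expressed in fast timeout
 * ticks; assuming IGMP_RESPONSE_BURST_INTERVAL is roughly PR_FASTHZ / 2,
 * the reload below,
 *
 *	igi->igi_v3_timer = 1 + IGMP_RANDOM_DELAY(IGMP_RESPONSE_BURST_INTERVAL);
 *
 * yields at least one tick plus a random slew within that 500ms window.)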
3369 if (mbufq_first(&igi->igi_gq) != NULL) {
3370 igi->igi_v3_timer = 1 + IGMP_RANDOM_DELAY(
3371 IGMP_RESPONSE_BURST_INTERVAL);
3372 V_interface_timers_running = 1;
3377 * Transmit the next pending IGMP message in the output queue.
3379 * We get called from netisr_processqueue(). A mutex private to igmpoq
3380 * will be acquired and released around this routine.
3382 * VIMAGE: Needs to store/restore vnet pointer on a per-mbuf-chain basis.
3383 * MRT: Nothing needs to be done, as IGMP traffic is always local to
3384 * a link and uses a link-scope multicast address.
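 *
 * Dispatch path sketch (illustrative): the senders in this file stash
 * per-mbuf context and hand the chain to the netisr, which later invokes
 * this routine, roughly:
 *
 *	igmp_save_context(m, ifp);		(records the ifnet index,
 *						 and the vnet under VIMAGE)
 *	netisr_dispatch(NETISR_IGMP, m);	(eventually calls igmp_intr)
 *
 * Here both are recovered from the mbuf header before ip_output().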
3387 igmp_intr(struct mbuf *m)
3389 struct ip_moptions imo;
3391 struct mbuf *ipopts, *m0;
3395 CTR2(KTR_IGMPV3, "%s: transmit %p", __func__, m);
3398 * Set VNET image pointer from enqueued mbuf chain
3399 * before doing anything else. Whilst we use interface
3400 * indexes to guard against interface detach, they are
3401 * unique to each VIMAGE and must be retrieved.
3403 CURVNET_SET((struct vnet *)(m->m_pkthdr.PH_loc.ptr));
3404 ifindex = igmp_restore_context(m);
3407 * Check if the ifnet still exists. This limits the scope of
3408 * any race in the absence of a global ifp lock for low cost
3409 * (an array lookup).
3411 ifp = ifnet_byindex(ifindex);
3413 CTR3(KTR_IGMPV3, "%s: dropped %p as ifindex %u went away.",
3414 __func__, m, ifindex);
3416 IPSTAT_INC(ips_noroute);
3420 ipopts = V_igmp_sendra ? m_raopt : NULL;
3422 imo.imo_multicast_ttl = 1;
3423 imo.imo_multicast_vif = -1;
3424 imo.imo_multicast_loop = (V_ip_mrouter != NULL);
3427 * If the user requested that IGMP traffic be explicitly
3428 * redirected to the loopback interface (e.g. they are running a
3429 * MANET interface and the routing protocol needs to see the
3430 * updates), handle this now.
3432 if (m->m_flags & M_IGMP_LOOP)
3433 imo.imo_multicast_ifp = V_loif;
3435 imo.imo_multicast_ifp = ifp;
3437 if (m->m_flags & M_IGMPV2) {
3440 m0 = igmp_v3_encap_report(ifp, m);
3442 CTR2(KTR_IGMPV3, "%s: dropped %p", __func__, m);
3444 IPSTAT_INC(ips_odropped);
3449 igmp_scrub_context(m0);
3451 m0->m_pkthdr.rcvif = V_loif;
3453 mac_netinet_igmp_send(ifp, m0);
3455 error = ip_output(m0, ipopts, NULL, 0, &imo, NULL);
3457 CTR3(KTR_IGMPV3, "%s: ip_output(%p) = %d", __func__, m0, error);
3461 IGMPSTAT_INC(igps_snd_reports);
3465 * We must restore the existing vnet pointer before
3466 * continuing as we are run from netisr context.
3472 * Encapsulate an IGMPv3 report.
3474 * The internal mbuf flag M_IGMPV3_HDR is used to indicate that the mbuf
3475 * chain has already had its IP/IGMPv3 header prepended. In this case
3476 * the function will not attempt to prepend; the lengths and checksums
3477 * will however be re-computed.
3479 * Returns a pointer to the new mbuf chain head, or NULL if the
3480 * allocation failed.
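 *
 * Resulting layout (sketch): [ struct ip | struct igmp_report | group
 * records ]. The report header carries the record count and a checksum
 * computed over the IGMP portion only:
 *
 *	igmp->ir_type    = IGMP_v3_HOST_MEMBERSHIP_REPORT;
 *	igmp->ir_numgrps = htons(m->m_pkthdr.PH_vt.vt_nrecs);
 *	igmp->ir_cksum   = in_cksum(m, sizeof(struct igmp_report) + igmpreclen);
 *
 * The Router Alert option, when enabled via V_igmp_sendra, is passed to
 * ip_output() separately by igmp_intr() rather than prepended here.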
3482 static struct mbuf *
3483 igmp_v3_encap_report(struct ifnet *ifp, struct mbuf *m)
3485 struct rm_priotracker in_ifa_tracker;
3486 struct igmp_report *igmp;
3488 int hdrlen, igmpreclen;
3490 KASSERT((m->m_flags & M_PKTHDR),
3491 ("%s: mbuf chain %p is !M_PKTHDR", __func__, m));
3493 igmpreclen = m_length(m, NULL);
3494 hdrlen = sizeof(struct ip) + sizeof(struct igmp_report);
3496 if (m->m_flags & M_IGMPV3_HDR) {
3497 igmpreclen -= hdrlen;
3499 M_PREPEND(m, hdrlen, M_NOWAIT);
3502 m->m_flags |= M_IGMPV3_HDR;
3505 CTR2(KTR_IGMPV3, "%s: igmpreclen is %d", __func__, igmpreclen);
3507 m->m_data += sizeof(struct ip);
3508 m->m_len -= sizeof(struct ip);
3510 igmp = mtod(m, struct igmp_report *);
3511 igmp->ir_type = IGMP_v3_HOST_MEMBERSHIP_REPORT;
3514 igmp->ir_numgrps = htons(m->m_pkthdr.PH_vt.vt_nrecs);
3516 igmp->ir_cksum = in_cksum(m, sizeof(struct igmp_report) + igmpreclen);
3517 m->m_pkthdr.PH_vt.vt_nrecs = 0;
3519 m->m_data -= sizeof(struct ip);
3520 m->m_len += sizeof(struct ip);
3522 ip = mtod(m, struct ip *);
3523 ip->ip_tos = IPTOS_PREC_INTERNETCONTROL;
3524 ip->ip_len = htons(hdrlen + igmpreclen);
3525 ip->ip_off = htons(IP_DF);
3526 ip->ip_p = IPPROTO_IGMP;
3529 ip->ip_src.s_addr = INADDR_ANY;
3531 if (m->m_flags & M_IGMP_LOOP) {
3532 struct epoch_tracker et;
3533 struct in_ifaddr *ia;
3535 NET_EPOCH_ENTER(et);
3536 IFP_TO_IA(ifp, ia, &in_ifa_tracker);
3538 ip->ip_src = ia->ia_addr.sin_addr;
3542 ip->ip_dst.s_addr = htonl(INADDR_ALLRPTS_GROUP);
3549 igmp_rec_type_to_str(const int type)
3553 case IGMP_CHANGE_TO_EXCLUDE_MODE:
3556 case IGMP_CHANGE_TO_INCLUDE_MODE:
3559 case IGMP_MODE_IS_EXCLUDE:
3562 case IGMP_MODE_IS_INCLUDE:
3565 case IGMP_ALLOW_NEW_SOURCES:
3568 case IGMP_BLOCK_OLD_SOURCES:
3580 vnet_igmp_init(const void *unused __unused)
3583 netisr_register_vnet(&igmp_nh);
3585 VNET_SYSINIT(vnet_igmp_init, SI_SUB_PROTO_MC, SI_ORDER_ANY,
3586 vnet_igmp_init, NULL);
3589 vnet_igmp_uninit(const void *unused __unused)
3592 /* This can happen when we shut down the entire network stack. */
3593 CTR1(KTR_IGMPV3, "%s: tearing down", __func__);
3595 netisr_unregister_vnet(&igmp_nh);
3597 VNET_SYSUNINIT(vnet_igmp_uninit, SI_SUB_PROTO_MC, SI_ORDER_ANY,
3598 vnet_igmp_uninit, NULL);
3602 DB_SHOW_COMMAND(igi_list, db_show_igi_list)
3604 struct igmp_ifsoftc *igi, *tigi;
3605 LIST_HEAD(_igi_list, igmp_ifsoftc) *igi_head;
3608 db_printf("usage: show igi_list <addr>\n");
3611 igi_head = (struct _igi_list *)addr;
3613 LIST_FOREACH_SAFE(igi, igi_head, igi_link, tigi) {
3614 db_printf("igmp_ifsoftc %p:\n", igi);
3615 db_printf(" ifp %p\n", igi->igi_ifp);
3616 db_printf(" version %u\n", igi->igi_version);
3617 db_printf(" v1_timer %u\n", igi->igi_v1_timer);
3618 db_printf(" v2_timer %u\n", igi->igi_v2_timer);
3619 db_printf(" v3_timer %u\n", igi->igi_v3_timer);
3620 db_printf(" flags %#x\n", igi->igi_flags);
3621 db_printf(" rv %u\n", igi->igi_rv);
3622 db_printf(" qi %u\n", igi->igi_qi);
3623 db_printf(" qri %u\n", igi->igi_qri);
3624 db_printf(" uri %u\n", igi->igi_uri);
3625 /* struct mbufq igi_gq; */
3632 igmp_modevent(module_t mod, int type, void *unused __unused)
3637 CTR1(KTR_IGMPV3, "%s: initializing", __func__);
3639 m_raopt = igmp_ra_alloc();
3640 netisr_register(&igmp_nh);
3643 CTR1(KTR_IGMPV3, "%s: tearing down", __func__);
3644 netisr_unregister(&igmp_nh);
3647 IGMP_LOCK_DESTROY();
3650 return (EOPNOTSUPP);
3655 static moduledata_t igmp_mod = {
3660 DECLARE_MODULE(igmp, igmp_mod, SI_SUB_PROTO_MC, SI_ORDER_MIDDLE);