/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2007-2009 Bruce Simpson.
 * Copyright (c) 1988 Stephen Deering.
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Stephen Deering of Stanford University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)igmp.c	8.1 (Berkeley) 7/19/93
 */

/*
 * Internet Group Management Protocol (IGMP) routines.
 * [RFC1112, RFC2236, RFC3376]
 *
 * Written by Steve Deering, Stanford, May 1988.
 * Modified by Rosen Sharma, Stanford, Aug 1994.
 * Modified by Bill Fenner, Xerox PARC, Feb 1995.
 * Modified to fully comply to IGMPv2 by Bill Fenner, Oct 1995.
 * Significantly rewritten for IGMPv3, VIMAGE, and SMP by Bruce Simpson.
 *
 * MULTICAST Revision: 3.5.1.4
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>
#include <sys/condvar.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <net/netisr.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#include <netinet/igmp.h>
#include <netinet/igmp_var.h>

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>

#ifndef KTR_IGMPV3
#define	KTR_IGMPV3 KTR_INET
#endif

#define	IGMP_SLOWHZ	2	/* 2 slow timeouts per second */
#define	IGMP_FASTHZ	5	/* 5 fast timeouts per second */
#define	IGMP_RESPONSE_BURST_INTERVAL	(IGMP_FASTHZ / 2)
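/*
 * Worked arithmetic (illustrative): with IGMP_FASTHZ = 5 fast ticks per
 * second, IGMP_RESPONSE_BURST_INTERVAL above evaluates to 5 / 2 = 2 fast
 * ticks, i.e. roughly 400 ms between bursts of queued responses.
 */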
static struct igmp_ifsoftc *
		igi_alloc_locked(struct ifnet *);
static void	igi_delete_locked(const struct ifnet *);
static void	igmp_dispatch_queue(struct mbufq *, int, const int);
static void	igmp_fasttimo_vnet(void);
static void	igmp_final_leave(struct in_multi *, struct igmp_ifsoftc *);
static int	igmp_handle_state_change(struct in_multi *,
		    struct igmp_ifsoftc *);
static int	igmp_initial_join(struct in_multi *, struct igmp_ifsoftc *);
static int	igmp_input_v1_query(struct ifnet *, const struct ip *,
		    const struct igmp *);
static int	igmp_input_v2_query(struct ifnet *, const struct ip *,
		    const struct igmp *);
static int	igmp_input_v3_query(struct ifnet *, const struct ip *,
		    /*const*/ struct igmpv3 *);
static int	igmp_input_v3_group_query(struct in_multi *,
		    struct igmp_ifsoftc *, int, /*const*/ struct igmpv3 *);
static int	igmp_input_v1_report(struct ifnet *, /*const*/ struct ip *,
		    /*const*/ struct igmp *);
static int	igmp_input_v2_report(struct ifnet *, /*const*/ struct ip *,
		    /*const*/ struct igmp *);
static void	igmp_intr(struct mbuf *);
static int	igmp_isgroupreported(const struct in_addr);
static struct mbuf *
		igmp_ra_alloc(void);
#ifdef KTR
static char *	igmp_rec_type_to_str(const int);
#endif
static void	igmp_set_version(struct igmp_ifsoftc *, const int);
static void	igmp_slowtimo_vnet(void);
static int	igmp_v1v2_queue_report(struct in_multi *, const int);
static void	igmp_v1v2_process_group_timer(struct in_multi *, const int);
static void	igmp_v1v2_process_querier_timers(struct igmp_ifsoftc *);
static void	igmp_v2_update_group(struct in_multi *, const int);
static void	igmp_v3_cancel_link_timers(struct igmp_ifsoftc *);
static void	igmp_v3_dispatch_general_query(struct igmp_ifsoftc *);
static struct mbuf *
		igmp_v3_encap_report(struct ifnet *, struct mbuf *);
static int	igmp_v3_enqueue_group_record(struct mbufq *,
		    struct in_multi *, const int, const int, const int);
static int	igmp_v3_enqueue_filter_change(struct mbufq *,
		    struct in_multi *);
static void	igmp_v3_process_group_timers(struct in_multi_head *,
		    struct mbufq *, struct mbufq *, struct in_multi *,
		    const int);
static int	igmp_v3_merge_state_changes(struct in_multi *,
		    struct mbufq *);
static void	igmp_v3_suppress_group_record(struct in_multi *);
static int	sysctl_igmp_default_version(SYSCTL_HANDLER_ARGS);
static int	sysctl_igmp_gsr(SYSCTL_HANDLER_ARGS);
static int	sysctl_igmp_ifinfo(SYSCTL_HANDLER_ARGS);
static int	sysctl_igmp_stat(SYSCTL_HANDLER_ARGS);

static const struct netisr_handler igmp_nh = {
	.nh_name = "igmp",
	.nh_handler = igmp_intr,
	.nh_proto = NETISR_IGMP,
	.nh_policy = NETISR_POLICY_SOURCE,
};
/*
 * System-wide globals.
 *
 * Unlocked access to these is OK, except for the global IGMP output
 * queue.  The IGMP subsystem lock ends up being system-wide for the moment,
 * because all VIMAGEs have to share a global output queue, as netisrs
 * themselves are not virtualized.
 *
 * Locking:
 *  * The permitted lock order is: IN_MULTI_LIST_LOCK, IGMP_LOCK, IF_ADDR_LOCK.
 *    Any may be taken independently; if any are held at the same
 *    time, the above lock order must be followed.
 *  * All output is delegated to the netisr.
 *    Now that Giant has been eliminated, the netisr may be inlined.
 *  * IN_MULTI_LIST_LOCK covers in_multi.
 *  * IGMP_LOCK covers igmp_ifsoftc and any global variables in this file,
 *    including the output queue.
 *  * IF_ADDR_LOCK covers if_multiaddrs, which is used for a variety of
 *    per-link state iterators.
 *  * igmp_ifsoftc is valid as long as PF_INET is attached to the interface,
 *    therefore it is not refcounted.
 *    We allow unlocked reads of igmp_ifsoftc when accessed via in_multi.
 *
 * Reference counting:
 *  * IGMP acquires its own reference every time an in_multi is passed to
 *    it and the group is being joined for the first time.
 *  * IGMP releases its reference(s) on in_multi in a deferred way,
 *    because the operations which process the release run as part of
 *    a loop whose control variables are directly affected by the release
 *    (that, and not recursing on the IF_ADDR_LOCK).
 *
 * VIMAGE: Each in_multi corresponds to an ifp, and each ifp corresponds
 * to a vnet in ifp->if_vnet.
 *
 * SMPng: XXX We may potentially race operations on ifma_protospec.
 * The problem is that we currently lack a clean way of taking the
 * IF_ADDR_LOCK() between the ifnet and in layers w/o recursing,
 * as anything which modifies ifma needs to be covered by that lock.
 * So check for ifma_protospec being NULL before proceeding.
 */

struct mbuf *m_raopt;			/* Router Alert option */
static MALLOC_DEFINE(M_IGMP, "igmp", "igmp state");
/*
 * VIMAGE-wide globals.
 *
 * The IGMPv3 timers themselves need to run per-image, however, for
 * historical reasons, timers run globally.  This needs to be improved.
 * An ifnet can only be in one vimage at a time, and the loopback
 * ifnet, loif, is itself virtualized.
 * It would otherwise be possible to seriously hose IGMP state,
 * and create inconsistencies in upstream multicast routing, if you have
 * multiple VIMAGEs running on the same link joining different multicast
 * groups, UNLESS the "primary IP address" is different.  This is because
 * IGMP for IPv4 does not force link-local addresses to be used for each
 * node, unlike MLD for IPv6.
 * Obviously the IGMPv3 per-interface state has per-vimage granularity
 * also as a result.
 *
 * FUTURE: Stop using IFP_TO_IA/INADDR_ANY, and use source address selection
 * policy to control the address used by IGMP on the link.
 */
VNET_DEFINE_STATIC(int, interface_timers_running);	/* IGMPv3 general
							 * query response */
VNET_DEFINE_STATIC(int, state_change_timers_running);	/* IGMPv3 state-change
							 * report */
VNET_DEFINE_STATIC(int, current_state_timers_running);	/* IGMPv1/v2 host
							 * report; IGMPv3 g/sg
							 * query response */

#define	V_interface_timers_running	VNET(interface_timers_running)
#define	V_state_change_timers_running	VNET(state_change_timers_running)
#define	V_current_state_timers_running	VNET(current_state_timers_running)

VNET_PCPUSTAT_DEFINE(struct igmpstat, igmpstat);
VNET_PCPUSTAT_SYSINIT(igmpstat);
VNET_PCPUSTAT_SYSUNINIT(igmpstat);

VNET_DEFINE_STATIC(LIST_HEAD(, igmp_ifsoftc), igi_head) =
    LIST_HEAD_INITIALIZER(igi_head);
VNET_DEFINE_STATIC(struct timeval, igmp_gsrdelay) = {10, 0};

#define	V_igi_head			VNET(igi_head)
#define	V_igmp_gsrdelay			VNET(igmp_gsrdelay)
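/*
 * Note (illustrative): igmp_gsrdelay is a struct timeval, so the {10, 0}
 * initializer above means 10 seconds and 0 microseconds; by default at
 * most one IGMPv3 group-and-source query per group is honoured every
 * 10 seconds (see the ratecheck() call in igmp_input_v3_query() below).
 */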
VNET_DEFINE_STATIC(int, igmp_recvifkludge) = 1;
VNET_DEFINE_STATIC(int, igmp_sendra) = 1;
VNET_DEFINE_STATIC(int, igmp_sendlocal) = 1;
VNET_DEFINE_STATIC(int, igmp_v1enable) = 1;
VNET_DEFINE_STATIC(int, igmp_v2enable) = 1;
VNET_DEFINE_STATIC(int, igmp_legacysupp);
VNET_DEFINE_STATIC(int, igmp_default_version) = IGMP_VERSION_3;

#define	V_igmp_recvifkludge		VNET(igmp_recvifkludge)
#define	V_igmp_sendra			VNET(igmp_sendra)
#define	V_igmp_sendlocal		VNET(igmp_sendlocal)
#define	V_igmp_v1enable			VNET(igmp_v1enable)
#define	V_igmp_v2enable			VNET(igmp_v2enable)
#define	V_igmp_legacysupp		VNET(igmp_legacysupp)
#define	V_igmp_default_version		VNET(igmp_default_version)

/*
 * Virtualized sysctls.
 */
SYSCTL_PROC(_net_inet_igmp, IGMPCTL_STATS, stats,
    CTLFLAG_VNET | CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    &VNET_NAME(igmpstat), 0, sysctl_igmp_stat, "S,igmpstat",
    "IGMP statistics (struct igmpstat, netinet/igmp_var.h)");
SYSCTL_INT(_net_inet_igmp, OID_AUTO, recvifkludge, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(igmp_recvifkludge), 0,
    "Rewrite IGMPv1/v2 reports from 0.0.0.0 to contain subnet address");
SYSCTL_INT(_net_inet_igmp, OID_AUTO, sendra, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(igmp_sendra), 0,
    "Send IP Router Alert option in IGMPv2/v3 messages");
SYSCTL_INT(_net_inet_igmp, OID_AUTO, sendlocal, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(igmp_sendlocal), 0,
    "Send IGMP membership reports for 224.0.0.0/24 groups");
SYSCTL_INT(_net_inet_igmp, OID_AUTO, v1enable, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(igmp_v1enable), 0,
    "Enable backwards compatibility with IGMPv1");
SYSCTL_INT(_net_inet_igmp, OID_AUTO, v2enable, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(igmp_v2enable), 0,
    "Enable backwards compatibility with IGMPv2");
SYSCTL_INT(_net_inet_igmp, OID_AUTO, legacysupp, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(igmp_legacysupp), 0,
    "Allow v1/v2 reports to suppress v3 group responses");
SYSCTL_PROC(_net_inet_igmp, OID_AUTO, default_version,
    CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    &VNET_NAME(igmp_default_version), 0, sysctl_igmp_default_version, "I",
    "Default version of IGMP to run on each interface");
SYSCTL_PROC(_net_inet_igmp, OID_AUTO, gsrdelay,
    CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    &VNET_NAME(igmp_gsrdelay.tv_sec), 0, sysctl_igmp_gsr, "I",
    "Rate limit for IGMPv3 Group-and-Source queries in seconds");
/*
 * Non-virtualized sysctls.
 */
static SYSCTL_NODE(_net_inet_igmp, OID_AUTO, ifinfo,
    CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_igmp_ifinfo,
    "Per-interface IGMPv3 state");

static __inline void
igmp_save_context(struct mbuf *m, struct ifnet *ifp)
{

#ifdef VIMAGE
	m->m_pkthdr.PH_loc.ptr = ifp->if_vnet;
#endif /* VIMAGE */
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.flowid = ifp->if_index;
}
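/*
 * Note (inference, not stated in the original comments): the save/restore
 * pair here smuggles the vnet pointer through m_pkthdr.PH_loc.ptr and the
 * transmit ifindex through m_pkthdr.flowid while the packet sits on the
 * IGMP output queue; igmp_restore_context() below asserts the vnet and
 * hands the ifindex back to the netisr-time transmit path.  This presumes
 * queued IGMP packets never have their flowid consulted for RSS before
 * igmp_intr() runs.
 */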
static __inline void
igmp_scrub_context(struct mbuf *m)
{

	m->m_pkthdr.PH_loc.ptr = NULL;
	m->m_pkthdr.flowid = 0;
}

/*
 * Restore context from a queued IGMP output chain.
 * Return saved ifindex.
 *
 * VIMAGE: The assertion is there to make sure that we
 * actually called CURVNET_SET() with what's in the mbuf chain.
 */
static __inline uint32_t
igmp_restore_context(struct mbuf *m)
{

#if defined(VIMAGE) && defined(INVARIANTS)
	KASSERT(curvnet == (m->m_pkthdr.PH_loc.ptr),
	    ("%s: called when curvnet was not restored", __func__));
#endif
	return (m->m_pkthdr.flowid);
}
static int
sysctl_igmp_stat(SYSCTL_HANDLER_ARGS)
{
	struct igmpstat igps0;
	int error;
	char *p;

	error = sysctl_wire_old_buffer(req, sizeof(struct igmpstat));
	if (error)
		return (error);

	if (req->oldptr != NULL) {
		if (req->oldlen < sizeof(struct igmpstat))
			return (ENOMEM);
		/*
		 * Copy the counters, and explicitly set the struct's
		 * version and length fields.
		 */
		COUNTER_ARRAY_COPY(VNET(igmpstat), &igps0,
		    sizeof(struct igmpstat) / sizeof(uint64_t));
		igps0.igps_version = IGPS_VERSION_3;
		igps0.igps_len = IGPS_VERSION3_LEN;
		error = SYSCTL_OUT(req, &igps0,
		    sizeof(struct igmpstat));
		if (error)
			return (error);

		req->validlen = sizeof(struct igmpstat);
	}
	if (req->newptr != NULL) {
		if (req->newlen < sizeof(struct igmpstat))
			return (ENOMEM);
		error = SYSCTL_IN(req, &igps0,
		    sizeof(struct igmpstat));
		if (error)
			return (error);
		/*
		 * igps0 must be "all zero".
		 */
		p = (char *)&igps0;
		while (p < (char *)&igps0 + sizeof(igps0) && *p == '\0')
			p++;
		if (p != (char *)&igps0 + sizeof(igps0))
			return (EINVAL);
		COUNTER_ARRAY_ZERO(VNET(igmpstat),
		    sizeof(struct igmpstat) / sizeof(uint64_t));
	}
	return (error);
}
/*
 * Retrieve or set default IGMP version.
 *
 * VIMAGE: Assume curvnet set by caller.
 * SMPng: NOTE: Serialized by IGMP lock.
 */
static int
sysctl_igmp_default_version(SYSCTL_HANDLER_ARGS)
{
	int error;
	int new;

	error = sysctl_wire_old_buffer(req, sizeof(int));
	if (error)
		return (error);

	IGMP_LOCK();

	new = V_igmp_default_version;

	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error || !req->newptr)
		goto out_locked;

	if (new < IGMP_VERSION_1 || new > IGMP_VERSION_3) {
		error = EINVAL;
		goto out_locked;
	}

	CTR2(KTR_IGMPV3, "change igmp_default_version from %d to %d",
	     V_igmp_default_version, new);

	V_igmp_default_version = new;

out_locked:
	IGMP_UNLOCK();
	return (error);
}

/*
 * Retrieve or set threshold between group-source queries in seconds.
 *
 * VIMAGE: Assume curvnet set by caller.
 * SMPng: NOTE: Serialized by IGMP lock.
 */
static int
sysctl_igmp_gsr(SYSCTL_HANDLER_ARGS)
{
	int error;
	int i;

	error = sysctl_wire_old_buffer(req, sizeof(int));
	if (error)
		return (error);

	IGMP_LOCK();

	i = V_igmp_gsrdelay.tv_sec;

	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error || !req->newptr)
		goto out_locked;

	if (i < -1 || i >= 60) {
		error = EINVAL;
		goto out_locked;
	}

	CTR2(KTR_IGMPV3, "change igmp_gsrdelay from %d to %d",
	     V_igmp_gsrdelay.tv_sec, i);
	V_igmp_gsrdelay.tv_sec = i;

out_locked:
	IGMP_UNLOCK();
	return (error);
}
/*
 * Expose struct igmp_ifsoftc to userland, keyed by ifindex.
 * For use by ifmcstat(8).
 *
 * SMPng: NOTE: Does an unlocked ifindex space read.
 * VIMAGE: Assume curvnet set by caller. The node handler itself
 * is not directly virtualized.
 */
static int
sysctl_igmp_ifinfo(SYSCTL_HANDLER_ARGS)
{
	struct epoch_tracker et;
	int *name;
	int error;
	u_int namelen;
	struct ifnet *ifp;
	struct igmp_ifsoftc *igi;

	name = (int *)arg1;
	namelen = arg2;

	if (req->newptr != NULL)
		return (EPERM);

	if (namelen != 1)
		return (EINVAL);

	error = sysctl_wire_old_buffer(req, sizeof(struct igmp_ifinfo));
	if (error)
		return (error);

	IN_MULTI_LIST_LOCK();
	IGMP_LOCK();

	error = ENOENT;

	NET_EPOCH_ENTER(et);
	ifp = ifnet_byindex(name[0]);
	NET_EPOCH_EXIT(et);
	if (ifp == NULL)
		goto out_locked;

	LIST_FOREACH(igi, &V_igi_head, igi_link) {
		if (ifp == igi->igi_ifp) {
			struct igmp_ifinfo info;

			info.igi_version = igi->igi_version;
			info.igi_v1_timer = igi->igi_v1_timer;
			info.igi_v2_timer = igi->igi_v2_timer;
			info.igi_v3_timer = igi->igi_v3_timer;
			info.igi_flags = igi->igi_flags;
			info.igi_rv = igi->igi_rv;
			info.igi_qi = igi->igi_qi;
			info.igi_qri = igi->igi_qri;
			info.igi_uri = igi->igi_uri;
			error = SYSCTL_OUT(req, &info, sizeof(info));
			break;
		}
	}

out_locked:
	IGMP_UNLOCK();
	IN_MULTI_LIST_UNLOCK();
	return (error);
}
/*
 * Dispatch an entire queue of pending packet chains
 * using the netisr.
 * VIMAGE: Assumes the vnet pointer has been set.
 */
static void
igmp_dispatch_queue(struct mbufq *mq, int limit, const int loop)
{
	struct epoch_tracker et;
	struct mbuf *m;

	NET_EPOCH_ENTER(et);
	while ((m = mbufq_dequeue(mq)) != NULL) {
		CTR3(KTR_IGMPV3, "%s: dispatch %p from %p", __func__, mq, m);
		if (loop)
			m->m_flags |= M_IGMP_LOOP;
		netisr_dispatch(NETISR_IGMP, m);
		if (--limit == 0)
			break;
	}
	NET_EPOCH_EXIT(et);
}
/*
 * Filter outgoing IGMP report state by group.
 *
 * Reports are ALWAYS suppressed for ALL-HOSTS (224.0.0.1).
 * If the net.inet.igmp.sendlocal sysctl is 0, then IGMP reports are
 * disabled for all groups in the 224.0.0.0/24 link-local scope.  However,
 * this may break certain IGMP snooping switches which rely on the old
 * report behaviour.
 *
 * Return zero if the given group is one for which IGMP reports
 * should be suppressed, or non-zero if reports should be issued.
 */
static int
igmp_isgroupreported(const struct in_addr addr)
{

	if (in_allhosts(addr) ||
	    ((!V_igmp_sendlocal && IN_LOCAL_GROUP(ntohl(addr.s_addr)))))
		return (0);

	return (1);
}
/*
 * Construct a Router Alert option to use in outgoing packets.
 */
static struct mbuf *
igmp_ra_alloc(void)
{
	struct mbuf *m;
	struct ipoption *p;

	m = m_get(M_WAITOK, MT_DATA);
	p = mtod(m, struct ipoption *);
	p->ipopt_dst.s_addr = INADDR_ANY;
	p->ipopt_list[0] = (char)IPOPT_RA;	/* Router Alert Option */
	p->ipopt_list[1] = 0x04;		/* 4 bytes long */
	p->ipopt_list[2] = IPOPT_EOL;		/* End of IP option list */
	p->ipopt_list[3] = 0x00;		/* pad byte */
	m->m_len = sizeof(p->ipopt_dst) + p->ipopt_list[1];

	return (m);
}
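/*
 * Worked example (illustrative): IPOPT_RA is option number 148 (0x94,
 * RFC 2113), so the option bytes built above are 94 04 00 00 on the wire,
 * and m_len works out to 4 (ipopt_dst) + 4 (option bytes) = 8 bytes.
 */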
/*
 * Attach IGMP when PF_INET is attached to an interface.
 */
struct igmp_ifsoftc *
igmp_domifattach(struct ifnet *ifp)
{
	struct igmp_ifsoftc *igi;

	CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)",
	    __func__, ifp, ifp->if_xname);

	IGMP_LOCK();

	igi = igi_alloc_locked(ifp);
	if (!(ifp->if_flags & IFF_MULTICAST))
		igi->igi_flags |= IGIF_SILENT;

	IGMP_UNLOCK();

	return (igi);
}

/*
 * VIMAGE: assume curvnet set by caller.
 */
static struct igmp_ifsoftc *
igi_alloc_locked(/*const*/ struct ifnet *ifp)
{
	struct igmp_ifsoftc *igi;

	IGMP_LOCK_ASSERT();

	igi = malloc(sizeof(struct igmp_ifsoftc), M_IGMP, M_NOWAIT|M_ZERO);
	if (igi == NULL)
		goto out;

	igi->igi_ifp = ifp;
	igi->igi_version = V_igmp_default_version;
	igi->igi_flags = 0;
	igi->igi_rv = IGMP_RV_INIT;
	igi->igi_qi = IGMP_QI_INIT;
	igi->igi_qri = IGMP_QRI_INIT;
	igi->igi_uri = IGMP_URI_INIT;
	mbufq_init(&igi->igi_gq, IGMP_MAX_RESPONSE_PACKETS);

	LIST_INSERT_HEAD(&V_igi_head, igi, igi_link);

	CTR2(KTR_IGMPV3, "allocate igmp_ifsoftc for ifp %p(%s)",
	     ifp, ifp->if_xname);

out:
	return (igi);
}
/*
 * Hook for ifdetach.
 *
 * NOTE: Some finalization tasks need to run before the protocol domain
 * is detached, but also before the link layer does its cleanup.
 *
 * SMPNG: igmp_ifdetach() needs to take IF_ADDR_LOCK().
 * XXX This is also bitten by unlocked ifma_protospec access.
 */
void
igmp_ifdetach(struct ifnet *ifp)
{
	struct epoch_tracker et;
	struct igmp_ifsoftc *igi;
	struct ifmultiaddr *ifma;
	struct in_multi *inm;
	struct in_multi_head inm_free_tmp;

	CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)", __func__, ifp,
	    ifp->if_xname);

	SLIST_INIT(&inm_free_tmp);
	IGMP_LOCK();

	igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
	if (igi->igi_version == IGMP_VERSION_3) {
		IF_ADDR_WLOCK(ifp);
		NET_EPOCH_ENTER(et);
		CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			inm = inm_ifmultiaddr_get_inm(ifma);
			if (inm == NULL)
				continue;
			if (inm->inm_state == IGMP_LEAVING_MEMBER)
				inm_rele_locked(&inm_free_tmp, inm);
			inm_clear_recorded(inm);
		}
		NET_EPOCH_EXIT(et);
		IF_ADDR_WUNLOCK(ifp);
		inm_release_list_deferred(&inm_free_tmp);
	}
	IGMP_UNLOCK();
}
/*
 * Hook for domifdetach.
 */
void
igmp_domifdetach(struct ifnet *ifp)
{

	CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)",
	    __func__, ifp, ifp->if_xname);

	IGMP_LOCK();
	igi_delete_locked(ifp);
	IGMP_UNLOCK();
}

static void
igi_delete_locked(const struct ifnet *ifp)
{
	struct igmp_ifsoftc *igi, *tigi;

	CTR3(KTR_IGMPV3, "%s: freeing igmp_ifsoftc for ifp %p(%s)",
	    __func__, ifp, ifp->if_xname);

	IGMP_LOCK_ASSERT();

	LIST_FOREACH_SAFE(igi, &V_igi_head, igi_link, tigi) {
		if (igi->igi_ifp == ifp) {
			/*
			 * Free deferred General Query responses.
			 */
			mbufq_drain(&igi->igi_gq);

			LIST_REMOVE(igi, igi_link);
			free(igi, M_IGMP);
			return;
		}
	}
}
/*
 * Process a received IGMPv1 query.
 * Return non-zero if the message should be dropped.
 *
 * VIMAGE: The curvnet pointer is derived from the input ifp.
 */
static int
igmp_input_v1_query(struct ifnet *ifp, const struct ip *ip,
    const struct igmp *igmp)
{
	struct ifmultiaddr *ifma;
	struct igmp_ifsoftc *igi;
	struct in_multi *inm;

	/*
	 * IGMPv1 Host Membership Queries SHOULD always be addressed to
	 * 224.0.0.1.  They are always treated as General Queries.
	 * igmp_group is always ignored.  Do not drop it as a userland
	 * daemon may wish to see it.
	 * XXX SMPng: unlocked increments in igmpstat assumed atomic.
	 */
	if (!in_allhosts(ip->ip_dst) || !in_nullhost(igmp->igmp_group)) {
		IGMPSTAT_INC(igps_rcv_badqueries);
		return (0);
	}
	IGMPSTAT_INC(igps_rcv_gen_queries);

	IN_MULTI_LIST_LOCK();
	IGMP_LOCK();

	igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
	KASSERT(igi != NULL, ("%s: no igmp_ifsoftc for ifp %p", __func__, ifp));

	if (igi->igi_flags & IGIF_LOOPBACK) {
		CTR2(KTR_IGMPV3, "ignore v1 query on IGIF_LOOPBACK ifp %p(%s)",
		    ifp, ifp->if_xname);
		goto out_locked;
	}

	/*
	 * Switch to IGMPv1 host compatibility mode.
	 */
	igmp_set_version(igi, IGMP_VERSION_1);

	CTR2(KTR_IGMPV3, "process v1 query on ifp %p(%s)", ifp, ifp->if_xname);

	/*
	 * Start the timers in all of our group records
	 * for the interface on which the query arrived,
	 * except those which are already running.
	 */
	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		inm = inm_ifmultiaddr_get_inm(ifma);
		if (inm == NULL)
			continue;
		if (inm->inm_timer != 0)
			continue;
		switch (inm->inm_state) {
		case IGMP_NOT_MEMBER:
		case IGMP_SILENT_MEMBER:
			break;
		case IGMP_G_QUERY_PENDING_MEMBER:
		case IGMP_SG_QUERY_PENDING_MEMBER:
		case IGMP_REPORTING_MEMBER:
		case IGMP_IDLE_MEMBER:
		case IGMP_LAZY_MEMBER:
		case IGMP_SLEEPING_MEMBER:
		case IGMP_AWAKENING_MEMBER:
			inm->inm_state = IGMP_REPORTING_MEMBER;
			inm->inm_timer = IGMP_RANDOM_DELAY(
			    IGMP_V1V2_MAX_RI * IGMP_FASTHZ);
			V_current_state_timers_running = 1;
			break;
		case IGMP_LEAVING_MEMBER:
			break;
		}
	}

out_locked:
	IGMP_UNLOCK();
	IN_MULTI_LIST_UNLOCK();

	return (0);
}
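/*
 * Note (illustrative, assuming the stock igmp_var.h values of
 * IGMP_V1V2_MAX_RI = 10 seconds and IGMP_RANDOM_DELAY(x) picking a
 * uniform value in 1..x): the timer assignment in the v1 query handler
 * above selects 1..50 fast ticks, so each group reports at a random
 * point within the 10 second maximum response interval of RFC 1112.
 */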
/*
 * Process a received IGMPv2 general or group-specific query.
 */
static int
igmp_input_v2_query(struct ifnet *ifp, const struct ip *ip,
    const struct igmp *igmp)
{
	struct ifmultiaddr *ifma;
	struct igmp_ifsoftc *igi;
	struct in_multi *inm;
	int is_general_query;
	uint16_t timer;

	is_general_query = 0;

	/*
	 * Validate address fields upfront.
	 * XXX SMPng: unlocked increments in igmpstat assumed atomic.
	 */
	if (in_nullhost(igmp->igmp_group)) {
		/*
		 * IGMPv2 General Query.
		 * If this was not sent to the all-hosts group, ignore it.
		 */
		if (!in_allhosts(ip->ip_dst))
			return (0);
		IGMPSTAT_INC(igps_rcv_gen_queries);
		is_general_query = 1;
	} else {
		/* IGMPv2 Group-Specific Query. */
		IGMPSTAT_INC(igps_rcv_group_queries);
	}

	IN_MULTI_LIST_LOCK();
	IGMP_LOCK();

	igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
	KASSERT(igi != NULL, ("%s: no igmp_ifsoftc for ifp %p", __func__, ifp));

	if (igi->igi_flags & IGIF_LOOPBACK) {
		CTR2(KTR_IGMPV3, "ignore v2 query on IGIF_LOOPBACK ifp %p(%s)",
		    ifp, ifp->if_xname);
		goto out_locked;
	}

	/*
	 * Ignore v2 query if in v1 Compatibility Mode.
	 */
	if (igi->igi_version == IGMP_VERSION_1)
		goto out_locked;

	igmp_set_version(igi, IGMP_VERSION_2);

	timer = igmp->igmp_code * IGMP_FASTHZ / IGMP_TIMER_SCALE;
	if (timer == 0)
		timer = 1;
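	/*
	 * Worked arithmetic (illustrative): igmp_code is in tenths of a
	 * second and IGMP_TIMER_SCALE is 10 in the stock headers, so a
	 * code of 100 (10 seconds) yields 100 * 5 / 10 = 50 fast ticks;
	 * the clamp above keeps very small codes from rounding to zero.
	 */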
	if (is_general_query) {
		/*
		 * For each reporting group joined on this
		 * interface, kick the report timer.
		 */
		CTR2(KTR_IGMPV3, "process v2 general query on ifp %p(%s)",
		    ifp, ifp->if_xname);
		CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			inm = inm_ifmultiaddr_get_inm(ifma);
			if (inm == NULL)
				continue;
			igmp_v2_update_group(inm, timer);
		}
	} else {
		/*
		 * Group-specific IGMPv2 query, we need only
		 * look up the single group to process it.
		 */
		inm = inm_lookup(ifp, igmp->igmp_group);
		if (inm != NULL) {
			CTR3(KTR_IGMPV3,
			    "process v2 query 0x%08x on ifp %p(%s)",
			    ntohl(igmp->igmp_group.s_addr), ifp, ifp->if_xname);
			igmp_v2_update_group(inm, timer);
		}
	}

out_locked:
	IGMP_UNLOCK();
	IN_MULTI_LIST_UNLOCK();

	return (0);
}
/*
 * Update the report timer on a group in response to an IGMPv2 query.
 *
 * If we are becoming the reporting member for this group, start the timer.
 * If we already are the reporting member for this group, and timer is
 * below the threshold, reset it.
 *
 * We may be updating the group for the first time since we switched
 * to IGMPv3.  If we are, then we must clear any recorded source lists,
 * and transition to REPORTING state; the group timer is overloaded
 * for group and group-source query responses.
 *
 * Unlike IGMPv3, the delay per group should be jittered
 * to avoid bursts of IGMPv2 reports.
 */
static void
igmp_v2_update_group(struct in_multi *inm, const int timer)
{

	CTR4(KTR_IGMPV3, "0x%08x: %s/%s timer=%d", __func__,
	    ntohl(inm->inm_addr.s_addr), inm->inm_ifp->if_xname, timer);

	IN_MULTI_LIST_LOCK_ASSERT();

	switch (inm->inm_state) {
	case IGMP_NOT_MEMBER:
	case IGMP_SILENT_MEMBER:
		break;
	case IGMP_REPORTING_MEMBER:
		if (inm->inm_timer != 0 &&
		    inm->inm_timer <= timer) {
			CTR1(KTR_IGMPV3, "%s: REPORTING and timer running, "
			    "skipping.", __func__);
			break;
		}
		/* FALLTHROUGH */
	case IGMP_SG_QUERY_PENDING_MEMBER:
	case IGMP_G_QUERY_PENDING_MEMBER:
	case IGMP_IDLE_MEMBER:
	case IGMP_LAZY_MEMBER:
	case IGMP_AWAKENING_MEMBER:
		CTR1(KTR_IGMPV3, "%s: ->REPORTING", __func__);
		inm->inm_state = IGMP_REPORTING_MEMBER;
		inm->inm_timer = IGMP_RANDOM_DELAY(timer);
		V_current_state_timers_running = 1;
		break;
	case IGMP_SLEEPING_MEMBER:
		CTR1(KTR_IGMPV3, "%s: ->AWAKENING", __func__);
		inm->inm_state = IGMP_AWAKENING_MEMBER;
		break;
	case IGMP_LEAVING_MEMBER:
		break;
	}
}
/*
 * Process a received IGMPv3 general, group-specific or
 * group-and-source-specific query.
 * Assumes m has already been pulled up to the full IGMP message length.
 * Return 0 if successful, otherwise an appropriate error code is returned.
 */
static int
igmp_input_v3_query(struct ifnet *ifp, const struct ip *ip,
    /*const*/ struct igmpv3 *igmpv3)
{
	struct igmp_ifsoftc *igi;
	struct in_multi *inm;
	int is_general_query;
	uint32_t maxresp, nsrc, qqi;
	uint16_t timer;
	uint8_t qrv;

	is_general_query = 0;

	CTR2(KTR_IGMPV3, "process v3 query on ifp %p(%s)", ifp, ifp->if_xname);

	maxresp = igmpv3->igmp_code;	/* in 1/10ths of a second */
	if (maxresp >= 128) {
		maxresp = IGMP_MANT(igmpv3->igmp_code) <<
		    (IGMP_EXP(igmpv3->igmp_code) + 3);
	}
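	/*
	 * Worked example (illustrative, assuming the stock igmp_var.h
	 * macros IGMP_MANT(x) = (x) & 0x0f and IGMP_EXP(x) =
	 * ((x) >> 4) & 0x07): a Max Resp Code of 0x8A decodes as
	 * 10 << (0 + 3) = 80 tenths of a second; codes below 128 are
	 * taken literally.
	 */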
	/*
	 * Robustness must never be less than 2 for on-wire IGMPv3.
	 * FUTURE: Check if ifp has IGIF_LOOPBACK set, as we will make
	 * an exception for interfaces whose IGMPv3 state changes
	 * are redirected to loopback (e.g. MANET).
	 */
	qrv = IGMP_QRV(igmpv3->igmp_misc);
	if (qrv < 2) {
		CTR3(KTR_IGMPV3, "%s: clamping qrv %d to %d", __func__,
		    qrv, IGMP_RV_INIT);
		qrv = IGMP_RV_INIT;
	}

	qqi = igmpv3->igmp_qqi;
	if (qqi >= 128) {
		qqi = IGMP_MANT(igmpv3->igmp_qqi) <<
		    (IGMP_EXP(igmpv3->igmp_qqi) + 3);
	}

	timer = maxresp * IGMP_FASTHZ / IGMP_TIMER_SCALE;
	if (timer == 0)
		timer = 1;

	nsrc = ntohs(igmpv3->igmp_numsrc);

	/*
	 * Validate address fields and versions upfront before
	 * accepting v3 query.
	 * XXX SMPng: Unlocked access to igmpstat counters here.
	 */
	if (in_nullhost(igmpv3->igmp_group)) {
		/*
		 * IGMPv3 General Query.
		 *
		 * General Queries SHOULD be directed to 224.0.0.1.
		 * A general query with a source list has undefined
		 * behaviour; discard it.
		 */
		IGMPSTAT_INC(igps_rcv_gen_queries);
		if (!in_allhosts(ip->ip_dst) || nsrc > 0) {
			IGMPSTAT_INC(igps_rcv_badqueries);
			return (0);
		}
		is_general_query = 1;
	} else {
		/* Group or group-source specific query. */
		if (nsrc == 0)
			IGMPSTAT_INC(igps_rcv_group_queries);
		else
			IGMPSTAT_INC(igps_rcv_gsr_queries);
	}

	IN_MULTI_LIST_LOCK();
	IGMP_LOCK();

	igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
	KASSERT(igi != NULL, ("%s: no igmp_ifsoftc for ifp %p", __func__, ifp));

	if (igi->igi_flags & IGIF_LOOPBACK) {
		CTR2(KTR_IGMPV3, "ignore v3 query on IGIF_LOOPBACK ifp %p(%s)",
		    ifp, ifp->if_xname);
		goto out_locked;
	}

	/*
	 * Discard the v3 query if we're in Compatibility Mode.
	 * The RFC is not obviously worded that hosts need to stay in
	 * compatibility mode until the Old Version Querier Present
	 * timer expires.
	 */
	if (igi->igi_version != IGMP_VERSION_3) {
		CTR3(KTR_IGMPV3, "ignore v3 query in v%d mode on ifp %p(%s)",
		    igi->igi_version, ifp, ifp->if_xname);
		goto out_locked;
	}

	igmp_set_version(igi, IGMP_VERSION_3);
	igi->igi_rv = qrv;
	igi->igi_qi = qqi;
	igi->igi_qri = maxresp;

	CTR4(KTR_IGMPV3, "%s: qrv %d qi %d qri %d", __func__, qrv, qqi,
	    maxresp);

	if (is_general_query) {
		/*
		 * Schedule a current-state report on this ifp for
		 * all groups, possibly containing source lists.
		 * If there is a pending General Query response
		 * scheduled earlier than the selected delay, do
		 * not schedule any other reports.
		 * Otherwise, reset the interface timer.
		 */
		CTR2(KTR_IGMPV3, "process v3 general query on ifp %p(%s)",
		    ifp, ifp->if_xname);
		if (igi->igi_v3_timer == 0 || igi->igi_v3_timer >= timer) {
			igi->igi_v3_timer = IGMP_RANDOM_DELAY(timer);
			V_interface_timers_running = 1;
		}
	} else {
		/*
		 * Group-source-specific queries are throttled on
		 * a per-group basis to defeat denial-of-service attempts.
		 * Queries for groups we are not a member of on this
		 * link are simply ignored.
		 */
		inm = inm_lookup(ifp, igmpv3->igmp_group);
		if (inm == NULL)
			goto out_locked;
		if (nsrc > 0) {
			if (!ratecheck(&inm->inm_lastgsrtv,
			    &V_igmp_gsrdelay)) {
				CTR1(KTR_IGMPV3, "%s: GS query throttled.",
				    __func__);
				IGMPSTAT_INC(igps_drop_gsr_queries);
				goto out_locked;
			}
		}
		CTR3(KTR_IGMPV3, "process v3 0x%08x query on ifp %p(%s)",
		     ntohl(igmpv3->igmp_group.s_addr), ifp, ifp->if_xname);
		/*
		 * If there is a pending General Query response
		 * scheduled sooner than the selected delay, no
		 * further report need be scheduled.
		 * Otherwise, prepare to respond to the
		 * group-specific or group-and-source query.
		 */
		if (igi->igi_v3_timer == 0 || igi->igi_v3_timer >= timer)
			igmp_input_v3_group_query(inm, igi, timer, igmpv3);
	}

out_locked:
	IGMP_UNLOCK();
	IN_MULTI_LIST_UNLOCK();

	return (0);
}
/*
 * Process a received IGMPv3 group-specific or group-and-source-specific
 * query.
 * Return <0 if any error occurred.  Currently this is ignored.
 */
static int
igmp_input_v3_group_query(struct in_multi *inm, struct igmp_ifsoftc *igi,
    int timer, /*const*/ struct igmpv3 *igmpv3)
{
	int retval;
	uint16_t nsrc;

	IN_MULTI_LIST_LOCK_ASSERT();
	IGMP_LOCK_ASSERT();

	retval = 0;

	switch (inm->inm_state) {
	case IGMP_NOT_MEMBER:
	case IGMP_SILENT_MEMBER:
	case IGMP_SLEEPING_MEMBER:
	case IGMP_LAZY_MEMBER:
	case IGMP_AWAKENING_MEMBER:
	case IGMP_IDLE_MEMBER:
	case IGMP_LEAVING_MEMBER:
		return (retval);
	case IGMP_REPORTING_MEMBER:
	case IGMP_G_QUERY_PENDING_MEMBER:
	case IGMP_SG_QUERY_PENDING_MEMBER:
		break;
	}

	nsrc = ntohs(igmpv3->igmp_numsrc);

	/*
	 * Deal with group-specific queries upfront.
	 * If any group query is already pending, purge any recorded
	 * source-list state if it exists, and schedule a query response
	 * for this group-specific query.
	 */
	if (nsrc == 0) {
		if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER ||
		    inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER) {
			inm_clear_recorded(inm);
			timer = min(inm->inm_timer, timer);
		}
		inm->inm_state = IGMP_G_QUERY_PENDING_MEMBER;
		inm->inm_timer = IGMP_RANDOM_DELAY(timer);
		V_current_state_timers_running = 1;
		return (retval);
	}

	/*
	 * Deal with the case where a group-and-source-specific query has
	 * been received but a group-specific query is already pending.
	 */
	if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER) {
		timer = min(inm->inm_timer, timer);
		inm->inm_timer = IGMP_RANDOM_DELAY(timer);
		V_current_state_timers_running = 1;
		return (retval);
	}

	/*
	 * Finally, deal with the case where a group-and-source-specific
	 * query has been received, where a response to a previous g-s-r
	 * query exists, or none exists.
	 * In this case, we need to parse the source-list which the Querier
	 * has provided us with and check if we have any source list filter
	 * entries at T1 for these sources.  If we do not, there is no need
	 * to schedule a report and the query may be dropped.
	 * If we do, we must record them and schedule a current-state
	 * report for those sources.
	 * FIXME: Handling source lists larger than 1 mbuf requires that
	 * we pass the mbuf chain pointer down to this function, and use
	 * m_getptr() to walk the chain.
	 */
	if (inm->inm_nsrc > 0) {
		const struct in_addr *ap;
		int i, nrecorded;

		ap = (const struct in_addr *)(igmpv3 + 1);
		nrecorded = 0;
		for (i = 0; i < nsrc; i++, ap++) {
			retval = inm_record_source(inm, ap->s_addr);
			if (retval < 0)
				break;
			nrecorded += retval;
		}
		if (nrecorded > 0) {
			CTR1(KTR_IGMPV3,
			    "%s: schedule response to SG query", __func__);
			inm->inm_state = IGMP_SG_QUERY_PENDING_MEMBER;
			inm->inm_timer = IGMP_RANDOM_DELAY(timer);
			V_current_state_timers_running = 1;
		}
	}

	return (retval);
}
/*
 * Process a received IGMPv1 host membership report.
 *
 * NOTE: 0.0.0.0 workaround breaks const correctness.
 */
static int
igmp_input_v1_report(struct ifnet *ifp, /*const*/ struct ip *ip,
    /*const*/ struct igmp *igmp)
{
	struct in_ifaddr *ia;
	struct in_multi *inm;

	IGMPSTAT_INC(igps_rcv_reports);

	if (ifp->if_flags & IFF_LOOPBACK)
		return (0);

	if (!IN_MULTICAST(ntohl(igmp->igmp_group.s_addr)) ||
	    !in_hosteq(igmp->igmp_group, ip->ip_dst)) {
		IGMPSTAT_INC(igps_rcv_badreports);
		return (EINVAL);
	}

	/*
	 * RFC 3376, Section 4.2.13, 9.2, 9.3:
	 * Booting clients may use the source address 0.0.0.0. Some
	 * IGMP daemons may not know how to use IP_RECVIF to determine
	 * the interface upon which this message was received.
	 * Replace 0.0.0.0 with the subnet address if told to do so.
	 */
	if (V_igmp_recvifkludge && in_nullhost(ip->ip_src)) {
		IFP_TO_IA(ifp, ia);
		if (ia != NULL)
			ip->ip_src.s_addr = htonl(ia->ia_subnet);
	}

	CTR3(KTR_IGMPV3, "process v1 report 0x%08x on ifp %p(%s)",
	     ntohl(igmp->igmp_group.s_addr), ifp, ifp->if_xname);

	/*
	 * IGMPv1 report suppression.
	 * If we are a member of this group, and our membership should be
	 * reported, stop our group timer and transition to the 'lazy' state.
	 */
	IN_MULTI_LIST_LOCK();
	inm = inm_lookup(ifp, igmp->igmp_group);
	if (inm != NULL) {
		struct igmp_ifsoftc *igi;

		inm->inm_timer = 0;
		igi = inm->inm_igi;
		KASSERT(igi != NULL,
		    ("%s: no igi for ifp %p", __func__, ifp));

		IGMPSTAT_INC(igps_rcv_ourreports);

		/*
		 * If we are in IGMPv3 host mode, do not allow the
		 * other host's IGMPv1 report to suppress our reports
		 * unless explicitly configured to do so.
		 */
		if (igi->igi_version == IGMP_VERSION_3) {
			if (V_igmp_legacysupp)
				igmp_v3_suppress_group_record(inm);
			goto out_locked;
		}

		inm->inm_timer = 0;

		switch (inm->inm_state) {
		case IGMP_NOT_MEMBER:
		case IGMP_SILENT_MEMBER:
			break;
		case IGMP_IDLE_MEMBER:
		case IGMP_LAZY_MEMBER:
		case IGMP_AWAKENING_MEMBER:
			CTR3(KTR_IGMPV3,
			    "report suppressed for 0x%08x on ifp %p(%s)",
			    ntohl(igmp->igmp_group.s_addr), ifp,
			    ifp->if_xname);
		case IGMP_SLEEPING_MEMBER:
			inm->inm_state = IGMP_SLEEPING_MEMBER;
			break;
		case IGMP_REPORTING_MEMBER:
			CTR3(KTR_IGMPV3,
			    "report suppressed for 0x%08x on ifp %p(%s)",
			    ntohl(igmp->igmp_group.s_addr), ifp,
			    ifp->if_xname);
			if (igi->igi_version == IGMP_VERSION_1)
				inm->inm_state = IGMP_LAZY_MEMBER;
			else if (igi->igi_version == IGMP_VERSION_2)
				inm->inm_state = IGMP_SLEEPING_MEMBER;
			break;
		case IGMP_G_QUERY_PENDING_MEMBER:
		case IGMP_SG_QUERY_PENDING_MEMBER:
		case IGMP_LEAVING_MEMBER:
			break;
		}
	}

out_locked:
	IN_MULTI_LIST_UNLOCK();

	return (0);
}
/*
 * Process a received IGMPv2 host membership report.
 *
 * NOTE: 0.0.0.0 workaround breaks const correctness.
 */
static int
igmp_input_v2_report(struct ifnet *ifp, /*const*/ struct ip *ip,
    /*const*/ struct igmp *igmp)
{
	struct in_ifaddr *ia;
	struct in_multi *inm;

	/*
	 * Make sure we don't hear our own membership report.  Fast
	 * leave requires knowing that we are the only member of a
	 * group.
	 */
	IFP_TO_IA(ifp, ia);
	if (ia != NULL && in_hosteq(ip->ip_src, IA_SIN(ia)->sin_addr)) {
		return (0);
	}

	IGMPSTAT_INC(igps_rcv_reports);

	if (ifp->if_flags & IFF_LOOPBACK) {
		return (0);
	}

	if (!IN_MULTICAST(ntohl(igmp->igmp_group.s_addr)) ||
	    !in_hosteq(igmp->igmp_group, ip->ip_dst)) {
		IGMPSTAT_INC(igps_rcv_badreports);
		return (EINVAL);
	}

	/*
	 * RFC 3376, Section 4.2.13, 9.2, 9.3:
	 * Booting clients may use the source address 0.0.0.0. Some
	 * IGMP daemons may not know how to use IP_RECVIF to determine
	 * the interface upon which this message was received.
	 * Replace 0.0.0.0 with the subnet address if told to do so.
	 */
	if (V_igmp_recvifkludge && in_nullhost(ip->ip_src)) {
		if (ia != NULL)
			ip->ip_src.s_addr = htonl(ia->ia_subnet);
	}

	CTR3(KTR_IGMPV3, "process v2 report 0x%08x on ifp %p(%s)",
	     ntohl(igmp->igmp_group.s_addr), ifp, ifp->if_xname);

	/*
	 * IGMPv2 report suppression.
	 * If we are a member of this group, and our membership should be
	 * reported, and our group timer is pending or about to be reset,
	 * stop our group timer by transitioning to the 'lazy' state.
	 */
	IN_MULTI_LIST_LOCK();
	inm = inm_lookup(ifp, igmp->igmp_group);
	if (inm != NULL) {
		struct igmp_ifsoftc *igi;

		igi = inm->inm_igi;
		KASSERT(igi != NULL, ("%s: no igi for ifp %p", __func__, ifp));

		IGMPSTAT_INC(igps_rcv_ourreports);

		/*
		 * If we are in IGMPv3 host mode, do not allow the
		 * other host's IGMPv2 report to suppress our reports
		 * unless explicitly configured to do so.
		 */
		if (igi->igi_version == IGMP_VERSION_3) {
			if (V_igmp_legacysupp)
				igmp_v3_suppress_group_record(inm);
			goto out_locked;
		}

		inm->inm_timer = 0;

		switch (inm->inm_state) {
		case IGMP_NOT_MEMBER:
		case IGMP_SILENT_MEMBER:
		case IGMP_SLEEPING_MEMBER:
			break;
		case IGMP_REPORTING_MEMBER:
		case IGMP_IDLE_MEMBER:
		case IGMP_AWAKENING_MEMBER:
			CTR3(KTR_IGMPV3,
			    "report suppressed for 0x%08x on ifp %p(%s)",
			    ntohl(igmp->igmp_group.s_addr), ifp, ifp->if_xname);
		case IGMP_LAZY_MEMBER:
			inm->inm_state = IGMP_LAZY_MEMBER;
			break;
		case IGMP_G_QUERY_PENDING_MEMBER:
		case IGMP_SG_QUERY_PENDING_MEMBER:
		case IGMP_LEAVING_MEMBER:
			break;
		}
	}

out_locked:
	IN_MULTI_LIST_UNLOCK();

	return (0);
}
int
igmp_input(struct mbuf **mp, int *offp, int proto)
{
	struct ifnet *ifp;
	struct igmp *igmp;
	struct ip *ip;
	struct mbuf *m;
	int igmplen;
	int iphlen;
	int minlen;
	int queryver;

	CTR3(KTR_IGMPV3, "%s: called w/mbuf (%p,%d)", __func__, *mp, *offp);

	m = *mp;
	ifp = m->m_pkthdr.rcvif;
	*mp = NULL;

	IGMPSTAT_INC(igps_rcv_total);

	ip = mtod(m, struct ip *);
	iphlen = *offp;
	igmplen = ntohs(ip->ip_len) - iphlen;

	/*
	 * Validate lengths.
	 */
	if (igmplen < IGMP_MINLEN) {
		IGMPSTAT_INC(igps_rcv_tooshort);
		m_freem(m);
		return (IPPROTO_DONE);
	}

	/*
	 * Always pullup to the minimum size for v1/v2 or v3
	 * to amortize calls to m_pullup().
	 */
	minlen = iphlen;
	if (igmplen >= IGMP_V3_QUERY_MINLEN)
		minlen += IGMP_V3_QUERY_MINLEN;
	else
		minlen += IGMP_MINLEN;
	if ((!M_WRITABLE(m) || m->m_len < minlen) &&
	    (m = m_pullup(m, minlen)) == NULL) {
		IGMPSTAT_INC(igps_rcv_tooshort);
		return (IPPROTO_DONE);
	}
	ip = mtod(m, struct ip *);

	/*
	 * Validate checksum.
	 */
	m->m_data += iphlen;
	m->m_len -= iphlen;
	igmp = mtod(m, struct igmp *);
	if (in_cksum(m, igmplen)) {
		IGMPSTAT_INC(igps_rcv_badsum);
		m_freem(m);
		return (IPPROTO_DONE);
	}
	m->m_data -= iphlen;
	m->m_len += iphlen;

	/*
	 * IGMP control traffic is link-scope, and must have a TTL of 1.
	 * DVMRP traffic (e.g. mrinfo, mtrace) is an exception;
	 * probe packets may come from beyond the LAN.
	 */
	if (igmp->igmp_type != IGMP_DVMRP && ip->ip_ttl != 1) {
		IGMPSTAT_INC(igps_rcv_badttl);
		m_freem(m);
		return (IPPROTO_DONE);
	}

	switch (igmp->igmp_type) {
	case IGMP_HOST_MEMBERSHIP_QUERY:
		if (igmplen == IGMP_MINLEN) {
			if (igmp->igmp_code == 0)
				queryver = IGMP_VERSION_1;
			else
				queryver = IGMP_VERSION_2;
		} else if (igmplen >= IGMP_V3_QUERY_MINLEN) {
			queryver = IGMP_VERSION_3;
		} else {
			IGMPSTAT_INC(igps_rcv_tooshort);
			m_freem(m);
			return (IPPROTO_DONE);
		}

		switch (queryver) {
		case IGMP_VERSION_1:
			IGMPSTAT_INC(igps_rcv_v1v2_queries);
			if (!V_igmp_v1enable)
				break;
			if (igmp_input_v1_query(ifp, ip, igmp) != 0) {
				m_freem(m);
				return (IPPROTO_DONE);
			}
			break;

		case IGMP_VERSION_2:
			IGMPSTAT_INC(igps_rcv_v1v2_queries);
			if (!V_igmp_v2enable)
				break;
			if (igmp_input_v2_query(ifp, ip, igmp) != 0) {
				m_freem(m);
				return (IPPROTO_DONE);
			}
			break;

		case IGMP_VERSION_3: {
			struct igmpv3 *igmpv3;
			uint16_t igmpv3len;
			uint16_t nsrc;

			IGMPSTAT_INC(igps_rcv_v3_queries);
			igmpv3 = (struct igmpv3 *)igmp;
			/*
			 * Validate length based on source count.
			 */
			nsrc = ntohs(igmpv3->igmp_numsrc);
			if (nsrc * sizeof(in_addr_t) >
			    UINT16_MAX - iphlen - IGMP_V3_QUERY_MINLEN) {
				IGMPSTAT_INC(igps_rcv_tooshort);
				m_freem(m);
				return (IPPROTO_DONE);
			}
			/*
			 * m_pullup() may modify m, so pullup in
			 * this scope.
			 */
			igmpv3len = iphlen + IGMP_V3_QUERY_MINLEN +
			    sizeof(struct in_addr) * nsrc;
			if ((!M_WRITABLE(m) ||
			    m->m_len < igmpv3len) &&
			    (m = m_pullup(m, igmpv3len)) == NULL) {
				IGMPSTAT_INC(igps_rcv_tooshort);
				return (IPPROTO_DONE);
			}
			igmpv3 = (struct igmpv3 *)(mtod(m, uint8_t *)
			    + iphlen);
			if (igmp_input_v3_query(ifp, ip, igmpv3) != 0) {
				m_freem(m);
				return (IPPROTO_DONE);
			}
		}
			break;
		}
		break;

	case IGMP_v1_HOST_MEMBERSHIP_REPORT:
		if (!V_igmp_v1enable)
			break;
		if (igmp_input_v1_report(ifp, ip, igmp) != 0) {
			m_freem(m);
			return (IPPROTO_DONE);
		}
		break;

	case IGMP_v2_HOST_MEMBERSHIP_REPORT:
		if (!V_igmp_v2enable)
			break;
		if (!ip_checkrouteralert(m))
			IGMPSTAT_INC(igps_rcv_nora);
		if (igmp_input_v2_report(ifp, ip, igmp) != 0) {
			m_freem(m);
			return (IPPROTO_DONE);
		}
		break;

	case IGMP_v3_HOST_MEMBERSHIP_REPORT:
		/*
		 * Hosts do not need to process IGMPv3 membership reports,
		 * as report suppression is no longer required.
		 */
		if (!ip_checkrouteralert(m))
			IGMPSTAT_INC(igps_rcv_nora);
		break;

	default:
		break;
	}

	/*
	 * Pass all valid IGMP packets up to any process(es) listening on a
	 * raw IGMP socket.
	 */
	*mp = m;
	return (rip_input(mp, offp, proto));
}
/*
 * Fast timeout handler (global).
 * VIMAGE: Timeout handlers are expected to service all vimages.
 */
static struct callout igmpfast_callout;
static void
igmp_fasttimo(void *arg __unused)
{
	struct epoch_tracker et;
	VNET_ITERATOR_DECL(vnet_iter);

	NET_EPOCH_ENTER(et);
	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		igmp_fasttimo_vnet();
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
	NET_EPOCH_EXIT(et);

	callout_reset(&igmpfast_callout, hz / IGMP_FASTHZ, igmp_fasttimo, NULL);
}
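/*
 * Note (illustrative): with IGMP_FASTHZ = 5 and the common default of
 * hz = 1000, the callout_reset() above re-arms the fast timer every
 * 1000 / 5 = 200 ticks, i.e. 200 ms, giving the five fast timeouts per
 * second that all the report timers in this file are counted in.
 */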
/*
 * Fast timeout handler (per-vnet).
 * Sends are shuffled off to a netisr to deal with Giant.
 *
 * VIMAGE: Assume caller has set up our curvnet.
 */
static void
igmp_fasttimo_vnet(void)
{
	struct mbufq scq;	/* State-change packets */
	struct mbufq qrq;	/* Query response packets */
	struct ifnet *ifp;
	struct igmp_ifsoftc *igi;
	struct ifmultiaddr *ifma;
	struct in_multi *inm;
	struct in_multi_head inm_free_tmp;
	int loop, uri_fasthz;

	loop = 0;
	uri_fasthz = 0;

	/*
	 * Quick check to see if any work needs to be done, in order to
	 * minimize the overhead of fasttimo processing.
	 * SMPng: XXX Unlocked reads.
	 */
	if (!V_current_state_timers_running &&
	    !V_interface_timers_running &&
	    !V_state_change_timers_running)
		return;

	SLIST_INIT(&inm_free_tmp);
	IN_MULTI_LIST_LOCK();
	IGMP_LOCK();

	/*
	 * IGMPv3 General Query response timer processing.
	 */
	if (V_interface_timers_running) {
		CTR1(KTR_IGMPV3, "%s: interface timers running", __func__);

		V_interface_timers_running = 0;
		LIST_FOREACH(igi, &V_igi_head, igi_link) {
			if (igi->igi_v3_timer == 0) {
				/* Do nothing. */
			} else if (--igi->igi_v3_timer == 0) {
				igmp_v3_dispatch_general_query(igi);
			} else {
				V_interface_timers_running = 1;
			}
		}
	}

	if (!V_current_state_timers_running &&
	    !V_state_change_timers_running)
		goto out_locked;

	V_current_state_timers_running = 0;
	V_state_change_timers_running = 0;

	CTR1(KTR_IGMPV3, "%s: state change timers running", __func__);

	/*
	 * IGMPv1/v2/v3 host report and state-change timer processing.
	 * Note: Processing a v3 group timer may remove a node.
	 */
	LIST_FOREACH(igi, &V_igi_head, igi_link) {
		ifp = igi->igi_ifp;

		if (igi->igi_version == IGMP_VERSION_3) {
			loop = (igi->igi_flags & IGIF_LOOPBACK) ? 1 : 0;
			uri_fasthz = IGMP_RANDOM_DELAY(igi->igi_uri *
			    IGMP_FASTHZ);
			mbufq_init(&qrq, IGMP_MAX_G_GS_PACKETS);
			mbufq_init(&scq, IGMP_MAX_STATE_CHANGE_PACKETS);
		}

		IF_ADDR_WLOCK(ifp);
		CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			inm = inm_ifmultiaddr_get_inm(ifma);
			if (inm == NULL)
				continue;
			switch (igi->igi_version) {
			case IGMP_VERSION_1:
			case IGMP_VERSION_2:
				igmp_v1v2_process_group_timer(inm,
				    igi->igi_version);
				break;
			case IGMP_VERSION_3:
				igmp_v3_process_group_timers(&inm_free_tmp, &qrq,
				    &scq, inm, uri_fasthz);
				break;
			}
		}
		IF_ADDR_WUNLOCK(ifp);

		if (igi->igi_version == IGMP_VERSION_3) {
			igmp_dispatch_queue(&qrq, 0, loop);
			igmp_dispatch_queue(&scq, 0, loop);

			/*
			 * Free the in_multi reference(s) for this
			 * IGMP lifecycle.
			 */
			inm_release_list_deferred(&inm_free_tmp);
		}
	}

out_locked:
	IGMP_UNLOCK();
	IN_MULTI_LIST_UNLOCK();
}
/*
 * Update host report group timer for IGMPv1/v2.
 * Will update the global pending timer flags.
 */
static void
igmp_v1v2_process_group_timer(struct in_multi *inm, const int version)
{
	int report_timer_expired;

	IN_MULTI_LIST_LOCK_ASSERT();
	IGMP_LOCK_ASSERT();

	if (inm->inm_timer == 0) {
		report_timer_expired = 0;
	} else if (--inm->inm_timer == 0) {
		report_timer_expired = 1;
	} else {
		V_current_state_timers_running = 1;
		return;
	}

	switch (inm->inm_state) {
	case IGMP_NOT_MEMBER:
	case IGMP_SILENT_MEMBER:
	case IGMP_IDLE_MEMBER:
	case IGMP_LAZY_MEMBER:
	case IGMP_SLEEPING_MEMBER:
	case IGMP_AWAKENING_MEMBER:
		break;
	case IGMP_REPORTING_MEMBER:
		if (report_timer_expired) {
			inm->inm_state = IGMP_IDLE_MEMBER;
			(void)igmp_v1v2_queue_report(inm,
			    (version == IGMP_VERSION_2) ?
			     IGMP_v2_HOST_MEMBERSHIP_REPORT :
			     IGMP_v1_HOST_MEMBERSHIP_REPORT);
		}
		break;
	case IGMP_G_QUERY_PENDING_MEMBER:
	case IGMP_SG_QUERY_PENDING_MEMBER:
	case IGMP_LEAVING_MEMBER:
		break;
	}
}
/*
 * Update a group's timers for IGMPv3.
 * Will update the global pending timer flags.
 * Note: Unlocked read from igi.
 */
static void
igmp_v3_process_group_timers(struct in_multi_head *inmh,
    struct mbufq *qrq, struct mbufq *scq,
    struct in_multi *inm, const int uri_fasthz)
{
	int query_response_timer_expired;
	int state_change_retransmit_timer_expired;

	IN_MULTI_LIST_LOCK_ASSERT();
	IGMP_LOCK_ASSERT();

	query_response_timer_expired = 0;
	state_change_retransmit_timer_expired = 0;

	/*
	 * During a transition from v1/v2 compatibility mode back to v3,
	 * a group record in REPORTING state may still have its group
	 * timer active.  This is a no-op in this function; it is easier
	 * to deal with it here than to complicate the slow-timeout path.
	 */
	if (inm->inm_timer == 0) {
		query_response_timer_expired = 0;
	} else if (--inm->inm_timer == 0) {
		query_response_timer_expired = 1;
	} else {
		V_current_state_timers_running = 1;
	}

	if (inm->inm_sctimer == 0) {
		state_change_retransmit_timer_expired = 0;
	} else if (--inm->inm_sctimer == 0) {
		state_change_retransmit_timer_expired = 1;
	} else {
		V_state_change_timers_running = 1;
	}

	/* We are in fasttimo, so be quick about it. */
	if (!state_change_retransmit_timer_expired &&
	    !query_response_timer_expired)
		return;

	switch (inm->inm_state) {
	case IGMP_NOT_MEMBER:
	case IGMP_SILENT_MEMBER:
	case IGMP_SLEEPING_MEMBER:
	case IGMP_LAZY_MEMBER:
	case IGMP_AWAKENING_MEMBER:
	case IGMP_IDLE_MEMBER:
		break;
	case IGMP_G_QUERY_PENDING_MEMBER:
	case IGMP_SG_QUERY_PENDING_MEMBER:
		/*
		 * Respond to a previously pending Group-Specific
		 * or Group-and-Source-Specific query by enqueueing
		 * the appropriate Current-State report for
		 * immediate transmission.
		 */
		if (query_response_timer_expired) {
			int retval __unused;

			retval = igmp_v3_enqueue_group_record(qrq, inm, 0, 1,
			    (inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER));
			CTR2(KTR_IGMPV3, "%s: enqueue record = %d",
			    __func__, retval);
			inm->inm_state = IGMP_REPORTING_MEMBER;
			/* XXX Clear recorded sources for next time. */
			inm_clear_recorded(inm);
		}
		/* FALLTHROUGH */
	case IGMP_REPORTING_MEMBER:
	case IGMP_LEAVING_MEMBER:
		if (state_change_retransmit_timer_expired) {
			/*
			 * State-change retransmission timer fired.
			 * If there are any further pending retransmissions,
			 * set the global pending state-change flag, and
			 * reset the timer.
			 */
			if (--inm->inm_scrv > 0) {
				inm->inm_sctimer = uri_fasthz;
				V_state_change_timers_running = 1;
			}
			/*
			 * Retransmit the previously computed state-change
			 * report.  If there are no further pending
			 * retransmissions, the mbuf queue will be consumed.
			 * Update T0 state to T1 as we have now sent
			 * a state-change.
			 */
			(void)igmp_v3_merge_state_changes(inm, scq);

			inm_commit(inm);
			CTR3(KTR_IGMPV3, "%s: T1 -> T0 for 0x%08x/%s", __func__,
			    ntohl(inm->inm_addr.s_addr),
			    inm->inm_ifp->if_xname);

			/*
			 * If we are leaving the group for good, make sure
			 * we release IGMP's reference to it.
			 * This release must be deferred using a SLIST,
			 * as we are called from a loop which traverses
			 * the in_ifmultiaddr TAILQ.
			 */
			if (inm->inm_state == IGMP_LEAVING_MEMBER &&
			    inm->inm_scrv == 0) {
				inm->inm_state = IGMP_NOT_MEMBER;
				inm_rele_locked(inmh, inm);
			}
		}
		break;
	}
}
/*
 * Suppress a group's pending response to a group or source/group query.
 *
 * Do NOT suppress state changes. This leads to IGMPv3 inconsistency.
 * Do NOT update ST1/ST0 as this operation merely suppresses
 * the currently pending group record.
 * Do NOT suppress the response to a general query. It is possible but
 * it would require adding another state or flag.
 */
static void
igmp_v3_suppress_group_record(struct in_multi *inm)
{

	IN_MULTI_LIST_LOCK_ASSERT();

	KASSERT(inm->inm_igi->igi_version == IGMP_VERSION_3,
	    ("%s: not IGMPv3 mode on link", __func__));

	if (inm->inm_state != IGMP_G_QUERY_PENDING_MEMBER &&
	    inm->inm_state != IGMP_SG_QUERY_PENDING_MEMBER)
		return;

	if (inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER)
		inm_clear_recorded(inm);

	inm->inm_timer = 0;
	inm->inm_state = IGMP_REPORTING_MEMBER;
}
/*
 * Switch to a different IGMP version on the given interface,
 * as per Section 7.2.1.
 */
static void
igmp_set_version(struct igmp_ifsoftc *igi, const int version)
{
	int old_version_timer;

	IGMP_LOCK_ASSERT();

	CTR4(KTR_IGMPV3, "%s: switching to v%d on ifp %p(%s)", __func__,
	    version, igi->igi_ifp, igi->igi_ifp->if_xname);

	if (version == IGMP_VERSION_1 || version == IGMP_VERSION_2) {
		/*
		 * Compute the "Older Version Querier Present" timer as per
		 * Section 8.12.
		 */
		old_version_timer = igi->igi_rv * igi->igi_qi + igi->igi_qri;
		old_version_timer *= IGMP_SLOWHZ;
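		/*
		 * Worked arithmetic (illustrative, assuming the stock
		 * defaults IGMP_RV_INIT = 2, IGMP_QI_INIT = 125 s and
		 * IGMP_QRI_INIT = 10 s): the timer computed above is
		 * 2 * 125 + 10 = 260 seconds, i.e. 520 slow ticks at
		 * IGMP_SLOWHZ = 2.
		 */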
		if (version == IGMP_VERSION_1) {
			igi->igi_v1_timer = old_version_timer;
			igi->igi_v2_timer = 0;
		} else if (version == IGMP_VERSION_2) {
			igi->igi_v1_timer = 0;
			igi->igi_v2_timer = old_version_timer;
		}
	}

	if (igi->igi_v1_timer == 0 && igi->igi_v2_timer > 0) {
		if (igi->igi_version != IGMP_VERSION_2) {
			igi->igi_version = IGMP_VERSION_2;
			igmp_v3_cancel_link_timers(igi);
		}
	} else if (igi->igi_v1_timer > 0) {
		if (igi->igi_version != IGMP_VERSION_1) {
			igi->igi_version = IGMP_VERSION_1;
			igmp_v3_cancel_link_timers(igi);
		}
	}
}
/*
 * Cancel pending IGMPv3 timers for the given link and all groups
 * joined on it; state-change, general-query, and group-query timers.
 *
 * Only ever called on a transition from v3 to Compatibility mode. Kill
 * the timers stone dead (this may be expensive for large N groups), they
 * will be restarted if Compatibility Mode deems that they must be due to
 * a query received.
 */
static void
igmp_v3_cancel_link_timers(struct igmp_ifsoftc *igi)
{
	struct ifmultiaddr *ifma;
	struct ifnet *ifp;
	struct in_multi *inm;
	struct in_multi_head inm_free_tmp;

	CTR3(KTR_IGMPV3, "%s: cancel v3 timers on ifp %p(%s)", __func__,
	    igi->igi_ifp, igi->igi_ifp->if_xname);

	IN_MULTI_LIST_LOCK_ASSERT();
	IGMP_LOCK_ASSERT();

	SLIST_INIT(&inm_free_tmp);

	/*
	 * Stop the v3 General Query Response on this link stone dead.
	 * If fasttimo is woken up due to V_interface_timers_running,
	 * the flag will be cleared if there are no pending link timers.
	 */
	igi->igi_v3_timer = 0;

	/*
	 * Now clear the current-state and state-change report timers
	 * for all memberships scoped to this link.
	 */
	ifp = igi->igi_ifp;
	IF_ADDR_WLOCK(ifp);
	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		inm = inm_ifmultiaddr_get_inm(ifma);
		if (inm == NULL)
			continue;
		switch (inm->inm_state) {
		case IGMP_NOT_MEMBER:
		case IGMP_SILENT_MEMBER:
		case IGMP_IDLE_MEMBER:
		case IGMP_LAZY_MEMBER:
		case IGMP_SLEEPING_MEMBER:
		case IGMP_AWAKENING_MEMBER:
			/*
			 * These states are either not relevant in v3 mode,
			 * or are unreported. Do nothing.
			 */
			break;
		case IGMP_LEAVING_MEMBER:
			/*
			 * If we are leaving the group and switching to
			 * compatibility mode, we need to release the final
			 * reference held for issuing the INCLUDE {}, and
			 * transition to REPORTING to ensure the host leave
			 * message is sent upstream to the old querier --
			 * transition to NOT would lose the leave and race.
			 */
			inm_rele_locked(&inm_free_tmp, inm);
			/* FALLTHROUGH */
		case IGMP_G_QUERY_PENDING_MEMBER:
		case IGMP_SG_QUERY_PENDING_MEMBER:
			inm_clear_recorded(inm);
			/* FALLTHROUGH */
		case IGMP_REPORTING_MEMBER:
			inm->inm_state = IGMP_REPORTING_MEMBER;
			break;
		}
		/*
		 * Always clear state-change and group report timers.
		 * Free any pending IGMPv3 state-change records.
		 */
		inm->inm_sctimer = 0;
		inm->inm_timer = 0;
		mbufq_drain(&inm->inm_scq);
	}
	IF_ADDR_WUNLOCK(ifp);

	inm_release_list_deferred(&inm_free_tmp);
}
/*
 * Update the Older Version Querier Present timers for a link.
 * See Section 7.2.1 of RFC 3376.
 */
static void
igmp_v1v2_process_querier_timers(struct igmp_ifsoftc *igi)
{

	IGMP_LOCK_ASSERT();

	if (igi->igi_v1_timer == 0 && igi->igi_v2_timer == 0) {
		/*
		 * IGMPv1 and IGMPv2 Querier Present timers expired.
		 *
		 * Revert to IGMPv3.
		 */
		if (igi->igi_version != IGMP_VERSION_3) {
			CTR5(KTR_IGMPV3,
			    "%s: transition from v%d -> v%d on %p(%s)",
			    __func__, igi->igi_version, IGMP_VERSION_3,
			    igi->igi_ifp, igi->igi_ifp->if_xname);
			igi->igi_version = IGMP_VERSION_3;
		}
	} else if (igi->igi_v1_timer == 0 && igi->igi_v2_timer > 0) {
		/*
		 * IGMPv1 Querier Present timer expired,
		 * IGMPv2 Querier Present timer running.
		 * If IGMPv2 was disabled since last timeout,
		 * revert to IGMPv3.
		 * If IGMPv2 is enabled, revert to IGMPv2.
		 */
		if (!V_igmp_v2enable) {
			CTR5(KTR_IGMPV3,
			    "%s: transition from v%d -> v%d on %p(%s)",
			    __func__, igi->igi_version, IGMP_VERSION_3,
			    igi->igi_ifp, igi->igi_ifp->if_xname);
			igi->igi_v2_timer = 0;
			igi->igi_version = IGMP_VERSION_3;
		} else {
			--igi->igi_v2_timer;
			if (igi->igi_version != IGMP_VERSION_2) {
				CTR5(KTR_IGMPV3,
				    "%s: transition from v%d -> v%d on %p(%s)",
				    __func__, igi->igi_version, IGMP_VERSION_2,
				    igi->igi_ifp, igi->igi_ifp->if_xname);
				igi->igi_version = IGMP_VERSION_2;
				igmp_v3_cancel_link_timers(igi);
			}
		}
	} else if (igi->igi_v1_timer > 0) {
		/*
		 * IGMPv1 Querier Present timer running.
		 * Stop IGMPv2 timer if running.
		 *
		 * If IGMPv1 was disabled since last timeout,
		 * revert to IGMPv3.
		 * If IGMPv1 is enabled, reset IGMPv2 timer if running.
		 */
		if (!V_igmp_v1enable) {
			CTR5(KTR_IGMPV3,
			    "%s: transition from v%d -> v%d on %p(%s)",
			    __func__, igi->igi_version, IGMP_VERSION_3,
			    igi->igi_ifp, igi->igi_ifp->if_xname);
			igi->igi_v1_timer = 0;
			igi->igi_version = IGMP_VERSION_3;
		} else {
			--igi->igi_v1_timer;
		}
		if (igi->igi_v2_timer > 0) {
			CTR3(KTR_IGMPV3,
			    "%s: cancel v2 timer on %p(%s)",
			    __func__, igi->igi_ifp, igi->igi_ifp->if_xname);
			igi->igi_v2_timer = 0;
		}
	}
}
/*
 * Global slowtimo handler.
 * VIMAGE: Timeout handlers are expected to service all vimages.
 */
static struct callout igmpslow_callout;
static void
igmp_slowtimo(void *arg __unused)
{
	struct epoch_tracker et;
	VNET_ITERATOR_DECL(vnet_iter);

	NET_EPOCH_ENTER(et);
	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		igmp_slowtimo_vnet();
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
	NET_EPOCH_EXIT(et);

	callout_reset(&igmpslow_callout, hz / IGMP_SLOWHZ, igmp_slowtimo, NULL);
}

/*
 * Per-vnet slowtimo handler.
 */
static void
igmp_slowtimo_vnet(void)
{
	struct igmp_ifsoftc *igi;

	IGMP_LOCK();

	LIST_FOREACH(igi, &V_igi_head, igi_link) {
		igmp_v1v2_process_querier_timers(igi);
	}

	IGMP_UNLOCK();
}
/*
 * Dispatch an IGMPv1/v2 host report or leave message.
 * These are always small enough to fit inside a single mbuf.
 */
static int
igmp_v1v2_queue_report(struct in_multi *inm, const int type)
{
	struct epoch_tracker et;
	struct ifnet *ifp;
	struct igmp *igmp;
	struct ip *ip;
	struct mbuf *m;

	IN_MULTI_LIST_LOCK_ASSERT();
	IGMP_LOCK_ASSERT();

	ifp = inm->inm_ifp;

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL)
		return (ENOMEM);
	M_ALIGN(m, sizeof(struct ip) + sizeof(struct igmp));

	m->m_pkthdr.len = sizeof(struct ip) + sizeof(struct igmp);

	m->m_data += sizeof(struct ip);
	m->m_len = sizeof(struct igmp);

	igmp = mtod(m, struct igmp *);
	igmp->igmp_type = type;
	igmp->igmp_code = 0;
	igmp->igmp_group = inm->inm_addr;
	igmp->igmp_cksum = 0;
	igmp->igmp_cksum = in_cksum(m, sizeof(struct igmp));

	m->m_data -= sizeof(struct ip);
	m->m_len += sizeof(struct ip);

	ip = mtod(m, struct ip *);
	ip->ip_tos = 0;
	ip->ip_len = htons(sizeof(struct ip) + sizeof(struct igmp));
	ip->ip_off = 0;
	ip->ip_p = IPPROTO_IGMP;
	ip->ip_src.s_addr = INADDR_ANY;

	if (type == IGMP_HOST_LEAVE_MESSAGE)
		ip->ip_dst.s_addr = htonl(INADDR_ALLRTRS_GROUP);
	else
		ip->ip_dst = inm->inm_addr;

	igmp_save_context(m, ifp);

	m->m_flags |= M_IGMPV2;
	if (inm->inm_igi->igi_flags & IGIF_LOOPBACK)
		m->m_flags |= M_IGMP_LOOP;

	CTR2(KTR_IGMPV3, "%s: netisr_dispatch(NETISR_IGMP, %p)", __func__, m);
	NET_EPOCH_ENTER(et);
	netisr_dispatch(NETISR_IGMP, m);
	NET_EPOCH_EXIT(et);

	return (0);
}
2296 * Process a state change from the upper layer for the given IPv4 group.
2298 * Each socket holds a reference on the in_multi in its own ip_moptions.
2299 * The socket layer will have made the necessary updates to.the group
2300 * state, it is now up to IGMP to issue a state change report if there
2301 * has been any change between T0 (when the last state-change was issued)
2304 * We use the IGMPv3 state machine at group level. The IGMP module
2305 * however makes the decision as to which IGMP protocol version to speak.
2306 * A state change *from* INCLUDE {} always means an initial join.
2307 * A state change *to* INCLUDE {} always means a final leave.
2309 * FUTURE: If IGIF_V3LITE is enabled for this interface, then we can
2310 * save ourselves a bunch of work; any exclusive mode groups need not
2311 * compute source filter lists.
2313 * VIMAGE: curvnet should have been set by caller, as this routine
2314 * is called from the socket option handlers.
2317 igmp_change_state(struct in_multi *inm)
2319 struct igmp_ifsoftc *igi;
2324 IN_MULTI_LOCK_ASSERT();
2326 * Try to detect if the upper layer just asked us to change state
2327 * for an interface which has now gone away.
2329 KASSERT(inm->inm_ifma != NULL, ("%s: no ifma", __func__));
2330 ifp = inm->inm_ifma->ifma_ifp;
2334 * Sanity check that netinet's notion of ifp is the
2335 * same as net's.
2337 KASSERT(inm->inm_ifp == ifp, ("%s: bad ifp", __func__));
2341 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
2342 KASSERT(igi != NULL, ("%s: no igmp_ifsoftc for ifp %p", __func__, ifp));
2345 * If we detect a state transition to or from MCAST_UNDEFINED
2346 * for this group, then we are starting or finishing an IGMP
2347 * life cycle for this group.
2349 if (inm->inm_st[1].iss_fmode != inm->inm_st[0].iss_fmode) {
2350 CTR3(KTR_IGMPV3, "%s: inm transition %d -> %d", __func__,
2351 inm->inm_st[0].iss_fmode, inm->inm_st[1].iss_fmode);
2352 if (inm->inm_st[0].iss_fmode == MCAST_UNDEFINED) {
2353 CTR1(KTR_IGMPV3, "%s: initial join", __func__);
2354 error = igmp_initial_join(inm, igi);
2356 } else if (inm->inm_st[1].iss_fmode == MCAST_UNDEFINED) {
2357 CTR1(KTR_IGMPV3, "%s: final leave", __func__);
2358 igmp_final_leave(inm, igi);
2362 CTR1(KTR_IGMPV3, "%s: filter set change", __func__);
2365 error = igmp_handle_state_change(inm, igi);
2373 * Perform the initial join for an IGMP group.
2375 * When joining a group:
2376 * If the group should have its IGMP traffic suppressed, do nothing.
2377 * IGMPv1 starts sending IGMPv1 host membership reports.
2378 * IGMPv2 starts sending IGMPv2 host membership reports.
2379 * IGMPv3 will schedule an IGMPv3 state-change report containing the
2380 * initial state of the membership.
2383 igmp_initial_join(struct in_multi *inm, struct igmp_ifsoftc *igi)
2387 int error, retval, syncstates;
2389 CTR4(KTR_IGMPV3, "%s: initial join 0x%08x on ifp %p(%s)", __func__,
2390 ntohl(inm->inm_addr.s_addr), inm->inm_ifp, inm->inm_ifp->if_xname);
2397 IN_MULTI_LOCK_ASSERT();
2400 KASSERT(igi && igi->igi_ifp == ifp, ("%s: inconsistent ifp", __func__));
2403 * Groups joined on loopback or marked as 'not reported',
2404 * e.g. 224.0.0.1, enter the IGMP_SILENT_MEMBER state and
2405 * are never reported in any IGMP protocol exchanges.
2406 * All other groups enter the appropriate IGMP state machine
2407 * for the version in use on this link.
2408 * A link marked as IGIF_SILENT causes IGMP to be completely
2409 * disabled for the link.
2411 if ((ifp->if_flags & IFF_LOOPBACK) ||
2412 (igi->igi_flags & IGIF_SILENT) ||
2413 !igmp_isgroupreported(inm->inm_addr)) {
2415 "%s: not kicking state machine for silent group", __func__);
2416 inm->inm_state = IGMP_SILENT_MEMBER;
2420 * Deal with overlapping in_multi lifecycle.
2421 * If this group was LEAVING, then make sure
2422 * we drop the reference we picked up to keep the
2423 * group around for the final INCLUDE {} enqueue.
2425 if (igi->igi_version == IGMP_VERSION_3 &&
2426 inm->inm_state == IGMP_LEAVING_MEMBER) {
2427 MPASS(inm->inm_refcount > 1);
2428 inm_rele_locked(NULL, inm);
2430 inm->inm_state = IGMP_REPORTING_MEMBER;
2432 switch (igi->igi_version) {
2433 case IGMP_VERSION_1:
2434 case IGMP_VERSION_2:
2435 inm->inm_state = IGMP_IDLE_MEMBER;
2436 error = igmp_v1v2_queue_report(inm,
2437 (igi->igi_version == IGMP_VERSION_2) ?
2438 IGMP_v2_HOST_MEMBERSHIP_REPORT :
2439 IGMP_v1_HOST_MEMBERSHIP_REPORT);
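/*
 * Start the report timer at a random point within the v1/v2
 * maximum report interval (scaled to fast-timeout ticks) so that
 * hosts on the link do not all report at once.
 */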
2441 inm->inm_timer = IGMP_RANDOM_DELAY(
2442 IGMP_V1V2_MAX_RI * IGMP_FASTHZ);
2443 V_current_state_timers_running = 1;
2447 case IGMP_VERSION_3:
2449 * Defer update of T0 to T1, until the first copy
2450 * of the state change has been transmitted.
2455 * Immediately enqueue a State-Change Report for
2456 * this interface, freeing any previous reports.
2457 * Don't kick the timers if there is nothing to do,
2458 * or if an error occurred.
2462 retval = igmp_v3_enqueue_group_record(mq, inm, 1,
2464 CTR2(KTR_IGMPV3, "%s: enqueue record = %d",
2467 error = -retval;
2472 * Schedule transmission of pending state-change
2473 * report up to RV times for this link. The timer
2474 * will fire at the next igmp_fasttimo (~200ms),
2475 * giving us an opportunity to merge the reports.
2477 if (igi->igi_flags & IGIF_LOOPBACK) {
2480 KASSERT(igi->igi_rv > 1,
2481 ("%s: invalid robustness %d", __func__,
2483 inm->inm_scrv = igi->igi_rv;
2485 inm->inm_sctimer = 1;
2486 V_state_change_timers_running = 1;
2494 * Only update the T0 state if state change is atomic,
2495 * i.e. we don't need to wait for a timer to fire before we
2496 * can consider the state change to have been communicated.
2500 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for 0x%08x/%s", __func__,
2501 ntohl(inm->inm_addr.s_addr), inm->inm_ifp->if_xname);
2508 * Issue an intermediate state change during the IGMP life-cycle.
2511 igmp_handle_state_change(struct in_multi *inm, struct igmp_ifsoftc *igi)
2516 CTR4(KTR_IGMPV3, "%s: state change for 0x%08x on ifp %p(%s)", __func__,
2517 ntohl(inm->inm_addr.s_addr), inm->inm_ifp, inm->inm_ifp->if_xname);
2521 IN_MULTI_LIST_LOCK_ASSERT();
2524 KASSERT(igi && igi->igi_ifp == ifp, ("%s: inconsistent ifp", __func__));
2526 if ((ifp->if_flags & IFF_LOOPBACK) ||
2527 (igi->igi_flags & IGIF_SILENT) ||
2528 !igmp_isgroupreported(inm->inm_addr) ||
2529 (igi->igi_version != IGMP_VERSION_3)) {
2530 if (!igmp_isgroupreported(inm->inm_addr)) {
2532 "%s: not kicking state machine for silent group", __func__);
2534 CTR1(KTR_IGMPV3, "%s: nothing to do", __func__);
2536 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for 0x%08x/%s", __func__,
2537 ntohl(inm->inm_addr.s_addr), inm->inm_ifp->if_xname);
2541 mbufq_drain(&inm->inm_scq);
2543 retval = igmp_v3_enqueue_group_record(&inm->inm_scq, inm, 1, 0, 0);
2544 CTR2(KTR_IGMPV3, "%s: enqueue record = %d", __func__, retval);
2549 * If record(s) were enqueued, start the state-change
2550 * report timer for this group.
2552 inm->inm_scrv = ((igi->igi_flags & IGIF_LOOPBACK) ? 1 : igi->igi_rv);
2553 inm->inm_sctimer = 1;
2554 V_state_change_timers_running = 1;
2560 * Perform the final leave for an IGMP group.
2562 * When leaving a group:
2563 * IGMPv1 does nothing.
2564 * IGMPv2 sends a host leave message, if and only if we are the reporter.
2565 * IGMPv3 enqueues a state-change report containing a transition
2566 * to INCLUDE {} for immediate transmission.
2569 igmp_final_leave(struct in_multi *inm, struct igmp_ifsoftc *igi)
2575 CTR4(KTR_IGMPV3, "%s: final leave 0x%08x on ifp %p(%s)",
2576 __func__, ntohl(inm->inm_addr.s_addr), inm->inm_ifp,
2577 inm->inm_ifp->if_xname);
2579 IN_MULTI_LIST_LOCK_ASSERT();
2582 switch (inm->inm_state) {
2583 case IGMP_NOT_MEMBER:
2584 case IGMP_SILENT_MEMBER:
2585 case IGMP_LEAVING_MEMBER:
2586 /* Already leaving or left; do nothing. */
2588 "%s: not kicking state machine for silent group", __func__);
2590 case IGMP_REPORTING_MEMBER:
2591 case IGMP_IDLE_MEMBER:
2592 case IGMP_G_QUERY_PENDING_MEMBER:
2593 case IGMP_SG_QUERY_PENDING_MEMBER:
2594 if (igi->igi_version == IGMP_VERSION_2) {
2596 if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER ||
2597 inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER)
2598 panic("%s: IGMPv3 state reached, not IGMPv3 mode",
2601 igmp_v1v2_queue_report(inm, IGMP_HOST_LEAVE_MESSAGE);
2602 inm->inm_state = IGMP_NOT_MEMBER;
2603 } else if (igi->igi_version == IGMP_VERSION_3) {
2605 * Stop group timer and all pending reports.
2606 * Immediately enqueue a state-change report
2607 * TO_IN {} to be sent on the next fast timeout,
2608 * giving us an opportunity to merge reports.
2610 mbufq_drain(&inm->inm_scq);
2612 if (igi->igi_flags & IGIF_LOOPBACK) {
2615 inm->inm_scrv = igi->igi_rv;
2617 CTR4(KTR_IGMPV3, "%s: Leaving 0x%08x/%s with %d "
2618 "pending retransmissions.", __func__,
2619 ntohl(inm->inm_addr.s_addr),
2620 inm->inm_ifp->if_xname, inm->inm_scrv);
2621 if (inm->inm_scrv == 0) {
2622 inm->inm_state = IGMP_NOT_MEMBER;
2623 inm->inm_sctimer = 0;
2625 int retval __unused;
2627 inm_acquire_locked(inm);
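/*
 * The extra reference taken here keeps the in_multi alive while
 * the TO_IN {} state-change is pending retransmission; it is
 * released once the leave has completed, or by igmp_initial_join()
 * if the group is re-joined first (see the overlapping-lifecycle
 * comment in that function).
 */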
2629 retval = igmp_v3_enqueue_group_record(
2630 &inm->inm_scq, inm, 1, 0, 0);
2631 KASSERT(retval != 0,
2632 ("%s: enqueue record = %d", __func__,
2635 inm->inm_state = IGMP_LEAVING_MEMBER;
2636 inm->inm_sctimer = 1;
2637 V_state_change_timers_running = 1;
2643 case IGMP_LAZY_MEMBER:
2644 case IGMP_SLEEPING_MEMBER:
2645 case IGMP_AWAKENING_MEMBER:
2646 /* Our reports are suppressed; do nothing. */
2652 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for 0x%08x/%s", __func__,
2653 ntohl(inm->inm_addr.s_addr), inm->inm_ifp->if_xname);
2654 inm->inm_st[1].iss_fmode = MCAST_UNDEFINED;
2655 CTR3(KTR_IGMPV3, "%s: T1 now MCAST_UNDEFINED for 0x%08x/%s",
2656 __func__, ntohl(inm->inm_addr.s_addr),
2657 inm->inm_ifp->if_xname);
2662 * Enqueue an IGMPv3 group record to the given output queue.
2664 * XXX This function could do with having the allocation code
2665 * split out, and the multiple-tree-walks coalesced into a single
2666 * routine as has been done in igmp_v3_enqueue_filter_change().
2668 * If is_state_change is zero, a current-state record is appended.
2669 * If is_state_change is non-zero, a state-change report is appended.
2671 * If is_group_query is non-zero, an mbuf packet chain is allocated.
2672 * If is_group_query is zero, and if there is a packet with free space
2673 * at the tail of the queue, it will be appended to, provided there
2674 * is enough free space.
2675 * Otherwise a new mbuf packet chain is allocated.
2677 * If is_source_query is non-zero, each source is checked to see if
2678 * it was recorded for a Group-Source query, and will be omitted if
2679 * it is not both in-mode and recorded.
2681 * The function will attempt to allocate leading space in the packet
2682 * for the IP/IGMP header to be prepended without fragmenting the chain.
2684 * If successful the size of all data appended to the queue is returned,
2685 * otherwise an error code less than zero is returned, or zero if
2686 * no record(s) were appended.
2689 igmp_v3_enqueue_group_record(struct mbufq *mq, struct in_multi *inm,
2690 const int is_state_change, const int is_group_query,
2691 const int is_source_query)
2693 struct igmp_grouprec ig;
2694 struct igmp_grouprec *pig;
2696 struct ip_msource *ims, *nims;
2697 struct mbuf *m0, *m, *md;
2698 int is_filter_list_change;
2699 int minrec0len, m0srcs, msrcs, nbytes, off;
2700 int record_has_sources;
2706 IN_MULTI_LIST_LOCK_ASSERT();
2709 is_filter_list_change = 0;
2716 record_has_sources = 1;
2718 type = IGMP_DO_NOTHING;
2719 mode = inm->inm_st[1].iss_fmode;
2722 * If we did not transition out of ASM mode during t0->t1,
2723 * and there are no source nodes to process, we can skip
2724 * the generation of source records.
2726 if (inm->inm_st[0].iss_asm > 0 && inm->inm_st[1].iss_asm > 0 &&
2728 record_has_sources = 0;
2730 if (is_state_change) {
2732 * Queue a state change record.
2733 * If the mode did not change, and there are non-ASM
2734 * listeners or source filters present,
2735 * we potentially need to issue two records for the group.
2736 * If we are transitioning to MCAST_UNDEFINED, we need
2737 * not send any sources.
2738 * If there are ASM listeners, and there was no filter
2739 * mode transition of any kind, do nothing.
2741 if (mode != inm->inm_st[0].iss_fmode) {
2742 if (mode == MCAST_EXCLUDE) {
2743 CTR1(KTR_IGMPV3, "%s: change to EXCLUDE",
2745 type = IGMP_CHANGE_TO_EXCLUDE_MODE;
2747 CTR1(KTR_IGMPV3, "%s: change to INCLUDE",
2749 type = IGMP_CHANGE_TO_INCLUDE_MODE;
2750 if (mode == MCAST_UNDEFINED)
2751 record_has_sources = 0;
2754 if (record_has_sources) {
2755 is_filter_list_change = 1;
2757 type = IGMP_DO_NOTHING;
2762 * Queue a current state record.
2764 if (mode == MCAST_EXCLUDE) {
2765 type = IGMP_MODE_IS_EXCLUDE;
2766 } else if (mode == MCAST_INCLUDE) {
2767 type = IGMP_MODE_IS_INCLUDE;
2768 KASSERT(inm->inm_st[1].iss_asm == 0,
2769 ("%s: inm %p is INCLUDE but ASM count is %d",
2770 __func__, inm, inm->inm_st[1].iss_asm));
2775 * Generate the filter list changes using a separate function.
2777 if (is_filter_list_change)
2778 return (igmp_v3_enqueue_filter_change(mq, inm));
2780 if (type == IGMP_DO_NOTHING) {
2781 CTR3(KTR_IGMPV3, "%s: nothing to do for 0x%08x/%s", __func__,
2782 ntohl(inm->inm_addr.s_addr), inm->inm_ifp->if_xname);
2787 * If any sources are present, we must be able to fit at least
2788 * one in the trailing space of the tail packet's mbuf,
2789 * ideally more.
2791 minrec0len = sizeof(struct igmp_grouprec);
2792 if (record_has_sources)
2793 minrec0len += sizeof(in_addr_t);
2795 CTR4(KTR_IGMPV3, "%s: queueing %s for 0x%08x/%s", __func__,
2796 igmp_rec_type_to_str(type), ntohl(inm->inm_addr.s_addr),
2797 inm->inm_ifp->if_xname);
2800 * Check if we have a packet in the tail of the queue for this
2801 * group into which the first group record for this group will fit.
2802 * Otherwise allocate a new packet.
2803 * Always allocate leading space for IP+RA_OPT+IGMP+REPORT.
2804 * Note: Group records for G/GSR query responses MUST be sent
2805 * in their own packet.
2807 m0 = mbufq_last(mq);
2808 if (!is_group_query &&
2810 (m0->m_pkthdr.vt_nrecs + 1 <= IGMP_V3_REPORT_MAXRECS) &&
2811 (m0->m_pkthdr.len + minrec0len) <
2812 (ifp->if_mtu - IGMP_LEADINGSPACE)) {
2813 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
2814 sizeof(struct igmp_grouprec)) / sizeof(in_addr_t);
2816 CTR1(KTR_IGMPV3, "%s: use existing packet", __func__);
2818 if (mbufq_full(mq)) {
2819 CTR1(KTR_IGMPV3, "%s: outbound queue full", __func__);
2823 m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE -
2824 sizeof(struct igmp_grouprec)) / sizeof(in_addr_t);
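/*
 * Current-state records may carry many sources, so a cluster is
 * tried first; otherwise fall back to a plain header mbuf. Either
 * way, reserve IGMP_LEADINGSPACE up front so the IP+RA+IGMP
 * headers can later be prepended without fragmenting the chain.
 */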
2825 if (!is_state_change && !is_group_query) {
2826 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2828 m->m_data += IGMP_LEADINGSPACE;
2831 m = m_gethdr(M_NOWAIT, MT_DATA);
2833 M_ALIGN(m, IGMP_LEADINGSPACE);
2838 igmp_save_context(m, ifp);
2840 CTR1(KTR_IGMPV3, "%s: allocated first packet", __func__);
2844 * Append group record.
2845 * If we have sources, we don't know how many yet.
2850 ig.ig_group = inm->inm_addr;
2851 if (!m_append(m, sizeof(struct igmp_grouprec), (void *)&ig)) {
2854 CTR1(KTR_IGMPV3, "%s: m_append() failed.", __func__);
2857 nbytes += sizeof(struct igmp_grouprec);
2860 * Append as many sources as will fit in the first packet.
2861 * If we are appending to a new packet, the chain allocation
2862 * may potentially use clusters; use m_getptr() in this case.
2863 * If we are appending to an existing packet, we need to obtain
2864 * a pointer to the group record after m_append(), in case a new
2865 * mbuf was allocated.
2866 * Only append sources which are in-mode at t1. If we are
2867 * transitioning to MCAST_UNDEFINED state on the group, do not
2868 * include source entries.
2869 * Only report recorded sources in our filter set when responding
2870 * to a group-source query.
2872 if (record_has_sources) {
2875 pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) +
2876 md->m_len - nbytes);
2878 md = m_getptr(m, 0, &off);
2879 pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) +
2883 RB_FOREACH_SAFE(ims, ip_msource_tree, &inm->inm_srcs, nims) {
2884 CTR2(KTR_IGMPV3, "%s: visit node 0x%08x", __func__,
2886 now = ims_get_mode(inm, ims, 1);
2887 CTR2(KTR_IGMPV3, "%s: node is %d", __func__, now);
2888 if ((now != mode) ||
2889 (now == mode && mode == MCAST_UNDEFINED)) {
2890 CTR1(KTR_IGMPV3, "%s: skip node", __func__);
2893 if (is_source_query && ims->ims_stp == 0) {
2894 CTR1(KTR_IGMPV3, "%s: skip unrecorded node",
2898 CTR1(KTR_IGMPV3, "%s: append node", __func__);
2899 naddr = htonl(ims->ims_haddr);
2900 if (!m_append(m, sizeof(in_addr_t), (void *)&naddr)) {
2903 CTR1(KTR_IGMPV3, "%s: m_append() failed.",
2907 nbytes += sizeof(in_addr_t);
2909 if (msrcs == m0srcs)
2912 CTR2(KTR_IGMPV3, "%s: msrcs is %d this packet", __func__,
2914 pig->ig_numsrc = htons(msrcs);
2915 nbytes += (msrcs * sizeof(in_addr_t));
2918 if (is_source_query && msrcs == 0) {
2919 CTR1(KTR_IGMPV3, "%s: no recorded sources to report", __func__);
2926 * We are good to go with first packet.
2929 CTR1(KTR_IGMPV3, "%s: enqueueing first packet", __func__);
2930 m->m_pkthdr.vt_nrecs = 1;
2931 mbufq_enqueue(mq, m);
2933 m->m_pkthdr.vt_nrecs++;
2936 * No further work needed if no source list in packet(s).
2938 if (!record_has_sources)
2942 * Whilst sources remain to be announced, we need to allocate
2943 * a new packet and fill out as many sources as will fit.
2944 * Always try for a cluster first.
2946 while (nims != NULL) {
2947 if (mbufq_full(mq)) {
2948 CTR1(KTR_IGMPV3, "%s: outbound queue full", __func__);
2951 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2953 m->m_data += IGMP_LEADINGSPACE;
2955 m = m_gethdr(M_NOWAIT, MT_DATA);
2957 M_ALIGN(m, IGMP_LEADINGSPACE);
2961 igmp_save_context(m, ifp);
2962 md = m_getptr(m, 0, &off);
2963 pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) + off);
2964 CTR1(KTR_IGMPV3, "%s: allocated next packet", __func__);
2966 if (!m_append(m, sizeof(struct igmp_grouprec), (void *)&ig)) {
2969 CTR1(KTR_IGMPV3, "%s: m_append() failed.", __func__);
2972 m->m_pkthdr.vt_nrecs = 1;
2973 nbytes += sizeof(struct igmp_grouprec);
2975 m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE -
2976 sizeof(struct igmp_grouprec)) / sizeof(in_addr_t);
2979 RB_FOREACH_FROM(ims, ip_msource_tree, nims) {
2980 CTR2(KTR_IGMPV3, "%s: visit node 0x%08x", __func__,
2982 now = ims_get_mode(inm, ims, 1);
2983 if ((now != mode) ||
2984 (now == mode && mode == MCAST_UNDEFINED)) {
2985 CTR1(KTR_IGMPV3, "%s: skip node", __func__);
2988 if (is_source_query && ims->ims_stp == 0) {
2989 CTR1(KTR_IGMPV3, "%s: skip unrecorded node",
2993 CTR1(KTR_IGMPV3, "%s: append node", __func__);
2994 naddr = htonl(ims->ims_haddr);
2995 if (!m_append(m, sizeof(in_addr_t), (void *)&naddr)) {
2998 CTR1(KTR_IGMPV3, "%s: m_append() failed.",
3003 if (msrcs == m0srcs)
3006 pig->ig_numsrc = htons(msrcs);
3007 nbytes += (msrcs * sizeof(in_addr_t));
3009 CTR1(KTR_IGMPV3, "%s: enqueueing next packet", __func__);
3010 mbufq_enqueue(mq, m);
3017 * Type used to mark record pass completion.
3018 * We exploit the fact that we can cast to this easily from the
3019 * current filter modes on each ip_msource node.
3022 REC_NONE = 0x00, /* MCAST_UNDEFINED */
3023 REC_ALLOW = 0x01, /* MCAST_INCLUDE */
3024 REC_BLOCK = 0x02, /* MCAST_EXCLUDE */
3025 REC_FULL = REC_ALLOW | REC_BLOCK
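/*
 * A filter-change pass walks the source tree once per record type:
 * crt holds the type currently being encoded, and drt accumulates
 * the completed types until it equals REC_FULL, at which point both
 * ALLOW_NEW and BLOCK_OLD records have been emitted.
 */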
3029 * Enqueue an IGMPv3 filter list change to the given output queue.
3031 * Source list filter state is held in an RB-tree. When the filter list
3032 * for a group is changed without changing its mode, we need to compute
3033 * the deltas between T0 and T1 for each source in the filter set,
3034 * and enqueue the appropriate ALLOW_NEW/BLOCK_OLD records.
3036 * As we may potentially queue two record types, and the entire R-B tree
3037 * needs to be walked at once, we break this out into its own function
3038 * so we can generate a tightly packed queue of packets.
3040 * XXX This could be written to only use one tree walk, although that makes
3041 * serializing into the mbuf chains a bit harder. For now we do two walks
3042 * which makes things easier on us, and it may or may not be harder on
3043 * the branch predictor.
3045 * If successful the size of all data appended to the queue is returned,
3046 * otherwise an error code less than zero is returned, or zero if
3047 * no record(s) were appended.
3050 igmp_v3_enqueue_filter_change(struct mbufq *mq, struct in_multi *inm)
3052 static const int MINRECLEN =
3053 sizeof(struct igmp_grouprec) + sizeof(in_addr_t);
3055 struct igmp_grouprec ig;
3056 struct igmp_grouprec *pig;
3057 struct ip_msource *ims, *nims;
3058 struct mbuf *m, *m0, *md;
3060 int m0srcs, nbytes, npbytes, off, rsrcs, schanged;
3064 uint8_t mode, now, then;
3065 rectype_t crt, drt, nrt;
3067 IN_MULTI_LIST_LOCK_ASSERT();
3069 if (inm->inm_nsrc == 0 ||
3070 (inm->inm_st[0].iss_asm > 0 && inm->inm_st[1].iss_asm > 0))
3073 ifp = inm->inm_ifp; /* interface */
3074 mode = inm->inm_st[1].iss_fmode; /* filter mode at t1 */
3075 crt = REC_NONE; /* current group record type */
3076 drt = REC_NONE; /* mask of completed group record types */
3077 nrt = REC_NONE; /* record type for current node */
3078 m0srcs = 0; /* # sources which will fit in current mbuf chain */
3079 nbytes = 0; /* # of bytes appended to group's state-change queue */
3080 npbytes = 0; /* # of bytes appended this packet */
3081 rsrcs = 0; /* # sources encoded in current record */
3082 schanged = 0; /* # nodes encoded in overall filter change */
3084 nallow = 0; /* # of source entries in ALLOW_NEW */
3085 nblock = 0; /* # of source entries in BLOCK_OLD */
3087 nims = NULL; /* next tree node pointer */
3090 * For each possible filter record mode.
3091 * The first kind of source we encounter tells us which
3092 * is the first kind of record we start appending.
3093 * If a node transitioned to UNDEFINED at t1, its mode is treated
3094 * as the inverse of the group's filter mode.
3096 while (drt != REC_FULL) {
3098 m0 = mbufq_last(mq);
3100 (m0->m_pkthdr.vt_nrecs + 1 <=
3101 IGMP_V3_REPORT_MAXRECS) &&
3102 (m0->m_pkthdr.len + MINRECLEN) <
3103 (ifp->if_mtu - IGMP_LEADINGSPACE)) {
3105 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
3106 sizeof(struct igmp_grouprec)) /
3109 "%s: use previous packet", __func__);
3111 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
3113 m->m_data += IGMP_LEADINGSPACE;
3115 m = m_gethdr(M_NOWAIT, MT_DATA);
3117 M_ALIGN(m, IGMP_LEADINGSPACE);
3121 "%s: m_get*() failed", __func__);
3124 m->m_pkthdr.vt_nrecs = 0;
3125 igmp_save_context(m, ifp);
3126 m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE -
3127 sizeof(struct igmp_grouprec)) /
3131 "%s: allocated new packet", __func__);
3134 * Append the IGMP group record header to the
3135 * current packet's data area.
3136 * Recalculate pointer to free space for next
3137 * group record, in case m_append() allocated
3138 * a new mbuf or cluster.
3140 memset(&ig, 0, sizeof(ig));
3141 ig.ig_group = inm->inm_addr;
3142 if (!m_append(m, sizeof(ig), (void *)&ig)) {
3146 "%s: m_append() failed", __func__);
3149 npbytes += sizeof(struct igmp_grouprec);
3151 /* new packet; offset in chain */
3152 md = m_getptr(m, npbytes -
3153 sizeof(struct igmp_grouprec), &off);
3154 pig = (struct igmp_grouprec *)(mtod(md,
3157 /* current packet; offset from last append */
3159 pig = (struct igmp_grouprec *)(mtod(md,
3160 uint8_t *) + md->m_len -
3161 sizeof(struct igmp_grouprec));
3164 * Begin walking the tree for this record type
3165 * pass, or continue from where we left off
3166 * previously if we had to allocate a new packet.
3167 * Only report deltas in-mode at t1.
3168 * We need not report included sources as allowed
3169 * if we are in inclusive mode on the group,
3170 * however the converse is not true.
3174 nims = RB_MIN(ip_msource_tree, &inm->inm_srcs);
3175 RB_FOREACH_FROM(ims, ip_msource_tree, nims) {
3176 CTR2(KTR_IGMPV3, "%s: visit node 0x%08x",
3177 __func__, ims->ims_haddr);
3178 now = ims_get_mode(inm, ims, 1);
3179 then = ims_get_mode(inm, ims, 0);
3180 CTR3(KTR_IGMPV3, "%s: mode: t0 %d, t1 %d",
3181 __func__, then, now);
3184 "%s: skip unchanged", __func__);
3187 if (mode == MCAST_EXCLUDE &&
3188 now == MCAST_INCLUDE) {
3190 "%s: skip IN src on EX group",
3194 nrt = (rectype_t)now;
3195 if (nrt == REC_NONE)
3196 nrt = (rectype_t)(~mode & REC_FULL);
3197 if (schanged++ == 0) {
3199 } else if (crt != nrt)
3201 naddr = htonl(ims->ims_haddr);
3202 if (!m_append(m, sizeof(in_addr_t),
3207 "%s: m_append() failed", __func__);
3211 nallow += !!(crt == REC_ALLOW);
3212 nblock += !!(crt == REC_BLOCK);
3214 if (++rsrcs == m0srcs)
3218 * If we did not append any tree nodes on this
3219 * pass, back out of allocations.
3222 npbytes -= sizeof(struct igmp_grouprec);
3225 "%s: m_free(m)", __func__);
3229 "%s: m_adj(m, -ig)", __func__);
3230 m_adj(m, -((int)sizeof(
3231 struct igmp_grouprec)));
3235 npbytes += (rsrcs * sizeof(in_addr_t));
3236 if (crt == REC_ALLOW)
3237 pig->ig_type = IGMP_ALLOW_NEW_SOURCES;
3238 else if (crt == REC_BLOCK)
3239 pig->ig_type = IGMP_BLOCK_OLD_SOURCES;
3240 pig->ig_numsrc = htons(rsrcs);
3242 * Count the new group record, and enqueue this
3243 * packet if it wasn't already queued.
3245 m->m_pkthdr.vt_nrecs++;
3247 mbufq_enqueue(mq, m);
3249 } while (nims != NULL);
3251 crt = (~crt & REC_FULL);
3254 CTR3(KTR_IGMPV3, "%s: queued %d ALLOW_NEW, %d BLOCK_OLD", __func__,
3261 igmp_v3_merge_state_changes(struct in_multi *inm, struct mbufq *scq)
3264 struct mbuf *m; /* pending state-change */
3265 struct mbuf *m0; /* copy of pending state-change */
3266 struct mbuf *mt; /* last state-change in packet */
3267 int docopy, domerge;
3274 IN_MULTI_LIST_LOCK_ASSERT();
3278 * If there are further pending retransmissions, make a writable
3279 * copy of each queued state-change message before merging.
3281 if (inm->inm_scrv > 0)
3286 if (mbufq_first(gq) == NULL) {
3287 CTR2(KTR_IGMPV3, "%s: WARNING: queue for inm %p is empty",
3292 m = mbufq_first(gq);
3295 * Only merge the report into the current packet if
3296 * there is sufficient space to do so; an IGMPv3 report
3297 * packet may contain at most 65,535 group records.
3298 * Always use a simple mbuf chain concatenation to do this,
3299 * as large state changes for single groups may have
3300 * allocated clusters.
3303 mt = mbufq_last(scq);
3305 recslen = m_length(m, NULL);
3307 if ((mt->m_pkthdr.vt_nrecs +
3308 m->m_pkthdr.vt_nrecs <=
3309 IGMP_V3_REPORT_MAXRECS) &&
3310 (mt->m_pkthdr.len + recslen <=
3311 (inm->inm_ifp->if_mtu - IGMP_LEADINGSPACE)))
3315 if (!domerge && mbufq_full(gq)) {
3317 "%s: outbound queue full, skipping whole packet %p",
3327 CTR2(KTR_IGMPV3, "%s: dequeueing %p", __func__, m);
3328 m0 = mbufq_dequeue(gq);
3331 CTR2(KTR_IGMPV3, "%s: copying %p", __func__, m);
3332 m0 = m_dup(m, M_NOWAIT);
3335 m0->m_nextpkt = NULL;
3340 CTR3(KTR_IGMPV3, "%s: queueing %p to scq %p",
3342 mbufq_enqueue(scq, m0);
3344 struct mbuf *mtl; /* last mbuf of packet mt */
3346 CTR3(KTR_IGMPV3, "%s: merging %p with scq tail %p",
3350 m0->m_flags &= ~M_PKTHDR;
3351 mt->m_pkthdr.len += recslen;
3352 mt->m_pkthdr.vt_nrecs +=
3353 m0->m_pkthdr.vt_nrecs;
3363 * Respond to a pending IGMPv3 General Query.
3366 igmp_v3_dispatch_general_query(struct igmp_ifsoftc *igi)
3368 struct ifmultiaddr *ifma;
3370 struct in_multi *inm;
3371 int retval __unused, loop;
3373 IN_MULTI_LIST_LOCK_ASSERT();
3377 KASSERT(igi->igi_version == IGMP_VERSION_3,
3378 ("%s: called when version %d", __func__, igi->igi_version));
3381 * Check that there are some packets queued. If so, send them first.
3382 * For a large number of groups, the reply to a general query can
3383 * take many packets; we should finish sending them before starting
3384 * to queue the new reply.
3386 if (mbufq_len(&igi->igi_gq) != 0)
3391 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3392 inm = inm_ifmultiaddr_get_inm(ifma);
3395 KASSERT(ifp == inm->inm_ifp,
3396 ("%s: inconsistent ifp", __func__));
3398 switch (inm->inm_state) {
3399 case IGMP_NOT_MEMBER:
3400 case IGMP_SILENT_MEMBER:
3402 case IGMP_REPORTING_MEMBER:
3403 case IGMP_IDLE_MEMBER:
3404 case IGMP_LAZY_MEMBER:
3405 case IGMP_SLEEPING_MEMBER:
3406 case IGMP_AWAKENING_MEMBER:
3407 inm->inm_state = IGMP_REPORTING_MEMBER;
3408 retval = igmp_v3_enqueue_group_record(&igi->igi_gq,
3410 CTR2(KTR_IGMPV3, "%s: enqueue record = %d",
3413 case IGMP_G_QUERY_PENDING_MEMBER:
3414 case IGMP_SG_QUERY_PENDING_MEMBER:
3415 case IGMP_LEAVING_MEMBER:
3421 loop = (igi->igi_flags & IGIF_LOOPBACK) ? 1 : 0;
3422 igmp_dispatch_queue(&igi->igi_gq, IGMP_MAX_RESPONSE_BURST, loop);
3425 * Slew transmission of bursts over 500ms intervals.
3427 if (mbufq_first(&igi->igi_gq) != NULL) {
3428 igi->igi_v3_timer = 1 + IGMP_RANDOM_DELAY(
3429 IGMP_RESPONSE_BURST_INTERVAL);
3430 V_interface_timers_running = 1;
3435 * Transmit the next pending IGMP message in the output queue.
3437 * We get called from netisr_processqueue(). A mutex private to igmpoq
3438 * will be acquired and released around this routine.
3440 * VIMAGE: Needs to store/restore vnet pointer on a per-mbuf-chain basis.
3441 * MRT: Nothing needs to be done, as IGMP traffic is always local to
3442 * a link and uses a link-scope multicast address.
3445 igmp_intr(struct mbuf *m)
3447 struct ip_moptions imo;
3449 struct mbuf *ipopts, *m0;
3453 CTR2(KTR_IGMPV3, "%s: transmit %p", __func__, m);
3456 * Set VNET image pointer from enqueued mbuf chain
3457 * before doing anything else. Whilst we use interface
3458 * indexes to guard against interface detach, they are
3459 * unique to each VIMAGE and must be retrieved within
3460 * a VIMAGE context.
3461 CURVNET_SET((struct vnet *)(m->m_pkthdr.PH_loc.ptr));
3462 ifindex = igmp_restore_context(m);
3465 * Check if the ifnet still exists. This limits the scope of
3466 * any race in the absence of a global ifp lock for low cost
3467 * (an array lookup).
3469 ifp = ifnet_byindex(ifindex);
3471 CTR3(KTR_IGMPV3, "%s: dropped %p as ifindex %u went away.",
3472 __func__, m, ifindex);
3474 IPSTAT_INC(ips_noroute);
3478 ipopts = V_igmp_sendra ? m_raopt : NULL;
3480 imo.imo_multicast_ttl = 1;
3481 imo.imo_multicast_vif = -1;
3482 imo.imo_multicast_loop = (V_ip_mrouter != NULL);
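/*
 * Loop back our own transmissions whenever a multicast router is
 * running on this host, so that it can observe the reports we
 * originate locally.
 */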
3485 * If the user requested that IGMP traffic be explicitly
3486 * redirected to the loopback interface (e.g. they are running a
3487 * MANET interface and the routing protocol needs to see the
3488 * updates), handle this now.
3490 if (m->m_flags & M_IGMP_LOOP)
3491 imo.imo_multicast_ifp = V_loif;
3493 imo.imo_multicast_ifp = ifp;
3495 if (m->m_flags & M_IGMPV2) {
3498 m0 = igmp_v3_encap_report(ifp, m);
3500 CTR2(KTR_IGMPV3, "%s: dropped %p", __func__, m);
3502 IPSTAT_INC(ips_odropped);
3507 igmp_scrub_context(m0);
3509 m0->m_pkthdr.rcvif = V_loif;
3511 mac_netinet_igmp_send(ifp, m0);
3513 error = ip_output(m0, ipopts, NULL, 0, &imo, NULL);
3515 CTR3(KTR_IGMPV3, "%s: ip_output(%p) = %d", __func__, m0, error);
3519 IGMPSTAT_INC(igps_snd_reports);
3523 * We must restore the existing vnet pointer before
3524 * continuing as we are run from netisr context.
3530 * Encapsulate an IGMPv3 report.
3532 * The internal mbuf flag M_IGMPV3_HDR is used to indicate that the mbuf
3533 * chain has already had its IP/IGMPv3 header prepended. In this case
3534 * the function will not attempt to prepend; the lengths and checksums
3535 * will however be re-computed.
3537 * Returns a pointer to the new mbuf chain head, or NULL if the
3538 * allocation failed.
3540 static struct mbuf *
3541 igmp_v3_encap_report(struct ifnet *ifp, struct mbuf *m)
3543 struct igmp_report *igmp;
3545 int hdrlen, igmpreclen;
3547 KASSERT((m->m_flags & M_PKTHDR),
3548 ("%s: mbuf chain %p is !M_PKTHDR", __func__, m));
3550 igmpreclen = m_length(m, NULL);
3551 hdrlen = sizeof(struct ip) + sizeof(struct igmp_report);
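/*
 * Resulting packet layout (sketch):
 *
 *   +-----------+---------------------+------------------------+
 *   | struct ip | struct igmp_report  | group records          |
 *   +-----------+---------------------+------------------------+
 *   |<------------- hdrlen ---------->|<----- igmpreclen ----->|
 */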
3553 if (m->m_flags & M_IGMPV3_HDR) {
3554 igmpreclen -= hdrlen;
3556 M_PREPEND(m, hdrlen, M_NOWAIT);
3559 m->m_flags |= M_IGMPV3_HDR;
3562 CTR2(KTR_IGMPV3, "%s: igmpreclen is %d", __func__, igmpreclen);
3564 m->m_data += sizeof(struct ip);
3565 m->m_len -= sizeof(struct ip);
3567 igmp = mtod(m, struct igmp_report *);
3568 igmp->ir_type = IGMP_v3_HOST_MEMBERSHIP_REPORT;
3571 igmp->ir_numgrps = htons(m->m_pkthdr.vt_nrecs);
3573 igmp->ir_cksum = in_cksum(m, sizeof(struct igmp_report) + igmpreclen);
3574 m->m_pkthdr.vt_nrecs = 0;
3576 m->m_data -= sizeof(struct ip);
3577 m->m_len += sizeof(struct ip);
3579 ip = mtod(m, struct ip *);
3580 ip->ip_tos = IPTOS_PREC_INTERNETCONTROL;
3581 ip->ip_len = htons(hdrlen + igmpreclen);
3582 ip->ip_off = htons(IP_DF);
3583 ip->ip_p = IPPROTO_IGMP;
3586 ip->ip_src.s_addr = INADDR_ANY;
3588 if (m->m_flags & M_IGMP_LOOP) {
3589 struct in_ifaddr *ia;
3593 ip->ip_src = ia->ia_addr.sin_addr;
3596 ip->ip_dst.s_addr = htonl(INADDR_ALLRPTS_GROUP);
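/*
 * IGMPv3 reports are always addressed to the all-IGMPv3-routers
 * group, 224.0.0.22 (RFC 3376).
 */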
3603 igmp_rec_type_to_str(const int type)
3607 case IGMP_CHANGE_TO_EXCLUDE_MODE:
3610 case IGMP_CHANGE_TO_INCLUDE_MODE:
3613 case IGMP_MODE_IS_EXCLUDE:
3616 case IGMP_MODE_IS_INCLUDE:
3619 case IGMP_ALLOW_NEW_SOURCES:
3622 case IGMP_BLOCK_OLD_SOURCES:
3634 vnet_igmp_init(const void *unused __unused)
3637 netisr_register_vnet(&igmp_nh);
3639 VNET_SYSINIT(vnet_igmp_init, SI_SUB_PROTO_MC, SI_ORDER_ANY,
3640 vnet_igmp_init, NULL);
3643 vnet_igmp_uninit(const void *unused __unused)
3646 /* This can happen when we shut down the entire network stack. */
3647 CTR1(KTR_IGMPV3, "%s: tearing down", __func__);
3649 netisr_unregister_vnet(&igmp_nh);
3651 VNET_SYSUNINIT(vnet_igmp_uninit, SI_SUB_PROTO_MC, SI_ORDER_ANY,
3652 vnet_igmp_uninit, NULL);
3656 DB_SHOW_COMMAND(igi_list, db_show_igi_list)
3658 struct igmp_ifsoftc *igi, *tigi;
3659 LIST_HEAD(_igi_list, igmp_ifsoftc) *igi_head;
3662 db_printf("usage: show igi_list <addr>\n");
3665 igi_head = (struct _igi_list *)addr;
3667 LIST_FOREACH_SAFE(igi, igi_head, igi_link, tigi) {
3668 db_printf("igmp_ifsoftc %p:\n", igi);
3669 db_printf(" ifp %p\n", igi->igi_ifp);
3670 db_printf(" version %u\n", igi->igi_version);
3671 db_printf(" v1_timer %u\n", igi->igi_v1_timer);
3672 db_printf(" v2_timer %u\n", igi->igi_v2_timer);
3673 db_printf(" v3_timer %u\n", igi->igi_v3_timer);
3674 db_printf(" flags %#x\n", igi->igi_flags);
3675 db_printf(" rv %u\n", igi->igi_rv);
3676 db_printf(" qi %u\n", igi->igi_qi);
3677 db_printf(" qri %u\n", igi->igi_qri);
3678 db_printf(" uri %u\n", igi->igi_uri);
3679 /* struct mbufq igi_gq; */
3686 igmp_modevent(module_t mod, int type, void *unused __unused)
3691 CTR1(KTR_IGMPV3, "%s: initializing", __func__);
3693 m_raopt = igmp_ra_alloc();
3694 netisr_register(&igmp_nh);
3695 callout_init(&igmpslow_callout, 1);
3696 callout_reset(&igmpslow_callout, hz / IGMP_SLOWHZ,
3697 igmp_slowtimo, NULL);
3698 callout_init(&igmpfast_callout, 1);
3699 callout_reset(&igmpfast_callout, hz / IGMP_FASTHZ,
3700 igmp_fasttimo, NULL);
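/*
 * Two timer wheels drive the protocol: the fast callout fires
 * IGMP_FASTHZ times per second to run report and state-change
 * timers, the slow callout IGMP_SLOWHZ times per second to run
 * the v1/v2 querier-present timers.
 */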
3703 CTR1(KTR_IGMPV3, "%s: tearing down", __func__);
3704 netisr_unregister(&igmp_nh);
3707 IGMP_LOCK_DESTROY();
3710 return (EOPNOTSUPP);
3715 static moduledata_t igmp_mod = {
3720 DECLARE_MODULE(igmp, igmp_mod, SI_SUB_PROTO_MC, SI_ORDER_MIDDLE);