/*	$OpenBSD: if_trunk.c,v 1.30 2007/01/31 06:20:19 reyk Exp $	*/

/*
 * Copyright (c) 2005, 2006 Reyk Floeter <reyk@openbsd.org>
 * Copyright (c) 2007 Andrew Thompson <thompsa@FreeBSD.org>
 * Copyright (c) 2014, 2016 Marcelo Araujo <araujo@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_kern_tls.h"
#include "opt_ratelimit.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/module.h>
#include <sys/priv.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/rmlock.h>
#include <sys/sx.h>
#include <sys/taskqueue.h>
#include <sys/eventhandler.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_clone.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/bpf.h>
#include <net/vnet.h>

#if defined(INET) || defined(INET6)
#include <netinet/in.h>
#include <netinet/ip.h>
#endif

#ifdef INET
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#endif

#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/in6_var.h>
#include <netinet6/in6_ifattach.h>
#endif

#include <net/if_vlan_var.h>
#include <net/if_lagg.h>
#include <net/ieee8023ad_lacp.h>
#define	LAGG_RLOCK()	struct epoch_tracker lagg_et; epoch_enter_preempt(net_epoch_preempt, &lagg_et)
#define	LAGG_RUNLOCK()	epoch_exit_preempt(net_epoch_preempt, &lagg_et)
#define	LAGG_RLOCK_ASSERT()	MPASS(in_epoch(net_epoch_preempt))
#define	LAGG_UNLOCK_ASSERT()	MPASS(!in_epoch(net_epoch_preempt))

#define	LAGG_SX_INIT(_sc)	sx_init(&(_sc)->sc_sx, "if_lagg sx")
#define	LAGG_SX_DESTROY(_sc)	sx_destroy(&(_sc)->sc_sx)
#define	LAGG_XLOCK(_sc)		sx_xlock(&(_sc)->sc_sx)
#define	LAGG_XUNLOCK(_sc)	sx_xunlock(&(_sc)->sc_sx)
#define	LAGG_SXLOCK_ASSERT(_sc)	sx_assert(&(_sc)->sc_sx, SA_LOCKED)
#define	LAGG_XLOCK_ASSERT(_sc)	sx_assert(&(_sc)->sc_sx, SA_XLOCKED)
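
/*
 * Usage sketch (illustrative only, not part of the driver): datapath code
 * runs inside the network epoch while configuration paths hold the
 * exclusive sx lock.  The hypothetical functions below merely show how the
 * macros above pair up.
 *
 *	static void
 *	example_reader(struct lagg_softc *sc)
 *	{
 *		struct lagg_port *lp;
 *
 *		LAGG_RLOCK();		// epoch_enter_preempt(net_epoch_preempt)
 *		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
 *			(void)lp;	// safe to traverse, not to modify
 *		LAGG_RUNLOCK();		// epoch_exit_preempt(net_epoch_preempt)
 *	}
 *
 *	static void
 *	example_writer(struct lagg_softc *sc)
 *	{
 *		LAGG_XLOCK(sc);		// serializes all configuration changes
 *		// modify sc->sc_ports; concurrent readers drain via the epoch
 *		LAGG_XUNLOCK(sc);
 *	}
 */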
/* Special flags we should propagate to the lagg ports. */
static struct {
	int flag;
	int (*func)(struct ifnet *, int);
} lagg_pflags[] = {
	{IFF_PROMISC, ifpromisc},
	{IFF_ALLMULTI, if_allmulti},
	{0, NULL}
};
struct lagg_snd_tag {
	struct m_snd_tag com;
	struct m_snd_tag *tag;
};
VNET_DEFINE(SLIST_HEAD(__trhead, lagg_softc), lagg_list); /* list of laggs */
#define	V_lagg_list	VNET(lagg_list)
VNET_DEFINE_STATIC(struct mtx, lagg_list_mtx);
#define	V_lagg_list_mtx	VNET(lagg_list_mtx)
#define	LAGG_LIST_LOCK_INIT(x)		mtx_init(&V_lagg_list_mtx, \
					"if_lagg list", NULL, MTX_DEF)
#define	LAGG_LIST_LOCK_DESTROY(x)	mtx_destroy(&V_lagg_list_mtx)
#define	LAGG_LIST_LOCK(x)		mtx_lock(&V_lagg_list_mtx)
#define	LAGG_LIST_UNLOCK(x)		mtx_unlock(&V_lagg_list_mtx)
eventhandler_tag	lagg_detach_cookie = NULL;

static int	lagg_clone_create(struct if_clone *, int, caddr_t);
static void	lagg_clone_destroy(struct ifnet *);
VNET_DEFINE_STATIC(struct if_clone *, lagg_cloner);
#define	V_lagg_cloner	VNET(lagg_cloner)
static const char laggname[] = "lagg";
static MALLOC_DEFINE(M_LAGG, laggname, "802.3AD Link Aggregation Interface");
static void	lagg_capabilities(struct lagg_softc *);
static int	lagg_port_create(struct lagg_softc *, struct ifnet *);
static int	lagg_port_destroy(struct lagg_port *, int);
static struct mbuf *lagg_input(struct ifnet *, struct mbuf *);
static void	lagg_linkstate(struct lagg_softc *);
static void	lagg_port_state(struct ifnet *, int);
static int	lagg_port_ioctl(struct ifnet *, u_long, caddr_t);
static int	lagg_port_output(struct ifnet *, struct mbuf *,
		    const struct sockaddr *, struct route *);
static void	lagg_port_ifdetach(void *arg __unused, struct ifnet *);
#ifdef LAGG_PORT_STACKING
static int	lagg_port_checkstacking(struct lagg_softc *);
#endif
static void	lagg_port2req(struct lagg_port *, struct lagg_reqport *);
static void	lagg_init(void *);
static void	lagg_stop(struct lagg_softc *);
static int	lagg_ioctl(struct ifnet *, u_long, caddr_t);
#if defined(KERN_TLS) || defined(RATELIMIT)
static int	lagg_snd_tag_alloc(struct ifnet *,
		    union if_snd_tag_alloc_params *,
		    struct m_snd_tag **);
static int	lagg_snd_tag_modify(struct m_snd_tag *,
		    union if_snd_tag_modify_params *);
static int	lagg_snd_tag_query(struct m_snd_tag *,
		    union if_snd_tag_query_params *);
static void	lagg_snd_tag_free(struct m_snd_tag *);
static void	lagg_ratelimit_query(struct ifnet *,
		    struct if_ratelimit_query_results *);
#endif
static int	lagg_setmulti(struct lagg_port *);
static int	lagg_clrmulti(struct lagg_port *);
static int	lagg_setcaps(struct lagg_port *, int cap);
static int	lagg_setflag(struct lagg_port *, int, int,
		    int (*func)(struct ifnet *, int));
static int	lagg_setflags(struct lagg_port *, int status);
static uint64_t lagg_get_counter(struct ifnet *ifp, ift_counter cnt);
static int	lagg_transmit(struct ifnet *, struct mbuf *);
static void	lagg_qflush(struct ifnet *);
static int	lagg_media_change(struct ifnet *);
static void	lagg_media_status(struct ifnet *, struct ifmediareq *);
static struct lagg_port *lagg_link_active(struct lagg_softc *,
		    struct lagg_port *);
/* Simple round robin */
static void	lagg_rr_attach(struct lagg_softc *);
static int	lagg_rr_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_rr_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);

/* Active failover */
static int	lagg_fail_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_fail_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);

/* Loadbalancing */
static void	lagg_lb_attach(struct lagg_softc *);
static void	lagg_lb_detach(struct lagg_softc *);
static int	lagg_lb_port_create(struct lagg_port *);
static void	lagg_lb_port_destroy(struct lagg_port *);
static int	lagg_lb_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_lb_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);
static int	lagg_lb_porttable(struct lagg_softc *, struct lagg_port *);

/* Broadcast */
static int	lagg_bcast_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_bcast_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);

/* 802.3ad LACP */
static void	lagg_lacp_attach(struct lagg_softc *);
static void	lagg_lacp_detach(struct lagg_softc *);
static int	lagg_lacp_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_lacp_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);
static void	lagg_lacp_lladdr(struct lagg_softc *);
/* lagg protocol table */
static const struct lagg_proto {
	lagg_proto	pr_num;
	void		(*pr_attach)(struct lagg_softc *);
	void		(*pr_detach)(struct lagg_softc *);
	int		(*pr_start)(struct lagg_softc *, struct mbuf *);
	struct mbuf *	(*pr_input)(struct lagg_softc *, struct lagg_port *,
			    struct mbuf *);
	int		(*pr_addport)(struct lagg_port *);
	void		(*pr_delport)(struct lagg_port *);
	void		(*pr_linkstate)(struct lagg_port *);
	void		(*pr_init)(struct lagg_softc *);
	void		(*pr_stop)(struct lagg_softc *);
	void		(*pr_lladdr)(struct lagg_softc *);
	void		(*pr_request)(struct lagg_softc *, void *);
	void		(*pr_portreq)(struct lagg_port *, void *);
} lagg_protos[] = {
    {
	.pr_num = LAGG_PROTO_NONE
    },
    {
	.pr_num = LAGG_PROTO_ROUNDROBIN,
	.pr_attach = lagg_rr_attach,
	.pr_start = lagg_rr_start,
	.pr_input = lagg_rr_input,
    },
    {
	.pr_num = LAGG_PROTO_FAILOVER,
	.pr_start = lagg_fail_start,
	.pr_input = lagg_fail_input,
    },
    {
	.pr_num = LAGG_PROTO_LOADBALANCE,
	.pr_attach = lagg_lb_attach,
	.pr_detach = lagg_lb_detach,
	.pr_start = lagg_lb_start,
	.pr_input = lagg_lb_input,
	.pr_addport = lagg_lb_port_create,
	.pr_delport = lagg_lb_port_destroy,
    },
    {
	.pr_num = LAGG_PROTO_LACP,
	.pr_attach = lagg_lacp_attach,
	.pr_detach = lagg_lacp_detach,
	.pr_start = lagg_lacp_start,
	.pr_input = lagg_lacp_input,
	.pr_addport = lacp_port_create,
	.pr_delport = lacp_port_destroy,
	.pr_linkstate = lacp_linkstate,
	.pr_init = lacp_init,
	.pr_stop = lacp_stop,
	.pr_lladdr = lagg_lacp_lladdr,
	.pr_request = lacp_req,
	.pr_portreq = lacp_portreq,
    },
    {
	.pr_num = LAGG_PROTO_BROADCAST,
	.pr_start = lagg_bcast_start,
	.pr_input = lagg_bcast_input,
    },
};
SYSCTL_DECL(_net_link);
SYSCTL_NODE(_net_link, OID_AUTO, lagg, CTLFLAG_RW, 0,
    "Link Aggregation");

/* Allow input on any failover links */
VNET_DEFINE_STATIC(int, lagg_failover_rx_all);
#define	V_lagg_failover_rx_all	VNET(lagg_failover_rx_all)
SYSCTL_INT(_net_link_lagg, OID_AUTO, failover_rx_all, CTLFLAG_RW | CTLFLAG_VNET,
    &VNET_NAME(lagg_failover_rx_all), 0,
    "Accept input from any interface in a failover lagg");
/* Default value for using flowid */
VNET_DEFINE_STATIC(int, def_use_flowid) = 0;
#define	V_def_use_flowid	VNET(def_use_flowid)
SYSCTL_INT(_net_link_lagg, OID_AUTO, default_use_flowid, CTLFLAG_RWTUN,
    &VNET_NAME(def_use_flowid), 0,
    "Default setting for using flow id for load sharing");

/* Default value for using numa */
VNET_DEFINE_STATIC(int, def_use_numa) = 1;
#define	V_def_use_numa	VNET(def_use_numa)
SYSCTL_INT(_net_link_lagg, OID_AUTO, default_use_numa, CTLFLAG_RWTUN,
    &VNET_NAME(def_use_numa), 0,
    "Use numa to steer flows");

/* Default value for flowid shift */
VNET_DEFINE_STATIC(int, def_flowid_shift) = 16;
#define	V_def_flowid_shift	VNET(def_flowid_shift)
SYSCTL_INT(_net_link_lagg, OID_AUTO, default_flowid_shift, CTLFLAG_RWTUN,
    &VNET_NAME(def_flowid_shift), 0,
    "Default setting for flowid shift for load sharing");
static void
vnet_lagg_init(const void *unused __unused)
{

	LAGG_LIST_LOCK_INIT();
	SLIST_INIT(&V_lagg_list);
	V_lagg_cloner = if_clone_simple(laggname, lagg_clone_create,
	    lagg_clone_destroy, 0);
}
VNET_SYSINIT(vnet_lagg_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
    vnet_lagg_init, NULL);

static void
vnet_lagg_uninit(const void *unused __unused)
{

	if_clone_detach(V_lagg_cloner);
	LAGG_LIST_LOCK_DESTROY();
}
VNET_SYSUNINIT(vnet_lagg_uninit, SI_SUB_INIT_IF, SI_ORDER_ANY,
    vnet_lagg_uninit, NULL);
static int
lagg_modevent(module_t mod, int type, void *data)
{

	switch (type) {
	case MOD_LOAD:
		lagg_input_p = lagg_input;
		lagg_linkstate_p = lagg_port_state;
		lagg_detach_cookie = EVENTHANDLER_REGISTER(
		    ifnet_departure_event, lagg_port_ifdetach, NULL,
		    EVENTHANDLER_PRI_ANY);
		break;
	case MOD_UNLOAD:
		EVENTHANDLER_DEREGISTER(ifnet_departure_event,
		    lagg_detach_cookie);
		lagg_input_p = NULL;
		lagg_linkstate_p = NULL;
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (0);
}

static moduledata_t lagg_mod = {
	"if_lagg",
	lagg_modevent,
	0
};

DECLARE_MODULE(if_lagg, lagg_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
MODULE_VERSION(if_lagg, 1);
static void
lagg_proto_attach(struct lagg_softc *sc, lagg_proto pr)
{

	LAGG_XLOCK_ASSERT(sc);
	KASSERT(sc->sc_proto == LAGG_PROTO_NONE, ("%s: sc %p has proto",
	    __func__, sc));

	if (sc->sc_ifflags & IFF_DEBUG)
		if_printf(sc->sc_ifp, "using proto %u\n", pr);

	sc->sc_proto = pr;
	if (lagg_protos[pr].pr_attach != NULL)
		lagg_protos[pr].pr_attach(sc);
}

static void
lagg_proto_detach(struct lagg_softc *sc)
{
	lagg_proto pr;

	LAGG_XLOCK_ASSERT(sc);
	pr = sc->sc_proto;
	sc->sc_proto = LAGG_PROTO_NONE;

	if (lagg_protos[pr].pr_detach != NULL)
		lagg_protos[pr].pr_detach(sc);
}
static int
lagg_proto_start(struct lagg_softc *sc, struct mbuf *m)
{

	return (lagg_protos[sc->sc_proto].pr_start(sc, m));
}

static struct mbuf *
lagg_proto_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{

	return (lagg_protos[sc->sc_proto].pr_input(sc, lp, m));
}

static int
lagg_proto_addport(struct lagg_softc *sc, struct lagg_port *lp)
{

	if (lagg_protos[sc->sc_proto].pr_addport == NULL)
		return (0);
	else
		return (lagg_protos[sc->sc_proto].pr_addport(lp));
}

static void
lagg_proto_delport(struct lagg_softc *sc, struct lagg_port *lp)
{

	if (lagg_protos[sc->sc_proto].pr_delport != NULL)
		lagg_protos[sc->sc_proto].pr_delport(lp);
}

static void
lagg_proto_linkstate(struct lagg_softc *sc, struct lagg_port *lp)
{

	if (lagg_protos[sc->sc_proto].pr_linkstate != NULL)
		lagg_protos[sc->sc_proto].pr_linkstate(lp);
}

static void
lagg_proto_init(struct lagg_softc *sc)
{

	if (lagg_protos[sc->sc_proto].pr_init != NULL)
		lagg_protos[sc->sc_proto].pr_init(sc);
}

static void
lagg_proto_stop(struct lagg_softc *sc)
{

	if (lagg_protos[sc->sc_proto].pr_stop != NULL)
		lagg_protos[sc->sc_proto].pr_stop(sc);
}

static void
lagg_proto_lladdr(struct lagg_softc *sc)
{

	if (lagg_protos[sc->sc_proto].pr_lladdr != NULL)
		lagg_protos[sc->sc_proto].pr_lladdr(sc);
}

static void
lagg_proto_request(struct lagg_softc *sc, void *v)
{

	if (lagg_protos[sc->sc_proto].pr_request != NULL)
		lagg_protos[sc->sc_proto].pr_request(sc, v);
}

static void
lagg_proto_portreq(struct lagg_softc *sc, struct lagg_port *lp, void *v)
{

	if (lagg_protos[sc->sc_proto].pr_portreq != NULL)
		lagg_protos[sc->sc_proto].pr_portreq(lp, v);
}
/*
 * This routine is run via a vlan
 * config EVENT
 */
static void
lagg_register_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
{
	struct lagg_softc *sc = ifp->if_softc;
	struct lagg_port *lp;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	LAGG_RLOCK();
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		EVENTHANDLER_INVOKE(vlan_config, lp->lp_ifp, vtag);
	LAGG_RUNLOCK();
}

/*
 * This routine is run via a vlan
 * unconfig EVENT
 */
static void
lagg_unregister_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
{
	struct lagg_softc *sc = ifp->if_softc;
	struct lagg_port *lp;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	LAGG_RLOCK();
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		EVENTHANDLER_INVOKE(vlan_unconfig, lp->lp_ifp, vtag);
	LAGG_RUNLOCK();
}
static int
lagg_clone_create(struct if_clone *ifc, int unit, caddr_t params)
{
	struct lagg_softc *sc;
	struct ifnet *ifp;
	static const u_char eaddr[6];	/* 00:00:00:00:00:00 */

	sc = malloc(sizeof(*sc), M_LAGG, M_WAITOK|M_ZERO);
	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		free(sc, M_LAGG);
		return (ENOSPC);
	}
	LAGG_SX_INIT(sc);

	LAGG_XLOCK(sc);
	if (V_def_use_flowid)
		sc->sc_opts |= LAGG_OPT_USE_FLOWID;
	if (V_def_use_numa)
		sc->sc_opts |= LAGG_OPT_USE_NUMA;
	sc->flowid_shift = V_def_flowid_shift;

	/* Hash all layers by default */
	sc->sc_flags = MBUF_HASHFLAG_L2|MBUF_HASHFLAG_L3|MBUF_HASHFLAG_L4;

	lagg_proto_attach(sc, LAGG_PROTO_DEFAULT);

	CK_SLIST_INIT(&sc->sc_ports);

	/* Initialise pseudo media types */
	ifmedia_init(&sc->sc_media, 0, lagg_media_change,
	    lagg_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	if_initname(ifp, laggname, unit);
	ifp->if_softc = sc;
	ifp->if_transmit = lagg_transmit;
	ifp->if_qflush = lagg_qflush;
	ifp->if_init = lagg_init;
	ifp->if_ioctl = lagg_ioctl;
	ifp->if_get_counter = lagg_get_counter;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
#if defined(KERN_TLS) || defined(RATELIMIT)
	ifp->if_snd_tag_alloc = lagg_snd_tag_alloc;
	ifp->if_snd_tag_modify = lagg_snd_tag_modify;
	ifp->if_snd_tag_query = lagg_snd_tag_query;
	ifp->if_snd_tag_free = lagg_snd_tag_free;
	ifp->if_ratelimit_query = lagg_ratelimit_query;
#endif
	ifp->if_capenable = ifp->if_capabilities = IFCAP_HWSTATS;

	/*
	 * Attach as an ordinary ethernet device, children will be attached
	 * as special device IFT_IEEE8023ADLAG.
	 */
	ether_ifattach(ifp, eaddr);

	sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    lagg_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
	sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    lagg_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);

	/* Insert into the global list of laggs */
	LAGG_LIST_LOCK();
	SLIST_INSERT_HEAD(&V_lagg_list, sc, sc_entries);
	LAGG_LIST_UNLOCK();
	LAGG_XUNLOCK(sc);

	return (0);
}
static void
lagg_clone_destroy(struct ifnet *ifp)
{
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
	struct lagg_port *lp;

	LAGG_XLOCK(sc);
	sc->sc_destroying = 1;
	lagg_stop(sc);
	ifp->if_flags &= ~IFF_UP;

	EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
	EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);

	/* Shutdown and remove lagg ports */
	while ((lp = CK_SLIST_FIRST(&sc->sc_ports)) != NULL)
		lagg_port_destroy(lp, 1);

	/* Unhook the aggregation protocol */
	lagg_proto_detach(sc);
	LAGG_XUNLOCK(sc);

	ifmedia_removeall(&sc->sc_media);
	ether_ifdetach(ifp);
	if_free(ifp);

	LAGG_LIST_LOCK();
	SLIST_REMOVE(&V_lagg_list, sc, lagg_softc, sc_entries);
	LAGG_LIST_UNLOCK();

	LAGG_SX_DESTROY(sc);
	free(sc, M_LAGG);
}
static void
lagg_capabilities(struct lagg_softc *sc)
{
	struct lagg_port *lp;
	int cap, ena, pena;
	uint64_t hwa;
	struct ifnet_hw_tsomax hw_tsomax;

	LAGG_XLOCK_ASSERT(sc);

	/* Get common enabled capabilities for the lagg ports */
	ena = ~0;
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		ena &= lp->lp_ifp->if_capenable;
	ena = (ena == ~0 ? 0 : ena);

	/*
	 * Apply common enabled capabilities back to the lagg ports.
	 * May require several iterations if they are dependent.
	 */
	do {
		pena = ena;
		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
			lagg_setcaps(lp, ena);
			ena &= lp->lp_ifp->if_capenable;
		}
	} while (pena != ena);

	/* Get other capabilities from the lagg ports */
	cap = ~0;
	hwa = ~(uint64_t)0;
	memset(&hw_tsomax, 0, sizeof(hw_tsomax));
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		cap &= lp->lp_ifp->if_capabilities;
		hwa &= lp->lp_ifp->if_hwassist;
		if_hw_tsomax_common(lp->lp_ifp, &hw_tsomax);
	}
	cap = (cap == ~0 ? 0 : cap);
	hwa = (hwa == ~(uint64_t)0 ? 0 : hwa);

	if (sc->sc_ifp->if_capabilities != cap ||
	    sc->sc_ifp->if_capenable != ena ||
	    sc->sc_ifp->if_hwassist != hwa ||
	    if_hw_tsomax_update(sc->sc_ifp, &hw_tsomax) != 0) {
		sc->sc_ifp->if_capabilities = cap;
		sc->sc_ifp->if_capenable = ena;
		sc->sc_ifp->if_hwassist = hwa;
		getmicrotime(&sc->sc_ifp->if_lastchange);

		if (sc->sc_ifflags & IFF_DEBUG)
			if_printf(sc->sc_ifp,
			    "capabilities 0x%08x enabled 0x%08x\n", cap, ena);
	}
}
static int
lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp)
{
	struct lagg_softc *sc_ptr;
	struct lagg_port *lp, *tlp;
	struct ifreq ifr;
	int error, i, oldmtu;
	uint64_t *pval;

	LAGG_XLOCK_ASSERT(sc);

	if (sc->sc_ifp == ifp) {
		if_printf(sc->sc_ifp,
		    "cannot add a lagg to itself as a port\n");
		return (EINVAL);
	}

	/* Limit the maximal number of lagg ports */
	if (sc->sc_count >= LAGG_MAX_PORTS)
		return (ENOSPC);

	/* Check if port has already been associated to a lagg */
	if (ifp->if_lagg != NULL) {
		/* Port is already in the current lagg? */
		lp = (struct lagg_port *)ifp->if_lagg;
		if (lp->lp_softc == sc)
			return (EEXIST);
		return (EBUSY);
	}

	/* XXX Disallow non-ethernet interfaces (this should be any of 802) */
	if (ifp->if_type != IFT_ETHER && ifp->if_type != IFT_L2VLAN)
		return (EPROTONOSUPPORT);

	/* Allow the first Ethernet member to define the MTU */
	oldmtu = -1;
	if (CK_SLIST_EMPTY(&sc->sc_ports)) {
		sc->sc_ifp->if_mtu = ifp->if_mtu;
	} else if (sc->sc_ifp->if_mtu != ifp->if_mtu) {
		if (ifp->if_ioctl == NULL) {
			if_printf(sc->sc_ifp, "cannot change MTU for %s\n",
			    ifp->if_xname);
			return (EINVAL);
		}
		oldmtu = ifp->if_mtu;
		strlcpy(ifr.ifr_name, ifp->if_xname, sizeof(ifr.ifr_name));
		ifr.ifr_mtu = sc->sc_ifp->if_mtu;
		error = (*ifp->if_ioctl)(ifp, SIOCSIFMTU, (caddr_t)&ifr);
		if (error != 0) {
			if_printf(sc->sc_ifp, "invalid MTU for %s\n",
			    ifp->if_xname);
			return (error);
		}
		ifr.ifr_mtu = oldmtu;
	}

	lp = malloc(sizeof(struct lagg_port), M_LAGG, M_WAITOK|M_ZERO);
	lp->lp_softc = sc;

	/* Check if port is a stacked lagg */
	LAGG_LIST_LOCK();
	SLIST_FOREACH(sc_ptr, &V_lagg_list, sc_entries) {
		if (ifp == sc_ptr->sc_ifp) {
			LAGG_LIST_UNLOCK();
			free(lp, M_LAGG);
			if (oldmtu != -1)
				(*ifp->if_ioctl)(ifp, SIOCSIFMTU,
				    (caddr_t)&ifr);
			return (EINVAL);
			/* XXX disable stacking for the moment, it's untested */
#ifdef LAGG_PORT_STACKING
			lp->lp_flags |= LAGG_PORT_STACK;
			if (lagg_port_checkstacking(sc_ptr) >=
			    LAGG_MAX_STACKING) {
				LAGG_LIST_UNLOCK();
				free(lp, M_LAGG);
				if (oldmtu != -1)
					(*ifp->if_ioctl)(ifp, SIOCSIFMTU,
					    (caddr_t)&ifr);
				return (E2BIG);
			}
#endif
		}
	}
	LAGG_LIST_UNLOCK();

	if_ref(ifp);
	lp->lp_ifp = ifp;

	bcopy(IF_LLADDR(ifp), lp->lp_lladdr, ETHER_ADDR_LEN);
	lp->lp_ifcapenable = ifp->if_capenable;
	if (CK_SLIST_EMPTY(&sc->sc_ports)) {
		bcopy(IF_LLADDR(ifp), IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
		lagg_proto_lladdr(sc);
		EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
	} else {
		if_setlladdr(ifp, IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
	}
	lagg_setflags(lp, 1);

	if (CK_SLIST_EMPTY(&sc->sc_ports))
		sc->sc_primary = lp;

	/* Change the interface type */
	lp->lp_iftype = ifp->if_type;
	ifp->if_type = IFT_IEEE8023ADLAG;
	ifp->if_lagg = lp;
	lp->lp_ioctl = ifp->if_ioctl;
	ifp->if_ioctl = lagg_port_ioctl;
	lp->lp_output = ifp->if_output;
	ifp->if_output = lagg_port_output;

	/* Read port counters */
	pval = lp->port_counters.val;
	for (i = 0; i < IFCOUNTERS; i++, pval++)
		*pval = ifp->if_get_counter(ifp, i);

	/*
	 * Insert into the list of ports.
	 * Keep ports sorted by if_index so that the configuration is
	 * predictable and `ifconfig laggN create ...` leads to the same
	 * result each time.
	 */
	CK_SLIST_FOREACH(tlp, &sc->sc_ports, lp_entries) {
		if (tlp->lp_ifp->if_index < ifp->if_index && (
		    CK_SLIST_NEXT(tlp, lp_entries) == NULL ||
		    ((struct lagg_port*)CK_SLIST_NEXT(tlp, lp_entries))->lp_ifp->if_index >
		    ifp->if_index))
			break;
	}
	if (tlp != NULL)
		CK_SLIST_INSERT_AFTER(tlp, lp, lp_entries);
	else
		CK_SLIST_INSERT_HEAD(&sc->sc_ports, lp, lp_entries);
	sc->sc_count++;

	lagg_setmulti(lp);

	if ((error = lagg_proto_addport(sc, lp)) != 0) {
		/* Remove the port, without calling pr_delport. */
		lagg_port_destroy(lp, 0);
		if (oldmtu != -1)
			(*ifp->if_ioctl)(ifp, SIOCSIFMTU, (caddr_t)&ifr);
		return (error);
	}

	/* Update lagg capabilities */
	lagg_capabilities(sc);
	lagg_linkstate(sc);

	return (0);
}
#ifdef LAGG_PORT_STACKING
static int
lagg_port_checkstacking(struct lagg_softc *sc)
{
	struct lagg_softc *sc_ptr;
	struct lagg_port *lp;
	int m = 0;

	LAGG_SXLOCK_ASSERT(sc);
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		if (lp->lp_flags & LAGG_PORT_STACK) {
			sc_ptr = (struct lagg_softc *)lp->lp_ifp->if_softc;
			m = MAX(m, lagg_port_checkstacking(sc_ptr));
		}
	}

	return (m + 1);
}
#endif
static void
lagg_port_destroy_cb(epoch_context_t ec)
{
	struct lagg_port *lp;
	struct ifnet *ifp;

	lp = __containerof(ec, struct lagg_port, lp_epoch_ctx);
	ifp = lp->lp_ifp;

	if_rele(ifp);
	free(lp, M_LAGG);
}
static int
lagg_port_destroy(struct lagg_port *lp, int rundelport)
{
	struct lagg_softc *sc = lp->lp_softc;
	struct lagg_port *lp_ptr, *lp0;
	struct ifnet *ifp = lp->lp_ifp;
	uint64_t *pval, vdiff;
	int i;

	LAGG_XLOCK_ASSERT(sc);

	if (rundelport)
		lagg_proto_delport(sc, lp);

	if (lp->lp_detaching == 0)
		lagg_clrmulti(lp);

	/* Restore interface */
	ifp->if_type = lp->lp_iftype;
	ifp->if_ioctl = lp->lp_ioctl;
	ifp->if_output = lp->lp_output;
	ifp->if_lagg = NULL;

	/* Update detached port counters */
	pval = lp->port_counters.val;
	for (i = 0; i < IFCOUNTERS; i++, pval++) {
		vdiff = ifp->if_get_counter(ifp, i) - *pval;
		sc->detached_counters.val[i] += vdiff;
	}

	/* Finally, remove the port from the lagg */
	CK_SLIST_REMOVE(&sc->sc_ports, lp, lagg_port, lp_entries);
	sc->sc_count--;

	/* Update the primary interface */
	if (lp == sc->sc_primary) {
		uint8_t lladdr[ETHER_ADDR_LEN];

		if ((lp0 = CK_SLIST_FIRST(&sc->sc_ports)) == NULL)
			bzero(&lladdr, ETHER_ADDR_LEN);
		else
			bcopy(lp0->lp_lladdr, lladdr, ETHER_ADDR_LEN);
		sc->sc_primary = lp0;
		if (sc->sc_destroying == 0) {
			bcopy(lladdr, IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
			lagg_proto_lladdr(sc);
			EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
		}

		/*
		 * Update lladdr for each port (new primary needs update
		 * as well, to switch from old lladdr to its 'real' one)
		 */
		CK_SLIST_FOREACH(lp_ptr, &sc->sc_ports, lp_entries)
			if_setlladdr(lp_ptr->lp_ifp, lladdr, ETHER_ADDR_LEN);
	}

	if (lp->lp_ifflags)
		if_printf(ifp, "%s: lp_ifflags unclean\n", __func__);

	if (lp->lp_detaching == 0) {
		lagg_setflags(lp, 0);
		lagg_setcaps(lp, lp->lp_ifcapenable);
		if_setlladdr(ifp, lp->lp_lladdr, ETHER_ADDR_LEN);
	}

	/*
	 * Free the port and release its ifnet reference after a grace
	 * period has elapsed.
	 */
	epoch_call(net_epoch_preempt, &lp->lp_epoch_ctx, lagg_port_destroy_cb);
	/* Update lagg capabilities */
	lagg_capabilities(sc);
	lagg_linkstate(sc);

	return (0);
}
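
/*
 * Note on the deferred free above (explanatory sketch): epoch_call()
 * postpones lagg_port_destroy_cb() until every thread that entered the
 * network epoch via LAGG_RLOCK() before the unlink has left it, so
 * concurrent readers of sc_ports never touch a freed lagg_port:
 *
 *	CK_SLIST_REMOVE(...);		// unlink: new readers can't find lp
 *	epoch_call(net_epoch_preempt,	// free only after old readers leave
 *	    &lp->lp_epoch_ctx, lagg_port_destroy_cb);
 */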
static int
lagg_port_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct lagg_reqport *rp = (struct lagg_reqport *)data;
	struct lagg_softc *sc;
	struct lagg_port *lp = NULL;
	int error = 0;

	/* Should be checked by the caller */
	if (ifp->if_type != IFT_IEEE8023ADLAG ||
	    (lp = ifp->if_lagg) == NULL || (sc = lp->lp_softc) == NULL)
		goto fallback;

	switch (cmd) {
	case SIOCGLAGGPORT:
		if (rp->rp_portname[0] == '\0' ||
		    ifunit(rp->rp_portname) != ifp) {
			error = EINVAL;
			break;
		}

		LAGG_RLOCK();
		if ((lp = ifp->if_lagg) == NULL || lp->lp_softc != sc) {
			error = ENOENT;
			LAGG_RUNLOCK();
			break;
		}

		lagg_port2req(lp, rp);
		LAGG_RUNLOCK();
		break;

	case SIOCSIFCAP:
		if (lp->lp_ioctl == NULL) {
			error = EINVAL;
			break;
		}
		error = (*lp->lp_ioctl)(ifp, cmd, data);
		if (error)
			break;

		/* Update lagg interface capabilities */
		LAGG_XLOCK(sc);
		lagg_capabilities(sc);
		LAGG_XUNLOCK(sc);
		VLAN_CAPABILITIES(sc->sc_ifp);
		break;

	case SIOCSIFMTU:
		/* Do not allow the MTU to be changed once joined */
		error = EINVAL;
		break;

	default:
		goto fallback;
	}

	return (error);

fallback:
	if (lp != NULL && lp->lp_ioctl != NULL)
		return ((*lp->lp_ioctl)(ifp, cmd, data));

	return (EINVAL);
}
/*
 * Requests counter @cnt data.
 *
 * Counter value is calculated the following way:
 * 1) for each port, sum difference between current and "initial" measurements.
 * 2) add lagg logical interface counters.
 * 3) add data from detached_counters array.
 *
 * We also do the following things on ports attach/detach:
 * 1) On port attach we store all counters it has into port_counter array.
 * 2) On port detach we add the difference between "initial" and
 *    current counters data to detached_counters array.
 */
static uint64_t
lagg_get_counter(struct ifnet *ifp, ift_counter cnt)
{
	struct lagg_softc *sc;
	struct lagg_port *lp;
	struct ifnet *lpifp;
	uint64_t newval, oldval, vsum;

	/* Revise this when we've got non-generic counters. */
	KASSERT(cnt < IFCOUNTERS, ("%s: invalid cnt %d", __func__, cnt));

	sc = (struct lagg_softc *)ifp->if_softc;

	vsum = 0;
	LAGG_RLOCK();
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		/* Saved attached value */
		oldval = lp->port_counters.val[cnt];
		/* current value */
		lpifp = lp->lp_ifp;
		newval = lpifp->if_get_counter(lpifp, cnt);
		/* Calculate diff and save new */
		vsum += newval - oldval;
	}
	LAGG_RUNLOCK();

	/*
	 * Add counter data which might be added by upper
	 * layer protocols operating on logical interface.
	 */
	vsum += if_get_counter_default(ifp, cnt);

	/*
	 * Add counter data from detached ports counters.
	 */
	vsum += sc->detached_counters.val[cnt];

	return (vsum);
}
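
/*
 * Worked example (hypothetical numbers): a port joins when its
 * if_get_counter(IFCOUNTER_IPACKETS) reads 1000, so port_counters.val[]
 * stores 1000.  If the port now reports 1500, it contributes
 * 1500 - 1000 = 500.  A port that detached after contributing 300 left
 * that 300 in detached_counters.val[].  With 200 packets counted on the
 * logical interface itself, the reported total is 500 + 300 + 200 = 1000.
 */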
/*
 * For direct output to child ports.
 */
static int
lagg_port_output(struct ifnet *ifp, struct mbuf *m,
	const struct sockaddr *dst, struct route *ro)
{
	struct lagg_port *lp = ifp->if_lagg;

	switch (dst->sa_family) {
	case pseudo_AF_HDRCMPLT:
	case AF_UNSPEC:
		return ((*lp->lp_output)(ifp, m, dst, ro));
	}

	/* drop any other frames */
	m_freem(m);
	return (ENETDOWN);
}
static void
lagg_port_ifdetach(void *arg __unused, struct ifnet *ifp)
{
	struct lagg_port *lp;
	struct lagg_softc *sc;

	if ((lp = ifp->if_lagg) == NULL)
		return;
	/* If the ifnet is just being renamed, don't do anything. */
	if (ifp->if_flags & IFF_RENAMING)
		return;

	sc = lp->lp_softc;

	LAGG_XLOCK(sc);
	lp->lp_detaching = 1;
	lagg_port_destroy(lp, 1);
	LAGG_XUNLOCK(sc);
	VLAN_CAPABILITIES(sc->sc_ifp);
}
static void
lagg_port2req(struct lagg_port *lp, struct lagg_reqport *rp)
{
	struct lagg_softc *sc = lp->lp_softc;

	strlcpy(rp->rp_ifname, sc->sc_ifname, sizeof(rp->rp_ifname));
	strlcpy(rp->rp_portname, lp->lp_ifp->if_xname, sizeof(rp->rp_portname));
	rp->rp_prio = lp->lp_prio;
	rp->rp_flags = lp->lp_flags;
	lagg_proto_portreq(sc, lp, &rp->rp_psc);

	/* Add protocol specific flags */
	switch (sc->sc_proto) {
	case LAGG_PROTO_FAILOVER:
		if (lp == sc->sc_primary)
			rp->rp_flags |= LAGG_PORT_MASTER;
		if (lp == lagg_link_active(sc, sc->sc_primary))
			rp->rp_flags |= LAGG_PORT_ACTIVE;
		break;

	case LAGG_PROTO_ROUNDROBIN:
	case LAGG_PROTO_LOADBALANCE:
	case LAGG_PROTO_BROADCAST:
		if (LAGG_PORTACTIVE(lp))
			rp->rp_flags |= LAGG_PORT_ACTIVE;
		break;

	case LAGG_PROTO_LACP:
		/* LACP has a different definition of active */
		if (lacp_isactive(lp))
			rp->rp_flags |= LAGG_PORT_ACTIVE;
		if (lacp_iscollecting(lp))
			rp->rp_flags |= LAGG_PORT_COLLECTING;
		if (lacp_isdistributing(lp))
			rp->rp_flags |= LAGG_PORT_DISTRIBUTING;
		break;
	}
}
static void
lagg_init(void *xsc)
{
	struct lagg_softc *sc = (struct lagg_softc *)xsc;
	struct ifnet *ifp = sc->sc_ifp;
	struct lagg_port *lp;

	LAGG_XLOCK(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		LAGG_XUNLOCK(sc);
		return;
	}

	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	/*
	 * Update the port lladdrs if needed.
	 * This might be if_setlladdr() notification
	 * that lladdr has been changed.
	 */
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		if (memcmp(IF_LLADDR(ifp), IF_LLADDR(lp->lp_ifp),
		    ETHER_ADDR_LEN) != 0)
			if_setlladdr(lp->lp_ifp, IF_LLADDR(ifp),
			    ETHER_ADDR_LEN);
	}

	lagg_proto_init(sc);

	LAGG_XUNLOCK(sc);
}

static void
lagg_stop(struct lagg_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	LAGG_XLOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	lagg_proto_stop(sc);
}
static int
lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
	struct lagg_reqall *ra = (struct lagg_reqall *)data;
	struct lagg_reqopts *ro = (struct lagg_reqopts *)data;
	struct lagg_reqport *rp = (struct lagg_reqport *)data, rpbuf;
	struct lagg_reqflags *rf = (struct lagg_reqflags *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	struct lagg_port *lp;
	struct ifnet *tpif;
	struct thread *td = curthread;
	char *buf, *outbuf;
	int count, buflen, len, error = 0;

	bzero(&rpbuf, sizeof(rpbuf));

	switch (cmd) {
	case SIOCGLAGG:
		LAGG_XLOCK(sc);
		buflen = sc->sc_count * sizeof(struct lagg_reqport);
		outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
		ra->ra_proto = sc->sc_proto;
		lagg_proto_request(sc, &ra->ra_psc);
		count = 0;
		buf = outbuf;
		len = min(ra->ra_size, buflen);
		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
			if (len < sizeof(rpbuf))
				break;

			lagg_port2req(lp, &rpbuf);
			memcpy(buf, &rpbuf, sizeof(rpbuf));
			count++;
			buf += sizeof(rpbuf);
			len -= sizeof(rpbuf);
		}
		LAGG_XUNLOCK(sc);
		ra->ra_ports = count;
		ra->ra_size = count * sizeof(rpbuf);
		error = copyout(outbuf, ra->ra_port, ra->ra_size);
		free(outbuf, M_TEMP);
		break;
	case SIOCSLAGG:
		error = priv_check(td, PRIV_NET_LAGG);
		if (error)
			break;
		if (ra->ra_proto >= LAGG_PROTO_MAX) {
			error = EPROTONOSUPPORT;
			break;
		}

		LAGG_XLOCK(sc);
		lagg_proto_detach(sc);
		LAGG_UNLOCK_ASSERT();
		lagg_proto_attach(sc, ra->ra_proto);
		LAGG_XUNLOCK(sc);
		break;
	case SIOCGLAGGOPTS:
		LAGG_XLOCK(sc);
		ro->ro_opts = sc->sc_opts;
		if (sc->sc_proto == LAGG_PROTO_LACP) {
			struct lacp_softc *lsc;

			lsc = (struct lacp_softc *)sc->sc_psc;
			if (lsc->lsc_debug.lsc_tx_test != 0)
				ro->ro_opts |= LAGG_OPT_LACP_TXTEST;
			if (lsc->lsc_debug.lsc_rx_test != 0)
				ro->ro_opts |= LAGG_OPT_LACP_RXTEST;
			if (lsc->lsc_strict_mode != 0)
				ro->ro_opts |= LAGG_OPT_LACP_STRICT;
			if (lsc->lsc_fast_timeout != 0)
				ro->ro_opts |= LAGG_OPT_LACP_TIMEOUT;

			ro->ro_active = sc->sc_active;
		} else {
			ro->ro_active = 0;
			CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
				ro->ro_active += LAGG_PORTACTIVE(lp);
		}
		ro->ro_bkt = sc->sc_bkt;
		ro->ro_flapping = sc->sc_flapping;
		ro->ro_flowid_shift = sc->flowid_shift;
		LAGG_XUNLOCK(sc);
		break;
	case SIOCSLAGGOPTS:
		if (sc->sc_proto == LAGG_PROTO_ROUNDROBIN) {
			if (ro->ro_bkt == 0)
				sc->sc_bkt = 1; // Minimum 1 packet per iface.
			else
				sc->sc_bkt = ro->ro_bkt;
		}
		error = priv_check(td, PRIV_NET_LAGG);
		if (error)
			break;
		if (ro->ro_opts == 0)
			break;

		/*
		 * Set options.  LACP options are stored in sc->sc_psc,
		 * not in sc_opts.
		 */
		int valid, lacp;

		switch (ro->ro_opts) {
		case LAGG_OPT_USE_FLOWID:
		case -LAGG_OPT_USE_FLOWID:
		case LAGG_OPT_USE_NUMA:
		case -LAGG_OPT_USE_NUMA:
		case LAGG_OPT_FLOWIDSHIFT:
			valid = 1;
			lacp = 0;
			break;
		case LAGG_OPT_LACP_TXTEST:
		case -LAGG_OPT_LACP_TXTEST:
		case LAGG_OPT_LACP_RXTEST:
		case -LAGG_OPT_LACP_RXTEST:
		case LAGG_OPT_LACP_STRICT:
		case -LAGG_OPT_LACP_STRICT:
		case LAGG_OPT_LACP_TIMEOUT:
		case -LAGG_OPT_LACP_TIMEOUT:
			valid = lacp = 1;
			break;
		default:
			valid = lacp = 0;
			break;
		}

		LAGG_XLOCK(sc);

		if (valid == 0 ||
		    (lacp == 1 && sc->sc_proto != LAGG_PROTO_LACP)) {
			/* Invalid combination of options specified. */
			error = EINVAL;
			LAGG_XUNLOCK(sc);
			break;	/* Return from SIOCSLAGGOPTS. */
		}
		/*
		 * Store new options into sc->sc_opts except for
		 * FLOWIDSHIFT and LACP options.
		 */
		if (lacp == 0) {
			if (ro->ro_opts == LAGG_OPT_FLOWIDSHIFT)
				sc->flowid_shift = ro->ro_flowid_shift;
			else if (ro->ro_opts > 0)
				sc->sc_opts |= ro->ro_opts;
			else
				sc->sc_opts &= ~ro->ro_opts;
		} else {
			struct lacp_softc *lsc;
			struct lacp_port *lp;

			lsc = (struct lacp_softc *)sc->sc_psc;

			switch (ro->ro_opts) {
			case LAGG_OPT_LACP_TXTEST:
				lsc->lsc_debug.lsc_tx_test = 1;
				break;
			case -LAGG_OPT_LACP_TXTEST:
				lsc->lsc_debug.lsc_tx_test = 0;
				break;
			case LAGG_OPT_LACP_RXTEST:
				lsc->lsc_debug.lsc_rx_test = 1;
				break;
			case -LAGG_OPT_LACP_RXTEST:
				lsc->lsc_debug.lsc_rx_test = 0;
				break;
			case LAGG_OPT_LACP_STRICT:
				lsc->lsc_strict_mode = 1;
				break;
			case -LAGG_OPT_LACP_STRICT:
				lsc->lsc_strict_mode = 0;
				break;
			case LAGG_OPT_LACP_TIMEOUT:
				LACP_LOCK(lsc);
				LIST_FOREACH(lp, &lsc->lsc_ports, lp_next)
					lp->lp_state |= LACP_STATE_TIMEOUT;
				LACP_UNLOCK(lsc);
				lsc->lsc_fast_timeout = 1;
				break;
			case -LAGG_OPT_LACP_TIMEOUT:
				LACP_LOCK(lsc);
				LIST_FOREACH(lp, &lsc->lsc_ports, lp_next)
					lp->lp_state &= ~LACP_STATE_TIMEOUT;
				LACP_UNLOCK(lsc);
				lsc->lsc_fast_timeout = 0;
				break;
			}
		}
		LAGG_XUNLOCK(sc);
		break;
	case SIOCGLAGGFLAGS:
		rf->rf_flags = 0;
		LAGG_XLOCK(sc);
		if (sc->sc_flags & MBUF_HASHFLAG_L2)
			rf->rf_flags |= LAGG_F_HASHL2;
		if (sc->sc_flags & MBUF_HASHFLAG_L3)
			rf->rf_flags |= LAGG_F_HASHL3;
		if (sc->sc_flags & MBUF_HASHFLAG_L4)
			rf->rf_flags |= LAGG_F_HASHL4;
		LAGG_XUNLOCK(sc);
		break;
	case SIOCSLAGGHASH:
		error = priv_check(td, PRIV_NET_LAGG);
		if (error)
			break;
		if ((rf->rf_flags & LAGG_F_HASHMASK) == 0) {
			error = EINVAL;
			break;
		}
		LAGG_XLOCK(sc);
		sc->sc_flags = 0;
		if (rf->rf_flags & LAGG_F_HASHL2)
			sc->sc_flags |= MBUF_HASHFLAG_L2;
		if (rf->rf_flags & LAGG_F_HASHL3)
			sc->sc_flags |= MBUF_HASHFLAG_L3;
		if (rf->rf_flags & LAGG_F_HASHL4)
			sc->sc_flags |= MBUF_HASHFLAG_L4;
		LAGG_XUNLOCK(sc);
		break;
	case SIOCGLAGGPORT:
		if (rp->rp_portname[0] == '\0' ||
		    (tpif = ifunit_ref(rp->rp_portname)) == NULL) {
			error = EINVAL;
			break;
		}

		LAGG_RLOCK();
		if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL ||
		    lp->lp_softc != sc) {
			error = ENOENT;
			LAGG_RUNLOCK();
			if_rele(tpif);
			break;
		}

		lagg_port2req(lp, rp);
		LAGG_RUNLOCK();
		if_rele(tpif);
		break;
	case SIOCSLAGGPORT:
		error = priv_check(td, PRIV_NET_LAGG);
		if (error)
			break;
		if (rp->rp_portname[0] == '\0' ||
		    (tpif = ifunit_ref(rp->rp_portname)) == NULL) {
			error = EINVAL;
			break;
		}
#ifdef INET6
		/*
		 * A laggport interface should not have inet6 address
		 * because two interfaces with a valid link-local
		 * scope zone must not be merged in any form.  This
		 * restriction is needed to prevent violation of
		 * link-local scope zone.  Attempts to add a laggport
		 * interface which has inet6 addresses triggers
		 * removal of all inet6 addresses on the member
		 * interface.
		 */
		if (in6ifa_llaonifp(tpif)) {
			in6_ifdetach(tpif);
			if_printf(sc->sc_ifp,
			    "IPv6 addresses on %s have been removed "
			    "before adding it as a member to prevent "
			    "IPv6 address scope violation.\n",
			    tpif->if_xname);
		}
#endif
		LAGG_XLOCK(sc);
		error = lagg_port_create(sc, tpif);
		LAGG_XUNLOCK(sc);
		if_rele(tpif);
		VLAN_CAPABILITIES(ifp);
		break;
	case SIOCSLAGGDELPORT:
		error = priv_check(td, PRIV_NET_LAGG);
		if (error)
			break;
		if (rp->rp_portname[0] == '\0' ||
		    (tpif = ifunit_ref(rp->rp_portname)) == NULL) {
			error = EINVAL;
			break;
		}

		LAGG_XLOCK(sc);
		if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL ||
		    lp->lp_softc != sc) {
			error = ENOENT;
			LAGG_XUNLOCK(sc);
			if_rele(tpif);
			break;
		}

		error = lagg_port_destroy(lp, 1);
		LAGG_XUNLOCK(sc);
		if_rele(tpif);
		VLAN_CAPABILITIES(ifp);
		break;
	case SIOCSIFFLAGS:
		/* Set flags on ports too */
		LAGG_XLOCK(sc);
		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
			lagg_setflags(lp, 1);
		}

		if (!(ifp->if_flags & IFF_UP) &&
		    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
			/*
			 * If interface is marked down and it is running,
			 * then stop and disable it.
			 */
			lagg_stop(sc);
			LAGG_XUNLOCK(sc);
		} else if ((ifp->if_flags & IFF_UP) &&
		    !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			LAGG_XUNLOCK(sc);
			(*ifp->if_init)(sc);
		} else
			LAGG_XUNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		LAGG_XLOCK(sc);
		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
			lagg_clrmulti(lp);
			lagg_setmulti(lp);
		}
		LAGG_XUNLOCK(sc);
		error = 0;
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCSIFCAP:
		LAGG_XLOCK(sc);
		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
			if (lp->lp_ioctl != NULL)
				(*lp->lp_ioctl)(lp->lp_ifp, cmd, data);
		}
		lagg_capabilities(sc);
		LAGG_XUNLOCK(sc);
		VLAN_CAPABILITIES(ifp);
		error = 0;
		break;

	case SIOCSIFMTU:
		LAGG_XLOCK(sc);
		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
			if (lp->lp_ioctl != NULL)
				error = (*lp->lp_ioctl)(lp->lp_ifp, cmd, data);
			else
				error = EINVAL;
			if (error != 0) {
				if_printf(ifp,
				    "failed to change MTU to %d on port %s, "
				    "reverting all ports to original MTU (%d)\n",
				    ifr->ifr_mtu, lp->lp_ifp->if_xname, ifp->if_mtu);
				break;
			}
		}
		if (error == 0) {
			ifp->if_mtu = ifr->ifr_mtu;
		} else {
			/* set every port back to the original MTU */
			ifr->ifr_mtu = ifp->if_mtu;
			CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
				if (lp->lp_ioctl != NULL)
					(*lp->lp_ioctl)(lp->lp_ifp, cmd, data);
			}
		}
		LAGG_XUNLOCK(sc);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}
#if defined(KERN_TLS) || defined(RATELIMIT)
static inline struct lagg_snd_tag *
mst_to_lst(struct m_snd_tag *mst)
{

	return (__containerof(mst, struct lagg_snd_tag, com));
}

/*
 * Look up the port used by a specific flow.  This only works for lagg
 * protocols with deterministic port mappings (e.g. not roundrobin).
 * In addition protocols which use a hash to map flows to ports must
 * be configured to use the mbuf flowid rather than hashing packet
 * contents.
 */
static struct lagg_port *
lookup_snd_tag_port(struct ifnet *ifp, uint32_t flowid, uint32_t flowtype)
{
	struct lagg_softc *sc;
	struct lagg_port *lp;
	struct lagg_lb *lb;
	uint32_t p;

	sc = ifp->if_softc;

	switch (sc->sc_proto) {
	case LAGG_PROTO_FAILOVER:
		return (lagg_link_active(sc, sc->sc_primary));
	case LAGG_PROTO_LOADBALANCE:
		if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) == 0 ||
		    flowtype == M_HASHTYPE_NONE)
			return (NULL);
		p = flowid >> sc->flowid_shift;
		p %= sc->sc_count;
		lb = (struct lagg_lb *)sc->sc_psc;
		lp = lb->lb_ports[p];
		return (lagg_link_active(sc, lp));
	case LAGG_PROTO_LACP:
		if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) == 0 ||
		    flowtype == M_HASHTYPE_NONE)
			return (NULL);
		return (lacp_select_tx_port_by_hash(sc, flowid));
	default:
		return (NULL);
	}
}
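
/*
 * Example of the deterministic mapping (hypothetical values): with
 * flowid_shift = 16 and four ports, a flow with flowid 0x12345678 maps as
 *
 *	p = 0x12345678 >> 16;		// 0x1234
 *	p %= sc->sc_count;		// 0x1234 % 4 = 0
 *	lp = lb->lb_ports[0];
 *
 * so every packet of the flow lands on the same port.  Round-robin has no
 * such stable flow-to-port relation, which is why it cannot back a send
 * tag.
 */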
static int
lagg_snd_tag_alloc(struct ifnet *ifp,
    union if_snd_tag_alloc_params *params,
    struct m_snd_tag **ppmt)
{
	struct lagg_snd_tag *lst;
	struct lagg_softc *sc;
	struct lagg_port *lp;
	struct ifnet *lp_ifp;
	int error;

	sc = ifp->if_softc;

	LAGG_RLOCK();
	lp = lookup_snd_tag_port(ifp, params->hdr.flowid, params->hdr.flowtype);
	if (lp == NULL) {
		LAGG_RUNLOCK();
		return (EOPNOTSUPP);
	}
	if (lp->lp_ifp == NULL || lp->lp_ifp->if_snd_tag_alloc == NULL) {
		LAGG_RUNLOCK();
		return (EOPNOTSUPP);
	}
	lp_ifp = lp->lp_ifp;
	if_ref(lp_ifp);
	LAGG_RUNLOCK();

	lst = malloc(sizeof(*lst), M_LAGG, M_NOWAIT);
	if (lst == NULL) {
		if_rele(lp_ifp);
		return (ENOMEM);
	}

	error = lp_ifp->if_snd_tag_alloc(lp_ifp, params, &lst->tag);
	if_rele(lp_ifp);
	if (error) {
		free(lst, M_LAGG);
		return (error);
	}

	m_snd_tag_init(&lst->com, ifp);

	*ppmt = &lst->com;
	return (0);
}

static int
lagg_snd_tag_modify(struct m_snd_tag *mst,
    union if_snd_tag_modify_params *params)
{
	struct lagg_snd_tag *lst;

	lst = mst_to_lst(mst);
	return (lst->tag->ifp->if_snd_tag_modify(lst->tag, params));
}

static int
lagg_snd_tag_query(struct m_snd_tag *mst,
    union if_snd_tag_query_params *params)
{
	struct lagg_snd_tag *lst;

	lst = mst_to_lst(mst);
	return (lst->tag->ifp->if_snd_tag_query(lst->tag, params));
}

static void
lagg_snd_tag_free(struct m_snd_tag *mst)
{
	struct lagg_snd_tag *lst;

	lst = mst_to_lst(mst);
	m_snd_tag_rele(lst->tag);
	free(lst, M_LAGG);
}
static void
lagg_ratelimit_query(struct ifnet *ifp __unused, struct if_ratelimit_query_results *q)
{
	/*
	 * For lagg, we have an indirect
	 * interface. The caller needs to
	 * get a ratelimit tag on the actual
	 * interface the flow will go on.
	 */
	q->rate_table = NULL;
	q->flags = RT_IS_INDIRECT;
	q->max_flows = 0;
	q->number_of_rates = 0;
}
#endif
static int
lagg_setmulti(struct lagg_port *lp)
{
	struct lagg_softc *sc = lp->lp_softc;
	struct ifnet *ifp = lp->lp_ifp;
	struct ifnet *scifp = sc->sc_ifp;
	struct lagg_mc *mc;
	struct ifmultiaddr *ifma;
	int error;

	IF_ADDR_WLOCK(scifp);
	CK_STAILQ_FOREACH(ifma, &scifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		mc = malloc(sizeof(struct lagg_mc), M_LAGG, M_NOWAIT);
		if (mc == NULL) {
			IF_ADDR_WUNLOCK(scifp);
			return (ENOMEM);
		}
		bcopy(ifma->ifma_addr, &mc->mc_addr,
		    ifma->ifma_addr->sa_len);
		mc->mc_addr.sdl_index = ifp->if_index;
		mc->mc_ifma = NULL;
		SLIST_INSERT_HEAD(&lp->lp_mc_head, mc, mc_entries);
	}
	IF_ADDR_WUNLOCK(scifp);
	SLIST_FOREACH (mc, &lp->lp_mc_head, mc_entries) {
		error = if_addmulti(ifp,
		    (struct sockaddr *)&mc->mc_addr, &mc->mc_ifma);
		if (error)
			return (error);
	}
	return (0);
}

static int
lagg_clrmulti(struct lagg_port *lp)
{
	struct lagg_mc *mc;

	LAGG_XLOCK_ASSERT(lp->lp_softc);
	while ((mc = SLIST_FIRST(&lp->lp_mc_head)) != NULL) {
		SLIST_REMOVE(&lp->lp_mc_head, mc, lagg_mc, mc_entries);
		if (mc->mc_ifma && lp->lp_detaching == 0)
			if_delmulti_ifma(mc->mc_ifma);
		free(mc, M_LAGG);
	}
	return (0);
}
static int
lagg_setcaps(struct lagg_port *lp, int cap)
{
	struct ifreq ifr;

	if (lp->lp_ifp->if_capenable == cap)
		return (0);
	if (lp->lp_ioctl == NULL)
		return (ENXIO);
	ifr.ifr_reqcap = cap;
	return ((*lp->lp_ioctl)(lp->lp_ifp, SIOCSIFCAP, (caddr_t)&ifr));
}
/* Handle a ref counted flag that should be set on the lagg port as well */
static int
lagg_setflag(struct lagg_port *lp, int flag, int status,
    int (*func)(struct ifnet *, int))
{
	struct lagg_softc *sc = lp->lp_softc;
	struct ifnet *scifp = sc->sc_ifp;
	struct ifnet *ifp = lp->lp_ifp;
	int error;

	LAGG_XLOCK_ASSERT(sc);

	status = status ? (scifp->if_flags & flag) : 0;
	/* Now "status" contains the flag value or 0 */

	/*
	 * See if recorded ports status is different from what
	 * we want it to be.  If it is, flip it.  We record ports
	 * status in lp_ifflags so that we won't clear ports flag
	 * we haven't set.  In fact, we don't clear or set ports
	 * flags directly, but get or release references to them.
	 * That's why we can be sure that recorded flags still are
	 * in accord with actual ports flags.
	 */
	if (status != (lp->lp_ifflags & flag)) {
		error = (*func)(ifp, status);
		if (error)
			return (error);
		lp->lp_ifflags &= ~flag;
		lp->lp_ifflags |= status;
	}
	return (0);
}

/*
 * Handle IFF_* flags that require certain changes on the lagg port:
 * if "status" is true, update ports flags respective to the lagg;
 * if "status" is false, forcedly clear the flags set on port.
 */
static int
lagg_setflags(struct lagg_port *lp, int status)
{
	int error, i;

	for (i = 0; lagg_pflags[i].flag; i++) {
		error = lagg_setflag(lp, lagg_pflags[i].flag,
		    status, lagg_pflags[i].func);
		if (error)
			return (error);
	}
	return (0);
}
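
/*
 * Example (sketch): when the administrator sets IFF_PROMISC on lagg0,
 * SIOCSIFFLAGS handling calls lagg_setflags(lp, 1) for every port, which
 * for that flag reduces to
 *
 *	lagg_setflag(lp, IFF_PROMISC, 1, ifpromisc);
 *
 * and acquires a promiscuity reference on each port that lacks one in
 * lp_ifflags; lagg_setflags(lp, 0) releases those references again.
 */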
static int
lagg_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
	int error;

	LAGG_RLOCK();
#if defined(KERN_TLS) || defined(RATELIMIT)
	if (m->m_pkthdr.csum_flags & CSUM_SND_TAG)
		MPASS(m->m_pkthdr.snd_tag->ifp == ifp);
#endif

	/* We need a Tx algorithm and at least one port */
	if (sc->sc_proto == LAGG_PROTO_NONE || sc->sc_count == 0) {
		LAGG_RUNLOCK();
		m_freem(m);
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		return (ENXIO);
	}

	ETHER_BPF_MTAP(ifp, m);

	error = lagg_proto_start(sc, m);
	LAGG_RUNLOCK();

	if (error != 0)
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

	return (error);
}

/*
 * The ifp->if_qflush entry point for lagg(4) is a no-op.
 */
static void
lagg_qflush(struct ifnet *ifp __unused)
{
}
static struct mbuf *
lagg_input(struct ifnet *ifp, struct mbuf *m)
{
	struct lagg_port *lp = ifp->if_lagg;
	struct lagg_softc *sc = lp->lp_softc;
	struct ifnet *scifp = sc->sc_ifp;

	LAGG_RLOCK();
	if ((scifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    lp->lp_detaching != 0 ||
	    sc->sc_proto == LAGG_PROTO_NONE) {
		LAGG_RUNLOCK();
		m_freem(m);
		return (NULL);
	}

	ETHER_BPF_MTAP(scifp, m);

	m = lagg_proto_input(sc, lp, m);
	if (m != NULL && (scifp->if_flags & IFF_MONITOR) != 0) {
		m_freem(m);
		m = NULL;
	}

	LAGG_RUNLOCK();
	return (m);
}
static int
lagg_media_change(struct ifnet *ifp)
{
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;

	if (sc->sc_ifflags & IFF_DEBUG)
		printf("%s\n", __func__);

	/* Ignore */
	return (0);
}

static void
lagg_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
	struct lagg_port *lp;

	imr->ifm_status = IFM_AVALID;
	imr->ifm_active = IFM_ETHER | IFM_AUTO;

	LAGG_RLOCK();
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		if (LAGG_PORTACTIVE(lp))
			imr->ifm_status |= IFM_ACTIVE;
	}
	LAGG_RUNLOCK();
}
static void
lagg_linkstate(struct lagg_softc *sc)
{
	struct lagg_port *lp;
	int new_link = LINK_STATE_DOWN;
	uint64_t speed;

	LAGG_XLOCK_ASSERT(sc);

	/* LACP handles link state itself */
	if (sc->sc_proto == LAGG_PROTO_LACP)
		return;

	/* Our link is considered up if at least one of our ports is active */
	LAGG_RLOCK();
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		if (lp->lp_ifp->if_link_state == LINK_STATE_UP) {
			new_link = LINK_STATE_UP;
			break;
		}
	}
	LAGG_RUNLOCK();
	if_link_state_change(sc->sc_ifp, new_link);

	/* Update if_baudrate to reflect the max possible speed */
	switch (sc->sc_proto) {
	case LAGG_PROTO_FAILOVER:
		sc->sc_ifp->if_baudrate = sc->sc_primary != NULL ?
		    sc->sc_primary->lp_ifp->if_baudrate : 0;
		break;
	case LAGG_PROTO_ROUNDROBIN:
	case LAGG_PROTO_LOADBALANCE:
	case LAGG_PROTO_BROADCAST:
		speed = 0;
		LAGG_RLOCK();
		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
			speed += lp->lp_ifp->if_baudrate;
		LAGG_RUNLOCK();
		sc->sc_ifp->if_baudrate = speed;
		break;
	case LAGG_PROTO_LACP:
		/* LACP updates if_baudrate itself */
		break;
	}
}
static void
lagg_port_state(struct ifnet *ifp, int state)
{
	struct lagg_port *lp = (struct lagg_port *)ifp->if_lagg;
	struct lagg_softc *sc = NULL;

	if (lp != NULL)
		sc = lp->lp_softc;
	if (sc == NULL)
		return;

	LAGG_XLOCK(sc);
	lagg_linkstate(sc);
	lagg_proto_linkstate(sc, lp);
	LAGG_XUNLOCK(sc);
}
static struct lagg_port *
lagg_link_active(struct lagg_softc *sc, struct lagg_port *lp)
{
	struct lagg_port *lp_next, *rval = NULL;

	/*
	 * Search a port which reports an active link state.
	 */

	/*
	 * This is called with either LAGG_RLOCK() held or
	 * LAGG_XLOCK(sc) held.
	 */
	if (!in_epoch(net_epoch_preempt))
		LAGG_XLOCK_ASSERT(sc);

	if (lp == NULL)
		goto search;
	if (LAGG_PORTACTIVE(lp)) {
		rval = lp;
		goto found;
	}
	if ((lp_next = CK_SLIST_NEXT(lp, lp_entries)) != NULL &&
	    LAGG_PORTACTIVE(lp_next)) {
		rval = lp_next;
		goto found;
	}

search:
	CK_SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
		if (LAGG_PORTACTIVE(lp_next)) {
			return (lp_next);
		}
	}
found:
	return (rval);
}
static int
lagg_enqueue(struct ifnet *ifp, struct mbuf *m)
{

#if defined(KERN_TLS) || defined(RATELIMIT)
	if (m->m_pkthdr.csum_flags & CSUM_SND_TAG) {
		struct lagg_snd_tag *lst;
		struct m_snd_tag *mst;

		mst = m->m_pkthdr.snd_tag;
		lst = mst_to_lst(mst);
		if (lst->tag->ifp != ifp) {
			m_freem(m);
			return (EAGAIN);
		}
		m->m_pkthdr.snd_tag = m_snd_tag_ref(lst->tag);
		m_snd_tag_rele(mst);
	}
#endif
	return (ifp->if_transmit)(ifp, m);
}
/*
 * Simple round robin aggregation
 */
static void
lagg_rr_attach(struct lagg_softc *sc)
{

	sc->sc_seq = 0;
	sc->sc_bkt_count = sc->sc_bkt;
}

static int
lagg_rr_start(struct lagg_softc *sc, struct mbuf *m)
{
	struct lagg_port *lp;
	uint32_t p;

	if (sc->sc_bkt_count == 0 && sc->sc_bkt > 0)
		sc->sc_bkt_count = sc->sc_bkt;

	if (sc->sc_bkt > 0) {
		atomic_subtract_int(&sc->sc_bkt_count, 1);
		if (atomic_cmpset_int(&sc->sc_bkt_count, 0, sc->sc_bkt))
			p = atomic_fetchadd_32(&sc->sc_seq, 1);
		else
			p = sc->sc_seq;
	} else
		p = atomic_fetchadd_32(&sc->sc_seq, 1);

	p %= sc->sc_count;
	lp = CK_SLIST_FIRST(&sc->sc_ports);

	while (p--)
		lp = CK_SLIST_NEXT(lp, lp_entries);

	/*
	 * Check the port's link state. This will return the next active
	 * port if the link is down or the port is NULL.
	 */
	if ((lp = lagg_link_active(sc, lp)) == NULL) {
		m_freem(m);
		return (ENETDOWN);
	}

	/* Send mbuf */
	return (lagg_enqueue(lp->lp_ifp, m));
}
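
/*
 * Worked example (hypothetical): with three ports and sc_bkt == 0 the
 * sequence counter selects ports as sc_seq % sc_count:
 *
 *	sc_seq = 7  ->  7 % 3 = 1  ->  second port in sc_ports
 *	sc_seq = 8  ->  8 % 3 = 2  ->  third port
 *	sc_seq = 9  ->  9 % 3 = 0  ->  first port
 *
 * With sc_bkt == N > 0 the bucket counter holds sc_seq steady for N
 * consecutive packets, i.e. N packets go to a port before advancing.
 */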
static struct mbuf *
lagg_rr_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
	struct ifnet *ifp = sc->sc_ifp;

	/* Just pass in the packet to our lagg device */
	m->m_pkthdr.rcvif = ifp;

	return (m);
}
/*
 * Broadcast mode
 */
static int
lagg_bcast_start(struct lagg_softc *sc, struct mbuf *m)
{
	int active_ports = 0;
	int errors = 0;
	int ret;
	struct lagg_port *lp, *last = NULL;
	struct mbuf *m0;

	LAGG_RLOCK_ASSERT();
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		if (!LAGG_PORTACTIVE(lp))
			continue;

		active_ports++;

		if (last != NULL) {
			m0 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
			if (m0 == NULL) {
				ret = ENOBUFS;
				errors++;
				break;
			}
			ret = lagg_enqueue(last->lp_ifp, m0);
			if (ret != 0)
				errors++;
		}
		last = lp;
	}

	if (last == NULL) {
		m_freem(m);
		return (ENOENT);
	}
	if ((last = lagg_link_active(sc, last)) == NULL) {
		errors++;
		m_freem(m);
		return (ENETDOWN);
	}

	ret = lagg_enqueue(last->lp_ifp, m);
	if (ret != 0)
		errors++;

	return (ret);
}

static struct mbuf *
lagg_bcast_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
	struct ifnet *ifp = sc->sc_ifp;

	/* Just pass in the packet to our lagg device */
	m->m_pkthdr.rcvif = ifp;

	return (m);
}
/*
 * Active failover
 */
static int
lagg_fail_start(struct lagg_softc *sc, struct mbuf *m)
{
	struct lagg_port *lp;

	/* Use the master port if active or the next available port */
	if ((lp = lagg_link_active(sc, sc->sc_primary)) == NULL) {
		m_freem(m);
		return (ENETDOWN);
	}

	/* Send mbuf */
	return (lagg_enqueue(lp->lp_ifp, m));
}

static struct mbuf *
lagg_fail_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct lagg_port *tmp_tp;

	if (lp == sc->sc_primary || V_lagg_failover_rx_all) {
		m->m_pkthdr.rcvif = ifp;
		return (m);
	}

	if (!LAGG_PORTACTIVE(sc->sc_primary)) {
		tmp_tp = lagg_link_active(sc, sc->sc_primary);
		/*
		 * If tmp_tp is null, we've received a packet when all
		 * our links are down. Weird, but process it anyways.
		 */
		if ((tmp_tp == NULL || tmp_tp == lp)) {
			m->m_pkthdr.rcvif = ifp;
			return (m);
		}
	}

	m_freem(m);
	return (NULL);
}
/*
 * Loadbalancing
 */
static void
lagg_lb_attach(struct lagg_softc *sc)
{
	struct lagg_port *lp;
	struct lagg_lb *lb;

	LAGG_XLOCK_ASSERT(sc);
	lb = malloc(sizeof(struct lagg_lb), M_LAGG, M_WAITOK | M_ZERO);
	lb->lb_key = m_ether_tcpip_hash_init();
	sc->sc_psc = lb;

	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lagg_lb_port_create(lp);
}

static void
lagg_lb_detach(struct lagg_softc *sc)
{
	struct lagg_lb *lb;

	lb = (struct lagg_lb *)sc->sc_psc;
	if (lb != NULL)
		free(lb, M_LAGG);
}

static int
lagg_lb_porttable(struct lagg_softc *sc, struct lagg_port *lp)
{
	struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
	struct lagg_port *lp_next;
	int i = 0;

	bzero(&lb->lb_ports, sizeof(lb->lb_ports));
	LAGG_XLOCK_ASSERT(sc);
	CK_SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
		if (lp_next == lp)
			continue;
		if (i >= LAGG_MAX_PORTS) {
			return (EINVAL);
		}
		if (sc->sc_ifflags & IFF_DEBUG)
			printf("%s: port %s at index %d\n",
			    sc->sc_ifname, lp_next->lp_ifp->if_xname, i);
		lb->lb_ports[i++] = lp_next;
	}

	return (0);
}

static int
lagg_lb_port_create(struct lagg_port *lp)
{
	struct lagg_softc *sc = lp->lp_softc;

	return (lagg_lb_porttable(sc, NULL));
}

static void
lagg_lb_port_destroy(struct lagg_port *lp)
{
	struct lagg_softc *sc = lp->lp_softc;

	lagg_lb_porttable(sc, lp);
}
static int
lagg_lb_start(struct lagg_softc *sc, struct mbuf *m)
{
	struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
	struct lagg_port *lp = NULL;
	uint32_t p = 0;

	if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) &&
	    M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
		p = m->m_pkthdr.flowid >> sc->flowid_shift;
	else
		p = m_ether_tcpip_hash(sc->sc_flags, m, lb->lb_key);
	p %= sc->sc_count;
	lp = lb->lb_ports[p];

	/*
	 * Check the port's link state. This will return the next active
	 * port if the link is down or the port is NULL.
	 */
	if ((lp = lagg_link_active(sc, lp)) == NULL) {
		m_freem(m);
		return (ENETDOWN);
	}

	/* Send mbuf */
	return (lagg_enqueue(lp->lp_ifp, m));
}

static struct mbuf *
lagg_lb_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
	struct ifnet *ifp = sc->sc_ifp;

	/* Just pass in the packet to our lagg device */
	m->m_pkthdr.rcvif = ifp;

	return (m);
}
/*
 * 802.3ad LACP
 */
static void
lagg_lacp_attach(struct lagg_softc *sc)
{
	struct lagg_port *lp;

	lacp_attach(sc);
	LAGG_XLOCK_ASSERT(sc);
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lacp_port_create(lp);
}

static void
lagg_lacp_detach(struct lagg_softc *sc)
{
	struct lagg_port *lp;
	void *psc;

	LAGG_XLOCK_ASSERT(sc);
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lacp_port_destroy(lp);

	psc = sc->sc_psc;
	sc->sc_psc = NULL;
	lacp_detach(psc);
}

static void
lagg_lacp_lladdr(struct lagg_softc *sc)
{
	struct lagg_port *lp;

	LAGG_SXLOCK_ASSERT(sc);

	/* purge all the lacp ports */
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lacp_port_destroy(lp);

	/* add them back in */
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lacp_port_create(lp);
}
static int
lagg_lacp_start(struct lagg_softc *sc, struct mbuf *m)
{
	struct lagg_port *lp;

	lp = lacp_select_tx_port(sc, m);
	if (lp == NULL) {
		m_freem(m);
		return (ENETDOWN);
	}

	/* Send mbuf */
	return (lagg_enqueue(lp->lp_ifp, m));
}

static struct mbuf *
lagg_lacp_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ether_header *eh;
	u_short etype;

	eh = mtod(m, struct ether_header *);
	etype = ntohs(eh->ether_type);

	/* Tap off LACP control messages */
	if ((m->m_flags & M_VLANTAG) == 0 && etype == ETHERTYPE_SLOW) {
		m = lacp_input(lp, m);
		if (m == NULL)
			return (NULL);
	}

	/*
	 * If the port is not collecting or not in the active aggregator then
	 * free and return.
	 */
	if (lacp_iscollecting(lp) == 0 || lacp_isactive(lp) == 0) {
		m_freem(m);
		return (NULL);
	}

	m->m_pkthdr.rcvif = ifp;
	return (m);
}