/*	$OpenBSD: if_trunk.c,v 1.30 2007/01/31 06:20:19 reyk Exp $	*/

/*
 * Copyright (c) 2005, 2006 Reyk Floeter <reyk@openbsd.org>
 * Copyright (c) 2007 Andrew Thompson <thompsa@FreeBSD.org>
 * Copyright (c) 2014, 2016 Marcelo Araujo <araujo@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_kern_tls.h"
#include "opt_ratelimit.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/module.h>
#include <sys/priv.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/rmlock.h>
#include <sys/sx.h>
#include <sys/taskqueue.h>
#include <sys/eventhandler.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_clone.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/bpf.h>
#include <net/route.h>
#include <net/vnet.h>
#include <net/infiniband.h>

#if defined(INET) || defined(INET6)
#include <netinet/in.h>
#include <netinet/ip.h>
#endif

#ifdef INET
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#endif

#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/in6_var.h>
#include <netinet6/in6_ifattach.h>
#endif

#include <net/if_vlan_var.h>
#include <net/if_lagg.h>
#include <net/ieee8023ad_lacp.h>

#ifdef INET6
/*
 * XXX: declare here to avoid including many inet6 related files.
 * Should this be more generalized?
 */
extern void	nd6_setmtu(struct ifnet *);
#endif

#define	LAGG_SX_INIT(_sc)	sx_init(&(_sc)->sc_sx, "if_lagg sx")
#define	LAGG_SX_DESTROY(_sc)	sx_destroy(&(_sc)->sc_sx)
#define	LAGG_XLOCK(_sc)		sx_xlock(&(_sc)->sc_sx)
#define	LAGG_XUNLOCK(_sc)	sx_xunlock(&(_sc)->sc_sx)
#define	LAGG_SXLOCK_ASSERT(_sc)	sx_assert(&(_sc)->sc_sx, SA_LOCKED)
#define	LAGG_XLOCK_ASSERT(_sc)	sx_assert(&(_sc)->sc_sx, SA_XLOCKED)
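
/*
 * Locking sketch (illustrative, not part of the driver): configuration
 * paths take the exclusive lock around reconfiguration,
 *
 *	LAGG_XLOCK(sc);
 *	error = lagg_port_create(sc, ifp);
 *	LAGG_XUNLOCK(sc);
 *
 * while the packet hot path runs inside the network epoch and only
 * asserts lock state (see lagg_link_active() below).
 */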

/* Special flags we should propagate to the lagg ports. */
static struct {
	int flag;
	int (*func)(struct ifnet *, int);
} lagg_pflags[] = {
	{IFF_PROMISC, ifpromisc},
	{IFF_ALLMULTI, if_allmulti},
	{0, NULL}
};

struct lagg_snd_tag {
	struct m_snd_tag com;
	struct m_snd_tag *tag;
};

VNET_DEFINE(SLIST_HEAD(__trhead, lagg_softc), lagg_list); /* list of laggs */
#define	V_lagg_list	VNET(lagg_list)
VNET_DEFINE_STATIC(struct mtx, lagg_list_mtx);
#define	V_lagg_list_mtx	VNET(lagg_list_mtx)
#define	LAGG_LIST_LOCK_INIT(x)		mtx_init(&V_lagg_list_mtx, \
					"if_lagg list", NULL, MTX_DEF)
#define	LAGG_LIST_LOCK_DESTROY(x)	mtx_destroy(&V_lagg_list_mtx)
#define	LAGG_LIST_LOCK(x)		mtx_lock(&V_lagg_list_mtx)
#define	LAGG_LIST_UNLOCK(x)		mtx_unlock(&V_lagg_list_mtx)
eventhandler_tag	lagg_detach_cookie = NULL;

static int	lagg_clone_create(struct if_clone *, int, caddr_t);
static void	lagg_clone_destroy(struct ifnet *);
VNET_DEFINE_STATIC(struct if_clone *, lagg_cloner);
#define	V_lagg_cloner	VNET(lagg_cloner)
static const char laggname[] = "lagg";
static MALLOC_DEFINE(M_LAGG, laggname, "802.3AD Link Aggregation Interface");

static void	lagg_capabilities(struct lagg_softc *);
static int	lagg_port_create(struct lagg_softc *, struct ifnet *);
static int	lagg_port_destroy(struct lagg_port *, int);
static struct mbuf *lagg_input_ethernet(struct ifnet *, struct mbuf *);
static struct mbuf *lagg_input_infiniband(struct ifnet *, struct mbuf *);
static void	lagg_linkstate(struct lagg_softc *);
static void	lagg_port_state(struct ifnet *, int);
static int	lagg_port_ioctl(struct ifnet *, u_long, caddr_t);
static int	lagg_port_output(struct ifnet *, struct mbuf *,
		    const struct sockaddr *, struct route *);
static void	lagg_port_ifdetach(void *arg __unused, struct ifnet *);
#ifdef LAGG_PORT_STACKING
static int	lagg_port_checkstacking(struct lagg_softc *);
#endif
static void	lagg_port2req(struct lagg_port *, struct lagg_reqport *);
static void	lagg_init(void *);
static void	lagg_stop(struct lagg_softc *);
static int	lagg_ioctl(struct ifnet *, u_long, caddr_t);
#if defined(KERN_TLS) || defined(RATELIMIT)
static int	lagg_snd_tag_alloc(struct ifnet *,
		    union if_snd_tag_alloc_params *,
		    struct m_snd_tag **);
static int	lagg_snd_tag_modify(struct m_snd_tag *,
		    union if_snd_tag_modify_params *);
static int	lagg_snd_tag_query(struct m_snd_tag *,
		    union if_snd_tag_query_params *);
static void	lagg_snd_tag_free(struct m_snd_tag *);
static struct m_snd_tag *lagg_next_snd_tag(struct m_snd_tag *);
static void	lagg_ratelimit_query(struct ifnet *,
		    struct if_ratelimit_query_results *);
#endif
static int	lagg_setmulti(struct lagg_port *);
static int	lagg_clrmulti(struct lagg_port *);
static int	lagg_setcaps(struct lagg_port *, int cap);
static int	lagg_setflag(struct lagg_port *, int, int,
		    int (*func)(struct ifnet *, int));
static int	lagg_setflags(struct lagg_port *, int status);
static uint64_t lagg_get_counter(struct ifnet *ifp, ift_counter cnt);
static int	lagg_transmit_ethernet(struct ifnet *, struct mbuf *);
static int	lagg_transmit_infiniband(struct ifnet *, struct mbuf *);
static void	lagg_qflush(struct ifnet *);
static int	lagg_media_change(struct ifnet *);
static void	lagg_media_status(struct ifnet *, struct ifmediareq *);
static struct lagg_port *lagg_link_active(struct lagg_softc *,
		    struct lagg_port *);

/* Simple round robin */
static void	lagg_rr_attach(struct lagg_softc *);
static int	lagg_rr_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_rr_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);

/* Active failover */
static int	lagg_fail_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_fail_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);

/* Loadbalancing */
static void	lagg_lb_attach(struct lagg_softc *);
static void	lagg_lb_detach(struct lagg_softc *);
static int	lagg_lb_port_create(struct lagg_port *);
static void	lagg_lb_port_destroy(struct lagg_port *);
static int	lagg_lb_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_lb_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);
static int	lagg_lb_porttable(struct lagg_softc *, struct lagg_port *);

/* Broadcast */
static int	lagg_bcast_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_bcast_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);

/* 802.3ad LACP */
static void	lagg_lacp_attach(struct lagg_softc *);
static void	lagg_lacp_detach(struct lagg_softc *);
static int	lagg_lacp_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_lacp_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);
static void	lagg_lacp_lladdr(struct lagg_softc *);

/* lagg protocol table */
static const struct lagg_proto {
	lagg_proto	pr_num;
	void		(*pr_attach)(struct lagg_softc *);
	void		(*pr_detach)(struct lagg_softc *);
	int		(*pr_start)(struct lagg_softc *, struct mbuf *);
	struct mbuf *	(*pr_input)(struct lagg_softc *, struct lagg_port *,
			    struct mbuf *);
	int		(*pr_addport)(struct lagg_port *);
	void		(*pr_delport)(struct lagg_port *);
	void		(*pr_linkstate)(struct lagg_port *);
	void		(*pr_init)(struct lagg_softc *);
	void		(*pr_stop)(struct lagg_softc *);
	void		(*pr_lladdr)(struct lagg_softc *);
	void		(*pr_request)(struct lagg_softc *, void *);
	void		(*pr_portreq)(struct lagg_port *, void *);
} lagg_protos[] = {
    {
	.pr_num = LAGG_PROTO_NONE
    },
    {
	.pr_num = LAGG_PROTO_ROUNDROBIN,
	.pr_attach = lagg_rr_attach,
	.pr_start = lagg_rr_start,
	.pr_input = lagg_rr_input,
    },
    {
	.pr_num = LAGG_PROTO_FAILOVER,
	.pr_start = lagg_fail_start,
	.pr_input = lagg_fail_input,
    },
    {
	.pr_num = LAGG_PROTO_LOADBALANCE,
	.pr_attach = lagg_lb_attach,
	.pr_detach = lagg_lb_detach,
	.pr_start = lagg_lb_start,
	.pr_input = lagg_lb_input,
	.pr_addport = lagg_lb_port_create,
	.pr_delport = lagg_lb_port_destroy,
    },
    {
	.pr_num = LAGG_PROTO_LACP,
	.pr_attach = lagg_lacp_attach,
	.pr_detach = lagg_lacp_detach,
	.pr_start = lagg_lacp_start,
	.pr_input = lagg_lacp_input,
	.pr_addport = lacp_port_create,
	.pr_delport = lacp_port_destroy,
	.pr_linkstate = lacp_linkstate,
	.pr_init = lacp_init,
	.pr_stop = lacp_stop,
	.pr_lladdr = lagg_lacp_lladdr,
	.pr_request = lacp_req,
	.pr_portreq = lacp_portreq,
    },
    {
	.pr_num = LAGG_PROTO_BROADCAST,
	.pr_start = lagg_bcast_start,
	.pr_input = lagg_bcast_input,
    },
};
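
/*
 * Sketch of how a new protocol would slot into the table above; the
 * LAGG_PROTO_EXAMPLE number and lagg_example_*() handlers are hypothetical.
 * Handlers left unset stay NULL and the lagg_proto_*() wrappers below
 * simply skip them:
 *
 *	{
 *		.pr_num = LAGG_PROTO_EXAMPLE,
 *		.pr_start = lagg_example_start,
 *		.pr_input = lagg_example_input,
 *	},
 */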

SYSCTL_DECL(_net_link);
SYSCTL_NODE(_net_link, OID_AUTO, lagg, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Link Aggregation");

/* Allow input on any failover links */
VNET_DEFINE_STATIC(int, lagg_failover_rx_all);
#define	V_lagg_failover_rx_all	VNET(lagg_failover_rx_all)
SYSCTL_INT(_net_link_lagg, OID_AUTO, failover_rx_all, CTLFLAG_RW | CTLFLAG_VNET,
    &VNET_NAME(lagg_failover_rx_all), 0,
    "Accept input from any interface in a failover lagg");

/* Default value for using flowid */
VNET_DEFINE_STATIC(int, def_use_flowid) = 0;
#define	V_def_use_flowid	VNET(def_use_flowid)
SYSCTL_INT(_net_link_lagg, OID_AUTO, default_use_flowid, CTLFLAG_RWTUN,
    &VNET_NAME(def_use_flowid), 0,
    "Default setting for using flow id for load sharing");

/* Default value for using NUMA */
VNET_DEFINE_STATIC(int, def_use_numa) = 1;
#define	V_def_use_numa	VNET(def_use_numa)
SYSCTL_INT(_net_link_lagg, OID_AUTO, default_use_numa, CTLFLAG_RWTUN,
    &VNET_NAME(def_use_numa), 0,
    "Use NUMA to steer flows");

/* Default value for flowid shift */
VNET_DEFINE_STATIC(int, def_flowid_shift) = 16;
#define	V_def_flowid_shift	VNET(def_flowid_shift)
SYSCTL_INT(_net_link_lagg, OID_AUTO, default_flowid_shift, CTLFLAG_RWTUN,
    &VNET_NAME(def_flowid_shift), 0,
    "Default setting for flowid shift for load sharing");

static void
vnet_lagg_init(const void *unused __unused)
{

	LAGG_LIST_LOCK_INIT();
	SLIST_INIT(&V_lagg_list);
	V_lagg_cloner = if_clone_simple(laggname, lagg_clone_create,
	    lagg_clone_destroy, 0);
}
VNET_SYSINIT(vnet_lagg_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
    vnet_lagg_init, NULL);

static void
vnet_lagg_uninit(const void *unused __unused)
{

	if_clone_detach(V_lagg_cloner);
	LAGG_LIST_LOCK_DESTROY();
}
VNET_SYSUNINIT(vnet_lagg_uninit, SI_SUB_INIT_IF, SI_ORDER_ANY,
    vnet_lagg_uninit, NULL);

static int
lagg_modevent(module_t mod, int type, void *data)
{

	switch (type) {
	case MOD_LOAD:
		lagg_input_ethernet_p = lagg_input_ethernet;
		lagg_input_infiniband_p = lagg_input_infiniband;
		lagg_linkstate_p = lagg_port_state;
		lagg_detach_cookie = EVENTHANDLER_REGISTER(
		    ifnet_departure_event, lagg_port_ifdetach, NULL,
		    EVENTHANDLER_PRI_ANY);
		break;
	case MOD_UNLOAD:
		EVENTHANDLER_DEREGISTER(ifnet_departure_event,
		    lagg_detach_cookie);
		lagg_input_ethernet_p = NULL;
		lagg_input_infiniband_p = NULL;
		lagg_linkstate_p = NULL;
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (0);
}

static moduledata_t lagg_mod = {
	"if_lagg",
	lagg_modevent,
	0
};

DECLARE_MODULE(if_lagg, lagg_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
MODULE_VERSION(if_lagg, 1);
MODULE_DEPEND(if_lagg, if_infiniband, 1, 1, 1);

static void
lagg_proto_attach(struct lagg_softc *sc, lagg_proto pr)
{

	LAGG_XLOCK_ASSERT(sc);
	KASSERT(sc->sc_proto == LAGG_PROTO_NONE, ("%s: sc %p has proto",
	    __func__, sc));

	if (sc->sc_ifflags & IFF_DEBUG)
		if_printf(sc->sc_ifp, "using proto %u\n", pr);

	if (lagg_protos[pr].pr_attach != NULL)
		lagg_protos[pr].pr_attach(sc);
	sc->sc_proto = pr;
}

static void
lagg_proto_detach(struct lagg_softc *sc)
{
	lagg_proto pr;

	LAGG_XLOCK_ASSERT(sc);
	pr = sc->sc_proto;
	sc->sc_proto = LAGG_PROTO_NONE;

	if (lagg_protos[pr].pr_detach != NULL)
		lagg_protos[pr].pr_detach(sc);
}

static int
lagg_proto_start(struct lagg_softc *sc, struct mbuf *m)
{
	return (lagg_protos[sc->sc_proto].pr_start(sc, m));
}

static struct mbuf *
lagg_proto_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
	return (lagg_protos[sc->sc_proto].pr_input(sc, lp, m));
}

static int
lagg_proto_addport(struct lagg_softc *sc, struct lagg_port *lp)
{
	if (lagg_protos[sc->sc_proto].pr_addport == NULL)
		return (0);
	return (lagg_protos[sc->sc_proto].pr_addport(lp));
}

static void
lagg_proto_delport(struct lagg_softc *sc, struct lagg_port *lp)
{
	if (lagg_protos[sc->sc_proto].pr_delport != NULL)
		lagg_protos[sc->sc_proto].pr_delport(lp);
}

static void
lagg_proto_linkstate(struct lagg_softc *sc, struct lagg_port *lp)
{
	if (lagg_protos[sc->sc_proto].pr_linkstate != NULL)
		lagg_protos[sc->sc_proto].pr_linkstate(lp);
}

static void
lagg_proto_init(struct lagg_softc *sc)
{
	if (lagg_protos[sc->sc_proto].pr_init != NULL)
		lagg_protos[sc->sc_proto].pr_init(sc);
}

static void
lagg_proto_stop(struct lagg_softc *sc)
{
	if (lagg_protos[sc->sc_proto].pr_stop != NULL)
		lagg_protos[sc->sc_proto].pr_stop(sc);
}

static void
lagg_proto_lladdr(struct lagg_softc *sc)
{
	if (lagg_protos[sc->sc_proto].pr_lladdr != NULL)
		lagg_protos[sc->sc_proto].pr_lladdr(sc);
}

static void
lagg_proto_request(struct lagg_softc *sc, void *v)
{
	if (lagg_protos[sc->sc_proto].pr_request != NULL)
		lagg_protos[sc->sc_proto].pr_request(sc, v);
}

static void
lagg_proto_portreq(struct lagg_softc *sc, struct lagg_port *lp, void *v)
{
	if (lagg_protos[sc->sc_proto].pr_portreq != NULL)
		lagg_protos[sc->sc_proto].pr_portreq(lp, v);
}

/*
 * This routine is run via a vlan
 * config event.
 */
static void
lagg_register_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
{
	struct lagg_softc *sc = ifp->if_softc;
	struct lagg_port *lp;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		EVENTHANDLER_INVOKE(vlan_config, lp->lp_ifp, vtag);
}

/*
 * This routine is run via a vlan
 * unconfig event.
 */
static void
lagg_unregister_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
{
	struct lagg_softc *sc = ifp->if_softc;
	struct lagg_port *lp;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		EVENTHANDLER_INVOKE(vlan_unconfig, lp->lp_ifp, vtag);
}

static int
lagg_clone_create(struct if_clone *ifc, int unit, caddr_t params)
{
	struct iflaggparam iflp;
	struct lagg_softc *sc;
	struct ifnet *ifp;
	int if_type;
	int error;
	static const uint8_t eaddr[LAGG_ADDR_LEN];

	if (params != NULL) {
		error = copyin(params, &iflp, sizeof(iflp));
		if (error)
			return (error);

		switch (iflp.lagg_type) {
		case LAGG_TYPE_ETHERNET:
			if_type = IFT_ETHER;
			break;
		case LAGG_TYPE_INFINIBAND:
			if_type = IFT_INFINIBAND;
			break;
		default:
			return (EINVAL);
		}
	} else {
		if_type = IFT_ETHER;
	}

	sc = malloc(sizeof(*sc), M_LAGG, M_WAITOK|M_ZERO);
	ifp = sc->sc_ifp = if_alloc(if_type);
	if (ifp == NULL) {
		free(sc, M_LAGG);
		return (ENOSPC);
	}
	LAGG_SX_INIT(sc);

	mtx_init(&sc->sc_mtx, "lagg-mtx", NULL, MTX_DEF);
	callout_init_mtx(&sc->sc_watchdog, &sc->sc_mtx, 0);

	LAGG_XLOCK(sc);
	if (V_def_use_flowid)
		sc->sc_opts |= LAGG_OPT_USE_FLOWID;
	if (V_def_use_numa)
		sc->sc_opts |= LAGG_OPT_USE_NUMA;
	sc->flowid_shift = V_def_flowid_shift;

	/* Hash all layers by default */
	sc->sc_flags = MBUF_HASHFLAG_L2|MBUF_HASHFLAG_L3|MBUF_HASHFLAG_L4;

	lagg_proto_attach(sc, LAGG_PROTO_DEFAULT);
	LAGG_XUNLOCK(sc);

	CK_SLIST_INIT(&sc->sc_ports);

	switch (if_type) {
	case IFT_ETHER:
		/* Initialise pseudo media types */
		ifmedia_init(&sc->sc_media, 0, lagg_media_change,
		    lagg_media_status);
		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

		if_initname(ifp, laggname, unit);
		ifp->if_transmit = lagg_transmit_ethernet;
		break;
	case IFT_INFINIBAND:
		if_initname(ifp, laggname, unit);
		ifp->if_transmit = lagg_transmit_infiniband;
		break;
	default:
		break;
	}

	ifp->if_qflush = lagg_qflush;
	ifp->if_init = lagg_init;
	ifp->if_ioctl = lagg_ioctl;
	ifp->if_get_counter = lagg_get_counter;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
#if defined(KERN_TLS) || defined(RATELIMIT)
	ifp->if_snd_tag_alloc = lagg_snd_tag_alloc;
	ifp->if_snd_tag_modify = lagg_snd_tag_modify;
	ifp->if_snd_tag_query = lagg_snd_tag_query;
	ifp->if_snd_tag_free = lagg_snd_tag_free;
	ifp->if_next_snd_tag = lagg_next_snd_tag;
	ifp->if_ratelimit_query = lagg_ratelimit_query;
#endif
	ifp->if_capenable = ifp->if_capabilities = IFCAP_HWSTATS;

	/*
	 * Attach as an ordinary ethernet device; children will be attached
	 * as special devices of type IFT_IEEE8023ADLAG or IFT_INFINIBANDLAG.
	 */
	switch (if_type) {
	case IFT_ETHER:
		ether_ifattach(ifp, eaddr);
		break;
	case IFT_INFINIBAND:
		infiniband_ifattach(ifp, eaddr, sc->sc_bcast_addr);
		break;
	default:
		break;
	}

	sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    lagg_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
	sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    lagg_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);

	/* Insert into the global list of laggs */
	LAGG_LIST_LOCK();
	SLIST_INSERT_HEAD(&V_lagg_list, sc, sc_entries);
	LAGG_LIST_UNLOCK();

	return (0);
}

static void
lagg_clone_destroy(struct ifnet *ifp)
{
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
	struct lagg_port *lp;

	LAGG_XLOCK(sc);
	sc->sc_destroying = 1;
	lagg_stop(sc);
	ifp->if_flags &= ~IFF_UP;

	EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
	EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);

	/* Shutdown and remove lagg ports */
	while ((lp = CK_SLIST_FIRST(&sc->sc_ports)) != NULL)
		lagg_port_destroy(lp, 1);

	/* Unhook the aggregation protocol */
	lagg_proto_detach(sc);
	LAGG_XUNLOCK(sc);

	switch (ifp->if_type) {
	case IFT_ETHER:
		ifmedia_removeall(&sc->sc_media);
		ether_ifdetach(ifp);
		break;
	case IFT_INFINIBAND:
		infiniband_ifdetach(ifp);
		break;
	default:
		break;
	}
	if_free(ifp);

	LAGG_LIST_LOCK();
	SLIST_REMOVE(&V_lagg_list, sc, lagg_softc, sc_entries);
	LAGG_LIST_UNLOCK();

	mtx_destroy(&sc->sc_mtx);
	LAGG_SX_DESTROY(sc);
	free(sc, M_LAGG);
}

static void
lagg_capabilities(struct lagg_softc *sc)
{
	struct lagg_port *lp;
	int cap, ena, pena;
	uint64_t hwa;
	struct ifnet_hw_tsomax hw_tsomax;

	LAGG_XLOCK_ASSERT(sc);

	/* Get common enabled capabilities for the lagg ports */
	ena = ~0;
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		ena &= lp->lp_ifp->if_capenable;
	ena = (ena == ~0 ? 0 : ena);

	/*
	 * Apply common enabled capabilities back to the lagg ports.
	 * May require several iterations if they are interdependent.
	 */
	do {
		pena = ena;
		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
			lagg_setcaps(lp, ena);
			ena &= lp->lp_ifp->if_capenable;
		}
	} while (pena != ena);

	/* Get other capabilities from the lagg ports */
	cap = ~0;
	hwa = ~(uint64_t)0;
	memset(&hw_tsomax, 0, sizeof(hw_tsomax));
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		cap &= lp->lp_ifp->if_capabilities;
		hwa &= lp->lp_ifp->if_hwassist;
		if_hw_tsomax_common(lp->lp_ifp, &hw_tsomax);
	}
	cap = (cap == ~0 ? 0 : cap);
	hwa = (hwa == ~(uint64_t)0 ? 0 : hwa);

	if (sc->sc_ifp->if_capabilities != cap ||
	    sc->sc_ifp->if_capenable != ena ||
	    sc->sc_ifp->if_hwassist != hwa ||
	    if_hw_tsomax_update(sc->sc_ifp, &hw_tsomax) != 0) {
		sc->sc_ifp->if_capabilities = cap;
		sc->sc_ifp->if_capenable = ena;
		sc->sc_ifp->if_hwassist = hwa;
		getmicrotime(&sc->sc_ifp->if_lastchange);

		if (sc->sc_ifflags & IFF_DEBUG)
			if_printf(sc->sc_ifp,
			    "capabilities 0x%08x enabled 0x%08x\n", cap, ena);
	}
}

static int
lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp)
{
	struct lagg_softc *sc_ptr;
	struct lagg_port *lp, *tlp;
	struct ifreq ifr;
	int error, i, oldmtu;
	int if_type;
	uint64_t *pval;

	LAGG_XLOCK_ASSERT(sc);

	if (sc->sc_ifp == ifp) {
		if_printf(sc->sc_ifp,
		    "cannot add a lagg to itself as a port\n");
		return (EINVAL);
	}

	if (sc->sc_destroying == 1)
		return (ENXIO);

	/* Limit the maximal number of lagg ports */
	if (sc->sc_count >= LAGG_MAX_PORTS)
		return (ENOSPC);

	/* Check if port has already been associated to a lagg */
	if (ifp->if_lagg != NULL) {
		/* Port is already in the current lagg? */
		lp = (struct lagg_port *)ifp->if_lagg;
		if (lp->lp_softc == sc)
			return (EEXIST);
		return (EBUSY);
	}

	switch (sc->sc_ifp->if_type) {
	case IFT_ETHER:
		/* XXX Disallow non-ethernet interfaces (this should be any of 802) */
		if (ifp->if_type != IFT_ETHER && ifp->if_type != IFT_L2VLAN)
			return (EPROTONOSUPPORT);
		if_type = IFT_IEEE8023ADLAG;
		break;
	case IFT_INFINIBAND:
		/* XXX Disallow non-infiniband interfaces */
		if (ifp->if_type != IFT_INFINIBAND)
			return (EPROTONOSUPPORT);
		if_type = IFT_INFINIBANDLAG;
		break;
	default:
		break;
	}

	/* Allow the first Ethernet member to define the MTU */
	oldmtu = -1;
	if (CK_SLIST_EMPTY(&sc->sc_ports)) {
		sc->sc_ifp->if_mtu = ifp->if_mtu;
	} else if (sc->sc_ifp->if_mtu != ifp->if_mtu) {
		if (ifp->if_ioctl == NULL) {
			if_printf(sc->sc_ifp, "cannot change MTU for %s\n",
			    ifp->if_xname);
			return (EINVAL);
		}
		oldmtu = ifp->if_mtu;
		strlcpy(ifr.ifr_name, ifp->if_xname, sizeof(ifr.ifr_name));
		ifr.ifr_mtu = sc->sc_ifp->if_mtu;
		error = (*ifp->if_ioctl)(ifp, SIOCSIFMTU, (caddr_t)&ifr);
		if (error != 0) {
			if_printf(sc->sc_ifp, "invalid MTU for %s\n",
			    ifp->if_xname);
			return (error);
		}
		ifr.ifr_mtu = oldmtu;
	}

	lp = malloc(sizeof(struct lagg_port), M_LAGG, M_WAITOK|M_ZERO);
	lp->lp_softc = sc;

	/* Check if port is a stacked lagg */
	LAGG_LIST_LOCK();
	SLIST_FOREACH(sc_ptr, &V_lagg_list, sc_entries) {
		if (ifp == sc_ptr->sc_ifp) {
			LAGG_LIST_UNLOCK();
			free(lp, M_LAGG);
			if (oldmtu != -1)
				(*ifp->if_ioctl)(ifp, SIOCSIFMTU,
				    (caddr_t)&ifr);
			return (EINVAL);
			/* XXX disable stacking for the moment, it's untested */
#ifdef LAGG_PORT_STACKING
			lp->lp_flags |= LAGG_PORT_STACK;
			if (lagg_port_checkstacking(sc_ptr) >=
			    LAGG_MAX_STACKING) {
				LAGG_LIST_UNLOCK();
				free(lp, M_LAGG);
				if (oldmtu != -1)
					(*ifp->if_ioctl)(ifp, SIOCSIFMTU,
					    (caddr_t)&ifr);
				return (E2BIG);
			}
#endif
		}
	}
	LAGG_LIST_UNLOCK();

	if_ref(ifp);
	lp->lp_ifp = ifp;

	bcopy(IF_LLADDR(ifp), lp->lp_lladdr, ifp->if_addrlen);
	lp->lp_ifcapenable = ifp->if_capenable;
	if (CK_SLIST_EMPTY(&sc->sc_ports)) {
		bcopy(IF_LLADDR(ifp), IF_LLADDR(sc->sc_ifp), ifp->if_addrlen);
		lagg_proto_lladdr(sc);
		EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
	} else {
		if_setlladdr(ifp, IF_LLADDR(sc->sc_ifp), ifp->if_addrlen);
	}
	lagg_setflags(lp, 1);

	if (CK_SLIST_EMPTY(&sc->sc_ports))
		sc->sc_primary = lp;

	/* Change the interface type */
	lp->lp_iftype = ifp->if_type;
	ifp->if_type = if_type;
	ifp->if_lagg = lp;
	lp->lp_ioctl = ifp->if_ioctl;
	ifp->if_ioctl = lagg_port_ioctl;
	lp->lp_output = ifp->if_output;
	ifp->if_output = lagg_port_output;

	/* Read port counters */
	pval = lp->port_counters.val;
	for (i = 0; i < IFCOUNTERS; i++, pval++)
		*pval = ifp->if_get_counter(ifp, i);

	/*
	 * Insert into the list of ports.
	 * Keep ports sorted by if_index so that configuration is
	 * predictable and repeating the same `ifconfig laggN create ...`
	 * commands leads to the same result each time.
	 */
	CK_SLIST_FOREACH(tlp, &sc->sc_ports, lp_entries) {
		if (tlp->lp_ifp->if_index < ifp->if_index && (
		    CK_SLIST_NEXT(tlp, lp_entries) == NULL ||
		    ((struct lagg_port*)CK_SLIST_NEXT(tlp, lp_entries))->lp_ifp->if_index >
		    ifp->if_index))
			break;
	}
	if (tlp != NULL)
		CK_SLIST_INSERT_AFTER(tlp, lp, lp_entries);
	else
		CK_SLIST_INSERT_HEAD(&sc->sc_ports, lp, lp_entries);
	sc->sc_count++;

	lagg_setmulti(lp);

	if ((error = lagg_proto_addport(sc, lp)) != 0) {
		/* Remove the port, without calling pr_delport. */
		lagg_port_destroy(lp, 0);
		if (oldmtu != -1)
			(*ifp->if_ioctl)(ifp, SIOCSIFMTU, (caddr_t)&ifr);
		return (error);
	}

	/* Update lagg capabilities */
	lagg_capabilities(sc);
	lagg_linkstate(sc);

	return (0);
}

#ifdef LAGG_PORT_STACKING
static int
lagg_port_checkstacking(struct lagg_softc *sc)
{
	struct lagg_softc *sc_ptr;
	struct lagg_port *lp;
	int m = 0;

	LAGG_SXLOCK_ASSERT(sc);
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		if (lp->lp_flags & LAGG_PORT_STACK) {
			sc_ptr = (struct lagg_softc *)lp->lp_ifp->if_softc;
			m = MAX(m, lagg_port_checkstacking(sc_ptr));
		}
	}

	return (m + 1);
}
#endif

static void
lagg_port_destroy_cb(epoch_context_t ec)
{
	struct lagg_port *lp;
	struct ifnet *ifp;

	lp = __containerof(ec, struct lagg_port, lp_epoch_ctx);
	ifp = lp->lp_ifp;

	if_rele(ifp);
	free(lp, M_LAGG);
}

static int
lagg_port_destroy(struct lagg_port *lp, int rundelport)
{
	struct lagg_softc *sc = lp->lp_softc;
	struct lagg_port *lp_ptr, *lp0;
	struct ifnet *ifp = lp->lp_ifp;
	uint64_t *pval, vdiff;
	int i;

	LAGG_XLOCK_ASSERT(sc);

	if (rundelport)
		lagg_proto_delport(sc, lp);

	if (lp->lp_detaching == 0)
		lagg_clrmulti(lp);

	/* Restore interface */
	ifp->if_type = lp->lp_iftype;
	ifp->if_ioctl = lp->lp_ioctl;
	ifp->if_output = lp->lp_output;
	ifp->if_lagg = NULL;

	/* Update detached port counters */
	pval = lp->port_counters.val;
	for (i = 0; i < IFCOUNTERS; i++, pval++) {
		vdiff = ifp->if_get_counter(ifp, i) - *pval;
		sc->detached_counters.val[i] += vdiff;
	}

	/* Finally, remove the port from the lagg */
	CK_SLIST_REMOVE(&sc->sc_ports, lp, lagg_port, lp_entries);
	sc->sc_count--;

	/* Update the primary interface */
	if (lp == sc->sc_primary) {
		uint8_t lladdr[LAGG_ADDR_LEN];

		if ((lp0 = CK_SLIST_FIRST(&sc->sc_ports)) == NULL)
			bzero(&lladdr, LAGG_ADDR_LEN);
		else
			bcopy(lp0->lp_lladdr, lladdr, LAGG_ADDR_LEN);
		sc->sc_primary = lp0;
		if (sc->sc_destroying == 0) {
			bcopy(lladdr, IF_LLADDR(sc->sc_ifp), sc->sc_ifp->if_addrlen);
			lagg_proto_lladdr(sc);
			EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);

			/*
			 * Update lladdr for each port (new primary needs update
			 * as well, to switch from old lladdr to its 'real' one).
			 * We can skip this if the lagg is being destroyed.
			 */
			CK_SLIST_FOREACH(lp_ptr, &sc->sc_ports, lp_entries)
				if_setlladdr(lp_ptr->lp_ifp, lladdr,
				    lp_ptr->lp_ifp->if_addrlen);
		}
	}

	if (lp->lp_ifflags)
		if_printf(ifp, "%s: lp_ifflags unclean\n", __func__);

	if (lp->lp_detaching == 0) {
		lagg_setflags(lp, 0);
		lagg_setcaps(lp, lp->lp_ifcapenable);
		if_setlladdr(ifp, lp->lp_lladdr, ifp->if_addrlen);
	}

	/*
	 * Free the port and release its ifnet reference after a grace
	 * period has elapsed.
	 */
	NET_EPOCH_CALL(lagg_port_destroy_cb, &lp->lp_epoch_ctx);
	/* Update lagg capabilities */
	lagg_capabilities(sc);
	lagg_linkstate(sc);

	return (0);
}

static int
lagg_port_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct epoch_tracker et;
	struct lagg_reqport *rp = (struct lagg_reqport *)data;
	struct lagg_softc *sc;
	struct lagg_port *lp = NULL;
	int error = 0;

	/* Should be checked by the caller */
	switch (ifp->if_type) {
	case IFT_IEEE8023ADLAG:
	case IFT_INFINIBANDLAG:
		if ((lp = ifp->if_lagg) == NULL || (sc = lp->lp_softc) == NULL)
			goto fallback;
		break;
	default:
		goto fallback;
	}

	switch (cmd) {
	case SIOCGLAGGPORT:
		if (rp->rp_portname[0] == '\0' ||
		    ifunit(rp->rp_portname) != ifp) {
			error = EINVAL;
			break;
		}

		NET_EPOCH_ENTER(et);
		if ((lp = ifp->if_lagg) == NULL || lp->lp_softc != sc) {
			error = ENOENT;
			NET_EPOCH_EXIT(et);
			break;
		}

		lagg_port2req(lp, rp);
		NET_EPOCH_EXIT(et);
		break;

	case SIOCSIFCAP:
		if (lp->lp_ioctl == NULL) {
			error = EINVAL;
			break;
		}
		error = (*lp->lp_ioctl)(ifp, cmd, data);
		if (error)
			break;

		/* Update lagg interface capabilities */
		LAGG_XLOCK(sc);
		lagg_capabilities(sc);
		LAGG_XUNLOCK(sc);
		VLAN_CAPABILITIES(sc->sc_ifp);
		break;

	case SIOCSIFMTU:
		/* Do not allow the MTU to be changed once joined */
		error = EINVAL;
		break;

	default:
		goto fallback;
	}

	return (error);

fallback:
	if (lp != NULL && lp->lp_ioctl != NULL)
		return ((*lp->lp_ioctl)(ifp, cmd, data));

	return (EINVAL);
}

/*
 * Requests counter @cnt data.
 *
 * Counter value is calculated the following way:
 * 1) for each port, sum the difference between current and "initial"
 *    measurements.
 * 2) add lagg logical interface counters.
 * 3) add data from the detached_counters array.
 *
 * We also do the following things on port attach/detach:
 * 1) On port attach we store all the counters it has into the
 *    port_counters array.
 * 2) On port detach we add the difference between "initial" and
 *    current counters data to the detached_counters array.
 */
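
/*
 * Worked example with made-up numbers: a port attached while its
 * IFCOUNTER_IPACKETS read 1000 and now reading 1500 contributes 500
 * (step 1); a port that detached after contributing 300 lives on in
 * detached_counters (step 3); with 20 packets counted on the lagg
 * interface itself (step 2) the reported total is 500 + 20 + 300 = 820.
 */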
static uint64_t
lagg_get_counter(struct ifnet *ifp, ift_counter cnt)
{
	struct epoch_tracker et;
	struct lagg_softc *sc;
	struct lagg_port *lp;
	struct ifnet *lpifp;
	uint64_t newval, oldval, vsum;

	/* Revise this when we've got non-generic counters. */
	KASSERT(cnt < IFCOUNTERS, ("%s: invalid cnt %d", __func__, cnt));

	sc = (struct lagg_softc *)ifp->if_softc;

	vsum = 0;
	NET_EPOCH_ENTER(et);
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		/* Saved attached value */
		oldval = lp->port_counters.val[cnt];
		/* current value */
		lpifp = lp->lp_ifp;
		newval = lpifp->if_get_counter(lpifp, cnt);
		/* Calculate diff and save new */
		vsum += newval - oldval;
	}
	NET_EPOCH_EXIT(et);

	/*
	 * Add counter data which might be added by upper
	 * layer protocols operating on logical interface.
	 */
	vsum += if_get_counter_default(ifp, cnt);

	/*
	 * Add counter data from detached ports counters.
	 */
	vsum += sc->detached_counters.val[cnt];

	return (vsum);
}

/*
 * For direct output to child ports.
 */
static int
lagg_port_output(struct ifnet *ifp, struct mbuf *m,
	const struct sockaddr *dst, struct route *ro)
{
	struct lagg_port *lp = ifp->if_lagg;

	switch (dst->sa_family) {
		case pseudo_AF_HDRCMPLT:
		case AF_UNSPEC:
			if (lp != NULL)
				return ((*lp->lp_output)(ifp, m, dst, ro));
	}

	/* drop any other frames */
	m_freem(m);
	return (ENETDOWN);
}

static void
lagg_port_ifdetach(void *arg __unused, struct ifnet *ifp)
{
	struct lagg_port *lp;
	struct lagg_softc *sc;

	if ((lp = ifp->if_lagg) == NULL)
		return;
	/* If the ifnet is just being renamed, don't do anything. */
	if (ifp->if_flags & IFF_RENAMING)
		return;

	sc = lp->lp_softc;

	LAGG_XLOCK(sc);
	lp->lp_detaching = 1;
	lagg_port_destroy(lp, 1);
	LAGG_XUNLOCK(sc);
	VLAN_CAPABILITIES(sc->sc_ifp);
}

static void
lagg_port2req(struct lagg_port *lp, struct lagg_reqport *rp)
{
	struct lagg_softc *sc = lp->lp_softc;

	strlcpy(rp->rp_ifname, sc->sc_ifname, sizeof(rp->rp_ifname));
	strlcpy(rp->rp_portname, lp->lp_ifp->if_xname, sizeof(rp->rp_portname));
	rp->rp_prio = lp->lp_prio;
	rp->rp_flags = lp->lp_flags;
	lagg_proto_portreq(sc, lp, &rp->rp_psc);

	/* Add protocol specific flags */
	switch (sc->sc_proto) {
	case LAGG_PROTO_FAILOVER:
		if (lp == sc->sc_primary)
			rp->rp_flags |= LAGG_PORT_MASTER;
		if (lp == lagg_link_active(sc, sc->sc_primary))
			rp->rp_flags |= LAGG_PORT_ACTIVE;
		break;

	case LAGG_PROTO_ROUNDROBIN:
	case LAGG_PROTO_LOADBALANCE:
	case LAGG_PROTO_BROADCAST:
		if (LAGG_PORTACTIVE(lp))
			rp->rp_flags |= LAGG_PORT_ACTIVE;
		break;

	case LAGG_PROTO_LACP:
		/* LACP has a different definition of active */
		if (lacp_isactive(lp))
			rp->rp_flags |= LAGG_PORT_ACTIVE;
		if (lacp_iscollecting(lp))
			rp->rp_flags |= LAGG_PORT_COLLECTING;
		if (lacp_isdistributing(lp))
			rp->rp_flags |= LAGG_PORT_DISTRIBUTING;
		break;
	}
}

static void
lagg_watchdog_infiniband(void *arg)
{
	struct epoch_tracker et;
	struct lagg_softc *sc;
	struct lagg_port *lp;
	struct ifnet *ifp;
	struct ifnet *lp_ifp;

	sc = arg;

	/*
	 * Because infiniband nodes have a fixed MAC address, which is
	 * generated by the so-called GID, we need to regularly update
	 * the link level address of the parent lagg<N> device when
	 * the active port changes. Possibly we could piggy-back on
	 * link up/down events as well, but using a timer also provides
	 * a guarantee against too frequent events. This operation
	 * does not have to be atomic.
	 */
	NET_EPOCH_ENTER(et);
	lp = lagg_link_active(sc, sc->sc_primary);
	if (lp != NULL) {
		ifp = sc->sc_ifp;
		lp_ifp = lp->lp_ifp;

		if (ifp != NULL && lp_ifp != NULL &&
		    (memcmp(IF_LLADDR(ifp), IF_LLADDR(lp_ifp), ifp->if_addrlen) != 0 ||
		     memcmp(sc->sc_bcast_addr, lp_ifp->if_broadcastaddr, ifp->if_addrlen) != 0)) {
			memcpy(IF_LLADDR(ifp), IF_LLADDR(lp_ifp), ifp->if_addrlen);
			memcpy(sc->sc_bcast_addr, lp_ifp->if_broadcastaddr, ifp->if_addrlen);

			CURVNET_SET(ifp->if_vnet);
			EVENTHANDLER_INVOKE(iflladdr_event, ifp);
			CURVNET_RESTORE();
		}
	}
	NET_EPOCH_EXIT(et);

	callout_reset(&sc->sc_watchdog, hz, &lagg_watchdog_infiniband, arg);
}

static void
lagg_init(void *xsc)
{
	struct lagg_softc *sc = (struct lagg_softc *)xsc;
	struct ifnet *ifp = sc->sc_ifp;
	struct lagg_port *lp;

	LAGG_XLOCK(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		LAGG_XUNLOCK(sc);
		return;
	}

	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	/*
	 * Update the port lladdrs if needed.
	 * This might be an if_setlladdr() notification
	 * that the lladdr has been changed.
	 */
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		if (memcmp(IF_LLADDR(ifp), IF_LLADDR(lp->lp_ifp),
		    ifp->if_addrlen) != 0)
			if_setlladdr(lp->lp_ifp, IF_LLADDR(ifp), ifp->if_addrlen);
	}

	lagg_proto_init(sc);

	if (ifp->if_type == IFT_INFINIBAND) {
		mtx_lock(&sc->sc_mtx);
		lagg_watchdog_infiniband(sc);
		mtx_unlock(&sc->sc_mtx);
	}

	LAGG_XUNLOCK(sc);
}

static void
lagg_stop(struct lagg_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	LAGG_XLOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	lagg_proto_stop(sc);

	mtx_lock(&sc->sc_mtx);
	callout_stop(&sc->sc_watchdog);
	mtx_unlock(&sc->sc_mtx);

	callout_drain(&sc->sc_watchdog);
}

static int
lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct epoch_tracker et;
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
	struct lagg_reqall *ra = (struct lagg_reqall *)data;
	struct lagg_reqopts *ro = (struct lagg_reqopts *)data;
	struct lagg_reqport *rp = (struct lagg_reqport *)data, rpbuf;
	struct lagg_reqflags *rf = (struct lagg_reqflags *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	struct lagg_port *lp;
	struct ifnet *tpif;
	struct thread *td = curthread;
	char *buf, *outbuf;
	int count, buflen, len, error = 0, oldmtu;

	bzero(&rpbuf, sizeof(rpbuf));

	/* XXX: This can race with lagg_clone_destroy. */

	switch (cmd) {
	case SIOCGLAGG:
		LAGG_XLOCK(sc);
		buflen = sc->sc_count * sizeof(struct lagg_reqport);
		outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
		ra->ra_proto = sc->sc_proto;
		lagg_proto_request(sc, &ra->ra_psc);
		count = 0;
		buf = outbuf;
		len = min(ra->ra_size, buflen);
		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
			if (len < sizeof(rpbuf))
				break;

			lagg_port2req(lp, &rpbuf);
			memcpy(buf, &rpbuf, sizeof(rpbuf));
			count++;
			buf += sizeof(rpbuf);
			len -= sizeof(rpbuf);
		}
		LAGG_XUNLOCK(sc);
		ra->ra_ports = count;
		ra->ra_size = count * sizeof(rpbuf);
		error = copyout(outbuf, ra->ra_port, ra->ra_size);
		free(outbuf, M_TEMP);
		break;
	case SIOCSLAGG:
		error = priv_check(td, PRIV_NET_LAGG);
		if (error)
			break;
		if (ra->ra_proto >= LAGG_PROTO_MAX) {
			error = EPROTONOSUPPORT;
			break;
		}
		/* Infiniband only supports the failover protocol. */
		if (ra->ra_proto != LAGG_PROTO_FAILOVER &&
		    ifp->if_type == IFT_INFINIBAND) {
			error = EPROTONOSUPPORT;
			break;
		}
		LAGG_XLOCK(sc);
		lagg_proto_detach(sc);
		lagg_proto_attach(sc, ra->ra_proto);
		LAGG_XUNLOCK(sc);
		break;
	case SIOCGLAGGOPTS:
		LAGG_XLOCK(sc);
		ro->ro_opts = sc->sc_opts;
		if (sc->sc_proto == LAGG_PROTO_LACP) {
			struct lacp_softc *lsc;

			lsc = (struct lacp_softc *)sc->sc_psc;
			if (lsc->lsc_debug.lsc_tx_test != 0)
				ro->ro_opts |= LAGG_OPT_LACP_TXTEST;
			if (lsc->lsc_debug.lsc_rx_test != 0)
				ro->ro_opts |= LAGG_OPT_LACP_RXTEST;
			if (lsc->lsc_strict_mode != 0)
				ro->ro_opts |= LAGG_OPT_LACP_STRICT;
			if (lsc->lsc_fast_timeout != 0)
				ro->ro_opts |= LAGG_OPT_LACP_FAST_TIMO;

			ro->ro_active = sc->sc_active;
		} else {
			ro->ro_active = 0;
			CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
				ro->ro_active += LAGG_PORTACTIVE(lp);
		}
		ro->ro_bkt = sc->sc_stride;
		ro->ro_flapping = sc->sc_flapping;
		ro->ro_flowid_shift = sc->flowid_shift;
		LAGG_XUNLOCK(sc);
		break;
	case SIOCSLAGGOPTS:
		error = priv_check(td, PRIV_NET_LAGG);
		if (error)
			break;

		/*
		 * The stride option was added without defining a corresponding
		 * LAGG_OPT flag, so handle a non-zero value before checking
		 * anything else to preserve compatibility.
		 */
		LAGG_XLOCK(sc);
		if (ro->ro_opts == 0 && ro->ro_bkt != 0) {
			if (sc->sc_proto != LAGG_PROTO_ROUNDROBIN) {
				LAGG_XUNLOCK(sc);
				error = EINVAL;
				break;
			}
			sc->sc_stride = ro->ro_bkt;
		}
		if (ro->ro_opts == 0) {
			LAGG_XUNLOCK(sc);
			break;
		}

		/*
		 * Set options.  LACP options are stored in sc->sc_psc,
		 * not in sc_opts.
		 */
		int valid, lacp;

		switch (ro->ro_opts) {
		case LAGG_OPT_USE_FLOWID:
		case -LAGG_OPT_USE_FLOWID:
		case LAGG_OPT_USE_NUMA:
		case -LAGG_OPT_USE_NUMA:
		case LAGG_OPT_FLOWIDSHIFT:
		case LAGG_OPT_RR_LIMIT:
			valid = 1;
			lacp = 0;
			break;
		case LAGG_OPT_LACP_TXTEST:
		case -LAGG_OPT_LACP_TXTEST:
		case LAGG_OPT_LACP_RXTEST:
		case -LAGG_OPT_LACP_RXTEST:
		case LAGG_OPT_LACP_STRICT:
		case -LAGG_OPT_LACP_STRICT:
		case LAGG_OPT_LACP_FAST_TIMO:
		case -LAGG_OPT_LACP_FAST_TIMO:
			valid = lacp = 1;
			break;
		default:
			valid = lacp = 0;
			break;
		}

		if (valid == 0 ||
		    (lacp == 1 && sc->sc_proto != LAGG_PROTO_LACP)) {
			/* Invalid combination of options specified. */
			error = EINVAL;
			LAGG_XUNLOCK(sc);
			break;	/* Return from SIOCSLAGGOPTS. */
		}

		/*
		 * Store new options into sc->sc_opts except for
		 * FLOWIDSHIFT, RR and LACP options.
		 */
		if (lacp == 0) {
			if (ro->ro_opts == LAGG_OPT_FLOWIDSHIFT)
				sc->flowid_shift = ro->ro_flowid_shift;
			else if (ro->ro_opts == LAGG_OPT_RR_LIMIT) {
				if (sc->sc_proto != LAGG_PROTO_ROUNDROBIN ||
				    ro->ro_bkt == 0) {
					error = EINVAL;
					LAGG_XUNLOCK(sc);
					break;
				}
				sc->sc_stride = ro->ro_bkt;
			} else if (ro->ro_opts > 0)
				sc->sc_opts |= ro->ro_opts;
			else
				sc->sc_opts &= ~ro->ro_opts;
		} else {
			struct lacp_softc *lsc;
			struct lacp_port *lp;

			lsc = (struct lacp_softc *)sc->sc_psc;

			switch (ro->ro_opts) {
			case LAGG_OPT_LACP_TXTEST:
				lsc->lsc_debug.lsc_tx_test = 1;
				break;
			case -LAGG_OPT_LACP_TXTEST:
				lsc->lsc_debug.lsc_tx_test = 0;
				break;
			case LAGG_OPT_LACP_RXTEST:
				lsc->lsc_debug.lsc_rx_test = 1;
				break;
			case -LAGG_OPT_LACP_RXTEST:
				lsc->lsc_debug.lsc_rx_test = 0;
				break;
			case LAGG_OPT_LACP_STRICT:
				lsc->lsc_strict_mode = 1;
				break;
			case -LAGG_OPT_LACP_STRICT:
				lsc->lsc_strict_mode = 0;
				break;
			case LAGG_OPT_LACP_FAST_TIMO:
				LACP_LOCK(lsc);
				LIST_FOREACH(lp, &lsc->lsc_ports, lp_next)
					lp->lp_state |= LACP_STATE_TIMEOUT;
				LACP_UNLOCK(lsc);
				lsc->lsc_fast_timeout = 1;
				break;
			case -LAGG_OPT_LACP_FAST_TIMO:
				LACP_LOCK(lsc);
				LIST_FOREACH(lp, &lsc->lsc_ports, lp_next)
					lp->lp_state &= ~LACP_STATE_TIMEOUT;
				LACP_UNLOCK(lsc);
				lsc->lsc_fast_timeout = 0;
				break;
			}
		}
		LAGG_XUNLOCK(sc);
		break;
	case SIOCGLAGGFLAGS:
		rf->rf_flags = 0;
		LAGG_XLOCK(sc);
		if (sc->sc_flags & MBUF_HASHFLAG_L2)
			rf->rf_flags |= LAGG_F_HASHL2;
		if (sc->sc_flags & MBUF_HASHFLAG_L3)
			rf->rf_flags |= LAGG_F_HASHL3;
		if (sc->sc_flags & MBUF_HASHFLAG_L4)
			rf->rf_flags |= LAGG_F_HASHL4;
		LAGG_XUNLOCK(sc);
		break;
	case SIOCSLAGGHASH:
		error = priv_check(td, PRIV_NET_LAGG);
		if (error)
			break;
		if ((rf->rf_flags & LAGG_F_HASHMASK) == 0) {
			error = EINVAL;
			break;
		}
		LAGG_XLOCK(sc);
		sc->sc_flags = 0;
		if (rf->rf_flags & LAGG_F_HASHL2)
			sc->sc_flags |= MBUF_HASHFLAG_L2;
		if (rf->rf_flags & LAGG_F_HASHL3)
			sc->sc_flags |= MBUF_HASHFLAG_L3;
		if (rf->rf_flags & LAGG_F_HASHL4)
			sc->sc_flags |= MBUF_HASHFLAG_L4;
		LAGG_XUNLOCK(sc);
		break;
	case SIOCGLAGGPORT:
		if (rp->rp_portname[0] == '\0' ||
		    (tpif = ifunit_ref(rp->rp_portname)) == NULL) {
			error = EINVAL;
			break;
		}

		NET_EPOCH_ENTER(et);
		if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL ||
		    lp->lp_softc != sc) {
			error = ENOENT;
			NET_EPOCH_EXIT(et);
			if_rele(tpif);
			break;
		}

		lagg_port2req(lp, rp);
		NET_EPOCH_EXIT(et);
		if_rele(tpif);
		break;
	case SIOCSLAGGPORT:
		error = priv_check(td, PRIV_NET_LAGG);
		if (error)
			break;
		if (rp->rp_portname[0] == '\0' ||
		    (tpif = ifunit_ref(rp->rp_portname)) == NULL) {
			error = EINVAL;
			break;
		}
#ifdef INET6
		/*
		 * A laggport interface should not have an inet6 address
		 * because two interfaces with a valid link-local
		 * scope zone must not be merged in any form. This
		 * restriction is needed to prevent violation of
		 * link-local scope zone. Attempts to add a laggport
		 * interface which has inet6 addresses trigger
		 * removal of all inet6 addresses on the member
		 * interface.
		 */
		if (in6ifa_llaonifp(tpif)) {
			in6_ifdetach(tpif);
			if_printf(sc->sc_ifp,
			    "IPv6 addresses on %s have been removed "
			    "before adding it as a member to prevent "
			    "IPv6 address scope violation.\n",
			    tpif->if_xname);
		}
#endif
		oldmtu = ifp->if_mtu;
		LAGG_XLOCK(sc);
		error = lagg_port_create(sc, tpif);
		LAGG_XUNLOCK(sc);
		if_rele(tpif);

		/*
		 * LAGG MTU may change during addition of the first port.
		 * If it did, do the network layer specific procedure.
		 */
		if (ifp->if_mtu != oldmtu) {
#ifdef INET6
			nd6_setmtu(ifp);
#endif
		}
		VLAN_CAPABILITIES(ifp);
		break;
	case SIOCSLAGGDELPORT:
		error = priv_check(td, PRIV_NET_LAGG);
		if (error)
			break;
		if (rp->rp_portname[0] == '\0' ||
		    (tpif = ifunit_ref(rp->rp_portname)) == NULL) {
			error = EINVAL;
			break;
		}

		LAGG_XLOCK(sc);
		if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL ||
		    lp->lp_softc != sc) {
			error = ENOENT;
			LAGG_XUNLOCK(sc);
			if_rele(tpif);
			break;
		}

		error = lagg_port_destroy(lp, 1);
		LAGG_XUNLOCK(sc);
		if_rele(tpif);
		VLAN_CAPABILITIES(ifp);
		break;
	case SIOCSIFFLAGS:
		/* Set flags on ports too */
		LAGG_XLOCK(sc);
		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
			lagg_setflags(lp, 1);
		}

		if (!(ifp->if_flags & IFF_UP) &&
		    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
			/*
			 * If interface is marked down and it is running,
			 * then stop and disable it.
			 */
			lagg_stop(sc);
			LAGG_XUNLOCK(sc);
		} else if ((ifp->if_flags & IFF_UP) &&
		    !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			LAGG_XUNLOCK(sc);
			(*ifp->if_init)(sc);
		} else
			LAGG_XUNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		LAGG_XLOCK(sc);
		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
			lagg_clrmulti(lp);
			lagg_setmulti(lp);
		}
		LAGG_XUNLOCK(sc);
		error = 0;
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		if (ifp->if_type == IFT_INFINIBAND)
			error = EINVAL;
		else
			error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCSIFCAP:
		LAGG_XLOCK(sc);
		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
			if (lp->lp_ioctl != NULL)
				(*lp->lp_ioctl)(lp->lp_ifp, cmd, data);
		}
		lagg_capabilities(sc);
		LAGG_XUNLOCK(sc);
		VLAN_CAPABILITIES(ifp);
		error = 0;
		break;

	case SIOCSIFMTU:
		LAGG_XLOCK(sc);
		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
			if (lp->lp_ioctl != NULL)
				error = (*lp->lp_ioctl)(lp->lp_ifp, cmd, data);
			else
				error = EINVAL;
			if (error != 0) {
				if_printf(ifp,
				    "failed to change MTU to %d on port %s, "
				    "reverting all ports to original MTU (%d)\n",
				    ifr->ifr_mtu, lp->lp_ifp->if_xname, ifp->if_mtu);
				break;
			}
		}
		if (error == 0) {
			ifp->if_mtu = ifr->ifr_mtu;
		} else {
			/* set every port back to the original MTU */
			ifr->ifr_mtu = ifp->if_mtu;
			CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
				if (lp->lp_ioctl != NULL)
					(*lp->lp_ioctl)(lp->lp_ifp, cmd, data);
			}
		}
		LAGG_XUNLOCK(sc);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

#if defined(KERN_TLS) || defined(RATELIMIT)
static inline struct lagg_snd_tag *
mst_to_lst(struct m_snd_tag *mst)
{

	return (__containerof(mst, struct lagg_snd_tag, com));
}

/*
 * Look up the port used by a specific flow. This only works for lagg
 * protocols with deterministic port mappings (e.g. not roundrobin).
 * In addition, protocols which use a hash to map flows to ports must
 * be configured to use the mbuf flowid rather than hashing packet
 * contents.
 */
static struct lagg_port *
lookup_snd_tag_port(struct ifnet *ifp, uint32_t flowid, uint32_t flowtype,
    uint8_t numa_domain)
{
	struct lagg_softc *sc;
	struct lagg_port *lp;
	struct lagg_lb *lb;
	uint32_t hash, p;
	int err;

	sc = ifp->if_softc;

	switch (sc->sc_proto) {
	case LAGG_PROTO_FAILOVER:
		return (lagg_link_active(sc, sc->sc_primary));
	case LAGG_PROTO_LOADBALANCE:
		if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) == 0 ||
		    flowtype == M_HASHTYPE_NONE)
			return (NULL);
		p = flowid >> sc->flowid_shift;
		p %= sc->sc_count;
		lb = (struct lagg_lb *)sc->sc_psc;
		lp = lb->lb_ports[p];
		return (lagg_link_active(sc, lp));
	case LAGG_PROTO_LACP:
		if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) == 0 ||
		    flowtype == M_HASHTYPE_NONE)
			return (NULL);
		hash = flowid >> sc->flowid_shift;
		return (lacp_select_tx_port_by_hash(sc, hash, numa_domain, &err));
	default:
		return (NULL);
	}
}

static int
lagg_snd_tag_alloc(struct ifnet *ifp,
    union if_snd_tag_alloc_params *params,
    struct m_snd_tag **ppmt)
{
	struct epoch_tracker et;
	struct lagg_snd_tag *lst;
	struct lagg_softc *sc;
	struct lagg_port *lp;
	struct ifnet *lp_ifp;
	int error;

	sc = ifp->if_softc;

	NET_EPOCH_ENTER(et);
	lp = lookup_snd_tag_port(ifp, params->hdr.flowid,
	    params->hdr.flowtype, params->hdr.numa_domain);
	if (lp == NULL) {
		NET_EPOCH_EXIT(et);
		return (EOPNOTSUPP);
	}
	if (lp->lp_ifp == NULL) {
		NET_EPOCH_EXIT(et);
		return (EOPNOTSUPP);
	}
	lp_ifp = lp->lp_ifp;
	if_ref(lp_ifp);
	NET_EPOCH_EXIT(et);

	lst = malloc(sizeof(*lst), M_LAGG, M_NOWAIT);
	if (lst == NULL) {
		if_rele(lp_ifp);
		return (ENOMEM);
	}

	error = m_snd_tag_alloc(lp_ifp, params, &lst->tag);
	if_rele(lp_ifp);
	if (error) {
		free(lst, M_LAGG);
		return (error);
	}

	m_snd_tag_init(&lst->com, ifp, lst->tag->type);

	*ppmt = &lst->com;
	return (0);
}

static struct m_snd_tag *
lagg_next_snd_tag(struct m_snd_tag *mst)
{
	struct lagg_snd_tag *lst;

	lst = mst_to_lst(mst);
	return (lst->tag);
}

static int
lagg_snd_tag_modify(struct m_snd_tag *mst,
    union if_snd_tag_modify_params *params)
{
	struct lagg_snd_tag *lst;

	lst = mst_to_lst(mst);
	return (lst->tag->ifp->if_snd_tag_modify(lst->tag, params));
}

static int
lagg_snd_tag_query(struct m_snd_tag *mst,
    union if_snd_tag_query_params *params)
{
	struct lagg_snd_tag *lst;

	lst = mst_to_lst(mst);
	return (lst->tag->ifp->if_snd_tag_query(lst->tag, params));
}

static void
lagg_snd_tag_free(struct m_snd_tag *mst)
{
	struct lagg_snd_tag *lst;

	lst = mst_to_lst(mst);
	m_snd_tag_rele(lst->tag);
	free(lst, M_LAGG);
}

static void
lagg_ratelimit_query(struct ifnet *ifp __unused, struct if_ratelimit_query_results *q)
{
	/*
	 * For lagg, we have an indirect interface. The caller needs to
	 * get a ratelimit tag on the actual interface the flow will go on.
	 */
	q->rate_table = NULL;
	q->flags = RT_IS_INDIRECT;
	q->max_flows = 0;
	q->number_of_rates = 0;
}
#endif
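
/*
 * Illustrative note (editorial sketch): a consumer that gets
 * RT_IS_INDIRECT back can reach the port's real tag through the
 * if_next_snd_tag() entry point, which for lagg resolves to lst->tag
 * (see lagg_next_snd_tag() above).
 */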

static int
lagg_setmulti(struct lagg_port *lp)
{
	struct lagg_softc *sc = lp->lp_softc;
	struct ifnet *ifp = lp->lp_ifp;
	struct ifnet *scifp = sc->sc_ifp;
	struct lagg_mc *mc;
	struct ifmultiaddr *ifma;
	int error;

	IF_ADDR_WLOCK(scifp);
	CK_STAILQ_FOREACH(ifma, &scifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		mc = malloc(sizeof(struct lagg_mc), M_LAGG, M_NOWAIT);
		if (mc == NULL) {
			IF_ADDR_WUNLOCK(scifp);
			return (ENOMEM);
		}
		bcopy(ifma->ifma_addr, &mc->mc_addr,
		    ifma->ifma_addr->sa_len);
		mc->mc_addr.sdl_index = ifp->if_index;
		mc->mc_ifma = NULL;
		SLIST_INSERT_HEAD(&lp->lp_mc_head, mc, mc_entries);
	}
	IF_ADDR_WUNLOCK(scifp);
	SLIST_FOREACH (mc, &lp->lp_mc_head, mc_entries) {
		error = if_addmulti(ifp,
		    (struct sockaddr *)&mc->mc_addr, &mc->mc_ifma);
		if (error)
			return (error);
	}
	return (0);
}

static int
lagg_clrmulti(struct lagg_port *lp)
{
	struct lagg_mc *mc;

	LAGG_XLOCK_ASSERT(lp->lp_softc);
	while ((mc = SLIST_FIRST(&lp->lp_mc_head)) != NULL) {
		SLIST_REMOVE(&lp->lp_mc_head, mc, lagg_mc, mc_entries);
		if (mc->mc_ifma && lp->lp_detaching == 0)
			if_delmulti_ifma(mc->mc_ifma);
		free(mc, M_LAGG);
	}
	return (0);
}

static int
lagg_setcaps(struct lagg_port *lp, int cap)
{
	struct ifreq ifr;

	if (lp->lp_ifp->if_capenable == cap)
		return (0);
	if (lp->lp_ioctl == NULL)
		return (ENXIO);
	ifr.ifr_reqcap = cap;
	return ((*lp->lp_ioctl)(lp->lp_ifp, SIOCSIFCAP, (caddr_t)&ifr));
}

/* Handle a ref counted flag that should be set on the lagg port as well */
static int
lagg_setflag(struct lagg_port *lp, int flag, int status,
    int (*func)(struct ifnet *, int))
{
	struct lagg_softc *sc = lp->lp_softc;
	struct ifnet *scifp = sc->sc_ifp;
	struct ifnet *ifp = lp->lp_ifp;
	int error;

	LAGG_XLOCK_ASSERT(sc);

	status = status ? (scifp->if_flags & flag) : 0;
	/* Now "status" contains the flag value or 0 */

	/*
	 * See if the recorded port status is different from what
	 * we want it to be.  If it is, flip it.  We record port
	 * status in lp_ifflags so that we won't clear a flag
	 * we haven't set.  In fact, we don't clear or set port
	 * flags directly, but get or release references to them.
	 * That's why we can be sure that recorded flags still are
	 * in accord with actual port flags.
	 */
	if (status != (lp->lp_ifflags & flag)) {
		error = (*func)(ifp, status);
		if (error)
			return (error);
		lp->lp_ifflags &= ~flag;
		lp->lp_ifflags |= status;
	}
	return (0);
}
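
/*
 * Illustrative example: setting IFF_PROMISC on the lagg interface makes
 * lagg_setflags(lp, 1) run lagg_setflag(lp, IFF_PROMISC, 1, ifpromisc)
 * for every port, so each member takes a promiscuous reference; clearing
 * the flag releases those references instead of blindly clearing
 * IFF_PROMISC on a port that may hold it for other reasons.
 */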

/*
 * Handle IFF_* flags that require certain changes on the lagg port:
 * if "status" is true, update the port flags to match the lagg;
 * if "status" is false, forcibly clear the flags set on the port.
 */
static int
lagg_setflags(struct lagg_port *lp, int status)
{
	int error, i;

	for (i = 0; lagg_pflags[i].flag; i++) {
		error = lagg_setflag(lp, lagg_pflags[i].flag,
		    status, lagg_pflags[i].func);
		if (error)
			return (error);
	}
	return (0);
}

static int
lagg_transmit_ethernet(struct ifnet *ifp, struct mbuf *m)
{
	struct epoch_tracker et;
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
	int error;

#if defined(KERN_TLS) || defined(RATELIMIT)
	if (m->m_pkthdr.csum_flags & CSUM_SND_TAG)
		MPASS(m->m_pkthdr.snd_tag->ifp == ifp);
#endif
	NET_EPOCH_ENTER(et);
	/* We need a Tx algorithm and at least one port */
	if (sc->sc_proto == LAGG_PROTO_NONE || sc->sc_count == 0) {
		NET_EPOCH_EXIT(et);
		m_freem(m);
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		return (ENXIO);
	}

	ETHER_BPF_MTAP(ifp, m);

	error = lagg_proto_start(sc, m);
	NET_EPOCH_EXIT(et);
	return (error);
}

static int
lagg_transmit_infiniband(struct ifnet *ifp, struct mbuf *m)
{
	struct epoch_tracker et;
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
	int error;

#if defined(KERN_TLS) || defined(RATELIMIT)
	if (m->m_pkthdr.csum_flags & CSUM_SND_TAG)
		MPASS(m->m_pkthdr.snd_tag->ifp == ifp);
#endif
	NET_EPOCH_ENTER(et);
	/* We need a Tx algorithm and at least one port */
	if (sc->sc_proto == LAGG_PROTO_NONE || sc->sc_count == 0) {
		NET_EPOCH_EXIT(et);
		m_freem(m);
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		return (ENXIO);
	}

	INFINIBAND_BPF_MTAP(ifp, m);

	error = lagg_proto_start(sc, m);
	NET_EPOCH_EXIT(et);
	return (error);
}

/*
 * The ifp->if_qflush entry point for lagg(4) is a no-op.
 */
static void
lagg_qflush(struct ifnet *ifp __unused)
{
}

static struct mbuf *
lagg_input_ethernet(struct ifnet *ifp, struct mbuf *m)
{
	struct epoch_tracker et;
	struct lagg_port *lp = ifp->if_lagg;
	struct lagg_softc *sc = lp->lp_softc;
	struct ifnet *scifp = sc->sc_ifp;

	NET_EPOCH_ENTER(et);
	if ((scifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    lp->lp_detaching != 0 ||
	    sc->sc_proto == LAGG_PROTO_NONE) {
		NET_EPOCH_EXIT(et);
		m_freem(m);
		return (NULL);
	}

	ETHER_BPF_MTAP(scifp, m);

	m = lagg_proto_input(sc, lp, m);
	if (m != NULL && (scifp->if_flags & IFF_MONITOR) != 0) {
		m_freem(m);
		m = NULL;
	}

	NET_EPOCH_EXIT(et);
	return (m);
}

static struct mbuf *
lagg_input_infiniband(struct ifnet *ifp, struct mbuf *m)
{
	struct epoch_tracker et;
	struct lagg_port *lp = ifp->if_lagg;
	struct lagg_softc *sc = lp->lp_softc;
	struct ifnet *scifp = sc->sc_ifp;

	NET_EPOCH_ENTER(et);
	if ((scifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    lp->lp_detaching != 0 ||
	    sc->sc_proto == LAGG_PROTO_NONE) {
		NET_EPOCH_EXIT(et);
		m_freem(m);
		return (NULL);
	}

	INFINIBAND_BPF_MTAP(scifp, m);

	m = lagg_proto_input(sc, lp, m);
	if (m != NULL && (scifp->if_flags & IFF_MONITOR) != 0) {
		m_freem(m);
		m = NULL;
	}

	NET_EPOCH_EXIT(et);
	return (m);
}

static int
lagg_media_change(struct ifnet *ifp)
{
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;

	if (sc->sc_ifflags & IFF_DEBUG)
		printf("%s\n", __func__);

	/* Ignore */
	return (0);
}

static void
lagg_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
	struct epoch_tracker et;
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
	struct lagg_port *lp;

	imr->ifm_status = IFM_AVALID;
	imr->ifm_active = IFM_ETHER | IFM_AUTO;

	NET_EPOCH_ENTER(et);
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		if (LAGG_PORTACTIVE(lp))
			imr->ifm_status |= IFM_ACTIVE;
	}
	NET_EPOCH_EXIT(et);
}

static void
lagg_linkstate(struct lagg_softc *sc)
{
	struct epoch_tracker et;
	struct lagg_port *lp;
	int new_link = LINK_STATE_DOWN;
	uint64_t speed;

	LAGG_XLOCK_ASSERT(sc);

	/* LACP handles link state itself */
	if (sc->sc_proto == LAGG_PROTO_LACP)
		return;

	/* Our link is considered up if at least one of our ports is active */
	NET_EPOCH_ENTER(et);
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		if (lp->lp_ifp->if_link_state == LINK_STATE_UP) {
			new_link = LINK_STATE_UP;
			break;
		}
	}
	NET_EPOCH_EXIT(et);
	if_link_state_change(sc->sc_ifp, new_link);

	/* Update if_baudrate to reflect the max possible speed */
	switch (sc->sc_proto) {
	case LAGG_PROTO_FAILOVER:
		sc->sc_ifp->if_baudrate = sc->sc_primary != NULL ?
		    sc->sc_primary->lp_ifp->if_baudrate : 0;
		break;
	case LAGG_PROTO_ROUNDROBIN:
	case LAGG_PROTO_LOADBALANCE:
	case LAGG_PROTO_BROADCAST:
		speed = 0;
		NET_EPOCH_ENTER(et);
		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
			speed += lp->lp_ifp->if_baudrate;
		NET_EPOCH_EXIT(et);
		sc->sc_ifp->if_baudrate = speed;
		break;
	case LAGG_PROTO_LACP:
		/* LACP updates if_baudrate itself */
		break;
	}
}
static void
lagg_port_state(struct ifnet *ifp, int state)
{
	struct lagg_port *lp = (struct lagg_port *)ifp->if_lagg;
	struct lagg_softc *sc = NULL;

	if (lp != NULL)
		sc = lp->lp_softc;
	if (sc == NULL)
		return;

	LAGG_XLOCK(sc);
	lagg_linkstate(sc);
	lagg_proto_linkstate(sc, lp);
	LAGG_XUNLOCK(sc);
}

static struct lagg_port *
lagg_link_active(struct lagg_softc *sc, struct lagg_port *lp)
{
	struct lagg_port *lp_next, *rval = NULL;

	/*
	 * Search for a port which reports an active link state.
	 */

	/*
	 * This is called either in the network epoch
	 * or with LAGG_XLOCK(sc) held.
	 */
	if (!in_epoch(net_epoch_preempt))
		LAGG_XLOCK_ASSERT(sc);

	if (lp == NULL)
		goto search;
	if (LAGG_PORTACTIVE(lp)) {
		rval = lp;
		goto found;
	}
	if ((lp_next = CK_SLIST_NEXT(lp, lp_entries)) != NULL &&
	    LAGG_PORTACTIVE(lp_next)) {
		rval = lp_next;
		goto found;
	}

search:
	CK_SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
		if (LAGG_PORTACTIVE(lp_next)) {
			rval = lp_next;
			break;
		}
	}
found:
	return (rval);
}

int
lagg_enqueue(struct ifnet *ifp, struct mbuf *m)
{

#if defined(KERN_TLS) || defined(RATELIMIT)
	if (m->m_pkthdr.csum_flags & CSUM_SND_TAG) {
		struct lagg_snd_tag *lst;
		struct m_snd_tag *mst;

		mst = m->m_pkthdr.snd_tag;
		lst = mst_to_lst(mst);
		if (lst->tag->ifp != ifp) {
			m_freem(m);
			return (EAGAIN);
		}
		m->m_pkthdr.snd_tag = m_snd_tag_ref(lst->tag);
		m_snd_tag_rele(mst);
	}
#endif
	return (ifp->if_transmit)(ifp, m);
}
/*
 * Simple round robin aggregation
 */
static void
lagg_rr_attach(struct lagg_softc *sc)
{
	sc->sc_seq = 0;
	sc->sc_stride = 1;
}

static int
lagg_rr_start(struct lagg_softc *sc, struct mbuf *m)
{
	struct lagg_port *lp;
	uint32_t p;

	p = atomic_fetchadd_32(&sc->sc_seq, 1);
	p /= sc->sc_stride;
	p %= sc->sc_count;
	lp = CK_SLIST_FIRST(&sc->sc_ports);

	while (p--)
		lp = CK_SLIST_NEXT(lp, lp_entries);

	/*
	 * Check the port's link state. This will return the next active
	 * port if the link is down or the port is NULL.
	 */
	if ((lp = lagg_link_active(sc, lp)) == NULL) {
		if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
		m_freem(m);
		return (ENETDOWN);
	}

	/* Send mbuf */
	return (lagg_enqueue(lp->lp_ifp, m));
}

static struct mbuf *
lagg_rr_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
	struct ifnet *ifp = sc->sc_ifp;

	/* Just pass in the packet to our lagg device */
	m->m_pkthdr.rcvif = ifp;

	return (m);
}

/*
 * Broadcast mode
 */
static int
lagg_bcast_start(struct lagg_softc *sc, struct mbuf *m)
{
	int active_ports = 0;
	int errors = 0;
	int ret;
	struct lagg_port *lp, *last = NULL;
	struct mbuf *m0;

	NET_EPOCH_ASSERT();
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		if (!LAGG_PORTACTIVE(lp))
			continue;

		active_ports++;

		if (last != NULL) {
			m0 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
			if (m0 == NULL) {
				errors++;
				break;
			}
			lagg_enqueue(last->lp_ifp, m0);
		}
		last = lp;
	}

	if (last == NULL) {
		if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
		m_freem(m);
		return (ENOENT);
	}
	if ((last = lagg_link_active(sc, last)) == NULL) {
		errors++;
		if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, errors);
		m_freem(m);
		return (ENETDOWN);
	}

	ret = lagg_enqueue(last->lp_ifp, m);
	if (errors != 0)
		if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, errors);

	return (ret);
}

static struct mbuf *
lagg_bcast_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
	struct ifnet *ifp = sc->sc_ifp;

	/* Just pass in the packet to our lagg device */
	m->m_pkthdr.rcvif = ifp;

	return (m);
}

/*
 * Active failover
 */
static int
lagg_fail_start(struct lagg_softc *sc, struct mbuf *m)
{
	struct lagg_port *lp;

	/* Use the master port if active or the next available port */
	if ((lp = lagg_link_active(sc, sc->sc_primary)) == NULL) {
		if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
		m_freem(m);
		return (ENETDOWN);
	}

	/* Send mbuf */
	return (lagg_enqueue(lp->lp_ifp, m));
}

static struct mbuf *
lagg_fail_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct lagg_port *tmp_tp;

	if (lp == sc->sc_primary || V_lagg_failover_rx_all) {
		m->m_pkthdr.rcvif = ifp;
		return (m);
	}

	if (!LAGG_PORTACTIVE(sc->sc_primary)) {
		tmp_tp = lagg_link_active(sc, sc->sc_primary);
		/*
		 * If tmp_tp is null, we've received a packet when all
		 * our links are down. Weird, but process it anyway.
		 */
		if (tmp_tp == NULL || tmp_tp == lp) {
			m->m_pkthdr.rcvif = ifp;
			return (m);
		}
	}

	m_freem(m);
	return (NULL);
}

/*
 * Loadbalancing
 */
static void
lagg_lb_attach(struct lagg_softc *sc)
{
	struct lagg_port *lp;
	struct lagg_lb *lb;

	LAGG_XLOCK_ASSERT(sc);
	lb = malloc(sizeof(struct lagg_lb), M_LAGG, M_WAITOK | M_ZERO);
	lb->lb_key = m_ether_tcpip_hash_init();
	sc->sc_psc = lb;

	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lagg_lb_port_create(lp);
}

static void
lagg_lb_detach(struct lagg_softc *sc)
{
	struct lagg_lb *lb;

	lb = (struct lagg_lb *)sc->sc_psc;
	if (lb != NULL)
		free(lb, M_LAGG);
}

static int
lagg_lb_porttable(struct lagg_softc *sc, struct lagg_port *lp)
{
	struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
	struct lagg_port *lp_next;
	int i = 0;

	bzero(&lb->lb_ports, sizeof(lb->lb_ports));
	LAGG_XLOCK_ASSERT(sc);
	CK_SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
		if (lp_next == lp)
			continue;
		if (i >= LAGG_MAX_PORTS) {
			return (EINVAL);
		}
		if (sc->sc_ifflags & IFF_DEBUG)
			printf("%s: port %s at index %d\n",
			    sc->sc_ifname, lp_next->lp_ifp->if_xname, i);
		lb->lb_ports[i++] = lp_next;
	}

	return (0);
}

static int
lagg_lb_port_create(struct lagg_port *lp)
{
	struct lagg_softc *sc = lp->lp_softc;

	return (lagg_lb_porttable(sc, NULL));
}

static void
lagg_lb_port_destroy(struct lagg_port *lp)
{
	struct lagg_softc *sc = lp->lp_softc;

	lagg_lb_porttable(sc, lp);
}

static int
lagg_lb_start(struct lagg_softc *sc, struct mbuf *m)
{
	struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
	struct lagg_port *lp = NULL;
	uint32_t p = 0;

	if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) &&
	    M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
		p = m->m_pkthdr.flowid >> sc->flowid_shift;
	else
		p = m_ether_tcpip_hash(sc->sc_flags, m, lb->lb_key);
	p %= sc->sc_count;
	lp = lb->lb_ports[p];

	/*
	 * Check the port's link state. This will return the next active
	 * port if the link is down or the port is NULL.
	 */
	if ((lp = lagg_link_active(sc, lp)) == NULL) {
		if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
		m_freem(m);
		return (ENETDOWN);
	}

	/* Send mbuf */
	return (lagg_enqueue(lp->lp_ifp, m));
}

static struct mbuf *
lagg_lb_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
	struct ifnet *ifp = sc->sc_ifp;

	/* Just pass in the packet to our lagg device */
	m->m_pkthdr.rcvif = ifp;

	return (m);
}

/*
 * 802.3ad LACP
 */
static void
lagg_lacp_attach(struct lagg_softc *sc)
{
	struct lagg_port *lp;

	lacp_attach(sc);
	LAGG_XLOCK_ASSERT(sc);
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lacp_port_create(lp);
}

static void
lagg_lacp_detach(struct lagg_softc *sc)
{
	struct lagg_port *lp;
	void *psc;

	LAGG_XLOCK_ASSERT(sc);
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lacp_port_destroy(lp);

	psc = sc->sc_psc;
	sc->sc_psc = NULL;
	lacp_detach(psc);
}

static void
lagg_lacp_lladdr(struct lagg_softc *sc)
{
	struct lagg_port *lp;

	LAGG_SXLOCK_ASSERT(sc);

	/* purge all the lacp ports */
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lacp_port_destroy(lp);

	/* add them back in */
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lacp_port_create(lp);
}

static int
lagg_lacp_start(struct lagg_softc *sc, struct mbuf *m)
{
	struct lagg_port *lp;
	int err;

	lp = lacp_select_tx_port(sc, m, &err);
	if (lp == NULL) {
		if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
		m_freem(m);
		return (err);
	}

	/* Send mbuf */
	return (lagg_enqueue(lp->lp_ifp, m));
}

static struct mbuf *
lagg_lacp_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ether_header *eh;
	u_short etype;

	eh = mtod(m, struct ether_header *);
	etype = ntohs(eh->ether_type);

	/* Tap off LACP control messages */
	if ((m->m_flags & M_VLANTAG) == 0 && etype == ETHERTYPE_SLOW) {
		m = lacp_input(lp, m);
		if (m == NULL)
			return (NULL);
	}

	/*
	 * If the port is not collecting or not in the active aggregator then
	 * free and return.
	 */
	if (lacp_iscollecting(lp) == 0 || lacp_isactive(lp) == 0) {
		m_freem(m);
		return (NULL);
	}

	m->m_pkthdr.rcvif = ifp;
	return (m);
}