/*	$OpenBSD: if_trunk.c,v 1.30 2007/01/31 06:20:19 reyk Exp $	*/

/*
 * Copyright (c) 2005, 2006 Reyk Floeter <reyk@openbsd.org>
 * Copyright (c) 2007 Andrew Thompson <thompsa@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/module.h>
#include <sys/priv.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/hash.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/taskqueue.h>
#include <sys/eventhandler.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_clone.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_llc.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#endif

#ifdef INET6
#include <netinet/ip6.h>
#endif

#include <net/if_vlan_var.h>
#include <net/if_lagg.h>
#include <net/ieee8023ad_lacp.h>
/* Special flags we should propagate to the lagg ports. */
static struct {
	int flag;
	int (*func)(struct ifnet *, int);
} lagg_pflags[] = {
	{IFF_PROMISC, ifpromisc},
	{IFF_ALLMULTI, if_allmulti},
	{0, NULL}
};
SLIST_HEAD(__trhead, lagg_softc) lagg_list;	/* list of laggs */
static struct mtx	lagg_list_mtx;
eventhandler_tag	lagg_detach_cookie = NULL;
static int	lagg_clone_create(struct if_clone *, int, caddr_t);
static void	lagg_clone_destroy(struct ifnet *);
static void	lagg_lladdr(struct lagg_softc *, uint8_t *);
static void	lagg_capabilities(struct lagg_softc *);
static void	lagg_port_lladdr(struct lagg_port *, uint8_t *);
static void	lagg_port_setlladdr(void *, int);
static int	lagg_port_create(struct lagg_softc *, struct ifnet *);
static int	lagg_port_destroy(struct lagg_port *, int);
static struct mbuf *lagg_input(struct ifnet *, struct mbuf *);
static void	lagg_linkstate(struct lagg_softc *);
static void	lagg_port_state(struct ifnet *, int);
static int	lagg_port_ioctl(struct ifnet *, u_long, caddr_t);
static int	lagg_port_output(struct ifnet *, struct mbuf *,
		    struct sockaddr *, struct route *);
static void	lagg_port_ifdetach(void *arg __unused, struct ifnet *);
static int	lagg_port_checkstacking(struct lagg_softc *);
static void	lagg_port2req(struct lagg_port *, struct lagg_reqport *);
static void	lagg_init(void *);
static void	lagg_stop(struct lagg_softc *);
static int	lagg_ioctl(struct ifnet *, u_long, caddr_t);
static int	lagg_ether_setmulti(struct lagg_softc *);
static int	lagg_ether_cmdmulti(struct lagg_port *, int);
static int	lagg_setflag(struct lagg_port *, int, int,
		    int (*func)(struct ifnet *, int));
static int	lagg_setflags(struct lagg_port *, int status);
static void	lagg_start(struct ifnet *);
static int	lagg_media_change(struct ifnet *);
static void	lagg_media_status(struct ifnet *, struct ifmediareq *);
static struct lagg_port *lagg_link_active(struct lagg_softc *,
		    struct lagg_port *);
static const void *lagg_gethdr(struct mbuf *, u_int, u_int, void *);
static uint32_t	lagg_hashmbuf(struct mbuf *, uint32_t);

IFC_SIMPLE_DECLARE(lagg, 0);
/* Simple round robin */
static int	lagg_rr_attach(struct lagg_softc *);
static int	lagg_rr_detach(struct lagg_softc *);
static int	lagg_rr_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_rr_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);

/* Active failover */
static int	lagg_fail_attach(struct lagg_softc *);
static int	lagg_fail_detach(struct lagg_softc *);
static int	lagg_fail_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_fail_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);

/* Loadbalance */
static int	lagg_lb_attach(struct lagg_softc *);
static int	lagg_lb_detach(struct lagg_softc *);
static int	lagg_lb_port_create(struct lagg_port *);
static void	lagg_lb_port_destroy(struct lagg_port *);
static int	lagg_lb_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_lb_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);
static int	lagg_lb_porttable(struct lagg_softc *, struct lagg_port *);

/* 802.3ad LACP */
static int	lagg_lacp_attach(struct lagg_softc *);
static int	lagg_lacp_detach(struct lagg_softc *);
static int	lagg_lacp_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_lacp_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);
static void	lagg_lacp_lladdr(struct lagg_softc *);
/* lagg protocol table */
static const struct {
	int			ti_proto;
	int			(*ti_attach)(struct lagg_softc *);
} lagg_protos[] = {
	{ LAGG_PROTO_ROUNDROBIN,	lagg_rr_attach },
	{ LAGG_PROTO_FAILOVER,		lagg_fail_attach },
	{ LAGG_PROTO_LOADBALANCE,	lagg_lb_attach },
	{ LAGG_PROTO_ETHERCHANNEL,	lagg_lb_attach },
	{ LAGG_PROTO_LACP,		lagg_lacp_attach },
	{ LAGG_PROTO_NONE,		NULL }
};
SYSCTL_DECL(_net_link);
SYSCTL_NODE(_net_link, OID_AUTO, lagg, CTLFLAG_RW, 0, "Link Aggregation");

static int lagg_failover_rx_all = 0; /* Allow input on any failover links */
SYSCTL_INT(_net_link_lagg, OID_AUTO, failover_rx_all, CTLFLAG_RW,
    &lagg_failover_rx_all, 0,
    "Accept input from any interface in a failover lagg");
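/*
 * Illustrative usage sketch (not part of this file), assuming the lagg
 * options documented in ifconfig(8):
 *	ifconfig lagg0 create
 *	ifconfig lagg0 laggproto lacp laggport em0 laggport em1 up
 */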
static int
lagg_modevent(module_t mod, int type, void *data)
{

	switch (type) {
	case MOD_LOAD:
		mtx_init(&lagg_list_mtx, "if_lagg list", NULL, MTX_DEF);
		SLIST_INIT(&lagg_list);
		if_clone_attach(&lagg_cloner);
		lagg_input_p = lagg_input;
		lagg_linkstate_p = lagg_port_state;
		lagg_detach_cookie = EVENTHANDLER_REGISTER(
		    ifnet_departure_event, lagg_port_ifdetach, NULL,
		    EVENTHANDLER_PRI_ANY);
		break;
	case MOD_UNLOAD:
		EVENTHANDLER_DEREGISTER(ifnet_departure_event,
		    lagg_detach_cookie);
		if_clone_detach(&lagg_cloner);
		lagg_input_p = NULL;
		lagg_linkstate_p = NULL;
		mtx_destroy(&lagg_list_mtx);
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (0);
}
static moduledata_t lagg_mod = {
	"if_lagg",
	lagg_modevent,
	0
};

DECLARE_MODULE(if_lagg, lagg_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
MODULE_VERSION(if_lagg, 1);
#if __FreeBSD_version >= 800000
/*
 * This routine is run via a vlan config EVENT.
 */
static void
lagg_register_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
{
	struct lagg_softc *sc = ifp->if_softc;
	struct lagg_port *lp;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	LAGG_RLOCK(sc);
	if (!SLIST_EMPTY(&sc->sc_ports)) {
		SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
			EVENTHANDLER_INVOKE(vlan_config, lp->lp_ifp, vtag);
	}
	LAGG_RUNLOCK(sc);
}

/*
 * This routine is run via a vlan unconfig EVENT.
 */
static void
lagg_unregister_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
{
	struct lagg_softc *sc = ifp->if_softc;
	struct lagg_port *lp;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	LAGG_RLOCK(sc);
	if (!SLIST_EMPTY(&sc->sc_ports)) {
		SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
			EVENTHANDLER_INVOKE(vlan_unconfig, lp->lp_ifp, vtag);
	}
	LAGG_RUNLOCK(sc);
}
#endif
static int
lagg_clone_create(struct if_clone *ifc, int unit, caddr_t params)
{
	struct lagg_softc *sc;
	struct ifnet *ifp;
	int i, error = 0;
	static const u_char eaddr[6];	/* 00:00:00:00:00:00 */

	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		free(sc, M_DEVBUF);
		return (ENOSPC);
	}

	sc->sc_proto = LAGG_PROTO_NONE;
	for (i = 0; lagg_protos[i].ti_proto != LAGG_PROTO_NONE; i++) {
		if (lagg_protos[i].ti_proto == LAGG_PROTO_DEFAULT) {
			sc->sc_proto = lagg_protos[i].ti_proto;
			if ((error = lagg_protos[i].ti_attach(sc)) != 0) {
				if_free_type(ifp, IFT_ETHER);
				free(sc, M_DEVBUF);
				return (error);
			}
			break;
		}
	}
	LAGG_LOCK_INIT(sc);
	SLIST_INIT(&sc->sc_ports);
	TASK_INIT(&sc->sc_lladdr_task, 0, lagg_port_setlladdr, sc);

	/* Initialise pseudo media types */
	ifmedia_init(&sc->sc_media, 0, lagg_media_change,
	    lagg_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	if_initname(ifp, ifc->ifc_name, unit);
	ifp->if_type = IFT_ETHER;
	ifp->if_softc = sc;
	ifp->if_start = lagg_start;
	ifp->if_init = lagg_init;
	ifp->if_ioctl = lagg_ioctl;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;

	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach as an ordinary ethernet device, children will be attached
	 * as special device IFT_IEEE8023ADLAG.
	 */
	ether_ifattach(ifp, eaddr);

#if __FreeBSD_version >= 800000
	sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    lagg_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
	sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    lagg_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
#endif

	/* Insert into the global list of laggs */
	mtx_lock(&lagg_list_mtx);
	SLIST_INSERT_HEAD(&lagg_list, sc, sc_entries);
	mtx_unlock(&lagg_list_mtx);

	return (0);
}
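/*
 * Tear down a lagg interface: stop it, detach every port, unhook the
 * aggregation protocol and release the softc.
 */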
static void
lagg_clone_destroy(struct ifnet *ifp)
{
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
	struct lagg_port *lp;

	LAGG_WLOCK(sc);

	lagg_stop(sc);
	ifp->if_flags &= ~IFF_UP;

#if __FreeBSD_version >= 800000
	EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
	EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
#endif

	/* Shutdown and remove lagg ports */
	while ((lp = SLIST_FIRST(&sc->sc_ports)) != NULL)
		lagg_port_destroy(lp, 1);
	/* Unhook the aggregation protocol */
	(*sc->sc_detach)(sc);

	LAGG_WUNLOCK(sc);

	ifmedia_removeall(&sc->sc_media);
	ether_ifdetach(ifp);
	if_free_type(ifp, IFT_ETHER);

	mtx_lock(&lagg_list_mtx);
	SLIST_REMOVE(&lagg_list, sc, lagg_softc, sc_entries);
	mtx_unlock(&lagg_list_mtx);

	taskqueue_drain(taskqueue_swi, &sc->sc_lladdr_task);
	LAGG_LOCK_DESTROY(sc);
	free(sc, M_DEVBUF);
}
static void
lagg_lladdr(struct lagg_softc *sc, uint8_t *lladdr)
{
	struct ifnet *ifp = sc->sc_ifp;

	if (memcmp(lladdr, IF_LLADDR(ifp), ETHER_ADDR_LEN) == 0)
		return;

	bcopy(lladdr, IF_LLADDR(ifp), ETHER_ADDR_LEN);
	/* Let the protocol know the MAC has changed */
	if (sc->sc_lladdr != NULL)
		(*sc->sc_lladdr)(sc);
	EVENTHANDLER_INVOKE(iflladdr_event, ifp);
}
static void
lagg_capabilities(struct lagg_softc *sc)
{
	struct lagg_port *lp;
	int cap = ~0, ena = ~0;
	u_long hwa = ~0UL;

	LAGG_WLOCK_ASSERT(sc);

	/* Get capabilities from the lagg ports */
	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		cap &= lp->lp_ifp->if_capabilities;
		ena &= lp->lp_ifp->if_capenable;
		hwa &= lp->lp_ifp->if_hwassist;
	}
	cap = (cap == ~0 ? 0 : cap);
	ena = (ena == ~0 ? 0 : ena);
	hwa = (hwa == ~0 ? 0 : hwa);

	if (sc->sc_ifp->if_capabilities != cap ||
	    sc->sc_ifp->if_capenable != ena ||
	    sc->sc_ifp->if_hwassist != hwa) {
		sc->sc_ifp->if_capabilities = cap;
		sc->sc_ifp->if_capenable = ena;
		sc->sc_ifp->if_hwassist = hwa;
		getmicrotime(&sc->sc_ifp->if_lastchange);

		if (sc->sc_ifflags & IFF_DEBUG)
			if_printf(sc->sc_ifp,
			    "capabilities 0x%08x enabled 0x%08x\n", cap, ena);
	}
}
static void
lagg_port_lladdr(struct lagg_port *lp, uint8_t *lladdr)
{
	struct lagg_softc *sc = lp->lp_softc;
	struct ifnet *ifp = lp->lp_ifp;
	struct lagg_llq *llq;
	int pending = 0;

	LAGG_WLOCK_ASSERT(sc);

	if (lp->lp_detaching ||
	    memcmp(lladdr, IF_LLADDR(ifp), ETHER_ADDR_LEN) == 0)
		return;

	/* Check to make sure it's not already queued to be changed */
	SLIST_FOREACH(llq, &sc->sc_llq_head, llq_entries) {
		if (llq->llq_ifp == ifp) {
			pending = 1;
			break;
		}
	}

	if (!pending) {
		llq = malloc(sizeof(struct lagg_llq), M_DEVBUF, M_NOWAIT);
		if (llq == NULL)	/* XXX what to do */
			return;
	}

	/* Update the lladdr even if pending, it may have changed */
	llq->llq_ifp = ifp;
	bcopy(lladdr, llq->llq_lladdr, ETHER_ADDR_LEN);

	if (!pending)
		SLIST_INSERT_HEAD(&sc->sc_llq_head, llq, llq_entries);

	taskqueue_enqueue(taskqueue_swi, &sc->sc_lladdr_task);
}
/*
 * Set the interface MAC address from a taskqueue to avoid a LOR.
 */
static void
lagg_port_setlladdr(void *arg, int pending)
{
	struct lagg_softc *sc = (struct lagg_softc *)arg;
	struct lagg_llq *llq, *head;
	struct ifnet *ifp;
	int error;

	/* Grab a local reference of the queue and remove it from the softc */
	LAGG_WLOCK(sc);
	head = SLIST_FIRST(&sc->sc_llq_head);
	SLIST_FIRST(&sc->sc_llq_head) = NULL;
	LAGG_WUNLOCK(sc);

	/*
	 * Traverse the queue and set the lladdr on each ifp. It is safe to
	 * do this unlocked as we have the only reference to it.
	 */
	for (llq = head; llq != NULL; llq = head) {
		ifp = llq->llq_ifp;

		/* Set the link layer address */
		error = if_setlladdr(ifp, llq->llq_lladdr, ETHER_ADDR_LEN);
		if (error)
			printf("%s: setlladdr failed on %s\n", __func__,
			    ifp->if_xname);

		head = SLIST_NEXT(llq, llq_entries);
		free(llq, M_DEVBUF);
	}
}
static int
lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp)
{
	struct lagg_softc *sc_ptr;
	struct lagg_port *lp;
	int error = 0;

	LAGG_WLOCK_ASSERT(sc);

	/* Limit the maximal number of lagg ports */
	if (sc->sc_count >= LAGG_MAX_PORTS)
		return (ENOSPC);

	/* Check if port has already been associated to a lagg */
	if (ifp->if_lagg != NULL)
		return (EBUSY);

	/* XXX Disallow non-ethernet interfaces (this should be any of 802) */
	if (ifp->if_type != IFT_ETHER)
		return (EPROTONOSUPPORT);

	/* Allow the first Ethernet member to define the MTU */
	if (SLIST_EMPTY(&sc->sc_ports))
		sc->sc_ifp->if_mtu = ifp->if_mtu;
	else if (sc->sc_ifp->if_mtu != ifp->if_mtu) {
		if_printf(sc->sc_ifp, "invalid MTU for %s\n",
		    ifp->if_xname);
		return (EINVAL);
	}

	if ((lp = malloc(sizeof(struct lagg_port),
	    M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL)
		return (ENOMEM);

	/* Check if port is a stacked lagg */
	mtx_lock(&lagg_list_mtx);
	SLIST_FOREACH(sc_ptr, &lagg_list, sc_entries) {
		if (ifp == sc_ptr->sc_ifp) {
			mtx_unlock(&lagg_list_mtx);
			free(lp, M_DEVBUF);
			return (EINVAL);
			/* XXX disable stacking for the moment, it's untested
			lp->lp_flags |= LAGG_PORT_STACK;
			if (lagg_port_checkstacking(sc_ptr) >=
			    LAGG_MAX_STACKING) {
				mtx_unlock(&lagg_list_mtx);
				free(lp, M_DEVBUF);
				return (E2BIG);
			}
			*/
		}
	}
	mtx_unlock(&lagg_list_mtx);

	/* Change the interface type */
	lp->lp_iftype = ifp->if_type;
	ifp->if_type = IFT_IEEE8023ADLAG;
	ifp->if_lagg = lp;
	lp->lp_ioctl = ifp->if_ioctl;
	ifp->if_ioctl = lagg_port_ioctl;
	lp->lp_output = ifp->if_output;
	ifp->if_output = lagg_port_output;

	lp->lp_ifp = ifp;
	lp->lp_softc = sc;

	/* Save port link layer address */
	bcopy(IF_LLADDR(ifp), lp->lp_lladdr, ETHER_ADDR_LEN);

	if (SLIST_EMPTY(&sc->sc_ports)) {
		sc->sc_primary = lp;
		lagg_lladdr(sc, IF_LLADDR(ifp));
	} else {
		/* Update link layer address for this port */
		lagg_port_lladdr(lp, IF_LLADDR(sc->sc_ifp));
	}

	/* Insert into the list of ports */
	SLIST_INSERT_HEAD(&sc->sc_ports, lp, lp_entries);
	sc->sc_count++;

	/* Update lagg capabilities */
	lagg_capabilities(sc);
	lagg_linkstate(sc);

	/* Add multicast addresses and interface flags to this port */
	lagg_ether_cmdmulti(lp, 1);
	lagg_setflags(lp, 1);

	if (sc->sc_port_create != NULL)
		error = (*sc->sc_port_create)(lp);
	if (error) {
		/* remove the port again, without calling sc_port_destroy */
		lagg_port_destroy(lp, 0);
		return (error);
	}

	return (error);
}
static int
lagg_port_checkstacking(struct lagg_softc *sc)
{
	struct lagg_softc *sc_ptr;
	struct lagg_port *lp;
	int m = 0;

	LAGG_WLOCK_ASSERT(sc);

	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		if (lp->lp_flags & LAGG_PORT_STACK) {
			sc_ptr = (struct lagg_softc *)lp->lp_ifp->if_softc;
			m = MAX(m, lagg_port_checkstacking(sc_ptr));
		}
	}

	return (m + 1);
}
static int
lagg_port_destroy(struct lagg_port *lp, int runpd)
{
	struct lagg_softc *sc = lp->lp_softc;
	struct lagg_port *lp_ptr;
	struct lagg_llq *llq;
	struct ifnet *ifp = lp->lp_ifp;

	LAGG_WLOCK_ASSERT(sc);

	if (runpd && sc->sc_port_destroy != NULL)
		(*sc->sc_port_destroy)(lp);

	/*
	 * Remove multicast addresses and interface flags from this port and
	 * reset the MAC address, skip if the interface is being detached.
	 */
	if (!lp->lp_detaching) {
		lagg_ether_cmdmulti(lp, 0);
		lagg_setflags(lp, 0);
		lagg_port_lladdr(lp, lp->lp_lladdr);
	}

	/* Restore interface */
	ifp->if_type = lp->lp_iftype;
	ifp->if_ioctl = lp->lp_ioctl;
	ifp->if_output = lp->lp_output;
	ifp->if_lagg = NULL;

	/* Finally, remove the port from the lagg */
	SLIST_REMOVE(&sc->sc_ports, lp, lagg_port, lp_entries);
	sc->sc_count--;

	/* Update the primary interface */
	if (lp == sc->sc_primary) {
		uint8_t lladdr[ETHER_ADDR_LEN];

		if ((lp_ptr = SLIST_FIRST(&sc->sc_ports)) == NULL) {
			bzero(&lladdr, ETHER_ADDR_LEN);
		} else {
			bcopy(lp_ptr->lp_lladdr,
			    lladdr, ETHER_ADDR_LEN);
		}
		lagg_lladdr(sc, lladdr);
		sc->sc_primary = lp_ptr;

		/* Update link layer address for each port */
		SLIST_FOREACH(lp_ptr, &sc->sc_ports, lp_entries)
			lagg_port_lladdr(lp_ptr, lladdr);
	}

	/* Remove any pending lladdr changes from the queue */
	if (lp->lp_detaching) {
		SLIST_FOREACH(llq, &sc->sc_llq_head, llq_entries) {
			if (llq->llq_ifp == ifp) {
				SLIST_REMOVE(&sc->sc_llq_head, llq, lagg_llq,
				    llq_entries);
				free(llq, M_DEVBUF);
				break;	/* Only appears once */
			}
		}
	}

	if (lp->lp_ifflags)
		if_printf(ifp, "%s: lp_ifflags unclean\n", __func__);

	free(lp, M_DEVBUF);

	/* Update lagg capabilities */
	lagg_capabilities(sc);
	lagg_linkstate(sc);

	return (0);
}
static int
lagg_port_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct lagg_reqport *rp = (struct lagg_reqport *)data;
	struct lagg_softc *sc;
	struct lagg_port *lp = NULL;
	int error = 0;

	/* Should be checked by the caller */
	if (ifp->if_type != IFT_IEEE8023ADLAG ||
	    (lp = ifp->if_lagg) == NULL || (sc = lp->lp_softc) == NULL)
		goto fallback;

	switch (cmd) {
	case SIOCGLAGGPORT:
		if (rp->rp_portname[0] == '\0' ||
		    ifunit(rp->rp_portname) != ifp) {
			error = EINVAL;
			break;
		}

		LAGG_RLOCK(sc);
		if ((lp = ifp->if_lagg) == NULL || lp->lp_softc != sc) {
			error = ENOENT;
			LAGG_RUNLOCK(sc);
			break;
		}

		lagg_port2req(lp, rp);
		LAGG_RUNLOCK(sc);
		break;

	case SIOCSIFCAP:
		if (lp->lp_ioctl == NULL) {
			error = EINVAL;
			break;
		}
		error = (*lp->lp_ioctl)(ifp, cmd, data);
		if (error)
			break;

		/* Update lagg interface capabilities */
		LAGG_WLOCK(sc);
		lagg_capabilities(sc);
		LAGG_WUNLOCK(sc);
		break;

	case SIOCSIFMTU:
		/* Do not allow the MTU to be changed once joined */
		error = EINVAL;
		break;

	default:
		goto fallback;
	}

	return (error);

fallback:
	if (lp != NULL && lp->lp_ioctl != NULL)
		return ((*lp->lp_ioctl)(ifp, cmd, data));

	return (EINVAL);
}
static int
lagg_port_output(struct ifnet *ifp, struct mbuf *m,
	struct sockaddr *dst, struct route *ro)
{
	struct lagg_port *lp = ifp->if_lagg;
	struct ether_header *eh;
	short type = 0;

	switch (dst->sa_family) {
	case pseudo_AF_HDRCMPLT:
	case AF_UNSPEC:
		eh = (struct ether_header *)dst->sa_data;
		type = eh->ether_type;
		break;
	}

	/*
	 * Only allow ethernet types required to initiate or maintain the link,
	 * aggregated frames take a different path.
	 */
	switch (ntohs(type)) {
	case ETHERTYPE_PAE:	/* EAPOL PAE/802.1x */
		return ((*lp->lp_output)(ifp, m, dst, ro));
	}

	/* drop any other frames */
	m_freem(m);
	return (EBUSY);
}
static void
lagg_port_ifdetach(void *arg __unused, struct ifnet *ifp)
{
	struct lagg_port *lp;
	struct lagg_softc *sc;

	if ((lp = ifp->if_lagg) == NULL)
		return;

	sc = lp->lp_softc;

	LAGG_WLOCK(sc);
	lp->lp_detaching = 1;
	lagg_port_destroy(lp, 1);
	LAGG_WUNLOCK(sc);
}
static void
lagg_port2req(struct lagg_port *lp, struct lagg_reqport *rp)
{
	struct lagg_softc *sc = lp->lp_softc;

	strlcpy(rp->rp_ifname, sc->sc_ifname, sizeof(rp->rp_ifname));
	strlcpy(rp->rp_portname, lp->lp_ifp->if_xname, sizeof(rp->rp_portname));
	rp->rp_prio = lp->lp_prio;
	rp->rp_flags = lp->lp_flags;
	if (sc->sc_portreq != NULL)
		(*sc->sc_portreq)(lp, (caddr_t)&rp->rp_psc);

	/* Add protocol specific flags */
	switch (sc->sc_proto) {
	case LAGG_PROTO_FAILOVER:
		if (lp == sc->sc_primary)
			rp->rp_flags |= LAGG_PORT_MASTER;
		if (lp == lagg_link_active(sc, sc->sc_primary))
			rp->rp_flags |= LAGG_PORT_ACTIVE;
		break;

	case LAGG_PROTO_ROUNDROBIN:
	case LAGG_PROTO_LOADBALANCE:
	case LAGG_PROTO_ETHERCHANNEL:
		if (LAGG_PORTACTIVE(lp))
			rp->rp_flags |= LAGG_PORT_ACTIVE;
		break;

	case LAGG_PROTO_LACP:
		/* LACP has a different definition of active */
		if (lacp_isactive(lp))
			rp->rp_flags |= LAGG_PORT_ACTIVE;
		if (lacp_iscollecting(lp))
			rp->rp_flags |= LAGG_PORT_COLLECTING;
		if (lacp_isdistributing(lp))
			rp->rp_flags |= LAGG_PORT_DISTRIBUTING;
		break;
	}
}
static void
lagg_init(void *xsc)
{
	struct lagg_softc *sc = (struct lagg_softc *)xsc;
	struct lagg_port *lp;
	struct ifnet *ifp = sc->sc_ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	LAGG_WLOCK(sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	/* Update the port lladdrs */
	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lagg_port_lladdr(lp, IF_LLADDR(ifp));

	if (sc->sc_init != NULL)
		(*sc->sc_init)(sc);

	LAGG_WUNLOCK(sc);
}
static void
lagg_stop(struct lagg_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	LAGG_WLOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	if (sc->sc_stop != NULL)
		(*sc->sc_stop)(sc);
}
static int
lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
	struct lagg_reqall *ra = (struct lagg_reqall *)data;
	struct lagg_reqport *rp = (struct lagg_reqport *)data, rpbuf;
	struct ifreq *ifr = (struct ifreq *)data;
	struct lagg_port *lp;
	struct ifnet *tpif;
	struct thread *td = curthread;
	char *buf, *outbuf;
	int count, buflen, len, error = 0;

	bzero(&rpbuf, sizeof(rpbuf));

	switch (cmd) {
	case SIOCGLAGG:
		LAGG_RLOCK(sc);
		count = 0;
		SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
			count++;
		buflen = count * sizeof(struct lagg_reqport);
		LAGG_RUNLOCK(sc);

		outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);

		LAGG_RLOCK(sc);
		ra->ra_proto = sc->sc_proto;
		if (sc->sc_req != NULL)
			(*sc->sc_req)(sc, (caddr_t)&ra->ra_psc);

		count = 0;
		buf = outbuf;
		len = min(ra->ra_size, buflen);
		SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
			if (len < sizeof(rpbuf))
				break;

			lagg_port2req(lp, &rpbuf);
			memcpy(buf, &rpbuf, sizeof(rpbuf));
			count++;
			buf += sizeof(rpbuf);
			len -= sizeof(rpbuf);
		}
		LAGG_RUNLOCK(sc);
		ra->ra_ports = count;
		ra->ra_size = count * sizeof(rpbuf);
		error = copyout(outbuf, ra->ra_port, ra->ra_size);
		free(outbuf, M_TEMP);
		break;
	case SIOCSLAGG:
		error = priv_check(td, PRIV_NET_LAGG);
		if (error)
			break;
		if (ra->ra_proto >= LAGG_PROTO_MAX) {
			error = EPROTONOSUPPORT;
			break;
		}
		LAGG_WLOCK(sc);
		if (sc->sc_proto != LAGG_PROTO_NONE) {
			error = sc->sc_detach(sc);
			/* Reset protocol and pointers */
			sc->sc_proto = LAGG_PROTO_NONE;
			sc->sc_detach = NULL;
			sc->sc_start = NULL;
			sc->sc_input = NULL;
			sc->sc_port_create = NULL;
			sc->sc_port_destroy = NULL;
			sc->sc_linkstate = NULL;
			sc->sc_init = NULL;
			sc->sc_stop = NULL;
			sc->sc_lladdr = NULL;
			sc->sc_req = NULL;
			sc->sc_portreq = NULL;
		}
		for (int i = 0; i < (sizeof(lagg_protos) /
		    sizeof(lagg_protos[0])); i++) {
			if (lagg_protos[i].ti_proto == ra->ra_proto) {
				if (sc->sc_ifflags & IFF_DEBUG)
					printf("%s: using proto %u\n",
					    sc->sc_ifname,
					    lagg_protos[i].ti_proto);
				sc->sc_proto = lagg_protos[i].ti_proto;
				if (sc->sc_proto != LAGG_PROTO_NONE)
					error = lagg_protos[i].ti_attach(sc);
				LAGG_WUNLOCK(sc);
				return (error);
			}
		}
		LAGG_WUNLOCK(sc);
		error = EPROTONOSUPPORT;
		break;
	case SIOCGLAGGPORT:
		if (rp->rp_portname[0] == '\0' ||
		    (tpif = ifunit(rp->rp_portname)) == NULL) {
			error = EINVAL;
			break;
		}

		LAGG_RLOCK(sc);
		if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL ||
		    lp->lp_softc != sc) {
			error = ENOENT;
			LAGG_RUNLOCK(sc);
			break;
		}

		lagg_port2req(lp, rp);
		LAGG_RUNLOCK(sc);
		break;
	case SIOCSLAGGPORT:
		error = priv_check(td, PRIV_NET_LAGG);
		if (error)
			break;
		if (rp->rp_portname[0] == '\0' ||
		    (tpif = ifunit(rp->rp_portname)) == NULL) {
			error = EINVAL;
			break;
		}
		LAGG_WLOCK(sc);
		error = lagg_port_create(sc, tpif);
		LAGG_WUNLOCK(sc);
		break;
	case SIOCSLAGGDELPORT:
		error = priv_check(td, PRIV_NET_LAGG);
		if (error)
			break;
		if (rp->rp_portname[0] == '\0' ||
		    (tpif = ifunit(rp->rp_portname)) == NULL) {
			error = EINVAL;
			break;
		}

		LAGG_WLOCK(sc);
		if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL ||
		    lp->lp_softc != sc) {
			error = ENOENT;
			LAGG_WUNLOCK(sc);
			break;
		}

		error = lagg_port_destroy(lp, 1);
		LAGG_WUNLOCK(sc);
		break;
	case SIOCSIFFLAGS:
		/* Set flags on ports too */
		LAGG_WLOCK(sc);
		SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
			lagg_setflags(lp, 1);
		}
		LAGG_WUNLOCK(sc);

		if (!(ifp->if_flags & IFF_UP) &&
		    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
			/*
			 * If interface is marked down and it is running,
			 * then stop and disable it.
			 */
			LAGG_WLOCK(sc);
			lagg_stop(sc);
			LAGG_WUNLOCK(sc);
		} else if ((ifp->if_flags & IFF_UP) &&
		    !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			(*ifp->if_init)(sc);
		}
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		LAGG_WLOCK(sc);
		error = lagg_ether_setmulti(sc);
		LAGG_WUNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCSIFCAP:
	case SIOCSIFMTU:
		/* Do not allow the MTU or caps to be directly changed */
		error = EINVAL;
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}
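/*
 * Resynchronize every port's multicast filter with the multicast list
 * of the lagg interface itself.
 */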
static int
lagg_ether_setmulti(struct lagg_softc *sc)
{
	struct lagg_port *lp;

	LAGG_WLOCK_ASSERT(sc);

	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		/* First, remove any existing filter entries. */
		lagg_ether_cmdmulti(lp, 0);
		/* copy all addresses from the lagg interface to the port */
		lagg_ether_cmdmulti(lp, 1);
	}
	return (0);
}
static int
lagg_ether_cmdmulti(struct lagg_port *lp, int set)
{
	struct lagg_softc *sc = lp->lp_softc;
	struct ifnet *ifp = lp->lp_ifp;
	struct ifnet *scifp = sc->sc_ifp;
	struct lagg_mc *mc;
	struct ifmultiaddr *ifma, *rifma = NULL;
	struct sockaddr_dl sdl;
	int error;

	LAGG_WLOCK_ASSERT(sc);

	bzero((char *)&sdl, sizeof(sdl));
	sdl.sdl_len = sizeof(sdl);
	sdl.sdl_family = AF_LINK;
	sdl.sdl_type = IFT_ETHER;
	sdl.sdl_alen = ETHER_ADDR_LEN;
	sdl.sdl_index = ifp->if_index;

	if (set) {
		TAILQ_FOREACH(ifma, &scifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
			    LLADDR(&sdl), ETHER_ADDR_LEN);

			error = if_addmulti(ifp, (struct sockaddr *)&sdl, &rifma);
			if (error)
				return (error);
			mc = malloc(sizeof(struct lagg_mc), M_DEVBUF, M_NOWAIT);
			if (mc == NULL)
				return (ENOMEM);
			mc->mc_ifma = rifma;
			SLIST_INSERT_HEAD(&lp->lp_mc_head, mc, mc_entries);
		}
	} else {
		while ((mc = SLIST_FIRST(&lp->lp_mc_head)) != NULL) {
			SLIST_REMOVE(&lp->lp_mc_head, mc, lagg_mc, mc_entries);
			if_delmulti_ifma(mc->mc_ifma);
			free(mc, M_DEVBUF);
		}
	}
	return (0);
}
/* Handle a ref counted flag that should be set on the lagg port as well */
static int
lagg_setflag(struct lagg_port *lp, int flag, int status,
    int (*func)(struct ifnet *, int))
{
	struct lagg_softc *sc = lp->lp_softc;
	struct ifnet *scifp = sc->sc_ifp;
	struct ifnet *ifp = lp->lp_ifp;
	int error;

	LAGG_WLOCK_ASSERT(sc);

	status = status ? (scifp->if_flags & flag) : 0;
	/* Now "status" contains the flag value or 0 */

	/*
	 * See if the recorded port status differs from what we want it
	 * to be.  If it does, flip it.  We record the port status in
	 * lp_ifflags so that we won't clear a flag we haven't set.  In
	 * fact, we don't clear or set port flags directly, but get or
	 * release references to them.  That's why we can be sure that
	 * the recorded flags still agree with the actual port flags.
	 */
	if (status != (lp->lp_ifflags & flag)) {
		error = (*func)(ifp, status);
		if (error)
			return (error);
		lp->lp_ifflags &= ~flag;
		lp->lp_ifflags |= status;
	}
	return (0);
}
/*
 * Handle IFF_* flags that require certain changes on the lagg port.
 * If "status" is true, update the port flags to match the lagg.
 * If "status" is false, forcibly clear the flags set on the port.
 */
static int
lagg_setflags(struct lagg_port *lp, int status)
{
	int error, i;

	for (i = 0; lagg_pflags[i].flag; i++) {
		error = lagg_setflag(lp, lagg_pflags[i].flag,
		    status, lagg_pflags[i].func);
		if (error)
			return (error);
	}
	return (0);
}
static void
lagg_start(struct ifnet *ifp)
{
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
	struct mbuf *m;
	int error = 0;

	LAGG_RLOCK(sc);
	/* We need a Tx algorithm and at least one port */
	if (sc->sc_proto == LAGG_PROTO_NONE || sc->sc_count == 0) {
		IF_DRAIN(&ifp->if_snd);
		LAGG_RUNLOCK(sc);
		return;
	}

	for (;; error = 0) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		ETHER_BPF_MTAP(ifp, m);

		error = (*sc->sc_start)(sc, m);
		if (error == 0)
			ifp->if_opackets++;
		else
			ifp->if_oerrors++;
	}
	LAGG_RUNLOCK(sc);
}
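/*
 * Receive hook: reached from ether_input() via the lagg_input_p pointer
 * for frames arriving on a lagg port; hands the mbuf to the attached
 * protocol's input routine.
 */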
static struct mbuf *
lagg_input(struct ifnet *ifp, struct mbuf *m)
{
	struct lagg_port *lp = ifp->if_lagg;
	struct lagg_softc *sc = lp->lp_softc;
	struct ifnet *scifp = sc->sc_ifp;

	if ((scifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    (lp->lp_flags & LAGG_PORT_DISABLED) ||
	    sc->sc_proto == LAGG_PROTO_NONE) {
		m_freem(m);
		return (NULL);
	}

	LAGG_RLOCK(sc);
	ETHER_BPF_MTAP(scifp, m);

	m = (*sc->sc_input)(sc, lp, m);

	if (m != NULL) {
		scifp->if_ipackets++;
		scifp->if_ibytes += m->m_pkthdr.len;

		if (scifp->if_flags & IFF_MONITOR) {
			m_freem(m);
			m = NULL;
		}
	}

	LAGG_RUNLOCK(sc);
	return (m);
}
static int
lagg_media_change(struct ifnet *ifp)
{
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;

	if (sc->sc_ifflags & IFF_DEBUG)
		printf("%s\n", __func__);

	/* Ignore */
	return (0);
}
static void
lagg_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
	struct lagg_port *lp;

	imr->ifm_status = IFM_AVALID;
	imr->ifm_active = IFM_ETHER | IFM_AUTO;

	LAGG_RLOCK(sc);
	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		if (LAGG_PORTACTIVE(lp))
			imr->ifm_status |= IFM_ACTIVE;
	}
	LAGG_RUNLOCK(sc);
}
static void
lagg_linkstate(struct lagg_softc *sc)
{
	struct lagg_port *lp;
	int new_link = LINK_STATE_DOWN;
	uint64_t speed;

	/* Our link is considered up if at least one of our ports is active */
	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		if (lp->lp_link_state == LINK_STATE_UP) {
			new_link = LINK_STATE_UP;
			break;
		}
	}
	if_link_state_change(sc->sc_ifp, new_link);

	/* Update if_baudrate to reflect the max possible speed */
	switch (sc->sc_proto) {
	case LAGG_PROTO_FAILOVER:
		sc->sc_ifp->if_baudrate = sc->sc_primary != NULL ?
		    sc->sc_primary->lp_ifp->if_baudrate : 0;
		break;
	case LAGG_PROTO_ROUNDROBIN:
	case LAGG_PROTO_LOADBALANCE:
	case LAGG_PROTO_ETHERCHANNEL:
		speed = 0;
		SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
			speed += lp->lp_ifp->if_baudrate;
		sc->sc_ifp->if_baudrate = speed;
		break;
	case LAGG_PROTO_LACP:
		/* LACP updates if_baudrate itself */
		break;
	}
}
static void
lagg_port_state(struct ifnet *ifp, int state)
{
	struct lagg_port *lp = (struct lagg_port *)ifp->if_lagg;
	struct lagg_softc *sc = NULL;

	if (lp != NULL)
		sc = lp->lp_softc;
	if (sc == NULL)
		return;

	LAGG_WLOCK(sc);
	lagg_linkstate(sc);
	if (sc->sc_linkstate != NULL)
		(*sc->sc_linkstate)(lp);
	LAGG_WUNLOCK(sc);
}
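/*
 * Return the first active port starting from lp, wrapping around to the
 * head of the port list if necessary; NULL when no port is active.
 */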
static struct lagg_port *
lagg_link_active(struct lagg_softc *sc, struct lagg_port *lp)
{
	struct lagg_port *lp_next, *rval = NULL;
	// int new_link = LINK_STATE_DOWN;

	LAGG_RLOCK_ASSERT(sc);
	/*
	 * Search a port which reports an active link state.
	 */

	if (lp == NULL)
		goto search;
	if (LAGG_PORTACTIVE(lp)) {
		rval = lp;
		goto found;
	}
	if ((lp_next = SLIST_NEXT(lp, lp_entries)) != NULL &&
	    LAGG_PORTACTIVE(lp_next)) {
		rval = lp_next;
		goto found;
	}

search:
	SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
		if (LAGG_PORTACTIVE(lp_next)) {
			rval = lp_next;
			goto found;
		}
	}

found:
#if 0
	/*
	 * The IEEE 802.1D standard assumes that a lagg with
	 * multiple ports is always full duplex. This is valid
	 * for load sharing laggs and if at least two links
	 * are active. Unfortunately, checking the latter would
	 * be too expensive at this point.
	 */
	if ((sc->sc_capabilities & IFCAP_LAGG_FULLDUPLEX) &&
	    (sc->sc_count > 1))
		new_link = LINK_STATE_FULL_DUPLEX;
	else if (rval != NULL)
		new_link = rval->lp_link_state;
#endif

	return (rval);
}
static const void *
lagg_gethdr(struct mbuf *m, u_int off, u_int len, void *buf)
{
	if (m->m_pkthdr.len < (off + len)) {
		return (NULL);
	} else if (m->m_len < (off + len)) {
		m_copydata(m, off, len, buf);
		return (buf);
	}
	return (mtod(m, char *) + off);
}
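/*
 * Hash the flow-identifying fields of a frame into a 32-bit value used
 * to pick an egress port: the Ethernet source/destination pair, plus the
 * VLAN tag and the IPv4/IPv6 addresses (and IPv6 flow label) when
 * present, folded in with hash32_buf() seeded by a per-lagg key.
 */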
static uint32_t
lagg_hashmbuf(struct mbuf *m, uint32_t key)
{
	uint16_t etype;
	uint32_t p = 0;
	int off;
	struct ether_header *eh;
	struct ether_vlan_header vlanbuf;
	const struct ether_vlan_header *vlan;
#ifdef INET
	const struct ip *ip;
	struct ip ipbuf;
#endif
#ifdef INET6
	const struct ip6_hdr *ip6;
	struct ip6_hdr ip6buf;
	uint32_t flow;
#endif

	off = sizeof(*eh);
	if (m->m_len < off)
		goto out;
	eh = mtod(m, struct ether_header *);
	etype = ntohs(eh->ether_type);
	p = hash32_buf(&eh->ether_shost, ETHER_ADDR_LEN, key);
	p = hash32_buf(&eh->ether_dhost, ETHER_ADDR_LEN, p);

	/* Special handling for encapsulating VLAN frames */
	if (m->m_flags & M_VLANTAG) {
		p = hash32_buf(&m->m_pkthdr.ether_vtag,
		    sizeof(m->m_pkthdr.ether_vtag), p);
	} else if (etype == ETHERTYPE_VLAN) {
		vlan = lagg_gethdr(m, off, sizeof(*vlan), &vlanbuf);
		if (vlan == NULL)
			goto out;

		p = hash32_buf(&vlan->evl_tag, sizeof(vlan->evl_tag), p);
		etype = ntohs(vlan->evl_proto);
		off += sizeof(*vlan) - sizeof(*eh);
	}

	switch (etype) {
#ifdef INET
	case ETHERTYPE_IP:
		ip = lagg_gethdr(m, off, sizeof(*ip), &ipbuf);
		if (ip == NULL)
			goto out;

		p = hash32_buf(&ip->ip_src, sizeof(struct in_addr), p);
		p = hash32_buf(&ip->ip_dst, sizeof(struct in_addr), p);
		break;
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
		ip6 = lagg_gethdr(m, off, sizeof(*ip6), &ip6buf);
		if (ip6 == NULL)
			goto out;

		p = hash32_buf(&ip6->ip6_src, sizeof(struct in6_addr), p);
		p = hash32_buf(&ip6->ip6_dst, sizeof(struct in6_addr), p);
		flow = ip6->ip6_flow & IPV6_FLOWLABEL_MASK;
		p = hash32_buf(&flow, sizeof(flow), p);	/* IPv6 flow label */
		break;
#endif
	}
out:
	return (p);
}
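/*
 * Hand an outgoing frame directly to the chosen port's transmit routine.
 */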
static int
lagg_enqueue(struct ifnet *ifp, struct mbuf *m)
{

	return ((*ifp->if_transmit)(ifp, m));
}
/*
 * Simple round robin aggregation
 */
static int
lagg_rr_attach(struct lagg_softc *sc)
{
	sc->sc_detach = lagg_rr_detach;
	sc->sc_start = lagg_rr_start;
	sc->sc_input = lagg_rr_input;
	sc->sc_port_create = NULL;
	sc->sc_capabilities = IFCAP_LAGG_FULLDUPLEX;
	sc->sc_seq = 0;

	return (0);
}
static int
lagg_rr_detach(struct lagg_softc *sc)
{
	return (0);
}
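/*
 * Pick the egress port from a free-running sequence counter: the packet
 * number modulo the port count indexes into the port list, so successive
 * packets walk the ports in turn.
 */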
static int
lagg_rr_start(struct lagg_softc *sc, struct mbuf *m)
{
	struct lagg_port *lp;
	uint32_t p;

	p = atomic_fetchadd_32(&sc->sc_seq, 1);
	p %= sc->sc_count;
	lp = SLIST_FIRST(&sc->sc_ports);
	while (p--)
		lp = SLIST_NEXT(lp, lp_entries);

	/*
	 * Check the port's link state. This will return the next active
	 * port if the link is down or the port is NULL.
	 */
	if ((lp = lagg_link_active(sc, lp)) == NULL) {
		m_freem(m);
		return (ENOENT);
	}

	/* Send mbuf */
	return (lagg_enqueue(lp->lp_ifp, m));
}
static struct mbuf *
lagg_rr_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
	struct ifnet *ifp = sc->sc_ifp;

	/* Just pass in the packet to our lagg device */
	m->m_pkthdr.rcvif = ifp;

	return (m);
}
/*
 * Active failover
 */
static int
lagg_fail_attach(struct lagg_softc *sc)
{
	sc->sc_detach = lagg_fail_detach;
	sc->sc_start = lagg_fail_start;
	sc->sc_input = lagg_fail_input;
	sc->sc_port_create = NULL;
	sc->sc_port_destroy = NULL;

	return (0);
}

static int
lagg_fail_detach(struct lagg_softc *sc)
{
	return (0);
}
static int
lagg_fail_start(struct lagg_softc *sc, struct mbuf *m)
{
	struct lagg_port *lp;

	/* Use the master port if active or the next available port */
	if ((lp = lagg_link_active(sc, sc->sc_primary)) == NULL) {
		m_freem(m);
		return (ENOENT);
	}

	/* Send mbuf */
	return (lagg_enqueue(lp->lp_ifp, m));
}
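/*
 * In failover mode only traffic from the active port is accepted, unless
 * net.link.lagg.failover_rx_all is set, in which case any port may
 * receive.  A frame seen on a backup port is also accepted while the
 * master is down and that backup is the current active port.
 */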
static struct mbuf *
lagg_fail_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct lagg_port *tmp_tp;

	if (lp == sc->sc_primary || lagg_failover_rx_all) {
		m->m_pkthdr.rcvif = ifp;
		return (m);
	}

	if (!LAGG_PORTACTIVE(sc->sc_primary)) {
		tmp_tp = lagg_link_active(sc, sc->sc_primary);
		/*
		 * If tmp_tp is null, we've received a packet when all
		 * our links are down. Weird, but process it anyway.
		 */
		if ((tmp_tp == NULL || tmp_tp == lp)) {
			m->m_pkthdr.rcvif = ifp;
			return (m);
		}
	}

	m_freem(m);
	return (NULL);
}
/*
 * Loadbalancing
 */
static int
lagg_lb_attach(struct lagg_softc *sc)
{
	struct lagg_port *lp;
	struct lagg_lb *lb;

	if ((lb = (struct lagg_lb *)malloc(sizeof(struct lagg_lb),
	    M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL)
		return (ENOMEM);

	sc->sc_detach = lagg_lb_detach;
	sc->sc_start = lagg_lb_start;
	sc->sc_input = lagg_lb_input;
	sc->sc_port_create = lagg_lb_port_create;
	sc->sc_port_destroy = lagg_lb_port_destroy;
	sc->sc_capabilities = IFCAP_LAGG_FULLDUPLEX;

	lb->lb_key = arc4random();
	sc->sc_psc = (caddr_t)lb;

	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lagg_lb_port_create(lp);

	return (0);
}
static int
lagg_lb_detach(struct lagg_softc *sc)
{
	struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
	if (lb != NULL)
		free(lb, M_DEVBUF);
	return (0);
}
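/*
 * Rebuild the compact port table used for hash-indexed transmit, skipping
 * the port being removed (lp is NULL when a port is added).
 */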
static int
lagg_lb_porttable(struct lagg_softc *sc, struct lagg_port *lp)
{
	struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
	struct lagg_port *lp_next;
	int i = 0;

	bzero(&lb->lb_ports, sizeof(lb->lb_ports));
	SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
		if (lp_next == lp)
			continue;
		if (i >= LAGG_MAX_PORTS)
			return (EINVAL);
		if (sc->sc_ifflags & IFF_DEBUG)
			printf("%s: port %s at index %d\n",
			    sc->sc_ifname, lp_next->lp_ifname, i);
		lb->lb_ports[i++] = lp_next;
	}

	return (0);
}
static int
lagg_lb_port_create(struct lagg_port *lp)
{
	struct lagg_softc *sc = lp->lp_softc;
	return (lagg_lb_porttable(sc, NULL));
}

static void
lagg_lb_port_destroy(struct lagg_port *lp)
{
	struct lagg_softc *sc = lp->lp_softc;
	lagg_lb_porttable(sc, lp);
}
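/*
 * Select the egress port by hashing the frame (or reusing the hardware
 * supplied flowid) modulo the number of ports.
 */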
static int
lagg_lb_start(struct lagg_softc *sc, struct mbuf *m)
{
	struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
	struct lagg_port *lp = NULL;
	uint32_t p = 0;

	if (m->m_flags & M_FLOWID)
		p = m->m_pkthdr.flowid;
	else
		p = lagg_hashmbuf(m, lb->lb_key);
	p %= sc->sc_count;
	lp = lb->lb_ports[p];

	/*
	 * Check the port's link state. This will return the next active
	 * port if the link is down or the port is NULL.
	 */
	if ((lp = lagg_link_active(sc, lp)) == NULL) {
		m_freem(m);
		return (ENOENT);
	}

	/* Send mbuf */
	return (lagg_enqueue(lp->lp_ifp, m));
}
static struct mbuf *
lagg_lb_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
	struct ifnet *ifp = sc->sc_ifp;

	/* Just pass in the packet to our lagg device */
	m->m_pkthdr.rcvif = ifp;

	return (m);
}
/*
 * 802.3ad LACP
 */
static int
lagg_lacp_attach(struct lagg_softc *sc)
{
	struct lagg_port *lp;
	int error;

	sc->sc_detach = lagg_lacp_detach;
	sc->sc_port_create = lacp_port_create;
	sc->sc_port_destroy = lacp_port_destroy;
	sc->sc_linkstate = lacp_linkstate;
	sc->sc_start = lagg_lacp_start;
	sc->sc_input = lagg_lacp_input;
	sc->sc_init = lacp_init;
	sc->sc_stop = lacp_stop;
	sc->sc_lladdr = lagg_lacp_lladdr;
	sc->sc_req = lacp_req;
	sc->sc_portreq = lacp_portreq;

	error = lacp_attach(sc);
	if (error)
		return (error);

	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lacp_port_create(lp);

	return (error);
}
static int
lagg_lacp_detach(struct lagg_softc *sc)
{
	struct lagg_port *lp;
	int error;

	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lacp_port_destroy(lp);

	/* unlocking is safe here */
	error = lacp_detach(sc);

	return (error);
}
static void
lagg_lacp_lladdr(struct lagg_softc *sc)
{
	struct lagg_port *lp;

	/* purge all the lacp ports */
	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lacp_port_destroy(lp);

	/* add them back in */
	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lacp_port_create(lp);
}
static int
lagg_lacp_start(struct lagg_softc *sc, struct mbuf *m)
{
	struct lagg_port *lp;

	lp = lacp_select_tx_port(sc, m);
	if (lp == NULL) {
		m_freem(m);
		return (EBUSY);
	}

	/* Send mbuf */
	return (lagg_enqueue(lp->lp_ifp, m));
}
static struct mbuf *
lagg_lacp_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ether_header *eh;
	u_short etype;

	eh = mtod(m, struct ether_header *);
	etype = ntohs(eh->ether_type);

	/* Tap off LACP control messages */
	if ((m->m_flags & M_VLANTAG) == 0 && etype == ETHERTYPE_SLOW) {
		m = lacp_input(lp, m);
		if (m == NULL)
			return (NULL);
	}

	/*
	 * If the port is not collecting or not in the active aggregator then
	 * free and return.
	 */
	if (lacp_iscollecting(lp) == 0 || lacp_isactive(lp) == 0) {
		m_freem(m);
		return (NULL);
	}

	m->m_pkthdr.rcvif = ifp;
	return (m);
}