1 /* $OpenBSD: if_trunk.c,v 1.30 2007/01/31 06:20:19 reyk Exp $ */
4 * Copyright (c) 2005, 2006 Reyk Floeter <reyk@openbsd.org>
5 * Copyright (c) 2007 Andrew Thompson <thompsa@FreeBSD.org>
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 #include <sys/cdefs.h>
21 __FBSDID("$FreeBSD$");
24 #include "opt_inet6.h"
26 #include <sys/param.h>
27 #include <sys/kernel.h>
28 #include <sys/malloc.h>
30 #include <sys/queue.h>
31 #include <sys/socket.h>
32 #include <sys/sockio.h>
33 #include <sys/sysctl.h>
34 #include <sys/module.h>
36 #include <sys/systm.h>
40 #include <sys/rwlock.h>
41 #include <sys/taskqueue.h>
43 #include <net/ethernet.h>
45 #include <net/if_clone.h>
46 #include <net/if_arp.h>
47 #include <net/if_dl.h>
48 #include <net/if_llc.h>
49 #include <net/if_media.h>
50 #include <net/if_types.h>
51 #include <net/if_var.h>
55 #include <netinet/in.h>
56 #include <netinet/in_systm.h>
57 #include <netinet/if_ether.h>
58 #include <netinet/ip.h>
62 #include <netinet/ip6.h>
65 #include <net/if_vlan_var.h>
66 #include <net/if_lagg.h>
67 #include <net/ieee8023ad_lacp.h>
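/*
 * Example configuration from userland (a sketch; em0/em1 stand in for
 * whatever physical ports are used):
 *	ifconfig lagg0 create
 *	ifconfig lagg0 laggproto lacp laggport em0 laggport em1 up
 */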
69 /* Special flags we should propagate to the lagg ports. */
72 int (*func)(struct ifnet *, int);
74 {IFF_PROMISC, ifpromisc},
75 {IFF_ALLMULTI, if_allmulti},
79 SLIST_HEAD(__trhead, lagg_softc) lagg_list; /* list of laggs */
80 static struct mtx lagg_list_mtx;
81 eventhandler_tag lagg_detach_cookie = NULL;
83 static int lagg_clone_create(struct if_clone *, int, caddr_t);
84 static void lagg_clone_destroy(struct ifnet *);
85 static void lagg_lladdr(struct lagg_softc *, uint8_t *);
86 static void lagg_capabilities(struct lagg_softc *);
87 static void lagg_port_lladdr(struct lagg_port *, uint8_t *);
88 static void lagg_port_setlladdr(void *, int);
89 static int lagg_port_create(struct lagg_softc *, struct ifnet *);
90 static int lagg_port_destroy(struct lagg_port *, int);
91 static struct mbuf *lagg_input(struct ifnet *, struct mbuf *);
92 static void lagg_linkstate(struct lagg_softc *);
93 static void lagg_port_state(struct ifnet *, int);
94 static int lagg_port_ioctl(struct ifnet *, u_long, caddr_t);
95 static int lagg_port_output(struct ifnet *, struct mbuf *,
96 struct sockaddr *, struct route *);
97 static void lagg_port_ifdetach(void *arg __unused, struct ifnet *);
98 static int lagg_port_checkstacking(struct lagg_softc *);
99 static void lagg_port2req(struct lagg_port *, struct lagg_reqport *);
100 static void lagg_init(void *);
101 static void lagg_stop(struct lagg_softc *);
102 static int lagg_ioctl(struct ifnet *, u_long, caddr_t);
103 static int lagg_ether_setmulti(struct lagg_softc *);
104 static int lagg_ether_cmdmulti(struct lagg_port *, int);
105 static int lagg_setflag(struct lagg_port *, int, int,
106 int (*func)(struct ifnet *, int));
107 static int lagg_setflags(struct lagg_port *, int status);
108 static void lagg_start(struct ifnet *);
109 static int lagg_media_change(struct ifnet *);
110 static void lagg_media_status(struct ifnet *, struct ifmediareq *);
111 static struct lagg_port *lagg_link_active(struct lagg_softc *,
113 static const void *lagg_gethdr(struct mbuf *, u_int, u_int, void *);
115 IFC_SIMPLE_DECLARE(lagg, 0);
117 /* Simple round robin */
118 static int lagg_rr_attach(struct lagg_softc *);
119 static int lagg_rr_detach(struct lagg_softc *);
120 static int lagg_rr_start(struct lagg_softc *, struct mbuf *);
121 static struct mbuf *lagg_rr_input(struct lagg_softc *, struct lagg_port *,
124 /* Active failover */
125 static int lagg_fail_attach(struct lagg_softc *);
126 static int lagg_fail_detach(struct lagg_softc *);
127 static int lagg_fail_start(struct lagg_softc *, struct mbuf *);
128 static struct mbuf *lagg_fail_input(struct lagg_softc *, struct lagg_port *,
132 static int lagg_lb_attach(struct lagg_softc *);
133 static int lagg_lb_detach(struct lagg_softc *);
134 static int lagg_lb_port_create(struct lagg_port *);
135 static void lagg_lb_port_destroy(struct lagg_port *);
136 static int lagg_lb_start(struct lagg_softc *, struct mbuf *);
137 static struct mbuf *lagg_lb_input(struct lagg_softc *, struct lagg_port *,
139 static int lagg_lb_porttable(struct lagg_softc *, struct lagg_port *);
142 static int lagg_lacp_attach(struct lagg_softc *);
143 static int lagg_lacp_detach(struct lagg_softc *);
144 static int lagg_lacp_start(struct lagg_softc *, struct mbuf *);
145 static struct mbuf *lagg_lacp_input(struct lagg_softc *, struct lagg_port *,
147 static void lagg_lacp_lladdr(struct lagg_softc *);
149 /* lagg protocol table */
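/* Each ti_attach routine installs the per-protocol sc_* handlers. */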
150 static const struct {
152 int (*ti_attach)(struct lagg_softc *);
154 { LAGG_PROTO_ROUNDROBIN, lagg_rr_attach },
155 { LAGG_PROTO_FAILOVER, lagg_fail_attach },
156 { LAGG_PROTO_LOADBALANCE, lagg_lb_attach },
157 { LAGG_PROTO_ETHERCHANNEL, lagg_lb_attach },
158 { LAGG_PROTO_LACP, lagg_lacp_attach },
159 { LAGG_PROTO_NONE, NULL }
162 SYSCTL_DECL(_net_link);
163 SYSCTL_NODE(_net_link, OID_AUTO, lagg, CTLFLAG_RW, 0, "Link Aggregation");
165 static int lagg_failover_rx_all = 0; /* Allow input on any failover links */
166 SYSCTL_INT(_net_link_lagg, OID_AUTO, failover_rx_all, CTLFLAG_RW,
167 &lagg_failover_rx_all, 0,
168 "Accept input from any interface in a failover lagg");
171 lagg_modevent(module_t mod, int type, void *data)
176 mtx_init(&lagg_list_mtx, "if_lagg list", NULL, MTX_DEF);
177 SLIST_INIT(&lagg_list);
178 if_clone_attach(&lagg_cloner);
179 lagg_input_p = lagg_input;
180 lagg_linkstate_p = lagg_port_state;
181 lagg_detach_cookie = EVENTHANDLER_REGISTER(
182 ifnet_departure_event, lagg_port_ifdetach, NULL,
183 EVENTHANDLER_PRI_ANY);
186 EVENTHANDLER_DEREGISTER(ifnet_departure_event,
188 if_clone_detach(&lagg_cloner);
190 lagg_linkstate_p = NULL;
191 mtx_destroy(&lagg_list_mtx);
199 static moduledata_t lagg_mod = {
205 DECLARE_MODULE(if_lagg, lagg_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
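/*
 * Create a new lagg interface: allocate the softc, attach the default
 * protocol, set up the pseudo media types and the ifnet, and insert
 * the lagg into the global list.
 */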
208 lagg_clone_create(struct if_clone *ifc, int unit, caddr_t params)
210 struct lagg_softc *sc;
213 static const u_char eaddr[6]; /* 00:00:00:00:00:00 */
215 sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
216 ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
222 sc->sc_proto = LAGG_PROTO_NONE;
223 for (i = 0; lagg_protos[i].ti_proto != LAGG_PROTO_NONE; i++) {
224 if (lagg_protos[i].ti_proto == LAGG_PROTO_DEFAULT) {
225 sc->sc_proto = lagg_protos[i].ti_proto;
226 if ((error = lagg_protos[i].ti_attach(sc)) != 0) {
227 if_free_type(ifp, IFT_ETHER);
235 SLIST_INIT(&sc->sc_ports);
236 TASK_INIT(&sc->sc_lladdr_task, 0, lagg_port_setlladdr, sc);
238 /* Initialise pseudo media types */
239 ifmedia_init(&sc->sc_media, 0, lagg_media_change,
241 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
242 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
244 if_initname(ifp, ifc->ifc_name, unit);
245 ifp->if_type = IFT_ETHER;
247 ifp->if_start = lagg_start;
248 ifp->if_init = lagg_init;
249 ifp->if_ioctl = lagg_ioctl;
250 ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
252 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
253 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
254 IFQ_SET_READY(&ifp->if_snd);
257 * Attach as an ordinary ethernet device; child ports will be
258 * attached as special IFT_IEEE8023ADLAG devices.
260 ether_ifattach(ifp, eaddr);
262 /* Insert into the global list of laggs */
263 mtx_lock(&lagg_list_mtx);
264 SLIST_INSERT_HEAD(&lagg_list, sc, sc_entries);
265 mtx_unlock(&lagg_list_mtx);
271 lagg_clone_destroy(struct ifnet *ifp)
273 struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
274 struct lagg_port *lp;
279 ifp->if_flags &= ~IFF_UP;
281 /* Shut down and remove the lagg ports */
282 while ((lp = SLIST_FIRST(&sc->sc_ports)) != NULL)
283 lagg_port_destroy(lp, 1);
284 /* Unhook the aggregation protocol */
285 (*sc->sc_detach)(sc);
289 ifmedia_removeall(&sc->sc_media);
291 if_free_type(ifp, IFT_ETHER);
293 mtx_lock(&lagg_list_mtx);
294 SLIST_REMOVE(&lagg_list, sc, lagg_softc, sc_entries);
295 mtx_unlock(&lagg_list_mtx);
297 taskqueue_drain(taskqueue_swi, &sc->sc_lladdr_task);
298 LAGG_LOCK_DESTROY(sc);
303 lagg_lladdr(struct lagg_softc *sc, uint8_t *lladdr)
305 struct ifnet *ifp = sc->sc_ifp;
307 if (memcmp(lladdr, IF_LLADDR(ifp), ETHER_ADDR_LEN) == 0)
310 bcopy(lladdr, IF_LLADDR(ifp), ETHER_ADDR_LEN);
311 /* Let the protocol know the MAC has changed */
312 if (sc->sc_lladdr != NULL)
313 (*sc->sc_lladdr)(sc);
314 EVENTHANDLER_INVOKE(iflladdr_event, ifp);
318 lagg_capabilities(struct lagg_softc *sc)
320 struct lagg_port *lp;
321 int cap = ~0, ena = ~0;
324 LAGG_WLOCK_ASSERT(sc);
326 /* Get capabilities from the lagg ports */
327 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
328 cap &= lp->lp_ifp->if_capabilities;
329 ena &= lp->lp_ifp->if_capenable;
330 hwa &= lp->lp_ifp->if_hwassist;
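/* With no ports the ANDs above leave everything at ~0; report 0 instead. */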
332 cap = (cap == ~0 ? 0 : cap);
333 ena = (ena == ~0 ? 0 : ena);
334 hwa = (hwa == ~0 ? 0 : hwa);
336 if (sc->sc_ifp->if_capabilities != cap ||
337 sc->sc_ifp->if_capenable != ena ||
338 sc->sc_ifp->if_hwassist != hwa) {
339 sc->sc_ifp->if_capabilities = cap;
340 sc->sc_ifp->if_capenable = ena;
341 sc->sc_ifp->if_hwassist = hwa;
342 getmicrotime(&sc->sc_ifp->if_lastchange);
344 if (sc->sc_ifflags & IFF_DEBUG)
345 if_printf(sc->sc_ifp,
346 "capabilities 0x%08x enabled 0x%08x\n", cap, ena);
351 lagg_port_lladdr(struct lagg_port *lp, uint8_t *lladdr)
353 struct lagg_softc *sc = lp->lp_softc;
354 struct ifnet *ifp = lp->lp_ifp;
355 struct lagg_llq *llq;
358 LAGG_WLOCK_ASSERT(sc);
360 if (lp->lp_detaching ||
361 memcmp(lladdr, IF_LLADDR(ifp), ETHER_ADDR_LEN) == 0)
364 /* Check to make sure it's not already queued to be changed */
365 SLIST_FOREACH(llq, &sc->sc_llq_head, llq_entries) {
366 if (llq->llq_ifp == ifp) {
373 llq = malloc(sizeof(struct lagg_llq), M_DEVBUF, M_NOWAIT);
374 if (llq == NULL) /* XXX what to do */
378 /* Update the lladdr even if pending, it may have changed */
380 bcopy(lladdr, llq->llq_lladdr, ETHER_ADDR_LEN);
383 SLIST_INSERT_HEAD(&sc->sc_llq_head, llq, llq_entries);
385 taskqueue_enqueue(taskqueue_swi, &sc->sc_lladdr_task);
389 * Set the interface MAC address from a taskqueue to avoid a LOR.
392 lagg_port_setlladdr(void *arg, int pending)
394 struct lagg_softc *sc = (struct lagg_softc *)arg;
395 struct lagg_llq *llq, *head;
399 /* Grab a local reference to the queue and remove it from the softc */
401 head = SLIST_FIRST(&sc->sc_llq_head);
402 SLIST_FIRST(&sc->sc_llq_head) = NULL;
406 * Traverse the queue and set the lladdr on each ifp. It is safe to
407 * do this unlocked since we hold the only reference to the queue.
409 for (llq = head; llq != NULL; llq = head) {
412 /* Set the link layer address */
413 error = if_setlladdr(ifp, llq->llq_lladdr, ETHER_ADDR_LEN);
415 printf("%s: setlladdr failed on %s\n", __func__,
418 head = SLIST_NEXT(llq, llq_entries);
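/*
 * Add the interface ifp as a port of the lagg sc. Called with the lagg
 * write lock held; fails if the port limit is reached, the interface is
 * already a lagg port, or its MTU does not match the lagg.
 */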
424 lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp)
426 struct lagg_softc *sc_ptr;
427 struct lagg_port *lp;
430 LAGG_WLOCK_ASSERT(sc);
432 /* Limit the maximum number of lagg ports */
433 if (sc->sc_count >= LAGG_MAX_PORTS)
436 /* Check if the port has already been associated with a lagg */
437 if (ifp->if_lagg != NULL)
440 /* XXX Disallow non-ethernet interfaces (this should be any of 802) */
441 if (ifp->if_type != IFT_ETHER)
442 return (EPROTONOSUPPORT);
444 /* Allow the first Ethernet member to define the MTU */
445 if (SLIST_EMPTY(&sc->sc_ports))
446 sc->sc_ifp->if_mtu = ifp->if_mtu;
447 else if (sc->sc_ifp->if_mtu != ifp->if_mtu) {
448 if_printf(sc->sc_ifp, "invalid MTU for %s\n",
453 if ((lp = malloc(sizeof(struct lagg_port),
454 M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL)
457 /* Check if port is a stacked lagg */
458 mtx_lock(&lagg_list_mtx);
459 SLIST_FOREACH(sc_ptr, &lagg_list, sc_entries) {
460 if (ifp == sc_ptr->sc_ifp) {
461 mtx_unlock(&lagg_list_mtx);
464 /* XXX disable stacking for the moment, it's untested
465 lp->lp_flags |= LAGG_PORT_STACK;
466 if (lagg_port_checkstacking(sc_ptr) >=
468 mtx_unlock(&lagg_list_mtx);
475 mtx_unlock(&lagg_list_mtx);
477 /* Change the interface type */
478 lp->lp_iftype = ifp->if_type;
479 ifp->if_type = IFT_IEEE8023ADLAG;
481 lp->lp_ioctl = ifp->if_ioctl;
482 ifp->if_ioctl = lagg_port_ioctl;
483 lp->lp_output = ifp->if_output;
484 ifp->if_output = lagg_port_output;
489 /* Save port link layer address */
490 bcopy(IF_LLADDR(ifp), lp->lp_lladdr, ETHER_ADDR_LEN);
492 if (SLIST_EMPTY(&sc->sc_ports)) {
494 lagg_lladdr(sc, IF_LLADDR(ifp));
496 /* Update link layer address for this port */
497 lagg_port_lladdr(lp, IF_LLADDR(sc->sc_ifp));
500 /* Insert into the list of ports */
501 SLIST_INSERT_HEAD(&sc->sc_ports, lp, lp_entries);
504 /* Update lagg capabilities */
505 lagg_capabilities(sc);
508 /* Add multicast addresses and interface flags to this port */
509 lagg_ether_cmdmulti(lp, 1);
510 lagg_setflags(lp, 1);
512 if (sc->sc_port_create != NULL)
513 error = (*sc->sc_port_create)(lp);
515 /* remove the port again, without calling sc_port_destroy */
516 lagg_port_destroy(lp, 0);
524 lagg_port_checkstacking(struct lagg_softc *sc)
526 struct lagg_softc *sc_ptr;
527 struct lagg_port *lp;
530 LAGG_WLOCK_ASSERT(sc);
532 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
533 if (lp->lp_flags & LAGG_PORT_STACK) {
534 sc_ptr = (struct lagg_softc *)lp->lp_ifp->if_softc;
535 m = MAX(m, lagg_port_checkstacking(sc_ptr));
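/*
 * Remove a port from the lagg and restore the member interface to its
 * original state; the protocol's port destroy hook runs first when
 * runpd is set.
 */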
543 lagg_port_destroy(struct lagg_port *lp, int runpd)
545 struct lagg_softc *sc = lp->lp_softc;
546 struct lagg_port *lp_ptr;
547 struct lagg_llq *llq;
548 struct ifnet *ifp = lp->lp_ifp;
550 LAGG_WLOCK_ASSERT(sc);
552 if (runpd && sc->sc_port_destroy != NULL)
553 (*sc->sc_port_destroy)(lp);
556 * Remove multicast addresses and interface flags from this port and
557 * reset the MAC address; skip if the interface is being detached.
559 if (!lp->lp_detaching) {
560 lagg_ether_cmdmulti(lp, 0);
561 lagg_setflags(lp, 0);
562 lagg_port_lladdr(lp, lp->lp_lladdr);
565 /* Restore interface */
566 ifp->if_type = lp->lp_iftype;
567 ifp->if_ioctl = lp->lp_ioctl;
568 ifp->if_output = lp->lp_output;
571 /* Finally, remove the port from the lagg */
572 SLIST_REMOVE(&sc->sc_ports, lp, lagg_port, lp_entries);
575 /* Update the primary interface */
576 if (lp == sc->sc_primary) {
577 uint8_t lladdr[ETHER_ADDR_LEN];
579 if ((lp_ptr = SLIST_FIRST(&sc->sc_ports)) == NULL) {
580 bzero(&lladdr, ETHER_ADDR_LEN);
582 bcopy(lp_ptr->lp_lladdr,
583 lladdr, ETHER_ADDR_LEN);
585 lagg_lladdr(sc, lladdr);
586 sc->sc_primary = lp_ptr;
588 /* Update link layer address for each port */
589 SLIST_FOREACH(lp_ptr, &sc->sc_ports, lp_entries)
590 lagg_port_lladdr(lp_ptr, lladdr);
593 /* Remove any pending lladdr changes from the queue */
594 if (lp->lp_detaching) {
595 SLIST_FOREACH(llq, &sc->sc_llq_head, llq_entries) {
596 if (llq->llq_ifp == ifp) {
597 SLIST_REMOVE(&sc->sc_llq_head, llq, lagg_llq,
600 break; /* Only appears once */
606 if_printf(ifp, "%s: lp_ifflags unclean\n", __func__);
610 /* Update lagg capabilities */
611 lagg_capabilities(sc);
618 lagg_port_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
620 struct lagg_reqport *rp = (struct lagg_reqport *)data;
621 struct lagg_softc *sc;
622 struct lagg_port *lp = NULL;
625 /* Should be checked by the caller */
626 if (ifp->if_type != IFT_IEEE8023ADLAG ||
627 (lp = ifp->if_lagg) == NULL || (sc = lp->lp_softc) == NULL)
632 if (rp->rp_portname[0] == '\0' ||
633 ifunit(rp->rp_portname) != ifp) {
639 if ((lp = ifp->if_lagg) == NULL || lp->lp_softc != sc) {
645 lagg_port2req(lp, rp);
650 if (lp->lp_ioctl == NULL) {
654 error = (*lp->lp_ioctl)(ifp, cmd, data);
658 /* Update lagg interface capabilities */
660 lagg_capabilities(sc);
665 /* Do not allow the MTU to be changed once joined */
676 if (lp->lp_ioctl != NULL)
677 return ((*lp->lp_ioctl)(ifp, cmd, data));
683 lagg_port_output(struct ifnet *ifp, struct mbuf *m,
684 struct sockaddr *dst, struct route *ro)
686 struct lagg_port *lp = ifp->if_lagg;
687 struct ether_header *eh;
690 switch (dst->sa_family) {
691 case pseudo_AF_HDRCMPLT:
693 eh = (struct ether_header *)dst->sa_data;
694 type = eh->ether_type;
699 * Only allow ethernet types required to initiate or maintain the link;
700 * aggregated frames take a different path.
702 switch (ntohs(type)) {
703 case ETHERTYPE_PAE: /* EAPOL PAE/802.1x */
704 return ((*lp->lp_output)(ifp, m, dst, ro));
707 /* drop any other frames */
713 lagg_port_ifdetach(void *arg __unused, struct ifnet *ifp)
715 struct lagg_port *lp;
716 struct lagg_softc *sc;
718 if ((lp = ifp->if_lagg) == NULL)
724 lp->lp_detaching = 1;
725 lagg_port_destroy(lp, 1);
730 lagg_port2req(struct lagg_port *lp, struct lagg_reqport *rp)
732 struct lagg_softc *sc = lp->lp_softc;
734 strlcpy(rp->rp_ifname, sc->sc_ifname, sizeof(rp->rp_ifname));
735 strlcpy(rp->rp_portname, lp->lp_ifp->if_xname, sizeof(rp->rp_portname));
736 rp->rp_prio = lp->lp_prio;
737 rp->rp_flags = lp->lp_flags;
738 if (sc->sc_portreq != NULL)
739 (*sc->sc_portreq)(lp, (caddr_t)&rp->rp_psc);
741 /* Add protocol-specific flags */
742 switch (sc->sc_proto) {
743 case LAGG_PROTO_FAILOVER:
744 if (lp == sc->sc_primary)
745 rp->rp_flags |= LAGG_PORT_MASTER;
746 if (lp == lagg_link_active(sc, sc->sc_primary))
747 rp->rp_flags |= LAGG_PORT_ACTIVE;
750 case LAGG_PROTO_ROUNDROBIN:
751 case LAGG_PROTO_LOADBALANCE:
752 case LAGG_PROTO_ETHERCHANNEL:
753 if (LAGG_PORTACTIVE(lp))
754 rp->rp_flags |= LAGG_PORT_ACTIVE;
757 case LAGG_PROTO_LACP:
758 /* LACP has a different definition of active */
759 if (lacp_isactive(lp))
760 rp->rp_flags |= LAGG_PORT_ACTIVE;
761 if (lacp_iscollecting(lp))
762 rp->rp_flags |= LAGG_PORT_COLLECTING;
763 if (lacp_isdistributing(lp))
764 rp->rp_flags |= LAGG_PORT_DISTRIBUTING;
773 struct lagg_softc *sc = (struct lagg_softc *)xsc;
774 struct lagg_port *lp;
775 struct ifnet *ifp = sc->sc_ifp;
777 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
782 ifp->if_drv_flags |= IFF_DRV_RUNNING;
783 /* Update the port lladdrs */
784 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
785 lagg_port_lladdr(lp, IF_LLADDR(ifp));
787 if (sc->sc_init != NULL)
794 lagg_stop(struct lagg_softc *sc)
796 struct ifnet *ifp = sc->sc_ifp;
798 LAGG_WLOCK_ASSERT(sc);
800 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
803 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
805 if (sc->sc_stop != NULL)
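/*
 * Ioctl handler for the lagg interface itself: report and change the
 * aggregation protocol, add and remove ports, and fall through to
 * ether_ioctl() for everything else.
 */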
810 lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
812 struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
813 struct lagg_reqall *ra = (struct lagg_reqall *)data;
814 struct lagg_reqport *rp = (struct lagg_reqport *)data, rpbuf;
815 struct ifreq *ifr = (struct ifreq *)data;
816 struct lagg_port *lp;
818 struct thread *td = curthread;
820 int count, buflen, len, error = 0;
822 bzero(&rpbuf, sizeof(rpbuf));
828 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
830 buflen = count * sizeof(struct lagg_reqport);
833 outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
836 ra->ra_proto = sc->sc_proto;
837 if (sc->sc_req != NULL)
838 (*sc->sc_req)(sc, (caddr_t)&ra->ra_psc);
842 len = min(ra->ra_size, buflen);
843 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
844 if (len < sizeof(rpbuf))
847 lagg_port2req(lp, &rpbuf);
848 memcpy(buf, &rpbuf, sizeof(rpbuf));
850 buf += sizeof(rpbuf);
851 len -= sizeof(rpbuf);
854 ra->ra_ports = count;
855 ra->ra_size = count * sizeof(rpbuf);
856 error = copyout(outbuf, ra->ra_port, ra->ra_size);
857 free(outbuf, M_TEMP);
860 error = priv_check(td, PRIV_NET_LAGG);
863 if (ra->ra_proto >= LAGG_PROTO_MAX) {
864 error = EPROTONOSUPPORT;
867 if (sc->sc_proto != LAGG_PROTO_NONE) {
869 error = sc->sc_detach(sc);
870 /* Reset protocol and pointers */
871 sc->sc_proto = LAGG_PROTO_NONE;
872 sc->sc_detach = NULL;
875 sc->sc_port_create = NULL;
876 sc->sc_port_destroy = NULL;
877 sc->sc_linkstate = NULL;
880 sc->sc_lladdr = NULL;
882 sc->sc_portreq = NULL;
887 for (int i = 0; i < (sizeof(lagg_protos) /
888 sizeof(lagg_protos[0])); i++) {
889 if (lagg_protos[i].ti_proto == ra->ra_proto) {
890 if (sc->sc_ifflags & IFF_DEBUG)
891 printf("%s: using proto %u\n",
893 lagg_protos[i].ti_proto);
895 sc->sc_proto = lagg_protos[i].ti_proto;
896 if (sc->sc_proto != LAGG_PROTO_NONE)
897 error = lagg_protos[i].ti_attach(sc);
902 error = EPROTONOSUPPORT;
905 if (rp->rp_portname[0] == '\0' ||
906 (tpif = ifunit(rp->rp_portname)) == NULL) {
912 if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL ||
913 lp->lp_softc != sc) {
919 lagg_port2req(lp, rp);
923 error = priv_check(td, PRIV_NET_LAGG);
926 if (rp->rp_portname[0] == '\0' ||
927 (tpif = ifunit(rp->rp_portname)) == NULL) {
932 error = lagg_port_create(sc, tpif);
935 case SIOCSLAGGDELPORT:
936 error = priv_check(td, PRIV_NET_LAGG);
939 if (rp->rp_portname[0] == '\0' ||
940 (tpif = ifunit(rp->rp_portname)) == NULL) {
946 if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL ||
947 lp->lp_softc != sc) {
953 error = lagg_port_destroy(lp, 1);
957 /* Set flags on ports too */
959 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
960 lagg_setflags(lp, 1);
964 if (!(ifp->if_flags & IFF_UP) &&
965 (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
967 * If interface is marked down and it is running,
968 * then stop and disable it.
973 } else if ((ifp->if_flags & IFF_UP) &&
974 !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
976 * If interface is marked up and it is stopped, then
985 error = lagg_ether_setmulti(sc);
990 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
995 /* Do not allow the MTU or caps to be directly changed */
1000 error = ether_ioctl(ifp, cmd, data);
1007 lagg_ether_setmulti(struct lagg_softc *sc)
1009 struct lagg_port *lp;
1011 LAGG_WLOCK_ASSERT(sc);
1013 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
1014 /* First, remove any existing filter entries. */
1015 lagg_ether_cmdmulti(lp, 0);
1016 /* copy all addresses from the lagg interface to the port */
1017 lagg_ether_cmdmulti(lp, 1);
1023 lagg_ether_cmdmulti(struct lagg_port *lp, int set)
1025 struct lagg_softc *sc = lp->lp_softc;
1026 struct ifnet *ifp = lp->lp_ifp;
1027 struct ifnet *scifp = sc->sc_ifp;
1029 struct ifmultiaddr *ifma, *rifma = NULL;
1030 struct sockaddr_dl sdl;
1033 LAGG_WLOCK_ASSERT(sc);
1035 bzero((char *)&sdl, sizeof(sdl));
1036 sdl.sdl_len = sizeof(sdl);
1037 sdl.sdl_family = AF_LINK;
1038 sdl.sdl_type = IFT_ETHER;
1039 sdl.sdl_alen = ETHER_ADDR_LEN;
1040 sdl.sdl_index = ifp->if_index;
1043 TAILQ_FOREACH(ifma, &scifp->if_multiaddrs, ifma_link) {
1044 if (ifma->ifma_addr->sa_family != AF_LINK)
1046 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1047 LLADDR(&sdl), ETHER_ADDR_LEN);
1049 error = if_addmulti(ifp, (struct sockaddr *)&sdl, &rifma);
1052 mc = malloc(sizeof(struct lagg_mc), M_DEVBUF, M_NOWAIT);
1055 mc->mc_ifma = rifma;
1056 SLIST_INSERT_HEAD(&lp->lp_mc_head, mc, mc_entries);
1059 while ((mc = SLIST_FIRST(&lp->lp_mc_head)) != NULL) {
1060 SLIST_REMOVE(&lp->lp_mc_head, mc, lagg_mc, mc_entries);
1061 if_delmulti_ifma(mc->mc_ifma);
1068 /* Handle a ref-counted flag that should also be set on the lagg port */
1070 lagg_setflag(struct lagg_port *lp, int flag, int status,
1071 int (*func)(struct ifnet *, int))
1073 struct lagg_softc *sc = lp->lp_softc;
1074 struct ifnet *scifp = sc->sc_ifp;
1075 struct ifnet *ifp = lp->lp_ifp;
1078 LAGG_WLOCK_ASSERT(sc);
1080 status = status ? (scifp->if_flags & flag) : 0;
1081 /* Now "status" contains the flag value or 0 */
1084 * See if the recorded port status differs from what we want
1085 * it to be. If it does, flip it. We record the port status
1086 * in lp_ifflags so that we won't clear a port flag we haven't
1087 * set. In fact, we don't clear or set the port flags directly,
1088 * but get or release references to them. That's why we can be
1089 * sure that the recorded flags still agree with the actual
1090 * port flags.
1092 if (status != (lp->lp_ifflags & flag)) {
1093 error = (*func)(ifp, status);
1096 lp->lp_ifflags &= ~flag;
1097 lp->lp_ifflags |= status;
1103 * Handle IFF_* flags that require certain changes on the lagg port:
1104 * if "status" is true, update the port flags to match the lagg;
1105 * if "status" is false, forcibly clear the flags set on the port.
1108 lagg_setflags(struct lagg_port *lp, int status)
1112 for (i = 0; lagg_pflags[i].flag; i++) {
1113 error = lagg_setflag(lp, lagg_pflags[i].flag,
1114 status, lagg_pflags[i].func);
1122 lagg_start(struct ifnet *ifp)
1124 struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
1129 /* We need a Tx algorithm and at least one port */
1130 if (sc->sc_proto == LAGG_PROTO_NONE || sc->sc_count == 0) {
1131 IF_DRAIN(&ifp->if_snd);
1136 for (;; error = 0) {
1137 IFQ_DEQUEUE(&ifp->if_snd, m);
1141 ETHER_BPF_MTAP(ifp, m);
1143 error = (*sc->sc_start)(sc, m);
1152 static struct mbuf *
1153 lagg_input(struct ifnet *ifp, struct mbuf *m)
1155 struct lagg_port *lp = ifp->if_lagg;
1156 struct lagg_softc *sc = lp->lp_softc;
1157 struct ifnet *scifp = sc->sc_ifp;
1159 if ((scifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
1160 (lp->lp_flags & LAGG_PORT_DISABLED) ||
1161 sc->sc_proto == LAGG_PROTO_NONE) {
1167 ETHER_BPF_MTAP(scifp, m);
1169 m = (*sc->sc_input)(sc, lp, m);
1172 scifp->if_ipackets++;
1173 scifp->if_ibytes += m->m_pkthdr.len;
1175 if (scifp->if_flags & IFF_MONITOR) {
1186 lagg_media_change(struct ifnet *ifp)
1188 struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
1190 if (sc->sc_ifflags & IFF_DEBUG)
1191 printf("%s\n", __func__);
1198 lagg_media_status(struct ifnet *ifp, struct ifmediareq *imr)
1200 struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
1201 struct lagg_port *lp;
1203 imr->ifm_status = IFM_AVALID;
1204 imr->ifm_active = IFM_ETHER | IFM_AUTO;
1207 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
1208 if (LAGG_PORTACTIVE(lp))
1209 imr->ifm_status |= IFM_ACTIVE;
1215 lagg_linkstate(struct lagg_softc *sc)
1217 struct lagg_port *lp;
1218 int new_link = LINK_STATE_DOWN;
1221 /* Our link is considered up if at least one of our ports is active */
1222 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
1223 if (lp->lp_link_state == LINK_STATE_UP) {
1224 new_link = LINK_STATE_UP;
1228 if_link_state_change(sc->sc_ifp, new_link);
1230 /* Update if_baudrate to reflect the max possible speed */
1231 switch (sc->sc_proto) {
1232 case LAGG_PROTO_FAILOVER:
1233 sc->sc_ifp->if_baudrate = sc->sc_primary != NULL ?
1234 sc->sc_primary->lp_ifp->if_baudrate : 0;
1236 case LAGG_PROTO_ROUNDROBIN:
1237 case LAGG_PROTO_LOADBALANCE:
1238 case LAGG_PROTO_ETHERCHANNEL:
1240 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
1241 speed += lp->lp_ifp->if_baudrate;
1242 sc->sc_ifp->if_baudrate = speed;
1244 case LAGG_PROTO_LACP:
1245 /* LACP updates if_baudrate itself */
1251 lagg_port_state(struct ifnet *ifp, int state)
1253 struct lagg_port *lp = (struct lagg_port *)ifp->if_lagg;
1254 struct lagg_softc *sc = NULL;
1263 if (sc->sc_linkstate != NULL)
1264 (*sc->sc_linkstate)(lp);
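/*
 * Return an active port: prefer lp itself, then the port following lp,
 * otherwise the first active port on the lagg; NULL if none is active.
 */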
1269 lagg_link_active(struct lagg_softc *sc, struct lagg_port *lp)
1271 struct lagg_port *lp_next, *rval = NULL;
1272 // int new_link = LINK_STATE_DOWN;
1274 LAGG_RLOCK_ASSERT(sc);
1276 * Search for a port which reports an active link state.
1281 if (LAGG_PORTACTIVE(lp)) {
1285 if ((lp_next = SLIST_NEXT(lp, lp_entries)) != NULL &&
1286 LAGG_PORTACTIVE(lp_next)) {
1292 SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
1293 if (LAGG_PORTACTIVE(lp_next)) {
1302 * The IEEE 802.1D standard assumes that a lagg with
1303 * multiple ports is always full duplex. This is valid
1304 * for load sharing laggs and if at least two links
1305 * are active. Unfortunately, checking the latter would
1306 * be too expensive at this point.
1308 if ((sc->sc_capabilities & IFCAP_LAGG_FULLDUPLEX) &&
1310 new_link = LINK_STATE_FULL_DUPLEX;
1312 new_link = rval->lp_link_state;
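/*
 * Return a pointer to len bytes of header data at offset off, copying
 * into the caller-supplied buffer if the data is not contiguous.
 */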
1320 lagg_gethdr(struct mbuf *m, u_int off, u_int len, void *buf)
1322 if (m->m_pkthdr.len < (off + len)) {
1324 } else if (m->m_len < (off + len)) {
1325 m_copydata(m, off, len, buf);
1328 return (mtod(m, char *) + off);
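/*
 * Hash the Ethernet addresses, any VLAN tag and, for IP/IPv6 frames,
 * the source and destination addresses (plus the IPv6 flow label),
 * seeded with key. Used to pick a transmit port.
 */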
1332 lagg_hashmbuf(struct mbuf *m, uint32_t key)
1337 struct ether_header *eh;
1338 struct ether_vlan_header vlanbuf;
1339 const struct ether_vlan_header *vlan;
1341 const struct ip *ip;
1345 const struct ip6_hdr *ip6;
1346 struct ip6_hdr ip6buf;
1353 eh = mtod(m, struct ether_header *);
1354 etype = ntohs(eh->ether_type);
1355 p = hash32_buf(&eh->ether_shost, ETHER_ADDR_LEN, key);
1356 p = hash32_buf(&eh->ether_dhost, ETHER_ADDR_LEN, p);
1358 /* Special handling for encapsulating VLAN frames */
1359 if (m->m_flags & M_VLANTAG) {
1360 p = hash32_buf(&m->m_pkthdr.ether_vtag,
1361 sizeof(m->m_pkthdr.ether_vtag), p);
1362 } else if (etype == ETHERTYPE_VLAN) {
1363 vlan = lagg_gethdr(m, off, sizeof(*vlan), &vlanbuf);
1367 p = hash32_buf(&vlan->evl_tag, sizeof(vlan->evl_tag), p);
1368 etype = ntohs(vlan->evl_proto);
1369 off += sizeof(*vlan) - sizeof(*eh);
1375 ip = lagg_gethdr(m, off, sizeof(*ip), &ipbuf);
1379 p = hash32_buf(&ip->ip_src, sizeof(struct in_addr), p);
1380 p = hash32_buf(&ip->ip_dst, sizeof(struct in_addr), p);
1384 case ETHERTYPE_IPV6:
1385 ip6 = lagg_gethdr(m, off, sizeof(*ip6), &ip6buf);
1389 p = hash32_buf(&ip6->ip6_src, sizeof(struct in6_addr), p);
1390 p = hash32_buf(&ip6->ip6_dst, sizeof(struct in6_addr), p);
1391 flow = ip6->ip6_flow & IPV6_FLOWLABEL_MASK;
1392 p = hash32_buf(&flow, sizeof(flow), p); /* IPv6 flow label */
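/* Hand the frame directly to the selected port's transmit routine. */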
1401 lagg_enqueue(struct ifnet *ifp, struct mbuf *m)
1404 return (ifp->if_transmit)(ifp, m);
1408 * Simple round robin aggregation
1412 lagg_rr_attach(struct lagg_softc *sc)
1414 sc->sc_detach = lagg_rr_detach;
1415 sc->sc_start = lagg_rr_start;
1416 sc->sc_input = lagg_rr_input;
1417 sc->sc_port_create = NULL;
1418 sc->sc_capabilities = IFCAP_LAGG_FULLDUPLEX;
1425 lagg_rr_detach(struct lagg_softc *sc)
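/*
 * Spread outgoing frames across the active ports using an atomically
 * incremented sequence number.
 */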
1431 lagg_rr_start(struct lagg_softc *sc, struct mbuf *m)
1433 struct lagg_port *lp;
1436 p = atomic_fetchadd_32(&sc->sc_seq, 1);
1438 lp = SLIST_FIRST(&sc->sc_ports);
1440 lp = SLIST_NEXT(lp, lp_entries);
1443 * Check the port's link state; lagg_link_active() returns the next
1444 * active port if this link is down or lp is NULL.
1446 if ((lp = lagg_link_active(sc, lp)) == NULL) {
1452 return (lagg_enqueue(lp->lp_ifp, m));
1455 static struct mbuf *
1456 lagg_rr_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
1458 struct ifnet *ifp = sc->sc_ifp;
1460 /* Just pass in the packet to our lagg device */
1461 m->m_pkthdr.rcvif = ifp;
1471 lagg_fail_attach(struct lagg_softc *sc)
1473 sc->sc_detach = lagg_fail_detach;
1474 sc->sc_start = lagg_fail_start;
1475 sc->sc_input = lagg_fail_input;
1476 sc->sc_port_create = NULL;
1477 sc->sc_port_destroy = NULL;
1483 lagg_fail_detach(struct lagg_softc *sc)
1489 lagg_fail_start(struct lagg_softc *sc, struct mbuf *m)
1491 struct lagg_port *lp;
1493 /* Use the master port if active or the next available port */
1494 if ((lp = lagg_link_active(sc, sc->sc_primary)) == NULL) {
1500 return (lagg_enqueue(lp->lp_ifp, m));
1503 static struct mbuf *
1504 lagg_fail_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
1506 struct ifnet *ifp = sc->sc_ifp;
1507 struct lagg_port *tmp_tp;
1509 if (lp == sc->sc_primary || lagg_failover_rx_all) {
1510 m->m_pkthdr.rcvif = ifp;
1514 if (!LAGG_PORTACTIVE(sc->sc_primary)) {
1515 tmp_tp = lagg_link_active(sc, sc->sc_primary);
1517 * If tmp_tp is NULL, we've received a packet when all
1518 * our links are down. Weird, but process it anyway.
1520 if ((tmp_tp == NULL || tmp_tp == lp)) {
1521 m->m_pkthdr.rcvif = ifp;
1535 lagg_lb_attach(struct lagg_softc *sc)
1537 struct lagg_port *lp;
1540 if ((lb = (struct lagg_lb *)malloc(sizeof(struct lagg_lb),
1541 M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL)
1544 sc->sc_detach = lagg_lb_detach;
1545 sc->sc_start = lagg_lb_start;
1546 sc->sc_input = lagg_lb_input;
1547 sc->sc_port_create = lagg_lb_port_create;
1548 sc->sc_port_destroy = lagg_lb_port_destroy;
1549 sc->sc_capabilities = IFCAP_LAGG_FULLDUPLEX;
1551 lb->lb_key = arc4random();
1552 sc->sc_psc = (caddr_t)lb;
1554 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
1555 lagg_lb_port_create(lp);
1561 lagg_lb_detach(struct lagg_softc *sc)
1563 struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
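/*
 * Rebuild the flat port table used for hashing, skipping the port that
 * is being removed (if any).
 */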
1570 lagg_lb_porttable(struct lagg_softc *sc, struct lagg_port *lp)
1572 struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
1573 struct lagg_port *lp_next;
1576 bzero(&lb->lb_ports, sizeof(lb->lb_ports));
1577 SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
1580 if (i >= LAGG_MAX_PORTS)
1582 if (sc->sc_ifflags & IFF_DEBUG)
1583 printf("%s: port %s at index %d\n",
1584 sc->sc_ifname, lp_next->lp_ifname, i);
1585 lb->lb_ports[i++] = lp_next;
1592 lagg_lb_port_create(struct lagg_port *lp)
1594 struct lagg_softc *sc = lp->lp_softc;
1595 return (lagg_lb_porttable(sc, NULL));
1599 lagg_lb_port_destroy(struct lagg_port *lp)
1601 struct lagg_softc *sc = lp->lp_softc;
1602 lagg_lb_porttable(sc, lp);
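/*
 * Pick the output port by hashing the frame, or by using the
 * hardware-supplied flowid when one is present.
 */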
1606 lagg_lb_start(struct lagg_softc *sc, struct mbuf *m)
1608 struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
1609 struct lagg_port *lp = NULL;
1612 if (m->m_flags & M_FLOWID)
1613 p = m->m_pkthdr.flowid;
1615 p = lagg_hashmbuf(m, lb->lb_key);
1617 lp = lb->lb_ports[p];
1620 * Check the port's link state; lagg_link_active() returns the next
1621 * active port if this link is down or lp is NULL.
1623 if ((lp = lagg_link_active(sc, lp)) == NULL) {
1629 return (lagg_enqueue(lp->lp_ifp, m));
1632 static struct mbuf *
1633 lagg_lb_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
1635 struct ifnet *ifp = sc->sc_ifp;
1637 /* Just pass in the packet to our lagg device */
1638 m->m_pkthdr.rcvif = ifp;
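/*
 * LACP: the per-port and per-lagg work is delegated to the
 * ieee8023ad_lacp code through the handlers installed below.
 */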
1648 lagg_lacp_attach(struct lagg_softc *sc)
1650 struct lagg_port *lp;
1653 sc->sc_detach = lagg_lacp_detach;
1654 sc->sc_port_create = lacp_port_create;
1655 sc->sc_port_destroy = lacp_port_destroy;
1656 sc->sc_linkstate = lacp_linkstate;
1657 sc->sc_start = lagg_lacp_start;
1658 sc->sc_input = lagg_lacp_input;
1659 sc->sc_init = lacp_init;
1660 sc->sc_stop = lacp_stop;
1661 sc->sc_lladdr = lagg_lacp_lladdr;
1662 sc->sc_req = lacp_req;
1663 sc->sc_portreq = lacp_portreq;
1665 error = lacp_attach(sc);
1669 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
1670 lacp_port_create(lp);
1676 lagg_lacp_detach(struct lagg_softc *sc)
1678 struct lagg_port *lp;
1681 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
1682 lacp_port_destroy(lp);
1684 /* unlocking is safe here */
1686 error = lacp_detach(sc);
1693 lagg_lacp_lladdr(struct lagg_softc *sc)
1695 struct lagg_port *lp;
1697 /* purge all the lacp ports */
1698 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
1699 lacp_port_destroy(lp);
1701 /* add them back in */
1702 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
1703 lacp_port_create(lp);
1707 lagg_lacp_start(struct lagg_softc *sc, struct mbuf *m)
1709 struct lagg_port *lp;
1711 lp = lacp_select_tx_port(sc, m);
1718 return (lagg_enqueue(lp->lp_ifp, m));
1721 static struct mbuf *
1722 lagg_lacp_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
1724 struct ifnet *ifp = sc->sc_ifp;
1725 struct ether_header *eh;
1728 eh = mtod(m, struct ether_header *);
1729 etype = ntohs(eh->ether_type);
1731 /* Tap off LACP control messages */
1732 if (etype == ETHERTYPE_SLOW) {
1733 m = lacp_input(lp, m);
1739 * If the port is not collecting or not in the active aggregator then
1742 if (lacp_iscollecting(lp) == 0 || lacp_isactive(lp) == 0) {
1747 m->m_pkthdr.rcvif = ifp;