/* $OpenBSD: if_trunk.c,v 1.30 2007/01/31 06:20:19 reyk Exp $ */

/*
 * Copyright (c) 2005, 2006 Reyk Floeter <reyk@openbsd.org>
 * Copyright (c) 2007 Andrew Thompson <thompsa@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/module.h>
#include <sys/priv.h>
#include <sys/systm.h>
#include <sys/hash.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/taskqueue.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_clone.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_llc.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/bpf.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#include <net/if_vlan_var.h>
#include <net/if_lagg.h>
#include <net/ieee8023ad_lacp.h>
/* Special flags we should propagate to the lagg ports. */
	int	(*func)(struct ifnet *, int);
	{IFF_PROMISC, ifpromisc},
	{IFF_ALLMULTI, if_allmulti},
SLIST_HEAD(__trhead, lagg_softc) lagg_list;	/* list of laggs */
static struct mtx	lagg_list_mtx;
eventhandler_tag	lagg_detach_cookie = NULL;

static int	lagg_clone_create(struct if_clone *, int, caddr_t);
static void	lagg_clone_destroy(struct ifnet *);
static void	lagg_lladdr(struct lagg_softc *, uint8_t *);
static void	lagg_capabilities(struct lagg_softc *);
static void	lagg_port_lladdr(struct lagg_port *, uint8_t *);
static void	lagg_port_setlladdr(void *, int);
static int	lagg_port_create(struct lagg_softc *, struct ifnet *);
static int	lagg_port_destroy(struct lagg_port *, int);
static struct mbuf *lagg_input(struct ifnet *, struct mbuf *);
static void	lagg_linkstate(struct lagg_softc *);
static void	lagg_port_state(struct ifnet *, int);
static int	lagg_port_ioctl(struct ifnet *, u_long, caddr_t);
static int	lagg_port_output(struct ifnet *, struct mbuf *,
		    struct sockaddr *, struct route *);
static void	lagg_port_ifdetach(void *arg __unused, struct ifnet *);
static int	lagg_port_checkstacking(struct lagg_softc *);
static void	lagg_port2req(struct lagg_port *, struct lagg_reqport *);
static void	lagg_init(void *);
static void	lagg_stop(struct lagg_softc *);
static int	lagg_ioctl(struct ifnet *, u_long, caddr_t);
static int	lagg_ether_setmulti(struct lagg_softc *);
static int	lagg_ether_cmdmulti(struct lagg_port *, int);
static int	lagg_setflag(struct lagg_port *, int, int,
		    int (*func)(struct ifnet *, int));
static int	lagg_setflags(struct lagg_port *, int status);
static void	lagg_start(struct ifnet *);
static int	lagg_media_change(struct ifnet *);
static void	lagg_media_status(struct ifnet *, struct ifmediareq *);
static struct lagg_port *lagg_link_active(struct lagg_softc *,
		    struct lagg_port *);
static const void *lagg_gethdr(struct mbuf *, u_int, u_int, void *);
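/*
 * Interface cloner glue: "ifconfig laggN create" and "destroy" requests are
 * routed through lagg_cloner to lagg_clone_create()/lagg_clone_destroy(),
 * and the cloner itself is attached in lagg_modevent() below.
 */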
IFC_SIMPLE_DECLARE(lagg, 0);

/* Simple round robin */
static int	lagg_rr_attach(struct lagg_softc *);
static int	lagg_rr_detach(struct lagg_softc *);
static int	lagg_rr_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_rr_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);

/* Active failover */
static int	lagg_fail_attach(struct lagg_softc *);
static int	lagg_fail_detach(struct lagg_softc *);
static int	lagg_fail_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_fail_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);

static int	lagg_lb_attach(struct lagg_softc *);
static int	lagg_lb_detach(struct lagg_softc *);
static int	lagg_lb_port_create(struct lagg_port *);
static void	lagg_lb_port_destroy(struct lagg_port *);
static int	lagg_lb_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_lb_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);
static int	lagg_lb_porttable(struct lagg_softc *, struct lagg_port *);

static int	lagg_lacp_attach(struct lagg_softc *);
static int	lagg_lacp_detach(struct lagg_softc *);
static int	lagg_lacp_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_lacp_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);
static void	lagg_lacp_lladdr(struct lagg_softc *);

/* lagg protocol table */
static const struct {
	int	(*ti_attach)(struct lagg_softc *);
	{ LAGG_PROTO_ROUNDROBIN,	lagg_rr_attach },
	{ LAGG_PROTO_FAILOVER,		lagg_fail_attach },
	{ LAGG_PROTO_LOADBALANCE,	lagg_lb_attach },
	{ LAGG_PROTO_ETHERCHANNEL,	lagg_lb_attach },
	{ LAGG_PROTO_LACP,		lagg_lacp_attach },
	{ LAGG_PROTO_NONE,		NULL }
lagg_modevent(module_t mod, int type, void *data)
		mtx_init(&lagg_list_mtx, "if_lagg list", NULL, MTX_DEF);
		SLIST_INIT(&lagg_list);
		if_clone_attach(&lagg_cloner);
		lagg_input_p = lagg_input;
		lagg_linkstate_p = lagg_port_state;
		lagg_detach_cookie = EVENTHANDLER_REGISTER(
		    ifnet_departure_event, lagg_port_ifdetach, NULL,
		    EVENTHANDLER_PRI_ANY);
		EVENTHANDLER_DEREGISTER(ifnet_departure_event,
		    lagg_detach_cookie);
		if_clone_detach(&lagg_cloner);
		lagg_linkstate_p = NULL;
		mtx_destroy(&lagg_list_mtx);

static moduledata_t lagg_mod = {

DECLARE_MODULE(if_lagg, lagg_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
lagg_clone_create(struct if_clone *ifc, int unit, caddr_t params)
	struct lagg_softc *sc;
	static const u_char eaddr[6];	/* 00:00:00:00:00:00 */

	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);

	sc->sc_proto = LAGG_PROTO_NONE;
	for (i = 0; lagg_protos[i].ti_proto != LAGG_PROTO_NONE; i++) {
		if (lagg_protos[i].ti_proto == LAGG_PROTO_DEFAULT) {
			sc->sc_proto = lagg_protos[i].ti_proto;
			if ((error = lagg_protos[i].ti_attach(sc)) != 0) {
				if_free_type(ifp, IFT_ETHER);

	SLIST_INIT(&sc->sc_ports);
	TASK_INIT(&sc->sc_lladdr_task, 0, lagg_port_setlladdr, sc);

	/* Initialise pseudo media types */
	ifmedia_init(&sc->sc_media, 0, lagg_media_change,
	    lagg_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	if_initname(ifp, ifc->ifc_name, unit);
	ifp->if_type = IFT_ETHER;
	ifp->if_start = lagg_start;
	ifp->if_init = lagg_init;
	ifp->if_ioctl = lagg_ioctl;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;

	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach as an ordinary Ethernet device; child ports will be
	 * attached as the special device type IFT_IEEE8023ADLAG.
	 */
	ether_ifattach(ifp, eaddr);

	/* Insert into the global list of laggs */
	mtx_lock(&lagg_list_mtx);
	SLIST_INSERT_HEAD(&lagg_list, sc, sc_entries);
	mtx_unlock(&lagg_list_mtx);
lagg_clone_destroy(struct ifnet *ifp)
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
	struct lagg_port *lp;

	ifp->if_flags &= ~IFF_UP;

	/* Shutdown and remove lagg ports */
	while ((lp = SLIST_FIRST(&sc->sc_ports)) != NULL)
		lagg_port_destroy(lp, 1);
	/* Unhook the aggregation protocol */
	(*sc->sc_detach)(sc);

	ifmedia_removeall(&sc->sc_media);

	if_free_type(ifp, IFT_ETHER);

	mtx_lock(&lagg_list_mtx);
	SLIST_REMOVE(&lagg_list, sc, lagg_softc, sc_entries);
	mtx_unlock(&lagg_list_mtx);

	taskqueue_drain(taskqueue_swi, &sc->sc_lladdr_task);
	LAGG_LOCK_DESTROY(sc);
lagg_lladdr(struct lagg_softc *sc, uint8_t *lladdr)
	struct ifnet *ifp = sc->sc_ifp;

	if (memcmp(lladdr, IF_LLADDR(ifp), ETHER_ADDR_LEN) == 0)
		return;

	bcopy(lladdr, IF_LLADDR(ifp), ETHER_ADDR_LEN);
	/* Let the protocol know the MAC has changed */
	if (sc->sc_lladdr != NULL)
		(*sc->sc_lladdr)(sc);
lagg_capabilities(struct lagg_softc *sc)
	struct lagg_port *lp;
	int cap = ~0, ena = ~0;

	LAGG_WLOCK_ASSERT(sc);

	/* Get capabilities from the lagg ports */
	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		cap &= lp->lp_ifp->if_capabilities;
		ena &= lp->lp_ifp->if_capenable;
		hwa &= lp->lp_ifp->if_hwassist;
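	/*
	 * With no ports in the lagg the AND-accumulators above are still ~0,
	 * so normalise them to "no capabilities" before comparing below.
	 */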
	cap = (cap == ~0 ? 0 : cap);
	ena = (ena == ~0 ? 0 : ena);
	hwa = (hwa == ~0 ? 0 : hwa);

	if (sc->sc_ifp->if_capabilities != cap ||
	    sc->sc_ifp->if_capenable != ena ||
	    sc->sc_ifp->if_hwassist != hwa) {
		sc->sc_ifp->if_capabilities = cap;
		sc->sc_ifp->if_capenable = ena;
		sc->sc_ifp->if_hwassist = hwa;
		getmicrotime(&sc->sc_ifp->if_lastchange);

		if (sc->sc_ifflags & IFF_DEBUG)
			if_printf(sc->sc_ifp,
			    "capabilities 0x%08x enabled 0x%08x\n", cap, ena);
lagg_port_lladdr(struct lagg_port *lp, uint8_t *lladdr)
	struct lagg_softc *sc = lp->lp_softc;
	struct ifnet *ifp = lp->lp_ifp;
	struct lagg_llq *llq;

	LAGG_WLOCK_ASSERT(sc);

	if (lp->lp_detaching ||
	    memcmp(lladdr, IF_LLADDR(ifp), ETHER_ADDR_LEN) == 0)
		return;

	/* Check to make sure it's not already queued to be changed */
	SLIST_FOREACH(llq, &sc->sc_llq_head, llq_entries) {
		if (llq->llq_ifp == ifp) {

	llq = malloc(sizeof(struct lagg_llq), M_DEVBUF, M_NOWAIT);
	if (llq == NULL)	/* XXX what to do */
		return;

	/* Update the lladdr even if pending; it may have changed */
	bcopy(lladdr, llq->llq_lladdr, ETHER_ADDR_LEN);

	SLIST_INSERT_HEAD(&sc->sc_llq_head, llq, llq_entries);

	taskqueue_enqueue(taskqueue_swi, &sc->sc_lladdr_task);
/*
 * Set the interface MAC address from a taskqueue to avoid a LOR.
 */
lagg_port_setlladdr(void *arg, int pending)
	struct lagg_softc *sc = (struct lagg_softc *)arg;
	struct lagg_llq *llq, *head;

	/* Grab a local reference of the queue and remove it from the softc */
	head = SLIST_FIRST(&sc->sc_llq_head);
	SLIST_FIRST(&sc->sc_llq_head) = NULL;

	/*
	 * Traverse the queue and set the lladdr on each ifp. It is safe to
	 * do this unlocked as we hold the only reference to it.
	 */
	for (llq = head; llq != NULL; llq = head) {

		/* Set the link layer address */
		error = if_setlladdr(ifp, llq->llq_lladdr, ETHER_ADDR_LEN);
			printf("%s: setlladdr failed on %s\n", __func__,

		head = SLIST_NEXT(llq, llq_entries);
lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp)
	struct lagg_softc *sc_ptr;
	struct lagg_port *lp;

	LAGG_WLOCK_ASSERT(sc);

	/* Limit the maximum number of lagg ports */
	if (sc->sc_count >= LAGG_MAX_PORTS)

	/* Check if port has already been associated to a lagg */
	if (ifp->if_lagg != NULL)

	/* XXX Disallow non-Ethernet interfaces (this should be any of 802) */
	if (ifp->if_type != IFT_ETHER)
		return (EPROTONOSUPPORT);

	/* Allow the first Ethernet member to define the MTU */
	if (SLIST_EMPTY(&sc->sc_ports))
		sc->sc_ifp->if_mtu = ifp->if_mtu;
	else if (sc->sc_ifp->if_mtu != ifp->if_mtu) {
		if_printf(sc->sc_ifp, "invalid MTU for %s\n",

	if ((lp = malloc(sizeof(struct lagg_port),
	    M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL)

	/* Check if port is a stacked lagg */
	mtx_lock(&lagg_list_mtx);
	SLIST_FOREACH(sc_ptr, &lagg_list, sc_entries) {
		if (ifp == sc_ptr->sc_ifp) {
			mtx_unlock(&lagg_list_mtx);
			/* XXX disable stacking for the moment, it's untested
			lp->lp_flags |= LAGG_PORT_STACK;
			if (lagg_port_checkstacking(sc_ptr) >=
				mtx_unlock(&lagg_list_mtx);

	mtx_unlock(&lagg_list_mtx);

	/* Change the interface type */
	lp->lp_iftype = ifp->if_type;
	ifp->if_type = IFT_IEEE8023ADLAG;

	lp->lp_ioctl = ifp->if_ioctl;
	ifp->if_ioctl = lagg_port_ioctl;
	lp->lp_output = ifp->if_output;
	ifp->if_output = lagg_port_output;

	/* Save port link layer address */
	bcopy(IF_LLADDR(ifp), lp->lp_lladdr, ETHER_ADDR_LEN);

	if (SLIST_EMPTY(&sc->sc_ports)) {
		lagg_lladdr(sc, IF_LLADDR(ifp));

		/* Update link layer address for this port */
		lagg_port_lladdr(lp, IF_LLADDR(sc->sc_ifp));

	/* Insert into the list of ports */
	SLIST_INSERT_HEAD(&sc->sc_ports, lp, lp_entries);

	/* Update lagg capabilities */
	lagg_capabilities(sc);

	/* Add multicast addresses and interface flags to this port */
	lagg_ether_cmdmulti(lp, 1);
	lagg_setflags(lp, 1);

	if (sc->sc_port_create != NULL)
		error = (*sc->sc_port_create)(lp);
		/* remove the port again, without calling sc_port_destroy */
		lagg_port_destroy(lp, 0);
lagg_port_checkstacking(struct lagg_softc *sc)
	struct lagg_softc *sc_ptr;
	struct lagg_port *lp;

	LAGG_WLOCK_ASSERT(sc);

	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		if (lp->lp_flags & LAGG_PORT_STACK) {
			sc_ptr = (struct lagg_softc *)lp->lp_ifp->if_softc;
			m = MAX(m, lagg_port_checkstacking(sc_ptr));
lagg_port_destroy(struct lagg_port *lp, int runpd)
	struct lagg_softc *sc = lp->lp_softc;
	struct lagg_port *lp_ptr;
	struct lagg_llq *llq;
	struct ifnet *ifp = lp->lp_ifp;

	LAGG_WLOCK_ASSERT(sc);

	if (runpd && sc->sc_port_destroy != NULL)
		(*sc->sc_port_destroy)(lp);

	/*
	 * Remove multicast addresses and interface flags from this port and
	 * reset the MAC address; skip this if the interface is being detached.
	 */
	if (!lp->lp_detaching) {
		lagg_ether_cmdmulti(lp, 0);
		lagg_setflags(lp, 0);
		lagg_port_lladdr(lp, lp->lp_lladdr);

	/* Restore interface */
	ifp->if_type = lp->lp_iftype;
	ifp->if_ioctl = lp->lp_ioctl;
	ifp->if_output = lp->lp_output;

	/* Finally, remove the port from the lagg */
	SLIST_REMOVE(&sc->sc_ports, lp, lagg_port, lp_entries);

	/* Update the primary interface */
	if (lp == sc->sc_primary) {
		uint8_t lladdr[ETHER_ADDR_LEN];

		if ((lp_ptr = SLIST_FIRST(&sc->sc_ports)) == NULL) {
			bzero(&lladdr, ETHER_ADDR_LEN);
			bcopy(lp_ptr->lp_lladdr,
			    lladdr, ETHER_ADDR_LEN);

		lagg_lladdr(sc, lladdr);
		sc->sc_primary = lp_ptr;

		/* Update link layer address for each port */
		SLIST_FOREACH(lp_ptr, &sc->sc_ports, lp_entries)
			lagg_port_lladdr(lp_ptr, lladdr);

	/* Remove any pending lladdr changes from the queue */
	if (lp->lp_detaching) {
		SLIST_FOREACH(llq, &sc->sc_llq_head, llq_entries) {
			if (llq->llq_ifp == ifp) {
				SLIST_REMOVE(&sc->sc_llq_head, llq, lagg_llq,
				    llq_entries);
				break;	/* Only appears once */

		if_printf(ifp, "%s: lp_ifflags unclean\n", __func__);

	/* Update lagg capabilities */
	lagg_capabilities(sc);
lagg_port_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
	struct lagg_reqport *rp = (struct lagg_reqport *)data;
	struct lagg_softc *sc;
	struct lagg_port *lp = NULL;

	/* Should be checked by the caller */
	if (ifp->if_type != IFT_IEEE8023ADLAG ||
	    (lp = ifp->if_lagg) == NULL || (sc = lp->lp_softc) == NULL)

		if (rp->rp_portname[0] == '\0' ||
		    ifunit(rp->rp_portname) != ifp) {

		if ((lp = ifp->if_lagg) == NULL || lp->lp_softc != sc) {

		lagg_port2req(lp, rp);

		if (lp->lp_ioctl == NULL) {

		error = (*lp->lp_ioctl)(ifp, cmd, data);

		/* Update lagg interface capabilities */
		lagg_capabilities(sc);

		/* Do not allow the MTU to be changed once joined */

	if (lp->lp_ioctl != NULL)
		return ((*lp->lp_ioctl)(ifp, cmd, data));
lagg_port_output(struct ifnet *ifp, struct mbuf *m,
	struct sockaddr *dst, struct route *ro)
	struct lagg_port *lp = ifp->if_lagg;
	struct ether_header *eh;

	switch (dst->sa_family) {
	case pseudo_AF_HDRCMPLT:
		eh = (struct ether_header *)dst->sa_data;
		type = eh->ether_type;

	/*
	 * Only allow Ethernet types required to initiate or maintain the
	 * link; aggregated frames take a different path.
	 */
	switch (ntohs(type)) {
	case ETHERTYPE_PAE:	/* EAPOL PAE/802.1x */
		return ((*lp->lp_output)(ifp, m, dst, ro));

	/* drop any other frames */
lagg_port_ifdetach(void *arg __unused, struct ifnet *ifp)
	struct lagg_port *lp;
	struct lagg_softc *sc;

	if ((lp = ifp->if_lagg) == NULL)
		return;

	lp->lp_detaching = 1;
	lagg_port_destroy(lp, 1);
lagg_port2req(struct lagg_port *lp, struct lagg_reqport *rp)
	struct lagg_softc *sc = lp->lp_softc;

	strlcpy(rp->rp_ifname, sc->sc_ifname, sizeof(rp->rp_ifname));
	strlcpy(rp->rp_portname, lp->lp_ifp->if_xname, sizeof(rp->rp_portname));
	rp->rp_prio = lp->lp_prio;
	rp->rp_flags = lp->lp_flags;
	if (sc->sc_portreq != NULL)
		(*sc->sc_portreq)(lp, (caddr_t)&rp->rp_psc);

	/* Add protocol specific flags */
	switch (sc->sc_proto) {
	case LAGG_PROTO_FAILOVER:
		if (lp == sc->sc_primary)
			rp->rp_flags |= LAGG_PORT_MASTER;
		if (lp == lagg_link_active(sc, sc->sc_primary))
			rp->rp_flags |= LAGG_PORT_ACTIVE;

	case LAGG_PROTO_ROUNDROBIN:
	case LAGG_PROTO_LOADBALANCE:
	case LAGG_PROTO_ETHERCHANNEL:
		if (LAGG_PORTACTIVE(lp))
			rp->rp_flags |= LAGG_PORT_ACTIVE;

	case LAGG_PROTO_LACP:
		/* LACP has a different definition of active */
		if (lacp_isactive(lp))
			rp->rp_flags |= LAGG_PORT_ACTIVE;
		if (lacp_iscollecting(lp))
			rp->rp_flags |= LAGG_PORT_COLLECTING;
		if (lacp_isdistributing(lp))
			rp->rp_flags |= LAGG_PORT_DISTRIBUTING;
	struct lagg_softc *sc = (struct lagg_softc *)xsc;
	struct lagg_port *lp;
	struct ifnet *ifp = sc->sc_ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	/* Update the port lladdrs */
	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lagg_port_lladdr(lp, IF_LLADDR(ifp));

	if (sc->sc_init != NULL)
lagg_stop(struct lagg_softc *sc)
	struct ifnet *ifp = sc->sc_ifp;

	LAGG_WLOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	if (sc->sc_stop != NULL)
lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
	struct lagg_reqall *ra = (struct lagg_reqall *)data;
	struct lagg_reqport *rp = (struct lagg_reqport *)data, rpbuf;
	struct ifreq *ifr = (struct ifreq *)data;
	struct lagg_port *lp;
	struct thread *td = curthread;
	int count, buflen, len, error = 0;

	bzero(&rpbuf, sizeof(rpbuf));

		SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		buflen = count * sizeof(struct lagg_reqport);

		outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);

		ra->ra_proto = sc->sc_proto;
		if (sc->sc_req != NULL)
			(*sc->sc_req)(sc, (caddr_t)&ra->ra_psc);

		len = min(ra->ra_size, buflen);
		SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
			if (len < sizeof(rpbuf))

			lagg_port2req(lp, &rpbuf);
			memcpy(buf, &rpbuf, sizeof(rpbuf));
			buf += sizeof(rpbuf);
			len -= sizeof(rpbuf);

		ra->ra_ports = count;
		ra->ra_size = count * sizeof(rpbuf);
		error = copyout(outbuf, ra->ra_port, ra->ra_size);
		free(outbuf, M_TEMP);

		error = priv_check(td, PRIV_NET_LAGG);
		if (ra->ra_proto >= LAGG_PROTO_MAX) {
			error = EPROTONOSUPPORT;

		if (sc->sc_proto != LAGG_PROTO_NONE) {
			error = sc->sc_detach(sc);
			/* Reset protocol and pointers */
			sc->sc_proto = LAGG_PROTO_NONE;
			sc->sc_detach = NULL;
			sc->sc_port_create = NULL;
			sc->sc_port_destroy = NULL;
			sc->sc_linkstate = NULL;
			sc->sc_lladdr = NULL;
			sc->sc_portreq = NULL;

		for (int i = 0; i < (sizeof(lagg_protos) /
		    sizeof(lagg_protos[0])); i++) {
			if (lagg_protos[i].ti_proto == ra->ra_proto) {
				if (sc->sc_ifflags & IFF_DEBUG)
					printf("%s: using proto %u\n",
					    sc->sc_ifname,
					    lagg_protos[i].ti_proto);

				sc->sc_proto = lagg_protos[i].ti_proto;
				if (sc->sc_proto != LAGG_PROTO_NONE)
					error = lagg_protos[i].ti_attach(sc);

		error = EPROTONOSUPPORT;

		if (rp->rp_portname[0] == '\0' ||
		    (tpif = ifunit(rp->rp_portname)) == NULL) {

		if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL ||
		    lp->lp_softc != sc) {

		lagg_port2req(lp, rp);

		error = priv_check(td, PRIV_NET_LAGG);

		if (rp->rp_portname[0] == '\0' ||
		    (tpif = ifunit(rp->rp_portname)) == NULL) {

		error = lagg_port_create(sc, tpif);

	case SIOCSLAGGDELPORT:
		error = priv_check(td, PRIV_NET_LAGG);

		if (rp->rp_portname[0] == '\0' ||
		    (tpif = ifunit(rp->rp_portname)) == NULL) {

		if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL ||
		    lp->lp_softc != sc) {

		error = lagg_port_destroy(lp, 1);

		/* Set flags on ports too */
		SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
			lagg_setflags(lp, 1);

		if (!(ifp->if_flags & IFF_UP) &&
		    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
			/*
			 * If interface is marked down and it is running,
			 * then stop and disable it.
			 */
		} else if ((ifp->if_flags & IFF_UP) &&
		    !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */

		error = lagg_ether_setmulti(sc);

		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);

		/* Do not allow the MTU or caps to be directly changed */

		error = ether_ioctl(ifp, cmd, data);
lagg_ether_setmulti(struct lagg_softc *sc)
	struct lagg_port *lp;

	LAGG_WLOCK_ASSERT(sc);

	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		/* First, remove any existing filter entries. */
		lagg_ether_cmdmulti(lp, 0);
		/* Copy all addresses from the lagg interface to the port. */
		lagg_ether_cmdmulti(lp, 1);
lagg_ether_cmdmulti(struct lagg_port *lp, int set)
	struct lagg_softc *sc = lp->lp_softc;
	struct ifnet *ifp = lp->lp_ifp;
	struct ifnet *scifp = sc->sc_ifp;
	struct ifmultiaddr *ifma, *rifma = NULL;
	struct sockaddr_dl sdl;

	LAGG_WLOCK_ASSERT(sc);

	bzero((char *)&sdl, sizeof(sdl));
	sdl.sdl_len = sizeof(sdl);
	sdl.sdl_family = AF_LINK;
	sdl.sdl_type = IFT_ETHER;
	sdl.sdl_alen = ETHER_ADDR_LEN;
	sdl.sdl_index = ifp->if_index;

		TAILQ_FOREACH(ifma, &scifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
			bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
			    LLADDR(&sdl), ETHER_ADDR_LEN);

			error = if_addmulti(ifp, (struct sockaddr *)&sdl, &rifma);
			mc = malloc(sizeof(struct lagg_mc), M_DEVBUF, M_NOWAIT);
			mc->mc_ifma = rifma;
			SLIST_INSERT_HEAD(&lp->lp_mc_head, mc, mc_entries);

		while ((mc = SLIST_FIRST(&lp->lp_mc_head)) != NULL) {
			SLIST_REMOVE(&lp->lp_mc_head, mc, lagg_mc, mc_entries);
			if_delmulti_ifma(mc->mc_ifma);
/* Handle a ref-counted flag that should be set on the lagg port as well */
lagg_setflag(struct lagg_port *lp, int flag, int status,
    int (*func)(struct ifnet *, int))
	struct lagg_softc *sc = lp->lp_softc;
	struct ifnet *scifp = sc->sc_ifp;
	struct ifnet *ifp = lp->lp_ifp;

	LAGG_WLOCK_ASSERT(sc);

	status = status ? (scifp->if_flags & flag) : 0;
	/* Now "status" contains the flag value or 0 */

	/*
	 * See if the recorded port status differs from what we want it to
	 * be. If it does, flip it. We record the port's status in lp_ifflags
	 * so that we won't clear a flag on the port that we didn't set. In
	 * fact, we don't clear or set the port's flags directly; we get or
	 * release references to them instead. That's why we can be sure the
	 * recorded flags still agree with the port's actual flags.
	 */
	if (status != (lp->lp_ifflags & flag)) {
		error = (*func)(ifp, status);
		lp->lp_ifflags &= ~flag;
		lp->lp_ifflags |= status;
/*
 * Handle IFF_* flags that require certain changes on the lagg port.
 * If "status" is true, update the port's flags to match the lagg;
 * if "status" is false, forcibly clear the flags set on the port.
 */
lagg_setflags(struct lagg_port *lp, int status)
	for (i = 0; lagg_pflags[i].flag; i++) {
		error = lagg_setflag(lp, lagg_pflags[i].flag,
		    status, lagg_pflags[i].func);
lagg_start(struct ifnet *ifp)
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;

	/* We need a Tx algorithm and at least one port */
	if (sc->sc_proto == LAGG_PROTO_NONE || sc->sc_count == 0) {
		IF_DRAIN(&ifp->if_snd);
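	/*
	 * Dequeue each frame, tap it to BPF and hand it to the protocol's
	 * sc_start hook, which picks the outgoing member port.
	 */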
	for (;; error = 0) {
		IFQ_DEQUEUE(&ifp->if_snd, m);

		ETHER_BPF_MTAP(ifp, m);

		error = (*sc->sc_start)(sc, m);
static struct mbuf *
lagg_input(struct ifnet *ifp, struct mbuf *m)
	struct lagg_port *lp = ifp->if_lagg;
	struct lagg_softc *sc = lp->lp_softc;
	struct ifnet *scifp = sc->sc_ifp;

	if ((scifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    (lp->lp_flags & LAGG_PORT_DISABLED) ||
	    sc->sc_proto == LAGG_PROTO_NONE) {

	ETHER_BPF_MTAP(scifp, m);

	m = (*sc->sc_input)(sc, lp, m);

		scifp->if_ipackets++;
		scifp->if_ibytes += m->m_pkthdr.len;

	if (scifp->if_flags & IFF_MONITOR) {
lagg_media_change(struct ifnet *ifp)
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;

	if (sc->sc_ifflags & IFF_DEBUG)
		printf("%s\n", __func__);
lagg_media_status(struct ifnet *ifp, struct ifmediareq *imr)
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
	struct lagg_port *lp;

	imr->ifm_status = IFM_AVALID;
	imr->ifm_active = IFM_ETHER | IFM_AUTO;

	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		if (LAGG_PORTACTIVE(lp))
			imr->ifm_status |= IFM_ACTIVE;
lagg_linkstate(struct lagg_softc *sc)
	struct lagg_port *lp;
	int new_link = LINK_STATE_DOWN;

	/* Our link is considered up if at least one of our ports is active */
	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		if (lp->lp_link_state == LINK_STATE_UP) {
			new_link = LINK_STATE_UP;

	if_link_state_change(sc->sc_ifp, new_link);

	/* Update if_baudrate to reflect the max possible speed */
	switch (sc->sc_proto) {
	case LAGG_PROTO_FAILOVER:
		sc->sc_ifp->if_baudrate = sc->sc_primary != NULL ?
		    sc->sc_primary->lp_ifp->if_baudrate : 0;

	case LAGG_PROTO_ROUNDROBIN:
	case LAGG_PROTO_LOADBALANCE:
	case LAGG_PROTO_ETHERCHANNEL:
		SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
			speed += lp->lp_ifp->if_baudrate;
		sc->sc_ifp->if_baudrate = speed;

	case LAGG_PROTO_LACP:
		/* LACP updates if_baudrate itself */
lagg_port_state(struct ifnet *ifp, int state)
	struct lagg_port *lp = (struct lagg_port *)ifp->if_lagg;
	struct lagg_softc *sc = NULL;

	if (sc->sc_linkstate != NULL)
		(*sc->sc_linkstate)(lp);
lagg_link_active(struct lagg_softc *sc, struct lagg_port *lp)
	struct lagg_port *lp_next, *rval = NULL;
	// int new_link = LINK_STATE_DOWN;

	LAGG_RLOCK_ASSERT(sc);
	/*
	 * Search for a port which reports an active link state.
	 */

	if (LAGG_PORTACTIVE(lp)) {

	if ((lp_next = SLIST_NEXT(lp, lp_entries)) != NULL &&
	    LAGG_PORTACTIVE(lp_next)) {

	SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
		if (LAGG_PORTACTIVE(lp_next)) {

	/*
	 * The IEEE 802.1D standard assumes that a lagg with
	 * multiple ports is always full duplex. This is valid
	 * for load sharing laggs and if at least two links
	 * are active. Unfortunately, checking the latter would
	 * be too expensive at this point.
	 */
	if ((sc->sc_capabilities & IFCAP_LAGG_FULLDUPLEX) &&
		new_link = LINK_STATE_FULL_DUPLEX;
		new_link = rval->lp_link_state;
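/*
 * Return a pointer to "len" contiguous bytes at offset "off" within the
 * mbuf chain, copying them into "buf" if they do not already sit in one
 * mbuf; a packet shorter than off + len yields NULL.
 */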
lagg_gethdr(struct mbuf *m, u_int off, u_int len, void *buf)
	if (m->m_pkthdr.len < (off + len)) {
	} else if (m->m_len < (off + len)) {
		m_copydata(m, off, len, buf);

	return (mtod(m, char *) + off);
lagg_hashmbuf(struct mbuf *m, uint32_t key)
	struct ether_header *eh;
	struct ether_vlan_header vlanbuf;
	const struct ether_vlan_header *vlan;
	const struct ip *ip;
	const struct ip6_hdr *ip6;
	struct ip6_hdr ip6buf;
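	/*
	 * The flow hash folds in the Ethernet source/destination addresses,
	 * any VLAN tag, and for IP/IPv6 packets the source/destination
	 * addresses (plus the IPv6 flow label), seeded with "key".
	 */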
	eh = mtod(m, struct ether_header *);
	etype = ntohs(eh->ether_type);
	p = hash32_buf(&eh->ether_shost, ETHER_ADDR_LEN, key);
	p = hash32_buf(&eh->ether_dhost, ETHER_ADDR_LEN, p);

	/* Special handling for encapsulating VLAN frames */
	if (m->m_flags & M_VLANTAG) {
		p = hash32_buf(&m->m_pkthdr.ether_vtag,
		    sizeof(m->m_pkthdr.ether_vtag), p);
	} else if (etype == ETHERTYPE_VLAN) {
		vlan = lagg_gethdr(m, off, sizeof(*vlan), &vlanbuf);
		p = hash32_buf(&vlan->evl_tag, sizeof(vlan->evl_tag), p);
		etype = ntohs(vlan->evl_proto);
		off += sizeof(*vlan) - sizeof(*eh);

		ip = lagg_gethdr(m, off, sizeof(*ip), &ipbuf);
		p = hash32_buf(&ip->ip_src, sizeof(struct in_addr), p);
		p = hash32_buf(&ip->ip_dst, sizeof(struct in_addr), p);

	case ETHERTYPE_IPV6:
		ip6 = lagg_gethdr(m, off, sizeof(*ip6), &ip6buf);
		p = hash32_buf(&ip6->ip6_src, sizeof(struct in6_addr), p);
		p = hash32_buf(&ip6->ip6_dst, sizeof(struct in6_addr), p);
		flow = ip6->ip6_flow & IPV6_FLOWLABEL_MASK;
		p = hash32_buf(&flow, sizeof(flow), p);	/* IPv6 flow label */
lagg_enqueue(struct ifnet *ifp, struct mbuf *m)
	return (ifp->if_transmit)(ifp, m);
/*
 * Simple round robin aggregation
 */
lagg_rr_attach(struct lagg_softc *sc)
	sc->sc_detach = lagg_rr_detach;
	sc->sc_start = lagg_rr_start;
	sc->sc_input = lagg_rr_input;
	sc->sc_port_create = NULL;
	sc->sc_capabilities = IFCAP_LAGG_FULLDUPLEX;

lagg_rr_detach(struct lagg_softc *sc)

lagg_rr_start(struct lagg_softc *sc, struct mbuf *m)
	struct lagg_port *lp;

	p = atomic_fetchadd_32(&sc->sc_seq, 1);
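	/*
	 * The per-packet sequence counter determines which port takes this
	 * frame; the list walk below advances to that port so transmissions
	 * rotate across the member ports.
	 */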
	lp = SLIST_FIRST(&sc->sc_ports);
		lp = SLIST_NEXT(lp, lp_entries);

	/*
	 * Check the port's link state; lagg_link_active() will return the
	 * next active port if this one's link is down or lp is NULL.
	 */
	if ((lp = lagg_link_active(sc, lp)) == NULL) {

	return (lagg_enqueue(lp->lp_ifp, m));

static struct mbuf *
lagg_rr_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
	struct ifnet *ifp = sc->sc_ifp;

	/* Just pass in the packet to our lagg device */
	m->m_pkthdr.rcvif = ifp;
lagg_fail_attach(struct lagg_softc *sc)
	sc->sc_detach = lagg_fail_detach;
	sc->sc_start = lagg_fail_start;
	sc->sc_input = lagg_fail_input;
	sc->sc_port_create = NULL;
	sc->sc_port_destroy = NULL;

lagg_fail_detach(struct lagg_softc *sc)

lagg_fail_start(struct lagg_softc *sc, struct mbuf *m)
	struct lagg_port *lp;

	/* Use the master port if active or the next available port */
	if ((lp = lagg_link_active(sc, sc->sc_primary)) == NULL) {

	return (lagg_enqueue(lp->lp_ifp, m));
static struct mbuf *
lagg_fail_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
	struct ifnet *ifp = sc->sc_ifp;
	struct lagg_port *tmp_tp;

	if (lp == sc->sc_primary) {
		m->m_pkthdr.rcvif = ifp;

	if (!LAGG_PORTACTIVE(sc->sc_primary)) {
		tmp_tp = lagg_link_active(sc, sc->sc_primary);
		/*
		 * If tmp_tp is NULL, we've received a packet when all
		 * our links are down. Weird, but process it anyway.
		 */
		if ((tmp_tp == NULL || tmp_tp == lp)) {
			m->m_pkthdr.rcvif = ifp;
lagg_lb_attach(struct lagg_softc *sc)
	struct lagg_port *lp;

	if ((lb = (struct lagg_lb *)malloc(sizeof(struct lagg_lb),
	    M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL)

	sc->sc_detach = lagg_lb_detach;
	sc->sc_start = lagg_lb_start;
	sc->sc_input = lagg_lb_input;
	sc->sc_port_create = lagg_lb_port_create;
	sc->sc_port_destroy = lagg_lb_port_destroy;
	sc->sc_capabilities = IFCAP_LAGG_FULLDUPLEX;

	lb->lb_key = arc4random();
	sc->sc_psc = (caddr_t)lb;

	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lagg_lb_port_create(lp);
lagg_lb_detach(struct lagg_softc *sc)
	struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;

lagg_lb_porttable(struct lagg_softc *sc, struct lagg_port *lp)
	struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
	struct lagg_port *lp_next;

	bzero(&lb->lb_ports, sizeof(lb->lb_ports));
	SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
		if (i >= LAGG_MAX_PORTS)
		if (sc->sc_ifflags & IFF_DEBUG)
			printf("%s: port %s at index %d\n",
			    sc->sc_ifname, lp_next->lp_ifname, i);
		lb->lb_ports[i++] = lp_next;

lagg_lb_port_create(struct lagg_port *lp)
	struct lagg_softc *sc = lp->lp_softc;
	return (lagg_lb_porttable(sc, NULL));

lagg_lb_port_destroy(struct lagg_port *lp)
	struct lagg_softc *sc = lp->lp_softc;
	lagg_lb_porttable(sc, lp);
lagg_lb_start(struct lagg_softc *sc, struct mbuf *m)
	struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
	struct lagg_port *lp = NULL;
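	/*
	 * Prefer a driver-supplied flow ID when the mbuf carries one;
	 * otherwise hash the headers with lagg_hashmbuf() so that a given
	 * flow always maps to the same slot in lb_ports[].
	 */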
	if (m->m_flags & M_FLOWID)
		p = m->m_pkthdr.flowid;
		p = lagg_hashmbuf(m, lb->lb_key);

	lp = lb->lb_ports[p];

	/*
	 * Check the port's link state; lagg_link_active() will return the
	 * next active port if this one's link is down or lp is NULL.
	 */
	if ((lp = lagg_link_active(sc, lp)) == NULL) {

	return (lagg_enqueue(lp->lp_ifp, m));
static struct mbuf *
lagg_lb_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
	struct ifnet *ifp = sc->sc_ifp;

	/* Just pass in the packet to our lagg device */
	m->m_pkthdr.rcvif = ifp;
lagg_lacp_attach(struct lagg_softc *sc)
	struct lagg_port *lp;

	sc->sc_detach = lagg_lacp_detach;
	sc->sc_port_create = lacp_port_create;
	sc->sc_port_destroy = lacp_port_destroy;
	sc->sc_linkstate = lacp_linkstate;
	sc->sc_start = lagg_lacp_start;
	sc->sc_input = lagg_lacp_input;
	sc->sc_init = lacp_init;
	sc->sc_stop = lacp_stop;
	sc->sc_lladdr = lagg_lacp_lladdr;
	sc->sc_req = lacp_req;
	sc->sc_portreq = lacp_portreq;

	error = lacp_attach(sc);

	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lacp_port_create(lp);

lagg_lacp_detach(struct lagg_softc *sc)
	struct lagg_port *lp;

	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lacp_port_destroy(lp);

	/* unlocking is safe here */
	error = lacp_detach(sc);
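/*
 * Called via the sc_lladdr hook when the lagg MAC address changes: the
 * LACP ports are destroyed and recreated so the protocol state is rebuilt
 * with the new link-layer address.
 */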
lagg_lacp_lladdr(struct lagg_softc *sc)
	struct lagg_port *lp;

	/* purge all the lacp ports */
	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lacp_port_destroy(lp);

	/* add them back in */
	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lacp_port_create(lp);
lagg_lacp_start(struct lagg_softc *sc, struct mbuf *m)
	struct lagg_port *lp;

	lp = lacp_select_tx_port(sc, m);

	return (lagg_enqueue(lp->lp_ifp, m));

static struct mbuf *
lagg_lacp_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
	struct ifnet *ifp = sc->sc_ifp;
	struct ether_header *eh;

	eh = mtod(m, struct ether_header *);
	etype = ntohs(eh->ether_type);

	/* Tap off LACP control messages */
	if (etype == ETHERTYPE_SLOW) {
		m = lacp_input(lp, m);

	/*
	 * If the port is not collecting or not in the active aggregator then
	 * free m.
	 */
	if (lacp_iscollecting(lp) == 0 || lacp_isactive(lp) == 0) {

	m->m_pkthdr.rcvif = ifp;