1 /* $NetBSD: ieee8023ad_lacp.c,v 1.3 2005/12/11 12:24:54 christos Exp $ */
4 * Copyright (c)2005 YAMAMOTO Takashi,
5 * Copyright (c)2008 Andrew Thompson <thompsa@FreeBSD.org>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
33 #include "opt_ratelimit.h"
35 #include <sys/param.h>
36 #include <sys/callout.h>
37 #include <sys/eventhandler.h>
39 #include <sys/systm.h>
40 #include <sys/malloc.h>
41 #include <sys/kernel.h> /* hz */
42 #include <sys/socket.h> /* for net/if.h */
43 #include <sys/sockio.h>
44 #include <sys/sysctl.h>
45 #include <machine/stdarg.h>
47 #include <sys/rwlock.h>
48 #include <sys/taskqueue.h>
51 #include <net/if_var.h>
52 #include <net/if_dl.h>
53 #include <net/ethernet.h>
54 #include <net/if_media.h>
55 #include <net/if_types.h>
57 #include <net/if_lagg.h>
58 #include <net/ieee8023ad_lacp.h>
61 * actor system priority and port priority.
62 * XXX should be configurable.
65 #define LACP_SYSTEM_PRIO 0x8000
66 #define LACP_PORT_PRIO 0x8000
/* IEEE 802.3 Slow Protocols destination MAC (01:80:c2:00:00:02), used for LACPDUs and markers. */
68 const uint8_t ethermulticastaddr_slowprotocols[ETHER_ADDR_LEN] =
69 { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x02 };
/*
 * Expected TLV layout of a received LACPDU: actor, partner, collector info.
 * NOTE(review): the terminating { 0, 0 } entry and closing "};" are missing
 * from this extract — confirm against the full source.
 */
71 static const struct tlv_template lacp_info_tlv_template[] = {
72 	{ LACP_TYPE_ACTORINFO,
73 	    sizeof(struct tlvhdr) + sizeof(struct lacp_peerinfo) },
74 	{ LACP_TYPE_PARTNERINFO,
75 	    sizeof(struct tlvhdr) + sizeof(struct lacp_peerinfo) },
76 	{ LACP_TYPE_COLLECTORINFO,
77 	    sizeof(struct tlvhdr) + sizeof(struct lacp_collectorinfo) },
/*
 * Expected TLV layout of a marker request.
 * NOTE(review): the "{ MARKER_TYPE_INFO," entry line, terminator, and "};"
 * are missing from this extract.
 */
81 static const struct tlv_template marker_info_tlv_template[] = {
83 	    sizeof(struct tlvhdr) + sizeof(struct lacp_markerinfo) },
/*
 * Expected TLV layout of a marker response.
 * NOTE(review): terminator entry and closing "};" missing from this extract.
 */
87 static const struct tlv_template marker_response_tlv_template[] = {
88 	{ MARKER_TYPE_RESPONSE,
89 	    sizeof(struct tlvhdr) + sizeof(struct lacp_markerinfo) },
93 typedef void (*lacp_timer_func_t)(struct lacp_port *);
95 static void lacp_fill_actorinfo(struct lacp_port *, struct lacp_peerinfo *);
96 static void lacp_fill_markerinfo(struct lacp_port *,
97 struct lacp_markerinfo *);
99 static uint64_t lacp_aggregator_bandwidth(struct lacp_aggregator *);
100 static void lacp_suppress_distributing(struct lacp_softc *,
101 struct lacp_aggregator *);
102 static void lacp_transit_expire(void *);
103 static void lacp_update_portmap(struct lacp_softc *);
104 static void lacp_select_active_aggregator(struct lacp_softc *);
105 static uint16_t lacp_compose_key(struct lacp_port *);
106 static int tlv_check(const void *, size_t, const struct tlvhdr *,
107 const struct tlv_template *, boolean_t);
108 static void lacp_tick(void *);
110 static void lacp_fill_aggregator_id(struct lacp_aggregator *,
111 const struct lacp_port *);
112 static void lacp_fill_aggregator_id_peer(struct lacp_peerinfo *,
113 const struct lacp_peerinfo *);
114 static int lacp_aggregator_is_compatible(const struct lacp_aggregator *,
115 const struct lacp_port *);
116 static int lacp_peerinfo_is_compatible(const struct lacp_peerinfo *,
117 const struct lacp_peerinfo *);
119 static struct lacp_aggregator *lacp_aggregator_get(struct lacp_softc *,
121 static void lacp_aggregator_addref(struct lacp_softc *,
122 struct lacp_aggregator *);
123 static void lacp_aggregator_delref(struct lacp_softc *,
124 struct lacp_aggregator *);
126 /* receive machine */
128 static int lacp_pdu_input(struct lacp_port *, struct mbuf *);
129 static int lacp_marker_input(struct lacp_port *, struct mbuf *);
130 static void lacp_sm_rx(struct lacp_port *, const struct lacpdu *);
131 static void lacp_sm_rx_timer(struct lacp_port *);
132 static void lacp_sm_rx_set_expired(struct lacp_port *);
133 static void lacp_sm_rx_update_ntt(struct lacp_port *,
134 const struct lacpdu *);
135 static void lacp_sm_rx_record_pdu(struct lacp_port *,
136 const struct lacpdu *);
137 static void lacp_sm_rx_update_selected(struct lacp_port *,
138 const struct lacpdu *);
139 static void lacp_sm_rx_record_default(struct lacp_port *);
140 static void lacp_sm_rx_update_default_selected(struct lacp_port *);
141 static void lacp_sm_rx_update_selected_from_peerinfo(struct lacp_port *,
142 const struct lacp_peerinfo *);
146 static void lacp_sm_mux(struct lacp_port *);
147 static void lacp_set_mux(struct lacp_port *, enum lacp_mux_state);
148 static void lacp_sm_mux_timer(struct lacp_port *);
150 /* periodic transmit machine */
152 static void lacp_sm_ptx_update_timeout(struct lacp_port *, uint8_t);
153 static void lacp_sm_ptx_tx_schedule(struct lacp_port *);
154 static void lacp_sm_ptx_timer(struct lacp_port *);
156 /* transmit machine */
158 static void lacp_sm_tx(struct lacp_port *);
159 static void lacp_sm_assert_ntt(struct lacp_port *);
161 static void lacp_run_timers(struct lacp_port *);
162 static int lacp_compare_peerinfo(const struct lacp_peerinfo *,
163 const struct lacp_peerinfo *);
164 static int lacp_compare_systemid(const struct lacp_systemid *,
165 const struct lacp_systemid *);
166 static void lacp_port_enable(struct lacp_port *);
167 static void lacp_port_disable(struct lacp_port *);
168 static void lacp_select(struct lacp_port *);
169 static void lacp_unselect(struct lacp_port *);
170 static void lacp_disable_collecting(struct lacp_port *);
171 static void lacp_enable_collecting(struct lacp_port *);
172 static void lacp_disable_distributing(struct lacp_port *);
173 static void lacp_enable_distributing(struct lacp_port *);
174 static int lacp_xmit_lacpdu(struct lacp_port *);
175 static int lacp_xmit_marker(struct lacp_port *);
179 static void lacp_dump_lacpdu(const struct lacpdu *);
180 static const char *lacp_format_partner(const struct lacp_peerinfo *, char *,
182 static const char *lacp_format_lagid(const struct lacp_peerinfo *,
183 const struct lacp_peerinfo *, char *, size_t);
184 static const char *lacp_format_lagid_aggregator(const struct lacp_aggregator *,
186 static const char *lacp_format_state(uint8_t, char *, size_t);
187 static const char *lacp_format_mac(const uint8_t *, char *, size_t);
188 static const char *lacp_format_systemid(const struct lacp_systemid *, char *,
190 static const char *lacp_format_portid(const struct lacp_portid *, char *,
192 static void lacp_dprintf(const struct lacp_port *, const char *, ...)
193 __attribute__((__format__(__printf__, 2, 3)));
/*
 * Per-VNET debug knob and sysctl tree: net.link.lagg.lacp.debug selects the
 * LACP_DPRINTF (bit 0), LACP_TRACE (bit 1) and LACP_TPRINTF (bit 2) classes.
 */
195 static VNET_DEFINE(int, lacp_debug);
196 #define	V_lacp_debug	VNET(lacp_debug)
197 SYSCTL_NODE(_net_link_lagg, OID_AUTO, lacp, CTLFLAG_RD, 0, "ieee802.3ad");
198 SYSCTL_INT(_net_link_lagg_lacp, OID_AUTO, debug, CTLFLAG_RWTUN | CTLFLAG_VNET,
199     &VNET_NAME(lacp_debug), 0, "Enable LACP debug logging (1=debug, 2=trace)");
/* Default for per-softc strict-compliance mode; copied at lacp_attach() time. */
201 static VNET_DEFINE(int, lacp_default_strict_mode) = 1;
202 SYSCTL_INT(_net_link_lagg_lacp, OID_AUTO, default_strict_mode, CTLFLAG_RWTUN,
203     &VNET_NAME(lacp_default_strict_mode), 0,
204     "LACP strict protocol compliance default");
/* Conditional debug-print wrappers; each "a" is a parenthesized argument list. */
206 #define LACP_DPRINTF(a) if (V_lacp_debug & 0x01) { lacp_dprintf a ; }
207 #define LACP_TRACE(a) if (V_lacp_debug & 0x02) { lacp_dprintf(a,"%s\n",__func__); }
208 #define LACP_TPRINTF(a) if (V_lacp_debug & 0x04) { lacp_dprintf a ; }
211 * partner administration variables.
212 * XXX should be configurable.
/*
 * Default partner info used when no partner is heard (non-strict mode):
 * pretend the partner agrees to aggregate, so the link can pass traffic.
 * NOTE(review): closing "};" missing from this extract.
 */
215 static const struct lacp_peerinfo lacp_partner_admin_optimistic = {
216 	.lip_systemid = { .lsi_prio = 0xffff },
217 	.lip_portid = { .lpi_prio = 0xffff },
218 	.lip_state = LACP_STATE_SYNC | LACP_STATE_AGGREGATION |
219 	    LACP_STATE_COLLECTING | LACP_STATE_DISTRIBUTING,
/*
 * Strict-mode default partner: no SYNC/COLLECTING/DISTRIBUTING bits, so the
 * port stays down until a real partner is negotiated.
 * NOTE(review): closing "};" missing from this extract.
 */
222 static const struct lacp_peerinfo lacp_partner_admin_strict = {
223 	.lip_systemid = { .lsi_prio = 0xffff },
224 	.lip_portid = { .lpi_prio = 0xffff },
/*
 * Per-timer expiry handlers, indexed by LACP_TIMER_* and driven from
 * lacp_run_timers() on each tick.
 * NOTE(review): closing "};" missing from this extract.
 */
228 static const lacp_timer_func_t lacp_timer_funcs[LACP_NTIMER] = {
229 	[LACP_TIMER_CURRENT_WHILE] = lacp_sm_rx_timer,
230 	[LACP_TIMER_PERIODIC] = lacp_sm_ptx_timer,
231 	[LACP_TIMER_WAIT_WHILE] = lacp_sm_mux_timer,
/*
 * lacp_input: demultiplex an incoming slow-protocols frame on a lagg port
 * by its subtype byte (LACP vs. marker); ownership of the mbuf passes to
 * the subtype handler.
 * NOTE(review): several interior lines (braces, subtype declaration,
 * switch header, break/default arms, m_freem on short packets) are missing
 * from this extract — do not edit logic from this view.
 */
235 lacp_input(struct lagg_port *lgp, struct mbuf *m)
237 	struct lacp_port *lp = LACP_PORT(lgp);
240 	if (m->m_pkthdr.len < sizeof(struct ether_header) + sizeof(subtype)) {
245 	m_copydata(m, sizeof(struct ether_header), sizeof(subtype), &subtype);
247 	case SLOWPROTOCOLS_SUBTYPE_LACP:
248 		lacp_pdu_input(lp, m);
251 	case SLOWPROTOCOLS_SUBTYPE_MARKER:
252 		lacp_marker_input(lp, m);
256 	/* Not a subtype we are interested in */
261  * lacp_pdu_input: process lacpdu
/*
 * Validates length, multicast flag, destination MAC, version and TLV layout
 * of a received LACPDU, then (in the missing tail) hands it to the RX state
 * machine. NOTE(review): many interior lines are missing from this extract.
 */
264 lacp_pdu_input(struct lacp_port *lp, struct mbuf *m)
266 	struct lacp_softc *lsc = lp->lp_lsc;
270 	if (m->m_pkthdr.len != sizeof(*du)) {
274 	if ((m->m_flags & M_MCAST) == 0) {
278 	if (m->m_len < sizeof(*du)) {
279 		m = m_pullup(m, sizeof(*du));
285 	du = mtod(m, struct lacpdu *);
287 	if (memcmp(&du->ldu_eh.ether_dhost,
/* NOTE(review): "&e" below was mangled to U+00F0 ("ð") by an encoding pass;
 * the identifier should read &ethermulticastaddr_slowprotocols. */
288 	    ðermulticastaddr_slowprotocols, ETHER_ADDR_LEN)) {
293 	 * ignore the version for compatibility with
294 	 * the future protocol revisions.
/* Despite the comment above, version != 1 is rejected here. */
297 	if (du->ldu_sph.sph_version != 1) {
303 	 * ignore tlv types for compatibility with
304 	 * the future protocol revisions.
306 	if (tlv_check(du, sizeof(*du), &du->ldu_tlv_actor,
307 	    lacp_info_tlv_template, FALSE)) {
311 	if (V_lacp_debug > 0) {
312 		lacp_dprintf(lp, "lacpdu receive\n");
313 		lacp_dump_lacpdu(du);
/* Debug hook: drop RX PDUs for ports selected in lsc_debug.lsc_rx_test. */
316 	if ((1 << lp->lp_ifp->if_dunit) & lp->lp_lsc->lsc_debug.lsc_rx_test) {
317 		LACP_TPRINTF((lp, "Dropping RX PDU\n"));
/*
 * lacp_fill_actorinfo: populate "info" with this node's actor parameters:
 * system priority + lagg MAC as system id, port priority + if_index as
 * port id, and the port's current state bits.
 * NOTE(review): "static void" header and braces missing from this extract.
 */
334 lacp_fill_actorinfo(struct lacp_port *lp, struct lacp_peerinfo *info)
336 	struct lagg_port *lgp = lp->lp_lagg;
337 	struct lagg_softc *sc = lgp->lp_softc;
339 	info->lip_systemid.lsi_prio = htons(LACP_SYSTEM_PRIO);
340 	memcpy(&info->lip_systemid.lsi_mac,
341 	    IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
342 	info->lip_portid.lpi_prio = htons(LACP_PORT_PRIO);
343 	info->lip_portid.lpi_portno = htons(lp->lp_ifp->if_index);
344 	info->lip_state = lp->lp_state;
/*
 * lacp_fill_markerinfo: initialize the marker request template for a port;
 * the transaction id starts at 0 and is bumped in lacp_xmit_marker().
 */
348 lacp_fill_markerinfo(struct lacp_port *lp, struct lacp_markerinfo *info)
350 	struct ifnet *ifp = lp->lp_ifp;
352 	/* Fill in the port index and system id (encoded as the MAC) */
353 	info->mi_rq_port = htons(ifp->if_index);
354 	memcpy(&info->mi_rq_system, lp->lp_systemid.lsi_mac, ETHER_ADDR_LEN);
355 	info->mi_rq_xid = htonl(0);
/*
 * lacp_xmit_lacpdu: build and enqueue one LACPDU on the port. Requires the
 * LACP lock. Returns 0 or an errno from lagg_enqueue (ENOMEM on mbuf
 * failure in the missing allocation-error branch).
 * NOTE(review): interior lines (local decls, braces, terminator TLV,
 * return) are missing from this extract.
 */
359 lacp_xmit_lacpdu(struct lacp_port *lp)
361 	struct lagg_port *lgp = lp->lp_lagg;
366 	LACP_LOCK_ASSERT(lp->lp_lsc);
368 	m = m_gethdr(M_NOWAIT, MT_DATA);
372 	m->m_len = m->m_pkthdr.len = sizeof(*du);
374 	du = mtod(m, struct lacpdu *);
375 	memset(du, 0, sizeof(*du));
377 	memcpy(&du->ldu_eh.ether_dhost, ethermulticastaddr_slowprotocols,
379 	memcpy(&du->ldu_eh.ether_shost, lgp->lp_lladdr, ETHER_ADDR_LEN);
380 	du->ldu_eh.ether_type = htons(ETHERTYPE_SLOW);
382 	du->ldu_sph.sph_subtype = SLOWPROTOCOLS_SUBTYPE_LACP;
383 	du->ldu_sph.sph_version = 1;
/* Actor, partner and collector TLVs, copied from the port's current view. */
385 	TLV_SET(&du->ldu_tlv_actor, LACP_TYPE_ACTORINFO, sizeof(du->ldu_actor));
386 	du->ldu_actor = lp->lp_actor;
388 	TLV_SET(&du->ldu_tlv_partner, LACP_TYPE_PARTNERINFO,
389 	    sizeof(du->ldu_partner));
390 	du->ldu_partner = lp->lp_partner;
392 	TLV_SET(&du->ldu_tlv_collector, LACP_TYPE_COLLECTORINFO,
393 	    sizeof(du->ldu_collector));
394 	du->ldu_collector.lci_maxdelay = 0;
396 	if (V_lacp_debug > 0) {
397 		lacp_dprintf(lp, "lacpdu transmit\n");
398 		lacp_dump_lacpdu(du);
401 	m->m_flags |= M_MCAST;
404 	 * XXX should use higher priority queue.
405 	 * otherwise network congestion can break aggregation.
408 	error = lagg_enqueue(lp->lp_ifp, m);
/*
 * lacp_xmit_marker: build and enqueue one marker request, bumping the
 * per-port transaction id; used to flush queues before switching the
 * active aggregator. Requires the LACP lock.
 * NOTE(review): interior lines missing from this extract.
 */
413 lacp_xmit_marker(struct lacp_port *lp)
415 	struct lagg_port *lgp = lp->lp_lagg;
417 	struct markerdu *mdu;
420 	LACP_LOCK_ASSERT(lp->lp_lsc);
422 	m = m_gethdr(M_NOWAIT, MT_DATA);
426 	m->m_len = m->m_pkthdr.len = sizeof(*mdu);
428 	mdu = mtod(m, struct markerdu *);
429 	memset(mdu, 0, sizeof(*mdu));
431 	memcpy(&mdu->mdu_eh.ether_dhost, ethermulticastaddr_slowprotocols,
433 	memcpy(&mdu->mdu_eh.ether_shost, lgp->lp_lladdr, ETHER_ADDR_LEN);
434 	mdu->mdu_eh.ether_type = htons(ETHERTYPE_SLOW);
436 	mdu->mdu_sph.sph_subtype = SLOWPROTOCOLS_SUBTYPE_MARKER;
437 	mdu->mdu_sph.sph_version = 1;
439 	/* Bump the transaction id and copy over the marker info */
440 	lp->lp_marker.mi_rq_xid = htonl(ntohl(lp->lp_marker.mi_rq_xid) + 1);
441 	TLV_SET(&mdu->mdu_tlv, MARKER_TYPE_INFO, sizeof(mdu->mdu_info));
442 	mdu->mdu_info = lp->lp_marker;
444 	LACP_DPRINTF((lp, "marker transmit, port=%u, sys=%6D, id=%u\n",
445 	    ntohs(mdu->mdu_info.mi_rq_port), mdu->mdu_info.mi_rq_system, ":",
446 	    ntohl(mdu->mdu_info.mi_rq_xid)));
448 	m->m_flags |= M_MCAST;
449 	error = lagg_enqueue(lp->lp_ifp, m);
/*
 * lacp_linkstate: react to a media/link change on a lagg port. Queries
 * current media, enables the port only when it is full-duplex Ethernet
 * with link up, recomputes the aggregation key, and forces reselection
 * when state or key changed.
 * NOTE(review): interior lines (local decls, braces, else, unselect call)
 * missing from this extract.
 */
454 lacp_linkstate(struct lagg_port *lgp)
456 	struct lacp_port *lp = LACP_PORT(lgp);
457 	struct lacp_softc *lsc = lp->lp_lsc;
458 	struct ifnet *ifp = lgp->lp_ifp;
459 	struct ifmediareq ifmr;
465 	bzero((char *)&ifmr, sizeof(ifmr));
466 	error = (*ifp->if_ioctl)(ifp, SIOCGIFMEDIA, (caddr_t)&ifmr);
471 	media = ifmr.ifm_active;
472 	LACP_DPRINTF((lp, "media changed 0x%x -> 0x%x, ether = %d, fdx = %d, "
473 	    "link = %d\n", lp->lp_media, media, IFM_TYPE(media) == IFM_ETHER,
474 	    (media & IFM_FDX) != 0, ifp->if_link_state == LINK_STATE_UP));
475 	old_state = lp->lp_state;
476 	old_key = lp->lp_key;
478 	lp->lp_media = media;
480 	 * If the port is not an active full duplex Ethernet link then it can
483 	if (IFM_TYPE(media) != IFM_ETHER || (media & IFM_FDX) == 0 ||
484 	    ifp->if_link_state != LINK_STATE_UP) {
485 		lacp_port_disable(lp);
487 		lacp_port_enable(lp);
489 	lp->lp_key = lacp_compose_key(lp);
/* Any state/key change invalidates the current aggregator selection. */
491 	if (old_state != lp->lp_state || old_key != lp->lp_key) {
492 		LACP_DPRINTF((lp, "-> UNSELECTED\n"));
493 		lp->lp_selected = LACP_UNSELECTED;
/* Body of lacp_tick(): once-per-second driver for each aggregating port's
 * timers and periodic-TX machine; reschedules itself via lsc_callout.
 * NOTE(review): the function header line and several interior lines
 * (timer runs, state-machine calls, CURVNET_RESTORE) are missing from
 * this extract. */
501 	struct lacp_softc *lsc = arg;
502 	struct lacp_port *lp;
504 	LIST_FOREACH(lp, &lsc->lsc_ports, lp_next) {
505 		if ((lp->lp_state & LACP_STATE_AGGREGATION) == 0)
508 		CURVNET_SET(lp->lp_ifp->if_vnet);
514 		lacp_sm_ptx_tx_schedule(lp);
517 	callout_reset(&lsc->lsc_callout, hz, lacp_tick, lsc);
/*
 * lacp_port_create: attach LACP state to a new lagg port — join the slow-
 * protocols multicast group, allocate and link the lacp_port, seed actor
 * and marker info, and start in the EXPIRED RX state.
 * NOTE(review): interior lines (error handling, field initialization,
 * locking, return) missing from this extract.
 */
521 lacp_port_create(struct lagg_port *lgp)
523 	struct lagg_softc *sc = lgp->lp_softc;
524 	struct lacp_softc *lsc = LACP_SOFTC(sc);
525 	struct lacp_port *lp;
526 	struct ifnet *ifp = lgp->lp_ifp;
527 	struct sockaddr_dl sdl;
528 	struct ifmultiaddr *rifma = NULL;
531 	link_init_sdl(ifp, (struct sockaddr *)&sdl, IFT_ETHER);
532 	sdl.sdl_alen = ETHER_ADDR_LEN;
/* NOTE(review): "&e" below was mangled to U+00F0 ("ð") by an encoding pass;
 * should read &ethermulticastaddr_slowprotocols. */
534 	bcopy(ðermulticastaddr_slowprotocols,
535 	    LLADDR(&sdl), ETHER_ADDR_LEN);
536 	error = if_addmulti(ifp, (struct sockaddr *)&sdl, &rifma);
538 		printf("%s: ADDMULTI failed on %s\n", __func__,
539 		    lgp->lp_ifp->if_xname);
543 	lp = malloc(sizeof(struct lacp_port),
544 	    M_DEVBUF, M_NOWAIT|M_ZERO);
555 	LIST_INSERT_HEAD(&lsc->lsc_ports, lp, lp_next);
557 	lacp_fill_actorinfo(lp, &lp->lp_actor);
558 	lacp_fill_markerinfo(lp, &lp->lp_marker);
559 	lp->lp_state = LACP_STATE_ACTIVITY;
560 	lp->lp_aggregator = NULL;
561 	lacp_sm_rx_set_expired(lp);
/*
 * lacp_port_destroy: tear down LACP state for a departing lagg port —
 * disarm all timers, stop collecting/distributing, unlink, and leave the
 * slow-protocols multicast group (unless the interface is detaching).
 * NOTE(review): locking, unselect and free calls missing from this extract.
 */
569 lacp_port_destroy(struct lagg_port *lgp)
571 	struct lacp_port *lp = LACP_PORT(lgp);
572 	struct lacp_softc *lsc = lp->lp_lsc;
576 	for (i = 0; i < LACP_NTIMER; i++) {
577 		LACP_TIMER_DISARM(lp, i);
580 	lacp_disable_collecting(lp);
581 	lacp_disable_distributing(lp);
584 	LIST_REMOVE(lp, lp_next);
587 	/* The address may have already been removed by if_purgemaddrs() */
588 	if (!lgp->lp_detaching)
589 		if_delmulti_ifma(lp->lp_ifma);
/*
 * lacp_req: export the active aggregator's actor/partner parameters to
 * userland (SIOCGLAGG path) in host byte order; all-zeros if there is no
 * softc or no active aggregator.
 * NOTE(review): NULL checks, braces and locking missing from this extract.
 */
595 lacp_req(struct lagg_softc *sc, void *data)
597 	struct lacp_opreq *req = (struct lacp_opreq *)data;
598 	struct lacp_softc *lsc = LACP_SOFTC(sc);
599 	struct lacp_aggregator *la;
601 	bzero(req, sizeof(struct lacp_opreq));
604 	 * If the LACP softc is NULL, return with the opreq structure full of
605 	 * zeros.  It is normal for the softc to be NULL while the lagg is
611 	la = lsc->lsc_active_aggregator;
614 		req->actor_prio = ntohs(la->la_actor.lip_systemid.lsi_prio);
615 		memcpy(&req->actor_mac, &la->la_actor.lip_systemid.lsi_mac,
617 		req->actor_key = ntohs(la->la_actor.lip_key);
618 		req->actor_portprio = ntohs(la->la_actor.lip_portid.lpi_prio);
619 		req->actor_portno = ntohs(la->la_actor.lip_portid.lpi_portno);
620 		req->actor_state = la->la_actor.lip_state;
622 		req->partner_prio = ntohs(la->la_partner.lip_systemid.lsi_prio);
623 		memcpy(&req->partner_mac, &la->la_partner.lip_systemid.lsi_mac,
625 		req->partner_key = ntohs(la->la_partner.lip_key);
626 		req->partner_portprio = ntohs(la->la_partner.lip_portid.lpi_prio);
627 		req->partner_portno = ntohs(la->la_partner.lip_portid.lpi_portno);
628 		req->partner_state = la->la_partner.lip_state;
/*
 * lacp_portreq: export one port's actor/partner parameters to userland
 * (SIOCGLAGGPORT path) in host byte order.
 * NOTE(review): braces and locking missing from this extract.
 */
634 lacp_portreq(struct lagg_port *lgp, void *data)
636 	struct lacp_opreq *req = (struct lacp_opreq *)data;
637 	struct lacp_port *lp = LACP_PORT(lgp);
638 	struct lacp_softc *lsc = lp->lp_lsc;
641 	req->actor_prio = ntohs(lp->lp_actor.lip_systemid.lsi_prio);
642 	memcpy(&req->actor_mac, &lp->lp_actor.lip_systemid.lsi_mac,
644 	req->actor_key = ntohs(lp->lp_actor.lip_key);
645 	req->actor_portprio = ntohs(lp->lp_actor.lip_portid.lpi_prio);
646 	req->actor_portno = ntohs(lp->lp_actor.lip_portid.lpi_portno);
647 	req->actor_state = lp->lp_actor.lip_state;
649 	req->partner_prio = ntohs(lp->lp_partner.lip_systemid.lsi_prio);
650 	memcpy(&req->partner_mac, &lp->lp_partner.lip_systemid.lsi_mac,
652 	req->partner_key = ntohs(lp->lp_partner.lip_key);
653 	req->partner_portprio = ntohs(lp->lp_partner.lip_portid.lpi_prio);
654 	req->partner_portno = ntohs(lp->lp_partner.lip_portid.lpi_portno);
655 	req->partner_state = lp->lp_partner.lip_state;
/* Clear the COLLECTING bit in the actor state (stop accepting RX on port). */
660 lacp_disable_collecting(struct lacp_port *lp)
662 	LACP_DPRINTF((lp, "collecting disabled\n"));
663 	lp->lp_state &= ~LACP_STATE_COLLECTING;
/* Set the COLLECTING bit in the actor state (start accepting RX on port). */
667 lacp_enable_collecting(struct lacp_port *lp)
669 	LACP_DPRINTF((lp, "collecting enabled\n"));
670 	lp->lp_state |= LACP_STATE_COLLECTING;
/*
 * lacp_disable_distributing: remove a port from its aggregator's
 * distribution list; if the active aggregator is affected, suppress
 * distribution briefly and reselect/rebuild the port map. Requires the
 * LACP lock. NOTE(review): braces, early return, and the la_nports
 * decrement line are missing from this extract.
 */
674 lacp_disable_distributing(struct lacp_port *lp)
676 	struct lacp_aggregator *la = lp->lp_aggregator;
677 	struct lacp_softc *lsc = lp->lp_lsc;
678 	struct lagg_softc *sc = lsc->lsc_softc;
679 	char buf[LACP_LAGIDSTR_MAX+1];
681 	LACP_LOCK_ASSERT(lsc);
683 	if (la == NULL || (lp->lp_state & LACP_STATE_DISTRIBUTING) == 0) {
687 	KASSERT(!TAILQ_EMPTY(&la->la_ports), ("no aggregator ports"));
688 	KASSERT(la->la_nports > 0, ("nports invalid (%d)", la->la_nports));
689 	KASSERT(la->la_refcnt >= la->la_nports, ("aggregator refcnt invalid"));
691 	LACP_DPRINTF((lp, "disable distributing on aggregator %s, "
693 	    lacp_format_lagid_aggregator(la, buf, sizeof(buf)),
694 	    la->la_nports, la->la_nports - 1));
696 	TAILQ_REMOVE(&la->la_ports, lp, lp_dist_q);
698 	sc->sc_active = la->la_nports;
700 	if (lsc->lsc_active_aggregator == la) {
701 		lacp_suppress_distributing(lsc, la);
702 		lacp_select_active_aggregator(lsc);
703 		/* regenerate the port map, the active aggregator has changed */
704 		lacp_update_portmap(lsc);
707 	lp->lp_state &= ~LACP_STATE_DISTRIBUTING;
/*
 * lacp_enable_distributing: add a port to its aggregator's distribution
 * list and refresh the port map (or try to become the active aggregator).
 * Requires the LACP lock. NOTE(review): braces, early return, and the
 * la_nports increment line are missing from this extract.
 */
711 lacp_enable_distributing(struct lacp_port *lp)
713 	struct lacp_aggregator *la = lp->lp_aggregator;
714 	struct lacp_softc *lsc = lp->lp_lsc;
715 	struct lagg_softc *sc = lsc->lsc_softc;
716 	char buf[LACP_LAGIDSTR_MAX+1];
718 	LACP_LOCK_ASSERT(lsc);
720 	if ((lp->lp_state & LACP_STATE_DISTRIBUTING) != 0) {
724 	LACP_DPRINTF((lp, "enable distributing on aggregator %s, "
726 	    lacp_format_lagid_aggregator(la, buf, sizeof(buf)),
727 	    la->la_nports, la->la_nports + 1));
729 	KASSERT(la->la_refcnt > la->la_nports, ("aggregator refcnt invalid"));
730 	TAILQ_INSERT_HEAD(&la->la_ports, lp, lp_dist_q);
732 	sc->sc_active = la->la_nports;
734 	lp->lp_state |= LACP_STATE_DISTRIBUTING;
736 	if (lsc->lsc_active_aggregator == la) {
737 		lacp_suppress_distributing(lsc, la);
738 		lacp_update_portmap(lsc);
740 		/* try to become the active aggregator */
741 		lacp_select_active_aggregator(lsc);
/*
 * lacp_transit_expire: callout fired LACP_TRANSIT_DELAY ms after
 * distribution was suppressed; lifts the suppression so TX resumes.
 * NOTE(review): CURVNET_RESTORE and closing brace missing from extract.
 */
745 lacp_transit_expire(void *vp)
747 	struct lacp_softc *lsc = vp;
749 	LACP_LOCK_ASSERT(lsc);
751 	CURVNET_SET(lsc->lsc_softc->sc_ifp->if_vnet);
755 	lsc->lsc_suppress_distributing = FALSE;
/*
 * lacp_attach: allocate and initialize the per-lagg LACP softc — hash key,
 * strict-mode default from the VNET sysctl, aggregator/port lists, and
 * the transit and tick callouts; kicks lacp_init if the lagg is running.
 * NOTE(review): lock init and back-pointer setup missing from this extract.
 */
759 lacp_attach(struct lagg_softc *sc)
761 	struct lacp_softc *lsc;
763 	lsc = malloc(sizeof(struct lacp_softc), M_DEVBUF, M_WAITOK | M_ZERO);
768 	lsc->lsc_hashkey = m_ether_tcpip_hash_init();
769 	lsc->lsc_active_aggregator = NULL;
770 	lsc->lsc_strict_mode = VNET(lacp_default_strict_mode);
772 	TAILQ_INIT(&lsc->lsc_aggregators);
773 	LIST_INIT(&lsc->lsc_ports);
775 	callout_init_mtx(&lsc->lsc_transit_callout, &lsc->lsc_mtx, 0);
776 	callout_init_mtx(&lsc->lsc_callout, &lsc->lsc_mtx, 0);
778 	/* if the lagg is already up then do the same */
779 	if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING)
/*
 * lacp_detach: destroy the LACP softc; all ports and aggregators must be
 * gone already. Drains callouts before destroying the lock.
 * NOTE(review): the final free() and closing brace are missing from extract.
 */
784 lacp_detach(void *psc)
786 	struct lacp_softc *lsc = (struct lacp_softc *)psc;
788 	KASSERT(TAILQ_EMPTY(&lsc->lsc_aggregators),
789 	    ("aggregators still active"));
790 	KASSERT(lsc->lsc_active_aggregator == NULL,
791 	    ("aggregator still attached"));
793 	callout_drain(&lsc->lsc_transit_callout);
794 	callout_drain(&lsc->lsc_callout);
796 	LACP_LOCK_DESTROY(lsc);
/* lacp_init: start the one-second LACP tick for this lagg. */
801 lacp_init(struct lagg_softc *sc)
803 	struct lacp_softc *lsc = LACP_SOFTC(sc);
806 	callout_reset(&lsc->lsc_callout, hz, lacp_tick, lsc);
/* lacp_stop: stop the transit and tick callouts for this lagg. */
811 lacp_stop(struct lagg_softc *sc)
813 	struct lacp_softc *lsc = LACP_SOFTC(sc);
816 	callout_stop(&lsc->lsc_transit_callout);
817 	callout_stop(&lsc->lsc_callout);
/*
 * lacp_select_tx_port: pick the egress lagg port for an outgoing mbuf by
 * hashing (flowid if enabled and present, else computed L2/L3/L4 hash)
 * modulo the active port map. Returns NULL (in the missing branches) while
 * distribution is suppressed or no aggregator is active.
 * NOTE(review): braces, return NULLs and "else" lines missing from extract.
 */
822 lacp_select_tx_port(struct lagg_softc *sc, struct mbuf *m)
824 	struct lacp_softc *lsc = LACP_SOFTC(sc);
825 	struct lacp_portmap *pm;
826 	struct lacp_port *lp;
829 	if (__predict_false(lsc->lsc_suppress_distributing)) {
830 		LACP_DPRINTF((NULL, "%s: waiting transit\n", __func__));
834 	pm = &lsc->lsc_pmap[lsc->lsc_activemap];
835 	if (pm->pm_count == 0) {
836 		LACP_DPRINTF((NULL, "%s: no active aggregator\n", __func__));
840 	if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) &&
841 	    M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
842 		hash = m->m_pkthdr.flowid >> sc->flowid_shift;
844 		hash = m_ether_tcpip_hash(sc->sc_flags, m, lsc->lsc_hashkey);
845 	hash %= pm->pm_count;
846 	lp = pm->pm_map[hash];
848 	KASSERT((lp->lp_state & LACP_STATE_DISTRIBUTING) != 0,
849 	    ("aggregated port is not distributing"));
851 	return (lp->lp_lagg);
/*
 * lacp_select_tx_port_by_hash: like lacp_select_tx_port() but for callers
 * that already have a flow id (e.g. ratelimit paths); no mbuf needed.
 * NOTE(review): braces and return NULL lines missing from this extract.
 */
856 lacp_select_tx_port_by_hash(struct lagg_softc *sc, uint32_t flowid)
858 	struct lacp_softc *lsc = LACP_SOFTC(sc);
859 	struct lacp_portmap *pm;
860 	struct lacp_port *lp;
863 	if (__predict_false(lsc->lsc_suppress_distributing)) {
864 		LACP_DPRINTF((NULL, "%s: waiting transit\n", __func__));
868 	pm = &lsc->lsc_pmap[lsc->lsc_activemap];
869 	if (pm->pm_count == 0) {
870 		LACP_DPRINTF((NULL, "%s: no active aggregator\n", __func__));
874 	hash = flowid >> sc->flowid_shift;
875 	hash %= pm->pm_count;
876 	lp = pm->pm_map[hash];
878 	return (lp->lp_lagg);
883  * lacp_suppress_distributing: drop transmit packets for a while
884  * to preserve packet ordering.
/* Only acts when "la" is the active aggregator: sets the suppress flag,
 * sends a marker on every port to drain queues, and arms the transit
 * callout to lift suppression after LACP_TRANSIT_DELAY ms. */
888 lacp_suppress_distributing(struct lacp_softc *lsc, struct lacp_aggregator *la)
890 	struct lacp_port *lp;
892 	if (lsc->lsc_active_aggregator != la) {
898 	lsc->lsc_suppress_distributing = TRUE;
900 	/* send a marker frame down each port to verify the queues are empty */
901 	LIST_FOREACH(lp, &lsc->lsc_ports, lp_next) {
902 		lp->lp_flags |= LACP_PORT_MARK;
903 		lacp_xmit_marker(lp);
906 	/* set a timeout for the marker frames */
907 	callout_reset(&lsc->lsc_transit_callout,
908 	    LACP_TRANSIT_DELAY * hz / 1000, lacp_transit_expire, lsc);
/* Compare peerinfo up to (but excluding) lip_state; relies on the struct
 * layout placing lip_state last among the compared fields. */
912 lacp_compare_peerinfo(const struct lacp_peerinfo *a,
913     const struct lacp_peerinfo *b)
915 	return (memcmp(a, b, offsetof(struct lacp_peerinfo, lip_state)));
/* Byte-wise compare of two system ids (priority + MAC). */
919 lacp_compare_systemid(const struct lacp_systemid *a,
920     const struct lacp_systemid *b)
922 	return (memcmp(a, b, sizeof(*a)));
/* Byte-wise compare of two port ids (priority + port number). */
927 lacp_compare_portid(const struct lacp_portid *a,
928     const struct lacp_portid *b)
930 	return (memcmp(a, b, sizeof(*a)));
/*
 * lacp_aggregator_bandwidth: aggregate speed = first port's media baudrate
 * times the number of distributing ports (all ports in one LAG share media).
 * NOTE(review): empty-list early return and final return missing from extract.
 */
935 lacp_aggregator_bandwidth(struct lacp_aggregator *la)
937 	struct lacp_port *lp;
940 	lp = TAILQ_FIRST(&la->la_ports);
945 	speed = ifmedia_baudrate(lp->lp_media);
946 	speed *= la->la_nports;
948 		LACP_DPRINTF((lp, "speed 0? media=0x%x nports=%d\n",
949 		    lp->lp_media, la->la_nports));
956  * lacp_select_active_aggregator: select an aggregator to be used to transmit
957  * packets from lagg(4) interface.
/* Scans all aggregators with at least one port and keeps the "best" one by
 * partner system priority, then total speed, with a tie-break in favor of
 * the currently active aggregator; on change, installs it, rebuilds the
 * port map, and suppresses distribution during the switch.
 * NOTE(review): several interior lines (braces, best_* assignments,
 * "continue") are missing from this extract. */
961 lacp_select_active_aggregator(struct lacp_softc *lsc)
963 	struct lacp_aggregator *la;
964 	struct lacp_aggregator *best_la = NULL;
965 	uint64_t best_speed = 0;
966 	char buf[LACP_LAGIDSTR_MAX+1];
970 	TAILQ_FOREACH(la, &lsc->lsc_aggregators, la_q) {
973 		if (la->la_nports == 0) {
977 		speed = lacp_aggregator_bandwidth(la);
978 		LACP_DPRINTF((NULL, "%s, speed=%jd, nports=%d\n",
979 		    lacp_format_lagid_aggregator(la, buf, sizeof(buf)),
980 		    speed, la->la_nports));
983 		 * This aggregator is chosen if the partner has a better
984 		 * system priority or, the total aggregated speed is higher
985 		 * or, it is already the chosen aggregator
987 		if ((best_la != NULL && LACP_SYS_PRI(la->la_partner) <
988 		    LACP_SYS_PRI(best_la->la_partner)) ||
989 		    speed > best_speed ||
990 		    (speed == best_speed &&
991 		    la == lsc->lsc_active_aggregator)) {
997 	KASSERT(best_la == NULL || best_la->la_nports > 0,
998 	    ("invalid aggregator refcnt"));
999 	KASSERT(best_la == NULL || !TAILQ_EMPTY(&best_la->la_ports),
1000 	    ("invalid aggregator list"));
1002 	if (lsc->lsc_active_aggregator != best_la) {
1003 		LACP_DPRINTF((NULL, "active aggregator changed\n"));
1004 		LACP_DPRINTF((NULL, "old %s\n",
1005 		    lacp_format_lagid_aggregator(lsc->lsc_active_aggregator,
1006 		    buf, sizeof(buf))));
1008 		LACP_DPRINTF((NULL, "active aggregator not changed\n"));
1010 	LACP_DPRINTF((NULL, "new %s\n",
1011 	    lacp_format_lagid_aggregator(best_la, buf, sizeof(buf))));
1013 	if (lsc->lsc_active_aggregator != best_la) {
1014 		lsc->lsc_active_aggregator = best_la;
1015 		lacp_update_portmap(lsc);
1017 			lacp_suppress_distributing(lsc, best_la);
1023  * Updated the inactive portmap array with the new list of ports and
/* Double-buffered port map: rebuild the inactive lsc_pmap slot from the
 * active aggregator's distribution list, update the lagg baudrate, then
 * atomically flip lsc_activemap so TX lookups see a consistent table.
 * NOTE(review): i/speed declarations and some braces missing from extract. */
1027 lacp_update_portmap(struct lacp_softc *lsc)
1029 	struct lagg_softc *sc = lsc->lsc_softc;
1030 	struct lacp_aggregator *la;
1031 	struct lacp_portmap *p;
1032 	struct lacp_port *lp;
1037 	newmap = lsc->lsc_activemap == 0 ? 1 : 0;
1038 	p = &lsc->lsc_pmap[newmap];
1039 	la = lsc->lsc_active_aggregator;
1041 	bzero(p, sizeof(struct lacp_portmap));
1043 	if (la != NULL && la->la_nports > 0) {
1044 		p->pm_count = la->la_nports;
1046 		TAILQ_FOREACH(lp, &la->la_ports, lp_dist_q)
1047 			p->pm_map[i++] = lp;
1048 		KASSERT(i == p->pm_count, ("Invalid port count"));
1049 		speed = lacp_aggregator_bandwidth(la);
1051 	sc->sc_ifp->if_baudrate = speed;
1053 	/* switch the active portmap over */
1054 	atomic_store_rel_int(&lsc->lsc_activemap, newmap);
1055 	LACP_DPRINTF((NULL, "Set table %d with %d ports\n",
1057 	    lsc->lsc_pmap[lsc->lsc_activemap].pm_count));
/*
 * lacp_compose_key: derive the 16-bit aggregation key, network byte order.
 * Non-aggregatable ports get a unique key from their if_index; aggregatable
 * ports combine a speed class (bits 0..4, from the IFM_SUBTYPE switch whose
 * body is largely missing from this extract) with bits of the lagg
 * interface's if_index (bits 5..14).
 */
1061 lacp_compose_key(struct lacp_port *lp)
1063 	struct lagg_port *lgp = lp->lp_lagg;
1064 	struct lagg_softc *sc = lgp->lp_softc;
1065 	u_int media = lp->lp_media;
1068 	if ((lp->lp_state & LACP_STATE_AGGREGATION) == 0) {
1071 		 * non-aggregatable links should have unique keys.
1073 		 * XXX this isn't really unique as if_index is 16 bit.
1076 		/* bit 0..14:	(some bits of) if_index of this port */
1077 		key = lp->lp_ifp->if_index;
1081 		u_int subtype = IFM_SUBTYPE(media);
1083 		KASSERT(IFM_TYPE(media) == IFM_ETHER, ("invalid media type"));
1084 		KASSERT((media & IFM_FDX) != 0, ("aggregating HDX interface"));
1086 		/* bit 0..4:	IFM_SUBTYPE modulo speed */
1108 		case IFM_1000_SGMII:
1109 		case IFM_1000_CX_SGMII:
1115 		case IFM_10G_TWINAX:
1116 		case IFM_10G_TWINAX_LONG:
1167 		/* bit 5..14:	(some bits of) if_index of lagg device */
1168 		key |= 0x7fe0 & ((sc->sc_ifp->if_index) << 5);
1171 	return (htons(key));
/*
 * lacp_aggregator_addref: take a reference on an aggregator (one per
 * member port). NOTE(review): the actual "la->la_refcnt++" line is
 * missing from this extract.
 */
1175 lacp_aggregator_addref(struct lacp_softc *lsc, struct lacp_aggregator *la)
1177 	char buf[LACP_LAGIDSTR_MAX+1];
1179 	LACP_DPRINTF((NULL, "%s: lagid=%s, refcnt %d -> %d\n",
1181 	    lacp_format_lagid(&la->la_actor, &la->la_partner,
1183 	    la->la_refcnt, la->la_refcnt + 1));
1185 	KASSERT(la->la_refcnt > 0, ("refcount <= 0"));
1187 	KASSERT(la->la_refcnt > la->la_nports, ("invalid refcount"));
/*
 * lacp_aggregator_delref: drop a reference; on the last one, unlink the
 * aggregator from the softc list (and, in the missing tail, free it).
 * NOTE(review): the "la->la_refcnt--" line and free() are missing from
 * this extract.
 */
1191 lacp_aggregator_delref(struct lacp_softc *lsc, struct lacp_aggregator *la)
1193 	char buf[LACP_LAGIDSTR_MAX+1];
1195 	LACP_DPRINTF((NULL, "%s: lagid=%s, refcnt %d -> %d\n",
1197 	    lacp_format_lagid(&la->la_actor, &la->la_partner,
1199 	    la->la_refcnt, la->la_refcnt - 1));
1201 	KASSERT(la->la_refcnt > la->la_nports, ("invalid refcnt"));
1203 	if (la->la_refcnt > 0) {
1207 	KASSERT(la->la_refcnt == 0, ("refcount not zero"));
1208 	KASSERT(lsc->lsc_active_aggregator != la, ("aggregator active"));
1210 	TAILQ_REMOVE(&lsc->lsc_aggregators, la, la_q);
1216  * lacp_aggregator_get: allocate an aggregator.
/* M_NOWAIT allocation — caller (lacp_select) tolerates NULL and retries on
 * the next tick. NOTE(review): field initialization (refcnt, nports, etc.)
 * and the return are missing from this extract. */
1219 static struct lacp_aggregator *
1220 lacp_aggregator_get(struct lacp_softc *lsc, struct lacp_port *lp)
1222 	struct lacp_aggregator *la;
1224 	la = malloc(sizeof(*la), M_DEVBUF, M_NOWAIT);
1228 		TAILQ_INIT(&la->la_ports);
1230 		TAILQ_INSERT_TAIL(&lsc->lsc_aggregators, la, la_q);
1237  * lacp_fill_aggregator_id: setup a newly allocated aggregator from a port.
/* Copies the identifying parts of the port's actor/partner info into the
 * aggregator; only the AGGREGATION bit of the port state is retained. */
1241 lacp_fill_aggregator_id(struct lacp_aggregator *la, const struct lacp_port *lp)
1243 	lacp_fill_aggregator_id_peer(&la->la_partner, &lp->lp_partner);
1244 	lacp_fill_aggregator_id_peer(&la->la_actor, &lp->lp_actor);
1246 	la->la_actor.lip_state = lp->lp_state & LACP_STATE_AGGREGATION;
/* Copy only the LAG-identifying fields (system id + key) of a peerinfo;
 * port id and state are zeroed since they are per-port, not per-LAG. */
1250 lacp_fill_aggregator_id_peer(struct lacp_peerinfo *lpi_aggr,
1251     const struct lacp_peerinfo *lpi_port)
1253 	memset(lpi_aggr, 0, sizeof(*lpi_aggr));
1254 	lpi_aggr->lip_systemid = lpi_port->lip_systemid;
1255 	lpi_aggr->lip_key = lpi_port->lip_key;
1259  * lacp_aggregator_is_compatible: check if a port can join to an aggregator.
/* Both actor and partner must have AGGREGATION set, and the port's actor
 * and partner (system id + key) must match the aggregator's.
 * NOTE(review): return statements and braces missing from this extract. */
1263 lacp_aggregator_is_compatible(const struct lacp_aggregator *la,
1264     const struct lacp_port *lp)
1266 	if (!(lp->lp_state & LACP_STATE_AGGREGATION) ||
1267 	    !(lp->lp_partner.lip_state & LACP_STATE_AGGREGATION)) {
1271 	if (!(la->la_actor.lip_state & LACP_STATE_AGGREGATION)) {
1275 	if (!lacp_peerinfo_is_compatible(&la->la_partner, &lp->lp_partner)) {
1279 	if (!lacp_peerinfo_is_compatible(&la->la_actor, &lp->lp_actor)) {
/* Two peerinfos are compatible for aggregation when system id and key are
 * equal. NOTE(review): return statements missing from this extract. */
1287 lacp_peerinfo_is_compatible(const struct lacp_peerinfo *a,
1288     const struct lacp_peerinfo *b)
1290 	if (memcmp(&a->lip_systemid, &b->lip_systemid,
1291 	    sizeof(a->lip_systemid))) {
1295 	if (memcmp(&a->lip_key, &b->lip_key, sizeof(a->lip_key))) {
/* Mark the port as aggregatable (link is FDX Ethernet and up). */
1303 lacp_port_enable(struct lacp_port *lp)
1305 	lp->lp_state |= LACP_STATE_AGGREGATION;
/* Take the port out of aggregation: detach MUX, clear AGGREGATION and
 * EXPIRED, unselect, and fall back to the default (admin) partner info. */
1309 lacp_port_disable(struct lacp_port *lp)
1311 	lacp_set_mux(lp, LACP_MUX_DETACHED);
1313 	lp->lp_state &= ~LACP_STATE_AGGREGATION;
1314 	lp->lp_selected = LACP_UNSELECTED;
1315 	lacp_sm_rx_record_default(lp);
1316 	lp->lp_partner.lip_state &= ~LACP_STATE_AGGREGATION;
1317 	lp->lp_state &= ~LACP_STATE_EXPIRED;
1321  * lacp_select: select an aggregator.  create one if necessary.
/* Finds a compatible existing aggregator (skipping full ones), or creates a
 * new one; on success the port becomes SELECTED with lp_aggregator set.
 * Allocation failure is tolerated: selection retries on the next tick.
 * NOTE(review): early return, loop break, if/else framing and "continue"
 * lines are missing from this extract. */
1324 lacp_select(struct lacp_port *lp)
1326 	struct lacp_softc *lsc = lp->lp_lsc;
1327 	struct lacp_aggregator *la;
1328 	char buf[LACP_LAGIDSTR_MAX+1];
1330 	if (lp->lp_aggregator) {
1334 	KASSERT(!LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE),
1335 	    ("timer_wait_while still active"));
1337 	LACP_DPRINTF((lp, "port lagid=%s\n",
1338 	    lacp_format_lagid(&lp->lp_actor, &lp->lp_partner,
1339 	    buf, sizeof(buf))));
1341 	TAILQ_FOREACH(la, &lsc->lsc_aggregators, la_q) {
1342 		if (lacp_aggregator_is_compatible(la, lp)) {
1348 		la = lacp_aggregator_get(lsc, lp);
1350 			LACP_DPRINTF((lp, "aggregator creation failed\n"));
1353 			 * will retry on the next tick.
1358 		lacp_fill_aggregator_id(la, lp);
1359 		LACP_DPRINTF((lp, "aggregator created\n"));
1361 		LACP_DPRINTF((lp, "compatible aggregator found\n"));
/* Refuse to exceed LACP_MAX_PORTS members per aggregator. */
1362 		if (la->la_refcnt == LACP_MAX_PORTS)
1364 		lacp_aggregator_addref(lsc, la);
1367 	LACP_DPRINTF((lp, "aggregator lagid=%s\n",
1368 	    lacp_format_lagid(&la->la_actor, &la->la_partner,
1369 	    buf, sizeof(buf))));
1371 	lp->lp_aggregator = la;
1372 	lp->lp_selected = LACP_SELECTED;
1376 * lacp_unselect: finish unselect/detach process.
1380 lacp_unselect(struct lacp_port *lp)
1382 struct lacp_softc *lsc = lp->lp_lsc;
1383 struct lacp_aggregator *la = lp->lp_aggregator;
1385 KASSERT(!LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE),
1386 ("timer_wait_while still active"));
1392 lp->lp_aggregator = NULL;
1393 lacp_aggregator_delref(lsc, la);
/*
 * lacp_sm_mux: the LACP MUX state machine for one port.
 *
 * Computes the next mux state from the current state, the selection
 * result (lp_selected) and the partner's SYNC/COLLECTING bits, and
 * applies it via lacp_set_mux().
 */
lacp_sm_mux(struct lacp_port *lp)

	struct lagg_port *lgp = lp->lp_lagg;
	struct lagg_softc *sc = lgp->lp_softc;
	enum lacp_mux_state new_state;
	/* Partner agrees with our view of the aggregation (SYNC bit). */
	    (lp->lp_partner.lip_state & LACP_STATE_SYNC) != 0;
	boolean_t p_collecting =
	    (lp->lp_partner.lip_state & LACP_STATE_COLLECTING) != 0;
	enum lacp_selected selected = lp->lp_selected;
	struct lacp_aggregator *la;

	if (V_lacp_debug > 1)
		lacp_dprintf(lp, "%s: state= 0x%x, selected= 0x%x, "
		    "p_sync= 0x%x, p_collecting= 0x%x\n", __func__,
		    lp->lp_mux_state, selected, p_sync, p_collecting);

	la = lp->lp_aggregator;
	/* Only the DETACHED state may have no aggregator attached. */
	KASSERT(lp->lp_mux_state == LACP_MUX_DETACHED || la != NULL,
	    ("MUX not detached"));
	new_state = lp->lp_mux_state;
	switch (lp->lp_mux_state) {
	case LACP_MUX_DETACHED:
		if (selected != LACP_UNSELECTED) {
			new_state = LACP_MUX_WAITING;
	case LACP_MUX_WAITING:
		KASSERT(la->la_pending > 0 ||
		    !LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE),
		    ("timer_wait_while still active"));
		/* Attach only once all sibling ports finished waiting. */
		if (selected == LACP_SELECTED && la->la_pending == 0) {
			new_state = LACP_MUX_ATTACHED;
		} else if (selected == LACP_UNSELECTED) {
			new_state = LACP_MUX_DETACHED;
	case LACP_MUX_ATTACHED:
		/* Partner in sync: start collecting. */
		if (selected == LACP_SELECTED && p_sync) {
			new_state = LACP_MUX_COLLECTING;
		} else if (selected != LACP_SELECTED) {
			new_state = LACP_MUX_DETACHED;
	case LACP_MUX_COLLECTING:
		/* Partner collecting too: start distributing. */
		if (selected == LACP_SELECTED && p_sync && p_collecting) {
			new_state = LACP_MUX_DISTRIBUTING;
		} else if (selected != LACP_SELECTED || !p_sync) {
			new_state = LACP_MUX_ATTACHED;
	case LACP_MUX_DISTRIBUTING:
		if (selected != LACP_SELECTED || !p_sync || !p_collecting) {
			new_state = LACP_MUX_COLLECTING;
			lacp_dprintf(lp, "Interface stopped DISTRIBUTING, possible flapping\n");
		panic("%s: unknown state", __func__);

	/* No transition: done. */
	if (lp->lp_mux_state == new_state) {

	lacp_set_mux(lp, new_state);
/*
 * lacp_set_mux: perform the entry actions for a mux state transition
 * and record the new state in lp->lp_mux_state.
 */
lacp_set_mux(struct lacp_port *lp, enum lacp_mux_state new_state)

	struct lacp_aggregator *la = lp->lp_aggregator;

	/* No-op transition. */
	if (lp->lp_mux_state == new_state) {

	switch (new_state) {
	case LACP_MUX_DETACHED:
		lp->lp_state &= ~LACP_STATE_SYNC;
		/* Stop passing traffic in both directions. */
		lacp_disable_distributing(lp);
		lacp_disable_collecting(lp);
		lacp_sm_assert_ntt(lp);
		/* Cancel a pending wait_while, if one was running. */
		if (LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE)) {
			KASSERT(la->la_pending > 0,
			    ("timer_wait_while not active"));
			LACP_TIMER_DISARM(lp, LACP_TIMER_WAIT_WHILE);
	case LACP_MUX_WAITING:
		/* Hold off attaching until the aggregate settles. */
		LACP_TIMER_ARM(lp, LACP_TIMER_WAIT_WHILE,
		    LACP_AGGREGATE_WAIT_TIME);
	case LACP_MUX_ATTACHED:
		lp->lp_state |= LACP_STATE_SYNC;
		lacp_disable_collecting(lp);
		lacp_sm_assert_ntt(lp);
	case LACP_MUX_COLLECTING:
		/* Receive traffic, but do not transmit on this port yet. */
		lacp_enable_collecting(lp);
		lacp_disable_distributing(lp);
		lacp_sm_assert_ntt(lp);
	case LACP_MUX_DISTRIBUTING:
		lacp_enable_distributing(lp);
		panic("%s: unknown state", __func__);

	LACP_DPRINTF((lp, "mux_state %d -> %d\n", lp->lp_mux_state, new_state));

	lp->lp_mux_state = new_state;
/*
 * lacp_sm_mux_timer: wait_while timer expiry for a port.
 * Decrements the aggregator's pending-port count (logged below).
 */
lacp_sm_mux_timer(struct lacp_port *lp)

	struct lacp_aggregator *la = lp->lp_aggregator;
	/* Scratch buffer for the LAG id in the debug message. */
	char buf[LACP_LAGIDSTR_MAX+1];

	KASSERT(la->la_pending > 0, ("no pending event"));

	LACP_DPRINTF((lp, "%s: aggregator %s, pending %d -> %d\n", __func__,
	    lacp_format_lagid(&la->la_actor, &la->la_partner,
	    la->la_pending, la->la_pending - 1));
1537 /* periodic transmit machine */
1540 lacp_sm_ptx_update_timeout(struct lacp_port *lp, uint8_t oldpstate)
1542 if (LACP_STATE_EQ(oldpstate, lp->lp_partner.lip_state,
1543 LACP_STATE_TIMEOUT)) {
1547 LACP_DPRINTF((lp, "partner timeout changed\n"));
1550 * FAST_PERIODIC -> SLOW_PERIODIC
1552 * SLOW_PERIODIC (-> PERIODIC_TX) -> FAST_PERIODIC
1554 * let lacp_sm_ptx_tx_schedule to update timeout.
1557 LACP_TIMER_DISARM(lp, LACP_TIMER_PERIODIC);
1560 * if timeout has been shortened, assert NTT.
1563 if ((lp->lp_partner.lip_state & LACP_STATE_TIMEOUT)) {
1564 lacp_sm_assert_ntt(lp);
1569 lacp_sm_ptx_tx_schedule(struct lacp_port *lp)
1573 if (!(lp->lp_state & LACP_STATE_ACTIVITY) &&
1574 !(lp->lp_partner.lip_state & LACP_STATE_ACTIVITY)) {
1580 LACP_TIMER_DISARM(lp, LACP_TIMER_PERIODIC);
1584 if (LACP_TIMER_ISARMED(lp, LACP_TIMER_PERIODIC)) {
1588 timeout = (lp->lp_partner.lip_state & LACP_STATE_TIMEOUT) ?
1589 LACP_FAST_PERIODIC_TIME : LACP_SLOW_PERIODIC_TIME;
1591 LACP_TIMER_ARM(lp, LACP_TIMER_PERIODIC, timeout);
/*
 * lacp_sm_ptx_timer: periodic timer expiry — time to send another PDU.
 */
static void
lacp_sm_ptx_timer(struct lacp_port *lp)
{
	lacp_sm_assert_ntt(lp);
}
1601 lacp_sm_rx(struct lacp_port *lp, const struct lacpdu *du)
1606 * check LACP_DISABLED first
1609 if (!(lp->lp_state & LACP_STATE_AGGREGATION)) {
1614 * check loopback condition.
1617 if (!lacp_compare_systemid(&du->ldu_actor.lip_systemid,
1618 &lp->lp_actor.lip_systemid)) {
1623 * EXPIRED, DEFAULTED, CURRENT -> CURRENT
1626 lacp_sm_rx_update_selected(lp, du);
1627 lacp_sm_rx_update_ntt(lp, du);
1628 lacp_sm_rx_record_pdu(lp, du);
1630 timeout = (lp->lp_state & LACP_STATE_TIMEOUT) ?
1631 LACP_SHORT_TIMEOUT_TIME : LACP_LONG_TIMEOUT_TIME;
1632 LACP_TIMER_ARM(lp, LACP_TIMER_CURRENT_WHILE, timeout);
1634 lp->lp_state &= ~LACP_STATE_EXPIRED;
1637 * kick transmit machine without waiting the next tick.
1644 lacp_sm_rx_set_expired(struct lacp_port *lp)
1646 lp->lp_partner.lip_state &= ~LACP_STATE_SYNC;
1647 lp->lp_partner.lip_state |= LACP_STATE_TIMEOUT;
1648 LACP_TIMER_ARM(lp, LACP_TIMER_CURRENT_WHILE, LACP_SHORT_TIMEOUT_TIME);
1649 lp->lp_state |= LACP_STATE_EXPIRED;
1653 lacp_sm_rx_timer(struct lacp_port *lp)
1655 if ((lp->lp_state & LACP_STATE_EXPIRED) == 0) {
1656 /* CURRENT -> EXPIRED */
1657 LACP_DPRINTF((lp, "%s: CURRENT -> EXPIRED\n", __func__));
1658 lacp_sm_rx_set_expired(lp);
1660 /* EXPIRED -> DEFAULTED */
1661 LACP_DPRINTF((lp, "%s: EXPIRED -> DEFAULTED\n", __func__));
1662 lacp_sm_rx_update_default_selected(lp);
1663 lacp_sm_rx_record_default(lp);
1664 lp->lp_state &= ~LACP_STATE_EXPIRED;
/*
 * lacp_sm_rx_record_pdu: copy the actor information from a received
 * LACPDU into our partner record and derive the partner SYNC bit.
 */
lacp_sm_rx_record_pdu(struct lacp_port *lp, const struct lacpdu *du)

	char buf[LACP_STATESTR_MAX+1];

	/* Remember the previous partner state to detect changes below. */
	oldpstate = lp->lp_partner.lip_state;

	/* Either end being ACTIVE keeps the exchange alive. */
	active = (du->ldu_actor.lip_state & LACP_STATE_ACTIVITY)
	    || ((lp->lp_state & LACP_STATE_ACTIVITY) &&
	    (du->ldu_partner.lip_state & LACP_STATE_ACTIVITY));

	/* The sender's actor info is our partner info. */
	lp->lp_partner = du->ldu_actor;

	/*
	 * Partner is in sync either when its view of us matches our
	 * actor info (with matching AGGREGATION bit), or when it is
	 * operating as an individual (no AGGREGATION).
	 */
	    ((LACP_STATE_EQ(lp->lp_state, du->ldu_partner.lip_state,
	    LACP_STATE_AGGREGATION) &&
	    !lacp_compare_peerinfo(&lp->lp_actor, &du->ldu_partner))
	    || (du->ldu_partner.lip_state & LACP_STATE_AGGREGATION) == 0)) {
		lp->lp_partner.lip_state &= ~LACP_STATE_SYNC;

	/* We now have live partner info, not the admin default. */
	lp->lp_state &= ~LACP_STATE_DEFAULTED;

	if (oldpstate != lp->lp_partner.lip_state) {
		LACP_DPRINTF((lp, "old pstate %s\n",
		    lacp_format_state(oldpstate, buf, sizeof(buf))));
		LACP_DPRINTF((lp, "new pstate %s\n",
		    lacp_format_state(lp->lp_partner.lip_state, buf,

	/* XXX Hack, still need to implement 5.4.9 para 2,3,4 */
	if (lp->lp_lsc->lsc_strict_mode)
		lp->lp_partner.lip_state |= LACP_STATE_SYNC;

	lacp_sm_ptx_update_timeout(lp, oldpstate);
1712 lacp_sm_rx_update_ntt(struct lacp_port *lp, const struct lacpdu *du)
1717 if (lacp_compare_peerinfo(&lp->lp_actor, &du->ldu_partner) ||
1718 !LACP_STATE_EQ(lp->lp_state, du->ldu_partner.lip_state,
1719 LACP_STATE_ACTIVITY | LACP_STATE_SYNC | LACP_STATE_AGGREGATION)) {
1720 LACP_DPRINTF((lp, "%s: assert ntt\n", __func__));
1721 lacp_sm_assert_ntt(lp);
1726 lacp_sm_rx_record_default(struct lacp_port *lp)
1732 oldpstate = lp->lp_partner.lip_state;
1733 if (lp->lp_lsc->lsc_strict_mode)
1734 lp->lp_partner = lacp_partner_admin_strict;
1736 lp->lp_partner = lacp_partner_admin_optimistic;
1737 lp->lp_state |= LACP_STATE_DEFAULTED;
1738 lacp_sm_ptx_update_timeout(lp, oldpstate);
1742 lacp_sm_rx_update_selected_from_peerinfo(struct lacp_port *lp,
1743 const struct lacp_peerinfo *info)
1748 if (lacp_compare_peerinfo(&lp->lp_partner, info) ||
1749 !LACP_STATE_EQ(lp->lp_partner.lip_state, info->lip_state,
1750 LACP_STATE_AGGREGATION)) {
1751 lp->lp_selected = LACP_UNSELECTED;
1752 /* mux machine will clean up lp->lp_aggregator */
1757 lacp_sm_rx_update_selected(struct lacp_port *lp, const struct lacpdu *du)
1762 lacp_sm_rx_update_selected_from_peerinfo(lp, &du->ldu_actor);
1766 lacp_sm_rx_update_default_selected(struct lacp_port *lp)
1771 if (lp->lp_lsc->lsc_strict_mode)
1772 lacp_sm_rx_update_selected_from_peerinfo(lp,
1773 &lacp_partner_admin_strict);
1775 lacp_sm_rx_update_selected_from_peerinfo(lp,
1776 &lacp_partner_admin_optimistic);
/* transmit machine */

/*
 * lacp_sm_tx: transmit a LACPDU if NTT (need-to-transmit) is set,
 * subject to a rate limit.
 */
lacp_sm_tx(struct lacp_port *lp)

	/* Not aggregating, or both ends passive: clear NTT and bail. */
	if (!(lp->lp_state & LACP_STATE_AGGREGATION)
	    || (!(lp->lp_state & LACP_STATE_ACTIVITY)
	    && !(lp->lp_partner.lip_state & LACP_STATE_ACTIVITY))

		lp->lp_flags &= ~LACP_PORT_NTT;

	/* Nothing pending to transmit. */
	if (!(lp->lp_flags & LACP_PORT_NTT)) {

	/* Rate limit to 3 PDUs per LACP_FAST_PERIODIC_TIME */
	if (ppsratecheck(&lp->lp_last_lacpdu, &lp->lp_lacpdu_sent,
	    (3 / LACP_FAST_PERIODIC_TIME)) == 0) {
		LACP_DPRINTF((lp, "rate limited pdu\n"));

	/* Optional per-unit test hook suppresses actual transmission. */
	if (((1 << lp->lp_ifp->if_dunit) & lp->lp_lsc->lsc_debug.lsc_tx_test) == 0) {
		error = lacp_xmit_lacpdu(lp);
		LACP_TPRINTF((lp, "Dropping TX PDU\n"));

		/* Sent (or deliberately dropped): NTT satisfied. */
		lp->lp_flags &= ~LACP_PORT_NTT;
		LACP_DPRINTF((lp, "lacpdu transmit failure, error %d\n",
1821 lacp_sm_assert_ntt(struct lacp_port *lp)
1824 lp->lp_flags |= LACP_PORT_NTT;
1828 lacp_run_timers(struct lacp_port *lp)
1832 for (i = 0; i < LACP_NTIMER; i++) {
1833 KASSERT(lp->lp_timer[i] >= 0,
1834 ("invalid timer value %d", lp->lp_timer[i]));
1835 if (lp->lp_timer[i] == 0) {
1837 } else if (--lp->lp_timer[i] <= 0) {
1838 if (lacp_timer_funcs[i]) {
1839 (*lacp_timer_funcs[i])(lp);
/*
 * lacp_marker_input: process a received marker frame (mbuf m).
 *
 * Marker INFO frames are echoed back as RESPONSE; marker RESPONSE
 * frames acknowledge our own flush marker and, once every port has
 * answered, end distribution suppression.
 */
lacp_marker_input(struct lacp_port *lp, struct mbuf *m)

	struct lacp_softc *lsc = lp->lp_lsc;
	struct lagg_port *lgp = lp->lp_lagg;
	struct lacp_port *lp2;
	struct markerdu *mdu;

	/* Sanity: exact marker PDU length expected. */
	if (m->m_pkthdr.len != sizeof(*mdu)) {

	/* Markers must arrive on the slow-protocols multicast address. */
	if ((m->m_flags & M_MCAST) == 0) {

	/* Make the whole PDU contiguous before casting. */
	if (m->m_len < sizeof(*mdu)) {
		m = m_pullup(m, sizeof(*mdu));

	mdu = mtod(m, struct markerdu *);

	if (memcmp(&mdu->mdu_eh.ether_dhost,
	    &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN)) {

	/* Only marker protocol version 1 is understood. */
	if (mdu->mdu_sph.sph_version != 1) {

	switch (mdu->mdu_tlv.tlv_type) {
	case MARKER_TYPE_INFO:
		if (tlv_check(mdu, sizeof(*mdu), &mdu->mdu_tlv,
		    marker_info_tlv_template, TRUE)) {

		/* Turn the request around in place and send it back. */
		mdu->mdu_tlv.tlv_type = MARKER_TYPE_RESPONSE;
		memcpy(&mdu->mdu_eh.ether_dhost,
		    &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN);
		memcpy(&mdu->mdu_eh.ether_shost,
		    lgp->lp_lladdr, ETHER_ADDR_LEN);
		error = lagg_enqueue(lp->lp_ifp, m);

	case MARKER_TYPE_RESPONSE:
		if (tlv_check(mdu, sizeof(*mdu), &mdu->mdu_tlv,
		    marker_response_tlv_template, TRUE)) {

		LACP_DPRINTF((lp, "marker response, port=%u, sys=%6D, id=%u\n",
		    ntohs(mdu->mdu_info.mi_rq_port), mdu->mdu_info.mi_rq_system,
		    ":", ntohl(mdu->mdu_info.mi_rq_xid)));

		/* Verify that it is the last marker we sent out */
		if (memcmp(&mdu->mdu_info, &lp->lp_marker,
		    sizeof(struct lacp_markerinfo)))

			/* This port's flush marker is acknowledged. */
			lp->lp_flags &= ~LACP_PORT_MARK;

			if (lsc->lsc_suppress_distributing) {
				/* Check if any ports are waiting for a response */
				LIST_FOREACH(lp2, &lsc->lsc_ports, lp_next) {
					if (lp2->lp_flags & LACP_PORT_MARK) {

				/* All interface queues are clear */
				LACP_DPRINTF((NULL, "queue flush complete\n"));
				lsc->lsc_suppress_distributing = FALSE;

	LACP_DPRINTF((lp, "bad marker frame\n"));
1944 tlv_check(const void *p, size_t size, const struct tlvhdr *tlv,
1945 const struct tlv_template *tmpl, boolean_t check_type)
1947 while (/* CONSTCOND */ 1) {
1948 if ((const char *)tlv - (const char *)p + sizeof(*tlv) > size) {
1951 if ((check_type && tlv->tlv_type != tmpl->tmpl_type) ||
1952 tlv->tlv_length != tmpl->tmpl_length) {
1955 if (tmpl->tmpl_type == 0) {
1958 tlv = (const struct tlvhdr *)
1959 ((const char *)tlv + tlv->tlv_length);
/*
 * lacp_format_mac: render a 6-byte MAC address as "XX-XX-XX-XX-XX-XX"
 * into buf (truncating at buflen) and return buf.
 */
static const char *
lacp_format_mac(const uint8_t *mac, char *buf, size_t buflen)
{
	snprintf(buf, buflen, "%02X-%02X-%02X-%02X-%02X-%02X",
	    (int)mac[0], (int)mac[1], (int)mac[2],
	    (int)mac[3], (int)mac[4], (int)mac[5]);

	return (buf);
}
1982 lacp_format_systemid(const struct lacp_systemid *sysid,
1983 char *buf, size_t buflen)
1985 char macbuf[LACP_MACSTR_MAX+1];
1987 snprintf(buf, buflen, "%04X,%s",
1988 ntohs(sysid->lsi_prio),
1989 lacp_format_mac(sysid->lsi_mac, macbuf, sizeof(macbuf)));
1995 lacp_format_portid(const struct lacp_portid *portid, char *buf, size_t buflen)
1997 snprintf(buf, buflen, "%04X,%04X",
1998 ntohs(portid->lpi_prio),
1999 ntohs(portid->lpi_portno));
2005 lacp_format_partner(const struct lacp_peerinfo *peer, char *buf, size_t buflen)
2007 char sysid[LACP_SYSTEMIDSTR_MAX+1];
2008 char portid[LACP_PORTIDSTR_MAX+1];
2010 snprintf(buf, buflen, "(%s,%04X,%s)",
2011 lacp_format_systemid(&peer->lip_systemid, sysid, sizeof(sysid)),
2012 ntohs(peer->lip_key),
2013 lacp_format_portid(&peer->lip_portid, portid, sizeof(portid)));
2019 lacp_format_lagid(const struct lacp_peerinfo *a,
2020 const struct lacp_peerinfo *b, char *buf, size_t buflen)
2022 char astr[LACP_PARTNERSTR_MAX+1];
2023 char bstr[LACP_PARTNERSTR_MAX+1];
2027 * there's a convention to display small numbered peer
2031 if (lacp_compare_peerinfo(a, b) > 0) {
2032 const struct lacp_peerinfo *t;
2040 snprintf(buf, buflen, "[%s,%s]",
2041 lacp_format_partner(a, astr, sizeof(astr)),
2042 lacp_format_partner(b, bstr, sizeof(bstr)));
2048 lacp_format_lagid_aggregator(const struct lacp_aggregator *la,
2049 char *buf, size_t buflen)
2055 return (lacp_format_lagid(&la->la_actor, &la->la_partner, buf, buflen));
2059 lacp_format_state(uint8_t state, char *buf, size_t buflen)
2061 snprintf(buf, buflen, "%b", state, LACP_STATE_BITS);
2066 lacp_dump_lacpdu(const struct lacpdu *du)
2068 char buf[LACP_PARTNERSTR_MAX+1];
2069 char buf2[LACP_STATESTR_MAX+1];
2071 printf("actor=%s\n",
2072 lacp_format_partner(&du->ldu_actor, buf, sizeof(buf)));
2073 printf("actor.state=%s\n",
2074 lacp_format_state(du->ldu_actor.lip_state, buf2, sizeof(buf2)));
2075 printf("partner=%s\n",
2076 lacp_format_partner(&du->ldu_partner, buf, sizeof(buf)));
2077 printf("partner.state=%s\n",
2078 lacp_format_state(du->ldu_partner.lip_state, buf2, sizeof(buf2)));
2080 printf("maxdelay=%d\n", ntohs(du->ldu_collector.lci_maxdelay));
2084 lacp_dprintf(const struct lacp_port *lp, const char *fmt, ...)
2089 printf("%s: ", lp->lp_ifp->if_xname);