/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021 Microsoft Corp.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>

#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>

#include <sys/socket.h>
#include <sys/sockio.h>

#include <sys/eventhandler.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/in_cksum.h>

#include <net/if_var.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/rss_config.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include "mana_sysctl.h"
static int mana_up(struct mana_port_context *apc);
static int mana_down(struct mana_port_context *apc);
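
/*
 * Generate the RSS hash key once, lazily, and hand out a copy on each
 * call; every port of the device therefore ends up with the same
 * Toeplitz key.
 */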
static void
mana_rss_key_fill(void *k, size_t size)
{
    static bool rss_key_generated = false;
    static uint8_t rss_key[MANA_HASH_KEY_SIZE];

    KASSERT(size <= MANA_HASH_KEY_SIZE,
        ("Requested more bytes than the MANA RSS key can hold"));

    if (!rss_key_generated) {
        arc4random_buf(rss_key, MANA_HASH_KEY_SIZE);
        rss_key_generated = true;
    }

    memcpy(k, rss_key, size);
}
static int
mana_ifmedia_change(struct ifnet *ifp __unused)
{
    return EOPNOTSUPP;
}

static void
mana_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
    struct mana_port_context *apc = if_getsoftc(ifp);

    if (!apc) {
        if_printf(ifp, "Port not available\n");
        return;
    }

    MANA_APC_LOCK_LOCK(apc);

    ifmr->ifm_status = IFM_AVALID;
    ifmr->ifm_active = IFM_ETHER;

    if (!apc->port_is_up) {
        MANA_APC_LOCK_UNLOCK(apc);
        mana_info(NULL, "Port %u link is down\n", apc->port_idx);
        return;
    }

    ifmr->ifm_status |= IFM_ACTIVE;
    ifmr->ifm_active |= IFM_100G_DR | IFM_FDX;

    MANA_APC_LOCK_UNLOCK(apc);
}
static uint64_t
mana_get_counter(struct ifnet *ifp, ift_counter cnt)
{
    struct mana_port_context *apc = if_getsoftc(ifp);
    struct mana_port_stats *stats = &apc->port_stats;

    switch (cnt) {
    case IFCOUNTER_IPACKETS:
        return (counter_u64_fetch(stats->rx_packets));
    case IFCOUNTER_OPACKETS:
        return (counter_u64_fetch(stats->tx_packets));
    case IFCOUNTER_IBYTES:
        return (counter_u64_fetch(stats->rx_bytes));
    case IFCOUNTER_OBYTES:
        return (counter_u64_fetch(stats->tx_bytes));
    case IFCOUNTER_IQDROPS:
        return (counter_u64_fetch(stats->rx_drops));
    case IFCOUNTER_OQDROPS:
        return (counter_u64_fetch(stats->tx_drops));
    default:
        return (if_get_counter_default(ifp, cnt));
    }
}
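
/*
 * taskqueue_cancel(9) returns nonzero while an instance of the task is
 * still running; keep draining until the task is neither pending nor
 * running, so no cleanup work races with queue teardown.
 */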
static void
mana_drain_eq_task(struct gdma_queue *queue)
{
    if (!queue || !queue->eq.cleanup_tq)
        return;

    while (taskqueue_cancel(queue->eq.cleanup_tq,
        &queue->eq.cleanup_task, NULL)) {
        taskqueue_drain(queue->eq.cleanup_tq,
            &queue->eq.cleanup_task);
    }
}

static void
mana_qflush(struct ifnet *ifp)
{
    if_qflush(ifp);
}
int
mana_restart(struct mana_port_context *apc)
{
    int rc = 0;

    MANA_APC_LOCK_LOCK(apc);
    if (apc->port_is_up)
        mana_down(apc);

    rc = mana_up(apc);
    MANA_APC_LOCK_UNLOCK(apc);

    return rc;
}

static int
mana_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
    struct mana_port_context *apc = if_getsoftc(ifp);
    struct ifrsskey *ifrk;
    struct ifrsshash *ifrh;
    struct ifreq *ifr;
    int new_mtu;
    int rc = 0;

    switch (command) {
    case SIOCSIFMTU:
        ifr = (struct ifreq *)data;
        new_mtu = ifr->ifr_mtu;
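        /*
         * The "+ 18" below accounts for the Ethernet header plus a
         * VLAN tag: ETHER_HDR_LEN (14) + ETHER_VLAN_ENCAP_LEN (4).
         * apc->frame_size tracks the on-wire frame size, not the MTU.
         */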
        if (ifp->if_mtu == new_mtu)
            break;

        if ((new_mtu + 18 > MAX_FRAME_SIZE) ||
            (new_mtu + 18 < MIN_FRAME_SIZE)) {
            if_printf(ifp, "Invalid MTU. new_mtu: %d, "
                "max allowed: %d, min allowed: %d\n",
                new_mtu, MAX_FRAME_SIZE - 18, MIN_FRAME_SIZE - 18);
            rc = EINVAL;
            break;
        }

        MANA_APC_LOCK_LOCK(apc);

        apc->frame_size = new_mtu + 18;
        if_setmtu(ifp, new_mtu);
        mana_dbg(NULL, "Set MTU to %d\n", new_mtu);

        MANA_APC_LOCK_UNLOCK(apc);
        break;

    case SIOCSIFFLAGS:
        if (ifp->if_flags & IFF_UP) {
            if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
                MANA_APC_LOCK_LOCK(apc);
                if (!apc->port_is_up)
                    rc = mana_up(apc);
                MANA_APC_LOCK_UNLOCK(apc);
            }
        } else {
            if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                MANA_APC_LOCK_LOCK(apc);
                if (apc->port_is_up)
                    rc = mana_down(apc);
                MANA_APC_LOCK_UNLOCK(apc);
            }
        }
        break;

    case SIOCSIFMEDIA:
    case SIOCGIFMEDIA:
        ifr = (struct ifreq *)data;
        rc = ifmedia_ioctl(ifp, ifr, &apc->media, command);
        break;

    case SIOCGIFRSSKEY:
        ifrk = (struct ifrsskey *)data;
        ifrk->ifrk_func = RSS_FUNC_TOEPLITZ;
        ifrk->ifrk_keylen = MANA_HASH_KEY_SIZE;
        memcpy(ifrk->ifrk_key, apc->hashkey, MANA_HASH_KEY_SIZE);
        break;

    case SIOCGIFRSSHASH:
        ifrh = (struct ifrsshash *)data;
        ifrh->ifrh_func = RSS_FUNC_TOEPLITZ;

        break;

    default:
        rc = ether_ioctl(ifp, command, data);
        break;
    }

    return rc;
}
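
/*
 * The stats structures are laid out as plain arrays of counter_u64_t
 * fields, which lets the two helpers below allocate or free every
 * per-CPU counter in a structure given only its base address and size.
 */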
static void
mana_alloc_counters(counter_u64_t *begin, int size)
{
    counter_u64_t *end = (counter_u64_t *)((char *)begin + size);

    for (; begin < end; ++begin)
        *begin = counter_u64_alloc(M_WAITOK);
}

static void
mana_free_counters(counter_u64_t *begin, int size)
{
    counter_u64_t *end = (counter_u64_t *)((char *)begin + size);

    for (; begin < end; ++begin)
        counter_u64_free(*begin);
}
static inline bool
mana_can_tx(struct gdma_queue *wq)
{
    return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE;
}

static int
mana_tx_map_mbuf(struct mana_port_context *apc,
    struct mana_send_buf_info *tx_info,
    struct mbuf **m_head, struct mana_tx_package *tp,
    struct mana_stats *tx_stats)
{
    struct gdma_dev *gd = apc->ac->gdma_dev;
    bus_dma_segment_t segs[MAX_MBUF_FRAGS];
    struct mbuf *m = *m_head;
    int err, nsegs, i;

    err = bus_dmamap_load_mbuf_sg(apc->tx_buf_tag, tx_info->dma_map,
        m, segs, &nsegs, BUS_DMA_NOWAIT);
    if (err == EFBIG) {
        struct mbuf *m_new;

        counter_u64_add(tx_stats->collapse, 1);
        m_new = m_collapse(m, M_NOWAIT, MAX_MBUF_FRAGS);
        if (unlikely(m_new == NULL)) {
            counter_u64_add(tx_stats->collapse_err, 1);
            return ENOBUFS;
        }
        *m_head = m = m_new;

        mana_warn(NULL,
            "Too many segs in orig mbuf, m_collapse called\n");

        err = bus_dmamap_load_mbuf_sg(apc->tx_buf_tag,
            tx_info->dma_map, m, segs, &nsegs, BUS_DMA_NOWAIT);
    }
    if (err)
        return err;

    for (i = 0; i < nsegs; i++) {
        tp->wqe_req.sgl[i].address = segs[i].ds_addr;
        tp->wqe_req.sgl[i].mem_key = gd->gpa_mkey;
        tp->wqe_req.sgl[i].size = segs[i].ds_len;
    }
    tp->wqe_req.num_sge = nsegs;

    tx_info->mbuf = *m_head;

    bus_dmamap_sync(apc->tx_buf_tag, tx_info->dma_map,
        BUS_DMASYNC_PREWRITE);

    return 0;
}
static inline void
mana_tx_unmap_mbuf(struct mana_port_context *apc,
    struct mana_send_buf_info *tx_info)
{
    bus_dmamap_sync(apc->tx_buf_tag, tx_info->dma_map,
        BUS_DMASYNC_POSTWRITE);
    bus_dmamap_unload(apc->tx_buf_tag, tx_info->dma_map);
    if (tx_info->mbuf) {
        m_freem(tx_info->mbuf);
        tx_info->mbuf = NULL;
    }
}
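
/*
 * Attach an rx buffer to rx_oob and fill in its scatter/gather entry.
 * A jumbo cluster sized to rxq->datasize is preferred; if that fails,
 * a standard 2K cluster (MCLBYTES) is used so receives can continue
 * under memory pressure, at the cost of a smaller receive buffer.
 */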
static int
mana_load_rx_mbuf(struct mana_port_context *apc, struct mana_rxq *rxq,
    struct mana_recv_buf_oob *rx_oob, bool alloc_mbuf)
{
    bus_dma_segment_t segs[1];
    struct mbuf *mbuf;
    uint32_t mlen;
    int nsegs, err;

    if (alloc_mbuf) {
        mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rxq->datasize);
        if (unlikely(mbuf == NULL)) {
            mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
            if (unlikely(mbuf == NULL)) {
                return ENOMEM;
            }
            mlen = MCLBYTES;
        } else {
            mlen = rxq->datasize;
        }

        mbuf->m_pkthdr.len = mbuf->m_len = mlen;
    } else {
        mbuf = rx_oob->mbuf;
        mlen = rx_oob->mbuf->m_pkthdr.len;
    }

    err = bus_dmamap_load_mbuf_sg(apc->rx_buf_tag, rx_oob->dma_map,
        mbuf, segs, &nsegs, BUS_DMA_NOWAIT);

    if (unlikely((err != 0) || (nsegs != 1))) {
        mana_warn(NULL, "Failed to map mbuf, error: %d, "
            "nsegs: %d\n", err, nsegs);
        counter_u64_add(rxq->stats.dma_mapping_err, 1);
        m_freem(mbuf);
        return EFAULT;
    }

    bus_dmamap_sync(apc->rx_buf_tag, rx_oob->dma_map,
        BUS_DMASYNC_PREREAD);

    rx_oob->mbuf = mbuf;
    rx_oob->sgl[0].address = segs[0].ds_addr;
    rx_oob->sgl[0].size = mlen;
    rx_oob->sgl[0].mem_key = apc->ac->gdma_dev->gpa_mkey;

    return 0;
}
static void
mana_unload_rx_mbuf(struct mana_port_context *apc, struct mana_rxq *rxq,
    struct mana_recv_buf_oob *rx_oob, bool free_mbuf)
{
    bus_dmamap_sync(apc->rx_buf_tag, rx_oob->dma_map,
        BUS_DMASYNC_POSTREAD);
    bus_dmamap_unload(apc->rx_buf_tag, rx_oob->dma_map);

    if (free_mbuf && rx_oob->mbuf) {
        m_freem(rx_oob->mbuf);
        rx_oob->mbuf = NULL;
    }
}


/* Use a couple of mbuf PH_loc fields for the L3 and L4 protocol types */
#define MANA_L3_PROTO(_mbuf)	((_mbuf)->m_pkthdr.PH_loc.sixteen[0])
#define MANA_L4_PROTO(_mbuf)	((_mbuf)->m_pkthdr.PH_loc.sixteen[1])

#define MANA_TXQ_FULL	(IFF_DRV_RUNNING | IFF_DRV_OACTIVE)
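
/*
 * Drain the txq's buf_ring: peek each mbuf, map it, build the work
 * request OOB (including checksum/TSO offload flags), post it to the
 * send WQ and ring the doorbell. Runs with txq->txq_mtx held, either
 * from mana_start_xmit() or from the deferred enqueue task.
 */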
static void
mana_xmit(struct mana_txq *txq)
{
    enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
    struct mana_send_buf_info *tx_info;
    struct ifnet *ndev = txq->ndev;
    struct mbuf *mbuf;
    struct mana_port_context *apc = if_getsoftc(ndev);
    struct mana_port_stats *port_stats = &apc->port_stats;
    struct gdma_dev *gd = apc->ac->gdma_dev;
    uint64_t packets, bytes;
    uint16_t next_to_use;
    struct mana_tx_package pkg = {};
    struct mana_stats *tx_stats;
    struct gdma_queue *gdma_sq;
    struct gdma_queue *gdma_eq;
    struct mana_cq *cq;
    int err, len;

    gdma_sq = txq->gdma_sq;
    cq = &apc->tx_qp[txq->idx].tx_cq;
    gdma_eq = cq->gdma_cq->cq.parent;
    tx_stats = &txq->stats;

    packets = 0;
    bytes = 0;
    next_to_use = txq->next_to_use;

    while ((mbuf = drbr_peek(ndev, txq->txq_br)) != NULL) {
        if (!apc->port_is_up ||
            (if_getdrvflags(ndev) & MANA_TXQ_FULL) != IFF_DRV_RUNNING) {
            drbr_putback(ndev, txq->txq_br, mbuf);
            break;
        }

        if (!mana_can_tx(gdma_sq)) {
            /* SQ is full. Set the IFF_DRV_OACTIVE flag */
            if_setdrvflagbits(apc->ndev, IFF_DRV_OACTIVE, 0);
            counter_u64_add(tx_stats->stop, 1);
            uint64_t stops = counter_u64_fetch(tx_stats->stop);
            uint64_t wakeups = counter_u64_fetch(tx_stats->wakeup);
#define MANA_TXQ_STOP_THRESHOLD 50
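            /*
             * If this queue stops far more often than it wakes up,
             * it is overloaded; steer flows that hash here to an
             * alternate queue instead. For example, with stops = 200
             * and wakeups = 40, traffic moves to queue
             * (idx + 5) % num_queues.
             */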
            if (stops > MANA_TXQ_STOP_THRESHOLD && wakeups > 0 &&
                stops > wakeups && txq->alt_txq_idx == txq->idx) {
                txq->alt_txq_idx =
                    (txq->idx + (stops / wakeups))
                    % apc->num_queues;
                counter_u64_add(tx_stats->alt_chg, 1);
            }

            drbr_putback(ndev, txq->txq_br, mbuf);

            taskqueue_enqueue(gdma_eq->eq.cleanup_tq,
                &gdma_eq->eq.cleanup_task);
            break;
        }

        tx_info = &txq->tx_buf_info[next_to_use];

        memset(&pkg, 0, sizeof(struct mana_tx_package));
        pkg.wqe_req.sgl = pkg.sgl_array;

        err = mana_tx_map_mbuf(apc, tx_info, &mbuf, &pkg, tx_stats);
        if (unlikely(err)) {
            mana_dbg(NULL,
                "Failed to map tx mbuf, err %d\n", err);

            counter_u64_add(tx_stats->dma_mapping_err, 1);

            /* The mbuf is still there. Free it */
            m_freem(mbuf);
            /* Advance the drbr queue */
            drbr_advance(ndev, txq->txq_br);
            continue;
        }

        pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
        pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;

        if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) {
            pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset;
            pkt_fmt = MANA_LONG_PKT_FMT;
        } else {
            pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset;
        }

        pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;

        if (pkt_fmt == MANA_SHORT_PKT_FMT)
            pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
        else
            pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);

        pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
        pkg.wqe_req.flags = 0;
        pkg.wqe_req.client_data_unit = 0;

        if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) {
            if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IP)
                pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
            else
                pkg.tx_oob.s_oob.is_outer_ipv6 = 1;

            pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
            pkg.tx_oob.s_oob.comp_tcp_csum = 1;
            pkg.tx_oob.s_oob.trans_off = mbuf->m_pkthdr.l3hlen;

            pkg.wqe_req.client_data_unit = mbuf->m_pkthdr.tso_segsz;
            pkg.wqe_req.flags = GDMA_WR_OOB_IN_SGL | GDMA_WR_PAD_BY_SGE0;
        } else if (mbuf->m_pkthdr.csum_flags &
            (CSUM_IP_UDP | CSUM_IP_TCP | CSUM_IP6_UDP | CSUM_IP6_TCP)) {
            if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IP) {
                pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
                pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
            } else {
                pkg.tx_oob.s_oob.is_outer_ipv6 = 1;
            }

            if (MANA_L4_PROTO(mbuf) == IPPROTO_TCP) {
                pkg.tx_oob.s_oob.comp_tcp_csum = 1;
                pkg.tx_oob.s_oob.trans_off =
                    mbuf->m_pkthdr.l3hlen;
            } else {
                pkg.tx_oob.s_oob.comp_udp_csum = 1;
            }
        } else if (mbuf->m_pkthdr.csum_flags & CSUM_IP) {
            pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
            pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
        } else {
            if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IP)
                pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
            else if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IPV6)
                pkg.tx_oob.s_oob.is_outer_ipv6 = 1;
        }

        len = mbuf->m_pkthdr.len;

        err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
            (struct gdma_posted_wqe_info *)&tx_info->wqe_inf);
        if (unlikely(err)) {
            /* Should not happen */
            if_printf(ndev, "Failed to post TX OOB: %d\n", err);

            mana_tx_unmap_mbuf(apc, tx_info);

            drbr_advance(ndev, txq->txq_br);
            continue;
        }

        next_to_use =
            (next_to_use + 1) % MAX_SEND_BUFFERS_PER_QUEUE;

        (void)atomic_inc_return(&txq->pending_sends);

        drbr_advance(ndev, txq->txq_br);

        mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq);

        packets++;
        bytes += len;
    }

    counter_enter();
    counter_u64_add_protected(tx_stats->packets, packets);
    counter_u64_add_protected(port_stats->tx_packets, packets);
    counter_u64_add_protected(tx_stats->bytes, bytes);
    counter_u64_add_protected(port_stats->tx_bytes, bytes);
    counter_exit();

    txq->next_to_use = next_to_use;
}
static void
mana_xmit_taskfunc(void *arg, int pending)
{
    struct mana_txq *txq = (struct mana_txq *)arg;
    struct ifnet *ndev = txq->ndev;
    struct mana_port_context *apc = if_getsoftc(ndev);

    while (!drbr_empty(ndev, txq->txq_br) && apc->port_is_up &&
        (if_getdrvflags(ndev) & MANA_TXQ_FULL) == IFF_DRV_RUNNING) {
        mtx_lock(&txq->txq_mtx);
        mana_xmit(txq);
        mtx_unlock(&txq->txq_mtx);
    }
}
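
/*
 * PULLUP_HDR makes sure the first 'len' bytes of the packet sit in the
 * leading mbuf so headers can be dereferenced directly; m_pullup()
 * frees the chain on failure, which is why the macro returns NULL.
 */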
#define PULLUP_HDR(m, len)				\
do {							\
    if (unlikely((m)->m_len < (len))) {			\
        (m) = m_pullup((m), (len));			\
        if ((m) == NULL)				\
            return NULL;				\
    }							\
} while (0)

/*
 * If this function fails, the mbuf is freed.
 */
static inline struct mbuf *
mana_tso_fixup(struct mbuf *mbuf)
{
    struct ether_vlan_header *eh = mtod(mbuf, struct ether_vlan_header *);
    struct tcphdr *th;
    uint16_t etype;
    int ehlen;

    if (eh->evl_encap_proto == ntohs(ETHERTYPE_VLAN)) {
        etype = ntohs(eh->evl_proto);
        ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
    } else {
        etype = ntohs(eh->evl_encap_proto);
        ehlen = ETHER_HDR_LEN;
    }

    if (etype == ETHERTYPE_IP) {
        struct ip *ip;
        int iphlen;

        PULLUP_HDR(mbuf, ehlen + sizeof(*ip));
        ip = mtodo(mbuf, ehlen);
        iphlen = ip->ip_hl << 2;
        mbuf->m_pkthdr.l3hlen = ehlen + iphlen;

        PULLUP_HDR(mbuf, ehlen + iphlen + sizeof(*th));
        th = mtodo(mbuf, ehlen + iphlen);

        ip->ip_len = 0;
        ip->ip_sum = 0;
        th->th_sum = in_pseudo(ip->ip_src.s_addr,
            ip->ip_dst.s_addr, htons(IPPROTO_TCP));
    } else if (etype == ETHERTYPE_IPV6) {
        struct ip6_hdr *ip6;

        PULLUP_HDR(mbuf, ehlen + sizeof(*ip6) + sizeof(*th));
        ip6 = mtodo(mbuf, ehlen);
        if (ip6->ip6_nxt != IPPROTO_TCP) {
            /* Something is really wrong; just return */
            mana_dbg(NULL, "TSO mbuf not TCP, freed.\n");
            m_freem(mbuf);
            return NULL;
        }
        mbuf->m_pkthdr.l3hlen = ehlen + sizeof(*ip6);

        th = mtodo(mbuf, ehlen + sizeof(*ip6));

        ip6->ip6_plen = 0;
        th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
    } else {
        /* CSUM_TSO is set but not IP protocol. */
        mana_warn(NULL, "TSO mbuf not right, freed.\n");
        m_freem(mbuf);
        return NULL;
    }

    MANA_L3_PROTO(mbuf) = etype;

    return mbuf;
}
/*
 * If this function fails, the mbuf is freed.
 */
static inline struct mbuf *
mana_mbuf_csum_check(struct mbuf *mbuf)
{
    struct ether_vlan_header *eh = mtod(mbuf, struct ether_vlan_header *);
    struct mbuf *mbuf_next;
    uint16_t etype;
    int offset;
    int ehlen;

    if (eh->evl_encap_proto == ntohs(ETHERTYPE_VLAN)) {
        etype = ntohs(eh->evl_proto);
        ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
    } else {
        etype = ntohs(eh->evl_encap_proto);
        ehlen = ETHER_HDR_LEN;
    }

    mbuf_next = m_getptr(mbuf, ehlen, &offset);

    MANA_L4_PROTO(mbuf) = 0;
    if (etype == ETHERTYPE_IP) {
        const struct ip *ip;
        int iphlen;

        ip = (struct ip *)(mtodo(mbuf_next, offset));
        iphlen = ip->ip_hl << 2;
        mbuf->m_pkthdr.l3hlen = ehlen + iphlen;

        MANA_L4_PROTO(mbuf) = ip->ip_p;
    } else if (etype == ETHERTYPE_IPV6) {
        const struct ip6_hdr *ip6;

        ip6 = (struct ip6_hdr *)(mtodo(mbuf_next, offset));
        mbuf->m_pkthdr.l3hlen = ehlen + sizeof(*ip6);

        MANA_L4_PROTO(mbuf) = ip6->ip6_nxt;
    } else {
        MANA_L4_PROTO(mbuf) = 0;
    }

    MANA_L3_PROTO(mbuf) = etype;

    return mbuf;
}
static int
mana_start_xmit(struct ifnet *ifp, struct mbuf *m)
{
    struct mana_port_context *apc = if_getsoftc(ifp);
    struct mana_txq *txq;
    int is_drbr_empty;
    uint16_t txq_id;
    int err;

    if (unlikely((!apc->port_is_up) ||
        (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0))
        return ENODEV;

    if (m->m_pkthdr.csum_flags & CSUM_TSO) {
        m = mana_tso_fixup(m);
        if (unlikely(m == NULL)) {
            counter_enter();
            counter_u64_add_protected(apc->port_stats.tx_drops, 1);
            counter_exit();
            return EIO;
        }
    } else {
        m = mana_mbuf_csum_check(m);
        if (unlikely(m == NULL)) {
            counter_enter();
            counter_u64_add_protected(apc->port_stats.tx_drops, 1);
            counter_exit();
            return EIO;
        }
    }

    if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
        uint32_t hash = m->m_pkthdr.flowid;
        txq_id = apc->indir_table[(hash) & MANA_INDIRECT_TABLE_MASK] %
            apc->num_queues;
    } else {
        txq_id = m->m_pkthdr.flowid % apc->num_queues;
    }

    if (apc->enable_tx_altq)
        txq_id = apc->tx_qp[txq_id].txq.alt_txq_idx;

    txq = &apc->tx_qp[txq_id].txq;

    is_drbr_empty = drbr_empty(ifp, txq->txq_br);
    err = drbr_enqueue(ifp, txq->txq_br, m);
    if (unlikely(err)) {
        mana_warn(NULL, "txq %u failed to enqueue: %d\n",
            txq_id, err);
        taskqueue_enqueue(txq->enqueue_tq, &txq->enqueue_task);
        return err;
    }

    if (is_drbr_empty && mtx_trylock(&txq->txq_mtx)) {
        mana_xmit(txq);
        mtx_unlock(&txq->txq_mtx);
    } else {
        taskqueue_enqueue(txq->enqueue_tq, &txq->enqueue_task);
    }

    return 0;
}
static void
mana_cleanup_port_context(struct mana_port_context *apc)
{
    bus_dma_tag_destroy(apc->tx_buf_tag);
    bus_dma_tag_destroy(apc->rx_buf_tag);
    apc->rx_buf_tag = NULL;

    free(apc->rxqs, M_DEVBUF);
    apc->rxqs = NULL;

    mana_free_counters((counter_u64_t *)&apc->port_stats,
        sizeof(struct mana_port_stats));
}
static int
mana_init_port_context(struct mana_port_context *apc)
{
    device_t dev = apc->ac->gdma_dev->gdma_context->dev;
    uint32_t tso_maxsize;
    int err;

    tso_maxsize = MAX_MBUF_FRAGS * MANA_TSO_MAXSEG_SZ -
        (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);

    /* Create DMA tag for tx bufs */
    err = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
        1, 0,			/* alignment, boundary	*/
        BUS_SPACE_MAXADDR,	/* lowaddr		*/
        BUS_SPACE_MAXADDR,	/* highaddr		*/
        NULL, NULL,		/* filter, filterarg	*/
        tso_maxsize,		/* maxsize		*/
        MAX_MBUF_FRAGS,		/* nsegments		*/
        tso_maxsize,		/* maxsegsize		*/
        0,			/* flags		*/
        NULL, NULL,		/* lockfunc, lockfuncarg*/
        &apc->tx_buf_tag);
    if (unlikely(err)) {
        device_printf(dev, "Failed to create TX DMA tag\n");
        return err;
    }

    /* Create DMA tag for rx bufs */
    err = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
        64, 0,			/* alignment, boundary	*/
        BUS_SPACE_MAXADDR,	/* lowaddr		*/
        BUS_SPACE_MAXADDR,	/* highaddr		*/
        NULL, NULL,		/* filter, filterarg	*/
        MJUMPAGESIZE,		/* maxsize		*/
        1,			/* nsegments		*/
        MJUMPAGESIZE,		/* maxsegsize		*/
        0,			/* flags		*/
        NULL, NULL,		/* lockfunc, lockfuncarg*/
        &apc->rx_buf_tag);
    if (unlikely(err)) {
        device_printf(dev, "Failed to create RX DMA tag\n");
        return err;
    }

    apc->rxqs = mallocarray(apc->num_queues, sizeof(struct mana_rxq *),
        M_DEVBUF, M_WAITOK | M_ZERO);

    if (!apc->rxqs) {
        bus_dma_tag_destroy(apc->tx_buf_tag);
        bus_dma_tag_destroy(apc->rx_buf_tag);
        apc->rx_buf_tag = NULL;
        return ENOMEM;
    }

    return 0;
}
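
/*
 * Send a management request to the device over the GDMA channel and
 * sanity-check the reply: the response must echo back the same dev_id
 * and activity_id that were sent, otherwise it is treated as EPROTO.
 */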
static int
mana_send_request(struct mana_context *ac, void *in_buf,
    uint32_t in_len, void *out_buf, uint32_t out_len)
{
    struct gdma_context *gc = ac->gdma_dev->gdma_context;
    struct gdma_resp_hdr *resp = out_buf;
    struct gdma_req_hdr *req = in_buf;
    device_t dev = gc->dev;
    static atomic_t activity_id;
    int err;

    req->dev_id = gc->mana.dev_id;
    req->activity_id = atomic_inc_return(&activity_id);

    mana_dbg(NULL, "activity_id = %u\n", activity_id);

    err = mana_gd_send_request(gc, in_len, in_buf, out_len,
        out_buf);
    if (err || resp->status) {
        device_printf(dev, "Failed to send mana message: %d, 0x%x\n",
            err, resp->status);
        return err ? err : EPROTO;
    }

    if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 ||
        req->activity_id != resp->activity_id) {
        device_printf(dev,
            "Unexpected mana message response: %x,%x,%x,%x\n",
            req->dev_id.as_uint32, resp->dev_id.as_uint32,
            req->activity_id, resp->activity_id);
        return EPROTO;
    }

    return 0;
}
static int
mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr,
    const enum mana_command_code expected_code,
    const uint32_t min_size)
{
    if (resp_hdr->response.msg_type != expected_code)
        return EPROTO;

    if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1)
        return EPROTO;

    if (resp_hdr->response.msg_size < min_size)
        return EPROTO;

    return 0;
}
int
mana_query_device_cfg(struct mana_context *ac, uint32_t proto_major_ver,
    uint32_t proto_minor_ver, uint32_t proto_micro_ver,
    uint16_t *max_num_vports)
{
    struct gdma_context *gc = ac->gdma_dev->gdma_context;
    struct mana_query_device_cfg_resp resp = {};
    struct mana_query_device_cfg_req req = {};
    device_t dev = gc->dev;
    int err = 0;

    mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
        sizeof(req), sizeof(resp));
    req.proto_major_ver = proto_major_ver;
    req.proto_minor_ver = proto_minor_ver;
    req.proto_micro_ver = proto_micro_ver;

    err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp));
    if (err) {
        device_printf(dev, "Failed to query config: %d", err);
        return err;
    }

    err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_DEV_CONFIG,
        sizeof(resp));
    if (err || resp.hdr.status) {
        device_printf(dev, "Invalid query result: %d, 0x%x\n", err,
            resp.hdr.status);
        if (!err)
            err = EPROTO;
        return err;
    }

    *max_num_vports = resp.max_num_vports;

    mana_dbg(NULL, "mana max_num_vports from device = %d\n",
        *max_num_vports);

    return 0;
}
static int
mana_query_vport_cfg(struct mana_port_context *apc, uint32_t vport_index,
    uint32_t *max_sq, uint32_t *max_rq, uint32_t *num_indir_entry)
{
    struct mana_query_vport_cfg_resp resp = {};
    struct mana_query_vport_cfg_req req = {};
    int err;

    mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG,
        sizeof(req), sizeof(resp));

    req.vport_index = vport_index;

    err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
        sizeof(resp));
    if (err)
        return err;

    err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG,
        sizeof(resp));
    if (err)
        return err;

    if (resp.hdr.status)
        return EPROTO;

    *max_sq = resp.max_num_sq;
    *max_rq = resp.max_num_rq;
    *num_indir_entry = resp.num_indirection_ent;

    apc->port_handle = resp.vport;
    memcpy(apc->mac_addr, resp.mac_addr, ETHER_ADDR_LEN);

    return 0;
}
static int
mana_cfg_vport(struct mana_port_context *apc, uint32_t protection_dom_id,
    uint32_t doorbell_pg_id)
{
    struct mana_config_vport_resp resp = {};
    struct mana_config_vport_req req = {};
    int err;

    mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX,
        sizeof(req), sizeof(resp));
    req.vport = apc->port_handle;
    req.pdid = protection_dom_id;
    req.doorbell_pageid = doorbell_pg_id;

    err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
        sizeof(resp));
    if (err) {
        if_printf(apc->ndev, "Failed to configure vPort: %d\n", err);
        goto out;
    }

    err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_TX,
        sizeof(resp));
    if (err || resp.hdr.status) {
        if_printf(apc->ndev, "Failed to configure vPort: %d, 0x%x\n",
            err, resp.hdr.status);
        if (!err)
            err = EPROTO;
        goto out;
    }

    apc->tx_shortform_allowed = resp.short_form_allowed;
    apc->tx_vp_offset = resp.tx_vport_offset;

out:
    return err;
}
static int
mana_cfg_vport_steering(struct mana_port_context *apc,
    enum TRI_STATE rx,
    bool update_default_rxobj, bool update_key,
    bool update_tab)
{
    uint16_t num_entries = MANA_INDIRECT_TABLE_SIZE;
    struct mana_cfg_rx_steer_req *req = NULL;
    struct mana_cfg_rx_steer_resp resp = {};
    struct ifnet *ndev = apc->ndev;
    mana_handle_t *req_indir_tab;
    uint32_t req_buf_size;
    int err;

    req_buf_size = sizeof(*req) + sizeof(mana_handle_t) * num_entries;
    req = malloc(req_buf_size, M_DEVBUF, M_WAITOK | M_ZERO);

    mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
        sizeof(resp));

    req->vport = apc->port_handle;
    req->num_indir_entries = num_entries;
    req->indir_tab_offset = sizeof(*req);
    req->rx_enable = rx;
    req->rss_enable = apc->rss_state;
    req->update_default_rxobj = update_default_rxobj;
    req->update_hashkey = update_key;
    req->update_indir_tab = update_tab;
    req->default_rxobj = apc->default_rxobj;

    if (update_key)
        memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);

    if (update_tab) {
        req_indir_tab = (mana_handle_t *)(req + 1);
        memcpy(req_indir_tab, apc->rxobj_table,
            req->num_indir_entries * sizeof(mana_handle_t));
    }

    err = mana_send_request(apc->ac, req, req_buf_size, &resp,
        sizeof(resp));
    if (err) {
        if_printf(ndev, "Failed to configure vPort RX: %d\n", err);
        goto out;
    }

    err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_RX,
        sizeof(resp));
    if (err) {
        if_printf(ndev, "vPort RX configuration failed: %d\n", err);
        goto out;
    }

    if (resp.hdr.status) {
        if_printf(ndev, "vPort RX configuration failed: 0x%x\n",
            resp.hdr.status);
        err = EPROTO;
    }

out:
    free(req, M_DEVBUF);
    return err;
}
static int
mana_create_wq_obj(struct mana_port_context *apc,
    mana_handle_t vport,
    uint32_t wq_type, struct mana_obj_spec *wq_spec,
    struct mana_obj_spec *cq_spec,
    mana_handle_t *wq_obj)
{
    struct mana_create_wqobj_resp resp = {};
    struct mana_create_wqobj_req req = {};
    struct ifnet *ndev = apc->ndev;
    int err;

    mana_gd_init_req_hdr(&req.hdr, MANA_CREATE_WQ_OBJ,
        sizeof(req), sizeof(resp));
    req.vport = vport;
    req.wq_type = wq_type;
    req.wq_gdma_region = wq_spec->gdma_region;
    req.cq_gdma_region = cq_spec->gdma_region;
    req.wq_size = wq_spec->queue_size;
    req.cq_size = cq_spec->queue_size;
    req.cq_moderation_ctx_id = cq_spec->modr_ctx_id;
    req.cq_parent_qid = cq_spec->attached_eq;

    err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
        sizeof(resp));
    if (err) {
        if_printf(ndev, "Failed to create WQ object: %d\n", err);
        goto out;
    }

    err = mana_verify_resp_hdr(&resp.hdr, MANA_CREATE_WQ_OBJ,
        sizeof(resp));
    if (err || resp.hdr.status) {
        if_printf(ndev, "Failed to create WQ object: %d, 0x%x\n", err,
            resp.hdr.status);
        if (!err)
            err = EPROTO;
        goto out;
    }

    if (resp.wq_obj == INVALID_MANA_HANDLE) {
        if_printf(ndev, "Got an invalid WQ object handle\n");
        err = EPROTO;
        goto out;
    }

    *wq_obj = resp.wq_obj;
    wq_spec->queue_index = resp.wq_id;
    cq_spec->queue_index = resp.cq_id;

    return 0;
out:
    return err;
}
static void
mana_destroy_wq_obj(struct mana_port_context *apc, uint32_t wq_type,
    mana_handle_t wq_obj)
{
    struct mana_destroy_wqobj_resp resp = {};
    struct mana_destroy_wqobj_req req = {};
    struct ifnet *ndev = apc->ndev;
    int err;

    mana_gd_init_req_hdr(&req.hdr, MANA_DESTROY_WQ_OBJ,
        sizeof(req), sizeof(resp));
    req.wq_type = wq_type;
    req.wq_obj_handle = wq_obj;

    err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
        sizeof(resp));
    if (err) {
        if_printf(ndev, "Failed to destroy WQ object: %d\n", err);
        return;
    }

    err = mana_verify_resp_hdr(&resp.hdr, MANA_DESTROY_WQ_OBJ,
        sizeof(resp));
    if (err || resp.hdr.status)
        if_printf(ndev, "Failed to destroy WQ object: %d, 0x%x\n",
            err, resp.hdr.status);
}
static void
mana_init_cqe_poll_buf(struct gdma_comp *cqe_poll_buf)
{
    int i;

    for (i = 0; i < CQE_POLLING_BUFFER; i++)
        memset(&cqe_poll_buf[i], 0, sizeof(struct gdma_comp));
}

static void
mana_destroy_eq(struct gdma_context *gc, struct mana_port_context *apc)
{
    struct gdma_queue *eq;
    int i;

    if (!apc->eqs)
        return;

    for (i = 0; i < apc->num_queues; i++) {
        eq = apc->eqs[i].eq;
        if (!eq)
            continue;

        mana_gd_destroy_queue(gc, eq);
    }

    free(apc->eqs, M_DEVBUF);
    apc->eqs = NULL;
}
static int
mana_create_eq(struct mana_port_context *apc)
{
    struct gdma_dev *gd = apc->ac->gdma_dev;
    struct gdma_queue_spec spec = {};
    int err;
    int i;

    apc->eqs = mallocarray(apc->num_queues, sizeof(struct mana_eq),
        M_DEVBUF, M_WAITOK | M_ZERO);
    if (!apc->eqs)
        return ENOMEM;

    spec.type = GDMA_EQ;
    spec.monitor_avl_buf = false;
    spec.queue_size = EQ_SIZE;
    spec.eq.callback = NULL;
    spec.eq.context = apc->eqs;
    spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
    spec.eq.ndev = apc->ndev;

    for (i = 0; i < apc->num_queues; i++) {
        mana_init_cqe_poll_buf(apc->eqs[i].cqe_poll);

        err = mana_gd_create_mana_eq(gd, &spec, &apc->eqs[i].eq);
        if (err)
            goto out;
    }

    return 0;
out:
    mana_destroy_eq(gd->gdma_context, apc);
    return err;
}
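
/*
 * wq->head and wq->tail are free-running 32-bit counters, so
 * (head - tail) yields the used space even after they wrap; a new
 * used space larger than the old one means the tail advanced past
 * the head, which the sanity check below reports.
 */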
static void
mana_move_wq_tail(struct gdma_queue *wq, uint32_t num_units)
{
    uint32_t used_space_old;
    uint32_t used_space_new;

    used_space_old = wq->head - wq->tail;
    used_space_new = wq->head - (wq->tail + num_units);

    if (used_space_new > used_space_old) {
        mana_err(NULL,
            "WARNING: new used space %u greater than old one %u\n",
            used_space_new, used_space_old);
    }

    wq->tail += num_units;
}
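
/*
 * Reap TX completions: unmap and free each completed mbuf, advance the
 * SQ tail by the number of WQE units retired, and clear IFF_DRV_OACTIVE
 * (rechecked under the txq lock) once enough space is available,
 * rescheduling the enqueue task to resume transmission.
 */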
static void
mana_poll_tx_cq(struct mana_cq *cq)
{
    struct gdma_comp *completions = cq->gdma_comp_buf;
    struct gdma_posted_wqe_info *wqe_info;
    struct mana_send_buf_info *tx_info;
    unsigned int pkt_transmitted = 0;
    unsigned int wqe_unit_cnt = 0;
    struct mana_txq *txq = cq->txq;
    struct mana_port_context *apc;
    uint16_t next_to_complete;
    struct ifnet *ndev = txq->ndev;
    int comp_read;
    int txq_idx = txq->idx;
    int i;
    int sa_drop = 0;

    struct gdma_queue *gdma_wq;
    unsigned int avail_space;
    bool txq_full = false;

    apc = if_getsoftc(ndev);

    comp_read = mana_gd_poll_cq(cq->gdma_cq, completions,
        CQE_POLLING_BUFFER);

    next_to_complete = txq->next_to_complete;
    for (i = 0; i < comp_read; i++) {
        struct mana_tx_comp_oob *cqe_oob;

        if (!completions[i].is_sq) {
            mana_err(NULL, "WARNING: Not for SQ\n");
            continue;
        }

        cqe_oob = (struct mana_tx_comp_oob *)completions[i].cqe_data;
        if (cqe_oob->cqe_hdr.client_type !=
            MANA_CQE_COMPLETION) {
            mana_err(NULL,
                "WARNING: Invalid CQE client type %u\n",
                cqe_oob->cqe_hdr.client_type);
            continue;
        }

        switch (cqe_oob->cqe_hdr.cqe_type) {
        case CQE_TX_OKAY:
            break;

        case CQE_TX_SA_DROP:
        case CQE_TX_MTU_DROP:
        case CQE_TX_INVALID_OOB:
        case CQE_TX_INVALID_ETH_TYPE:
        case CQE_TX_HDR_PROCESSING_ERROR:
        case CQE_TX_VF_DISABLED:
        case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
        case CQE_TX_VPORT_DISABLED:
        case CQE_TX_VLAN_TAGGING_VIOLATION:
            sa_drop++;
            mana_warn(NULL,
                "TX: txq %d CQE error %d, ntc = %d, "
                "pending sends = %d: err ignored.\n",
                txq_idx, cqe_oob->cqe_hdr.cqe_type,
                next_to_complete, txq->pending_sends);
            break;

        default:
            /* If the CQE type is unexpected, log an error,
             * and go through the error path.
             */
            mana_err(NULL,
                "ERROR: TX: Unexpected CQE type %d: HW BUG?\n",
                cqe_oob->cqe_hdr.cqe_type);
            return;
        }

        if (txq->gdma_txq_id != completions[i].wq_num) {
            mana_err(NULL,
                "txq gdma id not match completion wq num: "
                "%d != %d\n",
                txq->gdma_txq_id, completions[i].wq_num);
            break;
        }

        tx_info = &txq->tx_buf_info[next_to_complete];
        if (!tx_info->mbuf) {
            mana_err(NULL,
                "WARNING: txq %d Empty mbuf on tx_info: %u, "
                "ntu = %u, pending_sends = %d, "
                "transmitted = %d, sa_drop = %d, i = %d, comp_read = %d\n",
                txq_idx, next_to_complete, txq->next_to_use,
                txq->pending_sends, pkt_transmitted, sa_drop,
                i, comp_read);
            break;
        }

        wqe_info = &tx_info->wqe_inf;
        wqe_unit_cnt += wqe_info->wqe_size_in_bu;

        mana_tx_unmap_mbuf(apc, tx_info);

        next_to_complete =
            (next_to_complete + 1) % MAX_SEND_BUFFERS_PER_QUEUE;

        pkt_transmitted++;
    }
    txq->next_to_complete = next_to_complete;

    if (wqe_unit_cnt == 0) {
        mana_err(NULL,
            "WARNING: TX ring not proceeding!\n");
        return;
    }

    mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt);

    /* Ensure tail updated before checking q stop */
    wmb();

    gdma_wq = txq->gdma_sq;
    avail_space = mana_gd_wq_avail_space(gdma_wq);

    if ((if_getdrvflags(ndev) & MANA_TXQ_FULL) == MANA_TXQ_FULL) {
        txq_full = true;
    }

    /* Ensure checking txq_full before apc->port_is_up. */
    rmb();

    if (txq_full && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
        /* Grab the txq lock and re-test */
        mtx_lock(&txq->txq_mtx);
        avail_space = mana_gd_wq_avail_space(gdma_wq);

        if ((if_getdrvflags(ndev) & MANA_TXQ_FULL) == MANA_TXQ_FULL &&
            apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
            /* Clear the Q full flag */
            if_setdrvflagbits(apc->ndev, IFF_DRV_RUNNING,
                IFF_DRV_OACTIVE);
            counter_u64_add(txq->stats.wakeup, 1);
            if (txq->alt_txq_idx != txq->idx) {
                uint64_t stops = counter_u64_fetch(txq->stats.stop);
                uint64_t wakeups = counter_u64_fetch(txq->stats.wakeup);
                /* Reset alt_txq_idx back if it is not overloaded */
                if (stops < wakeups) {
                    txq->alt_txq_idx = txq->idx;
                    counter_u64_add(txq->stats.alt_reset, 1);
                }
            }

            /* Schedule a tx enqueue task */
            taskqueue_enqueue(txq->enqueue_tq, &txq->enqueue_task);
        }
        mtx_unlock(&txq->txq_mtx);
    }

    if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0)
        mana_err(NULL,
            "WARNING: TX %d pending_sends error: %d\n",
            txq->idx, txq->pending_sends);
}
static void
mana_post_pkt_rxq(struct mana_rxq *rxq)
{
    struct mana_recv_buf_oob *recv_buf_oob;
    uint32_t curr_index;
    int err;

    curr_index = rxq->buf_index++;
    if (rxq->buf_index == rxq->num_rx_buf)
        rxq->buf_index = 0;

    recv_buf_oob = &rxq->rx_oobs[curr_index];

    err = mana_gd_post_and_ring(rxq->gdma_rq, &recv_buf_oob->wqe_req,
        &recv_buf_oob->wqe_inf);
    if (err) {
        mana_err(NULL, "WARNING: rxq %u post pkt err %d\n",
            rxq->rxq_idx, err);
        return;
    }

    if (recv_buf_oob->wqe_inf.wqe_size_in_bu != 1) {
        mana_err(NULL, "WARNING: rxq %u wqe_size_in_bu %u\n",
            rxq->rxq_idx, recv_buf_oob->wqe_inf.wqe_size_in_bu);
    }
}
static void
mana_rx_mbuf(struct mbuf *mbuf, struct mana_rxcomp_oob *cqe,
    struct mana_rxq *rxq)
{
    struct mana_stats *rx_stats = &rxq->stats;
    struct ifnet *ndev = rxq->ndev;
    uint32_t pkt_len = cqe->ppi[0].pkt_len;
    uint16_t rxq_idx = rxq->rxq_idx;
    struct mana_port_context *apc;
    struct gdma_queue *eq;
    bool do_lro = false;
    bool do_if_input;

    apc = if_getsoftc(ndev);
    eq = apc->eqs[rxq_idx].eq;

    if (!mbuf) {
        return;
    }

    mbuf->m_flags |= M_PKTHDR;
    mbuf->m_pkthdr.len = pkt_len;
    mbuf->m_len = pkt_len;
    mbuf->m_pkthdr.rcvif = ndev;

    if ((ndev->if_capenable & IFCAP_RXCSUM ||
        ndev->if_capenable & IFCAP_RXCSUM_IPV6) &&
        (cqe->rx_iphdr_csum_succeed)) {
        mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
        mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID;
        if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed) {
            mbuf->m_pkthdr.csum_flags |=
                (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
            mbuf->m_pkthdr.csum_data = 0xffff;

            if (cqe->rx_tcp_csum_succeed)
                do_lro = true;
        }
    }

    if (cqe->rx_hashtype != 0) {
        mbuf->m_pkthdr.flowid = cqe->ppi[0].pkt_hash;
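        /*
         * Translate the NDIS hash type reported in the CQE into the
         * corresponding M_HASHTYPE_RSS_* value, so the stack can use
         * the hardware flow hash (e.g. for netisr dispatch or lagg).
         */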
        uint16_t hashtype = cqe->rx_hashtype;
        if (hashtype & NDIS_HASH_IPV4_MASK) {
            hashtype &= NDIS_HASH_IPV4_MASK;
            switch (hashtype) {
            case NDIS_HASH_TCP_IPV4:
                M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV4);
                break;
            case NDIS_HASH_UDP_IPV4:
                M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV4);
                break;
            default:
                M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV4);
            }
        } else if (hashtype & NDIS_HASH_IPV6_MASK) {
            hashtype &= NDIS_HASH_IPV6_MASK;
            switch (hashtype) {
            case NDIS_HASH_TCP_IPV6:
                M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV6);
                break;
            case NDIS_HASH_TCP_IPV6_EX:
                M_HASHTYPE_SET(mbuf,
                    M_HASHTYPE_RSS_TCP_IPV6_EX);
                break;
            case NDIS_HASH_UDP_IPV6:
                M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV6);
                break;
            case NDIS_HASH_UDP_IPV6_EX:
                M_HASHTYPE_SET(mbuf,
                    M_HASHTYPE_RSS_UDP_IPV6_EX);
                break;
            default:
                M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV6);
            }
        } else {
            M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH);
        }
    } else {
        mbuf->m_pkthdr.flowid = rxq_idx;
        M_HASHTYPE_SET(mbuf, M_HASHTYPE_NONE);
    }

    do_if_input = true;
    if ((ndev->if_capenable & IFCAP_LRO) && do_lro) {
        if (rxq->lro.lro_cnt != 0 &&
            tcp_lro_rx(&rxq->lro, mbuf, 0) == 0)
            do_if_input = false;
    }
    if (do_if_input) {
        ndev->if_input(ndev, mbuf);
    }

    counter_enter();
    counter_u64_add_protected(rx_stats->packets, 1);
    counter_u64_add_protected(apc->port_stats.rx_packets, 1);
    counter_u64_add_protected(rx_stats->bytes, pkt_len);
    counter_u64_add_protected(apc->port_stats.rx_bytes, pkt_len);
    counter_exit();
}
static void
mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
    struct gdma_comp *cqe)
{
    struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data;
    struct mana_recv_buf_oob *rxbuf_oob;
    struct ifnet *ndev = rxq->ndev;
    struct mana_port_context *apc;
    struct mbuf *old_mbuf;
    uint32_t curr, pktlen;
    int err;

    switch (oob->cqe_hdr.cqe_type) {
    case CQE_RX_OKAY:
        break;

    case CQE_RX_TRUNCATED:
        if_printf(ndev, "Dropped a truncated packet\n");
        return;

    case CQE_RX_COALESCED_4:
        if_printf(ndev, "RX coalescing is unsupported\n");
        return;

    case CQE_RX_OBJECT_FENCE:
        if_printf(ndev, "RX Fencing is unsupported\n");
        return;

    default:
        if_printf(ndev, "Unknown RX CQE type = %d\n",
            oob->cqe_hdr.cqe_type);
        return;
    }

    if (oob->cqe_hdr.cqe_type != CQE_RX_OKAY)
        return;

    pktlen = oob->ppi[0].pkt_len;

    if (pktlen == 0) {
        /* data packets should never have a packet length of zero */
#if defined(__amd64__)
        if_printf(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%lx\n",
            rxq->gdma_id, cq->gdma_id, rxq->rxobj);
#else
        if_printf(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%llx\n",
            rxq->gdma_id, cq->gdma_id, rxq->rxobj);
#endif
        return;
    }
    curr = rxq->buf_index;
    rxbuf_oob = &rxq->rx_oobs[curr];
    if (rxbuf_oob->wqe_inf.wqe_size_in_bu != 1) {
        mana_err(NULL, "WARNING: Rx Incorrect complete "
            "WQE size %u\n",
            rxbuf_oob->wqe_inf.wqe_size_in_bu);
    }

    apc = if_getsoftc(ndev);

    old_mbuf = rxbuf_oob->mbuf;

    /* Unload DMA map for the old mbuf */
    mana_unload_rx_mbuf(apc, rxq, rxbuf_oob, false);

    /* Load a new mbuf to replace the old one */
    err = mana_load_rx_mbuf(apc, rxq, rxbuf_oob, true);
    if (err) {
        mana_dbg(NULL,
            "failed to load rx mbuf, err = %d, packet dropped.\n",
            err);
        counter_u64_add(rxq->stats.mbuf_alloc_fail, 1);
        /*
         * Failed to load new mbuf, rxbuf_oob->mbuf is still
         * pointing to the old one. Drop the packet.
         */
        old_mbuf = NULL;
        /* Reload the existing mbuf */
        mana_load_rx_mbuf(apc, rxq, rxbuf_oob, false);
    }

    mana_rx_mbuf(old_mbuf, oob, rxq);

    mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);

    mana_post_pkt_rxq(rxq);
}
static void
mana_poll_rx_cq(struct mana_cq *cq)
{
    struct gdma_comp *comp = cq->gdma_comp_buf;
    int comp_read, i;

    comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
    KASSERT(comp_read <= CQE_POLLING_BUFFER,
        ("comp_read %d greater than buf size %d",
        comp_read, CQE_POLLING_BUFFER));

    for (i = 0; i < comp_read; i++) {
        if (comp[i].is_sq == true) {
            mana_err(NULL,
                "WARNING: CQE not for receive queue\n");
            continue;
        }

        /* verify recv cqe references the right rxq */
        if (comp[i].wq_num != cq->rxq->gdma_id) {
            mana_err(NULL,
                "WARNING: Received CQE %d not for "
                "this receive queue %d\n",
                comp[i].wq_num, cq->rxq->gdma_id);
            continue;
        }

        mana_process_rx_cqe(cq->rxq, cq, &comp[i]);
    }

    tcp_lro_flush_all(&cq->rxq->lro);
}
static void
mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
{
    struct mana_cq *cq = context;

    KASSERT(cq->gdma_cq == gdma_queue,
        ("cq do not match %p, %p", cq->gdma_cq, gdma_queue));

    if (cq->type == MANA_CQ_TYPE_RX) {
        mana_poll_rx_cq(cq);
    } else {
        mana_poll_tx_cq(cq);
    }

    mana_gd_arm_cq(gdma_queue);
}

static void
mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq)
{
    struct gdma_dev *gd = apc->ac->gdma_dev;

    if (!cq->gdma_cq)
        return;

    mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq);
}
static void
mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq)
{
    struct gdma_dev *gd = apc->ac->gdma_dev;
    struct mana_send_buf_info *txbuf_info;
    uint32_t pending_sends;
    int i;

    if (!txq->gdma_sq)
        return;

    if ((pending_sends = atomic_read(&txq->pending_sends)) > 0) {
        mana_err(NULL,
            "WARNING: txq pending sends not zero: %u\n",
            pending_sends);
    }

    if (txq->next_to_use != txq->next_to_complete) {
        mana_err(NULL,
            "WARNING: txq buf not completed, "
            "next use %u, next complete %u\n",
            txq->next_to_use, txq->next_to_complete);
    }

    /* Flush buf ring. Grab txq mtx lock */
    if (txq->txq_br) {
        mtx_lock(&txq->txq_mtx);
        drbr_flush(apc->ndev, txq->txq_br);
        mtx_unlock(&txq->txq_mtx);
        buf_ring_free(txq->txq_br, M_DEVBUF);
        txq->txq_br = NULL;
    }

    /* Drain taskqueue */
    if (txq->enqueue_tq) {
        while (taskqueue_cancel(txq->enqueue_tq,
            &txq->enqueue_task, NULL)) {
            taskqueue_drain(txq->enqueue_tq,
                &txq->enqueue_task);
        }

        taskqueue_free(txq->enqueue_tq);
        txq->enqueue_tq = NULL;
    }

    if (txq->tx_buf_info) {
        /* Free all mbufs which are still in-flight */
        for (i = 0; i < MAX_SEND_BUFFERS_PER_QUEUE; i++) {
            txbuf_info = &txq->tx_buf_info[i];
            if (txbuf_info->mbuf) {
                mana_tx_unmap_mbuf(apc, txbuf_info);
            }
        }

        free(txq->tx_buf_info, M_DEVBUF);
        txq->tx_buf_info = NULL;
    }

    mana_free_counters((counter_u64_t *)&txq->stats,
        sizeof(txq->stats));

    mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq);
    txq->gdma_sq = NULL;

    mtx_destroy(&txq->txq_mtx);
}
static void
mana_destroy_txq(struct mana_port_context *apc)
{
    int i;

    if (!apc->tx_qp)
        return;

    for (i = 0; i < apc->num_queues; i++) {
        mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);

        mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);

        mana_deinit_txq(apc, &apc->tx_qp[i].txq);
    }

    free(apc->tx_qp, M_DEVBUF);
    apc->tx_qp = NULL;
}
static int
mana_create_txq(struct mana_port_context *apc, struct ifnet *net)
{
    struct gdma_dev *gd = apc->ac->gdma_dev;
    struct mana_obj_spec wq_spec;
    struct mana_obj_spec cq_spec;
    struct gdma_queue_spec spec;
    struct gdma_context *gc;
    struct mana_txq *txq;
    struct mana_cq *cq;
    uint32_t txq_size;
    uint32_t cq_size;
    int err;
    int i;

    apc->tx_qp = mallocarray(apc->num_queues, sizeof(struct mana_tx_qp),
        M_DEVBUF, M_WAITOK | M_ZERO);
    if (!apc->tx_qp)
        return ENOMEM;

    /* The minimum size of the WQE is 32 bytes, hence
     * MAX_SEND_BUFFERS_PER_QUEUE represents the maximum number of WQEs
     * the SQ can store. This value is then used to size other queues
     * to prevent overflow.
     */
    txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32;
    KASSERT(IS_ALIGNED(txq_size, PAGE_SIZE),
        ("txq size not page aligned"));

    cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;
    cq_size = ALIGN(cq_size, PAGE_SIZE);

    gc = gd->gdma_context;

    for (i = 0; i < apc->num_queues; i++) {
        apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE;

        /* Create SQ */
        txq = &apc->tx_qp[i].txq;

        txq->ndev = net;
        txq->vp_offset = apc->tx_vp_offset;
        txq->idx = i;
        txq->alt_txq_idx = i;

        memset(&spec, 0, sizeof(spec));
        spec.type = GDMA_SQ;
        spec.monitor_avl_buf = true;
        spec.queue_size = txq_size;
        err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq);
        if (err)
            goto out;

        /* Create SQ's CQ */
        cq = &apc->tx_qp[i].tx_cq;
        cq->gdma_comp_buf = apc->eqs[i].cqe_poll;
        cq->type = MANA_CQ_TYPE_TX;

        cq->txq = txq;

        memset(&spec, 0, sizeof(spec));
        spec.type = GDMA_CQ;
        spec.monitor_avl_buf = false;
        spec.queue_size = cq_size;
        spec.cq.callback = mana_cq_handler;
        spec.cq.parent_eq = apc->eqs[i].eq;
        spec.cq.context = cq;
        err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
        if (err)
            goto out;

        memset(&wq_spec, 0, sizeof(wq_spec));
        memset(&cq_spec, 0, sizeof(cq_spec));

        wq_spec.gdma_region = txq->gdma_sq->mem_info.gdma_region;
        wq_spec.queue_size = txq->gdma_sq->queue_size;

        cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
        cq_spec.queue_size = cq->gdma_cq->queue_size;
        cq_spec.modr_ctx_id = 0;
        cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;

        err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
            &wq_spec, &cq_spec, &apc->tx_qp[i].tx_object);
        if (err)
            goto out;

        txq->gdma_sq->id = wq_spec.queue_index;
        cq->gdma_cq->id = cq_spec.queue_index;

        txq->gdma_sq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
        cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;

        txq->gdma_txq_id = txq->gdma_sq->id;

        cq->gdma_id = cq->gdma_cq->id;

        mana_dbg(NULL,
            "txq %d, txq gdma id %d, txq cq gdma id %d\n",
            i, txq->gdma_txq_id, cq->gdma_id);
        if (cq->gdma_id >= gc->max_num_cqs) {
            if_printf(net, "CQ id %u too large.\n", cq->gdma_id);
            err = EINVAL;
            goto out;
        }

        gc->cq_table[cq->gdma_id] = cq->gdma_cq;

        /* Initialize tx specific data */
        txq->tx_buf_info = malloc(MAX_SEND_BUFFERS_PER_QUEUE *
            sizeof(struct mana_send_buf_info),
            M_DEVBUF, M_WAITOK | M_ZERO);
        if (unlikely(txq->tx_buf_info == NULL)) {
            if_printf(net,
                "Failed to allocate tx buf info for SQ %u\n",
                txq->gdma_sq->id);
            err = ENOMEM;
            goto out;
        }

        snprintf(txq->txq_mtx_name, nitems(txq->txq_mtx_name),
            "mana:tx(%d)", i);
        mtx_init(&txq->txq_mtx, txq->txq_mtx_name, NULL, MTX_DEF);

        txq->txq_br = buf_ring_alloc(4 * MAX_SEND_BUFFERS_PER_QUEUE,
            M_DEVBUF, M_WAITOK, &txq->txq_mtx);
        if (unlikely(txq->txq_br == NULL)) {
            if_printf(net,
                "Failed to allocate buf ring for SQ %u\n",
                txq->gdma_sq->id);
            err = ENOMEM;
            goto out;
        }

        /* Allocate taskqueue for deferred send */
        TASK_INIT(&txq->enqueue_task, 0, mana_xmit_taskfunc, txq);
        txq->enqueue_tq = taskqueue_create_fast("mana_tx_enque",
            M_NOWAIT, taskqueue_thread_enqueue, &txq->enqueue_tq);
        if (unlikely(txq->enqueue_tq == NULL)) {
            if_printf(net,
                "Unable to create tx %d enqueue task queue\n", i);
            err = ENOMEM;
            goto out;
        }
        taskqueue_start_threads(&txq->enqueue_tq, 1, PI_NET,
            "mana txq %d", i);

        mana_alloc_counters((counter_u64_t *)&txq->stats,
            sizeof(txq->stats));

        mana_gd_arm_cq(cq->gdma_cq);
    }

    return 0;
out:
    mana_destroy_txq(apc);
    return err;
}
static void
mana_destroy_rxq(struct mana_port_context *apc, struct mana_rxq *rxq,
    bool validate_state)
{
    struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
    struct mana_recv_buf_oob *rx_oob;
    int i;

    if (!rxq)
        return;

    if (validate_state) {
        /*
         * XXX Cancel and drain cleanup task queue here.
         */
    }

    mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);

    mana_deinit_cq(apc, &rxq->rx_cq);

    mana_free_counters((counter_u64_t *)&rxq->stats,
        sizeof(rxq->stats));

    /* Free LRO resources */
    tcp_lro_free(&rxq->lro);

    for (i = 0; i < rxq->num_rx_buf; i++) {
        rx_oob = &rxq->rx_oobs[i];

        if (rx_oob->mbuf)
            mana_unload_rx_mbuf(apc, rxq, rx_oob, true);

        bus_dmamap_destroy(apc->rx_buf_tag, rx_oob->dma_map);
    }

    mana_gd_destroy_queue(gc, rxq->gdma_rq);

    free(rxq, M_DEVBUF);
}
#define MANA_WQE_HEADER_SIZE	16
#define MANA_WQE_SGE_SIZE	16

static int
mana_alloc_rx_wqe(struct mana_port_context *apc,
    struct mana_rxq *rxq, uint32_t *rxq_size, uint32_t *cq_size)
{
    struct mana_recv_buf_oob *rx_oob;
    uint32_t buf_idx;
    int err;

    if (rxq->datasize == 0 || rxq->datasize > PAGE_SIZE) {
        mana_err(NULL,
            "WARNING: Invalid rxq datasize %u\n", rxq->datasize);
    }

    *rxq_size = 0;
    *cq_size = 0;

    for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
        rx_oob = &rxq->rx_oobs[buf_idx];
        memset(rx_oob, 0, sizeof(*rx_oob));

        err = bus_dmamap_create(apc->rx_buf_tag, 0,
            &rx_oob->dma_map);
        if (err) {
            mana_err(NULL,
                "Failed to create rx DMA map for buf %d\n",
                buf_idx);
            return err;
        }

        err = mana_load_rx_mbuf(apc, rxq, rx_oob, true);
        if (err) {
            mana_err(NULL,
                "Failed to load rx mbuf for buf %d\n",
                buf_idx);
            bus_dmamap_destroy(apc->rx_buf_tag, rx_oob->dma_map);
            return err;
        }

        rx_oob->wqe_req.sgl = rx_oob->sgl;
        rx_oob->wqe_req.num_sge = rx_oob->num_sge;
        rx_oob->wqe_req.inline_oob_size = 0;
        rx_oob->wqe_req.inline_oob_data = NULL;
        rx_oob->wqe_req.flags = 0;
        rx_oob->wqe_req.client_data_unit = 0;

        *rxq_size += ALIGN(MANA_WQE_HEADER_SIZE +
            MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32);
        *cq_size += COMP_ENTRY_SIZE;
    }

    return 0;
}
static int
mana_push_wqe(struct mana_rxq *rxq)
{
    struct mana_recv_buf_oob *rx_oob;
    uint32_t buf_idx;
    int err;

    for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
        rx_oob = &rxq->rx_oobs[buf_idx];

        err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req,
            &rx_oob->wqe_inf);
        if (err)
            return err;
    }

    return 0;
}
static struct mana_rxq *
mana_create_rxq(struct mana_port_context *apc, uint32_t rxq_idx,
    struct mana_eq *eq, struct ifnet *ndev)
{
    struct gdma_dev *gd = apc->ac->gdma_dev;
    struct mana_obj_spec wq_spec;
    struct mana_obj_spec cq_spec;
    struct gdma_queue_spec spec;
    struct mana_cq *cq = NULL;
    uint32_t cq_size, rq_size;
    struct gdma_context *gc;
    struct mana_rxq *rxq;
    int err;

    gc = gd->gdma_context;

    rxq = malloc(sizeof(*rxq) +
        RX_BUFFERS_PER_QUEUE * sizeof(struct mana_recv_buf_oob),
        M_DEVBUF, M_WAITOK | M_ZERO);
    if (!rxq)
        return NULL;

    rxq->ndev = ndev;
    rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
    rxq->rxq_idx = rxq_idx;
    /*
     * Minimum size is MCLBYTES (2048) bytes for an mbuf cluster.
     * For now we just allow a maximum size of 4096.
     */
    rxq->datasize = ALIGN(apc->frame_size, MCLBYTES);
    if (rxq->datasize > MAX_FRAME_SIZE)
        rxq->datasize = MAX_FRAME_SIZE;

    mana_dbg(NULL, "Setting rxq %d datasize %d\n",
        rxq_idx, rxq->datasize);

    rxq->rxobj = INVALID_MANA_HANDLE;

    err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
    if (err)
        goto out;

    /* Create LRO for the RQ */
    if (ndev->if_capenable & IFCAP_LRO) {
        err = tcp_lro_init(&rxq->lro);
        if (err) {
            if_printf(ndev, "Failed to create LRO for rxq %d\n",
                rxq_idx);
        } else {
            rxq->lro.ifp = ndev;
        }
    }

    mana_alloc_counters((counter_u64_t *)&rxq->stats,
        sizeof(rxq->stats));

    rq_size = ALIGN(rq_size, PAGE_SIZE);
    cq_size = ALIGN(cq_size, PAGE_SIZE);
    memset(&spec, 0, sizeof(spec));
    spec.type = GDMA_RQ;
    spec.monitor_avl_buf = true;
    spec.queue_size = rq_size;
    err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq);
    if (err)
        goto out;

    /* Create RQ's CQ */
    cq = &rxq->rx_cq;
    cq->gdma_comp_buf = eq->cqe_poll;
    cq->type = MANA_CQ_TYPE_RX;
    cq->rxq = rxq;

    memset(&spec, 0, sizeof(spec));
    spec.type = GDMA_CQ;
    spec.monitor_avl_buf = false;
    spec.queue_size = cq_size;
    spec.cq.callback = mana_cq_handler;
    spec.cq.parent_eq = eq->eq;
    spec.cq.context = cq;
    err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
    if (err)
        goto out;

    memset(&wq_spec, 0, sizeof(wq_spec));
    memset(&cq_spec, 0, sizeof(cq_spec));
    wq_spec.gdma_region = rxq->gdma_rq->mem_info.gdma_region;
    wq_spec.queue_size = rxq->gdma_rq->queue_size;

    cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
    cq_spec.queue_size = cq->gdma_cq->queue_size;
    cq_spec.modr_ctx_id = 0;
    cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;

    err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ,
        &wq_spec, &cq_spec, &rxq->rxobj);
    if (err)
        goto out;

    rxq->gdma_rq->id = wq_spec.queue_index;
    cq->gdma_cq->id = cq_spec.queue_index;

    rxq->gdma_rq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
    cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;

    rxq->gdma_id = rxq->gdma_rq->id;
    cq->gdma_id = cq->gdma_cq->id;

    err = mana_push_wqe(rxq);
    if (err)
        goto out;

    if (cq->gdma_id >= gc->max_num_cqs)
        goto out;

    gc->cq_table[cq->gdma_id] = cq->gdma_cq;

    mana_gd_arm_cq(cq->gdma_cq);

    return rxq;

out:
    if_printf(ndev, "Failed to create RXQ: err = %d\n", err);

    mana_destroy_rxq(apc, rxq, false);

    if (cq)
        mana_deinit_cq(apc, cq);

    return NULL;
}
static int
mana_add_rx_queues(struct mana_port_context *apc, struct ifnet *ndev)
{
    struct mana_rxq *rxq;
    int err = 0;
    int i;

    for (i = 0; i < apc->num_queues; i++) {
        rxq = mana_create_rxq(apc, i, &apc->eqs[i], ndev);
        if (!rxq) {
            err = ENOMEM;
            goto out;
        }

        apc->rxqs[i] = rxq;
    }

    apc->default_rxobj = apc->rxqs[0]->rxobj;
out:
    return err;
}
static void
mana_destroy_vport(struct mana_port_context *apc)
{
    struct mana_rxq *rxq;
    uint32_t rxq_idx;
    struct mana_cq *rx_cq;
    struct gdma_queue *cq, *eq;

    for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
        rxq = apc->rxqs[rxq_idx];
        if (!rxq)
            continue;

        rx_cq = &rxq->rx_cq;
        if ((cq = rx_cq->gdma_cq) != NULL) {
            eq = cq->cq.parent;
            mana_drain_eq_task(eq);
        }

        mana_destroy_rxq(apc, rxq, true);
        apc->rxqs[rxq_idx] = NULL;
    }

    mana_destroy_txq(apc);
}
static int
mana_create_vport(struct mana_port_context *apc, struct ifnet *net)
{
    struct gdma_dev *gd = apc->ac->gdma_dev;
    int err;

    apc->default_rxobj = INVALID_MANA_HANDLE;

    err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
    if (err)
        return err;

    return mana_create_txq(apc, net);
}
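
/*
 * The RSS indirection table maps hash values to receive queues: entry
 * i points at queue (i % num_queues), and mana_config_rss() translates
 * each entry into the rx object handle the hardware steers by. The
 * same table is consulted on the send side in mana_start_xmit() via
 * MANA_INDIRECT_TABLE_MASK, keeping a flow's TX and RX on one queue
 * pair.
 */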
static void mana_rss_table_init(struct mana_port_context *apc)
{
    int i;

    for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
        apc->indir_table[i] = i % apc->num_queues;
}

int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
    bool update_hash, bool update_tab)
{
    uint32_t queue_idx;
    int i;

    if (update_tab) {
        for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
            queue_idx = apc->indir_table[i];
            apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj;
        }
    }

    return mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
}
static int
mana_init_port(struct ifnet *ndev)
{
    struct mana_port_context *apc = if_getsoftc(ndev);
    uint32_t max_txq, max_rxq, max_queues;
    int port_idx = apc->port_idx;
    uint32_t num_indirect_entries;
    int err;

    err = mana_init_port_context(apc);
    if (err)
        return err;

    err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
        &num_indirect_entries);
    if (err) {
        if_printf(ndev, "Failed to query info for vPort 0\n");
        goto reset_apc;
    }

    max_queues = min_t(uint32_t, max_txq, max_rxq);
    if (apc->max_queues > max_queues)
        apc->max_queues = max_queues;

    if (apc->num_queues > apc->max_queues)
        apc->num_queues = apc->max_queues;

    return 0;

reset_apc:
    bus_dma_tag_destroy(apc->rx_buf_tag);
    apc->rx_buf_tag = NULL;
    free(apc->rxqs, M_DEVBUF);
    apc->rxqs = NULL;
    return err;
}
static int
mana_alloc_queues(struct ifnet *ndev)
{
    struct mana_port_context *apc = if_getsoftc(ndev);
    struct gdma_dev *gd = apc->ac->gdma_dev;
    int err;

    err = mana_create_eq(apc);
    if (err)
        return err;

    err = mana_create_vport(apc, ndev);
    if (err)
        goto destroy_eq;

    err = mana_add_rx_queues(apc, ndev);
    if (err)
        goto destroy_vport;

    apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;

    mana_rss_table_init(apc);

    err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
    if (err)
        goto destroy_vport;

    return 0;

destroy_vport:
    mana_destroy_vport(apc);
destroy_eq:
    mana_destroy_eq(gd->gdma_context, apc);
    return err;
}
static int
mana_up(struct mana_port_context *apc)
{
    int err;

    mana_dbg(NULL, "mana_up called\n");

    err = mana_alloc_queues(apc->ndev);
    if (err) {
        mana_err(NULL, "Failed to allocate mana queues: %d\n", err);
        return err;
    }

    /* Add queue specific sysctl */
    mana_sysctl_add_queues(apc);

    apc->port_is_up = true;

    /* Ensure port state updated before txq state */
    wmb();

    if_link_state_change(apc->ndev, LINK_STATE_UP);
    if_setdrvflagbits(apc->ndev, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

    return 0;
}
static void
mana_init(void *arg)
{
    struct mana_port_context *apc = (struct mana_port_context *)arg;

    MANA_APC_LOCK_LOCK(apc);
    if (!apc->port_is_up) {
        mana_up(apc);
    }
    MANA_APC_LOCK_UNLOCK(apc);
}
static int
mana_dealloc_queues(struct ifnet *ndev)
{
    struct mana_port_context *apc = if_getsoftc(ndev);
    struct mana_txq *txq;
    int i, err;

    if (apc->port_is_up)
        return EINVAL;

    /* No packet can be transmitted now since apc->port_is_up is false.
     * There is still a tiny chance that mana_poll_tx_cq() can re-enable
     * a txq because it may not timely see apc->port_is_up being cleared
     * to false, but it doesn't matter since mana_start_xmit() drops any
     * new packets due to apc->port_is_up being false.
     *
     * Drain all the in-flight TX packets
     */
    for (i = 0; i < apc->num_queues; i++) {
        txq = &apc->tx_qp[i].txq;

        struct mana_cq *tx_cq = &apc->tx_qp[i].tx_cq;
        struct gdma_queue *eq = NULL;

        eq = tx_cq->gdma_cq->cq.parent;

        /* Stop EQ interrupt */
        eq->eq.do_not_ring_db = true;
        /* Schedule a cleanup task */
        taskqueue_enqueue(eq->eq.cleanup_tq,
            &eq->eq.cleanup_task);

        while (atomic_read(&txq->pending_sends) > 0)
            usleep_range(1000, 2000);
    }

    /* We're 100% sure the queues can no longer be woken up, because
     * we're sure now mana_poll_tx_cq() can't be running.
     */

    apc->rss_state = TRI_STATE_FALSE;
    err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
    if (err) {
        if_printf(ndev, "Failed to disable vPort: %d\n", err);
        return err;
    }

    /* TODO: Implement RX fencing */

    mana_destroy_vport(apc);

    mana_destroy_eq(apc->ac->gdma_dev->gdma_context, apc);

    return 0;
}
static int
mana_down(struct mana_port_context *apc)
{
    int err = 0;

    apc->port_st_save = apc->port_is_up;
    apc->port_is_up = false;

    /* Ensure port state updated before txq state */
    wmb();

    if (apc->port_st_save) {
        if_setdrvflagbits(apc->ndev, IFF_DRV_OACTIVE,
            IFF_DRV_RUNNING);
        if_link_state_change(apc->ndev, LINK_STATE_DOWN);

        mana_sysctl_free_queues(apc);

        err = mana_dealloc_queues(apc->ndev);
        if (err) {
            if_printf(apc->ndev,
                "Failed to bring down mana interface: %d\n", err);
        }
    }

    return err;
}
int
mana_detach(struct ifnet *ndev)
{
    struct mana_port_context *apc = if_getsoftc(ndev);
    int err;

    ether_ifdetach(ndev);

    if (!apc)
        return 0;

    MANA_APC_LOCK_LOCK(apc);
    err = mana_down(apc);
    MANA_APC_LOCK_UNLOCK(apc);

    mana_cleanup_port_context(apc);

    MANA_APC_LOCK_DESTROY(apc);

    free(apc, M_DEVBUF);

    return err;
}
static int
mana_probe_port(struct mana_context *ac, int port_idx,
    struct ifnet **ndev_storage)
{
    struct gdma_context *gc = ac->gdma_dev->gdma_context;
    struct mana_port_context *apc;
    struct ifnet *ndev;
    int err;

    ndev = if_alloc_dev(IFT_ETHER, gc->dev);
    if (!ndev) {
        mana_err(NULL, "Failed to allocate ifnet struct\n");
        return ENOMEM;
    }

    *ndev_storage = ndev;

    apc = malloc(sizeof(*apc), M_DEVBUF, M_WAITOK | M_ZERO);
    if (!apc) {
        mana_err(NULL, "Failed to allocate port context\n");
        err = ENOMEM;
        goto free_net;
    }

    apc->ac = ac;
    apc->ndev = ndev;
    apc->max_queues = gc->max_num_queues;
    apc->num_queues = min_t(unsigned int,
        gc->max_num_queues, MANA_MAX_NUM_QUEUES);
    apc->port_handle = INVALID_MANA_HANDLE;
    apc->port_idx = port_idx;
    apc->frame_size = DEFAULT_FRAME_SIZE;

    MANA_APC_LOCK_INIT(apc);

    if_initname(ndev, device_get_name(gc->dev), port_idx);
    if_setdev(ndev, gc->dev);
    if_setsoftc(ndev, apc);

    if_setflags(ndev, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
    if_setinitfn(ndev, mana_init);
    if_settransmitfn(ndev, mana_start_xmit);
    if_setqflushfn(ndev, mana_qflush);
    if_setioctlfn(ndev, mana_ioctl);
    if_setgetcounterfn(ndev, mana_get_counter);

    if_setmtu(ndev, ETHERMTU);
    if_setbaudrate(ndev, IF_Gbps(100));

    mana_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);

    err = mana_init_port(ndev);
    if (err)
        goto reset_apc;

    ndev->if_capabilities |= IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6;
    ndev->if_capabilities |= IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6;
    ndev->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6;

    ndev->if_capabilities |= IFCAP_LRO | IFCAP_LINKSTATE;

    /* Enable all available capabilities by default. */
    ndev->if_capenable = ndev->if_capabilities;

    /* TSO parameters */
    ndev->if_hw_tsomax = MAX_MBUF_FRAGS * MANA_TSO_MAXSEG_SZ -
        (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
    ndev->if_hw_tsomaxsegcount = MAX_MBUF_FRAGS;
    ndev->if_hw_tsomaxsegsize = PAGE_SIZE;

    ifmedia_init(&apc->media, IFM_IMASK,
        mana_ifmedia_change, mana_ifmedia_status);
    ifmedia_add(&apc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
    ifmedia_set(&apc->media, IFM_ETHER | IFM_AUTO);

    ether_ifattach(ndev, apc->mac_addr);

    /* Initialize statistics */
    mana_alloc_counters((counter_u64_t *)&apc->port_stats,
        sizeof(struct mana_port_stats));
    mana_sysctl_add_port(apc);

    /* Tell the stack that the interface is not active */
    if_setdrvflagbits(ndev, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);

    return 0;

reset_apc:
    free(apc, M_DEVBUF);
free_net:
    *ndev_storage = NULL;
    if_printf(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
    if_free(ndev);
    return err;
}
int mana_probe(struct gdma_dev *gd)
{
    struct gdma_context *gc = gd->gdma_context;
    device_t dev = gc->dev;
    struct mana_context *ac;
    int err;
    int i;

    device_printf(dev, "%s protocol version: %d.%d.%d\n", DEVICE_NAME,
        MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION);

    err = mana_gd_register_device(gd);
    if (err)
        return err;

    ac = malloc(sizeof(*ac), M_DEVBUF, M_WAITOK | M_ZERO);
    if (!ac)
        return ENOMEM;

    ac->gdma_dev = gd;
    ac->num_ports = 1;
    gd->driver_data = ac;

    err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
        MANA_MICRO_VERSION, &ac->num_ports);
    if (err)
        goto out;

    if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
        ac->num_ports = MAX_PORTS_IN_MANA_DEV;

    for (i = 0; i < ac->num_ports; i++) {
        err = mana_probe_port(ac, i, &ac->ports[i]);
        if (err) {
            device_printf(dev,
                "Failed to probe mana port %d\n", i);
            break;
        }
    }

out:
    if (err)
        mana_remove(gd);

    return err;
}
void
mana_remove(struct gdma_dev *gd)
{
    struct gdma_context *gc = gd->gdma_context;
    struct mana_context *ac = gd->driver_data;
    device_t dev = gc->dev;
    struct ifnet *ndev;
    int i;

    for (i = 0; i < ac->num_ports; i++) {
        ndev = ac->ports[i];
        if (!ndev) {
            if (i == 0)
                device_printf(dev, "No net device to remove\n");
            goto out;
        }

        mana_detach(ndev);

        if_free(ndev);
    }
out:
    mana_gd_deregister_device(gd);
    gd->driver_data = NULL;
    gd->gdma_context = NULL;

    free(ac, M_DEVBUF);
}