2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2021 Microsoft Corp.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
33 #include <sys/param.h>
34 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/kthread.h>
38 #include <sys/malloc.h>
41 #include <sys/socket.h>
42 #include <sys/sockio.h>
44 #include <sys/eventhandler.h>
46 #include <machine/bus.h>
47 #include <machine/resource.h>
48 #include <machine/in_cksum.h>
51 #include <net/if_var.h>
52 #include <net/if_types.h>
53 #include <net/if_vlan_var.h>
55 #include <net/rss_config.h>
58 #include <netinet/in_systm.h>
59 #include <netinet/in.h>
60 #include <netinet/if_ether.h>
61 #include <netinet/ip.h>
62 #include <netinet/ip6.h>
63 #include <netinet/tcp.h>
64 #include <netinet/udp.h>
67 #include "mana_sysctl.h"
69 static int mana_up(struct mana_port_context *apc);
70 static int mana_down(struct mana_port_context *apc);
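/*
 * Fill in an RSS hash key. The key is generated once per boot with
 * arc4random_buf() and cached in a static buffer, so all ports share
 * the same Toeplitz key and hash flows consistently.
 */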
73 mana_rss_key_fill(void *k, size_t size)
75 static bool rss_key_generated = false;
76 static uint8_t rss_key[MANA_HASH_KEY_SIZE];
78 KASSERT(size <= MANA_HASH_KEY_SIZE,
79 ("Request more buytes than MANA RSS key can hold"));
81 if (!rss_key_generated) {
82 arc4random_buf(rss_key, MANA_HASH_KEY_SIZE);
83 rss_key_generated = true;
85 memcpy(k, rss_key, size);
89 mana_ifmedia_change(struct ifnet *ifp __unused)
95 mana_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
97 struct mana_port_context *apc = if_getsoftc(ifp);
100 if_printf(ifp, "Port not available\n");
104 MANA_APC_LOCK_LOCK(apc);
106 ifmr->ifm_status = IFM_AVALID;
107 ifmr->ifm_active = IFM_ETHER;
109 if (!apc->port_is_up) {
110 MANA_APC_LOCK_UNLOCK(apc);
111 mana_dbg(NULL, "Port %u link is down\n", apc->port_idx);
115 ifmr->ifm_status |= IFM_ACTIVE;
116 ifmr->ifm_active |= IFM_100G_DR | IFM_FDX;
118 MANA_APC_LOCK_UNLOCK(apc);
122 mana_get_counter(struct ifnet *ifp, ift_counter cnt)
124 struct mana_port_context *apc = if_getsoftc(ifp);
125 struct mana_port_stats *stats = &apc->port_stats;
128 case IFCOUNTER_IPACKETS:
129 return (counter_u64_fetch(stats->rx_packets));
130 case IFCOUNTER_OPACKETS:
131 return (counter_u64_fetch(stats->tx_packets));
132 case IFCOUNTER_IBYTES:
133 return (counter_u64_fetch(stats->rx_bytes));
134 case IFCOUNTER_OBYTES:
135 return (counter_u64_fetch(stats->tx_bytes));
136 case IFCOUNTER_IQDROPS:
137 return (counter_u64_fetch(stats->rx_drops));
138 case IFCOUNTER_OQDROPS:
139 return (counter_u64_fetch(stats->tx_drops));
141 return (if_get_counter_default(ifp, cnt));
146 mana_qflush(struct ifnet *ifp)
152 mana_restart(struct mana_port_context *apc)
156 MANA_APC_LOCK_LOCK(apc);
161 MANA_APC_LOCK_UNLOCK(apc);
167 mana_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
169 struct mana_port_context *apc = if_getsoftc(ifp);
170 struct ifrsskey *ifrk;
171 struct ifrsshash *ifrh;
178 ifr = (struct ifreq *)data;
179 new_mtu = ifr->ifr_mtu;
180 if (ifp->if_mtu == new_mtu)
182 if ((new_mtu + 18 > MAX_FRAME_SIZE) ||
183 (new_mtu + 18 < MIN_FRAME_SIZE)) {
184 if_printf(ifp, "Invalid MTU. new_mtu: %d, "
185 "max allowed: %d, min allowed: %d\n",
186 new_mtu, MAX_FRAME_SIZE - 18, MIN_FRAME_SIZE - 18);
189 MANA_APC_LOCK_LOCK(apc);
193 apc->frame_size = new_mtu + 18;
194 if_setmtu(ifp, new_mtu);
195 mana_dbg(NULL, "Set MTU to %d\n", new_mtu);
198 MANA_APC_LOCK_UNLOCK(apc);
202 if (ifp->if_flags & IFF_UP) {
203 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
204 MANA_APC_LOCK_LOCK(apc);
205 if (!apc->port_is_up)
207 MANA_APC_LOCK_UNLOCK(apc);
210 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
211 MANA_APC_LOCK_LOCK(apc);
214 MANA_APC_LOCK_UNLOCK(apc);
222 ifr = (struct ifreq *)data;
223 rc = ifmedia_ioctl(ifp, ifr, &apc->media, command);
227 ifrk = (struct ifrsskey *)data;
228 ifrk->ifrk_func = RSS_FUNC_TOEPLITZ;
229 ifrk->ifrk_keylen = MANA_HASH_KEY_SIZE;
230 memcpy(ifrk->ifrk_key, apc->hashkey, MANA_HASH_KEY_SIZE);
234 ifrh = (struct ifrsshash *)data;
235 ifrh->ifrh_func = RSS_FUNC_TOEPLITZ;
244 rc = ether_ioctl(ifp, command, data);
252 mana_alloc_counters(counter_u64_t *begin, int size)
254 counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
256 for (; begin < end; ++begin)
257 *begin = counter_u64_alloc(M_WAITOK);
261 mana_free_counters(counter_u64_t *begin, int size)
263 counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
265 for (; begin < end; ++begin)
266 counter_u64_free(*begin);
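/*
 * The SQ is usable only if a maximum-size WQE still fits; checking
 * against MAX_TX_WQE_SIZE up front means a post should never fail
 * for lack of queue space.
 */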
270 mana_can_tx(struct gdma_queue *wq)
272 return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE;
276 mana_tx_map_mbuf(struct mana_port_context *apc,
277 struct mana_send_buf_info *tx_info,
278 struct mbuf **m_head, struct mana_tx_package *tp,
279 struct mana_stats *tx_stats)
281 struct gdma_dev *gd = apc->ac->gdma_dev;
282 bus_dma_segment_t segs[MAX_MBUF_FRAGS];
283 struct mbuf *m = *m_head;
286 err = bus_dmamap_load_mbuf_sg(apc->tx_buf_tag, tx_info->dma_map,
287 m, segs, &nsegs, BUS_DMA_NOWAIT);
291 counter_u64_add(tx_stats->collapse, 1);
292 m_new = m_collapse(m, M_NOWAIT, MAX_MBUF_FRAGS);
293 if (unlikely(m_new == NULL)) {
294 counter_u64_add(tx_stats->collapse_err, 1);
301 "Too many segs in orig mbuf, m_collapse called\n");
303 err = bus_dmamap_load_mbuf_sg(apc->tx_buf_tag,
304 tx_info->dma_map, m, segs, &nsegs, BUS_DMA_NOWAIT);
307 for (i = 0; i < nsegs; i++) {
308 tp->wqe_req.sgl[i].address = segs[i].ds_addr;
309 tp->wqe_req.sgl[i].mem_key = gd->gpa_mkey;
310 tp->wqe_req.sgl[i].size = segs[i].ds_len;
312 tp->wqe_req.num_sge = nsegs;
314 tx_info->mbuf = *m_head;
316 bus_dmamap_sync(apc->tx_buf_tag, tx_info->dma_map,
317 BUS_DMASYNC_PREWRITE);
324 mana_tx_unmap_mbuf(struct mana_port_context *apc,
325 struct mana_send_buf_info *tx_info)
327 bus_dmamap_sync(apc->tx_buf_tag, tx_info->dma_map,
328 BUS_DMASYNC_POSTWRITE);
329 bus_dmamap_unload(apc->tx_buf_tag, tx_info->dma_map);
331 m_freem(tx_info->mbuf);
332 tx_info->mbuf = NULL;
337 mana_load_rx_mbuf(struct mana_port_context *apc, struct mana_rxq *rxq,
338 struct mana_recv_buf_oob *rx_oob, bool alloc_mbuf)
340 bus_dma_segment_t segs[1];
346 mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rxq->datasize);
347 if (unlikely(mbuf == NULL)) {
348 mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
349 if (unlikely(mbuf == NULL)) {
354 mlen = rxq->datasize;
357 mbuf->m_pkthdr.len = mbuf->m_len = mlen;
361 mlen = rx_oob->mbuf->m_pkthdr.len;
367 err = bus_dmamap_load_mbuf_sg(apc->rx_buf_tag, rx_oob->dma_map,
368 mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
370 if (unlikely((err != 0) || (nsegs != 1))) {
371 mana_warn(NULL, "Failed to map mbuf, error: %d, "
372 "nsegs: %d\n", err, nsegs);
373 counter_u64_add(rxq->stats.dma_mapping_err, 1);
377 bus_dmamap_sync(apc->rx_buf_tag, rx_oob->dma_map,
378 BUS_DMASYNC_PREREAD);
382 rx_oob->sgl[0].address = segs[0].ds_addr;
383 rx_oob->sgl[0].size = mlen;
384 rx_oob->sgl[0].mem_key = apc->ac->gdma_dev->gpa_mkey;
394 mana_unload_rx_mbuf(struct mana_port_context *apc, struct mana_rxq *rxq,
395 struct mana_recv_buf_oob *rx_oob, bool free_mbuf)
397 bus_dmamap_sync(apc->rx_buf_tag, rx_oob->dma_map,
398 BUS_DMASYNC_POSTREAD);
399 bus_dmamap_unload(apc->rx_buf_tag, rx_oob->dma_map);
401 if (free_mbuf && rx_oob->mbuf) {
402 m_freem(rx_oob->mbuf);
408 /* Use a couple of mbuf PH_loc spaces for the l3 and l4 protocol types */
409 #define MANA_L3_PROTO(_mbuf) ((_mbuf)->m_pkthdr.PH_loc.sixteen[0])
410 #define MANA_L4_PROTO(_mbuf) ((_mbuf)->m_pkthdr.PH_loc.sixteen[1])
412 #define MANA_TXQ_FULL (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)
415 mana_xmit(struct mana_txq *txq)
417 enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
418 struct mana_send_buf_info *tx_info;
419 struct ifnet *ndev = txq->ndev;
421 struct mana_port_context *apc = if_getsoftc(ndev);
422 struct mana_port_stats *port_stats = &apc->port_stats;
423 struct gdma_dev *gd = apc->ac->gdma_dev;
424 uint64_t packets, bytes;
425 uint16_t next_to_use;
426 struct mana_tx_package pkg = {};
427 struct mana_stats *tx_stats;
428 struct gdma_queue *gdma_sq;
432 gdma_sq = txq->gdma_sq;
433 cq = &apc->tx_qp[txq->idx].tx_cq;
434 tx_stats = &txq->stats;
438 next_to_use = txq->next_to_use;
440 while ((mbuf = drbr_peek(ndev, txq->txq_br)) != NULL) {
441 if (!apc->port_is_up ||
442 (if_getdrvflags(ndev) & MANA_TXQ_FULL) != IFF_DRV_RUNNING) {
443 drbr_putback(ndev, txq->txq_br, mbuf);
447 if (!mana_can_tx(gdma_sq)) {
448 /* SQ is full. Set the IFF_DRV_OACTIVE flag */
449 if_setdrvflagbits(apc->ndev, IFF_DRV_OACTIVE, 0);
450 counter_u64_add(tx_stats->stop, 1);
451 uint64_t stops = counter_u64_fetch(tx_stats->stop);
452 uint64_t wakeups = counter_u64_fetch(tx_stats->wakeup);
453 #define MANA_TXQ_STOP_THRESHOLD 50
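/*
 * If this queue keeps filling up (stops outpace wakeups by more than
 * the threshold), steer future transmits to an alternate txq;
 * mana_poll_tx_cq() resets alt_txq_idx once wakeups catch up.
 */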
454 if (stops > MANA_TXQ_STOP_THRESHOLD && wakeups > 0 &&
455 stops > wakeups && txq->alt_txq_idx == txq->idx) {
457 (txq->idx + (stops / wakeups))
459 counter_u64_add(tx_stats->alt_chg, 1);
462 drbr_putback(ndev, txq->txq_br, mbuf);
464 taskqueue_enqueue(cq->cleanup_tq, &cq->cleanup_task);
468 tx_info = &txq->tx_buf_info[next_to_use];
470 memset(&pkg, 0, sizeof(struct mana_tx_package));
471 pkg.wqe_req.sgl = pkg.sgl_array;
473 err = mana_tx_map_mbuf(apc, tx_info, &mbuf, &pkg, tx_stats);
476 "Failed to map tx mbuf, err %d\n", err);
478 counter_u64_add(tx_stats->dma_mapping_err, 1);
480 /* The mbuf is still there. Free it */
482 /* Advance the drbr queue */
483 drbr_advance(ndev, txq->txq_br);
487 pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
488 pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;
490 if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) {
491 pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset;
492 pkt_fmt = MANA_LONG_PKT_FMT;
494 pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset;
497 pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;
499 if (pkt_fmt == MANA_SHORT_PKT_FMT)
500 pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
502 pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);
504 pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
505 pkg.wqe_req.flags = 0;
506 pkg.wqe_req.client_data_unit = 0;
508 if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) {
509 if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IP)
510 pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
512 pkg.tx_oob.s_oob.is_outer_ipv6 = 1;
514 pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
515 pkg.tx_oob.s_oob.comp_tcp_csum = 1;
516 pkg.tx_oob.s_oob.trans_off = mbuf->m_pkthdr.l3hlen;
518 pkg.wqe_req.client_data_unit = mbuf->m_pkthdr.tso_segsz;
519 pkg.wqe_req.flags = GDMA_WR_OOB_IN_SGL | GDMA_WR_PAD_BY_SGE0;
520 } else if (mbuf->m_pkthdr.csum_flags &
521 (CSUM_IP_UDP | CSUM_IP_TCP | CSUM_IP6_UDP | CSUM_IP6_TCP)) {
522 if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IP) {
523 pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
524 pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
526 pkg.tx_oob.s_oob.is_outer_ipv6 = 1;
529 if (MANA_L4_PROTO(mbuf) == IPPROTO_TCP) {
530 pkg.tx_oob.s_oob.comp_tcp_csum = 1;
531 pkg.tx_oob.s_oob.trans_off =
532 mbuf->m_pkthdr.l3hlen;
534 pkg.tx_oob.s_oob.comp_udp_csum = 1;
536 } else if (mbuf->m_pkthdr.csum_flags & CSUM_IP) {
537 pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
538 pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
540 if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IP)
541 pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
542 else if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IPV6)
543 pkg.tx_oob.s_oob.is_outer_ipv6 = 1;
546 len = mbuf->m_pkthdr.len;
548 err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
549 (struct gdma_posted_wqe_info *)&tx_info->wqe_inf);
551 /* Should not happen */
552 if_printf(ndev, "Failed to post TX OOB: %d\n", err);
554 mana_tx_unmap_mbuf(apc, tx_info);
556 drbr_advance(ndev, txq->txq_br);
561 (next_to_use + 1) % MAX_SEND_BUFFERS_PER_QUEUE;
563 (void)atomic_inc_return(&txq->pending_sends);
565 drbr_advance(ndev, txq->txq_br);
567 mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq);
574 counter_u64_add_protected(tx_stats->packets, packets);
575 counter_u64_add_protected(port_stats->tx_packets, packets);
576 counter_u64_add_protected(tx_stats->bytes, bytes);
577 counter_u64_add_protected(port_stats->tx_bytes, bytes);
580 txq->next_to_use = next_to_use;
584 mana_xmit_taskfunc(void *arg, int pending)
586 struct mana_txq *txq = (struct mana_txq *)arg;
587 struct ifnet *ndev = txq->ndev;
588 struct mana_port_context *apc = if_getsoftc(ndev);
590 while (!drbr_empty(ndev, txq->txq_br) && apc->port_is_up &&
591 (if_getdrvflags(ndev) & MANA_TXQ_FULL) == IFF_DRV_RUNNING) {
592 mtx_lock(&txq->txq_mtx);
594 mtx_unlock(&txq->txq_mtx);
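/*
 * Make sure at least "len" bytes are contiguous in the first mbuf so
 * headers can be dereferenced directly; on failure m_pullup() frees
 * the chain and the calling function returns NULL.
 */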
598 #define PULLUP_HDR(m, len) \
600 if (unlikely((m)->m_len < (len))) { \
601 (m) = m_pullup((m), (len)); \
608 * If this function fails, the mbuf is freed.
610 static inline struct mbuf *
611 mana_tso_fixup(struct mbuf *mbuf)
613 struct ether_vlan_header *eh = mtod(mbuf, struct ether_vlan_header *);
618 if (eh->evl_encap_proto == ntohs(ETHERTYPE_VLAN)) {
619 etype = ntohs(eh->evl_proto);
620 ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
622 etype = ntohs(eh->evl_encap_proto);
623 ehlen = ETHER_HDR_LEN;
626 if (etype == ETHERTYPE_IP) {
630 PULLUP_HDR(mbuf, ehlen + sizeof(*ip));
631 ip = mtodo(mbuf, ehlen);
632 iphlen = ip->ip_hl << 2;
633 mbuf->m_pkthdr.l3hlen = ehlen + iphlen;
635 PULLUP_HDR(mbuf, ehlen + iphlen + sizeof(*th));
636 th = mtodo(mbuf, ehlen + iphlen);
640 th->th_sum = in_pseudo(ip->ip_src.s_addr,
641 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
642 } else if (etype == ETHERTYPE_IPV6) {
645 PULLUP_HDR(mbuf, ehlen + sizeof(*ip6) + sizeof(*th));
646 ip6 = mtodo(mbuf, ehlen);
647 if (ip6->ip6_nxt != IPPROTO_TCP) {
648 /* Something is really wrong; just return */
649 mana_dbg(NULL, "TSO mbuf not TCP, freed.\n");
653 mbuf->m_pkthdr.l3hlen = ehlen + sizeof(*ip6);
655 th = mtodo(mbuf, ehlen + sizeof(*ip6));
658 th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
660 /* CSUM_TSO is set but this is not an IP packet. */
661 mana_warn(NULL, "TSO mbuf not right, freed.\n");
666 MANA_L3_PROTO(mbuf) = etype;
672 * If this function fails, the mbuf is freed.
674 static inline struct mbuf *
675 mana_mbuf_csum_check(struct mbuf *mbuf)
677 struct ether_vlan_header *eh = mtod(mbuf, struct ether_vlan_header *);
678 struct mbuf *mbuf_next;
683 if (eh->evl_encap_proto == ntohs(ETHERTYPE_VLAN)) {
684 etype = ntohs(eh->evl_proto);
685 ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
687 etype = ntohs(eh->evl_encap_proto);
688 ehlen = ETHER_HDR_LEN;
691 mbuf_next = m_getptr(mbuf, ehlen, &offset);
693 MANA_L4_PROTO(mbuf) = 0;
694 if (etype == ETHERTYPE_IP) {
698 ip = (struct ip *)(mtodo(mbuf_next, offset));
699 iphlen = ip->ip_hl << 2;
700 mbuf->m_pkthdr.l3hlen = ehlen + iphlen;
702 MANA_L4_PROTO(mbuf) = ip->ip_p;
703 } else if (etype == ETHERTYPE_IPV6) {
704 const struct ip6_hdr *ip6;
706 ip6 = (struct ip6_hdr *)(mtodo(mbuf_next, offset));
707 mbuf->m_pkthdr.l3hlen = ehlen + sizeof(*ip6);
709 MANA_L4_PROTO(mbuf) = ip6->ip6_nxt;
711 MANA_L4_PROTO(mbuf) = 0;
714 MANA_L3_PROTO(mbuf) = etype;
720 mana_start_xmit(struct ifnet *ifp, struct mbuf *m)
722 struct mana_port_context *apc = if_getsoftc(ifp);
723 struct mana_txq *txq;
728 if (unlikely((!apc->port_is_up) ||
729 (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0))
732 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
733 m = mana_tso_fixup(m);
734 if (unlikely(m == NULL)) {
736 counter_u64_add_protected(apc->port_stats.tx_drops, 1);
741 m = mana_mbuf_csum_check(m);
742 if (unlikely(m == NULL)) {
744 counter_u64_add_protected(apc->port_stats.tx_drops, 1);
750 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
751 uint32_t hash = m->m_pkthdr.flowid;
752 txq_id = apc->indir_table[(hash) & MANA_INDIRECT_TABLE_MASK] %
755 txq_id = m->m_pkthdr.flowid % apc->num_queues;
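/* Honor any alternate txq picked by the congestion heuristic in mana_xmit() */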
758 if (apc->enable_tx_altq)
759 txq_id = apc->tx_qp[txq_id].txq.alt_txq_idx;
761 txq = &apc->tx_qp[txq_id].txq;
763 is_drbr_empty = drbr_empty(ifp, txq->txq_br);
764 err = drbr_enqueue(ifp, txq->txq_br, m);
766 mana_warn(NULL, "txq %u failed to enqueue: %d\n",
768 taskqueue_enqueue(txq->enqueue_tq, &txq->enqueue_task);
772 if (is_drbr_empty && mtx_trylock(&txq->txq_mtx)) {
774 mtx_unlock(&txq->txq_mtx);
776 taskqueue_enqueue(txq->enqueue_tq, &txq->enqueue_task);
783 mana_cleanup_port_context(struct mana_port_context *apc)
785 bus_dma_tag_destroy(apc->tx_buf_tag);
786 bus_dma_tag_destroy(apc->rx_buf_tag);
787 apc->rx_buf_tag = NULL;
789 free(apc->rxqs, M_DEVBUF);
792 mana_free_counters((counter_u64_t *)&apc->port_stats,
793 sizeof(struct mana_port_stats));
797 mana_init_port_context(struct mana_port_context *apc)
799 device_t dev = apc->ac->gdma_dev->gdma_context->dev;
800 uint32_t tso_maxsize;
803 tso_maxsize = MAX_MBUF_FRAGS * MANA_TSO_MAXSEG_SZ -
804 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
806 /* Create DMA tag for tx bufs */
807 err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
808 1, 0, /* alignment, boundary */
809 BUS_SPACE_MAXADDR, /* lowaddr */
810 BUS_SPACE_MAXADDR, /* highaddr */
811 NULL, NULL, /* filter, filterarg */
812 tso_maxsize, /* maxsize */
813 MAX_MBUF_FRAGS, /* nsegments */
814 tso_maxsize, /* maxsegsize */
816 NULL, NULL, /* lockfunc, lockfuncarg*/
819 device_printf(dev, "Feiled to create TX DMA tag\n");
823 /* Create DMA tag for rx bufs */
824 err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
825 64, 0, /* alignment, boundary */
826 BUS_SPACE_MAXADDR, /* lowaddr */
827 BUS_SPACE_MAXADDR, /* highaddr */
828 NULL, NULL, /* filter, filterarg */
829 MJUMPAGESIZE, /* maxsize */
831 MJUMPAGESIZE, /* maxsegsize */
833 NULL, NULL, /* lockfunc, lockfuncarg*/
836 device_printf(dev, "Feiled to create RX DMA tag\n");
840 apc->rxqs = mallocarray(apc->num_queues, sizeof(struct mana_rxq *),
841 M_DEVBUF, M_WAITOK | M_ZERO);
844 bus_dma_tag_destroy(apc->tx_buf_tag);
845 bus_dma_tag_destroy(apc->rx_buf_tag);
846 apc->rx_buf_tag = NULL;
854 mana_send_request(struct mana_context *ac, void *in_buf,
855 uint32_t in_len, void *out_buf, uint32_t out_len)
857 struct gdma_context *gc = ac->gdma_dev->gdma_context;
858 struct gdma_resp_hdr *resp = out_buf;
859 struct gdma_req_hdr *req = in_buf;
860 device_t dev = gc->dev;
861 static atomic_t activity_id;
864 req->dev_id = gc->mana.dev_id;
865 req->activity_id = atomic_inc_return(&activity_id);
867 mana_dbg(NULL, "activity_id = %u\n", activity_id);
869 err = mana_gd_send_request(gc, in_len, in_buf, out_len,
871 if (err || resp->status) {
872 device_printf(dev, "Failed to send mana message: %d, 0x%x\n",
874 return err ? err : EPROTO;
877 if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 ||
878 req->activity_id != resp->activity_id) {
880 "Unexpected mana message response: %x,%x,%x,%x\n",
881 req->dev_id.as_uint32, resp->dev_id.as_uint32,
882 req->activity_id, resp->activity_id);
890 mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr,
891 const enum mana_command_code expected_code,
892 const uint32_t min_size)
894 if (resp_hdr->response.msg_type != expected_code)
897 if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1)
900 if (resp_hdr->response.msg_size < min_size)
907 mana_query_device_cfg(struct mana_context *ac, uint32_t proto_major_ver,
908 uint32_t proto_minor_ver, uint32_t proto_micro_ver,
909 uint16_t *max_num_vports)
911 struct gdma_context *gc = ac->gdma_dev->gdma_context;
912 struct mana_query_device_cfg_resp resp = {};
913 struct mana_query_device_cfg_req req = {};
914 device_t dev = gc->dev;
917 mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
918 sizeof(req), sizeof(resp));
919 req.proto_major_ver = proto_major_ver;
920 req.proto_minor_ver = proto_minor_ver;
921 req.proto_micro_ver = proto_micro_ver;
923 err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp));
925 device_printf(dev, "Failed to query config: %d\n", err);
929 err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_DEV_CONFIG,
931 if (err || resp.hdr.status) {
932 device_printf(dev, "Invalid query result: %d, 0x%x\n", err,
939 *max_num_vports = resp.max_num_vports;
941 mana_dbg(NULL, "mana max_num_vports from device = %d\n",
948 mana_query_vport_cfg(struct mana_port_context *apc, uint32_t vport_index,
949 uint32_t *max_sq, uint32_t *max_rq, uint32_t *num_indir_entry)
951 struct mana_query_vport_cfg_resp resp = {};
952 struct mana_query_vport_cfg_req req = {};
955 mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG,
956 sizeof(req), sizeof(resp));
958 req.vport_index = vport_index;
960 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
965 err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG,
973 *max_sq = resp.max_num_sq;
974 *max_rq = resp.max_num_rq;
975 *num_indir_entry = resp.num_indirection_ent;
977 apc->port_handle = resp.vport;
978 memcpy(apc->mac_addr, resp.mac_addr, ETHER_ADDR_LEN);
984 mana_cfg_vport(struct mana_port_context *apc, uint32_t protection_dom_id,
985 uint32_t doorbell_pg_id)
987 struct mana_config_vport_resp resp = {};
988 struct mana_config_vport_req req = {};
991 mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX,
992 sizeof(req), sizeof(resp));
993 req.vport = apc->port_handle;
994 req.pdid = protection_dom_id;
995 req.doorbell_pageid = doorbell_pg_id;
997 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1000 if_printf(apc->ndev, "Failed to configure vPort: %d\n", err);
1004 err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_TX,
1006 if (err || resp.hdr.status) {
1007 if_printf(apc->ndev, "Failed to configure vPort: %d, 0x%x\n",
1008 err, resp.hdr.status);
1015 apc->tx_shortform_allowed = resp.short_form_allowed;
1016 apc->tx_vp_offset = resp.tx_vport_offset;
1022 mana_cfg_vport_steering(struct mana_port_context *apc,
1024 bool update_default_rxobj, bool update_key,
1027 uint16_t num_entries = MANA_INDIRECT_TABLE_SIZE;
1028 struct mana_cfg_rx_steer_req *req = NULL;
1029 struct mana_cfg_rx_steer_resp resp = {};
1030 struct ifnet *ndev = apc->ndev;
1031 mana_handle_t *req_indir_tab;
1032 uint32_t req_buf_size;
1035 req_buf_size = sizeof(*req) + sizeof(mana_handle_t) * num_entries;
1036 req = malloc(req_buf_size, M_DEVBUF, M_WAITOK | M_ZERO);
1040 mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
1043 req->vport = apc->port_handle;
1044 req->num_indir_entries = num_entries;
1045 req->indir_tab_offset = sizeof(*req);
1046 req->rx_enable = rx;
1047 req->rss_enable = apc->rss_state;
1048 req->update_default_rxobj = update_default_rxobj;
1049 req->update_hashkey = update_key;
1050 req->update_indir_tab = update_tab;
1051 req->default_rxobj = apc->default_rxobj;
1054 memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);
1057 req_indir_tab = (mana_handle_t *)(req + 1);
1058 memcpy(req_indir_tab, apc->rxobj_table,
1059 req->num_indir_entries * sizeof(mana_handle_t));
1062 err = mana_send_request(apc->ac, req, req_buf_size, &resp,
1065 if_printf(ndev, "Failed to configure vPort RX: %d\n", err);
1069 err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_RX,
1072 if_printf(ndev, "vPort RX configuration failed: %d\n", err);
1076 if (resp.hdr.status) {
1077 if_printf(ndev, "vPort RX configuration failed: 0x%x\n",
1082 free(req, M_DEVBUF);
1087 mana_create_wq_obj(struct mana_port_context *apc,
1088 mana_handle_t vport,
1089 uint32_t wq_type, struct mana_obj_spec *wq_spec,
1090 struct mana_obj_spec *cq_spec,
1091 mana_handle_t *wq_obj)
1093 struct mana_create_wqobj_resp resp = {};
1094 struct mana_create_wqobj_req req = {};
1095 struct ifnet *ndev = apc->ndev;
1098 mana_gd_init_req_hdr(&req.hdr, MANA_CREATE_WQ_OBJ,
1099 sizeof(req), sizeof(resp));
1101 req.wq_type = wq_type;
1102 req.wq_gdma_region = wq_spec->gdma_region;
1103 req.cq_gdma_region = cq_spec->gdma_region;
1104 req.wq_size = wq_spec->queue_size;
1105 req.cq_size = cq_spec->queue_size;
1106 req.cq_moderation_ctx_id = cq_spec->modr_ctx_id;
1107 req.cq_parent_qid = cq_spec->attached_eq;
1109 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1112 if_printf(ndev, "Failed to create WQ object: %d\n", err);
1116 err = mana_verify_resp_hdr(&resp.hdr, MANA_CREATE_WQ_OBJ,
1118 if (err || resp.hdr.status) {
1119 if_printf(ndev, "Failed to create WQ object: %d, 0x%x\n", err,
1126 if (resp.wq_obj == INVALID_MANA_HANDLE) {
1127 if_printf(ndev, "Got an invalid WQ object handle\n");
1132 *wq_obj = resp.wq_obj;
1133 wq_spec->queue_index = resp.wq_id;
1134 cq_spec->queue_index = resp.cq_id;
1142 mana_destroy_wq_obj(struct mana_port_context *apc, uint32_t wq_type,
1143 mana_handle_t wq_obj)
1145 struct mana_destroy_wqobj_resp resp = {};
1146 struct mana_destroy_wqobj_req req = {};
1147 struct ifnet *ndev = apc->ndev;
1150 mana_gd_init_req_hdr(&req.hdr, MANA_DESTROY_WQ_OBJ,
1151 sizeof(req), sizeof(resp));
1152 req.wq_type = wq_type;
1153 req.wq_obj_handle = wq_obj;
1155 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1158 if_printf(ndev, "Failed to destroy WQ object: %d\n", err);
1162 err = mana_verify_resp_hdr(&resp.hdr, MANA_DESTROY_WQ_OBJ,
1164 if (err || resp.hdr.status)
1165 if_printf(ndev, "Failed to destroy WQ object: %d, 0x%x\n",
1166 err, resp.hdr.status);
1170 mana_destroy_eq(struct mana_context *ac)
1172 struct gdma_context *gc = ac->gdma_dev->gdma_context;
1173 struct gdma_queue *eq;
1179 for (i = 0; i < gc->max_num_queues; i++) {
1184 mana_gd_destroy_queue(gc, eq);
1187 free(ac->eqs, M_DEVBUF);
1192 mana_create_eq(struct mana_context *ac)
1194 struct gdma_dev *gd = ac->gdma_dev;
1195 struct gdma_context *gc = gd->gdma_context;
1196 struct gdma_queue_spec spec = {};
1200 ac->eqs = mallocarray(gc->max_num_queues, sizeof(struct mana_eq),
1201 M_DEVBUF, M_WAITOK | M_ZERO);
1205 spec.type = GDMA_EQ;
1206 spec.monitor_avl_buf = false;
1207 spec.queue_size = EQ_SIZE;
1208 spec.eq.callback = NULL;
1209 spec.eq.context = ac->eqs;
1210 spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
1212 for (i = 0; i < gc->max_num_queues; i++) {
1213 err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
1220 mana_destroy_eq(ac);
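/*
 * Fence an RQ: post a FENCE_RQ request and wait for the matching
 * CQE_RX_OBJECT_FENCE completion (delivered via mana_process_rx_cqe()),
 * after which no further RX completions are in flight for this queue.
 */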
1225 mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq)
1227 struct mana_fence_rq_resp resp = {};
1228 struct mana_fence_rq_req req = {};
1231 init_completion(&rxq->fence_event);
1233 mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ,
1234 sizeof(req), sizeof(resp));
1235 req.wq_obj_handle = rxq->rxobj;
1237 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1240 if_printf(apc->ndev, "Failed to fence RQ %u: %d\n",
1245 err = mana_verify_resp_hdr(&resp.hdr, MANA_FENCE_RQ, sizeof(resp));
1246 if (err || resp.hdr.status) {
1247 if_printf(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n",
1248 rxq->rxq_idx, err, resp.hdr.status);
1255 if (wait_for_completion_timeout(&rxq->fence_event, 10 * hz)) {
1256 if_printf(apc->ndev, "Failed to fence RQ %u: timed out\n",
1265 mana_fence_rqs(struct mana_port_context *apc)
1267 unsigned int rxq_idx;
1268 struct mana_rxq *rxq;
1271 for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
1272 rxq = apc->rxqs[rxq_idx];
1273 err = mana_fence_rq(apc, rxq);
1275 /* In case of any error, use sleep instead. */
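/*
 * Advance the WQ tail after completed work units. head and tail are
 * free-running counters, so the unsigned subtractions below give the
 * used space correctly even across 32-bit wraparound; if used space
 * grows after adding num_units, the tail is being moved past the head.
 */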
1282 mana_move_wq_tail(struct gdma_queue *wq, uint32_t num_units)
1284 uint32_t used_space_old;
1285 uint32_t used_space_new;
1287 used_space_old = wq->head - wq->tail;
1288 used_space_new = wq->head - (wq->tail + num_units);
1290 if (used_space_new > used_space_old) {
1292 "WARNING: new used space %u greater than old one %u\n",
1293 used_space_new, used_space_old);
1297 wq->tail += num_units;
1302 mana_poll_tx_cq(struct mana_cq *cq)
1304 struct gdma_comp *completions = cq->gdma_comp_buf;
1305 struct gdma_posted_wqe_info *wqe_info;
1306 struct mana_send_buf_info *tx_info;
1307 unsigned int pkt_transmitted = 0;
1308 unsigned int wqe_unit_cnt = 0;
1309 struct mana_txq *txq = cq->txq;
1310 struct mana_port_context *apc;
1311 uint16_t next_to_complete;
1314 int txq_idx = txq->idx;
1318 struct gdma_queue *gdma_wq;
1319 unsigned int avail_space;
1320 bool txq_full = false;
1323 apc = if_getsoftc(ndev);
1325 comp_read = mana_gd_poll_cq(cq->gdma_cq, completions,
1326 CQE_POLLING_BUFFER);
1331 next_to_complete = txq->next_to_complete;
1333 for (i = 0; i < comp_read; i++) {
1334 struct mana_tx_comp_oob *cqe_oob;
1336 if (!completions[i].is_sq) {
1337 mana_err(NULL, "WARNING: Not for SQ\n");
1341 cqe_oob = (struct mana_tx_comp_oob *)completions[i].cqe_data;
1342 if (cqe_oob->cqe_hdr.client_type !=
1343 MANA_CQE_COMPLETION) {
1345 "WARNING: Invalid CQE client type %u\n",
1346 cqe_oob->cqe_hdr.client_type);
1350 switch (cqe_oob->cqe_hdr.cqe_type) {
1354 case CQE_TX_SA_DROP:
1355 case CQE_TX_MTU_DROP:
1356 case CQE_TX_INVALID_OOB:
1357 case CQE_TX_INVALID_ETH_TYPE:
1358 case CQE_TX_HDR_PROCESSING_ERROR:
1359 case CQE_TX_VF_DISABLED:
1360 case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
1361 case CQE_TX_VPORT_DISABLED:
1362 case CQE_TX_VLAN_TAGGING_VIOLATION:
1365 "TX: txq %d CQE error %d, ntc = %d, "
1366 "pending sends = %d: err ignored.\n",
1367 txq_idx, cqe_oob->cqe_hdr.cqe_type,
1368 next_to_complete, txq->pending_sends);
1372 /* If the CQE type is unexpected, log an error,
1373 * and go through the error path.
1376 "ERROR: TX: Unexpected CQE type %d: HW BUG?\n",
1377 cqe_oob->cqe_hdr.cqe_type);
1380 if (txq->gdma_txq_id != completions[i].wq_num) {
1382 "txq gdma id not match completion wq num: "
1384 txq->gdma_txq_id, completions[i].wq_num);
1388 tx_info = &txq->tx_buf_info[next_to_complete];
1389 if (!tx_info->mbuf) {
1391 "WARNING: txq %d Empty mbuf on tx_info: %u, "
1392 "ntu = %u, pending_sends = %d, "
1393 "transmitted = %d, sa_drop = %d, i = %d, comp_read = %d\n",
1394 txq_idx, next_to_complete, txq->next_to_use,
1395 txq->pending_sends, pkt_transmitted, sa_drop,
1400 wqe_info = &tx_info->wqe_inf;
1401 wqe_unit_cnt += wqe_info->wqe_size_in_bu;
1403 mana_tx_unmap_mbuf(apc, tx_info);
1407 (next_to_complete + 1) % MAX_SEND_BUFFERS_PER_QUEUE;
1412 txq->next_to_complete = next_to_complete;
1414 if (wqe_unit_cnt == 0) {
1416 "WARNING: TX ring not proceeding!\n");
1420 mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt);
1422 /* Ensure tail updated before checking q stop */
1425 gdma_wq = txq->gdma_sq;
1426 avail_space = mana_gd_wq_avail_space(gdma_wq);
1429 if ((if_getdrvflags(ndev) & MANA_TXQ_FULL) == MANA_TXQ_FULL) {
1433 /* Ensure checking txq_full before apc->port_is_up. */
1436 if (txq_full && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
1437 /* Grab the txq lock and re-test */
1438 mtx_lock(&txq->txq_mtx);
1439 avail_space = mana_gd_wq_avail_space(gdma_wq);
1441 if ((if_getdrvflags(ndev) & MANA_TXQ_FULL) == MANA_TXQ_FULL &&
1442 apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
1443 /* Clear the Q full flag */
1444 if_setdrvflagbits(apc->ndev, IFF_DRV_RUNNING,
1446 counter_u64_add(txq->stats.wakeup, 1);
1447 if (txq->alt_txq_idx != txq->idx) {
1448 uint64_t stops = counter_u64_fetch(txq->stats.stop);
1449 uint64_t wakeups = counter_u64_fetch(txq->stats.wakeup);
1450 /* Reset alt_txq_idx back if it is not overloaded */
1451 if (stops < wakeups) {
1452 txq->alt_txq_idx = txq->idx;
1453 counter_u64_add(txq->stats.alt_reset, 1);
1457 /* Schedule a tx enqueue task */
1458 taskqueue_enqueue(txq->enqueue_tq, &txq->enqueue_task);
1460 mtx_unlock(&txq->txq_mtx);
1463 if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0)
1465 "WARNING: TX %d pending_sends error: %d\n",
1466 txq->idx, txq->pending_sends);
1468 cq->work_done = pkt_transmitted;
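/*
 * Repost a receive WQE for the buffer at the current buf_index and
 * ring the RQ doorbell; buf_index cycles through num_rx_buf entries.
 */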
1472 mana_post_pkt_rxq(struct mana_rxq *rxq)
1474 struct mana_recv_buf_oob *recv_buf_oob;
1475 uint32_t curr_index;
1478 curr_index = rxq->buf_index++;
1479 if (rxq->buf_index == rxq->num_rx_buf)
1482 recv_buf_oob = &rxq->rx_oobs[curr_index];
1484 err = mana_gd_post_and_ring(rxq->gdma_rq, &recv_buf_oob->wqe_req,
1485 &recv_buf_oob->wqe_inf);
1487 mana_err(NULL, "WARNING: rxq %u post pkt err %d\n",
1492 if (recv_buf_oob->wqe_inf.wqe_size_in_bu != 1) {
1493 mana_err(NULL, "WARNING: rxq %u wqe_size_in_bu %u\n",
1494 rxq->rxq_idx, recv_buf_oob->wqe_inf.wqe_size_in_bu);
1499 mana_rx_mbuf(struct mbuf *mbuf, struct mana_rxcomp_oob *cqe,
1500 struct mana_rxq *rxq)
1502 struct mana_stats *rx_stats = &rxq->stats;
1503 struct ifnet *ndev = rxq->ndev;
1504 uint32_t pkt_len = cqe->ppi[0].pkt_len;
1505 uint16_t rxq_idx = rxq->rxq_idx;
1506 struct mana_port_context *apc;
1507 bool do_lro = false;
1510 apc = if_getsoftc(ndev);
1511 rxq->rx_cq.work_done++;
1517 mbuf->m_flags |= M_PKTHDR;
1518 mbuf->m_pkthdr.len = pkt_len;
1519 mbuf->m_len = pkt_len;
1520 mbuf->m_pkthdr.rcvif = ndev;
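/*
 * Translate the CQE checksum-verified bits into mbuf csum flags.
 * csum_data of 0xffff together with CSUM_DATA_VALID | CSUM_PSEUDO_HDR
 * tells the stack the L4 checksum is already verified, so software
 * verification is skipped.
 */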
1522 if ((ndev->if_capenable & IFCAP_RXCSUM ||
1523 ndev->if_capenable & IFCAP_RXCSUM_IPV6) &&
1524 (cqe->rx_iphdr_csum_succeed)) {
1525 mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
1526 mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1527 if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed) {
1528 mbuf->m_pkthdr.csum_flags |=
1529 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1530 mbuf->m_pkthdr.csum_data = 0xffff;
1532 if (cqe->rx_tcp_csum_succeed)
1537 if (cqe->rx_hashtype != 0) {
1538 mbuf->m_pkthdr.flowid = cqe->ppi[0].pkt_hash;
1540 uint16_t hashtype = cqe->rx_hashtype;
1541 if (hashtype & NDIS_HASH_IPV4_MASK) {
1542 hashtype &= NDIS_HASH_IPV4_MASK;
1544 case NDIS_HASH_TCP_IPV4:
1545 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV4);
1547 case NDIS_HASH_UDP_IPV4:
1548 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV4);
1551 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV4);
1553 } else if (hashtype & NDIS_HASH_IPV6_MASK) {
1554 hashtype &= NDIS_HASH_IPV6_MASK;
1556 case NDIS_HASH_TCP_IPV6:
1557 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV6);
1559 case NDIS_HASH_TCP_IPV6_EX:
1560 M_HASHTYPE_SET(mbuf,
1561 M_HASHTYPE_RSS_TCP_IPV6_EX);
1563 case NDIS_HASH_UDP_IPV6:
1564 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV6);
1566 case NDIS_HASH_UDP_IPV6_EX:
1567 M_HASHTYPE_SET(mbuf,
1568 M_HASHTYPE_RSS_UDP_IPV6_EX);
1571 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV6);
1574 M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH);
1577 mbuf->m_pkthdr.flowid = rxq_idx;
1578 M_HASHTYPE_SET(mbuf, M_HASHTYPE_NONE);
1582 if ((ndev->if_capenable & IFCAP_LRO) && do_lro) {
1583 if (rxq->lro.lro_cnt != 0 &&
1584 tcp_lro_rx(&rxq->lro, mbuf, 0) == 0)
1585 do_if_input = false;
1588 ndev->if_input(ndev, mbuf);
1592 counter_u64_add_protected(rx_stats->packets, 1);
1593 counter_u64_add_protected(apc->port_stats.rx_packets, 1);
1594 counter_u64_add_protected(rx_stats->bytes, pkt_len);
1595 counter_u64_add_protected(apc->port_stats.rx_bytes, pkt_len);
1600 mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
1601 struct gdma_comp *cqe)
1603 struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data;
1604 struct mana_recv_buf_oob *rxbuf_oob;
1605 struct ifnet *ndev = rxq->ndev;
1606 struct mana_port_context *apc;
1607 struct mbuf *old_mbuf;
1608 uint32_t curr, pktlen;
1611 switch (oob->cqe_hdr.cqe_type) {
1615 case CQE_RX_TRUNCATED:
1616 apc = if_getsoftc(ndev);
1617 counter_u64_add(apc->port_stats.rx_drops, 1);
1618 rxbuf_oob = &rxq->rx_oobs[rxq->buf_index];
1619 if_printf(ndev, "Dropped a truncated packet\n");
1622 case CQE_RX_COALESCED_4:
1623 if_printf(ndev, "RX coalescing is unsupported\n");
1626 case CQE_RX_OBJECT_FENCE:
1627 complete(&rxq->fence_event);
1631 if_printf(ndev, "Unknown RX CQE type = %d\n",
1632 oob->cqe_hdr.cqe_type);
1636 if (oob->cqe_hdr.cqe_type != CQE_RX_OKAY)
1639 pktlen = oob->ppi[0].pkt_len;
1642 /* data packets should never have a packet length of zero */
1643 #if defined(__amd64__)
1644 if_printf(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%lx\n",
1645 rxq->gdma_id, cq->gdma_id, rxq->rxobj);
1647 if_printf(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%llx\n",
1648 rxq->gdma_id, cq->gdma_id, rxq->rxobj);
1653 curr = rxq->buf_index;
1654 rxbuf_oob = &rxq->rx_oobs[curr];
1655 if (rxbuf_oob->wqe_inf.wqe_size_in_bu != 1) {
1656 mana_err(NULL, "WARNING: Rx Incorrect complete "
1658 rxbuf_oob->wqe_inf.wqe_size_in_bu);
1661 apc = if_getsoftc(ndev);
1663 old_mbuf = rxbuf_oob->mbuf;
1665 /* Unload DMA map for the old mbuf */
1666 mana_unload_rx_mbuf(apc, rxq, rxbuf_oob, false);
1668 /* Load a new mbuf to replace the old one */
1669 err = mana_load_rx_mbuf(apc, rxq, rxbuf_oob, true);
1672 "failed to load rx mbuf, err = %d, packet dropped.\n",
1674 counter_u64_add(rxq->stats.mbuf_alloc_fail, 1);
1676 * Failed to load new mbuf, rxbuf_oob->mbuf is still
1677 * pointing to the old one. Drop the packet.
1680 /* Reload the existing mbuf */
1681 mana_load_rx_mbuf(apc, rxq, rxbuf_oob, false);
1684 mana_rx_mbuf(old_mbuf, oob, rxq);
1687 mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);
1689 mana_post_pkt_rxq(rxq);
1693 mana_poll_rx_cq(struct mana_cq *cq)
1695 struct gdma_comp *comp = cq->gdma_comp_buf;
1698 comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
1699 KASSERT(comp_read <= CQE_POLLING_BUFFER,
1700 ("comp_read %d great than buf size %d",
1701 comp_read, CQE_POLLING_BUFFER));
1703 for (i = 0; i < comp_read; i++) {
1704 if (comp[i].is_sq == true) {
1706 "WARNING: CQE not for receive queue\n");
1710 /* verify recv cqe references the right rxq */
1711 if (comp[i].wq_num != cq->rxq->gdma_id) {
1713 "WARNING: Received CQE %d not for "
1714 "this receive queue %d\n",
1715 comp[i].wq_num, cq->rxq->gdma_id);
1719 mana_process_rx_cqe(cq->rxq, cq, &comp[i]);
1722 tcp_lro_flush_all(&cq->rxq->lro);
1726 mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
1728 struct mana_cq *cq = context;
1731 KASSERT(cq->gdma_cq == gdma_queue,
1732 ("cq do not match %p, %p", cq->gdma_cq, gdma_queue));
1734 if (cq->type == MANA_CQ_TYPE_RX) {
1735 mana_poll_rx_cq(cq);
1737 mana_poll_tx_cq(cq);
1740 if (cq->work_done < cq->budget && cq->do_not_ring_db == false)
1741 arm_bit = SET_ARM_BIT;
1745 mana_gd_ring_cq(gdma_queue, arm_bit);
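/*
 * Polling budgets: each cleanup task invocation runs mana_cq_handler()
 * for at most MANA_POLL_BUDGET rounds of up to "budget" completions,
 * and stops early (re-arming the CQ) once a round finishes under budget.
 */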
1748 #define MANA_POLL_BUDGET 8
1749 #define MANA_RX_BUDGET 256
1750 #define MANA_TX_BUDGET MAX_SEND_BUFFERS_PER_QUEUE
1753 mana_poll(void *arg, int pending)
1755 struct mana_cq *cq = arg;
1759 if (cq->type == MANA_CQ_TYPE_RX) {
1760 cq->budget = MANA_RX_BUDGET;
1762 cq->budget = MANA_TX_BUDGET;
1765 for (i = 0; i < MANA_POLL_BUDGET; i++) {
1767 * If this is the last loop, set the budget big enough
1768 * so that it will arm the CQ anyway.
1770 if (i == (MANA_POLL_BUDGET - 1))
1771 cq->budget = CQE_POLLING_BUFFER + 1;
1773 mana_cq_handler(cq, cq->gdma_cq);
1775 if (cq->work_done < cq->budget)
1783 mana_schedule_task(void *arg, struct gdma_queue *gdma_queue)
1785 struct mana_cq *cq = arg;
1787 taskqueue_enqueue(cq->cleanup_tq, &cq->cleanup_task);
1791 mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq)
1793 struct gdma_dev *gd = apc->ac->gdma_dev;
1798 /* Drain cleanup taskqueue */
1799 if (cq->cleanup_tq) {
1800 while (taskqueue_cancel(cq->cleanup_tq,
1801 &cq->cleanup_task, NULL)) {
1802 taskqueue_drain(cq->cleanup_tq,
1806 taskqueue_free(cq->cleanup_tq);
1809 mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq);
1813 mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq)
1815 struct gdma_dev *gd = apc->ac->gdma_dev;
1816 struct mana_send_buf_info *txbuf_info;
1817 uint32_t pending_sends;
1823 if ((pending_sends = atomic_read(&txq->pending_sends)) > 0) {
1825 "WARNING: txq pending sends not zero: %u\n",
1829 if (txq->next_to_use != txq->next_to_complete) {
1831 "WARNING: txq buf not completed, "
1832 "next use %u, next complete %u\n",
1833 txq->next_to_use, txq->next_to_complete);
1836 /* Flush buf ring. Grab txq mtx lock */
1838 mtx_lock(&txq->txq_mtx);
1839 drbr_flush(apc->ndev, txq->txq_br);
1840 mtx_unlock(&txq->txq_mtx);
1841 buf_ring_free(txq->txq_br, M_DEVBUF);
1844 /* Drain taskqueue */
1845 if (txq->enqueue_tq) {
1846 while (taskqueue_cancel(txq->enqueue_tq,
1847 &txq->enqueue_task, NULL)) {
1848 taskqueue_drain(txq->enqueue_tq,
1849 &txq->enqueue_task);
1852 taskqueue_free(txq->enqueue_tq);
1855 if (txq->tx_buf_info) {
1856 /* Free all mbufs which are still in-flight */
1857 for (i = 0; i < MAX_SEND_BUFFERS_PER_QUEUE; i++) {
1858 txbuf_info = &txq->tx_buf_info[i];
1859 if (txbuf_info->mbuf) {
1860 mana_tx_unmap_mbuf(apc, txbuf_info);
1864 free(txq->tx_buf_info, M_DEVBUF);
1867 mana_free_counters((counter_u64_t *)&txq->stats,
1868 sizeof(txq->stats));
1870 mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq);
1872 mtx_destroy(&txq->txq_mtx);
1876 mana_destroy_txq(struct mana_port_context *apc)
1883 for (i = 0; i < apc->num_queues; i++) {
1884 mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
1886 mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);
1888 mana_deinit_txq(apc, &apc->tx_qp[i].txq);
1891 free(apc->tx_qp, M_DEVBUF);
1896 mana_create_txq(struct mana_port_context *apc, struct ifnet *net)
1898 struct mana_context *ac = apc->ac;
1899 struct gdma_dev *gd = ac->gdma_dev;
1900 struct mana_obj_spec wq_spec;
1901 struct mana_obj_spec cq_spec;
1902 struct gdma_queue_spec spec;
1903 struct gdma_context *gc;
1904 struct mana_txq *txq;
1911 apc->tx_qp = mallocarray(apc->num_queues, sizeof(struct mana_tx_qp),
1912 M_DEVBUF, M_WAITOK | M_ZERO);
1916 /* The minimum size of the WQE is 32 bytes, hence
1917 * MAX_SEND_BUFFERS_PER_QUEUE represents the maximum number of WQEs
1918 * the SQ can store. This value is then used to size other queues
1919 * to prevent overflow.
1921 txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32;
1922 KASSERT(IS_ALIGNED(txq_size, PAGE_SIZE),
1923 ("txq size not page aligned"));
1925 cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;
1926 cq_size = ALIGN(cq_size, PAGE_SIZE);
1928 gc = gd->gdma_context;
1930 for (i = 0; i < apc->num_queues; i++) {
1931 apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE;
1934 txq = &apc->tx_qp[i].txq;
1937 txq->vp_offset = apc->tx_vp_offset;
1939 txq->alt_txq_idx = i;
1941 memset(&spec, 0, sizeof(spec));
1942 spec.type = GDMA_SQ;
1943 spec.monitor_avl_buf = true;
1944 spec.queue_size = txq_size;
1945 err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq);
1949 /* Create SQ's CQ */
1950 cq = &apc->tx_qp[i].tx_cq;
1951 cq->type = MANA_CQ_TYPE_TX;
1955 memset(&spec, 0, sizeof(spec));
1956 spec.type = GDMA_CQ;
1957 spec.monitor_avl_buf = false;
1958 spec.queue_size = cq_size;
1959 spec.cq.callback = mana_schedule_task;
1960 spec.cq.parent_eq = ac->eqs[i].eq;
1961 spec.cq.context = cq;
1962 err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
1966 memset(&wq_spec, 0, sizeof(wq_spec));
1967 memset(&cq_spec, 0, sizeof(cq_spec));
1969 wq_spec.gdma_region = txq->gdma_sq->mem_info.gdma_region;
1970 wq_spec.queue_size = txq->gdma_sq->queue_size;
1972 cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
1973 cq_spec.queue_size = cq->gdma_cq->queue_size;
1974 cq_spec.modr_ctx_id = 0;
1975 cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
1977 err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
1978 &wq_spec, &cq_spec, &apc->tx_qp[i].tx_object);
1983 txq->gdma_sq->id = wq_spec.queue_index;
1984 cq->gdma_cq->id = cq_spec.queue_index;
1986 txq->gdma_sq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
1987 cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
1989 txq->gdma_txq_id = txq->gdma_sq->id;
1991 cq->gdma_id = cq->gdma_cq->id;
1994 "txq %d, txq gdma id %d, txq cq gdma id %d\n",
1995 i, txq->gdma_txq_id, cq->gdma_id);
1997 if (cq->gdma_id >= gc->max_num_cqs) {
1998 if_printf(net, "CQ id %u too large.\n", cq->gdma_id);
2003 gc->cq_table[cq->gdma_id] = cq->gdma_cq;
2005 /* Initialize tx specific data */
2006 txq->tx_buf_info = malloc(MAX_SEND_BUFFERS_PER_QUEUE *
2007 sizeof(struct mana_send_buf_info),
2008 M_DEVBUF, M_WAITOK | M_ZERO);
2009 if (unlikely(txq->tx_buf_info == NULL)) {
2011 "Failed to allocate tx buf info for SQ %u\n",
2018 snprintf(txq->txq_mtx_name, nitems(txq->txq_mtx_name),
2020 mtx_init(&txq->txq_mtx, txq->txq_mtx_name, NULL, MTX_DEF);
2022 txq->txq_br = buf_ring_alloc(4 * MAX_SEND_BUFFERS_PER_QUEUE,
2023 M_DEVBUF, M_WAITOK, &txq->txq_mtx);
2024 if (unlikely(txq->txq_br == NULL)) {
2026 "Failed to allocate buf ring for SQ %u\n",
2032 /* Allocate taskqueue for deferred send */
2033 TASK_INIT(&txq->enqueue_task, 0, mana_xmit_taskfunc, txq);
2034 txq->enqueue_tq = taskqueue_create_fast("mana_tx_enque",
2035 M_NOWAIT, taskqueue_thread_enqueue, &txq->enqueue_tq);
2036 if (unlikely(txq->enqueue_tq == NULL)) {
2038 "Unable to create tx %d enqueue task queue\n", i);
2042 taskqueue_start_threads(&txq->enqueue_tq, 1, PI_NET,
2043 "mana txq p%u-tx%d", apc->port_idx, i);
2045 mana_alloc_counters((counter_u64_t *)&txq->stats,
2046 sizeof(txq->stats));
2048 /* Allocate and start the cleanup task on CQ */
2049 cq->do_not_ring_db = false;
2051 NET_TASK_INIT(&cq->cleanup_task, 0, mana_poll, cq);
2053 taskqueue_create_fast("mana tx cq cleanup",
2054 M_WAITOK, taskqueue_thread_enqueue,
2057 if (apc->last_tx_cq_bind_cpu < 0)
2058 apc->last_tx_cq_bind_cpu = CPU_FIRST();
2059 cq->cpu = apc->last_tx_cq_bind_cpu;
2060 apc->last_tx_cq_bind_cpu = CPU_NEXT(apc->last_tx_cq_bind_cpu);
2062 if (apc->bind_cleanup_thread_cpu) {
2064 CPU_SETOF(cq->cpu, &cpu_mask);
2065 taskqueue_start_threads_cpuset(&cq->cleanup_tq,
2066 1, PI_NET, &cpu_mask,
2067 "mana cq p%u-tx%u-cpu%d",
2068 apc->port_idx, txq->idx, cq->cpu);
2070 taskqueue_start_threads(&cq->cleanup_tq, 1,
2071 PI_NET, "mana cq p%u-tx%u",
2072 apc->port_idx, txq->idx);
2075 mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
2080 mana_destroy_txq(apc);
2085 mana_destroy_rxq(struct mana_port_context *apc, struct mana_rxq *rxq,
2086 bool validate_state)
2088 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
2089 struct mana_recv_buf_oob *rx_oob;
2095 if (validate_state) {
2097 * XXX Cancel and drain cleanup task queue here.
2102 mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
2104 mana_deinit_cq(apc, &rxq->rx_cq);
2106 mana_free_counters((counter_u64_t *)&rxq->stats,
2107 sizeof(rxq->stats));
2109 /* Free LRO resources */
2110 tcp_lro_free(&rxq->lro);
2112 for (i = 0; i < rxq->num_rx_buf; i++) {
2113 rx_oob = &rxq->rx_oobs[i];
2116 mana_unload_rx_mbuf(apc, rxq, rx_oob, true);
2118 bus_dmamap_destroy(apc->rx_buf_tag, rx_oob->dma_map);
2122 mana_gd_destroy_queue(gc, rxq->gdma_rq);
2124 free(rxq, M_DEVBUF);
2127 #define MANA_WQE_HEADER_SIZE 16
2128 #define MANA_WQE_SGE_SIZE 16
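/*
 * Each RX WQE takes MANA_WQE_HEADER_SIZE plus MANA_WQE_SGE_SIZE per SGE,
 * rounded up to the 32-byte WQE unit; mana_alloc_rx_wqe() below uses
 * this to size the RQ, and one COMP_ENTRY_SIZE per buffer for the CQ.
 */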
2131 mana_alloc_rx_wqe(struct mana_port_context *apc,
2132 struct mana_rxq *rxq, uint32_t *rxq_size, uint32_t *cq_size)
2134 struct mana_recv_buf_oob *rx_oob;
2138 if (rxq->datasize == 0 || rxq->datasize > PAGE_SIZE) {
2140 "WARNING: Invalid rxq datasize %u\n", rxq->datasize);
2146 for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
2147 rx_oob = &rxq->rx_oobs[buf_idx];
2148 memset(rx_oob, 0, sizeof(*rx_oob));
2150 err = bus_dmamap_create(apc->rx_buf_tag, 0,
2154 "Failed to create rx DMA map for buf %d\n",
2159 err = mana_load_rx_mbuf(apc, rxq, rx_oob, true);
2162 "Failed to create rx DMA map for buf %d\n",
2164 bus_dmamap_destroy(apc->rx_buf_tag, rx_oob->dma_map);
2168 rx_oob->wqe_req.sgl = rx_oob->sgl;
2169 rx_oob->wqe_req.num_sge = rx_oob->num_sge;
2170 rx_oob->wqe_req.inline_oob_size = 0;
2171 rx_oob->wqe_req.inline_oob_data = NULL;
2172 rx_oob->wqe_req.flags = 0;
2173 rx_oob->wqe_req.client_data_unit = 0;
2175 *rxq_size += ALIGN(MANA_WQE_HEADER_SIZE +
2176 MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32);
2177 *cq_size += COMP_ENTRY_SIZE;
2184 mana_push_wqe(struct mana_rxq *rxq)
2186 struct mana_recv_buf_oob *rx_oob;
2190 for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
2191 rx_oob = &rxq->rx_oobs[buf_idx];
2193 err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req,
2202 static struct mana_rxq *
2203 mana_create_rxq(struct mana_port_context *apc, uint32_t rxq_idx,
2204 struct mana_eq *eq, struct ifnet *ndev)
2206 struct gdma_dev *gd = apc->ac->gdma_dev;
2207 struct mana_obj_spec wq_spec;
2208 struct mana_obj_spec cq_spec;
2209 struct gdma_queue_spec spec;
2210 struct mana_cq *cq = NULL;
2211 uint32_t cq_size, rq_size;
2212 struct gdma_context *gc;
2213 struct mana_rxq *rxq;
2216 gc = gd->gdma_context;
2218 rxq = malloc(sizeof(*rxq) +
2219 RX_BUFFERS_PER_QUEUE * sizeof(struct mana_recv_buf_oob),
2220 M_DEVBUF, M_WAITOK | M_ZERO);
2225 rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
2226 rxq->rxq_idx = rxq_idx;
2228 * The minimum size is MCLBYTES (2048) bytes for an mbuf cluster.
2229 * For now we only allow a maximum size of 4096.
2231 rxq->datasize = ALIGN(apc->frame_size, MCLBYTES);
2232 if (rxq->datasize > MAX_FRAME_SIZE)
2233 rxq->datasize = MAX_FRAME_SIZE;
2235 mana_dbg(NULL, "Setting rxq %d datasize %d\n",
2236 rxq_idx, rxq->datasize);
2238 rxq->rxobj = INVALID_MANA_HANDLE;
2240 err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
2244 /* Create LRO for the RQ */
2245 if (ndev->if_capenable & IFCAP_LRO) {
2246 err = tcp_lro_init(&rxq->lro);
2248 if_printf(ndev, "Failed to create LRO for rxq %d\n",
2251 rxq->lro.ifp = ndev;
2255 mana_alloc_counters((counter_u64_t *)&rxq->stats,
2256 sizeof(rxq->stats));
2258 rq_size = ALIGN(rq_size, PAGE_SIZE);
2259 cq_size = ALIGN(cq_size, PAGE_SIZE);
2262 memset(&spec, 0, sizeof(spec));
2263 spec.type = GDMA_RQ;
2264 spec.monitor_avl_buf = true;
2265 spec.queue_size = rq_size;
2266 err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq);
2270 /* Create RQ's CQ */
2272 cq->type = MANA_CQ_TYPE_RX;
2275 memset(&spec, 0, sizeof(spec));
2276 spec.type = GDMA_CQ;
2277 spec.monitor_avl_buf = false;
2278 spec.queue_size = cq_size;
2279 spec.cq.callback = mana_schedule_task;
2280 spec.cq.parent_eq = eq->eq;
2281 spec.cq.context = cq;
2282 err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
2286 memset(&wq_spec, 0, sizeof(wq_spec));
2287 memset(&cq_spec, 0, sizeof(cq_spec));
2288 wq_spec.gdma_region = rxq->gdma_rq->mem_info.gdma_region;
2289 wq_spec.queue_size = rxq->gdma_rq->queue_size;
2291 cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
2292 cq_spec.queue_size = cq->gdma_cq->queue_size;
2293 cq_spec.modr_ctx_id = 0;
2294 cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
2296 err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ,
2297 &wq_spec, &cq_spec, &rxq->rxobj);
2301 rxq->gdma_rq->id = wq_spec.queue_index;
2302 cq->gdma_cq->id = cq_spec.queue_index;
2304 rxq->gdma_rq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
2305 cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
2307 rxq->gdma_id = rxq->gdma_rq->id;
2308 cq->gdma_id = cq->gdma_cq->id;
2310 err = mana_push_wqe(rxq);
2314 if (cq->gdma_id >= gc->max_num_cqs) {
2319 gc->cq_table[cq->gdma_id] = cq->gdma_cq;
2321 /* Allocate and start the cleanup task on CQ */
2322 cq->do_not_ring_db = false;
2324 NET_TASK_INIT(&cq->cleanup_task, 0, mana_poll, cq);
2326 taskqueue_create_fast("mana rx cq cleanup",
2327 M_WAITOK, taskqueue_thread_enqueue,
2330 if (apc->last_rx_cq_bind_cpu < 0)
2331 apc->last_rx_cq_bind_cpu = CPU_FIRST();
2332 cq->cpu = apc->last_rx_cq_bind_cpu;
2333 apc->last_rx_cq_bind_cpu = CPU_NEXT(apc->last_rx_cq_bind_cpu);
2335 if (apc->bind_cleanup_thread_cpu) {
2337 CPU_SETOF(cq->cpu, &cpu_mask);
2338 taskqueue_start_threads_cpuset(&cq->cleanup_tq,
2339 1, PI_NET, &cpu_mask,
2340 "mana cq p%u-rx%u-cpu%d",
2341 apc->port_idx, rxq->rxq_idx, cq->cpu);
2343 taskqueue_start_threads(&cq->cleanup_tq, 1,
2344 PI_NET, "mana cq p%u-rx%u",
2345 apc->port_idx, rxq->rxq_idx);
2348 mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
2353 if_printf(ndev, "Failed to create RXQ: err = %d\n", err);
2355 mana_destroy_rxq(apc, rxq, false);
2358 mana_deinit_cq(apc, cq);
2364 mana_add_rx_queues(struct mana_port_context *apc, struct ifnet *ndev)
2366 struct mana_context *ac = apc->ac;
2367 struct mana_rxq *rxq;
2371 for (i = 0; i < apc->num_queues; i++) {
2372 rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
2381 apc->default_rxobj = apc->rxqs[0]->rxobj;
2387 mana_destroy_vport(struct mana_port_context *apc)
2389 struct mana_rxq *rxq;
2392 for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
2393 rxq = apc->rxqs[rxq_idx];
2397 mana_destroy_rxq(apc, rxq, true);
2398 apc->rxqs[rxq_idx] = NULL;
2401 mana_destroy_txq(apc);
2405 mana_create_vport(struct mana_port_context *apc, struct ifnet *net)
2407 struct gdma_dev *gd = apc->ac->gdma_dev;
2410 apc->default_rxobj = INVALID_MANA_HANDLE;
2412 err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
2416 return mana_create_txq(apc, net);
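/*
 * Default RSS indirection: spread the table entries over the queues
 * round-robin, e.g. with 4 queues the table reads 0,1,2,3,0,1,2,3,...
 */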
2420 static void mana_rss_table_init(struct mana_port_context *apc)
2424 for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
2425 apc->indir_table[i] = i % apc->num_queues;
2428 int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
2429 bool update_hash, bool update_tab)
2436 for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
2437 queue_idx = apc->indir_table[i];
2438 apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj;
2442 err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
2446 mana_fence_rqs(apc);
2452 mana_init_port(struct ifnet *ndev)
2454 struct mana_port_context *apc = if_getsoftc(ndev);
2455 uint32_t max_txq, max_rxq, max_queues;
2456 int port_idx = apc->port_idx;
2457 uint32_t num_indirect_entries;
2460 err = mana_init_port_context(apc);
2464 err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
2465 &num_indirect_entries);
2467 if_printf(ndev, "Failed to query info for vPort %d\n",
2472 max_queues = min_t(uint32_t, max_txq, max_rxq);
2473 if (apc->max_queues > max_queues)
2474 apc->max_queues = max_queues;
2476 if (apc->num_queues > apc->max_queues)
2477 apc->num_queues = apc->max_queues;
2482 bus_dma_tag_destroy(apc->rx_buf_tag);
2483 apc->rx_buf_tag = NULL;
2484 free(apc->rxqs, M_DEVBUF);
2490 mana_alloc_queues(struct ifnet *ndev)
2492 struct mana_port_context *apc = if_getsoftc(ndev);
2495 err = mana_create_vport(apc, ndev);
2499 err = mana_add_rx_queues(apc, ndev);
2503 apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;
2505 mana_rss_table_init(apc);
2507 err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
2514 mana_destroy_vport(apc);
2519 mana_up(struct mana_port_context *apc)
2523 mana_dbg(NULL, "mana_up called\n");
2525 err = mana_alloc_queues(apc->ndev);
2527 mana_err(NULL, "Faile alloc mana queues: %d\n", err);
2531 /* Add queue specific sysctl */
2532 mana_sysctl_add_queues(apc);
2534 apc->port_is_up = true;
2536 /* Ensure port state updated before txq state */
2539 if_link_state_change(apc->ndev, LINK_STATE_UP);
2540 if_setdrvflagbits(apc->ndev, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
2547 mana_init(void *arg)
2549 struct mana_port_context *apc = (struct mana_port_context *)arg;
2551 MANA_APC_LOCK_LOCK(apc);
2552 if (!apc->port_is_up) {
2555 MANA_APC_LOCK_UNLOCK(apc);
2559 mana_dealloc_queues(struct ifnet *ndev)
2561 struct mana_port_context *apc = if_getsoftc(ndev);
2562 struct mana_txq *txq;
2565 if (apc->port_is_up)
2568 /* No packet can be transmitted now since apc->port_is_up is false.
2569 * There is still a tiny chance that mana_poll_tx_cq() can re-enable
2570 * a txq because it may not see apc->port_is_up being cleared to
2571 * false in time, but that doesn't matter since mana_start_xmit() drops any
2572 * new packets due to apc->port_is_up being false.
2574 * Drain all the in-flight TX packets
2576 for (i = 0; i < apc->num_queues; i++) {
2577 txq = &apc->tx_qp[i].txq;
2579 struct mana_cq *tx_cq = &apc->tx_qp[i].tx_cq;
2580 struct mana_cq *rx_cq = &(apc->rxqs[i]->rx_cq);
2582 tx_cq->do_not_ring_db = true;
2583 rx_cq->do_not_ring_db = true;
2585 /* Schedule a cleanup task */
2586 taskqueue_enqueue(tx_cq->cleanup_tq, &tx_cq->cleanup_task);
2588 while (atomic_read(&txq->pending_sends) > 0)
2589 usleep_range(1000, 2000);
2592 /* We're 100% sure the queues can no longer be woken up, because
2593 * we're sure now mana_poll_tx_cq() can't be running.
2596 apc->rss_state = TRI_STATE_FALSE;
2597 err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
2599 if_printf(ndev, "Failed to disable vPort: %d\n", err);
2603 mana_destroy_vport(apc);
2609 mana_down(struct mana_port_context *apc)
2613 apc->port_st_save = apc->port_is_up;
2614 apc->port_is_up = false;
2616 /* Ensure port state updated before txq state */
2619 if (apc->port_st_save) {
2620 if_setdrvflagbits(apc->ndev, IFF_DRV_OACTIVE,
2622 if_link_state_change(apc->ndev, LINK_STATE_DOWN);
2624 mana_sysctl_free_queues(apc);
2626 err = mana_dealloc_queues(apc->ndev);
2628 if_printf(apc->ndev,
2629 "Failed to bring down mana interface: %d\n", err);
2637 mana_detach(struct ifnet *ndev)
2639 struct mana_port_context *apc = if_getsoftc(ndev);
2642 ether_ifdetach(ndev);
2647 MANA_APC_LOCK_LOCK(apc);
2648 err = mana_down(apc);
2649 MANA_APC_LOCK_UNLOCK(apc);
2651 mana_cleanup_port_context(apc);
2653 MANA_APC_LOCK_DESTROY(apc);
2655 free(apc, M_DEVBUF);
2661 mana_probe_port(struct mana_context *ac, int port_idx,
2662 struct ifnet **ndev_storage)
2664 struct gdma_context *gc = ac->gdma_dev->gdma_context;
2665 struct mana_port_context *apc;
2669 ndev = if_alloc_dev(IFT_ETHER, gc->dev);
2671 mana_err(NULL, "Failed to allocate ifnet struct\n");
2675 *ndev_storage = ndev;
2677 apc = malloc(sizeof(*apc), M_DEVBUF, M_WAITOK | M_ZERO);
2679 mana_err(NULL, "Failed to allocate port context\n");
2686 apc->max_queues = gc->max_num_queues;
2687 apc->num_queues = min_t(unsigned int,
2688 gc->max_num_queues, MANA_MAX_NUM_QUEUES);
2689 apc->port_handle = INVALID_MANA_HANDLE;
2690 apc->port_idx = port_idx;
2691 apc->frame_size = DEFAULT_FRAME_SIZE;
2692 apc->last_tx_cq_bind_cpu = -1;
2693 apc->last_rx_cq_bind_cpu = -1;
2695 MANA_APC_LOCK_INIT(apc);
2697 if_initname(ndev, device_get_name(gc->dev), port_idx);
2698 if_setdev(ndev, gc->dev);
2699 if_setsoftc(ndev, apc);
2701 if_setflags(ndev, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
2702 if_setinitfn(ndev, mana_init);
2703 if_settransmitfn(ndev, mana_start_xmit);
2704 if_setqflushfn(ndev, mana_qflush);
2705 if_setioctlfn(ndev, mana_ioctl);
2706 if_setgetcounterfn(ndev, mana_get_counter);
2708 if_setmtu(ndev, ETHERMTU);
2709 if_setbaudrate(ndev, IF_Gbps(100));
2711 mana_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);
2713 err = mana_init_port(ndev);
2717 ndev->if_capabilities |= IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6;
2718 ndev->if_capabilities |= IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6;
2719 ndev->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6;
2721 ndev->if_capabilities |= IFCAP_LRO | IFCAP_LINKSTATE;
2723 /* Enable all available capabilities by default. */
2724 ndev->if_capenable = ndev->if_capabilities;
2726 /* TSO parameters */
2727 ndev->if_hw_tsomax = MAX_MBUF_FRAGS * MANA_TSO_MAXSEG_SZ -
2728 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
2729 ndev->if_hw_tsomaxsegcount = MAX_MBUF_FRAGS;
2730 ndev->if_hw_tsomaxsegsize = PAGE_SIZE;
2732 ifmedia_init(&apc->media, IFM_IMASK,
2733 mana_ifmedia_change, mana_ifmedia_status);
2734 ifmedia_add(&apc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2735 ifmedia_set(&apc->media, IFM_ETHER | IFM_AUTO);
2737 ether_ifattach(ndev, apc->mac_addr);
2739 /* Initialize statistics */
2740 mana_alloc_counters((counter_u64_t *)&apc->port_stats,
2741 sizeof(struct mana_port_stats));
2742 mana_sysctl_add_port(apc);
2744 /* Tell the stack that the interface is not active */
2745 if_setdrvflagbits(ndev, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
2750 free(apc, M_DEVBUF);
2752 *ndev_storage = NULL;
2753 if_printf(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
2758 int mana_probe(struct gdma_dev *gd)
2760 struct gdma_context *gc = gd->gdma_context;
2761 device_t dev = gc->dev;
2762 struct mana_context *ac;
2766 device_printf(dev, "%s protocol version: %d.%d.%d\n", DEVICE_NAME,
2767 MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION);
2769 err = mana_gd_register_device(gd);
2773 ac = malloc(sizeof(*ac), M_DEVBUF, M_WAITOK | M_ZERO);
2779 gd->driver_data = ac;
2781 err = mana_create_eq(ac);
2785 err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
2786 MANA_MICRO_VERSION, &ac->num_ports);
2790 if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
2791 ac->num_ports = MAX_PORTS_IN_MANA_DEV;
2793 for (i = 0; i < ac->num_ports; i++) {
2794 err = mana_probe_port(ac, i, &ac->ports[i]);
2797 "Failed to probe mana port %d\n", i);
2810 mana_remove(struct gdma_dev *gd)
2812 struct gdma_context *gc = gd->gdma_context;
2813 struct mana_context *ac = gd->driver_data;
2814 device_t dev = gc->dev;
2818 for (i = 0; i < ac->num_ports; i++) {
2819 ndev = ac->ports[i];
2822 device_printf(dev, "No net device to remove\n");
2831 mana_destroy_eq(ac);
2834 mana_gd_deregister_device(gd);
2835 gd->driver_data = NULL;
2836 gd->gdma_context = NULL;