2 * Broadcom NetXtreme-C/E network driver.
4 * Copyright (c) 2016 Broadcom, All Rights Reserved.
5 * The term Broadcom refers to Broadcom Limited and/or its subsidiaries
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26 * THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 #include <sys/types.h>
31 #include <sys/socket.h>
32 #include <sys/endian.h>
34 #include <net/if_var.h>
35 #include <net/ethernet.h>
36 #include <net/iflib.h>
39 #include "opt_inet6.h"
/*
 * Forward declarations for the iflib if_txrx methods implemented below.
 * All take the driver softc as an opaque void *.
 */
48 static int bnxt_isc_txd_encap(void *sc, if_pkt_info_t pi);
49 static void bnxt_isc_txd_flush(void *sc, uint16_t txqid, qidx_t pidx);
50 static int bnxt_isc_txd_credits_update(void *sc, uint16_t txqid, bool clear);
52 static void bnxt_isc_rxd_refill(void *sc, if_rxd_update_t iru);
54 /* uint16_t rxqid, uint8_t flid,
55 uint32_t pidx, uint64_t *paddrs, caddr_t *vaddrs, uint16_t count,
58 static void bnxt_isc_rxd_flush(void *sc, uint16_t rxqid, uint8_t flid,
60 static int bnxt_isc_rxd_available(void *sc, uint16_t rxqid, qidx_t idx,
62 static int bnxt_isc_rxd_pkt_get(void *sc, if_rxd_info_t ri);
64 static int bnxt_intr(void *sc);
/*
 * Method table handed to iflib; wires this driver's TX/RX descriptor
 * handlers and legacy interrupt entry point into the iflib framework.
 */
66 struct if_txrx bnxt_txrx = {
67 	.ift_txd_encap = bnxt_isc_txd_encap,
68 	.ift_txd_flush = bnxt_isc_txd_flush,
69 	.ift_txd_credits_update = bnxt_isc_txd_credits_update,
70 	.ift_rxd_available = bnxt_isc_rxd_available,
71 	.ift_rxd_pkt_get = bnxt_isc_rxd_pkt_get,
72 	.ift_rxd_refill = bnxt_isc_rxd_refill,
73 	.ift_rxd_flush = bnxt_isc_rxd_flush,
74 	.ift_legacy_intr = bnxt_intr
78 * Device Dependent Packet Transmit and Receive Functions
/*
 * TX length-hint lookup for packets shorter than 2KB, indexed by
 * (packet length >> 9) in bnxt_isc_txd_encap(); packets >= 2048 bytes
 * use TX_BD_SHORT_FLAGS_LHINT_GTE2K directly.
 */
81 static const uint16_t bnxt_tx_lhint[] = {
82 	TX_BD_SHORT_FLAGS_LHINT_LT512,
83 	TX_BD_SHORT_FLAGS_LHINT_LT1K,
84 	TX_BD_SHORT_FLAGS_LHINT_LT2K,
85 	TX_BD_SHORT_FLAGS_LHINT_LT2K,
86 	TX_BD_SHORT_FLAGS_LHINT_GTE2K,
/*
 * iflib txd_encap method: encode the packet described by 'pi' as a chain
 * of TX buffer descriptors on queue pi->ipi_qsidx.  The first BD carries
 * the flags/length-hint; when checksum/TSO offload or a VLAN tag is
 * requested a second "hi" BD is emitted with the offload metadata.  On
 * return pi->ipi_new_pidx is the next free producer index.
 */
90 bnxt_isc_txd_encap(void *sc, if_pkt_info_t pi)
92 	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
93 	struct bnxt_ring *txr = &softc->tx_rings[pi->ipi_qsidx];
94 	struct tx_bd_long *tbd;
95 	struct tx_bd_long_hi *tbdh;
102 	/* If we have offloads enabled, we need to use two BDs. */
103 	if ((pi->ipi_csum_flags & (CSUM_OFFLOAD | CSUM_TSO | CSUM_IP)) ||
104 	    pi->ipi_mflags & M_VLANTAG)
107 	/* TODO: Devices before Cu+B1 need to not mix long and short BDs */
110 	pi->ipi_new_pidx = pi->ipi_pidx;
111 	tbd = &((struct tx_bd_long *)txr->vaddr)[pi->ipi_new_pidx];
113 	/* No need to byte-swap the opaque value */
	/* opaque: total BD count in the top byte, producer index below it;
	 * read back in bnxt_isc_txd_credits_update() via opaque >> 24. */
114 	tbd->opaque = ((pi->ipi_nsegs + need_hi) << 24) | pi->ipi_new_pidx;
115 	tbd->len = htole16(pi->ipi_segs[seg].ds_len);
116 	tbd->addr = htole64(pi->ipi_segs[seg++].ds_addr);
117 	flags_type = ((pi->ipi_nsegs + need_hi) <<
118 	    TX_BD_SHORT_FLAGS_BD_CNT_SFT) & TX_BD_SHORT_FLAGS_BD_CNT_MASK;
	/* Length hint helps the chip size its DMA; table covers < 2KB. */
119 	if (pi->ipi_len >= 2048)
120 		flags_type |= TX_BD_SHORT_FLAGS_LHINT_GTE2K;
122 		flags_type |= bnxt_tx_lhint[pi->ipi_len >> 9];
125 		flags_type |= TX_BD_LONG_TYPE_TX_BD_LONG;
	/* Second (hi) BD: TSO MSS, header size in 16-bit words, VLAN/csum. */
127 		pi->ipi_new_pidx = RING_NEXT(txr, pi->ipi_new_pidx);
128 		tbdh = &((struct tx_bd_long_hi *)txr->vaddr)[pi->ipi_new_pidx];
129 		tbdh->kid_or_ts_high_mss = htole16(pi->ipi_tso_segsz);
130 		tbdh->kid_or_ts_low_hdr_size = htole16((pi->ipi_ehdrlen + pi->ipi_ip_hlen +
131 		    pi->ipi_tcp_hlen) >> 1);
132 		tbdh->cfa_action = 0;
135 		if (pi->ipi_mflags & M_VLANTAG) {
136 			/* TODO: Do we need to byte-swap the vtag here? */
137 			cfa_meta = TX_BD_LONG_CFA_META_KEY_VLAN_TAG |
139 			cfa_meta |= TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100;
141 			tbdh->cfa_meta = htole32(cfa_meta);
142 		if (pi->ipi_csum_flags & CSUM_TSO) {
143 			lflags |= TX_BD_LONG_LFLAGS_LSO |
144 			    TX_BD_LONG_LFLAGS_T_IPID;
146 		else if(pi->ipi_csum_flags & CSUM_OFFLOAD) {
147 			lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM |
148 			    TX_BD_LONG_LFLAGS_IP_CHKSUM;
150 		else if(pi->ipi_csum_flags & CSUM_IP) {
151 			lflags |= TX_BD_LONG_LFLAGS_IP_CHKSUM;
153 		tbdh->lflags = htole16(lflags);
156 		flags_type |= TX_BD_SHORT_TYPE_TX_BD_SHORT;
	/* Remaining DMA segments become plain short BDs; the PACKET_END
	 * flag is set only on the final BD of the chain. */
159 	for (; seg < pi->ipi_nsegs; seg++) {
160 		tbd->flags_type = htole16(flags_type);
161 		pi->ipi_new_pidx = RING_NEXT(txr, pi->ipi_new_pidx);
162 		tbd = &((struct tx_bd_long *)txr->vaddr)[pi->ipi_new_pidx];
163 		tbd->len = htole16(pi->ipi_segs[seg].ds_len);
164 		tbd->addr = htole64(pi->ipi_segs[seg].ds_addr);
165 		flags_type = TX_BD_SHORT_TYPE_TX_BD_SHORT;
167 	flags_type |= TX_BD_SHORT_FLAGS_PACKET_END;
168 	tbd->flags_type = htole16(flags_type);
169 	pi->ipi_new_pidx = RING_NEXT(txr, pi->ipi_new_pidx);
/*
 * iflib txd_flush method: ring the TX doorbell to tell the NIC new BDs
 * are available up to (but not including) 'pidx'.
 */
175 bnxt_isc_txd_flush(void *sc, uint16_t txqid, qidx_t pidx)
177 	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
178 	struct bnxt_ring *tx_ring = &softc->tx_rings[txqid];
180 	/* pidx is what we last set ipi_new_pidx to */
181 	softc->db_ops.bnxt_db_tx(tx_ring, pidx);
/*
 * iflib txd_credits_update method: scan the TX completion ring and count
 * how many TX descriptors the hardware has finished with.  Works on local
 * copies of the consumer index and valid-phase bit; only commits them back
 * (and rings the completion doorbell) when 'clear' is set and descriptors
 * were reclaimed.  Returns the number of reclaimed descriptors ('avail').
 */
186 bnxt_isc_txd_credits_update(void *sc, uint16_t txqid, bool clear)
188 	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
189 	struct bnxt_cp_ring *cpr = &softc->tx_cp_rings[txqid];
190 	struct tx_cmpl *cmpl = (struct tx_cmpl *)cpr->ring.vaddr;
192 	uint32_t cons = cpr->cons;
193 	bool v_bit = cpr->v_bit;
202 		NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
203 		CMPL_PREFETCH_NEXT(cpr, cons);
	/* Stop at the first completion the hardware hasn't written yet. */
205 		if (!CMP_VALID(&cmpl[cons], v_bit))
208 		type = cmpl[cons].flags_type & TX_CMPL_TYPE_MASK;
210 		case TX_CMPL_TYPE_TX_L2:
211 			err = (le16toh(cmpl[cons].errors_v) &
212 			    TX_CMPL_ERRORS_BUFFER_ERROR_MASK) >>
213 			    TX_CMPL_ERRORS_BUFFER_ERROR_SFT;
215 				device_printf(softc->dev,
216 				    "TX completion error %u\n", err);
217 			/* No need to byte-swap the opaque value */
			/* Top byte of opaque holds the BD count stored at
			 * encap time; that many descriptors are now free. */
218 			avail += cmpl[cons].opaque >> 24;
220 			 * If we're not clearing, iflib only cares if there's
221 			 * at least one buffer.  Don't scan the whole ring in
229 				NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
230 				if (!CMP_VALID(&cmpl[cons], v_bit))
233 			device_printf(softc->dev,
234 			    "Unhandled TX completion type %u\n", type);
	/* Commit progress and ring the doorbell only when asked to clear. */
240 	if (clear && avail) {
241 		cpr->cons = last_cons;
242 		cpr->v_bit = last_v_bit;
243 		softc->db_ops.bnxt_db_tx_cq(cpr, 0);
/*
 * iflib rxd_refill method: post fresh receive buffers to either the RX
 * ring or the aggregation ring (selected by iru->iru_flidx) starting at
 * iru->iru_pidx, filling 'count' producer BDs with the DMA addresses in
 * iru->iru_paddrs.
 */
250 bnxt_isc_rxd_refill(void *sc, if_rxd_update_t iru)
252 	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
253 	struct bnxt_ring *rx_ring;
254 	struct rx_prod_pkt_bd *rxbd;
264 	rxqid = iru->iru_qsidx;
265 	count = iru->iru_count;
266 	pidx = iru->iru_pidx;
267 	flid = iru->iru_flidx;
268 	paddrs = iru->iru_paddrs;
269 	frag_idxs = iru->iru_idxs;
	/* Free-list 0 is the normal RX ring; otherwise the AG ring. */
272 		rx_ring = &softc->rx_rings[rxqid];
273 		type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT;
276 		rx_ring = &softc->ag_rings[rxqid];
277 		type = RX_PROD_AGG_BD_TYPE_RX_PROD_AGG;
279 	rxbd = (void *)rx_ring->vaddr;
281 	for (i=0; i<count; i++) {
282 		rxbd[pidx].flags_type = htole16(type);
283 		rxbd[pidx].len = htole16(softc->rx_buf_size);
284 		/* No need to byte-swap the opaque value */
		/* opaque packs queue id and free-list id into the high bytes
		 * so bnxt_pkt_get_*() can recover them from the completion.
		 * Presumably the low 16 bits carry frag_idxs[i] — the line
		 * completing this expression is not visible here; confirm. */
285 		rxbd[pidx].opaque = (((rxqid & 0xff) << 24) | (flid << 16)
287 		rxbd[pidx].addr = htole64(paddrs[i]);
288 		if (++pidx == rx_ring->ring_size)
/*
 * iflib rxd_flush method: make newly refilled RX buffers visible to the
 * NIC.  Updates the completion-ring doorbell first (see comment below),
 * then the producer doorbell for the RX or AG ring.
 */
295 bnxt_isc_rxd_flush(void *sc, uint16_t rxqid, uint8_t flid,
298 	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
299 	struct bnxt_ring *rx_ring;
302 		rx_ring = &softc->rx_rings[rxqid];
304 		rx_ring = &softc->ag_rings[rxqid];
307 	 * We *must* update the completion ring before updating the RX ring
308 	 * or we will overrun the completion ring and the device will wedge for
311 	softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[rxqid], 0);
312 	softc->db_ops.bnxt_db_rx(rx_ring, pidx);
/*
 * iflib rxd_available method: count complete packets ready on the RX
 * completion ring without consuming them (works on local copies of the
 * consumer index and valid bit).  Each packet type occupies a different
 * number of completion entries: L2 and TPA_END use two plus one per
 * aggregation buffer, TPA_START uses two, and AGG entries are counted
 * with their parent.
 */
317 bnxt_isc_rxd_available(void *sc, uint16_t rxqid, qidx_t idx, qidx_t budget)
319 	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
320 	struct bnxt_cp_ring *cpr = &softc->rx_cp_rings[rxqid];
321 	struct rx_pkt_cmpl *rcp;
322 	struct rx_tpa_end_cmpl *rtpae;
323 	struct cmpl_base *cmp = (struct cmpl_base *)cpr->ring.vaddr;
325 	uint32_t cons = cpr->cons;
326 	bool v_bit = cpr->v_bit;
332 		NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
333 		CMPL_PREFETCH_NEXT(cpr, cons);
	/* Stop at the first entry hardware hasn't written yet. */
335 		if (!CMP_VALID(&cmp[cons], v_bit))
338 		type = le16toh(cmp[cons].type) & CMPL_BASE_TYPE_MASK;
340 		case CMPL_BASE_TYPE_RX_L2:
341 			rcp = (void *)&cmp[cons];
342 			ags = (rcp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK) >>
343 			    RX_PKT_CMPL_AGG_BUFS_SFT;
344 			NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
345 			CMPL_PREFETCH_NEXT(cpr, cons);
347 			if (!CMP_VALID(&cmp[cons], v_bit))
350 			/* Now account for all the AG completions */
351 			for (i=0; i<ags; i++) {
352 				NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
353 				CMPL_PREFETCH_NEXT(cpr, cons);
354 				if (!CMP_VALID(&cmp[cons], v_bit))
359 		case CMPL_BASE_TYPE_RX_TPA_END:
360 			rtpae = (void *)&cmp[cons];
361 			ags = (rtpae->agg_bufs_v1 &
362 			    RX_TPA_END_CMPL_AGG_BUFS_MASK) >>
363 			    RX_TPA_END_CMPL_AGG_BUFS_SFT;
364 			NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
365 			CMPL_PREFETCH_NEXT(cpr, cons);
367 			if (!CMP_VALID(&cmp[cons], v_bit))
369 			/* Now account for all the AG completions */
370 			for (i=0; i<ags; i++) {
371 				NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
372 				CMPL_PREFETCH_NEXT(cpr, cons);
373 				if (!CMP_VALID(&cmp[cons], v_bit))
	/* TPA_START is a two-entry completion; skip its second half. */
378 		case CMPL_BASE_TYPE_RX_TPA_START:
379 			NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
380 			CMPL_PREFETCH_NEXT(cpr, cons);
382 			if (!CMP_VALID(&cmp[cons], v_bit))
385 		case CMPL_BASE_TYPE_RX_AGG:
388 			device_printf(softc->dev,
389 			    "Unhandled completion type %d on RXQ %d\n",
392 			/* Odd completion types use two completions */
394 				NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
395 				CMPL_PREFETCH_NEXT(cpr, cons);
397 				if (!CMP_VALID(&cmp[cons], v_bit))
/*
 * Translate the hardware RSS hash profile id from a completion into the
 * corresponding iflib/mbuf M_HASHTYPE_* value in ri->iri_rsstype.
 * Unrecognized profiles fall back to M_HASHTYPE_OPAQUE_HASH.
 */
411 bnxt_set_rsstype(if_rxd_info_t ri, uint8_t rss_hash_type)
413 	uint8_t rss_profile_id;
415 	rss_profile_id = BNXT_GET_RSS_PROFILE_ID(rss_hash_type);
416 	switch (rss_profile_id) {
417 	case BNXT_RSS_HASH_TYPE_TCPV4:
418 		ri->iri_rsstype = M_HASHTYPE_RSS_TCP_IPV4;
420 	case BNXT_RSS_HASH_TYPE_UDPV4:
421 		ri->iri_rsstype = M_HASHTYPE_RSS_UDP_IPV4;
423 	case BNXT_RSS_HASH_TYPE_IPV4:
424 		ri->iri_rsstype = M_HASHTYPE_RSS_IPV4;
426 	case BNXT_RSS_HASH_TYPE_TCPV6:
427 		ri->iri_rsstype = M_HASHTYPE_RSS_TCP_IPV6;
429 	case BNXT_RSS_HASH_TYPE_UDPV6:
430 		ri->iri_rsstype = M_HASHTYPE_RSS_UDP_IPV6;
432 	case BNXT_RSS_HASH_TYPE_IPV6:
433 		ri->iri_rsstype = M_HASHTYPE_RSS_IPV6;
436 		ri->iri_rsstype = M_HASHTYPE_OPAQUE_HASH;
/*
 * Fill 'ri' from an ordinary L2 RX completion: the first 16-byte entry
 * supplies RSS hash, length, and the buffer identity packed into the
 * opaque field at refill time; the second 16-byte entry supplies VLAN
 * metadata and checksum-offload results; any aggregation-buffer
 * completions that follow contribute additional fragments.
 */
442 bnxt_pkt_get_l2(struct bnxt_softc *softc, if_rxd_info_t ri,
443     struct bnxt_cp_ring *cpr, uint16_t flags_type)
445 	struct rx_pkt_cmpl *rcp;
446 	struct rx_pkt_cmpl_hi *rcph;
447 	struct rx_abuf_cmpl *acp;
453 	rcp = &((struct rx_pkt_cmpl *)cpr->ring.vaddr)[cpr->cons];
455 	/* Extract from the first 16-byte BD */
456 	if (flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID) {
457 		ri->iri_flowid = le32toh(rcp->rss_hash);
458 		bnxt_set_rsstype(ri, rcp->rss_hash_type);
461 		ri->iri_rsstype = M_HASHTYPE_NONE;
463 	ags = (rcp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK) >>
464 	    RX_PKT_CMPL_AGG_BUFS_SFT;
465 	ri->iri_nfrags = ags + 1;
466 	/* No need to byte-swap the opaque value */
	/* opaque layout (set in bnxt_isc_rxd_refill): free-list id in bits
	 * 23:16, buffer index in the low 16 bits. */
467 	ri->iri_frags[0].irf_flid = (rcp->opaque >> 16) & 0xff;
468 	ri->iri_frags[0].irf_idx = rcp->opaque & 0xffff;
469 	ri->iri_frags[0].irf_len = le16toh(rcp->len);
470 	ri->iri_len = le16toh(rcp->len);
472 	/* Now the second 16-byte BD */
473 	NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
474 	ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
475 	rcph = &((struct rx_pkt_cmpl_hi *)cpr->ring.vaddr)[cpr->cons];
477 	flags2 = le32toh(rcph->flags2);
478 	errors = le16toh(rcph->errors_v2);
479 	if ((flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_MASK) ==
480 	    RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) {
481 		ri->iri_flags |= M_VLANTAG;
482 		/* TODO: Should this be the entire 16-bits? */
483 		ri->iri_vtag = le32toh(rcph->metadata) &
484 		    (RX_PKT_CMPL_METADATA_VID_MASK | RX_PKT_CMPL_METADATA_DE |
485 		    RX_PKT_CMPL_METADATA_PRI_MASK);
	/* Report checksum-offload results to the stack; a set CALC bit with
	 * no error bit means the hardware verified the checksum. */
487 	if (flags2 & RX_PKT_CMPL_FLAGS2_IP_CS_CALC) {
488 		ri->iri_csum_flags |= CSUM_IP_CHECKED;
489 		if (!(errors & RX_PKT_CMPL_ERRORS_IP_CS_ERROR))
490 			ri->iri_csum_flags |= CSUM_IP_VALID;
492 	if (flags2 & (RX_PKT_CMPL_FLAGS2_L4_CS_CALC |
493 	    RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)) {
494 		ri->iri_csum_flags |= CSUM_L4_CALC;
495 		if (!(errors & (RX_PKT_CMPL_ERRORS_L4_CS_ERROR |
496 		    RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR))) {
497 			ri->iri_csum_flags |= CSUM_L4_VALID;
498 			ri->iri_csum_data = 0xffff;
502 	/* And finally the ag ring stuff. */
503 	for (i=1; i < ri->iri_nfrags; i++) {
504 		NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
505 		ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
506 		acp = &((struct rx_abuf_cmpl *)cpr->ring.vaddr)[cpr->cons];
508 		/* No need to byte-swap the opaque value */
509 		ri->iri_frags[i].irf_flid = (acp->opaque >> 16 & 0xff);
510 		ri->iri_frags[i].irf_idx = acp->opaque & 0xffff;
511 		ri->iri_frags[i].irf_len = le16toh(acp->len);
512 		ri->iri_len += le16toh(acp->len);
/*
 * Fill 'ri' for a TPA (hardware LRO) aggregation that has just ended.
 * The RSS/VLAN/checksum metadata comes from the TPA_START completion
 * pair cached earlier in tpa_start[agg_id] (see bnxt_isc_rxd_pkt_get);
 * the TPA_END completion supplies the aggregation-buffer count and the
 * final fragment.
 */
519 bnxt_pkt_get_tpa(struct bnxt_softc *softc, if_rxd_info_t ri,
520     struct bnxt_cp_ring *cpr, uint16_t flags_type)
522 	struct rx_tpa_end_cmpl *agend =
523 	    &((struct rx_tpa_end_cmpl *)cpr->ring.vaddr)[cpr->cons];
524 	struct rx_abuf_cmpl *acp;
525 	struct bnxt_full_tpa_start *tpas;
532 	agg_id = (agend->agg_id & RX_TPA_END_CMPL_AGG_ID_MASK) >>
533 	    RX_TPA_END_CMPL_AGG_ID_SFT;
534 	tpas = &(softc->rx_rings[ri->iri_qsidx].tpa_start[agg_id]);
536 	/* Extract from the first 16-byte BD */
537 	if (le16toh(tpas->low.flags_type) & RX_TPA_START_CMPL_FLAGS_RSS_VALID) {
538 		ri->iri_flowid = le32toh(tpas->low.rss_hash);
539 		bnxt_set_rsstype(ri, tpas->low.rss_hash_type);
542 		ri->iri_rsstype = M_HASHTYPE_NONE;
544 	ags = (agend->agg_bufs_v1 & RX_TPA_END_CMPL_AGG_BUFS_MASK) >>
545 	    RX_TPA_END_CMPL_AGG_BUFS_SFT;
546 	ri->iri_nfrags = ags + 1;
547 	/* No need to byte-swap the opaque value */
	/* opaque layout as in refill: free-list id in bits 23:16, buffer
	 * index in the low 16 bits. */
548 	ri->iri_frags[0].irf_flid = ((tpas->low.opaque >> 16) & 0xff);
549 	ri->iri_frags[0].irf_idx = (tpas->low.opaque & 0xffff);
550 	ri->iri_frags[0].irf_len = le16toh(tpas->low.len);
551 	ri->iri_len = le16toh(tpas->low.len);
553 	/* Now the second 16-byte BD */
554 	NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
555 	ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
557 	flags2 = le32toh(tpas->high.flags2);
558 	if ((flags2 & RX_TPA_START_CMPL_FLAGS2_META_FORMAT_MASK) ==
559 	    RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN) {
560 		ri->iri_flags |= M_VLANTAG;
561 		/* TODO: Should this be the entire 16-bits? */
562 		ri->iri_vtag = le32toh(tpas->high.metadata) &
563 		    (RX_TPA_START_CMPL_METADATA_VID_MASK |
564 		    RX_TPA_START_CMPL_METADATA_DE |
565 		    RX_TPA_START_CMPL_METADATA_PRI_MASK);
	/* Unlike the L2 path, no error bits are consulted here: a set CALC
	 * bit is treated directly as a valid checksum. */
567 	if (flags2 & RX_TPA_START_CMPL_FLAGS2_IP_CS_CALC) {
568 		ri->iri_csum_flags |= CSUM_IP_CHECKED;
569 		ri->iri_csum_flags |= CSUM_IP_VALID;
571 	if (flags2 & RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC) {
572 		ri->iri_csum_flags |= CSUM_L4_CALC;
573 		ri->iri_csum_flags |= CSUM_L4_VALID;
574 		ri->iri_csum_data = 0xffff;
577 	/* Now the ag ring stuff. */
578 	for (i=1; i < ri->iri_nfrags; i++) {
579 		NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
580 		ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
581 		acp = &((struct rx_abuf_cmpl *)cpr->ring.vaddr)[cpr->cons];
583 		/* No need to byte-swap the opaque value */
584 		ri->iri_frags[i].irf_flid = ((acp->opaque >> 16) & 0xff);
585 		ri->iri_frags[i].irf_idx = (acp->opaque & 0xffff);
586 		ri->iri_frags[i].irf_len = le16toh(acp->len);
587 		ri->iri_len += le16toh(acp->len);
590 	/* And finally, the empty BD at the end... */
592 	/* No need to byte-swap the opaque value */
593 	ri->iri_frags[i].irf_flid = ((agend->opaque >> 16) & 0xff);
594 	ri->iri_frags[i].irf_idx = (agend->opaque & 0xffff);
595 	ri->iri_frags[i].irf_len = le16toh(agend->len);
596 	ri->iri_len += le16toh(agend->len);
601 /* If we return anything but zero, iflib will assert... */
/*
 * iflib rxd_pkt_get method: advance to the next valid completion and
 * dispatch on its type — L2 and TPA_END completions produce a packet
 * (via the helpers above); TPA_START completions are cached in
 * tpa_start[agg_id] for the later TPA_END; unknown types are logged
 * and skipped as two-entry completions.
 */
603 bnxt_isc_rxd_pkt_get(void *sc, if_rxd_info_t ri)
605 	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
606 	struct bnxt_cp_ring *cpr = &softc->rx_cp_rings[ri->iri_qsidx];
607 	struct cmpl_base *cmp_q = (struct cmpl_base *)cpr->ring.vaddr;
608 	struct cmpl_base *cmp;
609 	struct rx_tpa_start_cmpl *rtpa;
615 	NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
616 	ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
617 	CMPL_PREFETCH_NEXT(cpr, cpr->cons);
618 	cmp = &((struct cmpl_base *)cpr->ring.vaddr)[cpr->cons];
620 	flags_type = le16toh(cmp->type);
621 	type = flags_type & CMPL_BASE_TYPE_MASK;
624 	case CMPL_BASE_TYPE_RX_L2:
625 		return bnxt_pkt_get_l2(softc, ri, cpr, flags_type);
626 	case CMPL_BASE_TYPE_RX_TPA_END:
627 		return bnxt_pkt_get_tpa(softc, ri, cpr, flags_type);
628 	case CMPL_BASE_TYPE_RX_TPA_START:
629 		rtpa = (void *)&cmp_q[cpr->cons];
630 		agg_id = (rtpa->agg_id &
631 		    RX_TPA_START_CMPL_AGG_ID_MASK) >>
632 		    RX_TPA_START_CMPL_AGG_ID_SFT;
		/* Stash both halves of the TPA_START pair; consumed later by
		 * bnxt_pkt_get_tpa() when the matching TPA_END arrives. */
633 		softc->rx_rings[ri->iri_qsidx].tpa_start[agg_id].low = *rtpa;
635 		NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
636 		ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
637 		CMPL_PREFETCH_NEXT(cpr, cpr->cons);
639 		softc->rx_rings[ri->iri_qsidx].tpa_start[agg_id].high =
640 		    ((struct rx_tpa_start_cmpl_hi *)cmp_q)[cpr->cons];
643 		device_printf(softc->dev,
644 		    "Unhandled completion type %d on RXQ %d get\n",
645 		    type, ri->iri_qsidx);
647 			NEXT_CP_CONS_V(&cpr->ring, cpr->cons,
649 			ri->iri_cidx = RING_NEXT(&cpr->ring,
651 			CMPL_PREFETCH_NEXT(cpr, cpr->cons);
663 struct bnxt_softc *softc = (struct bnxt_softc *)sc;
665 device_printf(softc->dev, "STUB: %s @ %s:%d\n", __func__, __FILE__, __LINE__);