2 * Broadcom NetXtreme-C/E network driver.
4 * Copyright (c) 2016 Broadcom, All Rights Reserved.
5 * The term Broadcom refers to Broadcom Limited and/or its subsidiaries
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26 * THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #include <sys/types.h>
33 #include <sys/socket.h>
34 #include <sys/endian.h>
36 #include <net/if_var.h>
37 #include <net/ethernet.h>
38 #include <net/iflib.h>
41 #include "opt_inet6.h"
/*
 * Prototypes for the iflib device-independent TX/RX methods implemented
 * below and registered through the bnxt_txrx method table.
 * NOTE(review): the embedded original line numbering is discontinuous, so
 * some declarations and the parameter-list comment below are incomplete
 * in this extract.
 */
50 static int bnxt_isc_txd_encap(void *sc, if_pkt_info_t pi);
51 static void bnxt_isc_txd_flush(void *sc, uint16_t txqid, qidx_t pidx);
52 static int bnxt_isc_txd_credits_update(void *sc, uint16_t txqid, bool clear);
54 static void bnxt_isc_rxd_refill(void *sc, if_rxd_update_t iru);
56 /* uint16_t rxqid, uint8_t flid,
57 uint32_t pidx, uint64_t *paddrs, caddr_t *vaddrs, uint16_t count,
60 static void bnxt_isc_rxd_flush(void *sc, uint16_t rxqid, uint8_t flid,
62 static int bnxt_isc_rxd_available(void *sc, uint16_t rxqid, qidx_t idx,
64 static int bnxt_isc_rxd_pkt_get(void *sc, if_rxd_info_t ri);
66 static int bnxt_intr(void *sc);
/*
 * iflib TX/RX method table handed to the iflib framework; entries point
 * at the bnxt_isc_* handlers declared above.
 * NOTE(review): several initializer lines (and the closing brace) are on
 * lines missing from this extract.
 */
68 struct if_txrx bnxt_txrx = {
71 bnxt_isc_txd_credits_update,
72 bnxt_isc_rxd_available,
80 * Device Dependent Packet Transmit and Receive Functions
/*
 * TX length-hint lookup table, indexed by (pi->ipi_len >> 9) in
 * bnxt_isc_txd_encap() for packets shorter than 2048 bytes:
 * bucket 0 = <512, bucket 1 = <1K, buckets 2 and 3 = <2K.
 * Packets >= 2048 bytes use LHINT_GTE2K directly without this table.
 * NOTE(review): the closing brace of this initializer is on a line
 * missing from this extract.
 */
83 static const uint16_t bnxt_tx_lhint[] = {
84 TX_BD_SHORT_FLAGS_LHINT_LT512,
85 TX_BD_SHORT_FLAGS_LHINT_LT1K,
86 TX_BD_SHORT_FLAGS_LHINT_LT2K,
87 TX_BD_SHORT_FLAGS_LHINT_LT2K,
88 TX_BD_SHORT_FLAGS_LHINT_GTE2K,
/*
 * iflib txd_encap method: encode the DMA segments described by 'pi' into
 * TX BDs on the ring for queue pi->ipi_qsidx, advancing pi->ipi_new_pidx
 * to the next free producer index.  When checksum offload, TSO, or a VLAN
 * tag is requested, a second "hi" BD (tx_bd_long_hi) carrying the offload
 * metadata follows the first BD.
 * NOTE(review): this extract is missing lines — the return type, local
 * declarations (seg, need_hi, flags_type, lflags, cfa_meta), several
 * braces, and the return statement are not visible.
 */
92 bnxt_isc_txd_encap(void *sc, if_pkt_info_t pi)
94 struct bnxt_softc *softc = (struct bnxt_softc *)sc;
95 struct bnxt_ring *txr = &softc->tx_rings[pi->ipi_qsidx];
96 struct tx_bd_long *tbd;
97 struct tx_bd_long_hi *tbdh;
104 /* If we have offloads enabled, we need to use two BDs. */
105 if ((pi->ipi_csum_flags & (CSUM_OFFLOAD | CSUM_TSO | CSUM_IP)) ||
106 pi->ipi_mflags & M_VLANTAG)
109 /* TODO: Devices before Cu+B1 need to not mix long and short BDs */
/* Start filling at the producer index iflib handed us. */
112 pi->ipi_new_pidx = pi->ipi_pidx;
113 tbd = &((struct tx_bd_long *)txr->vaddr)[pi->ipi_new_pidx];
115 /* No need to byte-swap the opaque value */
/*
 * The opaque value's top byte carries the total BD count for this
 * packet; bnxt_isc_txd_credits_update() reads it back from the
 * completion (opaque >> 24) to credit descriptors.
 */
116 tbd->opaque = ((pi->ipi_nsegs + need_hi) << 24) | pi->ipi_new_pidx;
117 tbd->len = htole16(pi->ipi_segs[seg].ds_len);
118 tbd->addr = htole64(pi->ipi_segs[seg++].ds_addr);
/* BD count and length hint go in the first BD's flags_type field. */
119 flags_type = ((pi->ipi_nsegs + need_hi) <<
120 TX_BD_SHORT_FLAGS_BD_CNT_SFT) & TX_BD_SHORT_FLAGS_BD_CNT_MASK;
121 if (pi->ipi_len >= 2048)
122 flags_type |= TX_BD_SHORT_FLAGS_LHINT_GTE2K;
124 flags_type |= bnxt_tx_lhint[pi->ipi_len >> 9];
127 flags_type |= TX_BD_LONG_TYPE_TX_BD_LONG;
/* Offload path: fill the companion "hi" BD with TSO/csum/VLAN data. */
129 pi->ipi_new_pidx = RING_NEXT(txr, pi->ipi_new_pidx);
130 tbdh = &((struct tx_bd_long_hi *)txr->vaddr)[pi->ipi_new_pidx];
131 tbdh->mss = htole16(pi->ipi_tso_segsz);
132 tbdh->hdr_size = htole16((pi->ipi_ehdrlen + pi->ipi_ip_hlen +
133 pi->ipi_tcp_hlen) >> 1);
134 tbdh->cfa_action = 0;
137 if (pi->ipi_mflags & M_VLANTAG) {
138 /* TODO: Do we need to byte-swap the vtag here? */
139 cfa_meta = TX_BD_LONG_CFA_META_KEY_VLAN_TAG |
141 cfa_meta |= TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100;
143 tbdh->cfa_meta = htole32(cfa_meta);
144 if (pi->ipi_csum_flags & CSUM_TSO) {
145 lflags |= TX_BD_LONG_LFLAGS_LSO |
146 TX_BD_LONG_LFLAGS_T_IPID;
148 else if(pi->ipi_csum_flags & CSUM_OFFLOAD) {
149 lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM |
150 TX_BD_LONG_LFLAGS_IP_CHKSUM;
152 else if(pi->ipi_csum_flags & CSUM_IP) {
153 lflags |= TX_BD_LONG_LFLAGS_IP_CHKSUM;
155 tbdh->lflags = htole16(lflags);
158 flags_type |= TX_BD_SHORT_TYPE_TX_BD_SHORT;
/* One short BD per remaining DMA segment. */
161 for (; seg < pi->ipi_nsegs; seg++) {
162 tbd->flags_type = htole16(flags_type);
163 pi->ipi_new_pidx = RING_NEXT(txr, pi->ipi_new_pidx);
164 tbd = &((struct tx_bd_long *)txr->vaddr)[pi->ipi_new_pidx];
165 tbd->len = htole16(pi->ipi_segs[seg].ds_len);
166 tbd->addr = htole64(pi->ipi_segs[seg].ds_addr);
167 flags_type = TX_BD_SHORT_TYPE_TX_BD_SHORT;
/* Mark the final BD as end-of-packet, then advance past it. */
169 flags_type |= TX_BD_SHORT_FLAGS_PACKET_END;
170 tbd->flags_type = htole16(flags_type);
171 pi->ipi_new_pidx = RING_NEXT(txr, pi->ipi_new_pidx);
/*
 * iflib txd_flush method: ring the TX doorbell for queue 'txqid' to tell
 * the hardware that descriptors up to 'pidx' are ready.  The doorbell is
 * written twice as a workaround for older (pre-Cumulus+) silicon.
 * NOTE(review): the return type and braces are on lines missing from
 * this extract.
 */
177 bnxt_isc_txd_flush(void *sc, uint16_t txqid, qidx_t pidx)
179 struct bnxt_softc *softc = (struct bnxt_softc *)sc;
180 struct bnxt_ring *tx_ring = &softc->tx_rings[txqid];
182 /* pidx is what we last set ipi_new_pidx to */
183 BNXT_TX_DB(tx_ring, pidx);
184 /* TODO: Cumulus+ doesn't need the double doorbell */
185 BNXT_TX_DB(tx_ring, pidx);
/*
 * iflib txd_credits_update method: walk queue 'txqid''s TX completion
 * ring counting descriptors the hardware has finished with.  Each
 * TX_CMPL_TYPE_TX_L2 completion credits (opaque >> 24) descriptors —
 * the BD count bnxt_isc_txd_encap() stored in the opaque field.  When
 * 'clear' is set and credits were found, the consumer index/valid bit
 * are committed and the completion doorbell is written.
 * NOTE(review): this extract is missing lines — local declarations
 * (avail, type, err, last_cons, last_v_bit), the scan loop header, the
 * switch statement, several braces, and the return of 'avail'.
 */
190 bnxt_isc_txd_credits_update(void *sc, uint16_t txqid, bool clear)
192 struct bnxt_softc *softc = (struct bnxt_softc *)sc;
193 struct bnxt_cp_ring *cpr = &softc->tx_cp_rings[txqid];
194 struct tx_cmpl *cmpl = (struct tx_cmpl *)cpr->ring.vaddr;
/* Scan with local copies; cpr->cons/v_bit are only committed on clear. */
196 uint32_t cons = cpr->cons;
197 bool v_bit = cpr->v_bit;
206 NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
207 CMPL_PREFETCH_NEXT(cpr, cons);
/* Stop at the first completion the hardware hasn't written yet. */
209 if (!CMP_VALID(&cmpl[cons], v_bit))
212 type = cmpl[cons].flags_type & TX_CMPL_TYPE_MASK;
214 case TX_CMPL_TYPE_TX_L2:
215 err = (le16toh(cmpl[cons].errors_v) &
216 TX_CMPL_ERRORS_BUFFER_ERROR_MASK) >>
217 TX_CMPL_ERRORS_BUFFER_ERROR_SFT;
219 device_printf(softc->dev,
220 "TX completion error %u\n", err);
221 /* No need to byte-swap the opaque value */
222 avail += cmpl[cons].opaque >> 24;
224 * If we're not clearing, iflib only cares if there's
225 * at least one buffer. Don't scan the whole ring in
233 NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
234 if (!CMP_VALID(&cmpl[cons], v_bit))
237 device_printf(softc->dev,
238 "Unhandled TX completion type %u\n", type);
244 if (clear && avail) {
245 cpr->cons = last_cons;
246 cpr->v_bit = last_v_bit;
247 BNXT_CP_IDX_DISABLE_DB(&cpr->ring, cpr->cons);
/*
 * iflib rxd_refill method: post fresh receive buffers described by 'iru'
 * as producer BDs on either the RX ring (free-list 0) or the aggregation
 * ring, selected by iru->iru_flidx.  Each BD's opaque field encodes
 * queue id (top byte), free-list id, and — presumably — the fragment
 * index (the tail of the expression is on a missing line; confirm
 * against the full source).
 * NOTE(review): this extract is missing lines — local declarations,
 * the flid-based if/else scaffolding, braces, and the pidx wrap-around
 * body after the ring_size comparison.
 */
254 bnxt_isc_rxd_refill(void *sc, if_rxd_update_t iru)
256 struct bnxt_softc *softc = (struct bnxt_softc *)sc;
257 struct bnxt_ring *rx_ring;
258 struct rx_prod_pkt_bd *rxbd;
/* Unpack the refill request from iflib. */
269 rxqid = iru->iru_qsidx;
270 count = iru->iru_count;
271 len = iru->iru_buf_size;
272 pidx = iru->iru_pidx;
273 flid = iru->iru_flidx;
274 vaddrs = iru->iru_vaddrs;
275 paddrs = iru->iru_paddrs;
276 frag_idxs = iru->iru_idxs;
279 rx_ring = &softc->rx_rings[rxqid];
280 type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT;
283 rx_ring = &softc->ag_rings[rxqid];
284 type = RX_PROD_AGG_BD_TYPE_RX_PROD_AGG;
286 rxbd = (void *)rx_ring->vaddr;
288 for (i=0; i<count; i++) {
289 rxbd[pidx].flags_type = htole16(type);
290 rxbd[pidx].len = htole16(len);
291 /* No need to byte-swap the opaque value */
292 rxbd[pidx].opaque = (((rxqid & 0xff) << 24) | (flid << 16)
294 rxbd[pidx].addr = htole64(paddrs[i]);
295 if (++pidx == rx_ring->ring_size)
/*
 * iflib rxd_flush method: commit newly refilled RX (or aggregation,
 * per 'flid') buffers to the hardware.  The completion-ring consumer
 * doorbell is written first — required ordering, see the comment below —
 * then the RX producer doorbell is rung (twice, for pre-Cumulus+ parts).
 * NOTE(review): the return type, the flid if/else scaffolding, and
 * braces are on lines missing from this extract.
 */
302 bnxt_isc_rxd_flush(void *sc, uint16_t rxqid, uint8_t flid,
305 struct bnxt_softc *softc = (struct bnxt_softc *)sc;
306 struct bnxt_ring *rx_ring;
309 rx_ring = &softc->rx_rings[rxqid];
311 rx_ring = &softc->ag_rings[rxqid];
314 * We *must* update the completion ring before updating the RX ring
315 * or we will overrun the completion ring and the device will wedge for
/* UINT32_MAX appears to mark a not-yet-started consumer — TODO confirm */
318 if (softc->rx_cp_rings[rxqid].cons != UINT32_MAX)
319 BNXT_CP_IDX_DISABLE_DB(&softc->rx_cp_rings[rxqid].ring,
320 softc->rx_cp_rings[rxqid].cons);
321 /* We're given the last filled RX buffer here, not the next empty one */
322 BNXT_RX_DB(rx_ring, RING_NEXT(rx_ring, pidx));
323 /* TODO: Cumulus+ doesn't need the double doorbell */
324 BNXT_RX_DB(rx_ring, RING_NEXT(rx_ring, pidx));
/*
 * iflib rxd_available method: count how many packets are ready on RX
 * queue 'rxqid' by scanning the completion ring with local cons/v_bit
 * copies (the real cpr state is not advanced here).  L2 and TPA-end
 * completions consume a second "hi" entry plus one entry per
 * aggregation buffer; TPA-start consumes two entries; other types are
 * reported and skipped.
 * NOTE(review): this extract is missing lines — local declarations
 * (avail, type, ags, i), the budget loop header, the switch statement,
 * braces/breaks, and the return of 'avail'.
 */
329 bnxt_isc_rxd_available(void *sc, uint16_t rxqid, qidx_t idx, qidx_t budget)
331 struct bnxt_softc *softc = (struct bnxt_softc *)sc;
332 struct bnxt_cp_ring *cpr = &softc->rx_cp_rings[rxqid];
333 struct rx_pkt_cmpl *rcp;
334 struct rx_tpa_end_cmpl *rtpae;
335 struct cmpl_base *cmp = (struct cmpl_base *)cpr->ring.vaddr;
337 uint32_t cons = cpr->cons;
338 bool v_bit = cpr->v_bit;
344 NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
345 CMPL_PREFETCH_NEXT(cpr, cons);
/* Stop at the first entry the hardware hasn't made valid yet. */
347 if (!CMP_VALID(&cmp[cons], v_bit))
350 type = le16toh(cmp[cons].type) & CMPL_BASE_TYPE_MASK;
352 case CMPL_BASE_TYPE_RX_L2:
353 rcp = (void *)&cmp[cons];
354 ags = (rcp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK) >>
355 RX_PKT_CMPL_AGG_BUFS_SFT;
356 NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
357 CMPL_PREFETCH_NEXT(cpr, cons);
359 if (!CMP_VALID(&cmp[cons], v_bit))
362 /* Now account for all the AG completions */
363 for (i=0; i<ags; i++) {
364 NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
365 CMPL_PREFETCH_NEXT(cpr, cons);
366 if (!CMP_VALID(&cmp[cons], v_bit))
371 case CMPL_BASE_TYPE_RX_TPA_END:
372 rtpae = (void *)&cmp[cons];
373 ags = (rtpae->agg_bufs_v1 &
374 RX_TPA_END_CMPL_AGG_BUFS_MASK) >>
375 RX_TPA_END_CMPL_AGG_BUFS_SFT;
376 NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
377 CMPL_PREFETCH_NEXT(cpr, cons);
379 if (!CMP_VALID(&cmp[cons], v_bit))
381 /* Now account for all the AG completions */
382 for (i=0; i<ags; i++) {
383 NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
384 CMPL_PREFETCH_NEXT(cpr, cons);
385 if (!CMP_VALID(&cmp[cons], v_bit))
390 case CMPL_BASE_TYPE_RX_TPA_START:
391 NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
392 CMPL_PREFETCH_NEXT(cpr, cons);
394 if (!CMP_VALID(&cmp[cons], v_bit))
397 case CMPL_BASE_TYPE_RX_AGG:
400 device_printf(softc->dev,
401 "Unhandled completion type %d on RXQ %d\n",
404 /* Odd completion types use two completions */
406 NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
407 CMPL_PREFETCH_NEXT(cpr, cons);
409 if (!CMP_VALID(&cmp[cons], v_bit))
/*
 * Decode a plain L2 RX completion (rx_pkt_cmpl + rx_pkt_cmpl_hi plus
 * zero or more rx_abuf_cmpl aggregation entries) into the iflib
 * if_rxd_info 'ri': RSS hash, fragment list (free-list id / buffer
 * index / length taken from each opaque field), total length, VLAN tag,
 * and IP/L4 checksum status.  Advances cpr->cons/v_bit and ri->iri_cidx
 * across every completion entry consumed.
 * NOTE(review): this extract is missing lines — the return type, local
 * declarations (flags2, errors, ags, i), braces, and the return
 * statement are not visible.
 */
423 bnxt_pkt_get_l2(struct bnxt_softc *softc, if_rxd_info_t ri,
424 struct bnxt_cp_ring *cpr, uint16_t flags_type)
426 struct rx_pkt_cmpl *rcp;
427 struct rx_pkt_cmpl_hi *rcph;
428 struct rx_abuf_cmpl *acp;
434 rcp = &((struct rx_pkt_cmpl *)cpr->ring.vaddr)[cpr->cons];
436 /* Extract from the first 16-byte BD */
437 if (flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID) {
438 ri->iri_flowid = le32toh(rcp->rss_hash);
440 * TODO: Extract something useful from rcp->rss_hash_type
442 * May be documented in the "LSI ES"
443 * also check the firmware code.
445 ri->iri_rsstype = M_HASHTYPE_OPAQUE;
448 ri->iri_rsstype = M_HASHTYPE_NONE;
450 ags = (rcp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK) >>
451 RX_PKT_CMPL_AGG_BUFS_SFT;
452 ri->iri_nfrags = ags + 1;
453 /* No need to byte-swap the opaque value */
/* opaque layout matches bnxt_isc_rxd_refill(): flid in bits 16-23,
 * buffer index in the low 16 bits. */
454 ri->iri_frags[0].irf_flid = (rcp->opaque >> 16) & 0xff;
455 ri->iri_frags[0].irf_idx = rcp->opaque & 0xffff;
456 ri->iri_frags[0].irf_len = le16toh(rcp->len);
457 ri->iri_len = le16toh(rcp->len);
459 /* Now the second 16-byte BD */
460 NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
461 ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
462 rcph = &((struct rx_pkt_cmpl_hi *)cpr->ring.vaddr)[cpr->cons];
464 flags2 = le32toh(rcph->flags2);
465 errors = le16toh(rcph->errors_v2);
466 if ((flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_MASK) ==
467 RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) {
468 ri->iri_flags |= M_VLANTAG;
469 /* TODO: Should this be the entire 16-bits? */
470 ri->iri_vtag = le32toh(rcph->metadata) &
471 (RX_PKT_CMPL_METADATA_VID_MASK | RX_PKT_CMPL_METADATA_DE |
472 RX_PKT_CMPL_METADATA_PRI_MASK);
/* Translate hardware checksum results into mbuf csum flags. */
474 if (flags2 & RX_PKT_CMPL_FLAGS2_IP_CS_CALC) {
475 ri->iri_csum_flags |= CSUM_IP_CHECKED;
476 if (!(errors & RX_PKT_CMPL_ERRORS_IP_CS_ERROR))
477 ri->iri_csum_flags |= CSUM_IP_VALID;
479 if (flags2 & RX_PKT_CMPL_FLAGS2_L4_CS_CALC) {
480 ri->iri_csum_flags |= CSUM_L4_CALC;
481 if (!(errors & RX_PKT_CMPL_ERRORS_L4_CS_ERROR)) {
482 ri->iri_csum_flags |= CSUM_L4_VALID;
483 ri->iri_csum_data = 0xffff;
487 /* And finally the ag ring stuff. */
488 for (i=1; i < ri->iri_nfrags; i++) {
489 NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
490 ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
491 acp = &((struct rx_abuf_cmpl *)cpr->ring.vaddr)[cpr->cons];
493 /* No need to byte-swap the opaque value */
494 ri->iri_frags[i].irf_flid = (acp->opaque >> 16 & 0xff);
495 ri->iri_frags[i].irf_idx = acp->opaque & 0xffff;
496 ri->iri_frags[i].irf_len = le16toh(acp->len);
497 ri->iri_len += le16toh(acp->len);
/*
 * Decode a TPA (LRO aggregation) end completion into 'ri'.  The RSS,
 * VLAN, and checksum metadata come from the TPA-start completion that
 * bnxt_isc_rxd_pkt_get() stashed earlier in tpa_start[agg_id]; the
 * fragment list is built from that saved start entry, the aggregation
 * buffer completions, and the end completion itself.  Checksums are
 * marked valid unconditionally here (no error bits consulted),
 * presumably because TPA only aggregates verified segments — confirm
 * against firmware documentation.
 * NOTE(review): this extract is missing lines — the return type, local
 * declarations (agg_id, flags2, ags, i), braces, and the return
 * statement are not visible.
 */
504 bnxt_pkt_get_tpa(struct bnxt_softc *softc, if_rxd_info_t ri,
505 struct bnxt_cp_ring *cpr, uint16_t flags_type)
507 struct rx_tpa_end_cmpl *agend =
508 &((struct rx_tpa_end_cmpl *)cpr->ring.vaddr)[cpr->cons];
509 struct rx_tpa_end_cmpl_hi *agendh;
510 struct rx_abuf_cmpl *acp;
511 struct bnxt_full_tpa_start *tpas;
/* Look up the saved TPA-start pair for this aggregation id. */
518 agg_id = (agend->agg_id & RX_TPA_END_CMPL_AGG_ID_MASK) >>
519 RX_TPA_END_CMPL_AGG_ID_SFT;
520 tpas = &(softc->rx_rings[ri->iri_qsidx].tpa_start[agg_id]);
522 /* Extract from the first 16-byte BD */
523 if (le16toh(tpas->low.flags_type) & RX_TPA_START_CMPL_FLAGS_RSS_VALID) {
524 ri->iri_flowid = le32toh(tpas->low.rss_hash);
526 * TODO: Extract something useful from tpas->low.rss_hash_type
528 * May be documented in the "LSI ES"
529 * also check the firmware code.
531 ri->iri_rsstype = M_HASHTYPE_OPAQUE;
534 ri->iri_rsstype = M_HASHTYPE_NONE;
536 ags = (agend->agg_bufs_v1 & RX_TPA_END_CMPL_AGG_BUFS_MASK) >>
537 RX_TPA_END_CMPL_AGG_BUFS_SFT;
538 ri->iri_nfrags = ags + 1;
539 /* No need to byte-swap the opaque value */
540 ri->iri_frags[0].irf_flid = ((tpas->low.opaque >> 16) & 0xff);
541 ri->iri_frags[0].irf_idx = (tpas->low.opaque & 0xffff);
542 ri->iri_frags[0].irf_len = le16toh(tpas->low.len);
543 ri->iri_len = le16toh(tpas->low.len);
545 /* Now the second 16-byte BD */
546 NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
547 ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
548 agendh = &((struct rx_tpa_end_cmpl_hi *)cpr->ring.vaddr)[cpr->cons];
/* Metadata comes from the saved TPA-start "hi" entry, not agendh. */
550 flags2 = le32toh(tpas->high.flags2);
551 if ((flags2 & RX_TPA_START_CMPL_FLAGS2_META_FORMAT_MASK) ==
552 RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN) {
553 ri->iri_flags |= M_VLANTAG;
554 /* TODO: Should this be the entire 16-bits? */
555 ri->iri_vtag = le32toh(tpas->high.metadata) &
556 (RX_TPA_START_CMPL_METADATA_VID_MASK |
557 RX_TPA_START_CMPL_METADATA_DE |
558 RX_TPA_START_CMPL_METADATA_PRI_MASK);
560 if (flags2 & RX_TPA_START_CMPL_FLAGS2_IP_CS_CALC) {
561 ri->iri_csum_flags |= CSUM_IP_CHECKED;
562 ri->iri_csum_flags |= CSUM_IP_VALID;
564 if (flags2 & RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC) {
565 ri->iri_csum_flags |= CSUM_L4_CALC;
566 ri->iri_csum_flags |= CSUM_L4_VALID;
567 ri->iri_csum_data = 0xffff;
570 /* Now the ag ring stuff. */
571 for (i=1; i < ri->iri_nfrags; i++) {
572 NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
573 ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
574 acp = &((struct rx_abuf_cmpl *)cpr->ring.vaddr)[cpr->cons];
576 /* No need to byte-swap the opaque value */
577 ri->iri_frags[i].irf_flid = ((acp->opaque >> 16) & 0xff);
578 ri->iri_frags[i].irf_idx = (acp->opaque & 0xffff);
579 ri->iri_frags[i].irf_len = le16toh(acp->len);
580 ri->iri_len += le16toh(acp->len);
583 /* And finally, the empty BD at the end... */
585 /* No need to byte-swap the opaque value */
586 ri->iri_frags[i].irf_flid = ((agend->opaque >> 16) & 0xff);
587 ri->iri_frags[i].irf_idx = (agend->opaque & 0xffff);
588 ri->iri_frags[i].irf_len = le16toh(agend->len);
589 ri->iri_len += le16toh(agend->len);
594 /* If we return anything but zero, iflib will assert... */
/*
 * iflib rxd_pkt_get method: advance to the next valid completion on the
 * queue's completion ring and dispatch by type — L2 completions go to
 * bnxt_pkt_get_l2(), TPA-end to bnxt_pkt_get_tpa().  TPA-start pairs are
 * stashed in tpa_start[agg_id] for the later TPA-end, then the scan
 * continues (the loop header is on a missing line).  Unknown types are
 * logged and skipped.
 * NOTE(review): this extract is missing lines — the return type, local
 * declarations (flags_type, type, agg_id), the loop/switch scaffolding,
 * and the trailing statements of the default arm.
 */
596 bnxt_isc_rxd_pkt_get(void *sc, if_rxd_info_t ri)
598 struct bnxt_softc *softc = (struct bnxt_softc *)sc;
599 struct bnxt_cp_ring *cpr = &softc->rx_cp_rings[ri->iri_qsidx];
600 struct cmpl_base *cmp_q = (struct cmpl_base *)cpr->ring.vaddr;
601 struct cmpl_base *cmp;
602 struct rx_tpa_start_cmpl *rtpa;
608 NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
609 ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
610 CMPL_PREFETCH_NEXT(cpr, cpr->cons);
611 cmp = &((struct cmpl_base *)cpr->ring.vaddr)[cpr->cons];
613 flags_type = le16toh(cmp->type);
614 type = flags_type & CMPL_BASE_TYPE_MASK;
617 case CMPL_BASE_TYPE_RX_L2:
618 return bnxt_pkt_get_l2(softc, ri, cpr, flags_type);
619 case CMPL_BASE_TYPE_RX_TPA_END:
620 return bnxt_pkt_get_tpa(softc, ri, cpr, flags_type);
621 case CMPL_BASE_TYPE_RX_TPA_START:
/* Save both halves of the TPA-start pair for bnxt_pkt_get_tpa(). */
622 rtpa = (void *)&cmp_q[cpr->cons];
623 agg_id = (rtpa->agg_id &
624 RX_TPA_START_CMPL_AGG_ID_MASK) >>
625 RX_TPA_START_CMPL_AGG_ID_SFT;
626 softc->rx_rings[ri->iri_qsidx].tpa_start[agg_id].low = *rtpa;
628 NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
629 ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
630 CMPL_PREFETCH_NEXT(cpr, cpr->cons);
632 softc->rx_rings[ri->iri_qsidx].tpa_start[agg_id].high =
633 ((struct rx_tpa_start_cmpl_hi *)cmp_q)[cpr->cons];
636 device_printf(softc->dev,
637 "Unhandled completion type %d on RXQ %d get\n",
638 type, ri->iri_qsidx);
640 NEXT_CP_CONS_V(&cpr->ring, cpr->cons,
642 ri->iri_cidx = RING_NEXT(&cpr->ring,
644 CMPL_PREFETCH_NEXT(cpr, cpr->cons);
/*
 * NOTE(review): fragment of the bnxt_intr() stub (prototype above); its
 * signature and return are on lines missing from this extract.  The
 * visible body only logs that the stub was reached.
 */
656 struct bnxt_softc *softc = (struct bnxt_softc *)sc;
658 device_printf(softc->dev, "STUB: %s @ %s:%d\n", __func__, __FILE__, __LINE__);