/*-
 * Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2016 Broadcom, All Rights Reserved.
 * The term Broadcom refers to Broadcom Limited and/or its subsidiaries
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/endian.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/ethernet.h>
#include <net/iflib.h>

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#include "bnxt.h"

/*
 * Function prototypes
 */

static int bnxt_isc_txd_encap(void *sc, if_pkt_info_t pi);
static void bnxt_isc_txd_flush(void *sc, uint16_t txqid, qidx_t pidx);
static int bnxt_isc_txd_credits_update(void *sc, uint16_t txqid, bool clear);

static void bnxt_isc_rxd_refill(void *sc, if_rxd_update_t iru);

/*
 * The old refill arguments (rxqid, flid, pidx, paddrs, vaddrs, count, and
 * buf_size) are now carried packed inside the if_rxd_update_t structure.
 */
static void bnxt_isc_rxd_flush(void *sc, uint16_t rxqid, uint8_t flid,
    qidx_t pidx);
static int bnxt_isc_rxd_available(void *sc, uint16_t rxqid, qidx_t idx,
    qidx_t budget);
static int bnxt_isc_rxd_pkt_get(void *sc, if_rxd_info_t ri);

static int bnxt_intr(void *sc);

struct if_txrx bnxt_txrx = {
        bnxt_isc_txd_encap,
        bnxt_isc_txd_flush,
        bnxt_isc_txd_credits_update,
        bnxt_isc_rxd_available,
        bnxt_isc_rxd_pkt_get,
        bnxt_isc_rxd_refill,
        bnxt_isc_rxd_flush,
        bnxt_intr
};

/*
 * Device Dependent Packet Transmit and Receive Functions
 */

static const uint16_t bnxt_tx_lhint[] = {
        TX_BD_SHORT_FLAGS_LHINT_LT512,
        TX_BD_SHORT_FLAGS_LHINT_LT1K,
        TX_BD_SHORT_FLAGS_LHINT_LT2K,
        TX_BD_SHORT_FLAGS_LHINT_LT2K,
        TX_BD_SHORT_FLAGS_LHINT_GTE2K,
};

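/*
 * Encapsulate an outgoing packet into TX buffer descriptors.  The first
 * segment gets a long BD, optionally followed by a long_hi BD carrying the
 * checksum/TSO/VLAN metadata; the remaining segments are marked as short
 * BDs.  The total BD count is stashed in the top byte of the opaque field
 * so the completion path can credit the descriptors back.
 */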
static int
bnxt_isc_txd_encap(void *sc, if_pkt_info_t pi)
{
        struct bnxt_softc *softc = (struct bnxt_softc *)sc;
        struct bnxt_ring *txr = &softc->tx_rings[pi->ipi_qsidx];
        struct tx_bd_long *tbd;
        struct tx_bd_long_hi *tbdh;
        bool need_hi = false;
        uint16_t flags_type;
        uint16_t lflags;
        uint32_t cfa_meta;
        int seg = 0;

        /* If we have offloads enabled, we need to use two BDs. */
        if ((pi->ipi_csum_flags & (CSUM_OFFLOAD | CSUM_TSO | CSUM_IP)) ||
            pi->ipi_mflags & M_VLANTAG)
                need_hi = true;

        /* TODO: Devices before Cu+B1 need to not mix long and short BDs */
        need_hi = true;

        pi->ipi_new_pidx = pi->ipi_pidx;
        tbd = &((struct tx_bd_long *)txr->vaddr)[pi->ipi_new_pidx];
        pi->ipi_ndescs = 0;
        /* No need to byte-swap the opaque value */
        tbd->opaque = ((pi->ipi_nsegs + need_hi) << 24) | pi->ipi_new_pidx;
        tbd->len = htole16(pi->ipi_segs[seg].ds_len);
        tbd->addr = htole64(pi->ipi_segs[seg++].ds_addr);
        flags_type = ((pi->ipi_nsegs + need_hi) <<
            TX_BD_SHORT_FLAGS_BD_CNT_SFT) & TX_BD_SHORT_FLAGS_BD_CNT_MASK;
        if (pi->ipi_len >= 2048)
                flags_type |= TX_BD_SHORT_FLAGS_LHINT_GTE2K;
        else
                flags_type |= bnxt_tx_lhint[pi->ipi_len >> 9];

        if (need_hi) {
                flags_type |= TX_BD_LONG_TYPE_TX_BD_LONG;

                pi->ipi_new_pidx = RING_NEXT(txr, pi->ipi_new_pidx);
                tbdh = &((struct tx_bd_long_hi *)txr->vaddr)[pi->ipi_new_pidx];
                tbdh->mss = htole16(pi->ipi_tso_segsz);
                tbdh->hdr_size = htole16((pi->ipi_ehdrlen + pi->ipi_ip_hlen +
                    pi->ipi_tcp_hlen) >> 1);
                tbdh->cfa_action = 0;
                lflags = 0;
                cfa_meta = 0;
                if (pi->ipi_mflags & M_VLANTAG) {
                        /* TODO: Do we need to byte-swap the vtag here? */
                        cfa_meta = TX_BD_LONG_CFA_META_KEY_VLAN_TAG |
                            pi->ipi_vtag;
                        cfa_meta |= TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100;
                }
                tbdh->cfa_meta = htole32(cfa_meta);
                if (pi->ipi_csum_flags & CSUM_TSO) {
                        lflags |= TX_BD_LONG_LFLAGS_LSO |
                            TX_BD_LONG_LFLAGS_T_IPID;
                }
                else if (pi->ipi_csum_flags & CSUM_OFFLOAD) {
                        lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM |
                            TX_BD_LONG_LFLAGS_IP_CHKSUM;
                }
                else if (pi->ipi_csum_flags & CSUM_IP) {
                        lflags |= TX_BD_LONG_LFLAGS_IP_CHKSUM;
                }
                tbdh->lflags = htole16(lflags);
        }
        else {
                flags_type |= TX_BD_SHORT_TYPE_TX_BD_SHORT;
        }

        for (; seg < pi->ipi_nsegs; seg++) {
                tbd->flags_type = htole16(flags_type);
                pi->ipi_new_pidx = RING_NEXT(txr, pi->ipi_new_pidx);
                tbd = &((struct tx_bd_long *)txr->vaddr)[pi->ipi_new_pidx];
                tbd->len = htole16(pi->ipi_segs[seg].ds_len);
                tbd->addr = htole64(pi->ipi_segs[seg].ds_addr);
                flags_type = TX_BD_SHORT_TYPE_TX_BD_SHORT;
        }
        flags_type |= TX_BD_SHORT_FLAGS_PACKET_END;
        tbd->flags_type = htole16(flags_type);
        pi->ipi_new_pidx = RING_NEXT(txr, pi->ipi_new_pidx);

        return 0;
}

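/*
 * Notify the hardware of newly written TX descriptors by writing the
 * producer index to the TX ring doorbell.
 */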
static void
bnxt_isc_txd_flush(void *sc, uint16_t txqid, qidx_t pidx)
{
        struct bnxt_softc *softc = (struct bnxt_softc *)sc;
        struct bnxt_ring *tx_ring = &softc->tx_rings[txqid];

        /* pidx is what we last set ipi_new_pidx to */
        BNXT_TX_DB(tx_ring, pidx);
        /* TODO: Cumulus+ doesn't need the double doorbell */
        BNXT_TX_DB(tx_ring, pidx);
        return;
}

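/*
 * Walk the TX completion ring and return the number of descriptors the
 * hardware has finished with.  The BD count recovered from each
 * completion's opaque field is the value stored by bnxt_isc_txd_encap().
 * When "clear" is set, the consumer index is advanced and acknowledged.
 */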
static int
bnxt_isc_txd_credits_update(void *sc, uint16_t txqid, bool clear)
{
        struct bnxt_softc *softc = (struct bnxt_softc *)sc;
        struct bnxt_cp_ring *cpr = &softc->tx_cp_rings[txqid];
        struct tx_cmpl *cmpl = (struct tx_cmpl *)cpr->ring.vaddr;
        int avail = 0;
        uint32_t cons = cpr->cons;
        bool v_bit = cpr->v_bit;
        bool last_v_bit;
        uint32_t last_cons;
        uint16_t type;
        uint16_t err;

        for (;;) {
                last_cons = cons;
                last_v_bit = v_bit;
                NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
                CMPL_PREFETCH_NEXT(cpr, cons);

                if (!CMP_VALID(&cmpl[cons], v_bit))
                        goto done;

                type = cmpl[cons].flags_type & TX_CMPL_TYPE_MASK;
                switch (type) {
                case TX_CMPL_TYPE_TX_L2:
                        err = (le16toh(cmpl[cons].errors_v) &
                            TX_CMPL_ERRORS_BUFFER_ERROR_MASK) >>
                            TX_CMPL_ERRORS_BUFFER_ERROR_SFT;
                        if (err)
                                device_printf(softc->dev,
                                    "TX completion error %u\n", err);
                        /* No need to byte-swap the opaque value */
                        avail += cmpl[cons].opaque >> 24;
                        /*
                         * If we're not clearing, iflib only cares if there's
                         * at least one buffer.  Don't scan the whole ring in
                         * this case.
                         */
                        if (!clear)
                                goto done;
                        break;
                default:
                        if (type & 1) {
                                NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
                                if (!CMP_VALID(&cmpl[cons], v_bit))
                                        goto done;
                        }
                        device_printf(softc->dev,
                            "Unhandled TX completion type %u\n", type);
                        break;
                }
        }
done:

        if (clear && avail) {
                cpr->cons = last_cons;
                cpr->v_bit = last_v_bit;
                BNXT_CP_IDX_DISABLE_DB(&cpr->ring, cpr->cons);
        }

        return avail;
}

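/*
 * Post fresh receive buffers to either the RX ring (free-list 0) or the
 * aggregation ring (any other free-list).  The opaque field encodes the
 * queue, free-list and producer index so the buffer can be identified
 * again when its completion arrives.
 */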
static void
bnxt_isc_rxd_refill(void *sc, if_rxd_update_t iru)
{
        struct bnxt_softc *softc = (struct bnxt_softc *)sc;
        struct bnxt_ring *rx_ring;
        struct rx_prod_pkt_bd *rxbd;
        uint16_t type;
        uint16_t i;
        uint16_t rxqid;
        uint16_t count, len;
        uint32_t pidx;
        uint8_t flid;
        uint64_t *paddrs;
        caddr_t *vaddrs;

        rxqid = iru->iru_qsidx;
        count = iru->iru_count;
        len = iru->iru_buf_size;
        pidx = iru->iru_pidx;
        flid = iru->iru_flidx;
        vaddrs = iru->iru_vaddrs;
        paddrs = iru->iru_paddrs;

        if (flid == 0) {
                rx_ring = &softc->rx_rings[rxqid];
                type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT;
        }
        else {
                rx_ring = &softc->ag_rings[rxqid];
                type = RX_PROD_AGG_BD_TYPE_RX_PROD_AGG;
        }
        rxbd = (void *)rx_ring->vaddr;

        for (i = 0; i < count; i++) {
                rxbd[pidx].flags_type = htole16(type);
                rxbd[pidx].len = htole16(len);
                /* No need to byte-swap the opaque value */
                rxbd[pidx].opaque = ((rxqid & 0xff) << 24) | (flid << 16)
                    | pidx;
                rxbd[pidx].addr = htole64(paddrs[i]);
                if (++pidx == rx_ring->ring_size)
                        pidx = 0;
        }
        return;
}

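/*
 * Ring the doorbell for the RX or aggregation ring selected by flid.  The
 * completion ring consumer index is acknowledged first; see the ordering
 * comment below.
 */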
static void
bnxt_isc_rxd_flush(void *sc, uint16_t rxqid, uint8_t flid,
    qidx_t pidx)
{
        struct bnxt_softc *softc = (struct bnxt_softc *)sc;
        struct bnxt_ring *rx_ring;

        if (flid == 0)
                rx_ring = &softc->rx_rings[rxqid];
        else
                rx_ring = &softc->ag_rings[rxqid];

        /*
         * We *must* update the completion ring before updating the RX ring
         * or we will overrun the completion ring and the device will wedge for
         * RX.
         */
        if (softc->rx_cp_rings[rxqid].cons != UINT32_MAX)
                BNXT_CP_IDX_DISABLE_DB(&softc->rx_cp_rings[rxqid].ring,
                    softc->rx_cp_rings[rxqid].cons);
        /* We're given the last filled RX buffer here, not the next empty one */
        BNXT_RX_DB(rx_ring, RING_NEXT(rx_ring, pidx));
        /* TODO: Cumulus+ doesn't need the double doorbell */
        BNXT_RX_DB(rx_ring, RING_NEXT(rx_ring, pidx));
        return;
}

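/*
 * Count the packets available on the RX completion ring, stopping once the
 * budget is exceeded, using local copies of the consumer index and valid
 * bit.  TPA start completions are copied aside here so bnxt_pkt_get_tpa()
 * can find them when the matching TPA end completion is processed.
 */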
static int
bnxt_isc_rxd_available(void *sc, uint16_t rxqid, qidx_t idx, qidx_t budget)
{
        struct bnxt_softc *softc = (struct bnxt_softc *)sc;
        struct bnxt_cp_ring *cpr = &softc->rx_cp_rings[rxqid];
        struct rx_pkt_cmpl *rcp;
        struct rx_tpa_start_cmpl *rtpa;
        struct rx_tpa_end_cmpl *rtpae;
        struct cmpl_base *cmp = (struct cmpl_base *)cpr->ring.vaddr;
        int avail = 0;
        uint32_t cons = cpr->cons;
        bool v_bit = cpr->v_bit;
        uint8_t ags;
        int i;
        uint16_t type;
        uint8_t agg_id;

        for (;;) {
                NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
                CMPL_PREFETCH_NEXT(cpr, cons);

                if (!CMP_VALID(&cmp[cons], v_bit))
                        goto cmpl_invalid;

                type = le16toh(cmp[cons].type) & CMPL_BASE_TYPE_MASK;
                switch (type) {
                case CMPL_BASE_TYPE_RX_L2:
                        rcp = (void *)&cmp[cons];
                        ags = (rcp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK) >>
                            RX_PKT_CMPL_AGG_BUFS_SFT;
                        NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
                        CMPL_PREFETCH_NEXT(cpr, cons);

                        if (!CMP_VALID(&cmp[cons], v_bit))
                                goto cmpl_invalid;

                        /* Now account for all the AG completions */
                        for (i = 0; i < ags; i++) {
                                NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
                                CMPL_PREFETCH_NEXT(cpr, cons);
                                if (!CMP_VALID(&cmp[cons], v_bit))
                                        goto cmpl_invalid;
                        }
                        avail++;
                        break;
                case CMPL_BASE_TYPE_RX_TPA_END:
                        rtpae = (void *)&cmp[cons];
                        ags = (rtpae->agg_bufs_v1 &
                            RX_TPA_END_CMPL_AGG_BUFS_MASK) >>
                            RX_TPA_END_CMPL_AGG_BUFS_SFT;
                        NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
                        CMPL_PREFETCH_NEXT(cpr, cons);

                        if (!CMP_VALID(&cmp[cons], v_bit))
                                goto cmpl_invalid;
                        /* Now account for all the AG completions */
                        for (i = 0; i < ags; i++) {
                                NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
                                CMPL_PREFETCH_NEXT(cpr, cons);
                                if (!CMP_VALID(&cmp[cons], v_bit))
                                        goto cmpl_invalid;
                        }
                        avail++;
                        break;
                case CMPL_BASE_TYPE_RX_TPA_START:
                        rtpa = (void *)&cmp[cons];
                        agg_id = (rtpa->agg_id &
                            RX_TPA_START_CMPL_AGG_ID_MASK) >>
                            RX_TPA_START_CMPL_AGG_ID_SFT;
                        softc->tpa_start[agg_id].low = *rtpa;
                        NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
                        CMPL_PREFETCH_NEXT(cpr, cons);

                        if (!CMP_VALID(&cmp[cons], v_bit))
                                goto cmpl_invalid;
                        softc->tpa_start[agg_id].high =
                            ((struct rx_tpa_start_cmpl_hi *)cmp)[cons];
                        break;
                case CMPL_BASE_TYPE_RX_AGG:
                        break;
                default:
                        device_printf(softc->dev,
                            "Unhandled completion type %d on RXQ %d\n",
                            type, rxqid);

                        /* Odd completion types use two completions */
                        if (type & 1) {
                                NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
                                CMPL_PREFETCH_NEXT(cpr, cons);

                                if (!CMP_VALID(&cmp[cons], v_bit))
                                        goto cmpl_invalid;
                        }
                        break;
                }
                if (avail > budget)
                        break;
        }
cmpl_invalid:

        return avail;
}

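/*
 * Translate the RSS hash type reported by the hardware into the
 * corresponding M_HASHTYPE_* value for the stack.
 */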
static void
bnxt_set_rsstype(if_rxd_info_t ri, uint8_t rss_hash_type)
{
        uint8_t rss_profile_id;

        rss_profile_id = BNXT_GET_RSS_PROFILE_ID(rss_hash_type);
        switch (rss_profile_id) {
        case BNXT_RSS_HASH_TYPE_TCPV4:
                ri->iri_rsstype = M_HASHTYPE_RSS_TCP_IPV4;
                break;
        case BNXT_RSS_HASH_TYPE_UDPV4:
                ri->iri_rsstype = M_HASHTYPE_RSS_UDP_IPV4;
                break;
        case BNXT_RSS_HASH_TYPE_IPV4:
                ri->iri_rsstype = M_HASHTYPE_RSS_IPV4;
                break;
        case BNXT_RSS_HASH_TYPE_TCPV6:
                ri->iri_rsstype = M_HASHTYPE_RSS_TCP_IPV6;
                break;
        case BNXT_RSS_HASH_TYPE_UDPV6:
                ri->iri_rsstype = M_HASHTYPE_RSS_UDP_IPV6;
                break;
        case BNXT_RSS_HASH_TYPE_IPV6:
                ri->iri_rsstype = M_HASHTYPE_RSS_IPV6;
                break;
        default:
                ri->iri_rsstype = M_HASHTYPE_OPAQUE;
                break;
        }
}

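/*
 * Fill in the if_rxd_info from an ordinary L2 RX completion: the first
 * 16-byte completion carries the length, RSS hash and buffer reference,
 * the second carries VLAN and checksum status, and any aggregation buffer
 * completions that follow contribute additional fragments.
 */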
static int
bnxt_pkt_get_l2(struct bnxt_softc *softc, if_rxd_info_t ri,
    struct bnxt_cp_ring *cpr, uint16_t flags_type)
{
        struct rx_pkt_cmpl *rcp;
        struct rx_pkt_cmpl_hi *rcph;
        struct rx_abuf_cmpl *acp;
        uint32_t flags2;
        uint32_t errors;
        uint8_t ags;
        int i;

        rcp = &((struct rx_pkt_cmpl *)cpr->ring.vaddr)[cpr->cons];

        /* Extract from the first 16-byte BD */
        if (flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID) {
                ri->iri_flowid = le32toh(rcp->rss_hash);
                bnxt_set_rsstype(ri, rcp->rss_hash_type);
        }
        else {
                ri->iri_rsstype = M_HASHTYPE_NONE;
        }
        ags = (rcp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK) >>
            RX_PKT_CMPL_AGG_BUFS_SFT;
        ri->iri_nfrags = ags + 1;
        /* No need to byte-swap the opaque value */
        ri->iri_frags[0].irf_flid = (rcp->opaque >> 16) & 0xff;
        ri->iri_frags[0].irf_idx = rcp->opaque & 0xffff;
        ri->iri_frags[0].irf_len = le16toh(rcp->len);
        ri->iri_len = le16toh(rcp->len);

        /* Now the second 16-byte BD */
        NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
        ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
        rcph = &((struct rx_pkt_cmpl_hi *)cpr->ring.vaddr)[cpr->cons];

        flags2 = le32toh(rcph->flags2);
        errors = le16toh(rcph->errors_v2);
        if ((flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_MASK) ==
            RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) {
                ri->iri_flags |= M_VLANTAG;
                /* TODO: Should this be the entire 16-bits? */
                ri->iri_vtag = le32toh(rcph->metadata) &
                    (RX_PKT_CMPL_METADATA_VID_MASK | RX_PKT_CMPL_METADATA_DE |
                    RX_PKT_CMPL_METADATA_PRI_MASK);
        }
        if (flags2 & RX_PKT_CMPL_FLAGS2_IP_CS_CALC) {
                ri->iri_csum_flags |= CSUM_IP_CHECKED;
                if (!(errors & RX_PKT_CMPL_ERRORS_IP_CS_ERROR))
                        ri->iri_csum_flags |= CSUM_IP_VALID;
        }
        if (flags2 & RX_PKT_CMPL_FLAGS2_L4_CS_CALC) {
                ri->iri_csum_flags |= CSUM_L4_CALC;
                if (!(errors & RX_PKT_CMPL_ERRORS_L4_CS_ERROR)) {
                        ri->iri_csum_flags |= CSUM_L4_VALID;
                        ri->iri_csum_data = 0xffff;
                }
        }

        /* And finally the ag ring stuff. */
        for (i = 1; i < ri->iri_nfrags; i++) {
                NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
                ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
                acp = &((struct rx_abuf_cmpl *)cpr->ring.vaddr)[cpr->cons];

                /* No need to byte-swap the opaque value */
                ri->iri_frags[i].irf_flid = (acp->opaque >> 16) & 0xff;
                ri->iri_frags[i].irf_idx = acp->opaque & 0xffff;
                ri->iri_frags[i].irf_len = le16toh(acp->len);
                ri->iri_len += le16toh(acp->len);
        }

        return 0;
}

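/*
 * Fill in the if_rxd_info for a TPA (LRO) aggregation.  Most of the
 * metadata comes from the TPA start completion saved by
 * bnxt_isc_rxd_available(); the TPA end completion supplies the
 * aggregation buffer count and the final buffer reference.
 */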
static int
bnxt_pkt_get_tpa(struct bnxt_softc *softc, if_rxd_info_t ri,
    struct bnxt_cp_ring *cpr, uint16_t flags_type)
{
        struct rx_tpa_end_cmpl *agend =
            &((struct rx_tpa_end_cmpl *)cpr->ring.vaddr)[cpr->cons];
        struct rx_tpa_end_cmpl_hi *agendh;
        struct rx_abuf_cmpl *acp;
        struct bnxt_full_tpa_start *tpas;
        uint32_t flags2;
        uint8_t ags;
        uint8_t agg_id;
        int i;

        /* Get the agg_id */
        agg_id = (agend->agg_id & RX_TPA_END_CMPL_AGG_ID_MASK) >>
            RX_TPA_END_CMPL_AGG_ID_SFT;
        tpas = &softc->tpa_start[agg_id];

        /* Extract from the first 16-byte BD */
        if (le16toh(tpas->low.flags_type) & RX_TPA_START_CMPL_FLAGS_RSS_VALID) {
                ri->iri_flowid = le32toh(tpas->low.rss_hash);
                bnxt_set_rsstype(ri, tpas->low.rss_hash_type);
        }
        else {
                ri->iri_rsstype = M_HASHTYPE_NONE;
        }
        ags = (agend->agg_bufs_v1 & RX_TPA_END_CMPL_AGG_BUFS_MASK) >>
            RX_TPA_END_CMPL_AGG_BUFS_SFT;
        ri->iri_nfrags = ags + 1;
        /* No need to byte-swap the opaque value */
        ri->iri_frags[0].irf_flid = (tpas->low.opaque >> 16) & 0xff;
        ri->iri_frags[0].irf_idx = tpas->low.opaque & 0xffff;
        ri->iri_frags[0].irf_len = le16toh(tpas->low.len);
        ri->iri_len = le16toh(tpas->low.len);

        /* Now the second 16-byte BD */
        NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
        ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
        agendh = &((struct rx_tpa_end_cmpl_hi *)cpr->ring.vaddr)[cpr->cons];

        flags2 = le32toh(tpas->high.flags2);
        if ((flags2 & RX_TPA_START_CMPL_FLAGS2_META_FORMAT_MASK) ==
            RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN) {
                ri->iri_flags |= M_VLANTAG;
                /* TODO: Should this be the entire 16-bits? */
                ri->iri_vtag = le32toh(tpas->high.metadata) &
                    (RX_TPA_START_CMPL_METADATA_VID_MASK |
                    RX_TPA_START_CMPL_METADATA_DE |
                    RX_TPA_START_CMPL_METADATA_PRI_MASK);
        }
        if (flags2 & RX_TPA_START_CMPL_FLAGS2_IP_CS_CALC) {
                ri->iri_csum_flags |= CSUM_IP_CHECKED;
                ri->iri_csum_flags |= CSUM_IP_VALID;
        }
        if (flags2 & RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC) {
                ri->iri_csum_flags |= CSUM_L4_CALC;
                ri->iri_csum_flags |= CSUM_L4_VALID;
                ri->iri_csum_data = 0xffff;
        }

        /* Now the ag ring stuff. */
        for (i = 1; i < ri->iri_nfrags; i++) {
                NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
                ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
                acp = &((struct rx_abuf_cmpl *)cpr->ring.vaddr)[cpr->cons];

                /* No need to byte-swap the opaque value */
                ri->iri_frags[i].irf_flid = (acp->opaque >> 16) & 0xff;
                ri->iri_frags[i].irf_idx = acp->opaque & 0xffff;
                ri->iri_frags[i].irf_len = le16toh(acp->len);
                ri->iri_len += le16toh(acp->len);
        }

        /* And finally, the empty BD at the end... */
        ri->iri_nfrags++;
        /* No need to byte-swap the opaque value */
        ri->iri_frags[i].irf_flid = (agend->opaque >> 16) & 0xff;
        ri->iri_frags[i].irf_idx = agend->opaque & 0xffff;
        ri->iri_frags[i].irf_len = le16toh(agend->len);
        ri->iri_len += le16toh(agend->len);

        return 0;
}

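/*
 * Hand the next received packet to iflib: advance the completion ring
 * consumer index, dispatch on the completion type, and let
 * bnxt_pkt_get_l2()/bnxt_pkt_get_tpa() fill in the if_rxd_info.
 */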
/* If we return anything but zero, iflib will assert... */
static int
bnxt_isc_rxd_pkt_get(void *sc, if_rxd_info_t ri)
{
        struct bnxt_softc *softc = (struct bnxt_softc *)sc;
        struct bnxt_cp_ring *cpr = &softc->rx_cp_rings[ri->iri_qsidx];
        struct cmpl_base *cmp;
        uint16_t flags_type;
        uint16_t type;

        for (;;) {
                NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
                ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
                CMPL_PREFETCH_NEXT(cpr, cpr->cons);
                cmp = &((struct cmpl_base *)cpr->ring.vaddr)[cpr->cons];

                flags_type = le16toh(cmp->type);
                type = flags_type & CMPL_BASE_TYPE_MASK;

                switch (type) {
                case CMPL_BASE_TYPE_RX_L2:
                        return bnxt_pkt_get_l2(softc, ri, cpr, flags_type);
                case CMPL_BASE_TYPE_RX_TPA_END:
                        return bnxt_pkt_get_tpa(softc, ri, cpr, flags_type);
                case CMPL_BASE_TYPE_RX_TPA_START:
                        NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
                        ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
                        CMPL_PREFETCH_NEXT(cpr, cpr->cons);
                        break;
                default:
                        device_printf(softc->dev,
                            "Unhandled completion type %d on RXQ %d get\n",
                            type, ri->iri_qsidx);
                        if (type & 1) {
                                NEXT_CP_CONS_V(&cpr->ring, cpr->cons,
                                    cpr->v_bit);
                                ri->iri_cidx = RING_NEXT(&cpr->ring,
                                    ri->iri_cidx);
                                CMPL_PREFETCH_NEXT(cpr, cpr->cons);
                        }
                        break;
                }
        }

        return 0;
}

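/* Legacy interrupt handler.  Not yet implemented; this is only a stub. */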
static int
bnxt_intr(void *sc)
{
        struct bnxt_softc *softc = (struct bnxt_softc *)sc;

        device_printf(softc->dev, "STUB: %s @ %s:%d\n", __func__, __FILE__, __LINE__);
        return ENOSYS;
}