]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/dev/bnxt/bnxt_txrx.c
zfs: merge openzfs/zfs@887a3c533
[FreeBSD/FreeBSD.git] / sys / dev / bnxt / bnxt_txrx.c
1 /*-
2  * Broadcom NetXtreme-C/E network driver.
3  *
4  * Copyright (c) 2016 Broadcom, All Rights Reserved.
5  * The term Broadcom refers to Broadcom Limited and/or its subsidiaries
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
20  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26  * THE POSSIBILITY OF SUCH DAMAGE.
27  */
28
29 #include <sys/cdefs.h>
30 #include <sys/types.h>
31 #include <sys/socket.h>
32 #include <sys/endian.h>
33 #include <net/if.h>
34 #include <net/if_var.h>
35 #include <net/ethernet.h>
36 #include <net/iflib.h>
37
38 #include "opt_inet.h"
39 #include "opt_inet6.h"
40 #include "opt_rss.h"
41
42 #include "bnxt.h"
43
44 /*
45  * Function prototypes
46  */
47
48 static int bnxt_isc_txd_encap(void *sc, if_pkt_info_t pi);
49 static void bnxt_isc_txd_flush(void *sc, uint16_t txqid, qidx_t pidx);
50 static int bnxt_isc_txd_credits_update(void *sc, uint16_t txqid, bool clear);
51
52 static void bnxt_isc_rxd_refill(void *sc, if_rxd_update_t iru);
53
/*
 * Historical argument list of bnxt_isc_rxd_refill(), now packed into the
 * if_rxd_update_t parameter:
 *	uint16_t rxqid, uint8_t flid, uint32_t pidx, uint64_t *paddrs,
 *	caddr_t *vaddrs, uint16_t count, uint16_t buf_size
 */
58 static void bnxt_isc_rxd_flush(void *sc, uint16_t rxqid, uint8_t flid,
59     qidx_t pidx);
60 static int bnxt_isc_rxd_available(void *sc, uint16_t rxqid, qidx_t idx,
61     qidx_t budget);
62 static int bnxt_isc_rxd_pkt_get(void *sc, if_rxd_info_t ri);
63
64 static int bnxt_intr(void *sc);
65
/* iflib TX/RX operations vector registered by the bnxt driver. */
struct if_txrx bnxt_txrx  = {
	.ift_txd_encap = bnxt_isc_txd_encap,
	.ift_txd_flush = bnxt_isc_txd_flush,
	.ift_txd_credits_update = bnxt_isc_txd_credits_update,
	.ift_rxd_available = bnxt_isc_rxd_available,
	.ift_rxd_pkt_get = bnxt_isc_rxd_pkt_get,
	.ift_rxd_refill = bnxt_isc_rxd_refill,
	.ift_rxd_flush = bnxt_isc_rxd_flush,
	.ift_legacy_intr = bnxt_intr
};
76
77 /*
78  * Device Dependent Packet Transmit and Receive Functions
79  */
80
/*
 * TX length hints, indexed by packet length >> 9 (512-byte buckets) for
 * lengths below 2048; longer packets use TX_BD_SHORT_FLAGS_LHINT_GTE2K
 * directly (see bnxt_isc_txd_encap()).
 */
static const uint16_t bnxt_tx_lhint[] = {
	TX_BD_SHORT_FLAGS_LHINT_LT512,
	TX_BD_SHORT_FLAGS_LHINT_LT1K,
	TX_BD_SHORT_FLAGS_LHINT_LT2K,
	TX_BD_SHORT_FLAGS_LHINT_LT2K,
	TX_BD_SHORT_FLAGS_LHINT_GTE2K,
};
88
/*
 * iflib txd_encap handler: encode the packet described by 'pi' into the TX
 * ring as a chain of BDs.  The first BD is a long BD (optionally with a
 * "hi" half carrying checksum/TSO/VLAN metadata); remaining DMA segments
 * each get a short BD.  Advances pi->ipi_new_pidx one past the last BD
 * written.  Always returns 0 — iflib guarantees descriptor availability.
 */
static int
bnxt_isc_txd_encap(void *sc, if_pkt_info_t pi)
{
	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
	struct bnxt_ring *txr = &softc->tx_rings[pi->ipi_qsidx];
	struct tx_bd_long *tbd;
	struct tx_bd_long_hi *tbdh;
	bool need_hi = false;
	uint16_t flags_type;
	uint16_t lflags;
	uint32_t cfa_meta;
	int seg = 0;

	/* If we have offloads enabled, we need to use two BDs. */
	if ((pi->ipi_csum_flags & (CSUM_OFFLOAD | CSUM_TSO | CSUM_IP)) ||
	    pi->ipi_mflags & M_VLANTAG)
		need_hi = true;

	/* TODO: Devices before Cu+B1 need to not mix long and short BDs */
	need_hi = true;	/* currently forced on for every packet */

	pi->ipi_new_pidx = pi->ipi_pidx;
	tbd = &((struct tx_bd_long *)txr->vaddr)[pi->ipi_new_pidx];
	pi->ipi_ndescs = 0;
	/* No need to byte-swap the opaque value */
	/* opaque: BD count in the top byte, producer index in the low bits;
	 * the count is read back in bnxt_isc_txd_credits_update(). */
	tbd->opaque = ((pi->ipi_nsegs + need_hi) << 24) | pi->ipi_new_pidx;
	tbd->len = htole16(pi->ipi_segs[seg].ds_len);
	tbd->addr = htole64(pi->ipi_segs[seg++].ds_addr);
	flags_type = ((pi->ipi_nsegs + need_hi) <<
	    TX_BD_SHORT_FLAGS_BD_CNT_SFT) & TX_BD_SHORT_FLAGS_BD_CNT_MASK;
	/* Length hint: bucketed by 512 bytes below 2K (see bnxt_tx_lhint). */
	if (pi->ipi_len >= 2048)
		flags_type |= TX_BD_SHORT_FLAGS_LHINT_GTE2K;
	else
		flags_type |= bnxt_tx_lhint[pi->ipi_len >> 9];

	if (need_hi) {
		flags_type |= TX_BD_LONG_TYPE_TX_BD_LONG;

		/* The "hi" half of a long BD occupies the next ring slot. */
		pi->ipi_new_pidx = RING_NEXT(txr, pi->ipi_new_pidx);
		tbdh = &((struct tx_bd_long_hi *)txr->vaddr)[pi->ipi_new_pidx];
		tbdh->kid_or_ts_high_mss = htole16(pi->ipi_tso_segsz);
		/* Header size is expressed in 16-bit words, hence >> 1. */
		tbdh->kid_or_ts_low_hdr_size = htole16((pi->ipi_ehdrlen + pi->ipi_ip_hlen +
		    pi->ipi_tcp_hlen) >> 1);
		tbdh->cfa_action = 0;
		lflags = 0;
		cfa_meta = 0;
		if (pi->ipi_mflags & M_VLANTAG) {
			/* TODO: Do we need to byte-swap the vtag here? */
			cfa_meta = TX_BD_LONG_CFA_META_KEY_VLAN_TAG |
			    pi->ipi_vtag;
			cfa_meta |= TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100;
		}
		tbdh->cfa_meta = htole32(cfa_meta);
		if (pi->ipi_csum_flags & CSUM_TSO) {
			lflags |= TX_BD_LONG_LFLAGS_LSO |
			    TX_BD_LONG_LFLAGS_T_IPID;
		}
		else if(pi->ipi_csum_flags & CSUM_OFFLOAD) {
			lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM |
			    TX_BD_LONG_LFLAGS_IP_CHKSUM;
		}
		else if(pi->ipi_csum_flags & CSUM_IP) {
			lflags |= TX_BD_LONG_LFLAGS_IP_CHKSUM;
		}
		tbdh->lflags = htole16(lflags);
	}
	else {
		flags_type |= TX_BD_SHORT_TYPE_TX_BD_SHORT;
	}

	/* Each remaining DMA segment gets its own short BD. */
	for (; seg < pi->ipi_nsegs; seg++) {
		tbd->flags_type = htole16(flags_type);
		pi->ipi_new_pidx = RING_NEXT(txr, pi->ipi_new_pidx);
		tbd = &((struct tx_bd_long *)txr->vaddr)[pi->ipi_new_pidx];
		tbd->len = htole16(pi->ipi_segs[seg].ds_len);
		tbd->addr = htole64(pi->ipi_segs[seg].ds_addr);
		flags_type = TX_BD_SHORT_TYPE_TX_BD_SHORT;
	}
	/* Mark the last BD of the chain as the end of the packet. */
	flags_type |= TX_BD_SHORT_FLAGS_PACKET_END;
	tbd->flags_type = htole16(flags_type);
	pi->ipi_new_pidx = RING_NEXT(txr, pi->ipi_new_pidx);

	return 0;
}
173
174 static void
175 bnxt_isc_txd_flush(void *sc, uint16_t txqid, qidx_t pidx)
176 {
177         struct bnxt_softc *softc = (struct bnxt_softc *)sc;
178         struct bnxt_ring *tx_ring = &softc->tx_rings[txqid];
179
180         /* pidx is what we last set ipi_new_pidx to */
181         softc->db_ops.bnxt_db_tx(tx_ring, pidx);
182         return;
183 }
184
/*
 * iflib txd_credits_update handler: scan the TX completion ring and return
 * the number of TX descriptors the hardware has finished with.  When
 * 'clear' is true, the ring's consumer index/valid bit are advanced past
 * the processed completions and the CQ doorbell is written; when false,
 * this only peeks and stops at the first completed packet.
 */
static int
bnxt_isc_txd_credits_update(void *sc, uint16_t txqid, bool clear)
{
	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
	struct bnxt_cp_ring *cpr = &softc->tx_cp_rings[txqid];
	struct tx_cmpl *cmpl = (struct tx_cmpl *)cpr->ring.vaddr;
	int avail = 0;
	uint32_t cons = cpr->cons;
	bool v_bit = cpr->v_bit;
	bool last_v_bit;
	uint32_t last_cons;
	uint16_t type;
	uint16_t err;

	for (;;) {
		/* Remember the last fully-consumed position for 'clear'. */
		last_cons = cons;
		last_v_bit = v_bit;
		NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
		CMPL_PREFETCH_NEXT(cpr, cons);

		if (!CMP_VALID(&cmpl[cons], v_bit))
			goto done;

		type = cmpl[cons].flags_type & TX_CMPL_TYPE_MASK;
		switch (type) {
		case TX_CMPL_TYPE_TX_L2:
			err = (le16toh(cmpl[cons].errors_v) &
			    TX_CMPL_ERRORS_BUFFER_ERROR_MASK) >>
			    TX_CMPL_ERRORS_BUFFER_ERROR_SFT;
			if (err)
				device_printf(softc->dev,
				    "TX completion error %u\n", err);
			/* No need to byte-swap the opaque value */
			/* BD count was stashed in opaque's top byte at encap. */
			avail += cmpl[cons].opaque >> 24;
			/*
			 * If we're not clearing, iflib only cares if there's
			 * at least one buffer.  Don't scan the whole ring in
			 * this case.
			 */
			if (!clear)
				goto done;
			break;
		default:
			/* Odd completion types occupy two ring entries. */
			if (type & 1) {
				NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
				if (!CMP_VALID(&cmpl[cons], v_bit))
					goto done;
			}
			device_printf(softc->dev,
			    "Unhandled TX completion type %u\n", type);
			break;
		}
	}
done:

	if (clear && avail) {
		cpr->cons = last_cons;
		cpr->v_bit = last_v_bit;
		softc->db_ops.bnxt_db_tx_cq(cpr, 0);
	}

	return avail;
}
248
249 static void
250 bnxt_isc_rxd_refill(void *sc, if_rxd_update_t iru)
251 {
252         struct bnxt_softc *softc = (struct bnxt_softc *)sc;
253         struct bnxt_ring *rx_ring;
254         struct rx_prod_pkt_bd *rxbd;
255         uint16_t type;
256         uint16_t i;
257         uint16_t rxqid;
258         uint16_t count;
259         uint32_t pidx;
260         uint8_t flid;
261         uint64_t *paddrs;
262         qidx_t  *frag_idxs;
263
264         rxqid = iru->iru_qsidx;
265         count = iru->iru_count;
266         pidx = iru->iru_pidx;
267         flid = iru->iru_flidx;
268         paddrs = iru->iru_paddrs;
269         frag_idxs = iru->iru_idxs;
270
271         if (flid == 0) {
272                 rx_ring = &softc->rx_rings[rxqid];
273                 type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT;
274         }
275         else {
276                 rx_ring = &softc->ag_rings[rxqid];
277                 type = RX_PROD_AGG_BD_TYPE_RX_PROD_AGG;
278         }
279         rxbd = (void *)rx_ring->vaddr;
280
281         for (i=0; i<count; i++) {
282                 rxbd[pidx].flags_type = htole16(type);
283                 rxbd[pidx].len = htole16(softc->rx_buf_size);
284                 /* No need to byte-swap the opaque value */
285                 rxbd[pidx].opaque = (((rxqid & 0xff) << 24) | (flid << 16)
286                     | (frag_idxs[i]));
287                 rxbd[pidx].addr = htole64(paddrs[i]);
288                 if (++pidx == rx_ring->ring_size)
289                         pidx = 0;
290         }
291         return;
292 }
293
294 static void
295 bnxt_isc_rxd_flush(void *sc, uint16_t rxqid, uint8_t flid,
296     qidx_t pidx)
297 {
298         struct bnxt_softc *softc = (struct bnxt_softc *)sc;
299         struct bnxt_ring *rx_ring;
300
301         if (flid == 0)
302                 rx_ring = &softc->rx_rings[rxqid];
303         else
304                 rx_ring = &softc->ag_rings[rxqid];
305
306         /*
307          * We *must* update the completion ring before updating the RX ring
308          * or we will overrun the completion ring and the device will wedge for
309          * RX.
310          */
311         softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[rxqid], 0);
312         softc->db_ops.bnxt_db_rx(rx_ring, pidx);
313         return;
314 }
315
/*
 * iflib rxd_available handler: count how many complete packets are ready on
 * the RX completion ring for 'rxqid', scanning at most slightly past
 * 'budget'.  Works on local copies of the consumer index and valid bit, so
 * no completion state is consumed here — bnxt_isc_rxd_pkt_get() does that.
 */
static int
bnxt_isc_rxd_available(void *sc, uint16_t rxqid, qidx_t idx, qidx_t budget)
{
	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
	struct bnxt_cp_ring *cpr = &softc->rx_cp_rings[rxqid];
	struct rx_pkt_cmpl *rcp;
	struct rx_tpa_end_cmpl *rtpae;
	struct cmpl_base *cmp = (struct cmpl_base *)cpr->ring.vaddr;
	int avail = 0;
	uint32_t cons = cpr->cons;
	bool v_bit = cpr->v_bit;
	uint8_t ags;
	int i;
	uint16_t type;

	for (;;) {
		NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
		CMPL_PREFETCH_NEXT(cpr, cons);

		if (!CMP_VALID(&cmp[cons], v_bit))
			goto cmpl_invalid;

		type = le16toh(cmp[cons].type) & CMPL_BASE_TYPE_MASK;
		switch (type) {
		case CMPL_BASE_TYPE_RX_L2:
			rcp = (void *)&cmp[cons];
			ags = (rcp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK) >>
			    RX_PKT_CMPL_AGG_BUFS_SFT;
			/* Skip the second half of the two-entry completion. */
			NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
			CMPL_PREFETCH_NEXT(cpr, cons);

			if (!CMP_VALID(&cmp[cons], v_bit))
				goto cmpl_invalid;

			/* Now account for all the AG completions */
			for (i=0; i<ags; i++) {
				NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
				CMPL_PREFETCH_NEXT(cpr, cons);
				if (!CMP_VALID(&cmp[cons], v_bit))
					goto cmpl_invalid;
			}
			avail++;
			break;
		case CMPL_BASE_TYPE_RX_TPA_END:
			rtpae = (void *)&cmp[cons];
			ags = (rtpae->agg_bufs_v1 &
			    RX_TPA_END_CMPL_AGG_BUFS_MASK) >>
			    RX_TPA_END_CMPL_AGG_BUFS_SFT;
			/* Skip the second half of the two-entry completion. */
			NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
			CMPL_PREFETCH_NEXT(cpr, cons);

			if (!CMP_VALID(&cmp[cons], v_bit))
				goto cmpl_invalid;
			/* Now account for all the AG completions */
			for (i=0; i<ags; i++) {
				NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
				CMPL_PREFETCH_NEXT(cpr, cons);
				if (!CMP_VALID(&cmp[cons], v_bit))
					goto cmpl_invalid;
			}
			avail++;
			break;
		case CMPL_BASE_TYPE_RX_TPA_START:
			/* TPA start spans two entries but is not a packet yet. */
			NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
			CMPL_PREFETCH_NEXT(cpr, cons);

			if (!CMP_VALID(&cmp[cons], v_bit))
				goto cmpl_invalid;
			break;
		case CMPL_BASE_TYPE_RX_AGG:
			break;
		default:
			device_printf(softc->dev,
			    "Unhandled completion type %d on RXQ %d\n",
			    type, rxqid);

			/* Odd completion types use two completions */
			if (type & 1) {
				NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
				CMPL_PREFETCH_NEXT(cpr, cons);

				if (!CMP_VALID(&cmp[cons], v_bit))
					goto cmpl_invalid;
			}
			break;
		}
		if (avail > budget)
			break;
	}
cmpl_invalid:

	return avail;
}
409
410 static void
411 bnxt_set_rsstype(if_rxd_info_t ri, uint8_t rss_hash_type)
412 {
413         uint8_t rss_profile_id;
414
415         rss_profile_id = BNXT_GET_RSS_PROFILE_ID(rss_hash_type);
416         switch (rss_profile_id) {
417         case BNXT_RSS_HASH_TYPE_TCPV4:
418                 ri->iri_rsstype = M_HASHTYPE_RSS_TCP_IPV4;
419                 break;
420         case BNXT_RSS_HASH_TYPE_UDPV4:
421                 ri->iri_rsstype = M_HASHTYPE_RSS_UDP_IPV4;
422                 break;
423         case BNXT_RSS_HASH_TYPE_IPV4:
424                 ri->iri_rsstype = M_HASHTYPE_RSS_IPV4;
425                 break;
426         case BNXT_RSS_HASH_TYPE_TCPV6:
427                 ri->iri_rsstype = M_HASHTYPE_RSS_TCP_IPV6;
428                 break;
429         case BNXT_RSS_HASH_TYPE_UDPV6:
430                 ri->iri_rsstype = M_HASHTYPE_RSS_UDP_IPV6;
431                 break;
432         case BNXT_RSS_HASH_TYPE_IPV6:
433                 ri->iri_rsstype = M_HASHTYPE_RSS_IPV6;
434                 break;
435         default:
436                 ri->iri_rsstype = M_HASHTYPE_OPAQUE_HASH;
437                 break;
438         }
439 }
440
/*
 * Fill 'ri' from an ordinary L2 RX completion (two 16-byte halves plus one
 * aggregation-buffer completion per AG fragment).  Consumes the completion
 * entries by advancing cpr->cons/cpr->v_bit in lockstep with ri->iri_cidx.
 * Always returns 0.
 */
static int
bnxt_pkt_get_l2(struct bnxt_softc *softc, if_rxd_info_t ri,
    struct bnxt_cp_ring *cpr, uint16_t flags_type)
{
	struct rx_pkt_cmpl *rcp;
	struct rx_pkt_cmpl_hi *rcph;
	struct rx_abuf_cmpl *acp;
	uint32_t flags2;
	uint32_t errors;
	uint8_t ags;
	int i;

	rcp = &((struct rx_pkt_cmpl *)cpr->ring.vaddr)[cpr->cons];

	/* Extract from the first 16-byte BD */
	if (flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID) {
		ri->iri_flowid = le32toh(rcp->rss_hash);
		bnxt_set_rsstype(ri, rcp->rss_hash_type);
	}
	else {
		ri->iri_rsstype = M_HASHTYPE_NONE;
	}
	ags = (rcp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK) >>
	    RX_PKT_CMPL_AGG_BUFS_SFT;
	ri->iri_nfrags = ags + 1;
	/* No need to byte-swap the opaque value */
	/* opaque layout (from bnxt_isc_rxd_refill): qid:8 | flid:8 | idx:16 */
	ri->iri_frags[0].irf_flid = (rcp->opaque >> 16) & 0xff;
	ri->iri_frags[0].irf_idx = rcp->opaque & 0xffff;
	ri->iri_frags[0].irf_len = le16toh(rcp->len);
	ri->iri_len = le16toh(rcp->len);

	/* Now the second 16-byte BD */
	NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
	ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
	rcph = &((struct rx_pkt_cmpl_hi *)cpr->ring.vaddr)[cpr->cons];

	flags2 = le32toh(rcph->flags2);
	errors = le16toh(rcph->errors_v2);
	if ((flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_MASK) ==
	    RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) {
		ri->iri_flags |= M_VLANTAG;
		/* TODO: Should this be the entire 16-bits? */
		ri->iri_vtag = le32toh(rcph->metadata) &
		    (RX_PKT_CMPL_METADATA_VID_MASK | RX_PKT_CMPL_METADATA_DE |
		    RX_PKT_CMPL_METADATA_PRI_MASK);
	}
	/* Translate hardware checksum results into mbuf csum flags. */
	if (flags2 & RX_PKT_CMPL_FLAGS2_IP_CS_CALC) {
		ri->iri_csum_flags |= CSUM_IP_CHECKED;
		if (!(errors & RX_PKT_CMPL_ERRORS_IP_CS_ERROR))
			ri->iri_csum_flags |= CSUM_IP_VALID;
	}
	if (flags2 & (RX_PKT_CMPL_FLAGS2_L4_CS_CALC |
		      RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)) {
		ri->iri_csum_flags |= CSUM_L4_CALC;
		if (!(errors & (RX_PKT_CMPL_ERRORS_L4_CS_ERROR |
				RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR))) {
			ri->iri_csum_flags |= CSUM_L4_VALID;
			ri->iri_csum_data = 0xffff;
		}
	}

	/* And finally the ag ring stuff. */
	for (i=1; i < ri->iri_nfrags; i++) {
		NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
		ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
		acp = &((struct rx_abuf_cmpl *)cpr->ring.vaddr)[cpr->cons];

		/* No need to byte-swap the opaque value */
		ri->iri_frags[i].irf_flid = (acp->opaque >> 16 & 0xff);
		ri->iri_frags[i].irf_idx = acp->opaque & 0xffff;
		ri->iri_frags[i].irf_len = le16toh(acp->len);
		ri->iri_len += le16toh(acp->len);
	}

	return 0;
}
517
/*
 * Fill 'ri' from a TPA (LRO aggregation) end completion.  Per-flow metadata
 * (RSS, VLAN, checksum) comes from the TPA-start completion stashed in
 * tpa_start[agg_id] by bnxt_isc_rxd_pkt_get(); lengths and fragments come
 * from the end completion and any trailing AG completions.  Always returns 0.
 */
static int
bnxt_pkt_get_tpa(struct bnxt_softc *softc, if_rxd_info_t ri,
    struct bnxt_cp_ring *cpr, uint16_t flags_type)
{
	struct rx_tpa_end_cmpl *agend =
	    &((struct rx_tpa_end_cmpl *)cpr->ring.vaddr)[cpr->cons];
	struct rx_abuf_cmpl *acp;
	struct bnxt_full_tpa_start *tpas;
	uint32_t flags2;
	uint8_t ags;
	uint8_t agg_id;
	int i;

	/* Get the agg_id */
	agg_id = (agend->agg_id & RX_TPA_END_CMPL_AGG_ID_MASK) >>
	    RX_TPA_END_CMPL_AGG_ID_SFT;
	tpas = &(softc->rx_rings[ri->iri_qsidx].tpa_start[agg_id]);

	/* Extract from the first 16-byte BD */
	if (le16toh(tpas->low.flags_type) & RX_TPA_START_CMPL_FLAGS_RSS_VALID) {
		ri->iri_flowid = le32toh(tpas->low.rss_hash);
		bnxt_set_rsstype(ri, tpas->low.rss_hash_type);
	}
	else {
		ri->iri_rsstype = M_HASHTYPE_NONE;
	}
	ags = (agend->agg_bufs_v1 & RX_TPA_END_CMPL_AGG_BUFS_MASK) >>
	    RX_TPA_END_CMPL_AGG_BUFS_SFT;
	ri->iri_nfrags = ags + 1;
	/* No need to byte-swap the opaque value */
	/* opaque layout (from bnxt_isc_rxd_refill): qid:8 | flid:8 | idx:16 */
	ri->iri_frags[0].irf_flid = ((tpas->low.opaque >> 16) & 0xff);
	ri->iri_frags[0].irf_idx = (tpas->low.opaque & 0xffff);
	ri->iri_frags[0].irf_len = le16toh(tpas->low.len);
	ri->iri_len = le16toh(tpas->low.len);

	/* Now the second 16-byte BD */
	NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
	ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);

	flags2 = le32toh(tpas->high.flags2);
	if ((flags2 & RX_TPA_START_CMPL_FLAGS2_META_FORMAT_MASK) ==
	    RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN) {
		ri->iri_flags |= M_VLANTAG;
		/* TODO: Should this be the entire 16-bits? */
		ri->iri_vtag = le32toh(tpas->high.metadata) &
		    (RX_TPA_START_CMPL_METADATA_VID_MASK |
		    RX_TPA_START_CMPL_METADATA_DE |
		    RX_TPA_START_CMPL_METADATA_PRI_MASK);
	}
	/* TPA completions carry no per-packet error bits to check here. */
	if (flags2 & RX_TPA_START_CMPL_FLAGS2_IP_CS_CALC) {
		ri->iri_csum_flags |= CSUM_IP_CHECKED;
		ri->iri_csum_flags |= CSUM_IP_VALID;
	}
	if (flags2 & RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC) {
		ri->iri_csum_flags |= CSUM_L4_CALC;
		ri->iri_csum_flags |= CSUM_L4_VALID;
		ri->iri_csum_data = 0xffff;
	}

	/* Now the ag ring stuff. */
	for (i=1; i < ri->iri_nfrags; i++) {
		NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
		ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
		acp = &((struct rx_abuf_cmpl *)cpr->ring.vaddr)[cpr->cons];

		/* No need to byte-swap the opaque value */
		ri->iri_frags[i].irf_flid = ((acp->opaque >> 16) & 0xff);
		ri->iri_frags[i].irf_idx = (acp->opaque & 0xffff);
		ri->iri_frags[i].irf_len = le16toh(acp->len);
		ri->iri_len += le16toh(acp->len);
	}

	/* And finally, the empty BD at the end... */
	ri->iri_nfrags++;
	/* No need to byte-swap the opaque value */
	ri->iri_frags[i].irf_flid = ((agend->opaque >> 16) & 0xff);
	ri->iri_frags[i].irf_idx = (agend->opaque & 0xffff);
	ri->iri_frags[i].irf_len = le16toh(agend->len);
	ri->iri_len += le16toh(agend->len);

	return 0;
}
600
/* If we return anything but zero, iflib will assert... */
/*
 * iflib rxd_pkt_get handler: consume completions for one received packet
 * and describe it in 'ri'.  TPA-start completions are stashed per agg_id
 * for later use by bnxt_pkt_get_tpa(); L2 and TPA-end completions terminate
 * the loop via the respective helpers.
 */
static int
bnxt_isc_rxd_pkt_get(void *sc, if_rxd_info_t ri)
{
	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
	struct bnxt_cp_ring *cpr = &softc->rx_cp_rings[ri->iri_qsidx];
	struct cmpl_base *cmp_q = (struct cmpl_base *)cpr->ring.vaddr;
	struct cmpl_base *cmp;
	struct rx_tpa_start_cmpl *rtpa;
	uint16_t flags_type;
	uint16_t type;
	uint8_t agg_id;

	for (;;) {
		NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
		ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
		CMPL_PREFETCH_NEXT(cpr, cpr->cons);
		cmp = &((struct cmpl_base *)cpr->ring.vaddr)[cpr->cons];

		flags_type = le16toh(cmp->type);
		type = flags_type & CMPL_BASE_TYPE_MASK;

		switch (type) {
		case CMPL_BASE_TYPE_RX_L2:
			return bnxt_pkt_get_l2(softc, ri, cpr, flags_type);
		case CMPL_BASE_TYPE_RX_TPA_END:
			return bnxt_pkt_get_tpa(softc, ri, cpr, flags_type);
		case CMPL_BASE_TYPE_RX_TPA_START:
			/* Save both halves for the matching TPA-end later. */
			rtpa = (void *)&cmp_q[cpr->cons];
			agg_id = (rtpa->agg_id &
			    RX_TPA_START_CMPL_AGG_ID_MASK) >>
			    RX_TPA_START_CMPL_AGG_ID_SFT;
			softc->rx_rings[ri->iri_qsidx].tpa_start[agg_id].low = *rtpa;

			NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
			ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
			CMPL_PREFETCH_NEXT(cpr, cpr->cons);

			softc->rx_rings[ri->iri_qsidx].tpa_start[agg_id].high =
			    ((struct rx_tpa_start_cmpl_hi *)cmp_q)[cpr->cons];
			break;
		default:
			device_printf(softc->dev,
			    "Unhandled completion type %d on RXQ %d get\n",
			    type, ri->iri_qsidx);
			/* Odd completion types occupy two ring entries. */
			if (type & 1) {
				NEXT_CP_CONS_V(&cpr->ring, cpr->cons,
				    cpr->v_bit);
				ri->iri_cidx = RING_NEXT(&cpr->ring,
				    ri->iri_cidx);
				CMPL_PREFETCH_NEXT(cpr, cpr->cons);
			}
			break;
		}
	}

	return 0;
}
659
/*
 * Legacy (INTx) interrupt handler registered as ift_legacy_intr.
 * Unimplemented stub: logs a diagnostic and reports ENOSYS.
 */
static int
bnxt_intr(void *sc)
{
	struct bnxt_softc *softc = (struct bnxt_softc *)sc;

	device_printf(softc->dev, "STUB: %s @ %s:%d\n", __func__, __FILE__, __LINE__);
	return ENOSYS;
}