/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

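/*
 * The opt_*.h option headers are generated by kernel config and exist
 * only for in-tree builds, so standalone module builds must skip them.
 */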
#ifndef IXGBE_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
#endif

#include "ixgbe.h"

/************************************************************************
 * Local Function prototypes
 ************************************************************************/
static int  ixgbe_isc_txd_encap(void *arg, if_pkt_info_t pi);
static void ixgbe_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx);
static int  ixgbe_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear);

static void ixgbe_isc_rxd_refill(void *arg, if_rxd_update_t iru);
static void ixgbe_isc_rxd_flush(void *arg, uint16_t qsidx, uint8_t flidx __unused,
                                qidx_t pidx);
static int  ixgbe_isc_rxd_available(void *arg, uint16_t qsidx, qidx_t pidx,
                                    qidx_t budget);
static int  ixgbe_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);

static void ixgbe_rx_checksum(u32 staterr, if_rxd_info_t ri, u32 ptype);
static int  ixgbe_tx_ctx_setup(struct ixgbe_adv_tx_context_desc *, if_pkt_info_t);

extern void ixgbe_if_enable_intr(if_ctx_t ctx);
static int  ixgbe_determine_rsstype(u16 pkt_info);

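/*
 * iflib dispatch table: iflib calls through these methods for all
 * descriptor-ring work, so this driver supplies no transmit or receive
 * fast path of its own.
 */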
struct if_txrx ixgbe_txrx = {
        .ift_txd_encap = ixgbe_isc_txd_encap,
        .ift_txd_flush = ixgbe_isc_txd_flush,
        .ift_txd_credits_update = ixgbe_isc_txd_credits_update,
        .ift_rxd_available = ixgbe_isc_rxd_available,
        .ift_rxd_pkt_get = ixgbe_isc_rxd_pkt_get,
        .ift_rxd_refill = ixgbe_isc_rxd_refill,
        .ift_rxd_flush = ixgbe_isc_rxd_flush,
        .ift_legacy_intr = NULL
};

extern if_shared_ctx_t ixgbe_sctx;

/************************************************************************
 * ixgbe_tx_ctx_setup
 *
 *   Advanced Context Descriptor setup for VLAN, CSUM or TSO
 *
 ************************************************************************/
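/*
 * A context descriptor occupies one slot in the transmit ring and
 * programs the offload parameters for the data descriptors that follow
 * it.  The return value is the olinfo_status word the caller must place
 * in those data descriptors.
 */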
static int
ixgbe_tx_ctx_setup(struct ixgbe_adv_tx_context_desc *TXD, if_pkt_info_t pi)
{
        u32 vlan_macip_lens, type_tucmd_mlhl;
        u32 olinfo_status, mss_l4len_idx, pktlen, offload;
        u8  ehdrlen;

        offload = TRUE;
        olinfo_status = mss_l4len_idx = vlan_macip_lens = type_tucmd_mlhl = 0;
        /* VLAN MACLEN IPLEN */
        vlan_macip_lens |= (htole16(pi->ipi_vtag) << IXGBE_ADVTXD_VLAN_SHIFT);

        /*
         * Some of our VF devices need a context descriptor for every
         * packet, so ehdrlen must be non-zero here or the host driver
         * will flag a malicious event.  The stack will usually have
         * populated it already for the other reasons this function is
         * called.
         */
        if (pi->ipi_ehdrlen == 0) {
                ehdrlen = ETHER_HDR_LEN;
                ehdrlen += (pi->ipi_vtag != 0) ? ETHER_VLAN_ENCAP_LEN : 0;
        } else
                ehdrlen = pi->ipi_ehdrlen;
        vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;

        pktlen = pi->ipi_len;
        /* First check if TSO is to be used */
        if (pi->ipi_csum_flags & CSUM_TSO) {
                /* This is used in the transmit desc in encap */
                pktlen = pi->ipi_len - ehdrlen - pi->ipi_ip_hlen - pi->ipi_tcp_hlen;
                mss_l4len_idx |= (pi->ipi_tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
                mss_l4len_idx |= (pi->ipi_tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
        }

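        /*
         * For TSO, PAYLEN must cover only the L4 payload (computed
         * above); for plain checksum offload it is the full packet
         * length.
         */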
        olinfo_status |= pktlen << IXGBE_ADVTXD_PAYLEN_SHIFT;

        if (pi->ipi_flags & IPI_TX_IPV4) {
                type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
                /* Tell transmit desc to also do IPv4 checksum. */
                if (pi->ipi_csum_flags & (CSUM_IP|CSUM_TSO))
                        olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
        } else if (pi->ipi_flags & IPI_TX_IPV6)
                type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
        else
                offload = FALSE;

        vlan_macip_lens |= pi->ipi_ip_hlen;

        switch (pi->ipi_ipproto) {
        case IPPROTO_TCP:
                if (pi->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP))
                        type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
                else
                        offload = FALSE;
                break;
        case IPPROTO_UDP:
                if (pi->ipi_csum_flags & (CSUM_IP_UDP | CSUM_IP6_UDP))
                        type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
                else
                        offload = FALSE;
                break;
        case IPPROTO_SCTP:
                if (pi->ipi_csum_flags & (CSUM_IP_SCTP | CSUM_IP6_SCTP))
                        type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
                else
                        offload = FALSE;
                break;
        default:
                offload = FALSE;
                break;
        }

        /* Insert L4 checksum into data descriptors */
        if (offload)
                olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;

        type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

        /* Now copy bits into descriptor */
        TXD->vlan_macip_lens = htole32(vlan_macip_lens);
        TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
        TXD->seqnum_seed = htole32(0);
        TXD->mss_l4len_idx = htole32(mss_l4len_idx);

        return (olinfo_status);
} /* ixgbe_tx_ctx_setup */

/************************************************************************
 * ixgbe_isc_txd_encap
 ************************************************************************/
static int
ixgbe_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
        struct adapter                   *sc = arg;
        if_softc_ctx_t                   scctx = sc->shared;
        struct ix_tx_queue               *que = &sc->tx_queues[pi->ipi_qsidx];
        struct tx_ring                   *txr = &que->txr;
        int                              nsegs = pi->ipi_nsegs;
        bus_dma_segment_t                *segs = pi->ipi_segs;
        union ixgbe_adv_tx_desc          *txd = NULL;
        struct ixgbe_adv_tx_context_desc *TXD;
        int                              i, j, first, pidx_last;
        u32                              olinfo_status, cmd, flags;
        qidx_t                           ntxd;

        cmd = (IXGBE_ADVTXD_DTYP_DATA |
               IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);

        if (pi->ipi_mflags & M_VLANTAG)
                cmd |= IXGBE_ADVTXD_DCMD_VLE;

        i = first = pi->ipi_pidx;
        flags = (pi->ipi_flags & IPI_TX_INTR) ? IXGBE_TXD_CMD_RS : 0;
        ntxd = scctx->isc_ntxd[0];

        TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[first];
        if ((pi->ipi_csum_flags & CSUM_OFFLOAD) ||
            (sc->feat_en & IXGBE_FEATURE_NEEDS_CTXD) ||
            pi->ipi_vtag) {
                /*********************************************
                 * Set up the appropriate offload context;
                 * this will consume the first descriptor.
                 *********************************************/
                olinfo_status = ixgbe_tx_ctx_setup(TXD, pi);
                if (pi->ipi_csum_flags & CSUM_TSO) {
                        cmd |= IXGBE_ADVTXD_DCMD_TSE;
                        ++txr->tso_tx;
                }

                if (++i == scctx->isc_ntxd[0])
                        i = 0;
        } else {
                /* Indicate the whole packet as payload when not doing TSO */
                olinfo_status = pi->ipi_len << IXGBE_ADVTXD_PAYLEN_SHIFT;
        }

        olinfo_status |= IXGBE_ADVTXD_CC;
        pidx_last = 0;
        for (j = 0; j < nsegs; j++) {
                bus_size_t seglen;

                txd = &txr->tx_base[i];
                seglen = segs[j].ds_len;

                txd->read.buffer_addr = htole64(segs[j].ds_addr);
                txd->read.cmd_type_len = htole32(cmd | seglen);
                txd->read.olinfo_status = htole32(olinfo_status);

                pidx_last = i;
                if (++i == scctx->isc_ntxd[0]) {
                        i = 0;
                }
        }

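        /*
         * Record where each RS-flagged descriptor lands so that
         * ixgbe_isc_txd_credits_update() can poll the DD bit at those
         * positions to learn how far the hardware has progressed.
         */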
        if (flags) {
                txr->tx_rsq[txr->tx_rs_pidx] = pidx_last;
                txr->tx_rs_pidx = (txr->tx_rs_pidx + 1) & (ntxd - 1);
        }
        txd->read.cmd_type_len |= htole32(IXGBE_TXD_CMD_EOP | flags);

        txr->bytes += pi->ipi_len;
        pi->ipi_new_pidx = i;

        ++txr->total_packets;

        return (0);
} /* ixgbe_isc_txd_encap */

/************************************************************************
 * ixgbe_isc_txd_flush
 ************************************************************************/
static void
ixgbe_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
        struct adapter     *sc = arg;
        struct ix_tx_queue *que = &sc->tx_queues[txqid];
        struct tx_ring     *txr = &que->txr;

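        /* Writing the tail register hands the new descriptors to the hardware. */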
        IXGBE_WRITE_REG(&sc->hw, txr->tail, pidx);
} /* ixgbe_isc_txd_flush */

/************************************************************************
 * ixgbe_isc_txd_credits_update
 ************************************************************************/
static int
ixgbe_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
        struct adapter     *sc = arg;
        if_softc_ctx_t     scctx = sc->shared;
        struct ix_tx_queue *que = &sc->tx_queues[txqid];
        struct tx_ring     *txr = &que->txr;
        qidx_t             processed = 0;
        int                updated;
        qidx_t             cur, prev, ntxd, rs_cidx;
        int32_t            delta;
        uint8_t            status;

        rs_cidx = txr->tx_rs_cidx;
        if (rs_cidx == txr->tx_rs_pidx)
                return (0);

        cur = txr->tx_rsq[rs_cidx];
        status = txr->tx_base[cur].wb.status;
        updated = !!(status & IXGBE_TXD_STAT_DD);

        if (clear == false || updated == 0)
                return (updated);

        prev = txr->tx_cidx_processed;
        ntxd = scctx->isc_ntxd[0];
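        /*
         * Walk the ring of recorded RS positions; each entry whose
         * descriptor reports DD accounts for every descriptor since the
         * previous entry, with delta correcting for ring wrap.
         */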
        do {
                delta = (int32_t)cur - (int32_t)prev;
                if (delta < 0)
                        delta += ntxd;

                processed += delta;
                prev = cur;
                rs_cidx = (rs_cidx + 1) & (ntxd - 1);
                if (rs_cidx == txr->tx_rs_pidx)
                        break;

                cur = txr->tx_rsq[rs_cidx];
                status = txr->tx_base[cur].wb.status;
        } while (status & IXGBE_TXD_STAT_DD);

        txr->tx_rs_cidx = rs_cidx;
        txr->tx_cidx_processed = prev;

        return (processed);
} /* ixgbe_isc_txd_credits_update */

/************************************************************************
 * ixgbe_isc_rxd_refill
 ************************************************************************/
static void
ixgbe_isc_rxd_refill(void *arg, if_rxd_update_t iru)
{
        struct adapter     *sc  = arg;
        struct ix_rx_queue *que = &sc->rx_queues[iru->iru_qsidx];
        struct rx_ring     *rxr = &que->rxr;
        uint64_t           *paddrs;
        int                i;
        uint32_t           next_pidx, pidx;
        uint16_t           count;

        paddrs = iru->iru_paddrs;
        pidx = iru->iru_pidx;
        count = iru->iru_count;

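        /*
         * Point each descriptor at its new buffer; the hardware is not
         * told about the refill until ixgbe_isc_rxd_flush() bumps the
         * tail register.
         */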
        for (i = 0, next_pidx = pidx; i < count; i++) {
                rxr->rx_base[next_pidx].read.pkt_addr = htole64(paddrs[i]);
                if (++next_pidx == sc->shared->isc_nrxd[0])
                        next_pidx = 0;
        }
} /* ixgbe_isc_rxd_refill */

/************************************************************************
 * ixgbe_isc_rxd_flush
 ************************************************************************/
static void
ixgbe_isc_rxd_flush(void *arg, uint16_t qsidx, uint8_t flidx __unused,
    qidx_t pidx)
{
        struct adapter     *sc  = arg;
        struct ix_rx_queue *que = &sc->rx_queues[qsidx];
        struct rx_ring     *rxr = &que->rxr;

        IXGBE_WRITE_REG(&sc->hw, rxr->tail, pidx);
} /* ixgbe_isc_rxd_flush */

/************************************************************************
 * ixgbe_isc_rxd_available
 ************************************************************************/
static int
ixgbe_isc_rxd_available(void *arg, uint16_t qsidx, qidx_t pidx, qidx_t budget)
{
        struct adapter          *sc = arg;
        struct ix_rx_queue      *que = &sc->rx_queues[qsidx];
        struct rx_ring          *rxr = &que->rxr;
        union ixgbe_adv_rx_desc *rxd;
        u32                      staterr;
        int                      cnt, i, nrxd;

        if (budget == 1) {
                rxd = &rxr->rx_base[pidx];
                staterr = le32toh(rxd->wb.upper.status_error);

                return (staterr & IXGBE_RXD_STAT_DD);
        }

        nrxd = sc->shared->isc_nrxd[0];
        /*
         * Count completed packets: only descriptors with DD set are
         * considered, and only EOP descriptors advance cnt, so cnt is
         * the number of whole packets ready.  The cnt < nrxd - 1 bound
         * (em(4) uses cnt < nrxd) conservatively stops one short of a
         * full ring sweep.
         */
        for (cnt = 0, i = pidx; cnt < nrxd - 1 && cnt <= budget;) {
                rxd = &rxr->rx_base[i];
                staterr = le32toh(rxd->wb.upper.status_error);

                if ((staterr & IXGBE_RXD_STAT_DD) == 0)
                        break;
                if (++i == nrxd)
                        i = 0;
                if (staterr & IXGBE_RXD_STAT_EOP)
                        cnt++;
        }

        return (cnt);
} /* ixgbe_isc_rxd_available */

/************************************************************************
 * ixgbe_isc_rxd_pkt_get
 *
 *   Hand a packet that has been DMA'ed into host memory up to the
 *   stack by initializing the ri structure.
 *
 *   Returns 0 upon success, errno on failure.
 ************************************************************************/
static int
ixgbe_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
        struct adapter          *adapter = arg;
        struct ix_rx_queue      *que = &adapter->rx_queues[ri->iri_qsidx];
        struct rx_ring          *rxr = &que->rxr;
        struct ifnet            *ifp = iflib_get_ifp(adapter->ctx);
        union ixgbe_adv_rx_desc *rxd;

        u16                      pkt_info, len, cidx, i;
        u16                      vtag = 0;
        u32                      ptype;
        u32                      staterr = 0;
        bool                     eop;

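        /*
         * A packet may span several descriptors (one per cluster), so
         * gather each fragment into ri->iri_frags[] until EOP is seen.
         */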
        i = 0;
        cidx = ri->iri_cidx;
        do {
                rxd = &rxr->rx_base[cidx];
                staterr = le32toh(rxd->wb.upper.status_error);
                pkt_info = le16toh(rxd->wb.lower.lo_dword.hs_rss.pkt_info);

                /* We should never be handed a descriptor that isn't done */
                MPASS((staterr & IXGBE_RXD_STAT_DD) != 0);

                len = le16toh(rxd->wb.upper.length);
                ptype = le32toh(rxd->wb.lower.lo_dword.data) &
                    IXGBE_RXDADV_PKTTYPE_MASK;

                ri->iri_len += len;
                rxr->bytes += len;

                rxd->wb.upper.status_error = 0;
                eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
                if (staterr & IXGBE_RXD_STAT_VP) {
                        vtag = le16toh(rxd->wb.upper.vlan);
                } else {
                        vtag = 0;
                }

                /* Make sure bad packets are discarded */
                if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) {
#if __FreeBSD_version >= 1100036
                        if (adapter->feat_en & IXGBE_FEATURE_VF)
                                if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
#endif
                        rxr->rx_discarded++;
                        return (EBADMSG);
                }
                ri->iri_frags[i].irf_flid = 0;
                ri->iri_frags[i].irf_idx = cidx;
                ri->iri_frags[i].irf_len = len;
                if (++cidx == adapter->shared->isc_nrxd[0])
                        cidx = 0;
                i++;
                /* Even a 16K packet shouldn't consume more than 8 clusters */
                MPASS(i < 9);
        } while (!eop);

        rxr->rx_packets++;
        rxr->packets++;
        rxr->rx_bytes += ri->iri_len;

        if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
                ixgbe_rx_checksum(staterr, ri, ptype);

        ri->iri_flowid = le32toh(rxd->wb.lower.hi_dword.rss);
        ri->iri_rsstype = ixgbe_determine_rsstype(pkt_info);
        ri->iri_vtag = vtag;
        ri->iri_nfrags = i;
        if (vtag)
                ri->iri_flags |= M_VLANTAG;

        return (0);
} /* ixgbe_isc_rxd_pkt_get */

/************************************************************************
 * ixgbe_rx_checksum
 *
 *   Verify that the hardware indicated that the checksum is valid and
 *   inform the stack of the result so it does not spend time
 *   re-verifying the checksum.
 ************************************************************************/
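/*
 * In the writeback descriptor, staterr packs the status bits in its low
 * 16 bits and the error bits in bits 24-31, which the two casts below
 * extract.
 */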
static void
ixgbe_rx_checksum(u32 staterr, if_rxd_info_t ri, u32 ptype)
{
        u16  status = (u16)staterr;
        u8   errors = (u8)(staterr >> 24);
        bool sctp = false;

        if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
            (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
                sctp = true;

        /* IPv4 checksum */
        if (status & IXGBE_RXD_STAT_IPCS) {
                if (!(errors & IXGBE_RXD_ERR_IPE)) {
                        /* IP checksum good */
                        ri->iri_csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID;
                } else
                        ri->iri_csum_flags = 0;
        }
        /* TCP/UDP/SCTP checksum */
        if (status & IXGBE_RXD_STAT_L4CS) {
                u64 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
#if __FreeBSD_version >= 800000
                if (sctp)
                        type = CSUM_SCTP_VALID;
#endif
                if (!(errors & IXGBE_RXD_ERR_TCPE)) {
                        ri->iri_csum_flags |= type;
                        if (!sctp)
                                ri->iri_csum_data = htons(0xffff);
                }
        }
} /* ixgbe_rx_checksum */

/************************************************************************
 * ixgbe_determine_rsstype
 *
 *   Parse the packet type to determine the appropriate hash
 ************************************************************************/
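/*
 * The mapped mbuf hash type lets the stack trust the hardware-supplied
 * flow hash instead of recomputing it; unrecognized types fall back to
 * M_HASHTYPE_OPAQUE.
 */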
static int
ixgbe_determine_rsstype(u16 pkt_info)
{
        switch (pkt_info & IXGBE_RXDADV_RSSTYPE_MASK) {
        case IXGBE_RXDADV_RSSTYPE_IPV4_TCP:
                return (M_HASHTYPE_RSS_TCP_IPV4);
        case IXGBE_RXDADV_RSSTYPE_IPV4:
                return (M_HASHTYPE_RSS_IPV4);
        case IXGBE_RXDADV_RSSTYPE_IPV6_TCP:
                return (M_HASHTYPE_RSS_TCP_IPV6);
        case IXGBE_RXDADV_RSSTYPE_IPV6_EX:
                return (M_HASHTYPE_RSS_IPV6_EX);
        case IXGBE_RXDADV_RSSTYPE_IPV6:
                return (M_HASHTYPE_RSS_IPV6);
        case IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX:
                return (M_HASHTYPE_RSS_TCP_IPV6_EX);
        case IXGBE_RXDADV_RSSTYPE_IPV4_UDP:
                return (M_HASHTYPE_RSS_UDP_IPV4);
        case IXGBE_RXDADV_RSSTYPE_IPV6_UDP:
                return (M_HASHTYPE_RSS_UDP_IPV6);
        case IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX:
                return (M_HASHTYPE_RSS_UDP_IPV6_EX);
        default:
                return (M_HASHTYPE_OPAQUE);
        }
} /* ixgbe_determine_rsstype */