/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*
** IXL driver TX/RX Routines:
**  This was separated to allow usage by
**  both the PF and VF drivers.
*/
#ifndef IXL_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
#endif

#include "ixl.h"

#ifdef RSS
#include <net/rss_config.h>
#endif
/* Local Prototypes */
static void	ixl_rx_checksum(if_rxd_info_t ri, u32 status, u32 error, u8 ptype);

static int	ixl_isc_txd_encap(void *arg, if_pkt_info_t pi);
static void	ixl_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx);
static int	ixl_isc_txd_credits_update_hwb(void *arg, uint16_t txqid, bool clear);
static int	ixl_isc_txd_credits_update_dwb(void *arg, uint16_t txqid, bool clear);

static void	ixl_isc_rxd_refill(void *arg, if_rxd_update_t iru);
static void	ixl_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused,
		    qidx_t pidx);
static int	ixl_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx,
		    qidx_t budget);
static int	ixl_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);

extern int	ixl_intr(void *arg);
struct if_txrx ixl_txrx_hwb = {
	ixl_isc_txd_encap,
	ixl_isc_txd_flush,
	ixl_isc_txd_credits_update_hwb,
	ixl_isc_rxd_available,
	ixl_isc_rxd_pkt_get,
	ixl_isc_rxd_refill,
	ixl_isc_rxd_flush,
	NULL
};

struct if_txrx ixl_txrx_dwb = {
	ixl_isc_txd_encap,
	ixl_isc_txd_flush,
	ixl_isc_txd_credits_update_dwb,
	ixl_isc_rxd_available,
	ixl_isc_rxd_pkt_get,
	ixl_isc_rxd_refill,
	ixl_isc_rxd_flush,
	NULL
};
/*
 * ixl_get_default_rss_key - copy the driver's default RSS hash key into @key
 */
void
ixl_get_default_rss_key(u32 *key)
{
	u32 rss_seed[IXL_RSS_KEY_SIZE_REG] = {0x41b01687,
	    0x183cfd8c, 0xce880440, 0x580cbc3c,
	    0x35897377, 0x328b25e1, 0x4fa98922,
	    0xb7d90c14, 0xd5bad70d, 0xcd15a2c1,
	    0x0, 0x0, 0x0};

	bcopy(rss_seed, key, IXL_RSS_KEY_SIZE);
}
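/*
 * Example (a sketch, not part of this file's API): a PF-side caller could
 * program this key into the hardware hash-key registers roughly as below.
 * The register macro assumes the i40e register set; the in-tree driver
 * does this kind of write when configuring RSS.
 *
 *	u32 rss_key[IXL_RSS_KEY_SIZE_REG];
 *	int i;
 *
 *	ixl_get_default_rss_key(rss_key);
 *	for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
 *		wr32(hw, I40E_PFQF_HKEY(i), rss_key[i]);
 */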
/**
 * i40e_vc_stat_str - convert virtchnl status err code to a string
 * @hw: pointer to the HW structure
 * @stat_err: the status error code to convert
 **/
const char *
i40e_vc_stat_str(struct i40e_hw *hw, enum virtchnl_status_code stat_err)
{
	switch (stat_err) {
	case VIRTCHNL_STATUS_SUCCESS:
		return "OK";
	case VIRTCHNL_ERR_PARAM:
		return "VIRTCHNL_ERR_PARAM";
	case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH:
		return "VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH";
	case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR:
		return "VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR";
	case VIRTCHNL_STATUS_ERR_INVALID_VF_ID:
		return "VIRTCHNL_STATUS_ERR_INVALID_VF_ID";
	case VIRTCHNL_STATUS_NOT_SUPPORTED:
		return "VIRTCHNL_STATUS_NOT_SUPPORTED";
	}

	snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
	return hw->err_str;
}
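/*
 * Check whether the hardware has written back the descriptor at 'idx':
 * on completion the DTYPE field of QW1 is rewritten to DESC_DONE.
 */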
static bool
ixl_is_tx_desc_done(struct tx_ring *txr, int idx)
{
	return (((txr->tx_base[idx].cmd_type_offset_bsz >> I40E_TXD_QW1_DTYPE_SHIFT)
	    & I40E_TXD_QW1_DTYPE_MASK) == I40E_TX_DESC_DTYPE_DESC_DONE);
}
static int
ixl_tso_detect_sparse(bus_dma_segment_t *segs, int nsegs, if_pkt_info_t pi)
{
	int	count, curseg, i, hlen, segsz, seglen, tsolen;

	if (nsegs <= IXL_MAX_TX_SEGS-2)
		return (0);
	segsz = pi->ipi_tso_segsz;
	curseg = count = 0;

	hlen = pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen;
	tsolen = pi->ipi_len - hlen;

	i = 0;
	curseg = segs[0].ds_len;
	while (hlen > 0) {
		count++;
		if (count > IXL_MAX_TX_SEGS - 2)
			return (1);
		if (curseg == 0) {
			i++;
			if (__predict_false(i == nsegs))
				return (1);
			curseg = segs[i].ds_len;
		}
		seglen = min(curseg, hlen);
		curseg -= seglen;
		hlen -= seglen;
	}
	while (tsolen > 0) {
		segsz = pi->ipi_tso_segsz;
		while (segsz > 0 && tsolen != 0) {
			count++;
			if (count > IXL_MAX_TX_SEGS - 2)
				return (1);
			if (curseg == 0) {
				i++;
				if (__predict_false(i == nsegs))
					return (1);
				curseg = segs[i].ds_len;
			}
			seglen = min(curseg, segsz);
			segsz -= seglen;
			curseg -= seglen;
			tsolen -= seglen;
		}
		count = 0;
	}

	return (0);
}
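/*
 * The IXL_MAX_TX_SEGS - 2 bound used above reflects the hardware limit on
 * how many data descriptors may make up one transmitted segment; the two
 * reserved slots leave headroom for the TSO context descriptor and the
 * descriptor shared across MSS windows. The walk above replays the header
 * bytes once and then each MSS-sized payload window, counting how many DMA
 * segments each would touch. A return of 1 tells the caller the mbuf chain
 * is too "sparse", and ixl_isc_txd_encap() turns that into EFBIG so iflib
 * defragments the chain and retries.
 */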
/*********************************************************************
 *
 *  Setup descriptor for hw offloads
 *
 **********************************************************************/
static void
ixl_tx_setup_offload(struct ixl_tx_queue *que,
    if_pkt_info_t pi, u32 *cmd, u32 *off)
{
	switch (pi->ipi_etype) {
#ifdef INET
	case ETHERTYPE_IP:
		if (pi->ipi_csum_flags & CSUM_IP)
			*cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
		else
			*cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
		break;
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
		*cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
		break;
#endif
	default:
		break;
	}

	*off |= (pi->ipi_ehdrlen >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
	*off |= (pi->ipi_ip_hlen >> 2) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;

	switch (pi->ipi_ipproto) {
	case IPPROTO_TCP:
		if (pi->ipi_csum_flags & IXL_CSUM_TCP) {
			*cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
			*off |= (pi->ipi_tcp_hlen >> 2) <<
			    I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		}
		break;
	case IPPROTO_UDP:
		if (pi->ipi_csum_flags & IXL_CSUM_UDP) {
			*cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
			*off |= (sizeof(struct udphdr) >> 2) <<
			    I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		}
		break;
	case IPPROTO_SCTP:
		if (pi->ipi_csum_flags & IXL_CSUM_SCTP) {
			*cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
			*off |= (sizeof(struct sctphdr) >> 2) <<
			    I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		}
		/* FALLTHROUGH */
	default:
		break;
	}
}
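/*
 * Worked example of the offset encoding above: for an untagged Ethernet
 * frame (ehdrlen = 14) carrying IPv4 with no options (ip_hlen = 20) and a
 * plain TCP header (tcp_hlen = 20), MACLEN = 14 >> 1 = 7 (in 2-byte words),
 * IPLEN = 20 >> 2 = 5 and L4LEN = 20 >> 2 = 5 (both in 4-byte dwords),
 * matching the units the hardware expects in the descriptor offset field.
 */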
/**********************************************************************
 *
 *  Setup context for hardware segmentation offload (TSO)
 *
 **********************************************************************/
static int
ixl_tso_setup(struct tx_ring *txr, if_pkt_info_t pi)
{
	if_softc_ctx_t			scctx;
	struct i40e_tx_context_desc	*TXD;
	u32				cmd, mss, type, tsolen;
	int				idx;
	u64				type_cmd_tso_mss;

	idx = pi->ipi_pidx;
	TXD = (struct i40e_tx_context_desc *) &txr->tx_base[idx];
	tsolen = pi->ipi_len - (pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen);
	scctx = txr->que->vsi->shared;

	type = I40E_TX_DESC_DTYPE_CONTEXT;
	cmd = I40E_TX_CTX_DESC_TSO;
	/* TSO MSS must not be less than 64 */
	if (pi->ipi_tso_segsz < IXL_MIN_TSO_MSS) {
		txr->mss_too_small++;
		pi->ipi_tso_segsz = IXL_MIN_TSO_MSS;
	}
	mss = pi->ipi_tso_segsz;

	type_cmd_tso_mss = ((u64)type << I40E_TXD_CTX_QW1_DTYPE_SHIFT) |
	    ((u64)cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
	    ((u64)tsolen << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
	    ((u64)mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
	TXD->type_cmd_tso_mss = htole64(type_cmd_tso_mss);

	TXD->tunneling_params = htole32(0);

	return ((idx + 1) & (scctx->isc_ntxd[0]-1));
}
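/*
 * Note that the TSO context descriptor consumes one ring slot of its own:
 * the index returned above is the slot the first data descriptor must use,
 * wrapped with the ring mask since isc_ntxd[0] is a power of two.
 */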
/*********************************************************************
 *
 *  This routine maps the mbufs to tx descriptors, allowing the
 *  TX engine to transmit the packets.
 *	- return 0 on success, positive on failure
 *
 **********************************************************************/
#define IXL_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)

static int
ixl_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
	struct ixl_vsi		*vsi = arg;
	if_softc_ctx_t		scctx = vsi->shared;
	struct ixl_tx_queue	*que = &vsi->tx_queues[pi->ipi_qsidx];
	struct tx_ring		*txr = &que->txr;
	int			nsegs = pi->ipi_nsegs;
	bus_dma_segment_t	*segs = pi->ipi_segs;
	struct i40e_tx_desc	*txd = NULL;
	int			i, j, mask, pidx_last;
	u32			cmd, off, tx_intr;

	cmd = off = 0;
	i = pi->ipi_pidx;

	tx_intr = (pi->ipi_flags & IPI_TX_INTR);

	/* Set up the TSO/CSUM offload */
	if (pi->ipi_csum_flags & CSUM_OFFLOAD) {
		/* Set up the TSO context descriptor if required */
		if (pi->ipi_csum_flags & CSUM_TSO) {
			if (ixl_tso_detect_sparse(segs, nsegs, pi))
				return (EFBIG);
			i = ixl_tso_setup(txr, pi);
		}
		ixl_tx_setup_offload(que, pi, &cmd, &off);
	}
	if (pi->ipi_mflags & M_VLANTAG)
		cmd |= I40E_TX_DESC_CMD_IL2TAG1;

	cmd |= I40E_TX_DESC_CMD_ICRC;
	mask = scctx->isc_ntxd[0] - 1;
	for (j = 0; j < nsegs; j++) {
		bus_size_t seglen;

		txd = &txr->tx_base[i];
		seglen = segs[j].ds_len;

		txd->buffer_addr = htole64(segs[j].ds_addr);
		txd->cmd_type_offset_bsz =
		    htole64(I40E_TX_DESC_DTYPE_DATA
		    | ((u64)cmd << I40E_TXD_QW1_CMD_SHIFT)
		    | ((u64)off << I40E_TXD_QW1_OFFSET_SHIFT)
		    | ((u64)seglen << I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
		    | ((u64)htole16(pi->ipi_vtag) << I40E_TXD_QW1_L2TAG1_SHIFT));

		txr->tx_bytes += seglen;
		pidx_last = i;
		i = (i+1) & mask;
	}
	/* Set the last descriptor for report */
	txd->cmd_type_offset_bsz |=
	    htole64(((u64)IXL_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT));
	/* Add to report status array (if using TX interrupts) */
	if (!vsi->enable_head_writeback && tx_intr) {
		txr->tx_rsq[txr->tx_rs_pidx] = pidx_last;
		txr->tx_rs_pidx = (txr->tx_rs_pidx+1) & mask;
		MPASS(txr->tx_rs_pidx != txr->tx_rs_cidx);
	}
	pi->ipi_new_pidx = i;

	++txr->tx_packets;
	return (0);
}
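/*
 * The tx_rsq[] entries recorded above are consumed by
 * ixl_isc_txd_credits_update_dwb() below: each entry names the last
 * descriptor of a packet that had RS set, so completion can be detected
 * by polling just those descriptors instead of scanning the whole ring.
 */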
static void
ixl_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
	struct ixl_vsi *vsi = arg;
	struct tx_ring *txr = &vsi->tx_queues[txqid].txr;

	/*
	 * Advance the Transmit Descriptor Tail (TDT); this tells the
	 * hardware that this frame is available to transmit.
	 */
	wr32(vsi->hw, txr->tail, pidx);
}
/*********************************************************************
 *
 *  (Re)Initialize a queue transmit ring by clearing its memory.
 *
 **********************************************************************/
void
ixl_init_tx_ring(struct ixl_vsi *vsi, struct ixl_tx_queue *que)
{
	struct tx_ring *txr = &que->txr;

	/* Clear the old ring contents */
	bzero((void *)txr->tx_base,
	    (sizeof(struct i40e_tx_desc)) *
	    (vsi->shared->isc_ntxd[0] + (vsi->enable_head_writeback ? 1 : 0)));

	// TODO: Write max descriptor index instead of 0?
	wr32(vsi->hw, txr->tail, 0);
	wr32(vsi->hw, I40E_QTX_HEAD(txr->me), 0);
}
/*
 * ixl_get_tx_head - Retrieve the value from the
 *    location the HW records its HEAD index
 */
static inline u32
ixl_get_tx_head(struct ixl_tx_queue *que)
{
	if_softc_ctx_t scctx = que->vsi->shared;
	struct tx_ring *txr = &que->txr;
	void *head = &txr->tx_base[scctx->isc_ntxd[0]];

	return LE32_TO_CPU(*(volatile __le32 *)head);
}
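/*
 * With head writeback enabled the hardware DMAs its consumed-head index
 * into the element one past the end of the descriptor ring, which is why
 * ixl_init_tx_ring() sizes its bzero() with one extra descriptor when
 * enable_head_writeback is set.
 */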
static int
ixl_isc_txd_credits_update_hwb(void *arg, uint16_t qid, bool clear)
{
	struct ixl_vsi		*vsi = arg;
	if_softc_ctx_t		scctx = vsi->shared;
	struct ixl_tx_queue	*que = &vsi->tx_queues[qid];
	struct tx_ring		*txr = &que->txr;
	int			head, credits;

	/* Get the Head WB value */
	head = ixl_get_tx_head(que);

	credits = head - txr->tx_cidx_processed;
	if (credits < 0)
		credits += scctx->isc_ntxd[0];
	if (clear)
		txr->tx_cidx_processed = head;

	return (credits);
}
static int
ixl_isc_txd_credits_update_dwb(void *arg, uint16_t txqid, bool clear)
{
	struct ixl_vsi *vsi = arg;
	struct ixl_tx_queue *tx_que = &vsi->tx_queues[txqid];
	if_softc_ctx_t scctx = vsi->shared;
	struct tx_ring *txr = &tx_que->txr;

	qidx_t processed = 0;
	qidx_t cur, prev, ntxd, rs_cidx;
	int32_t delta;
	bool is_done;

	rs_cidx = txr->tx_rs_cidx;
#if 0
	device_printf(iflib_get_dev(vsi->ctx), "%s: (q%d) rs_cidx %d, txr->tx_rs_pidx %d\n", __func__,
	    txr->me, rs_cidx, txr->tx_rs_pidx);
#endif
	if (rs_cidx == txr->tx_rs_pidx)
		return (0);
	cur = txr->tx_rsq[rs_cidx];
	MPASS(cur != QIDX_INVALID);
	is_done = ixl_is_tx_desc_done(txr, cur);

	if (clear == false || !is_done)
		return (0);

	prev = txr->tx_cidx_processed;
	ntxd = scctx->isc_ntxd[0];
	do {
		delta = (int32_t)cur - (int32_t)prev;
		MPASS(prev == 0 || delta != 0);
		if (delta < 0)
			delta += ntxd;
#if 0
		device_printf(iflib_get_dev(vsi->ctx),
		    "%s: (q%d) cidx_processed=%u cur=%u clear=%d delta=%d\n",
		    __func__, txr->me, prev, cur, clear, delta);
#endif
		processed += delta;
		prev = cur;
		rs_cidx = (rs_cidx + 1) & (ntxd-1);
		if (rs_cidx == txr->tx_rs_pidx)
			break;
		cur = txr->tx_rsq[rs_cidx];
		MPASS(cur != QIDX_INVALID);
		is_done = ixl_is_tx_desc_done(txr, cur);
	} while (is_done);

	txr->tx_rs_cidx = rs_cidx;
	txr->tx_cidx_processed = prev;

#if 0
	device_printf(iflib_get_dev(vsi->ctx), "%s: (q%d) processed %d\n", __func__, txr->me, processed);
#endif
	return (processed);
}
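/*
 * Contrast with the head-writeback variant above: descriptor writeback
 * (dwb) has no single head location to read, so progress is measured by
 * walking the report-status queue and summing ring-index deltas between
 * consecutive completed RS descriptors.
 */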
static void
ixl_isc_rxd_refill(void *arg, if_rxd_update_t iru)
{
	struct ixl_vsi *vsi = arg;
	if_softc_ctx_t scctx = vsi->shared;
	struct rx_ring *rxr = &((vsi->rx_queues[iru->iru_qsidx]).rxr);
	uint64_t *paddrs;
	uint32_t next_pidx, pidx;
	uint16_t count;
	int i;

	paddrs = iru->iru_paddrs;
	pidx = iru->iru_pidx;
	count = iru->iru_count;

	for (i = 0, next_pidx = pidx; i < count; i++) {
		rxr->rx_base[next_pidx].read.pkt_addr = htole64(paddrs[i]);
		if (++next_pidx == scctx->isc_nrxd[0])
			next_pidx = 0;
	}
}
static void
ixl_isc_rxd_flush(void * arg, uint16_t rxqid, uint8_t flid __unused, qidx_t pidx)
{
	struct ixl_vsi *vsi = arg;
	struct rx_ring *rxr = &vsi->rx_queues[rxqid].rxr;

	wr32(vsi->hw, rxr->tail, pidx);
}
static int
ixl_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget)
{
	struct ixl_vsi *vsi = arg;
	struct rx_ring *rxr = &vsi->rx_queues[rxqid].rxr;
	union i40e_rx_desc *rxd;
	u64 qword;
	uint32_t status;
	int cnt, i, nrxd;

	nrxd = vsi->shared->isc_nrxd[0];

	if (budget == 1) {
		rxd = &rxr->rx_base[idx];
		qword = le64toh(rxd->wb.qword1.status_error_len);
		status = (qword & I40E_RXD_QW1_STATUS_MASK)
		    >> I40E_RXD_QW1_STATUS_SHIFT;
		return !!(status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT));
	}

	for (cnt = 0, i = idx; cnt < nrxd - 1 && cnt <= budget;) {
		rxd = &rxr->rx_base[i];
		qword = le64toh(rxd->wb.qword1.status_error_len);
		status = (qword & I40E_RXD_QW1_STATUS_MASK)
		    >> I40E_RXD_QW1_STATUS_SHIFT;

		if ((status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) == 0)
			break;
		if (++i == nrxd)
			i = 0;
		if (status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT))
			cnt++;
	}

	return (cnt);
}
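/*
 * Note that cnt above counts complete packets (EOF descriptors), not raw
 * descriptors: a multi-fragment packet advances i across several ring
 * entries but contributes a single unit toward the caller's budget.
 */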
/*
** ixl_ptype_to_hash: parse the packet type
** to determine the appropriate hash.
*/
static inline int
ixl_ptype_to_hash(u8 ptype)
{
	struct i40e_rx_ptype_decoded decoded;

	decoded = decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return M_HASHTYPE_OPAQUE;

	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_L2)
		return M_HASHTYPE_OPAQUE;

	/* Note: anything that gets to this point is IP */
	if (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6) {
		switch (decoded.inner_prot) {
		case I40E_RX_PTYPE_INNER_PROT_TCP:
			return M_HASHTYPE_RSS_TCP_IPV6;
		case I40E_RX_PTYPE_INNER_PROT_UDP:
			return M_HASHTYPE_RSS_UDP_IPV6;
		default:
			return M_HASHTYPE_RSS_IPV6;
		}
	}
	if (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4) {
		switch (decoded.inner_prot) {
		case I40E_RX_PTYPE_INNER_PROT_TCP:
			return M_HASHTYPE_RSS_TCP_IPV4;
		case I40E_RX_PTYPE_INNER_PROT_UDP:
			return M_HASHTYPE_RSS_UDP_IPV4;
		default:
			return M_HASHTYPE_RSS_IPV4;
		}
	}
	/* We should never get here */
	return M_HASHTYPE_OPAQUE;
}
/*********************************************************************
 *
 *  This routine executes in ithread context. It sends data which has been
 *  dma'ed into host memory to the upper layer.
 *
 *  Returns 0 upon success, errno on failure
 *
 *********************************************************************/
static int
ixl_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
	struct ixl_vsi		*vsi = arg;
	struct ixl_rx_queue	*que = &vsi->rx_queues[ri->iri_qsidx];
	struct rx_ring		*rxr = &que->rxr;
	union i40e_rx_desc	*cur;
	u32			status, error;
	u16			plen, vtag;
	u64			qword;
	u8			ptype;
	bool			eop;
	int			i, cidx;

	cidx = ri->iri_cidx;
	i = 0;
	do {
		/* 5 descriptor receive limit */
		MPASS(i < IXL_MAX_RX_SEGS);

		cur = &rxr->rx_base[cidx];
		qword = le64toh(cur->wb.qword1.status_error_len);
		status = (qword & I40E_RXD_QW1_STATUS_MASK)
		    >> I40E_RXD_QW1_STATUS_SHIFT;
		error = (qword & I40E_RXD_QW1_ERROR_MASK)
		    >> I40E_RXD_QW1_ERROR_SHIFT;
		plen = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK)
		    >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
		ptype = (qword & I40E_RXD_QW1_PTYPE_MASK)
		    >> I40E_RXD_QW1_PTYPE_SHIFT;

		/* we should never be called without a valid descriptor */
		MPASS((status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) != 0);

		ri->iri_len += plen;
		rxr->rx_bytes += plen;

		cur->wb.qword1.status_error_len = 0;
		eop = (status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT));
		if (status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT))
			vtag = le16toh(cur->wb.qword0.lo_dword.l2tag1);
		else
			vtag = 0;

		/*
		** Make sure bad packets are discarded;
		** note that only the EOP descriptor has
		** valid error results.
		*/
		if (eop && (error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
			rxr->desc_errs++;
			return (EBADMSG);
		}
		ri->iri_frags[i].irf_flid = 0;
		ri->iri_frags[i].irf_idx = cidx;
		ri->iri_frags[i].irf_len = plen;
		if (++cidx == vsi->shared->isc_nrxd[0])
			cidx = 0;
		i++;
	} while (!eop);

	/* capture data for dynamic ITR adjustment */
	rxr->packets++;
	rxr->rx_packets++;

	if ((if_getcapenable(vsi->ifp) & IFCAP_RXCSUM) != 0)
		ixl_rx_checksum(ri, status, error, ptype);
	ri->iri_flowid = le32toh(cur->wb.qword0.hi_dword.rss);
	ri->iri_rsstype = ixl_ptype_to_hash(ptype);
	ri->iri_vtag = vtag;
	ri->iri_nfrags = i;
	if (vtag)
		ri->iri_flags |= M_VLANTAG;
	return (0);
}
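/*
 * The RSS hash, checksum status, and VLAN tag consumed above are read from
 * the last (EOP) descriptor of the chain, which is where the hardware
 * reports valid per-packet metadata.
 */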
/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack about the status of checksum so that stack
 *  doesn't spend time verifying the checksum.
 *
 *********************************************************************/
static void
ixl_rx_checksum(if_rxd_info_t ri, u32 status, u32 error, u8 ptype)
{
	struct i40e_rx_ptype_decoded decoded;

	ri->iri_csum_flags = 0;

	/* No L3 or L4 checksum was calculated */
	if (!(status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
		return;

	decoded = decode_rx_desc_ptype(ptype);

	/* IPv6 with extension headers likely have bad csum */
	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
	    decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6) {
		if (status &
		    (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) {
			ri->iri_csum_flags = 0;
			return;
		}
	}

	ri->iri_csum_flags |= CSUM_L3_CALC;

	/* IPv4 checksum error */
	if (error & (1 << I40E_RX_DESC_ERROR_IPE_SHIFT))
		return;

	ri->iri_csum_flags |= CSUM_L3_VALID;
	ri->iri_csum_flags |= CSUM_L4_CALC;

	/* L4 checksum error */
	if (error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
		return;

	ri->iri_csum_flags |= CSUM_L4_VALID;
	ri->iri_csum_data |= htons(0xffff);
}
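/*
 * The 0xffff written into iri_csum_data is the conventional "pseudo-header
 * checksum already verified" value, so when CSUM_L4_VALID is set the stack
 * can skip software checksum verification entirely.
 */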
/*
 * Input: bitmap of enum i40e_aq_link_speed
 */
u64
ixl_max_aq_speed_to_value(u8 link_speeds)
{
	if (link_speeds & I40E_LINK_SPEED_40GB)
		return IF_Gbps(40);
	if (link_speeds & I40E_LINK_SPEED_25GB)
		return IF_Gbps(25);
	if (link_speeds & I40E_LINK_SPEED_20GB)
		return IF_Gbps(20);
	if (link_speeds & I40E_LINK_SPEED_10GB)
		return IF_Gbps(10);
	if (link_speeds & I40E_LINK_SPEED_1GB)
		return IF_Gbps(1);
	/* 100 Mb/s is both the 100MB case and the minimum supported speed */
	return IF_Mbps(100);
}