/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
#ifndef IXGBE_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
#endif

#include "ixgbe.h"
/************************************************************************
 * Local Function prototypes
 ************************************************************************/
static int ixgbe_isc_txd_encap(void *arg, if_pkt_info_t pi);
static void ixgbe_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx);
static int ixgbe_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear);

static void ixgbe_isc_rxd_refill(void *arg, if_rxd_update_t iru);
static void ixgbe_isc_rxd_flush(void *arg, uint16_t qsidx,
    uint8_t flidx __unused, qidx_t pidx);
static int ixgbe_isc_rxd_available(void *arg, uint16_t qsidx, qidx_t pidx,
    qidx_t budget);
static int ixgbe_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);

static void ixgbe_rx_checksum(uint32_t staterr, if_rxd_info_t ri,
    uint32_t ptype);
static int ixgbe_tx_ctx_setup(struct ixgbe_adv_tx_context_desc *,
    if_pkt_info_t);

extern void ixgbe_if_enable_intr(if_ctx_t ctx);
static int ixgbe_determine_rsstype(uint16_t pkt_info);
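
/*
 * Descriptor-handling ops table registered with iflib; iflib calls these
 * functions to encode and flush TX descriptors and to refill and harvest
 * RX descriptors on behalf of this driver.
 */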
struct if_txrx ixgbe_txrx = {
        .ift_txd_encap = ixgbe_isc_txd_encap,
        .ift_txd_flush = ixgbe_isc_txd_flush,
        .ift_txd_credits_update = ixgbe_isc_txd_credits_update,
        .ift_rxd_available = ixgbe_isc_rxd_available,
        .ift_rxd_pkt_get = ixgbe_isc_rxd_pkt_get,
        .ift_rxd_refill = ixgbe_isc_rxd_refill,
        .ift_rxd_flush = ixgbe_isc_rxd_flush,
        .ift_legacy_intr = NULL
};
/************************************************************************
 * ixgbe_tx_ctx_setup
 *
 *   Advanced Context Descriptor setup for VLAN, CSUM or TSO
 ************************************************************************/
static int
ixgbe_tx_ctx_setup(struct ixgbe_adv_tx_context_desc *TXD, if_pkt_info_t pi)
{
        uint32_t vlan_macip_lens, type_tucmd_mlhl;
        uint32_t olinfo_status, mss_l4len_idx, pktlen, offload;
        uint8_t ehdrlen;

        offload = TRUE;
        olinfo_status = mss_l4len_idx = vlan_macip_lens = type_tucmd_mlhl = 0;
        /* VLAN MACLEN IPLEN */
        vlan_macip_lens |= (htole16(pi->ipi_vtag) << IXGBE_ADVTXD_VLAN_SHIFT);

        /*
         * Some of our VF devices need a context descriptor for every
         * packet.  That means the ehdrlen needs to be non-zero in order
         * for the host driver not to flag a malicious event.  The stack
         * will most likely populate this for all other reasons of why
         * this function was called.
         */
        if (pi->ipi_ehdrlen == 0) {
                ehdrlen = ETHER_HDR_LEN;
                ehdrlen += (pi->ipi_vtag != 0) ? ETHER_VLAN_ENCAP_LEN : 0;
        } else
                ehdrlen = pi->ipi_ehdrlen;
        vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;

        pktlen = pi->ipi_len;
        /* First check if TSO is to be used */
        if (pi->ipi_csum_flags & CSUM_TSO) {
                /* This is used in the transmit desc in encap */
                pktlen = pi->ipi_len - ehdrlen - pi->ipi_ip_hlen -
                    pi->ipi_tcp_hlen;
                mss_l4len_idx |= (pi->ipi_tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
                mss_l4len_idx |= (pi->ipi_tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
        }

        olinfo_status |= pktlen << IXGBE_ADVTXD_PAYLEN_SHIFT;
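
        /*
         * Tell the hardware which L3 protocol is in use so it can parse
         * the headers for checksum insertion.
         */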
        if (pi->ipi_flags & IPI_TX_IPV4) {
                type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
                /* Tell transmit desc to also do IPv4 checksum. */
                if (pi->ipi_csum_flags & (CSUM_IP|CSUM_TSO))
                        olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
        } else if (pi->ipi_flags & IPI_TX_IPV6)
                type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
        else
                offload = FALSE;

        vlan_macip_lens |= pi->ipi_ip_hlen;

        switch (pi->ipi_ipproto) {
        case IPPROTO_TCP:
                if (pi->ipi_csum_flags &
                    (CSUM_IP_TCP | CSUM_IP6_TCP | CSUM_TSO))
                        type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
                else
                        offload = FALSE;
                break;
        case IPPROTO_UDP:
                if (pi->ipi_csum_flags & (CSUM_IP_UDP | CSUM_IP6_UDP))
                        type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
                else
                        offload = FALSE;
                break;
        case IPPROTO_SCTP:
                if (pi->ipi_csum_flags & (CSUM_IP_SCTP | CSUM_IP6_SCTP))
                        type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
                else
                        offload = FALSE;
                break;
        default:
                offload = FALSE;
                break;
        }

        /* Insert L4 checksum into data descriptors */
        if (offload)
                olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
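
        /* DEXT + DTYP_CTXT identify this as an advanced context descriptor. */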
        type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

        /* Now copy bits into descriptor */
        TXD->vlan_macip_lens = htole32(vlan_macip_lens);
        TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
        TXD->seqnum_seed = htole32(0);
        TXD->mss_l4len_idx = htole32(mss_l4len_idx);

        return (olinfo_status);
} /* ixgbe_tx_ctx_setup */

/************************************************************************
 * ixgbe_isc_txd_encap
 ************************************************************************/
static int
ixgbe_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
        struct adapter *sc = arg;
        if_softc_ctx_t scctx = sc->shared;
        struct ix_tx_queue *que = &sc->tx_queues[pi->ipi_qsidx];
        struct tx_ring *txr = &que->txr;
        int nsegs = pi->ipi_nsegs;
        bus_dma_segment_t *segs = pi->ipi_segs;
        union ixgbe_adv_tx_desc *txd = NULL;
        struct ixgbe_adv_tx_context_desc *TXD;
        int i, j, first, pidx_last;
        uint32_t olinfo_status, cmd, flags;
        qidx_t ntxd;

        cmd = (IXGBE_ADVTXD_DTYP_DATA |
               IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);

        if (pi->ipi_mflags & M_VLANTAG)
                cmd |= IXGBE_ADVTXD_DCMD_VLE;

        i = first = pi->ipi_pidx;
        flags = (pi->ipi_flags & IPI_TX_INTR) ? IXGBE_TXD_CMD_RS : 0;
        ntxd = scctx->isc_ntxd[0];
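
        /*
         * Offloads (and some VF devices) require an advanced context
         * descriptor, which consumes the packet's first ring slot.
         */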
        TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[first];
        if ((pi->ipi_csum_flags & CSUM_OFFLOAD) ||
            (sc->feat_en & IXGBE_FEATURE_NEEDS_CTXD) ||
            pi->ipi_vtag) {
                /*********************************************
                 * Set up the appropriate offload context
                 * this will consume the first descriptor
                 *********************************************/
                olinfo_status = ixgbe_tx_ctx_setup(TXD, pi);
                if (pi->ipi_csum_flags & CSUM_TSO) {
                        cmd |= IXGBE_ADVTXD_DCMD_TSE;
                        ++txr->tso_tx;
                }

                if (++i == scctx->isc_ntxd[0])
                        i = 0;
        } else {
                /* Indicate the whole packet as payload when not doing TSO */
                olinfo_status = pi->ipi_len << IXGBE_ADVTXD_PAYLEN_SHIFT;
        }

        olinfo_status |= IXGBE_ADVTXD_CC;
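
        /* One data descriptor per DMA segment of the mbuf chain. */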
        for (j = 0; j < nsegs; j++) {
                bus_size_t seglen;

                txd = &txr->tx_base[i];
                seglen = segs[j].ds_len;

                txd->read.buffer_addr = htole64(segs[j].ds_addr);
                txd->read.cmd_type_len = htole32(cmd | seglen);
                txd->read.olinfo_status = htole32(olinfo_status);
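
                /*
                 * Remember the last slot used; iflib sizes rings to a
                 * power of two, so the index wraps below can use a simple
                 * mask.
                 */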
                pidx_last = i;
                if (++i == scctx->isc_ntxd[0]) {
                        i = 0;
                }
        }

        if (flags) {
                txr->tx_rsq[txr->tx_rs_pidx] = pidx_last;
                txr->tx_rs_pidx = (txr->tx_rs_pidx + 1) & (ntxd - 1);
        }
        txd->read.cmd_type_len |= htole32(IXGBE_TXD_CMD_EOP | flags);

        txr->bytes += pi->ipi_len;
        pi->ipi_new_pidx = i;

        ++txr->total_packets;

        return (0);
} /* ixgbe_isc_txd_encap */

/************************************************************************
 * ixgbe_isc_txd_flush
 ************************************************************************/
static void
ixgbe_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
        struct adapter *sc = arg;
        struct ix_tx_queue *que = &sc->tx_queues[txqid];
        struct tx_ring *txr = &que->txr;
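
        /*
         * Writing the ring's tail register is the doorbell that tells the
         * hardware descriptors up to pidx are ready to transmit.
         */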
        IXGBE_WRITE_REG(&sc->hw, txr->tail, pidx);
} /* ixgbe_isc_txd_flush */

/************************************************************************
 * ixgbe_isc_txd_credits_update
 ************************************************************************/
static int
ixgbe_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
        struct adapter *sc = arg;
        if_softc_ctx_t scctx = sc->shared;
        struct ix_tx_queue *que = &sc->tx_queues[txqid];
        struct tx_ring *txr = &que->txr;
        qidx_t processed = 0;
        int updated;
        qidx_t cur, prev, ntxd, rs_cidx;
        int32_t delta;
        uint8_t status;

        rs_cidx = txr->tx_rs_cidx;
        if (rs_cidx == txr->tx_rs_pidx)
                return (0);

        cur = txr->tx_rsq[rs_cidx];
        status = txr->tx_base[cur].wb.status;
        updated = !!(status & IXGBE_TXD_STAT_DD);

        if (!updated)
                return (0);

        /* If clear is false just let caller know that there
         * are descriptors to reclaim */
        if (!clear)
                return (-1);
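
        /*
         * Each tx_rsq[] entry records a descriptor that was written with
         * the RS bit set; DD on that descriptor means everything up to and
         * including it has completed, so the whole span is credited.
         */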
        prev = txr->tx_cidx_processed;
        ntxd = scctx->isc_ntxd[0];
        do {
                MPASS(prev != cur);
                delta = (int32_t)cur - (int32_t)prev;
                if (delta < 0)
                        delta += ntxd;
                MPASS(delta > 0);

                processed += delta;
                prev = cur;
                rs_cidx = (rs_cidx + 1) & (ntxd - 1);
                if (rs_cidx == txr->tx_rs_pidx)
                        break;

                cur = txr->tx_rsq[rs_cidx];
                status = txr->tx_base[cur].wb.status;
        } while ((status & IXGBE_TXD_STAT_DD));

        txr->tx_rs_cidx = rs_cidx;
        txr->tx_cidx_processed = prev;

        return (processed);
} /* ixgbe_isc_txd_credits_update */

/************************************************************************
 * ixgbe_isc_rxd_refill
 ************************************************************************/
static void
ixgbe_isc_rxd_refill(void *arg, if_rxd_update_t iru)
{
        struct adapter *sc = arg;
        struct ix_rx_queue *que = &sc->rx_queues[iru->iru_qsidx];
        struct rx_ring *rxr = &que->rxr;
        uint64_t *paddrs;
        int i;
        uint32_t next_pidx, pidx;
        uint16_t count;
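
        /*
         * iflib hands us a batch of bus addresses for freshly allocated RX
         * buffers; write one per descriptor, starting at iru_pidx.
         */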
        paddrs = iru->iru_paddrs;
        pidx = iru->iru_pidx;
        count = iru->iru_count;

        for (i = 0, next_pidx = pidx; i < count; i++) {
                rxr->rx_base[next_pidx].read.pkt_addr = htole64(paddrs[i]);
                if (++next_pidx == sc->shared->isc_nrxd[0])
                        next_pidx = 0;
        }
} /* ixgbe_isc_rxd_refill */

/************************************************************************
 * ixgbe_isc_rxd_flush
 ************************************************************************/
static void
ixgbe_isc_rxd_flush(void *arg, uint16_t qsidx, uint8_t flidx __unused,
    qidx_t pidx)
{
        struct adapter *sc = arg;
        struct ix_rx_queue *que = &sc->rx_queues[qsidx];
        struct rx_ring *rxr = &que->rxr;

        IXGBE_WRITE_REG(&sc->hw, rxr->tail, pidx);
} /* ixgbe_isc_rxd_flush */

/************************************************************************
 * ixgbe_isc_rxd_available
 ************************************************************************/
static int
ixgbe_isc_rxd_available(void *arg, uint16_t qsidx, qidx_t pidx, qidx_t budget)
{
        struct adapter *sc = arg;
        struct ix_rx_queue *que = &sc->rx_queues[qsidx];
        struct rx_ring *rxr = &que->rxr;
        union ixgbe_adv_rx_desc *rxd;
        uint32_t staterr;
        int cnt, i, nrxd;

        nrxd = sc->shared->isc_nrxd[0];
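        /*
         * Walk descriptors the hardware has marked done (DD) and count
         * complete packets (EOP), up to the supplied budget.
         */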
        for (cnt = 0, i = pidx; cnt < nrxd && cnt <= budget;) {
                rxd = &rxr->rx_base[i];
                staterr = le32toh(rxd->wb.upper.status_error);

                if ((staterr & IXGBE_RXD_STAT_DD) == 0)
                        break;
                if (++i == nrxd)
                        i = 0;
                if (staterr & IXGBE_RXD_STAT_EOP)
                        cnt++;
        }
        return (cnt);
} /* ixgbe_isc_rxd_available */

/************************************************************************
 * ixgbe_isc_rxd_pkt_get
 *
 *   Sends data that has been DMA'd into host memory to the upper
 *   layer and initializes the ri structure.
 *
 *   Returns 0 upon success, errno on failure
 ************************************************************************/
static int
ixgbe_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
        struct adapter *adapter = arg;
        struct ix_rx_queue *que = &adapter->rx_queues[ri->iri_qsidx];
        struct rx_ring *rxr = &que->rxr;
        struct ifnet *ifp = iflib_get_ifp(adapter->ctx);
        union ixgbe_adv_rx_desc *rxd;

        uint16_t pkt_info, len, cidx, i;
        uint16_t vtag = 0;
        uint32_t ptype;
        uint32_t staterr = 0;
        bool eop;

        i = 0;
        cidx = ri->iri_cidx;
        do {
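                /* Each iteration consumes one descriptor (one fragment). */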
                rxd = &rxr->rx_base[cidx];
                staterr = le32toh(rxd->wb.upper.status_error);
                pkt_info = le16toh(rxd->wb.lower.lo_dword.hs_rss.pkt_info);

                /* Error Checking then decrement count */
                MPASS((staterr & IXGBE_RXD_STAT_DD) != 0);

                len = le16toh(rxd->wb.upper.length);
                ptype = le32toh(rxd->wb.lower.lo_dword.data) &
                    IXGBE_RXDADV_PKTTYPE_MASK;

                ri->iri_len += len;
                rxr->bytes += len;
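
                /* Clear the status bits so a stale DD bit is not seen when
                 * this descriptor slot is reused. */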
                rxd->wb.upper.status_error = 0;
                eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);

                if (rxr->vtag_strip && (staterr & IXGBE_RXD_STAT_VP)) {
                        vtag = le16toh(rxd->wb.upper.vlan);
                } else {
                        vtag = 0;
                }

                /* Make sure bad packets are discarded */
                if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) {
                        if (adapter->feat_en & IXGBE_FEATURE_VF)
                                if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);

                        rxr->rx_discarded++;
                        return (EBADMSG);
                }
                ri->iri_frags[i].irf_flid = 0;
                ri->iri_frags[i].irf_idx = cidx;
                ri->iri_frags[i].irf_len = len;
                if (++cidx == adapter->shared->isc_nrxd[0])
                        cidx = 0;
                i++;
                /* even a 16K packet shouldn't consume more than 8 clusters */
                MPASS(i < 9);
        } while (!eop);

        rxr->rx_packets++;
        rxr->packets++;
        rxr->rx_bytes += ri->iri_len;

        if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
                ixgbe_rx_checksum(staterr, ri, ptype);

        ri->iri_flowid = le32toh(rxd->wb.lower.hi_dword.rss);
        ri->iri_rsstype = ixgbe_determine_rsstype(pkt_info);
        if ((adapter->feat_en & IXGBE_FEATURE_RSS) == 0) {
                if (ri->iri_rsstype == M_HASHTYPE_OPAQUE)
                        ri->iri_rsstype = M_HASHTYPE_NONE;
                else
                        ri->iri_rsstype = M_HASHTYPE_OPAQUE_HASH;
        }
        ri->iri_vtag = vtag;
        ri->iri_nfrags = i;
        if (vtag)
                ri->iri_flags |= M_VLANTAG;

        return (0);
} /* ixgbe_isc_rxd_pkt_get */

/************************************************************************
 * ixgbe_rx_checksum
 *
 *   Verify that the hardware indicated that the checksum is valid.
 *   Inform the stack about the status of the checksum so that it
 *   doesn't spend time verifying it.
 ************************************************************************/
static void
ixgbe_rx_checksum(uint32_t staterr, if_rxd_info_t ri, uint32_t ptype)
{
        uint16_t status = (uint16_t)staterr;
        uint8_t errors = (uint8_t)(staterr >> 24);

        /* If there is a layer 3 or 4 error we are done */
        if (__predict_false(errors & (IXGBE_RXD_ERR_IPE | IXGBE_RXD_ERR_TCPE)))
                return;

        /* IP Checksum Good */
        if (status & IXGBE_RXD_STAT_IPCS)
                ri->iri_csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID);

        /* Valid L4E checksum */
        if (__predict_true(status & IXGBE_RXD_STAT_L4CS)) {
                /* SCTP header present. */
                if (__predict_false((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
                    (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)) {
                        ri->iri_csum_flags |= CSUM_SCTP_VALID;
                } else {
                        ri->iri_csum_flags |= CSUM_DATA_VALID |
                            CSUM_PSEUDO_HDR;
                        ri->iri_csum_data = htons(0xffff);
                }
        }
} /* ixgbe_rx_checksum */

/************************************************************************
 * ixgbe_determine_rsstype
 *
 *   Parse the packet type to determine the appropriate hash
 ************************************************************************/
static int
ixgbe_determine_rsstype(uint16_t pkt_info)
{
        switch (pkt_info & IXGBE_RXDADV_RSSTYPE_MASK) {
        case IXGBE_RXDADV_RSSTYPE_IPV4_TCP:
                return M_HASHTYPE_RSS_TCP_IPV4;
        case IXGBE_RXDADV_RSSTYPE_IPV4:
                return M_HASHTYPE_RSS_IPV4;
        case IXGBE_RXDADV_RSSTYPE_IPV6_TCP:
                return M_HASHTYPE_RSS_TCP_IPV6;
        case IXGBE_RXDADV_RSSTYPE_IPV6_EX:
                return M_HASHTYPE_RSS_IPV6_EX;
        case IXGBE_RXDADV_RSSTYPE_IPV6:
                return M_HASHTYPE_RSS_IPV6;
        case IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX:
                return M_HASHTYPE_RSS_TCP_IPV6_EX;
        case IXGBE_RXDADV_RSSTYPE_IPV4_UDP:
                return M_HASHTYPE_RSS_UDP_IPV4;
        case IXGBE_RXDADV_RSSTYPE_IPV6_UDP:
                return M_HASHTYPE_RSS_UDP_IPV6;
        case IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX:
                return M_HASHTYPE_RSS_UDP_IPV6_EX;
        default:
                return M_HASHTYPE_OPAQUE;
        }
} /* ixgbe_determine_rsstype */