1 /******************************************************************************
3 Copyright (c) 2001-2017, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 #ifndef IXGBE_STANDALONE_BUILD
38 #include "opt_inet6.h"
45 /************************************************************************
46 * Local Function prototypes
47 ************************************************************************/
48 static int ixgbe_isc_txd_encap(void *arg, if_pkt_info_t pi);
49 static void ixgbe_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx);
50 static int ixgbe_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear);
52 static void ixgbe_isc_rxd_refill(void *arg, if_rxd_update_t iru);
53 static void ixgbe_isc_rxd_flush(void *arg, uint16_t qsidx, uint8_t flidx __unused, qidx_t pidx);
54 static int ixgbe_isc_rxd_available(void *arg, uint16_t qsidx, qidx_t pidx,
56 static int ixgbe_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);
58 static void ixgbe_rx_checksum(u32 staterr, if_rxd_info_t ri, u32 ptype);
59 static int ixgbe_tx_ctx_setup(struct ixgbe_adv_tx_context_desc *, if_pkt_info_t);
61 extern void ixgbe_if_enable_intr(if_ctx_t ctx);
62 static int ixgbe_determine_rsstype(u16 pkt_info);
64 struct if_txrx ixgbe_txrx = {
65 .ift_txd_encap = ixgbe_isc_txd_encap,
66 .ift_txd_flush = ixgbe_isc_txd_flush,
67 .ift_txd_credits_update = ixgbe_isc_txd_credits_update,
68 .ift_rxd_available = ixgbe_isc_rxd_available,
69 .ift_rxd_pkt_get = ixgbe_isc_rxd_pkt_get,
70 .ift_rxd_refill = ixgbe_isc_rxd_refill,
71 .ift_rxd_flush = ixgbe_isc_rxd_flush,
72 .ift_legacy_intr = NULL
75 extern if_shared_ctx_t ixgbe_sctx;
/*
 * ixgbe_tx_ctx_setup: build the advanced TX context descriptor used for
 * VLAN tagging, checksum offload and TSO, and return the olinfo_status
 * bits the caller ORs into the following data descriptors.
 *
 * NOTE(review): this block is a numbered listing with lines elided (the
 * embedded numbering jumps, e.g. 93 -> 96, 132 -> 134).  The 'static int'
 * header line, braces, the switch case labels (presumably IPPROTO_TCP/
 * UDP/SCTP -- confirm against the original file) and the 'else' arms are
 * part of the missing text; do not treat this block as compilable as-is.
 */
77 /************************************************************************
80 * Advanced Context Descriptor setup for VLAN, CSUM or TSO
82 ************************************************************************/
84 ixgbe_tx_ctx_setup(struct ixgbe_adv_tx_context_desc *TXD, if_pkt_info_t pi)
86 u32 vlan_macip_lens, type_tucmd_mlhl;
87 u32 olinfo_status, mss_l4len_idx, pktlen, offload;
/* Start from a clean slate; the descriptor fields are OR-ed in below. */
91 olinfo_status = mss_l4len_idx = vlan_macip_lens = type_tucmd_mlhl = 0;
92 /* VLAN MACLEN IPLEN */
93 vlan_macip_lens |= (htole16(pi->ipi_vtag) << IXGBE_ADVTXD_VLAN_SHIFT);
96 * Some of our VF devices need a context descriptor for every
97 * packet. That means the ehdrlen needs to be non-zero in order
98 * for the host driver not to flag a malicious event. The stack
99 * will most likely populate this for all other reasons of why
100 * this function was called.
/* Synthesize an Ethernet header length when the stack did not provide
 * one (VF requirement, per the comment above). */
102 if (pi->ipi_ehdrlen == 0) {
103 ehdrlen = ETHER_HDR_LEN;
104 ehdrlen += (pi->ipi_vtag != 0) ? ETHER_VLAN_ENCAP_LEN : 0;
106 ehdrlen = pi->ipi_ehdrlen;
107 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
109 pktlen = pi->ipi_len;
110 /* First check if TSO is to be used */
111 if (pi->ipi_csum_flags & CSUM_TSO) {
112 /* This is used in the transmit desc in encap */
/* For TSO the reported payload length excludes the L2/L3/L4 headers. */
113 pktlen = pi->ipi_len - ehdrlen - pi->ipi_ip_hlen - pi->ipi_tcp_hlen;
114 mss_l4len_idx |= (pi->ipi_tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
115 mss_l4len_idx |= (pi->ipi_tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
118 olinfo_status |= pktlen << IXGBE_ADVTXD_PAYLEN_SHIFT;
/* L3 protocol bits; CSUM_TSO also forces the IPv4 header checksum. */
120 if (pi->ipi_flags & IPI_TX_IPV4) {
121 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
122 /* Tell transmit desc to also do IPv4 checksum. */
123 if (pi->ipi_csum_flags & (CSUM_IP|CSUM_TSO))
124 olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
125 } else if (pi->ipi_flags & IPI_TX_IPV6)
126 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
130 vlan_macip_lens |= pi->ipi_ip_hlen;
/* Per-L4-protocol checksum command bits; the case labels and break
 * statements are elided from this listing. */
132 switch (pi->ipi_ipproto) {
134 if (pi->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP | CSUM_TSO))
135 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
140 if (pi->ipi_csum_flags & (CSUM_IP_UDP | CSUM_IP6_UDP))
141 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
146 if (pi->ipi_csum_flags & (CSUM_IP_SCTP | CSUM_IP6_SCTP))
147 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
155 /* Insert L4 checksum into data descriptors */
157 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
/* Mark this as an advanced (DEXT) context (CTXT) descriptor. */
159 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
161 /* Now copy bits into descriptor */
162 TXD->vlan_macip_lens = htole32(vlan_macip_lens);
163 TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
164 TXD->seqnum_seed = htole32(0);
165 TXD->mss_l4len_idx = htole32(mss_l4len_idx);
/* Caller ORs this into each data descriptor's olinfo_status. */
167 return (olinfo_status);
168 } /* ixgbe_tx_ctx_setup */
/*
 * ixgbe_isc_txd_encap: iflib TX encapsulation hook.  Optionally writes an
 * offload context descriptor into the first ring slot, then one advanced
 * data descriptor per DMA segment, records the EOP index for later
 * reclamation, and advances pi->ipi_new_pidx.
 *
 * NOTE(review): numbered listing with elided lines (e.g. 186-187,
 * 209-214, 222-223, 233-237, 245-248).  Declarations of 'ntxd'/'seglen',
 * the ring-wrap assignments, 'pidx_last' tracking and the final return
 * are part of the missing text; not compilable as shown.
 */
170 /************************************************************************
171 * ixgbe_isc_txd_encap
172 ************************************************************************/
174 ixgbe_isc_txd_encap(void *arg, if_pkt_info_t pi)
176 struct adapter *sc = arg;
177 if_softc_ctx_t scctx = sc->shared;
178 struct ix_tx_queue *que = &sc->tx_queues[pi->ipi_qsidx];
179 struct tx_ring *txr = &que->txr;
180 int nsegs = pi->ipi_nsegs;
181 bus_dma_segment_t *segs = pi->ipi_segs;
182 union ixgbe_adv_tx_desc *txd = NULL;
183 struct ixgbe_adv_tx_context_desc *TXD;
184 int i, j, first, pidx_last;
185 u32 olinfo_status, cmd, flags;
/* Base command for every data descriptor: advanced type, CRC insert. */
188 cmd = (IXGBE_ADVTXD_DTYP_DATA |
189 IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
191 if (pi->ipi_mflags & M_VLANTAG)
192 cmd |= IXGBE_ADVTXD_DCMD_VLE;
194 i = first = pi->ipi_pidx;
/* Request a report-status writeback only when iflib wants an interrupt. */
195 flags = (pi->ipi_flags & IPI_TX_INTR) ? IXGBE_TXD_CMD_RS : 0;
196 ntxd = scctx->isc_ntxd[0];
/* The first slot may be consumed by an offload context descriptor. */
198 TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[first];
199 if ((pi->ipi_csum_flags & CSUM_OFFLOAD) ||
200 (sc->feat_en & IXGBE_FEATURE_NEEDS_CTXD) ||
202 /*********************************************
203 * Set up the appropriate offload context
204 * this will consume the first descriptor
205 *********************************************/
206 olinfo_status = ixgbe_tx_ctx_setup(TXD, pi);
207 if (pi->ipi_csum_flags & CSUM_TSO) {
208 cmd |= IXGBE_ADVTXD_DCMD_TSE;
/* Context descriptor consumed slot 'first'; advance with ring wrap. */
212 if (++i == scctx->isc_ntxd[0])
215 /* Indicate the whole packet as payload when not doing TSO */
216 olinfo_status = pi->ipi_len << IXGBE_ADVTXD_PAYLEN_SHIFT;
219 olinfo_status |= IXGBE_ADVTXD_CC;
/* Emit one data descriptor per DMA segment. */
221 for (j = 0; j < nsegs; j++) {
224 txd = &txr->tx_base[i];
225 seglen = segs[j].ds_len;
227 txd->read.buffer_addr = htole64(segs[j].ds_addr);
228 txd->read.cmd_type_len = htole32(cmd | seglen);
229 txd->read.olinfo_status = htole32(olinfo_status);
232 if (++i == scctx->isc_ntxd[0]) {
/* Remember where RS was set so txd_credits_update can reclaim later. */
238 txr->tx_rsq[txr->tx_rs_pidx] = pidx_last;
239 txr->tx_rs_pidx = (txr->tx_rs_pidx + 1) & (ntxd - 1);
/* Terminate the packet on its last data descriptor. */
241 txd->read.cmd_type_len |= htole32(IXGBE_TXD_CMD_EOP | flags);
243 txr->bytes += pi->ipi_len;
244 pi->ipi_new_pidx = i;
246 ++txr->total_packets;
249 } /* ixgbe_isc_txd_encap */
251 /************************************************************************
252 * ixgbe_isc_txd_flush
253 ************************************************************************/
255 ixgbe_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
257 struct adapter *sc = arg;
258 struct ix_tx_queue *que = &sc->tx_queues[txqid];
259 struct tx_ring *txr = &que->txr;
261 IXGBE_WRITE_REG(&sc->hw, txr->tail, pidx);
262 } /* ixgbe_isc_txd_flush */
/*
 * ixgbe_isc_txd_credits_update: iflib hook reporting how many TX
 * descriptors the hardware has completed.  With clear == false it only
 * answers whether anything is reclaimable; with clear == true it walks
 * the recorded RS indices while the DD status bit is set and returns the
 * number of descriptors processed.
 *
 * NOTE(review): numbered listing with elided lines.  Declarations of
 * 'status'/'updated'/'delta', the early 'return' statements, the 'do {'
 * opener and the MPASS/delta accumulation are part of the missing text;
 * not compilable as shown.
 */
264 /************************************************************************
265 * ixgbe_isc_txd_credits_update
266 ************************************************************************/
268 ixgbe_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
270 struct adapter *sc = arg;
271 if_softc_ctx_t scctx = sc->shared;
272 struct ix_tx_queue *que = &sc->tx_queues[txqid];
273 struct tx_ring *txr = &que->txr;
274 qidx_t processed = 0;
276 qidx_t cur, prev, ntxd, rs_cidx;
/* Nothing to do when no RS indices are outstanding. */
280 rs_cidx = txr->tx_rs_cidx;
281 if (rs_cidx == txr->tx_rs_pidx)
/* Peek at the oldest RS descriptor's writeback status. */
284 cur = txr->tx_rsq[rs_cidx];
285 status = txr->tx_base[cur].wb.status;
286 updated = !!(status & IXGBE_TXD_STAT_DD);
291 /* If clear is false just let caller know that there
292 * are descriptors to reclaim */
296 prev = txr->tx_cidx_processed;
297 ntxd = scctx->isc_ntxd[0];
/* Distance from last processed index to this completed RS index,
 * corrected for ring wrap (ntxd is a power of two). */
300 delta = (int32_t)cur - (int32_t)prev;
307 rs_cidx = (rs_cidx + 1) & (ntxd - 1);
308 if (rs_cidx == txr->tx_rs_pidx)
311 cur = txr->tx_rsq[rs_cidx];
312 status = txr->tx_base[cur].wb.status;
313 } while ((status & IXGBE_TXD_STAT_DD));
/* Persist consumer-side progress for the next call. */
315 txr->tx_rs_cidx = rs_cidx;
316 txr->tx_cidx_processed = prev;
319 } /* ixgbe_isc_txd_credits_update */
321 /************************************************************************
322 * ixgbe_isc_rxd_refill
323 ************************************************************************/
325 ixgbe_isc_rxd_refill(void *arg, if_rxd_update_t iru)
327 struct adapter *sc = arg;
328 struct ix_rx_queue *que = &sc->rx_queues[iru->iru_qsidx];
329 struct rx_ring *rxr = &que->rxr;
332 uint32_t next_pidx, pidx;
335 paddrs = iru->iru_paddrs;
336 pidx = iru->iru_pidx;
337 count = iru->iru_count;
339 for (i = 0, next_pidx = pidx; i < count; i++) {
340 rxr->rx_base[next_pidx].read.pkt_addr = htole64(paddrs[i]);
341 if (++next_pidx == sc->shared->isc_nrxd[0])
344 } /* ixgbe_isc_rxd_refill */
346 /************************************************************************
347 * ixgbe_isc_rxd_flush
348 ************************************************************************/
350 ixgbe_isc_rxd_flush(void *arg, uint16_t qsidx, uint8_t flidx __unused, qidx_t pidx)
352 struct adapter *sc = arg;
353 struct ix_rx_queue *que = &sc->rx_queues[qsidx];
354 struct rx_ring *rxr = &que->rxr;
356 IXGBE_WRITE_REG(&sc->hw, rxr->tail, pidx);
357 } /* ixgbe_isc_rxd_flush */
359 /************************************************************************
360 * ixgbe_isc_rxd_available
361 ************************************************************************/
363 ixgbe_isc_rxd_available(void *arg, uint16_t qsidx, qidx_t pidx, qidx_t budget)
365 struct adapter *sc = arg;
366 struct ix_rx_queue *que = &sc->rx_queues[qsidx];
367 struct rx_ring *rxr = &que->rxr;
368 union ixgbe_adv_rx_desc *rxd;
372 nrxd = sc->shared->isc_nrxd[0];
373 for (cnt = 0, i = pidx; cnt < nrxd && cnt <= budget;) {
374 rxd = &rxr->rx_base[i];
375 staterr = le32toh(rxd->wb.upper.status_error);
377 if ((staterr & IXGBE_RXD_STAT_DD) == 0)
381 if (staterr & IXGBE_RXD_STAT_EOP)
385 } /* ixgbe_isc_rxd_available */
/*
 * ixgbe_isc_rxd_pkt_get: gather the descriptor chain of one received
 * packet into ri->iri_frags, strip the VLAN tag when present, discard
 * frames with hardware error bits, and fill in checksum/RSS metadata.
 *
 * NOTE(review): numbered listing with elided lines.  The local variable
 * initializers (vtag/ptype/staterr/eop), the descriptor-walk loop opener
 * and terminator, the 'else' arm of the vtag strip, the error-discard
 * return path, '#endif', the MPASS cluster-count bound and the byte/
 * packet counters are part of the missing text; not compilable as shown.
 */
387 /************************************************************************
388 * ixgbe_isc_rxd_pkt_get
390 * Routine sends data which has been dma'ed into host memory
391 * to upper layer. Initialize ri structure.
393 * Returns 0 upon success, errno on failure
394 ************************************************************************/
397 ixgbe_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
399 struct adapter *adapter = arg;
400 struct ix_rx_queue *que = &adapter->rx_queues[ri->iri_qsidx];
401 struct rx_ring *rxr = &que->rxr;
402 struct ifnet *ifp = iflib_get_ifp(adapter->ctx);
403 union ixgbe_adv_rx_desc *rxd;
405 u16 pkt_info, len, cidx, i;
/* Read the writeback half of the descriptor at the current index. */
414 rxd = &rxr->rx_base[cidx];
415 staterr = le32toh(rxd->wb.upper.status_error);
416 pkt_info = le16toh(rxd->wb.lower.lo_dword.hs_rss.pkt_info);
418 /* Error Checking then decrement count */
/* iflib guarantees rxd_available saw DD before calling here. */
419 MPASS ((staterr & IXGBE_RXD_STAT_DD) != 0);
421 len = le16toh(rxd->wb.upper.length);
422 ptype = le32toh(rxd->wb.lower.lo_dword.data) &
423 IXGBE_RXDADV_PKTTYPE_MASK;
/* Clear the status so a stale DD bit is never re-read on ring wrap. */
428 rxd->wb.upper.status_error = 0;
429 eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
/* Capture the hardware-stripped VLAN tag when stripping is enabled. */
431 if ( (rxr->vtag_strip) && (staterr & IXGBE_RXD_STAT_VP) ) {
432 vtag = le16toh(rxd->wb.upper.vlan);
437 /* Make sure bad packets are discarded */
438 if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) {
440 #if __FreeBSD_version >= 1100036
441 if (adapter->feat_en & IXGBE_FEATURE_VF)
442 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
/* Record this fragment for iflib (free list 0, ring index, length). */
448 ri->iri_frags[i].irf_flid = 0;
449 ri->iri_frags[i].irf_idx = cidx;
450 ri->iri_frags[i].irf_len = len;
451 if (++cidx == adapter->shared->isc_nrxd[0])
454 /* even a 16K packet shouldn't consume more than 8 clusters */
460 rxr->rx_bytes += ri->iri_len;
/* Hardware checksum results are only consulted when RXCSUM is on. */
462 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
463 ixgbe_rx_checksum(staterr, ri, ptype);
465 ri->iri_flowid = le32toh(rxd->wb.lower.hi_dword.rss);
466 ri->iri_rsstype = ixgbe_determine_rsstype(pkt_info);
/* Without the RSS feature, report only an opaque (non-RSS) hash. */
467 if ((adapter->feat_en & IXGBE_FEATURE_RSS) == 0) {
468 if (ri->iri_rsstype == M_HASHTYPE_OPAQUE)
469 ri->iri_rsstype = M_HASHTYPE_NONE;
471 ri->iri_rsstype = M_HASHTYPE_OPAQUE_HASH;
476 ri->iri_flags |= M_VLANTAG;
478 } /* ixgbe_isc_rxd_pkt_get */
480 /************************************************************************
483 * Verify that the hardware indicated that the checksum is valid.
484 * Inform the stack about the status of checksum so that stack
485 * doesn't spend time verifying the checksum.
486 ************************************************************************/
488 ixgbe_rx_checksum(u32 staterr, if_rxd_info_t ri, u32 ptype)
490 u16 status = (u16)staterr;
491 u8 errors = (u8)(staterr >> 24);
494 if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
495 (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
499 if (status & IXGBE_RXD_STAT_IPCS) {
500 if (!(errors & IXGBE_RXD_ERR_IPE)) {
501 /* IP Checksum Good */
502 ri->iri_csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID;
504 ri->iri_csum_flags = 0;
506 /* TCP/UDP/SCTP checksum */
507 if (status & IXGBE_RXD_STAT_L4CS) {
508 u64 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
509 #if __FreeBSD_version >= 800000
511 type = CSUM_SCTP_VALID;
513 if (!(errors & IXGBE_RXD_ERR_TCPE)) {
514 ri->iri_csum_flags |= type;
516 ri->iri_csum_data = htons(0xffff);
519 } /* ixgbe_rx_checksum */
521 /************************************************************************
522 * ixgbe_determine_rsstype
524 * Parse the packet type to determine the appropriate hash
525 ************************************************************************/
527 ixgbe_determine_rsstype(u16 pkt_info)
529 switch (pkt_info & IXGBE_RXDADV_RSSTYPE_MASK) {
530 case IXGBE_RXDADV_RSSTYPE_IPV4_TCP:
531 return M_HASHTYPE_RSS_TCP_IPV4;
532 case IXGBE_RXDADV_RSSTYPE_IPV4:
533 return M_HASHTYPE_RSS_IPV4;
534 case IXGBE_RXDADV_RSSTYPE_IPV6_TCP:
535 return M_HASHTYPE_RSS_TCP_IPV6;
536 case IXGBE_RXDADV_RSSTYPE_IPV6_EX:
537 return M_HASHTYPE_RSS_IPV6_EX;
538 case IXGBE_RXDADV_RSSTYPE_IPV6:
539 return M_HASHTYPE_RSS_IPV6;
540 case IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX:
541 return M_HASHTYPE_RSS_TCP_IPV6_EX;
542 case IXGBE_RXDADV_RSSTYPE_IPV4_UDP:
543 return M_HASHTYPE_RSS_UDP_IPV4;
544 case IXGBE_RXDADV_RSSTYPE_IPV6_UDP:
545 return M_HASHTYPE_RSS_UDP_IPV6;
546 case IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX:
547 return M_HASHTYPE_RSS_UDP_IPV6_EX;
549 return M_HASHTYPE_OPAQUE;
551 } /* ixgbe_determine_rsstype */