1 /******************************************************************************
3 Copyright (c) 2013-2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 ** IXL driver TX/RX Routines:
37 ** This was separated to allow usage by
38 ** both the BASE and the VF drivers.
41 #ifndef IXL_STANDALONE_BUILD
43 #include "opt_inet6.h"
49 #include <net/rss_config.h>
52 /* Local Prototypes */
53 static void ixl_rx_checksum(struct mbuf *, u32, u32, u8);
54 static void ixl_refresh_mbufs(struct ixl_queue *, int);
55 static int ixl_xmit(struct ixl_queue *, struct mbuf **);
56 static int ixl_tx_setup_offload(struct ixl_queue *,
57 struct mbuf *, u32 *, u32 *);
58 static bool ixl_tso_setup(struct ixl_queue *, struct mbuf *);
60 static __inline void ixl_rx_discard(struct rx_ring *, int);
61 static __inline void ixl_rx_input(struct rx_ring *, struct ifnet *,
65 #include <dev/netmap/if_ixl_netmap.h>
66 #endif /* DEV_NETMAP */
69 ** Multiqueue Transmit driver
73 ixl_mq_start(struct ifnet *ifp, struct mbuf *m)
75 struct ixl_vsi *vsi = ifp->if_softc;
76 struct ixl_queue *que;
84 ** Which queue to use:
86 ** When doing RSS, map it to the same outbound
87 ** queue as the incoming flow would be mapped to.
88 	** If everything is set up correctly, it should be
89 	** the same bucket as the one the current CPU maps to.
91 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
93 if (rss_hash2bucket(m->m_pkthdr.flowid,
94 M_HASHTYPE_GET(m), &bucket_id) == 0) {
95 i = bucket_id % vsi->num_queues;
98 i = m->m_pkthdr.flowid % vsi->num_queues;
100 i = curcpu % vsi->num_queues;
102 	** This may not be perfect, but until something
103 	** better comes along it will keep us from scheduling
104 	** on stalled queues.
106 if (((1 << i) & vsi->active_queues) == 0)
107 i = ffsl(vsi->active_queues);
109 que = &vsi->queues[i];
112 err = drbr_enqueue(ifp, txr->br, m);
115 if (IXL_TX_TRYLOCK(txr)) {
116 ixl_mq_start_locked(ifp, txr);
119 taskqueue_enqueue(que->tq, &que->tx_task);
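	/*
	** Worked example (illustrative, not from the original source): with
	** vsi->num_queues = 8 and rss_hash2bucket() returning bucket_id = 11
	** for a flow, the frame is enqueued on TX queue 11 % 8 = 3, which is
	** the same queue the flow's inbound traffic maps to when the RSS
	** indirection setup is consistent.
	*/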
125 ixl_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
127 struct ixl_queue *que = txr->que;
128 struct ixl_vsi *vsi = que->vsi;
133 if (((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) ||
134 vsi->link_active == 0)
137 /* Process the transmit queue */
138 while ((next = drbr_peek(ifp, txr->br)) != NULL) {
139 if ((err = ixl_xmit(que, &next)) != 0) {
141 drbr_advance(ifp, txr->br);
143 drbr_putback(ifp, txr->br, next);
146 drbr_advance(ifp, txr->br);
147 /* Send a copy of the frame to the BPF listener */
148 ETHER_BPF_MTAP(ifp, next);
149 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
153 if (txr->avail < IXL_TX_CLEANUP_THRESHOLD)
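	/*
	** Note on the buf_ring usage above (summary of the drbr_* helpers,
	** not new driver logic): drbr_peek() returns the head of the ring
	** without removing it; a successfully transmitted frame is then
	** consumed with drbr_advance(), while a recoverable failure puts the
	** (possibly modified) mbuf back with drbr_putback() so a later pass
	** can retry it.
	*/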
160 * Called from a taskqueue to drain queued transmit packets.
163 ixl_deferred_mq_start(void *arg, int pending)
165 struct ixl_queue *que = arg;
166 struct tx_ring *txr = &que->txr;
167 struct ixl_vsi *vsi = que->vsi;
168 struct ifnet *ifp = vsi->ifp;
171 if (!drbr_empty(ifp, txr->br))
172 ixl_mq_start_locked(ifp, txr);
177 ** Flush all queue ring buffers
180 ixl_qflush(struct ifnet *ifp)
182 struct ixl_vsi *vsi = ifp->if_softc;
184 for (int i = 0; i < vsi->num_queues; i++) {
185 struct ixl_queue *que = &vsi->queues[i];
186 struct tx_ring *txr = &que->txr;
189 while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
197 ** Find mbuf chains passed to the driver
198 ** that are 'sparse', using more than 8
199 ** mbufs to deliver an mss-size chunk of data
202 ixl_tso_detect_sparse(struct mbuf *mp)
208 mss = mp->m_pkthdr.tso_segsz;
209 for (m = mp->m_next; m != NULL; m = m->m_next) {
214 if (m->m_next == NULL)
217 if (num > IXL_SPARSE_CHAIN)
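/*
** Illustrative sketch (an assumption, not necessarily the exact code elided
** above): count how many mbufs are consumed before one full MSS of payload
** has been gathered; if more than IXL_SPARSE_CHAIN are needed, the chain is
** considered sparse and the caller defragments it before attempting TSO.
*/
static inline bool
ixl_example_tso_is_sparse(struct mbuf *mp)
{
	struct mbuf	*m;
	int		mss = mp->m_pkthdr.tso_segsz;
	int		num = 0;

	for (m = mp->m_next; m != NULL; m = m->m_next) {
		num++;
		mss -= m->m_len;
		if (mss < 1)		/* one full segment gathered */
			break;
	}
	return (num > IXL_SPARSE_CHAIN);
}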
224 /*********************************************************************
226 * This routine maps the mbufs to tx descriptors, allowing the
227 * TX engine to transmit the packets.
228 * - return 0 on success, positive on failure
230 **********************************************************************/
231 #define IXL_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
234 ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp)
236 struct ixl_vsi *vsi = que->vsi;
237 struct i40e_hw *hw = vsi->hw;
238 struct tx_ring *txr = &que->txr;
239 struct ixl_tx_buf *buf;
240 struct i40e_tx_desc *txd = NULL;
241 struct mbuf *m_head, *m;
242 int i, j, error, nsegs, maxsegs;
248 bus_dma_segment_t segs[IXL_MAX_TSO_SEGS];
255 * Important to capture the first descriptor
256 * used because it will contain the index of
257 * the one we tell the hardware to report back
259 first = txr->next_avail;
260 buf = &txr->buffers[first];
263 maxsegs = IXL_MAX_TX_SEGS;
265 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
266 /* Use larger mapping for TSO */
268 maxsegs = IXL_MAX_TSO_SEGS;
269 if (ixl_tso_detect_sparse(m_head)) {
270 m = m_defrag(m_head, M_NOWAIT);
281 * Map the packet for DMA.
283 error = bus_dmamap_load_mbuf_sg(tag, map,
284 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
286 if (error == EFBIG) {
289 m = m_collapse(*m_headp, M_NOWAIT, maxsegs);
291 que->mbuf_defrag_failed++;
299 error = bus_dmamap_load_mbuf_sg(tag, map,
300 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
302 if (error == ENOMEM) {
305 } else if (error != 0) {
311 } else if (error == ENOMEM) {
314 } else if (error != 0) {
321 /* Make certain there are enough descriptors */
322 if (nsegs > txr->avail - 2) {
329 /* Set up the TSO/CSUM offload */
330 if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) {
331 error = ixl_tx_setup_offload(que, m_head, &cmd, &off);
336 cmd |= I40E_TX_DESC_CMD_ICRC;
337 /* Grab the VLAN tag */
338 if (m_head->m_flags & M_VLANTAG) {
339 cmd |= I40E_TX_DESC_CMD_IL2TAG1;
340 vtag = htole16(m_head->m_pkthdr.ether_vtag);
344 for (j = 0; j < nsegs; j++) {
347 buf = &txr->buffers[i];
348 buf->tag = tag; /* Keep track of the type tag */
350 seglen = segs[j].ds_len;
352 txd->buffer_addr = htole64(segs[j].ds_addr);
353 txd->cmd_type_offset_bsz =
354 htole64(I40E_TX_DESC_DTYPE_DATA
355 | ((u64)cmd << I40E_TXD_QW1_CMD_SHIFT)
356 | ((u64)off << I40E_TXD_QW1_OFFSET_SHIFT)
357 | ((u64)seglen << I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
358 | ((u64)vtag << I40E_TXD_QW1_L2TAG1_SHIFT));
360 last = i; /* descriptor that will get completion IRQ */
362 if (++i == que->num_desc)
368 /* Set the last descriptor for report */
369 txd->cmd_type_offset_bsz |=
370 htole64(((u64)IXL_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT));
374 buf->m_head = m_head;
375 /* Swap the dma map between the first and last descriptor */
376 txr->buffers[first].map = buf->map;
378 bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE);
380 /* Set the index of the descriptor that will be marked done */
381 buf = &txr->buffers[first];
382 buf->eop_index = last;
384 bus_dmamap_sync(txr->dma.tag, txr->dma.map,
385 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
387 	 * Advance the Transmit Descriptor Tail (TDT); this tells the
388 	 * hardware that this frame is available to transmit.
390 ++txr->total_packets;
391 wr32(hw, txr->tail, i);
394 /* Mark outstanding work */
400 bus_dmamap_unload(tag, buf->map);
405 /*********************************************************************
407 * Allocate memory for tx_buffer structures. The tx_buffer stores all
408 * the information needed to transmit a packet on the wire. This is
409  *  called only once at attach; setup is done on every reset.
411 **********************************************************************/
413 ixl_allocate_tx_data(struct ixl_queue *que)
415 struct tx_ring *txr = &que->txr;
416 struct ixl_vsi *vsi = que->vsi;
417 device_t dev = vsi->dev;
418 struct ixl_tx_buf *buf;
422 * Setup DMA descriptor areas.
424 if ((error = bus_dma_tag_create(NULL, /* parent */
425 1, 0, /* alignment, bounds */
426 BUS_SPACE_MAXADDR, /* lowaddr */
427 BUS_SPACE_MAXADDR, /* highaddr */
428 NULL, NULL, /* filter, filterarg */
429 IXL_TSO_SIZE, /* maxsize */
430 IXL_MAX_TX_SEGS, /* nsegments */
431 PAGE_SIZE, /* maxsegsize */
434 NULL, /* lockfuncarg */
436 device_printf(dev,"Unable to allocate TX DMA tag\n");
440 /* Make a special tag for TSO */
441 if ((error = bus_dma_tag_create(NULL, /* parent */
442 1, 0, /* alignment, bounds */
443 BUS_SPACE_MAXADDR, /* lowaddr */
444 BUS_SPACE_MAXADDR, /* highaddr */
445 NULL, NULL, /* filter, filterarg */
446 IXL_TSO_SIZE, /* maxsize */
447 IXL_MAX_TSO_SEGS, /* nsegments */
448 PAGE_SIZE, /* maxsegsize */
451 NULL, /* lockfuncarg */
453 device_printf(dev,"Unable to allocate TX TSO DMA tag\n");
458 (struct ixl_tx_buf *) malloc(sizeof(struct ixl_tx_buf) *
459 que->num_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
460 device_printf(dev, "Unable to allocate tx_buffer memory\n");
465 /* Create the descriptor buffer default dma maps */
467 for (int i = 0; i < que->num_desc; i++, buf++) {
468 buf->tag = txr->tx_tag;
469 error = bus_dmamap_create(buf->tag, 0, &buf->map);
471 device_printf(dev, "Unable to create TX DMA map\n");
480 /*********************************************************************
482 * (Re)Initialize a queue transmit ring.
483 * - called by init, it clears the descriptor ring,
484 * and frees any stale mbufs
486 **********************************************************************/
488 ixl_init_tx_ring(struct ixl_queue *que)
490 struct tx_ring *txr = &que->txr;
491 struct ixl_tx_buf *buf;
493 struct netmap_adapter *na = NA(que->vsi->ifp);
494 struct netmap_slot *slot;
495 #endif /* DEV_NETMAP */
497 /* Clear the old ring contents */
501 * (under lock): if in netmap mode, do some consistency
502 * checks and set slot to entry 0 of the netmap ring.
504 slot = netmap_reset(na, NR_TX, que->me, 0);
505 #endif /* DEV_NETMAP */
507 bzero((void *)txr->base,
508 (sizeof(struct i40e_tx_desc)) * que->num_desc);
512 txr->next_to_clean = 0;
515 /* Initialize flow director */
516 txr->atr_rate = ixl_atr_rate;
520 /* Free any existing tx mbufs. */
522 for (int i = 0; i < que->num_desc; i++, buf++) {
523 if (buf->m_head != NULL) {
524 bus_dmamap_sync(buf->tag, buf->map,
525 BUS_DMASYNC_POSTWRITE);
526 bus_dmamap_unload(buf->tag, buf->map);
527 m_freem(buf->m_head);
532 * In netmap mode, set the map for the packet buffer.
533 * NOTE: Some drivers (not this one) also need to set
534 * the physical buffer address in the NIC ring.
535 * netmap_idx_n2k() maps a nic index, i, into the corresponding
536 * netmap slot index, si
539 int si = netmap_idx_n2k(&na->tx_rings[que->me], i);
540 netmap_load_map(na, buf->tag, buf->map, NMB(na, slot + si));
542 #endif /* DEV_NETMAP */
543 /* Clear the EOP index */
547 /* Set number of descriptors available */
548 txr->avail = que->num_desc;
550 bus_dmamap_sync(txr->dma.tag, txr->dma.map,
551 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
556 /*********************************************************************
558 * Free transmit ring related data structures.
560 **********************************************************************/
562 ixl_free_que_tx(struct ixl_queue *que)
564 struct tx_ring *txr = &que->txr;
565 struct ixl_tx_buf *buf;
567 INIT_DBG_IF(que->vsi->ifp, "queue %d: begin", que->me);
569 for (int i = 0; i < que->num_desc; i++) {
570 buf = &txr->buffers[i];
571 if (buf->m_head != NULL) {
572 bus_dmamap_sync(buf->tag, buf->map,
573 BUS_DMASYNC_POSTWRITE);
574 bus_dmamap_unload(buf->tag,
576 m_freem(buf->m_head);
578 if (buf->map != NULL) {
579 bus_dmamap_destroy(buf->tag,
583 } else if (buf->map != NULL) {
584 bus_dmamap_unload(buf->tag,
586 bus_dmamap_destroy(buf->tag,
592 buf_ring_free(txr->br, M_DEVBUF);
593 if (txr->buffers != NULL) {
594 free(txr->buffers, M_DEVBUF);
597 if (txr->tx_tag != NULL) {
598 bus_dma_tag_destroy(txr->tx_tag);
601 if (txr->tso_tag != NULL) {
602 bus_dma_tag_destroy(txr->tso_tag);
606 INIT_DBG_IF(que->vsi->ifp, "queue %d: end", que->me);
610 /*********************************************************************
612 * Setup descriptor for hw offloads
614 **********************************************************************/
617 ixl_tx_setup_offload(struct ixl_queue *que,
618 struct mbuf *mp, u32 *cmd, u32 *off)
620 struct ether_vlan_header *eh;
622 struct ip *ip = NULL;
624 struct tcphdr *th = NULL;
628 int elen, ip_hlen = 0, tcp_hlen;
634 /* Set up the TSO context descriptor if required */
635 if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
636 tso = ixl_tso_setup(que, mp);
644 * Determine where frame payload starts.
645 * Jump over vlan headers if already present,
646 * helpful for QinQ too.
648 eh = mtod(mp, struct ether_vlan_header *);
649 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
650 etype = ntohs(eh->evl_proto);
651 elen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
653 etype = ntohs(eh->evl_encap_proto);
654 elen = ETHER_HDR_LEN;
660 ip = (struct ip *)(mp->m_data + elen);
661 ip_hlen = ip->ip_hl << 2;
663 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
664 /* The IP checksum must be recalculated with TSO */
666 *cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
668 *cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
673 ip6 = (struct ip6_hdr *)(mp->m_data + elen);
674 ip_hlen = sizeof(struct ip6_hdr);
675 ipproto = ip6->ip6_nxt;
676 th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
677 *cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
684 *off |= (elen >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
685 *off |= (ip_hlen >> 2) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
689 tcp_hlen = th->th_off << 2;
690 if (mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_TCP_IPV6)) {
691 *cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
692 *off |= (tcp_hlen >> 2) <<
693 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
696 ixl_atr(que, th, etype);
700 if (mp->m_pkthdr.csum_flags & (CSUM_UDP|CSUM_UDP_IPV6)) {
701 *cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
702 *off |= (sizeof(struct udphdr) >> 2) <<
703 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
708 if (mp->m_pkthdr.csum_flags & (CSUM_SCTP|CSUM_SCTP_IPV6)) {
709 *cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
710 *off |= (sizeof(struct sctphdr) >> 2) <<
711 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
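	/*
	** Worked example (illustrative): for an untagged TCP/IPv4 frame with
	** no IP options, elen = 14, ip_hlen = 20 and tcp_hlen = 20, so the
	** offsets above encode as MACLEN = 14 >> 1 = 7 (in 2-byte words),
	** IPLEN = 20 >> 2 = 5 and L4LEN = 20 >> 2 = 5 (both in 4-byte words),
	** each shifted into its I40E_TX_DESC_LENGTH_* field of *off.
	*/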
722 /**********************************************************************
724 * Setup context for hardware segmentation offload (TSO)
726 **********************************************************************/
728 ixl_tso_setup(struct ixl_queue *que, struct mbuf *mp)
730 struct tx_ring *txr = &que->txr;
731 struct i40e_tx_context_desc *TXD;
732 struct ixl_tx_buf *buf;
733 u32 cmd, mss, type, tsolen;
735 int idx, elen, ip_hlen, tcp_hlen;
736 struct ether_vlan_header *eh;
743 #if defined(INET6) || defined(INET)
746 u64 type_cmd_tso_mss;
749 * Determine where frame payload starts.
750 * Jump over vlan headers if already present
752 eh = mtod(mp, struct ether_vlan_header *);
753 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
754 elen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
755 etype = eh->evl_proto;
757 elen = ETHER_HDR_LEN;
758 etype = eh->evl_encap_proto;
761 switch (ntohs(etype)) {
764 ip6 = (struct ip6_hdr *)(mp->m_data + elen);
765 if (ip6->ip6_nxt != IPPROTO_TCP)
767 ip_hlen = sizeof(struct ip6_hdr);
768 th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
769 th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
770 tcp_hlen = th->th_off << 2;
775 ip = (struct ip *)(mp->m_data + elen);
776 if (ip->ip_p != IPPROTO_TCP)
779 ip_hlen = ip->ip_hl << 2;
780 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
781 th->th_sum = in_pseudo(ip->ip_src.s_addr,
782 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
783 tcp_hlen = th->th_off << 2;
787 printf("%s: CSUM_TSO but no supported IP version (0x%04x)",
788 __func__, ntohs(etype));
792 /* Ensure we have at least the IP+TCP header in the first mbuf. */
793 if (mp->m_len < elen + ip_hlen + sizeof(struct tcphdr))
796 idx = txr->next_avail;
797 buf = &txr->buffers[idx];
798 TXD = (struct i40e_tx_context_desc *) &txr->base[idx];
799 tsolen = mp->m_pkthdr.len - (elen + ip_hlen + tcp_hlen);
801 type = I40E_TX_DESC_DTYPE_CONTEXT;
802 cmd = I40E_TX_CTX_DESC_TSO;
803 mss = mp->m_pkthdr.tso_segsz;
805 type_cmd_tso_mss = ((u64)type << I40E_TXD_CTX_QW1_DTYPE_SHIFT) |
806 ((u64)cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
807 ((u64)tsolen << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
808 ((u64)mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
809 TXD->type_cmd_tso_mss = htole64(type_cmd_tso_mss);
811 TXD->tunneling_params = htole32(0);
815 if (++idx == que->num_desc)
819 txr->next_avail = idx;
825 ** ixl_get_tx_head - Retrieve the value from the
826 ** location where the HW records its HEAD index
829 ixl_get_tx_head(struct ixl_queue *que)
831 struct tx_ring *txr = &que->txr;
832 void *head = &txr->base[que->num_desc];
833 return LE32_TO_CPU(*(volatile __le32 *)head);
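/*
** Sketch (illustrative helper, not part of the driver): given the
** write-back HEAD, the number of descriptors still owned by the hardware
** can be derived with simple ring-wrap arithmetic against next_to_clean.
*/
static inline u32
ixl_example_tx_pending(struct ixl_queue *que)
{
	struct tx_ring	*txr = &que->txr;
	u32		head = ixl_get_tx_head(que);

	if (head >= txr->next_to_clean)
		return (head - txr->next_to_clean);
	return (que->num_desc - txr->next_to_clean + head);
}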
836 /**********************************************************************
838 * Examine each tx_buffer in the used queue. If the hardware is done
839 * processing the packet then free associated resources. The
840 * tx_buffer is put back on the free queue.
842 **********************************************************************/
844 ixl_txeof(struct ixl_queue *que)
846 struct tx_ring *txr = &que->txr;
847 u32 first, last, head, done, processed;
848 struct ixl_tx_buf *buf;
849 struct i40e_tx_desc *tx_desc, *eop_desc;
852 mtx_assert(&txr->mtx, MA_OWNED);
855 // XXX todo: implement moderation
856 if (netmap_tx_irq(que->vsi->ifp, que->me))
858 #endif /* DEV_NETMAP */
860 /* These are not the descriptors you seek, move along :) */
861 if (txr->avail == que->num_desc) {
867 first = txr->next_to_clean;
868 buf = &txr->buffers[first];
869 tx_desc = (struct i40e_tx_desc *)&txr->base[first];
870 last = buf->eop_index;
873 eop_desc = (struct i40e_tx_desc *)&txr->base[last];
875 /* Get the Head WB value */
876 head = ixl_get_tx_head(que);
879 ** Get the index of the first descriptor
880 ** BEYOND the EOP and call that 'done'.
881 ** I do this so the comparison in the
882 ** inner while loop below can be simple
884 if (++last == que->num_desc) last = 0;
887 bus_dmamap_sync(txr->dma.tag, txr->dma.map,
888 BUS_DMASYNC_POSTREAD);
890 	** The HEAD index of the ring is written to a
891 	** defined location; this, rather than a done bit,
892 	** is what is used to keep track of what must be cleaned.
895 while (first != head) {
896 /* We clean the range of the packet */
897 while (first != done) {
902 txr->bytes += /* for ITR adjustment */
903 buf->m_head->m_pkthdr.len;
904 txr->tx_bytes += /* for TX stats */
905 buf->m_head->m_pkthdr.len;
906 bus_dmamap_sync(buf->tag,
908 BUS_DMASYNC_POSTWRITE);
909 bus_dmamap_unload(buf->tag,
911 m_freem(buf->m_head);
917 if (++first == que->num_desc)
920 buf = &txr->buffers[first];
921 tx_desc = &txr->base[first];
924 /* See if there is more work now */
925 last = buf->eop_index;
927 eop_desc = &txr->base[last];
928 /* Get next done point */
929 if (++last == que->num_desc) last = 0;
934 bus_dmamap_sync(txr->dma.tag, txr->dma.map,
935 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
937 txr->next_to_clean = first;
941 	** Hang detection: we know there's
942 	** work outstanding or the first return
943 	** would have been taken, so indicate an
944 	** unsuccessful pass; in the local timer, if
945 	** the value grows too great the queue will
946 	** be considered hung. If anything has been
947 	** cleaned, then reset the state.
949 if ((processed == 0) && (que->busy != IXL_QUEUE_HUNG))
953 que->busy = 1; /* Note this turns off HUNG */
956 * If there are no pending descriptors, clear the timeout.
958 if (txr->avail == que->num_desc) {
966 /*********************************************************************
968 * Refresh mbuf buffers for RX descriptor rings
969 * - now keeps its own state so discards due to resource
970  *     exhaustion are unnecessary; if an mbuf cannot be obtained
971  *     it just returns, keeping its placeholder, so it can simply
972  *     be called again later to retry.
974 **********************************************************************/
976 ixl_refresh_mbufs(struct ixl_queue *que, int limit)
978 struct ixl_vsi *vsi = que->vsi;
979 struct rx_ring *rxr = &que->rxr;
980 bus_dma_segment_t hseg[1];
981 bus_dma_segment_t pseg[1];
982 struct ixl_rx_buf *buf;
983 struct mbuf *mh, *mp;
984 int i, j, nsegs, error;
985 bool refreshed = FALSE;
987 i = j = rxr->next_refresh;
988 /* Control the loop with one beyond */
989 if (++j == que->num_desc)
993 buf = &rxr->buffers[i];
994 if (rxr->hdr_split == FALSE)
997 if (buf->m_head == NULL) {
998 mh = m_gethdr(M_NOWAIT, MT_DATA);
1004 mh->m_pkthdr.len = mh->m_len = MHLEN;
1006 mh->m_flags |= M_PKTHDR;
1007 /* Get the memory mapping */
1008 error = bus_dmamap_load_mbuf_sg(rxr->htag,
1009 buf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
1011 printf("Refresh mbufs: hdr dmamap load"
1012 " failure - %d\n", error);
1018 bus_dmamap_sync(rxr->htag, buf->hmap,
1019 BUS_DMASYNC_PREREAD);
1020 rxr->base[i].read.hdr_addr =
1021 htole64(hseg[0].ds_addr);
1024 if (buf->m_pack == NULL) {
1025 mp = m_getjcl(M_NOWAIT, MT_DATA,
1026 M_PKTHDR, rxr->mbuf_sz);
1032 mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
1033 /* Get the memory mapping */
1034 error = bus_dmamap_load_mbuf_sg(rxr->ptag,
1035 buf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
1037 printf("Refresh mbufs: payload dmamap load"
1038 " failure - %d\n", error);
1044 bus_dmamap_sync(rxr->ptag, buf->pmap,
1045 BUS_DMASYNC_PREREAD);
1046 rxr->base[i].read.pkt_addr =
1047 htole64(pseg[0].ds_addr);
1048 /* Used only when doing header split */
1049 rxr->base[i].read.hdr_addr = 0;
1052 /* Next is precalculated */
1054 rxr->next_refresh = i;
1055 if (++j == que->num_desc)
1059 if (refreshed) /* Update hardware tail index */
1060 wr32(vsi->hw, rxr->tail, rxr->next_refresh);
1065 /*********************************************************************
1067 * Allocate memory for rx_buffer structures. Since we use one
1068  * rx_buffer per descriptor, the maximum number of rx_buffers
1069 * that we'll need is equal to the number of receive descriptors
1070 * that we've defined.
1072 **********************************************************************/
1074 ixl_allocate_rx_data(struct ixl_queue *que)
1076 struct rx_ring *rxr = &que->rxr;
1077 struct ixl_vsi *vsi = que->vsi;
1078 device_t dev = vsi->dev;
1079 struct ixl_rx_buf *buf;
1080 int i, bsize, error;
1082 bsize = sizeof(struct ixl_rx_buf) * que->num_desc;
1083 if (!(rxr->buffers =
1084 (struct ixl_rx_buf *) malloc(bsize,
1085 M_DEVBUF, M_NOWAIT | M_ZERO))) {
1086 device_printf(dev, "Unable to allocate rx_buffer memory\n");
1091 if ((error = bus_dma_tag_create(NULL, /* parent */
1092 1, 0, /* alignment, bounds */
1093 BUS_SPACE_MAXADDR, /* lowaddr */
1094 BUS_SPACE_MAXADDR, /* highaddr */
1095 NULL, NULL, /* filter, filterarg */
1096 MSIZE, /* maxsize */
1098 MSIZE, /* maxsegsize */
1100 NULL, /* lockfunc */
1101 NULL, /* lockfuncarg */
1103 device_printf(dev, "Unable to create RX DMA htag\n");
1107 if ((error = bus_dma_tag_create(NULL, /* parent */
1108 1, 0, /* alignment, bounds */
1109 BUS_SPACE_MAXADDR, /* lowaddr */
1110 BUS_SPACE_MAXADDR, /* highaddr */
1111 NULL, NULL, /* filter, filterarg */
1112 MJUM16BYTES, /* maxsize */
1114 MJUM16BYTES, /* maxsegsize */
1116 NULL, /* lockfunc */
1117 NULL, /* lockfuncarg */
1119 device_printf(dev, "Unable to create RX DMA ptag\n");
1123 for (i = 0; i < que->num_desc; i++) {
1124 buf = &rxr->buffers[i];
1125 error = bus_dmamap_create(rxr->htag,
1126 BUS_DMA_NOWAIT, &buf->hmap);
1128 device_printf(dev, "Unable to create RX head map\n");
1131 error = bus_dmamap_create(rxr->ptag,
1132 BUS_DMA_NOWAIT, &buf->pmap);
1134 device_printf(dev, "Unable to create RX pkt map\n");
1143 /*********************************************************************
1145 * (Re)Initialize the queue receive ring and its buffers.
1147 **********************************************************************/
1149 ixl_init_rx_ring(struct ixl_queue *que)
1151 struct rx_ring *rxr = &que->rxr;
1152 struct ixl_vsi *vsi = que->vsi;
1153 #if defined(INET6) || defined(INET)
1154 struct ifnet *ifp = vsi->ifp;
1155 struct lro_ctrl *lro = &rxr->lro;
1157 struct ixl_rx_buf *buf;
1158 bus_dma_segment_t pseg[1], hseg[1];
1159 int rsize, nsegs, error = 0;
1161 struct netmap_adapter *na = NA(que->vsi->ifp);
1162 struct netmap_slot *slot;
1163 #endif /* DEV_NETMAP */
1167 /* same as in ixl_init_tx_ring() */
1168 slot = netmap_reset(na, NR_RX, que->me, 0);
1169 #endif /* DEV_NETMAP */
1170 /* Clear the ring contents */
1171 rsize = roundup2(que->num_desc *
1172 sizeof(union i40e_rx_desc), DBA_ALIGN);
1173 bzero((void *)rxr->base, rsize);
1174 /* Cleanup any existing buffers */
1175 for (int i = 0; i < que->num_desc; i++) {
1176 buf = &rxr->buffers[i];
1177 if (buf->m_head != NULL) {
1178 bus_dmamap_sync(rxr->htag, buf->hmap,
1179 BUS_DMASYNC_POSTREAD);
1180 bus_dmamap_unload(rxr->htag, buf->hmap);
1181 buf->m_head->m_flags |= M_PKTHDR;
1182 m_freem(buf->m_head);
1184 if (buf->m_pack != NULL) {
1185 bus_dmamap_sync(rxr->ptag, buf->pmap,
1186 BUS_DMASYNC_POSTREAD);
1187 bus_dmamap_unload(rxr->ptag, buf->pmap);
1188 buf->m_pack->m_flags |= M_PKTHDR;
1189 m_freem(buf->m_pack);
1195 /* header split is off */
1196 rxr->hdr_split = FALSE;
1198 /* Now replenish the mbufs */
1199 for (int j = 0; j != que->num_desc; ++j) {
1200 struct mbuf *mh, *mp;
1202 buf = &rxr->buffers[j];
1205 * In netmap mode, fill the map and set the buffer
1206 * address in the NIC ring, considering the offset
1207 * between the netmap and NIC rings (see comment in
1208 * ixgbe_setup_transmit_ring() ). No need to allocate
1209 * an mbuf, so end the block with a continue;
1212 int sj = netmap_idx_n2k(&na->rx_rings[que->me], j);
1216 addr = PNMB(na, slot + sj, &paddr);
1217 netmap_load_map(na, rxr->dma.tag, buf->pmap, addr);
1218 /* Update descriptor and the cached value */
1219 rxr->base[j].read.pkt_addr = htole64(paddr);
1220 rxr->base[j].read.hdr_addr = 0;
1223 #endif /* DEV_NETMAP */
1226 ** Don't allocate mbufs if not
1227 	** doing header split; it's wasteful
1229 if (rxr->hdr_split == FALSE)
1232 /* First the header */
1233 buf->m_head = m_gethdr(M_NOWAIT, MT_DATA);
1234 if (buf->m_head == NULL) {
1238 m_adj(buf->m_head, ETHER_ALIGN);
1240 mh->m_len = mh->m_pkthdr.len = MHLEN;
1241 mh->m_flags |= M_PKTHDR;
1242 /* Get the memory mapping */
1243 error = bus_dmamap_load_mbuf_sg(rxr->htag,
1244 buf->hmap, buf->m_head, hseg,
1245 &nsegs, BUS_DMA_NOWAIT);
1246 if (error != 0) /* Nothing elegant to do here */
1248 bus_dmamap_sync(rxr->htag,
1249 buf->hmap, BUS_DMASYNC_PREREAD);
1250 /* Update descriptor */
1251 rxr->base[j].read.hdr_addr = htole64(hseg[0].ds_addr);
1254 /* Now the payload cluster */
1255 buf->m_pack = m_getjcl(M_NOWAIT, MT_DATA,
1256 M_PKTHDR, rxr->mbuf_sz);
1257 if (buf->m_pack == NULL) {
1262 mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
1263 /* Get the memory mapping */
1264 error = bus_dmamap_load_mbuf_sg(rxr->ptag,
1265 buf->pmap, mp, pseg,
1266 &nsegs, BUS_DMA_NOWAIT);
1269 bus_dmamap_sync(rxr->ptag,
1270 buf->pmap, BUS_DMASYNC_PREREAD);
1271 /* Update descriptor */
1272 rxr->base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
1273 rxr->base[j].read.hdr_addr = 0;
1277 /* Setup our descriptor indices */
1278 rxr->next_check = 0;
1279 rxr->next_refresh = 0;
1280 rxr->lro_enabled = FALSE;
1283 rxr->discard = FALSE;
1285 wr32(vsi->hw, rxr->tail, que->num_desc - 1);
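	/*
	** Note (illustrative): the RX tail is left one slot short of the ring
	** size here so the hardware never fully catches up with the software
	** index; ixl_refresh_mbufs() advances it to next_refresh as buffers
	** are replenished.
	*/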
1288 #if defined(INET6) || defined(INET)
1290 ** Now set up the LRO interface:
1292 if (ifp->if_capenable & IFCAP_LRO) {
1293 int err = tcp_lro_init(lro);
1295 if_printf(ifp, "queue %d: LRO Initialization failed!\n", que->me);
1298 INIT_DBG_IF(ifp, "queue %d: RX Soft LRO Initialized", que->me);
1299 rxr->lro_enabled = TRUE;
1300 lro->ifp = vsi->ifp;
1304 bus_dmamap_sync(rxr->dma.tag, rxr->dma.map,
1305 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1313 /*********************************************************************
1315 * Free station receive ring data structures
1317 **********************************************************************/
1319 ixl_free_que_rx(struct ixl_queue *que)
1321 struct rx_ring *rxr = &que->rxr;
1322 struct ixl_rx_buf *buf;
1324 INIT_DBG_IF(que->vsi->ifp, "queue %d: begin", que->me);
1326 /* Cleanup any existing buffers */
1327 if (rxr->buffers != NULL) {
1328 for (int i = 0; i < que->num_desc; i++) {
1329 buf = &rxr->buffers[i];
1330 if (buf->m_head != NULL) {
1331 bus_dmamap_sync(rxr->htag, buf->hmap,
1332 BUS_DMASYNC_POSTREAD);
1333 bus_dmamap_unload(rxr->htag, buf->hmap);
1334 buf->m_head->m_flags |= M_PKTHDR;
1335 m_freem(buf->m_head);
1337 if (buf->m_pack != NULL) {
1338 bus_dmamap_sync(rxr->ptag, buf->pmap,
1339 BUS_DMASYNC_POSTREAD);
1340 bus_dmamap_unload(rxr->ptag, buf->pmap);
1341 buf->m_pack->m_flags |= M_PKTHDR;
1342 m_freem(buf->m_pack);
1346 if (buf->hmap != NULL) {
1347 bus_dmamap_destroy(rxr->htag, buf->hmap);
1350 if (buf->pmap != NULL) {
1351 bus_dmamap_destroy(rxr->ptag, buf->pmap);
1355 if (rxr->buffers != NULL) {
1356 free(rxr->buffers, M_DEVBUF);
1357 rxr->buffers = NULL;
1361 if (rxr->htag != NULL) {
1362 bus_dma_tag_destroy(rxr->htag);
1365 if (rxr->ptag != NULL) {
1366 bus_dma_tag_destroy(rxr->ptag);
1370 INIT_DBG_IF(que->vsi->ifp, "queue %d: end", que->me);
1374 static __inline void
1375 ixl_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u8 ptype)
1378 #if defined(INET6) || defined(INET)
1380 	 * ATM LRO is only for IPv4/TCP packets and the TCP checksum of the packet
1381 	 * should be computed by hardware. Also it should not have a VLAN tag in the ethernet header.
1384 if (rxr->lro_enabled &&
1385 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
1386 (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
1387 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
1389 * Send to the stack if:
1390 ** - LRO not enabled, or
1391 ** - no LRO resources, or
1392 ** - lro enqueue fails
1394 if (rxr->lro.lro_cnt != 0)
1395 if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
1400 (*ifp->if_input)(ifp, m);
1405 static __inline void
1406 ixl_rx_discard(struct rx_ring *rxr, int i)
1408 struct ixl_rx_buf *rbuf;
1410 rbuf = &rxr->buffers[i];
1412 if (rbuf->fmp != NULL) {/* Partial chain ? */
1413 rbuf->fmp->m_flags |= M_PKTHDR;
1419 ** With advanced descriptors the writeback
1420 	** clobbers the buffer addrs, so it's easier
1421 ** to just free the existing mbufs and take
1422 ** the normal refresh path to get new buffers
1426 m_free(rbuf->m_head);
1427 rbuf->m_head = NULL;
1431 m_free(rbuf->m_pack);
1432 rbuf->m_pack = NULL;
1440 	** ixl_ptype_to_hash: parse the packet type
1441 ** to determine the appropriate hash.
1444 ixl_ptype_to_hash(u8 ptype)
1446 struct i40e_rx_ptype_decoded decoded;
1449 decoded = decode_rx_desc_ptype(ptype);
1450 ex = decoded.outer_frag;
1453 return M_HASHTYPE_OPAQUE;
1455 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_L2)
1456 return M_HASHTYPE_OPAQUE;
1458 /* Note: anything that gets to this point is IP */
1459 if (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6) {
1460 switch (decoded.inner_prot) {
1461 case I40E_RX_PTYPE_INNER_PROT_TCP:
1463 return M_HASHTYPE_RSS_TCP_IPV6_EX;
1465 return M_HASHTYPE_RSS_TCP_IPV6;
1466 case I40E_RX_PTYPE_INNER_PROT_UDP:
1468 return M_HASHTYPE_RSS_UDP_IPV6_EX;
1470 return M_HASHTYPE_RSS_UDP_IPV6;
1473 return M_HASHTYPE_RSS_IPV6_EX;
1475 return M_HASHTYPE_RSS_IPV6;
1478 if (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4) {
1479 switch (decoded.inner_prot) {
1480 case I40E_RX_PTYPE_INNER_PROT_TCP:
1481 return M_HASHTYPE_RSS_TCP_IPV4;
1482 case I40E_RX_PTYPE_INNER_PROT_UDP:
1484 return M_HASHTYPE_RSS_UDP_IPV4_EX;
1486 return M_HASHTYPE_RSS_UDP_IPV4;
1488 return M_HASHTYPE_RSS_IPV4;
1491 /* We should never get here!! */
1492 return M_HASHTYPE_OPAQUE;
1496 /*********************************************************************
1498 * This routine executes in interrupt context. It replenishes
1499  *  the mbufs in the descriptor ring and sends data which has been
1500  *  dma'ed into host memory to the upper layer.
1502  *  We loop at most count times if count is > 0, or until done if count is < 0.
1505 * Return TRUE for more work, FALSE for all clean.
1506 *********************************************************************/
1508 ixl_rxeof(struct ixl_queue *que, int count)
1510 struct ixl_vsi *vsi = que->vsi;
1511 struct rx_ring *rxr = &que->rxr;
1512 struct ifnet *ifp = vsi->ifp;
1513 #if defined(INET6) || defined(INET)
1514 struct lro_ctrl *lro = &rxr->lro;
1515 struct lro_entry *queued;
1517 int i, nextp, processed = 0;
1518 union i40e_rx_desc *cur;
1519 struct ixl_rx_buf *rbuf, *nbuf;
1525 if (netmap_rx_irq(ifp, que->me, &count)) {
1529 #endif /* DEV_NETMAP */
1531 for (i = rxr->next_check; count != 0;) {
1532 struct mbuf *sendmp, *mh, *mp;
1533 u32 rsc, status, error;
1534 u16 hlen, plen, vtag;
1539 /* Sync the ring. */
1540 bus_dmamap_sync(rxr->dma.tag, rxr->dma.map,
1541 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1543 cur = &rxr->base[i];
1544 qword = le64toh(cur->wb.qword1.status_error_len);
1545 status = (qword & I40E_RXD_QW1_STATUS_MASK)
1546 >> I40E_RXD_QW1_STATUS_SHIFT;
1547 error = (qword & I40E_RXD_QW1_ERROR_MASK)
1548 >> I40E_RXD_QW1_ERROR_SHIFT;
1549 plen = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK)
1550 >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
1551 hlen = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK)
1552 >> I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
1553 ptype = (qword & I40E_RXD_QW1_PTYPE_MASK)
1554 >> I40E_RXD_QW1_PTYPE_SHIFT;
1556 if ((status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) == 0) {
1560 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1567 cur->wb.qword1.status_error_len = 0;
1568 rbuf = &rxr->buffers[i];
1571 eop = (status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT));
1572 if (status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT))
1573 vtag = le16toh(cur->wb.qword0.lo_dword.l2tag1);
1578 		** Make sure bad packets are discarded;
1579 		** note that only the EOP descriptor has valid error results.
1582 if (eop && (error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
1584 ixl_rx_discard(rxr, i);
1588 /* Prefetch the next buffer */
1591 if (nextp == que->num_desc)
1593 nbuf = &rxr->buffers[nextp];
1598 ** The header mbuf is ONLY used when header
1599 ** split is enabled, otherwise we get normal
1600 		** behavior, i.e., both header and payload
1601 ** are DMA'd into the payload buffer.
1603 ** Rather than using the fmp/lmp global pointers
1604 ** we now keep the head of a packet chain in the
1605 ** buffer struct and pass this along from one
1606 ** descriptor to the next, until we get EOP.
1608 if (rxr->hdr_split && (rbuf->fmp == NULL)) {
1609 if (hlen > IXL_RX_HDR)
1612 mh->m_flags |= M_PKTHDR;
1614 mh->m_pkthdr.len = mh->m_len;
1615 /* Null buf pointer so it is refreshed */
1616 rbuf->m_head = NULL;
1618 			** Check the payload length; this
1619 			** could be zero if it's a small packet.
1625 mp->m_flags &= ~M_PKTHDR;
1627 mh->m_pkthdr.len += mp->m_len;
1628 /* Null buf pointer so it is refreshed */
1629 rbuf->m_pack = NULL;
1633 ** Now create the forward
1634 ** chain so when complete
1638 /* stash the chain head */
1640 /* Make forward chain */
1642 mp->m_next = nbuf->m_pack;
1644 mh->m_next = nbuf->m_pack;
1646 /* Singlet, prepare to send */
1649 sendmp->m_pkthdr.ether_vtag = vtag;
1650 sendmp->m_flags |= M_VLANTAG;
1655 ** Either no header split, or a
1656 ** secondary piece of a fragmented
1661 ** See if there is a stored head
1662 ** that determines what we are
1665 rbuf->m_pack = rbuf->fmp = NULL;
1667 if (sendmp != NULL) /* secondary frag */
1668 sendmp->m_pkthdr.len += mp->m_len;
1670 /* first desc of a non-ps chain */
1672 sendmp->m_flags |= M_PKTHDR;
1673 sendmp->m_pkthdr.len = mp->m_len;
1675 sendmp->m_pkthdr.ether_vtag = vtag;
1676 sendmp->m_flags |= M_VLANTAG;
1679 /* Pass the head pointer on */
1683 mp->m_next = nbuf->m_pack;
1687 /* Sending this frame? */
1689 sendmp->m_pkthdr.rcvif = ifp;
1692 rxr->rx_bytes += sendmp->m_pkthdr.len;
1693 /* capture data for dynamic ITR adjustment */
1695 rxr->bytes += sendmp->m_pkthdr.len;
1696 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
1697 ixl_rx_checksum(sendmp, status, error, ptype);
1699 sendmp->m_pkthdr.flowid =
1700 le32toh(cur->wb.qword0.hi_dword.rss);
1701 M_HASHTYPE_SET(sendmp, ixl_ptype_to_hash(ptype));
1703 sendmp->m_pkthdr.flowid = que->msix;
1704 M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE);
1708 bus_dmamap_sync(rxr->dma.tag, rxr->dma.map,
1709 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1711 /* Advance our pointers to the next descriptor. */
1712 if (++i == que->num_desc)
1715 /* Now send to the stack or do LRO */
1716 if (sendmp != NULL) {
1717 rxr->next_check = i;
1718 ixl_rx_input(rxr, ifp, sendmp, ptype);
1719 i = rxr->next_check;
1722 /* Every 8 descriptors we go to refresh mbufs */
1723 if (processed == 8) {
1724 ixl_refresh_mbufs(que, i);
1729 /* Refresh any remaining buf structs */
1730 if (ixl_rx_unrefreshed(que))
1731 ixl_refresh_mbufs(que, i);
1733 rxr->next_check = i;
1735 #if defined(INET6) || defined(INET)
1737 * Flush any outstanding LRO work
1739 while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
1740 SLIST_REMOVE_HEAD(&lro->lro_active, next);
1741 tcp_lro_flush(lro, queued);
1750 /*********************************************************************
1752 * Verify that the hardware indicated that the checksum is valid.
1753  *  Inform the stack about the status of the checksum so that the stack
1754  *  doesn't spend time verifying it.
1756 *********************************************************************/
1758 ixl_rx_checksum(struct mbuf * mp, u32 status, u32 error, u8 ptype)
1760 struct i40e_rx_ptype_decoded decoded;
1762 decoded = decode_rx_desc_ptype(ptype);
1765 if (error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
1766 (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))) {
1767 mp->m_pkthdr.csum_flags = 0;
1771 	/* IPv6 with extension headers likely has a bad csum */
1772 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1773 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
1775 (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) {
1776 mp->m_pkthdr.csum_flags = 0;
1781 /* IP Checksum Good */
1782 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
1783 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1785 if (status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)) {
1786 mp->m_pkthdr.csum_flags |=
1787 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1788 mp->m_pkthdr.csum_data |= htons(0xffff);
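	/*
	** Example outcome (illustrative): a good TCP/IPv4 frame leaves this
	** routine with csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
	** CSUM_DATA_VALID | CSUM_PSEUDO_HDR and csum_data = 0xffff, so the
	** stack skips both the IP and the L4 checksum verification.
	*/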
1793 #if __FreeBSD_version >= 1100000
1795 ixl_get_counter(if_t ifp, ift_counter cnt)
1797 struct ixl_vsi *vsi;
1799 vsi = if_getsoftc(ifp);
1802 case IFCOUNTER_IPACKETS:
1803 return (vsi->ipackets);
1804 case IFCOUNTER_IERRORS:
1805 return (vsi->ierrors);
1806 case IFCOUNTER_OPACKETS:
1807 return (vsi->opackets);
1808 case IFCOUNTER_OERRORS:
1809 return (vsi->oerrors);
1810 case IFCOUNTER_COLLISIONS:
1811 /* Collisions are by standard impossible in 40G/10G Ethernet */
1813 case IFCOUNTER_IBYTES:
1814 return (vsi->ibytes);
1815 case IFCOUNTER_OBYTES:
1816 return (vsi->obytes);
1817 case IFCOUNTER_IMCASTS:
1818 return (vsi->imcasts);
1819 case IFCOUNTER_OMCASTS:
1820 return (vsi->omcasts);
1821 case IFCOUNTER_IQDROPS:
1822 return (vsi->iqdrops);
1823 case IFCOUNTER_OQDROPS:
1824 return (vsi->oqdrops);
1825 case IFCOUNTER_NOPROTO:
1826 return (vsi->noproto);
1828 return (if_get_counter_default(ifp, cnt));