/******************************************************************************

  Copyright (c) 2013-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*
** IXL driver TX/RX Routines:
**  This was separated to allow usage by
**  both the BASE and the VF drivers.
*/
#ifndef IXL_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
#endif

#include "ixl.h"

#ifdef RSS
#include <net/rss_config.h>
#endif
/* Local Prototypes */
static void	ixl_rx_checksum(struct mbuf *, u32, u32, u8);
static void	ixl_refresh_mbufs(struct ixl_queue *, int);
static int	ixl_xmit(struct ixl_queue *, struct mbuf **);
static int	ixl_tx_setup_offload(struct ixl_queue *,
		    struct mbuf *, u32 *, u32 *);
static bool	ixl_tso_setup(struct ixl_queue *, struct mbuf *);

static __inline void ixl_rx_discard(struct rx_ring *, int);
static __inline void ixl_rx_input(struct rx_ring *, struct ifnet *,
		    struct mbuf *, u8);
#ifdef DEV_NETMAP
#include <dev/netmap/if_ixl_netmap.h>
#endif /* DEV_NETMAP */
/*
** Multiqueue Transmit driver
*/
int
ixl_mq_start(struct ifnet *ifp, struct mbuf *m)
{
	struct ixl_vsi		*vsi = ifp->if_softc;
	struct ixl_queue	*que;
	struct tx_ring		*txr;
	int			err, i;
#ifdef RSS
	u32			bucket_id;
#endif

	/*
	** Which queue to use:
	**
	** When doing RSS, map it to the same outbound
	** queue as the incoming flow would be mapped to.
	** If everything is set up correctly, it should be
	** the same bucket that the current CPU we're on is.
	*/
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
#ifdef RSS
		if (rss_hash2bucket(m->m_pkthdr.flowid,
		    M_HASHTYPE_GET(m), &bucket_id) == 0) {
			i = bucket_id % vsi->num_queues;
		} else
#endif
			i = m->m_pkthdr.flowid % vsi->num_queues;
	} else
		i = curcpu % vsi->num_queues;
	/*
	** This may not be perfect, but until something
	** better comes along it will keep from scheduling
	** on stalled queues.
	*/
	if (((1 << i) & vsi->active_queues) == 0)
		i = ffsl(vsi->active_queues);

	que = &vsi->queues[i];
	txr = &que->txr;
	err = drbr_enqueue(ifp, txr->br, m);
	if (err)
		return (err);
	if (IXL_TX_TRYLOCK(txr)) {
		ixl_mq_start_locked(ifp, txr);
		IXL_TX_UNLOCK(txr);
	} else
		taskqueue_enqueue(que->tq, &que->tx_task);

	return (0);
}

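/*
 * Illustrative sketch, not part of the original driver: both queue-selection
 * paths above reduce a flow identifier to a queue index with a modulo.  The
 * helper name and the values below are hypothetical and only show the
 * arithmetic.
 */
#if 0
static inline int
example_flowid_to_queue(u32 flowid, int num_queues)
{
	/* e.g. flowid = 0x9e3779b9 with 8 queues -> 0x9e3779b9 % 8 == 1 */
	return (flowid % num_queues);
}
#endif
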
int
ixl_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
{
	struct ixl_queue	*que = txr->que;
	struct ixl_vsi		*vsi = que->vsi;
	struct mbuf		*next;
	int			err = 0;

	if (((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) ||
	    vsi->link_active == 0)
		return (ENETDOWN);
	/* Process the transmit queue */
	while ((next = drbr_peek(ifp, txr->br)) != NULL) {
		if ((err = ixl_xmit(que, &next)) != 0) {
			if (next == NULL)
				drbr_advance(ifp, txr->br);
			else
				drbr_putback(ifp, txr->br, next);
			break;
		}
		drbr_advance(ifp, txr->br);
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, next);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
	}

	if (txr->avail < IXL_TX_CLEANUP_THRESHOLD)
		ixl_txeof(que);

	return (err);
}

/*
 * Called from a taskqueue to drain queued transmit packets.
 */
void
ixl_deferred_mq_start(void *arg, int pending)
{
	struct ixl_queue	*que = arg;
	struct tx_ring		*txr = &que->txr;
	struct ixl_vsi		*vsi = que->vsi;
	struct ifnet		*ifp = vsi->ifp;

	IXL_TX_LOCK(txr);
	if (!drbr_empty(ifp, txr->br))
		ixl_mq_start_locked(ifp, txr);
	IXL_TX_UNLOCK(txr);
}

/*
** Flush all queue ring buffers
*/
void
ixl_qflush(struct ifnet *ifp)
{
	struct ixl_vsi	*vsi = ifp->if_softc;

	for (int i = 0; i < vsi->num_queues; i++) {
		struct ixl_queue *que = &vsi->queues[i];
		struct tx_ring	*txr = &que->txr;
		struct mbuf	*m;

		IXL_TX_LOCK(txr);
		while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
			m_freem(m);
		IXL_TX_UNLOCK(txr);
	}
	if_qflush(ifp);
}

/*
** Find mbuf chains passed to the driver
** that are 'sparse', i.e. using more than 8
** mbufs to deliver an mss-size chunk of data.
*/
static bool
ixl_tso_detect_sparse(struct mbuf *mp)
{
	struct mbuf	*m;
	int		num = 0, mss;
	bool		ret = FALSE;

	mss = mp->m_pkthdr.tso_segsz;
	for (m = mp->m_next; m != NULL; m = m->m_next) {
		num++;
		mss -= m->m_len;
		if (mss < 1)
			break;
		if (m->m_next == NULL)
			break;
	}
	if (num > IXL_SPARSE_CHAIN)
		ret = TRUE;

	return (ret);
}

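/*
 * Illustrative note, not part of the original driver: "sparse" means one
 * MSS worth of payload needs too many mbufs.  With a hypothetical tso_segsz
 * of 1448 spread across nine ~161-byte mbufs, the walk above counts
 * num = 9 > IXL_SPARSE_CHAIN (8), so the caller defragments first:
 */
#if 0
	if (ixl_tso_detect_sparse(m_head))
		m_head = m_defrag(m_head, M_NOWAIT);	/* as in ixl_xmit() */
#endif
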
/*********************************************************************
 *
 *  This routine maps the mbufs to tx descriptors, allowing the
 *  TX engine to transmit the packets.
 *	- return 0 on success, positive on failure
 *
 **********************************************************************/
#define IXL_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
static int
ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp)
{
	struct ixl_vsi		*vsi = que->vsi;
	struct i40e_hw		*hw = vsi->hw;
	struct tx_ring		*txr = &que->txr;
	struct ixl_tx_buf	*buf;
	struct i40e_tx_desc	*txd = NULL;
	struct mbuf		*m_head, *m;
	int			i, j, error, nsegs, maxsegs;
	int			first, last = 0;
	u16			vtag = 0;
	u32			cmd, off;
	bus_dmamap_t		map;
	bus_dma_tag_t		tag;
	bus_dma_segment_t	segs[IXL_MAX_TSO_SEGS];

	cmd = off = 0;
	m_head = *m_headp;
	/*
	 * Important to capture the first descriptor
	 * used because it will contain the index of
	 * the one we tell the hardware to report back.
	 */
	first = txr->next_avail;
	buf = &txr->buffers[first];
	map = buf->map;
	tag = txr->tx_tag;
	maxsegs = IXL_MAX_TX_SEGS;
	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		/* Use larger mapping for TSO */
		tag = txr->tso_tag;
		maxsegs = IXL_MAX_TSO_SEGS;
		if (ixl_tso_detect_sparse(m_head)) {
			m = m_defrag(m_head, M_NOWAIT);
			if (m == NULL) {
				m_freem(*m_headp);
				*m_headp = NULL;
				return (ENOBUFS);
			}
			*m_headp = m;
		}
	}
	/*
	 * Map the packet for DMA.
	 */
	error = bus_dmamap_load_mbuf_sg(tag, map,
	    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

	if (error == EFBIG) {
		/* Too many segments; try collapsing the chain */
		m = m_collapse(*m_headp, M_NOWAIT, maxsegs);
		if (m == NULL) {
			que->mbuf_defrag_failed++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (ENOBUFS);
		}
		*m_headp = m;

		/* Try it again */
		error = bus_dmamap_load_mbuf_sg(tag, map,
		    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

		if (error == ENOMEM) {
			que->tx_dma_setup++;
			return (error);
		} else if (error != 0) {
			que->tx_dma_setup++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (error);
		}
	} else if (error == ENOMEM) {
		que->tx_dma_setup++;
		return (error);
	} else if (error != 0) {
		que->tx_dma_setup++;
		m_freem(*m_headp);
		*m_headp = NULL;
		return (error);
	}
	/* Make certain there are enough descriptors */
	if (nsegs > txr->avail - 2) {
		txr->no_desc++;
		error = ENOBUFS;
		goto xmit_fail;
	}
	m_head = *m_headp;
	/* Set up the TSO/CSUM offload */
	if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) {
		error = ixl_tx_setup_offload(que, m_head, &cmd, &off);
		if (error)
			goto xmit_fail;
	}

	cmd |= I40E_TX_DESC_CMD_ICRC;
	/* Grab the VLAN tag */
	if (m_head->m_flags & M_VLANTAG) {
		cmd |= I40E_TX_DESC_CMD_IL2TAG1;
		vtag = htole16(m_head->m_pkthdr.ether_vtag);
	}
	i = txr->next_avail;
	for (j = 0; j < nsegs; j++) {
		bus_size_t seglen;

		buf = &txr->buffers[i];
		buf->tag = tag; /* Keep track of the type tag */
		txd = &txr->base[i];
		seglen = segs[j].ds_len;

		txd->buffer_addr = htole64(segs[j].ds_addr);
		txd->cmd_type_offset_bsz =
		    htole64(I40E_TX_DESC_DTYPE_DATA
		    | ((u64)cmd << I40E_TXD_QW1_CMD_SHIFT)
		    | ((u64)off << I40E_TXD_QW1_OFFSET_SHIFT)
		    | ((u64)seglen << I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
		    | ((u64)vtag << I40E_TXD_QW1_L2TAG1_SHIFT));

		last = i; /* descriptor that will get completion IRQ */

		if (++i == que->num_desc)
			i = 0;

		buf->m_head = NULL;
		buf->eop_index = -1;
	}
	/* Set the last descriptor for report */
	txd->cmd_type_offset_bsz |=
	    htole64(((u64)IXL_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT));
	txr->avail -= nsegs;
	txr->next_avail = i;

	buf->m_head = m_head;
	/* Swap the dma map between the first and last descriptor */
	txr->buffers[first].map = buf->map;
	buf->map = map;
	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE);

	/* Set the index of the descriptor that will be marked done */
	buf = &txr->buffers[first];
	buf->eop_index = last;

	bus_dmamap_sync(txr->dma.tag, txr->dma.map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/*
	 * Advance the Transmit Descriptor Tail (Tdt); this tells the
	 * hardware that this frame is available to transmit.
	 */
	++txr->total_packets;
	wr32(hw, txr->tail, i);

	/* Mark outstanding work */
	if (que->busy == 0)
		que->busy = 1;
	return (0);

xmit_fail:
	bus_dmamap_unload(tag, buf->map);
	return (error);
}

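/*
 * Illustrative sketch, not part of the original driver: how one data
 * descriptor quadword above is composed.  With hypothetical values
 * cmd = I40E_TX_DESC_CMD_ICRC, off = 0, a 1514-byte segment and no VLAN
 * tag, each field is shifted into place and stored little-endian:
 */
#if 0
	u64 qword = I40E_TX_DESC_DTYPE_DATA
	    | ((u64)I40E_TX_DESC_CMD_ICRC << I40E_TXD_QW1_CMD_SHIFT)
	    | ((u64)0 << I40E_TXD_QW1_OFFSET_SHIFT)
	    | ((u64)1514 << I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
	    | ((u64)0 << I40E_TXD_QW1_L2TAG1_SHIFT);
	txd->cmd_type_offset_bsz = htole64(qword);
#endif
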
/*********************************************************************
 *
 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
 *  the information needed to transmit a packet on the wire. This is
 *  called only once at attach, setup is done every reset.
 *
 **********************************************************************/
int
ixl_allocate_tx_data(struct ixl_queue *que)
{
	struct tx_ring		*txr = &que->txr;
	struct ixl_vsi		*vsi = que->vsi;
	device_t		dev = vsi->dev;
	struct ixl_tx_buf	*buf;
	int			error = 0;

	/*
	 * Setup DMA descriptor areas.
	 */
	if ((error = bus_dma_tag_create(NULL,		/* parent */
			       1, 0,			/* alignment, bounds */
			       BUS_SPACE_MAXADDR,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       IXL_TSO_SIZE,		/* maxsize */
			       IXL_MAX_TX_SEGS,		/* nsegments */
			       PAGE_SIZE,		/* maxsegsize */
			       0,			/* flags */
			       NULL,			/* lockfunc */
			       NULL,			/* lockfuncarg */
			       &txr->tx_tag))) {
		device_printf(dev, "Unable to allocate TX DMA tag\n");
		goto fail;
	}
	/* Make a special tag for TSO */
	if ((error = bus_dma_tag_create(NULL,		/* parent */
			       1, 0,			/* alignment, bounds */
			       BUS_SPACE_MAXADDR,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       IXL_TSO_SIZE,		/* maxsize */
			       IXL_MAX_TSO_SEGS,	/* nsegments */
			       PAGE_SIZE,		/* maxsegsize */
			       0,			/* flags */
			       NULL,			/* lockfunc */
			       NULL,			/* lockfuncarg */
			       &txr->tso_tag))) {
		device_printf(dev, "Unable to allocate TX TSO DMA tag\n");
		goto fail;
	}
	if (!(txr->buffers =
	    (struct ixl_tx_buf *) malloc(sizeof(struct ixl_tx_buf) *
	    que->num_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate tx_buffer memory\n");
		error = ENOMEM;
		goto fail;
	}

	/* Create the descriptor buffer default dma maps */
	buf = txr->buffers;
	for (int i = 0; i < que->num_desc; i++, buf++) {
		buf->tag = txr->tx_tag;
		error = bus_dmamap_create(buf->tag, 0, &buf->map);
		if (error != 0) {
			device_printf(dev, "Unable to create TX DMA map\n");
			goto fail;
		}
	}
fail:
	return (error);
}

/*********************************************************************
 *
 *  (Re)Initialize a queue transmit ring.
 *	- called by init, it clears the descriptor ring,
 *	  and frees any stale mbufs
 *
 **********************************************************************/
void
ixl_init_tx_ring(struct ixl_queue *que)
{
#ifdef DEV_NETMAP
	struct netmap_adapter *na = NA(que->vsi->ifp);
	struct netmap_slot *slot;
#endif /* DEV_NETMAP */
	struct tx_ring		*txr = &que->txr;
	struct ixl_tx_buf	*buf;
	/* Clear the old ring contents */
	IXL_TX_LOCK(txr);

#ifdef DEV_NETMAP
	/*
	 * (under lock): if in netmap mode, do some consistency
	 * checks and set slot to entry 0 of the netmap ring.
	 */
	slot = netmap_reset(na, NR_TX, que->me, 0);
#endif /* DEV_NETMAP */

	bzero((void *)txr->base,
	    (sizeof(struct i40e_tx_desc)) * que->num_desc);
	/* Reset indices */
	txr->next_avail = 0;
	txr->next_to_clean = 0;

#ifdef IXL_FDIR
	/* Initialize flow director */
	txr->atr_rate = ixl_atr_rate;
	txr->atr_count = 0;
#endif
	/* Free any existing tx mbufs. */
	buf = txr->buffers;
	for (int i = 0; i < que->num_desc; i++, buf++) {
		if (buf->m_head != NULL) {
			bus_dmamap_sync(buf->tag, buf->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(buf->tag, buf->map);
			m_freem(buf->m_head);
			buf->m_head = NULL;
		}
#ifdef DEV_NETMAP
		/*
		 * In netmap mode, set the map for the packet buffer.
		 * NOTE: Some drivers (not this one) also need to set
		 * the physical buffer address in the NIC ring.
		 * netmap_idx_n2k() maps a nic index, i, into the
		 * corresponding netmap slot index, si.
		 */
		if (slot) {
			int si = netmap_idx_n2k(&na->tx_rings[que->me], i);
			netmap_load_map(na, buf->tag, buf->map,
			    NMB(na, slot + si));
		}
#endif /* DEV_NETMAP */
		/* Clear the EOP index */
		buf->eop_index = -1;
	}
	/* Set number of descriptors available */
	txr->avail = que->num_desc;

	bus_dmamap_sync(txr->dma.tag, txr->dma.map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	IXL_TX_UNLOCK(txr);
}

/*********************************************************************
 *
 *  Free transmit ring related data structures.
 *
 **********************************************************************/
void
ixl_free_que_tx(struct ixl_queue *que)
{
	struct tx_ring		*txr = &que->txr;
	struct ixl_tx_buf	*buf;

	INIT_DBG_IF(que->vsi->ifp, "queue %d: begin", que->me);

	for (int i = 0; i < que->num_desc; i++) {
		buf = &txr->buffers[i];
		if (buf->m_head != NULL) {
			bus_dmamap_sync(buf->tag, buf->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(buf->tag,
			    buf->map);
			m_freem(buf->m_head);
			buf->m_head = NULL;
			if (buf->map != NULL) {
				bus_dmamap_destroy(buf->tag,
				    buf->map);
				buf->map = NULL;
			}
		} else if (buf->map != NULL) {
			bus_dmamap_unload(buf->tag,
			    buf->map);
			bus_dmamap_destroy(buf->tag,
			    buf->map);
			buf->map = NULL;
		}
	}
	if (txr->br != NULL)
		buf_ring_free(txr->br, M_DEVBUF);
	if (txr->buffers != NULL) {
		free(txr->buffers, M_DEVBUF);
		txr->buffers = NULL;
	}
	if (txr->tx_tag != NULL) {
		bus_dma_tag_destroy(txr->tx_tag);
		txr->tx_tag = NULL;
	}
	if (txr->tso_tag != NULL) {
		bus_dma_tag_destroy(txr->tso_tag);
		txr->tso_tag = NULL;
	}

	INIT_DBG_IF(que->vsi->ifp, "queue %d: end", que->me);
	return;
}

/*********************************************************************
 *
 *  Setup descriptor for hw offloads
 *
 **********************************************************************/
static int
ixl_tx_setup_offload(struct ixl_queue *que,
    struct mbuf *mp, u32 *cmd, u32 *off)
{
	struct ether_vlan_header	*eh;
#ifdef INET
	struct ip			*ip = NULL;
#endif
	struct tcphdr			*th = NULL;
#ifdef INET6
	struct ip6_hdr			*ip6;
#endif
	int				elen, ip_hlen = 0, tcp_hlen;
	u16				etype;
	u8				ipproto = 0;
	bool				tso = FALSE;

	/* Set up the TSO context descriptor if required */
	if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
		tso = ixl_tso_setup(que, mp);
		if (tso)
			++que->tso;
		else
			return (ENXIO);
	}
	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present,
	 * helpful for QinQ too.
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		elen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		elen = ETHER_HDR_LEN;
	}
	switch (etype) {
#ifdef INET
	case ETHERTYPE_IP:
		ip = (struct ip *)(mp->m_data + elen);
		ip_hlen = ip->ip_hl << 2;
		ipproto = ip->ip_p;
		th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
		/* The IP checksum must be recalculated with TSO */
		if (tso)
			*cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
		else
			*cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
		break;
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mp->m_data + elen);
		ip_hlen = sizeof(struct ip6_hdr);
		ipproto = ip6->ip6_nxt;
		th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
		*cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
		break;
#endif
	default:
		break;
	}
	*off |= (elen >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
	*off |= (ip_hlen >> 2) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
	switch (ipproto) {
	case IPPROTO_TCP:
		tcp_hlen = th->th_off << 2;
		if (mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_TCP_IPV6)) {
			*cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
			*off |= (tcp_hlen >> 2) <<
			    I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		}
#ifdef IXL_FDIR
		ixl_atr(que, th, etype);
#endif
		break;
	case IPPROTO_UDP:
		if (mp->m_pkthdr.csum_flags & (CSUM_UDP|CSUM_UDP_IPV6)) {
			*cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
			*off |= (sizeof(struct udphdr) >> 2) <<
			    I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		}
		break;
	case IPPROTO_SCTP:
		if (mp->m_pkthdr.csum_flags & (CSUM_SCTP|CSUM_SCTP_IPV6)) {
			*cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
			*off |= (sizeof(struct sctphdr) >> 2) <<
			    I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		}
		/* Fall Thru */
	default:
		break;
	}

	return (0);
}

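/*
 * Illustrative worked example, not part of the original driver: the *off
 * word built above stores header lengths in hardware units (MACLEN in
 * 2-byte words, IPLEN and L4LEN in 4-byte dwords).  For a hypothetical
 * plain TCP/IPv4 frame with elen = 14, ip_hlen = 20 and tcp_hlen = 20:
 */
#if 0
	u32 off = 0;
	off |= (14 >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;	/* 7 words  */
	off |= (20 >> 2) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;	/* 5 dwords */
	off |= (20 >> 2) << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; /* 5 dwords */
#endif
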
/**********************************************************************
 *
 *  Setup context for hardware segmentation offload (TSO)
 *
 **********************************************************************/
static bool
ixl_tso_setup(struct ixl_queue *que, struct mbuf *mp)
{
	struct tx_ring			*txr = &que->txr;
	struct i40e_tx_context_desc	*TXD;
	struct ixl_tx_buf		*buf;
	u32				cmd, mss, type, tsolen;
	u16				etype;
	int				idx, elen, ip_hlen, tcp_hlen;
	struct ether_vlan_header	*eh;
#ifdef INET
	struct ip			*ip;
#endif
#ifdef INET6
	struct ip6_hdr			*ip6;
#endif
#if defined(INET6) || defined(INET)
	struct tcphdr			*th;
#endif
	u64				type_cmd_tso_mss;
	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present.
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		elen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		etype = eh->evl_proto;
	} else {
		elen = ETHER_HDR_LEN;
		etype = eh->evl_encap_proto;
	}
	switch (ntohs(etype)) {
#ifdef INET6
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mp->m_data + elen);
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return (FALSE);
		ip_hlen = sizeof(struct ip6_hdr);
		th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
		th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
		tcp_hlen = th->th_off << 2;
		break;
#endif
#ifdef INET
	case ETHERTYPE_IP:
		ip = (struct ip *)(mp->m_data + elen);
		if (ip->ip_p != IPPROTO_TCP)
			return (FALSE);
		ip->ip_sum = 0;
		ip_hlen = ip->ip_hl << 2;
		th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
		th->th_sum = in_pseudo(ip->ip_src.s_addr,
		    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
		tcp_hlen = th->th_off << 2;
		break;
#endif
	default:
		printf("%s: CSUM_TSO but no supported IP version (0x%04x)\n",
		    __func__, ntohs(etype));
		return (FALSE);
	}
	/* Ensure we have at least the IP+TCP header in the first mbuf. */
	if (mp->m_len < elen + ip_hlen + sizeof(struct tcphdr))
		return (FALSE);

	idx = txr->next_avail;
	buf = &txr->buffers[idx];
	TXD = (struct i40e_tx_context_desc *) &txr->base[idx];
	tsolen = mp->m_pkthdr.len - (elen + ip_hlen + tcp_hlen);
	type = I40E_TX_DESC_DTYPE_CONTEXT;
	cmd = I40E_TX_CTX_DESC_TSO;
	mss = mp->m_pkthdr.tso_segsz;

	type_cmd_tso_mss = ((u64)type << I40E_TXD_CTX_QW1_DTYPE_SHIFT) |
	    ((u64)cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
	    ((u64)tsolen << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
	    ((u64)mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
	TXD->type_cmd_tso_mss = htole64(type_cmd_tso_mss);

	TXD->tunneling_params = htole32(0);
	buf->m_head = NULL;
	buf->eop_index = -1;

	if (++idx == que->num_desc)
		idx = 0;

	txr->avail--;
	txr->next_avail = idx;

	return (TRUE);
}

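/*
 * Illustrative worked example, not part of the original driver: the TSO
 * length programmed above excludes every header.  For a hypothetical
 * 65535-byte send with elen = 14, ip_hlen = 20, tcp_hlen = 20 and
 * tso_segsz = 1448:
 */
#if 0
	u32 ex_tsolen = 65535 - (14 + 20 + 20);	/* 65481 payload bytes  */
	u32 ex_nsegs  = howmany(65481, 1448);	/* hw emits 46 segments */
#endif
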
/*
** ixl_get_tx_head - Retrieve the value from the
**    location the HW records its HEAD index
*/
static inline u32
ixl_get_tx_head(struct ixl_queue *que)
{
	struct tx_ring	*txr = &que->txr;
	void *head = &txr->base[que->num_desc];

	return LE32_TO_CPU(*(volatile __le32 *)head);
}

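/*
 * Illustrative note, not part of the original driver: with head writeback
 * the hardware DMA-writes its consumer index into the slot one past the
 * last real descriptor, which is why the read above indexes
 * base[que->num_desc].  A hypothetical consumer-side check:
 */
#if 0
	if (txr->next_to_clean != ixl_get_tx_head(que)) {
		/* descriptors up to (but excluding) head are now done */
	}
#endif
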
/**********************************************************************
 *
 *  Examine each tx_buffer in the used queue. If the hardware is done
 *  processing the packet then free associated resources. The
 *  tx_buffer is put back on the free queue.
 *
 **********************************************************************/
bool
ixl_txeof(struct ixl_queue *que)
{
	struct tx_ring		*txr = &que->txr;
	u32			first, last, head, done, processed;
	struct ixl_tx_buf	*buf;
	struct i40e_tx_desc	*tx_desc, *eop_desc;

	mtx_assert(&txr->mtx, MA_OWNED);

#ifdef DEV_NETMAP
	// XXX todo: implement moderation
	if (netmap_tx_irq(que->vsi->ifp, que->me))
		return (FALSE);
#endif /* DEV_NETMAP */
	/* These are not the descriptors you seek, move along :) */
	if (txr->avail == que->num_desc) {
		que->busy = 0;
		return (FALSE);
	}

	processed = 0;
	first = txr->next_to_clean;
	buf = &txr->buffers[first];
	tx_desc = (struct i40e_tx_desc *)&txr->base[first];
	last = buf->eop_index;
	if (last == -1)
		return (FALSE);
	eop_desc = (struct i40e_tx_desc *)&txr->base[last];
	/* Get the Head WB value */
	head = ixl_get_tx_head(que);

	/*
	** Get the index of the first descriptor
	** BEYOND the EOP and call that 'done'.
	** I do this so the comparison in the
	** inner while loop below can be simple.
	*/
	if (++last == que->num_desc) last = 0;
	done = last;

	bus_dmamap_sync(txr->dma.tag, txr->dma.map,
	    BUS_DMASYNC_POSTREAD);
	/*
	** The HEAD index of the ring is written in a
	** defined location; this, rather than a done bit,
	** is what is used to keep track of what must be
	** 'cleaned'.
	*/
	while (first != head) {
		/* We clean the range of the packet */
		while (first != done) {
			++txr->avail;
			++processed;

			if (buf->m_head) {
				txr->bytes += /* for ITR adjustment */
				    buf->m_head->m_pkthdr.len;
				txr->tx_bytes += /* for TX stats */
				    buf->m_head->m_pkthdr.len;
				bus_dmamap_sync(buf->tag,
				    buf->map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(buf->tag,
				    buf->map);
				m_freem(buf->m_head);
				buf->m_head = NULL;
				buf->map = NULL;
			}
			buf->eop_index = -1;

			if (++first == que->num_desc)
				first = 0;

			buf = &txr->buffers[first];
			tx_desc = &txr->base[first];
		}
		++txr->packets;
		/* See if there is more work now */
		last = buf->eop_index;
		if (last != -1) {
			eop_desc = &txr->base[last];
			/* Get next done point */
			if (++last == que->num_desc) last = 0;
			done = last;
		} else
			break;
	}
	bus_dmamap_sync(txr->dma.tag, txr->dma.map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	txr->next_to_clean = first;

	/*
	** Hang detection: we know there's
	** work outstanding or the first return
	** would have been taken, so indicate an
	** unsuccessful pass; in local_timer, if
	** the value is too great the queue will
	** be considered hung. If anything has been
	** cleaned then reset the state.
	*/
	if ((processed == 0) && (que->busy != IXL_QUEUE_HUNG))
		++que->busy;

	if (processed)
		que->busy = 1; /* Note this turns off HUNG */

	/*
	 * If there are no pending descriptors, clear the timeout.
	 */
	if (txr->avail == que->num_desc) {
		que->busy = 0;
		return (FALSE);
	}

	return (TRUE);
}

/*********************************************************************
 *
 *  Refresh mbuf buffers for RX descriptor rings
 *   - now keeps its own state so discards due to resource
 *     exhaustion are unnecessary; if an mbuf cannot be obtained
 *     it just returns, keeping its placeholder, thus it can simply
 *     be recalled to try again.
 *
 **********************************************************************/
static void
ixl_refresh_mbufs(struct ixl_queue *que, int limit)
{
	struct ixl_vsi		*vsi = que->vsi;
	struct rx_ring		*rxr = &que->rxr;
	bus_dma_segment_t	hseg[1];
	bus_dma_segment_t	pseg[1];
	struct ixl_rx_buf	*buf;
	struct mbuf		*mh, *mp;
	int			i, j, nsegs, error;
	bool			refreshed = FALSE;

	i = j = rxr->next_refresh;
	/* Control the loop with one beyond */
	if (++j == que->num_desc)
		j = 0;

	while (j != limit) {
		buf = &rxr->buffers[i];
		if (rxr->hdr_split == FALSE)
			goto no_split;

		if (buf->m_head == NULL) {
			mh = m_gethdr(M_NOWAIT, MT_DATA);
			if (mh == NULL)
				goto update;
		} else
			mh = buf->m_head;

		mh->m_pkthdr.len = mh->m_len = MHLEN;
		mh->m_flags |= M_PKTHDR;
		/* Get the memory mapping */
		error = bus_dmamap_load_mbuf_sg(rxr->htag,
		    buf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("Refresh mbufs: hdr dmamap load"
			    " failure - %d\n", error);
			m_free(mh);
			buf->m_head = NULL;
			goto update;
		}
		buf->m_head = mh;
		bus_dmamap_sync(rxr->htag, buf->hmap,
		    BUS_DMASYNC_PREREAD);
		rxr->base[i].read.hdr_addr =
		    htole64(hseg[0].ds_addr);
no_split:
		if (buf->m_pack == NULL) {
			mp = m_getjcl(M_NOWAIT, MT_DATA,
			    M_PKTHDR, rxr->mbuf_sz);
			if (mp == NULL)
				goto update;
		} else
			mp = buf->m_pack;

		mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
		/* Get the memory mapping */
		error = bus_dmamap_load_mbuf_sg(rxr->ptag,
		    buf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("Refresh mbufs: payload dmamap load"
			    " failure - %d\n", error);
			m_free(mp);
			buf->m_pack = NULL;
			goto update;
		}
		buf->m_pack = mp;
		bus_dmamap_sync(rxr->ptag, buf->pmap,
		    BUS_DMASYNC_PREREAD);
		rxr->base[i].read.pkt_addr =
		    htole64(pseg[0].ds_addr);
		/* Used only when doing header split */
		rxr->base[i].read.hdr_addr = 0;

		refreshed = TRUE;
		/* Next is precalculated */
		i = j;
		rxr->next_refresh = i;
		if (++j == que->num_desc)
			j = 0;
	}
update:
	if (refreshed) /* Update hardware tail index */
		wr32(vsi->hw, rxr->tail, rxr->next_refresh);
	return;
}

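/*
 * Illustrative note, not part of the original driver: the wr32() above
 * publishes next_refresh as the ring tail, handing every descriptor
 * refreshed so far back to the hardware.  The caller passes the current
 * clean point as the limit, e.g. as ixl_rxeof() does:
 */
#if 0
	ixl_refresh_mbufs(que, rxr->next_check);
#endif
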
/*********************************************************************
 *
 *  Allocate memory for rx_buffer structures. Since we use one
 *  rx_buffer per descriptor, the maximum number of rx_buffers
 *  that we'll need is equal to the number of receive descriptors
 *  that we've defined.
 *
 **********************************************************************/
int
ixl_allocate_rx_data(struct ixl_queue *que)
{
	struct rx_ring		*rxr = &que->rxr;
	struct ixl_vsi		*vsi = que->vsi;
	device_t		dev = vsi->dev;
	struct ixl_rx_buf	*buf;
	int			i, bsize, error;

	bsize = sizeof(struct ixl_rx_buf) * que->num_desc;
	if (!(rxr->buffers =
	    (struct ixl_rx_buf *) malloc(bsize,
	    M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate rx_buffer memory\n");
		error = ENOMEM;
		return (error);
	}
	if ((error = bus_dma_tag_create(NULL,		/* parent */
				   1, 0,		/* alignment, bounds */
				   BUS_SPACE_MAXADDR,	/* lowaddr */
				   BUS_SPACE_MAXADDR,	/* highaddr */
				   NULL, NULL,		/* filter, filterarg */
				   MSIZE,		/* maxsize */
				   1,			/* nsegments */
				   MSIZE,		/* maxsegsize */
				   0,			/* flags */
				   NULL,		/* lockfunc */
				   NULL,		/* lockfuncarg */
				   &rxr->htag))) {
		device_printf(dev, "Unable to create RX DMA htag\n");
		return (error);
	}
	if ((error = bus_dma_tag_create(NULL,		/* parent */
				   1, 0,		/* alignment, bounds */
				   BUS_SPACE_MAXADDR,	/* lowaddr */
				   BUS_SPACE_MAXADDR,	/* highaddr */
				   NULL, NULL,		/* filter, filterarg */
				   MJUM16BYTES,		/* maxsize */
				   1,			/* nsegments */
				   MJUM16BYTES,		/* maxsegsize */
				   0,			/* flags */
				   NULL,		/* lockfunc */
				   NULL,		/* lockfuncarg */
				   &rxr->ptag))) {
		device_printf(dev, "Unable to create RX DMA ptag\n");
		return (error);
	}
	for (i = 0; i < que->num_desc; i++) {
		buf = &rxr->buffers[i];
		error = bus_dmamap_create(rxr->htag,
		    BUS_DMA_NOWAIT, &buf->hmap);
		if (error) {
			device_printf(dev, "Unable to create RX head map\n");
			break;
		}
		error = bus_dmamap_create(rxr->ptag,
		    BUS_DMA_NOWAIT, &buf->pmap);
		if (error) {
			device_printf(dev, "Unable to create RX pkt map\n");
			break;
		}
	}

	return (error);
}

/*********************************************************************
 *
 *  (Re)Initialize the queue receive ring and its buffers.
 *
 **********************************************************************/
int
ixl_init_rx_ring(struct ixl_queue *que)
{
	struct rx_ring		*rxr = &que->rxr;
	struct ixl_vsi		*vsi = que->vsi;
#if defined(INET6) || defined(INET)
	struct ifnet		*ifp = vsi->ifp;
	struct lro_ctrl		*lro = &rxr->lro;
#endif
	struct ixl_rx_buf	*buf;
	bus_dma_segment_t	pseg[1], hseg[1];
	int			rsize, nsegs, error = 0;
#ifdef DEV_NETMAP
	struct netmap_adapter *na = NA(que->vsi->ifp);
	struct netmap_slot *slot;
#endif /* DEV_NETMAP */
	IXL_RX_LOCK(rxr);
#ifdef DEV_NETMAP
	/* same as in ixl_init_tx_ring() */
	slot = netmap_reset(na, NR_RX, que->me, 0);
#endif /* DEV_NETMAP */
	/* Clear the ring contents */
	rsize = roundup2(que->num_desc *
	    sizeof(union i40e_rx_desc), DBA_ALIGN);
	bzero((void *)rxr->base, rsize);
	/* Cleanup any existing buffers */
	for (int i = 0; i < que->num_desc; i++) {
		buf = &rxr->buffers[i];
		if (buf->m_head != NULL) {
			bus_dmamap_sync(rxr->htag, buf->hmap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(rxr->htag, buf->hmap);
			buf->m_head->m_flags |= M_PKTHDR;
			m_freem(buf->m_head);
		}
		if (buf->m_pack != NULL) {
			bus_dmamap_sync(rxr->ptag, buf->pmap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(rxr->ptag, buf->pmap);
			buf->m_pack->m_flags |= M_PKTHDR;
			m_freem(buf->m_pack);
		}
		buf->m_head = NULL;
		buf->m_pack = NULL;
	}

	/* header split is off */
	rxr->hdr_split = FALSE;
	/* Now replenish the mbufs */
	for (int j = 0; j != que->num_desc; ++j) {
		struct mbuf	*mh, *mp;

		buf = &rxr->buffers[j];
#ifdef DEV_NETMAP
		/*
		 * In netmap mode, fill the map and set the buffer
		 * address in the NIC ring, considering the offset
		 * between the netmap and NIC rings (see comment in
		 * ixgbe_setup_transmit_ring() ). No need to allocate
		 * an mbuf, so end the block with a continue;
		 */
		if (slot) {
			int sj = netmap_idx_n2k(&na->rx_rings[que->me], j);
			uint64_t paddr;
			void *addr;

			addr = PNMB(na, slot + sj, &paddr);
			netmap_load_map(na, rxr->dma.tag, buf->pmap, addr);
			/* Update descriptor and the cached value */
			rxr->base[j].read.pkt_addr = htole64(paddr);
			rxr->base[j].read.hdr_addr = 0;
			continue;
		}
#endif /* DEV_NETMAP */
		/*
		** Don't allocate mbufs if not
		** doing header split; it's wasteful.
		*/
		if (rxr->hdr_split == FALSE)
			goto skip_head;

		/* First the header */
		buf->m_head = m_gethdr(M_NOWAIT, MT_DATA);
		if (buf->m_head == NULL) {
			error = ENOBUFS;
			goto fail;
		}
		m_adj(buf->m_head, ETHER_ALIGN);
		mh = buf->m_head;
		mh->m_len = mh->m_pkthdr.len = MHLEN;
		mh->m_flags |= M_PKTHDR;
		/* Get the memory mapping */
		error = bus_dmamap_load_mbuf_sg(rxr->htag,
		    buf->hmap, buf->m_head, hseg,
		    &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) /* Nothing elegant to do here */
			goto fail;
		bus_dmamap_sync(rxr->htag,
		    buf->hmap, BUS_DMASYNC_PREREAD);
		/* Update descriptor */
		rxr->base[j].read.hdr_addr = htole64(hseg[0].ds_addr);
skip_head:
		/* Now the payload cluster */
		buf->m_pack = m_getjcl(M_NOWAIT, MT_DATA,
		    M_PKTHDR, rxr->mbuf_sz);
		if (buf->m_pack == NULL) {
			error = ENOBUFS;
			goto fail;
		}
		mp = buf->m_pack;
		mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
		/* Get the memory mapping */
		error = bus_dmamap_load_mbuf_sg(rxr->ptag,
		    buf->pmap, mp, pseg,
		    &nsegs, BUS_DMA_NOWAIT);
		if (error != 0)
			goto fail;
		bus_dmamap_sync(rxr->ptag,
		    buf->pmap, BUS_DMASYNC_PREREAD);
		/* Update descriptor */
		rxr->base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
		rxr->base[j].read.hdr_addr = 0;
	}
	/* Setup our descriptor indices */
	rxr->next_check = 0;
	rxr->next_refresh = 0;
	rxr->lro_enabled = FALSE;
	rxr->discard = FALSE;

	wr32(vsi->hw, rxr->tail, que->num_desc - 1);
#if defined(INET6) || defined(INET)
	/*
	** Now set up the LRO interface:
	*/
	if (ifp->if_capenable & IFCAP_LRO) {
		int err = tcp_lro_init(lro);
		if (err) {
			if_printf(ifp, "queue %d: LRO Initialization failed!\n",
			    que->me);
			goto fail;
		}
		INIT_DBG_IF(ifp, "queue %d: RX Soft LRO Initialized", que->me);
		rxr->lro_enabled = TRUE;
		lro->ifp = vsi->ifp;
	}
#endif

	bus_dmamap_sync(rxr->dma.tag, rxr->dma.map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

fail:
	IXL_RX_UNLOCK(rxr);
	return (error);
}

/*********************************************************************
 *
 *  Free station receive ring data structures
 *
 **********************************************************************/
void
ixl_free_que_rx(struct ixl_queue *que)
{
	struct rx_ring		*rxr = &que->rxr;
	struct ixl_rx_buf	*buf;

	INIT_DBG_IF(que->vsi->ifp, "queue %d: begin", que->me);

	/* Cleanup any existing buffers */
	if (rxr->buffers != NULL) {
		for (int i = 0; i < que->num_desc; i++) {
			buf = &rxr->buffers[i];
			if (buf->m_head != NULL) {
				bus_dmamap_sync(rxr->htag, buf->hmap,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(rxr->htag, buf->hmap);
				buf->m_head->m_flags |= M_PKTHDR;
				m_freem(buf->m_head);
			}
			if (buf->m_pack != NULL) {
				bus_dmamap_sync(rxr->ptag, buf->pmap,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(rxr->ptag, buf->pmap);
				buf->m_pack->m_flags |= M_PKTHDR;
				m_freem(buf->m_pack);
			}
			buf->m_head = NULL;
			buf->m_pack = NULL;
			if (buf->hmap != NULL) {
				bus_dmamap_destroy(rxr->htag, buf->hmap);
				buf->hmap = NULL;
			}
			if (buf->pmap != NULL) {
				bus_dmamap_destroy(rxr->ptag, buf->pmap);
				buf->pmap = NULL;
			}
		}
		if (rxr->buffers != NULL) {
			free(rxr->buffers, M_DEVBUF);
			rxr->buffers = NULL;
		}
	}

	if (rxr->htag != NULL) {
		bus_dma_tag_destroy(rxr->htag);
		rxr->htag = NULL;
	}
	if (rxr->ptag != NULL) {
		bus_dma_tag_destroy(rxr->ptag);
		rxr->ptag = NULL;
	}

	INIT_DBG_IF(que->vsi->ifp, "queue %d: end", que->me);
	return;
}

static __inline void
ixl_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u8 ptype)
{

#if defined(INET6) || defined(INET)
	/*
	 * At the moment LRO is only for IPv4/TCP packets, and the TCP
	 * checksum of the packet should be computed by hardware. Also it
	 * should not have a VLAN tag in its ethernet header.
	 */
	if (rxr->lro_enabled &&
	    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
	    (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
	    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
		/*
		 * Send to the stack if:
		 *  - LRO not enabled, or
		 *  - no LRO resources, or
		 *  - lro enqueue fails
		 */
		if (rxr->lro.lro_cnt != 0)
			if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
				return;
	}
#endif
	IXL_RX_UNLOCK(rxr);
	(*ifp->if_input)(ifp, m);
	IXL_RX_LOCK(rxr);
}

static __inline void
ixl_rx_discard(struct rx_ring *rxr, int i)
{
	struct ixl_rx_buf	*rbuf;

	rbuf = &rxr->buffers[i];

	if (rbuf->fmp != NULL) {/* Partial chain ? */
		rbuf->fmp->m_flags |= M_PKTHDR;
		m_freem(rbuf->fmp);
		rbuf->fmp = NULL;
	}

	/*
	** With advanced descriptors the writeback
	** clobbers the buffer addrs, so it's easier
	** to just free the existing mbufs and take
	** the normal refresh path to get new buffers
	** and mapping.
	*/
	if (rbuf->m_head) {
		m_free(rbuf->m_head);
		rbuf->m_head = NULL;
	}

	if (rbuf->m_pack) {
		m_free(rbuf->m_pack);
		rbuf->m_pack = NULL;
	}

	return;
}

#ifdef RSS
/*
** ixl_ptype_to_hash: parse the packet type
** to determine the appropriate hash.
*/
static inline int
ixl_ptype_to_hash(u8 ptype)
{
	struct i40e_rx_ptype_decoded	decoded;
	u8				ex = 0;

	decoded = decode_rx_desc_ptype(ptype);
	ex = decoded.outer_frag;

	if (!decoded.known)
		return M_HASHTYPE_OPAQUE;

	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_L2)
		return M_HASHTYPE_OPAQUE;

	/* Note: anything that gets to this point is IP */
	if (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6) {
		switch (decoded.inner_prot) {
		case I40E_RX_PTYPE_INNER_PROT_TCP:
			if (ex)
				return M_HASHTYPE_RSS_TCP_IPV6_EX;
			else
				return M_HASHTYPE_RSS_TCP_IPV6;
		case I40E_RX_PTYPE_INNER_PROT_UDP:
			if (ex)
				return M_HASHTYPE_RSS_UDP_IPV6_EX;
			else
				return M_HASHTYPE_RSS_UDP_IPV6;
		default:
			if (ex)
				return M_HASHTYPE_RSS_IPV6_EX;
			else
				return M_HASHTYPE_RSS_IPV6;
		}
	}
	if (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4) {
		switch (decoded.inner_prot) {
		case I40E_RX_PTYPE_INNER_PROT_TCP:
			return M_HASHTYPE_RSS_TCP_IPV4;
		case I40E_RX_PTYPE_INNER_PROT_UDP:
			if (ex)
				return M_HASHTYPE_RSS_UDP_IPV4_EX;
			else
				return M_HASHTYPE_RSS_UDP_IPV4;
		default:
			return M_HASHTYPE_RSS_IPV4;
		}
	}
	/* We should never get here! */
	return M_HASHTYPE_OPAQUE;
}
#endif /* RSS */

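/*
 * Illustrative sketch, not part of the original driver: how the classifier
 * above is consumed on a completed EOP descriptor (mirroring ixl_rxeof());
 * a decoded IPv4/TCP ptype, for instance, yields M_HASHTYPE_RSS_TCP_IPV4.
 */
#if 0
	sendmp->m_pkthdr.flowid = le32toh(cur->wb.qword0.hi_dword.rss);
	M_HASHTYPE_SET(sendmp, ixl_ptype_to_hash(ptype));
#endif
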
/*********************************************************************
 *
 *  This routine executes in interrupt context. It replenishes
 *  the mbufs in the descriptor and sends data which has been
 *  dma'ed into host memory to upper layer.
 *
 *  We loop at most count times if count is > 0, or until done if
 *  count < 0.
 *
 *  Return TRUE for more work, FALSE for all clean.
 *********************************************************************/
bool
ixl_rxeof(struct ixl_queue *que, int count)
{
	struct ixl_vsi		*vsi = que->vsi;
	struct rx_ring		*rxr = &que->rxr;
	struct ifnet		*ifp = vsi->ifp;
#if defined(INET6) || defined(INET)
	struct lro_ctrl		*lro = &rxr->lro;
	struct lro_entry	*queued;
#endif
	int			i, nextp, processed = 0;
	union i40e_rx_desc	*cur;
	struct ixl_rx_buf	*rbuf, *nbuf;

	IXL_RX_LOCK(rxr);

#ifdef DEV_NETMAP
	if (netmap_rx_irq(ifp, que->me, &count)) {
		IXL_RX_UNLOCK(rxr);
		return (FALSE);
	}
#endif /* DEV_NETMAP */
	for (i = rxr->next_check; count != 0;) {
		struct mbuf	*sendmp, *mh, *mp;
		u32		rsc, status, error;
		u16		hlen, plen, vtag;
		u64		qword;
		u8		ptype;
		bool		eop;

		/* Sync the ring. */
		bus_dmamap_sync(rxr->dma.tag, rxr->dma.map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur = &rxr->base[i];
		qword = le64toh(cur->wb.qword1.status_error_len);
		status = (qword & I40E_RXD_QW1_STATUS_MASK)
		    >> I40E_RXD_QW1_STATUS_SHIFT;
		error = (qword & I40E_RXD_QW1_ERROR_MASK)
		    >> I40E_RXD_QW1_ERROR_SHIFT;
		plen = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK)
		    >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
		hlen = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK)
		    >> I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
		ptype = (qword & I40E_RXD_QW1_PTYPE_MASK)
		    >> I40E_RXD_QW1_PTYPE_SHIFT;
		if ((status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) == 0) {
			++rxr->not_done;
			break;
		}
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;

		count--;
		sendmp = NULL;
		nbuf = NULL;
		rsc = 0;
		cur->wb.qword1.status_error_len = 0;
		rbuf = &rxr->buffers[i];
		mh = rbuf->m_head;
		mp = rbuf->m_pack;
		eop = (status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT));
		if (status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT))
			vtag = le16toh(cur->wb.qword0.lo_dword.l2tag1);
		else
			vtag = 0;
		/*
		** Make sure bad packets are discarded;
		** note that only the EOP descriptor has valid
		** error results.
		*/
		if (eop && (error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
			rxr->discarded++;
			ixl_rx_discard(rxr, i);
			goto next_desc;
		}

		/* Prefetch the next buffer */
		if (!eop) {
			nextp = i + 1;
			if (nextp == que->num_desc)
				nextp = 0;
			nbuf = &rxr->buffers[nextp];
			prefetch(nbuf);
		}
		/*
		** The header mbuf is ONLY used when header
		** split is enabled, otherwise we get normal
		** behavior, ie, both header and payload
		** are DMA'd into the payload buffer.
		**
		** Rather than using the fmp/lmp global pointers
		** we now keep the head of a packet chain in the
		** buffer struct and pass this along from one
		** descriptor to the next, until we get EOP.
		*/
		if (rxr->hdr_split && (rbuf->fmp == NULL)) {
			if (hlen > IXL_RX_HDR)
				hlen = IXL_RX_HDR;
			mh->m_len = hlen;
			mh->m_flags |= M_PKTHDR;
			mh->m_next = NULL;
			mh->m_pkthdr.len = mh->m_len;
			/* Null buf pointer so it is refreshed */
			rbuf->m_head = NULL;
			/*
			** Check the payload length; this
			** could be zero if it's a small
			** packet.
			*/
			if (plen > 0) {
				mp->m_len = plen;
				mp->m_next = NULL;
				mp->m_flags &= ~M_PKTHDR;
				mh->m_next = mp;
				mh->m_pkthdr.len += mp->m_len;
				/* Null buf pointer so it is refreshed */
				rbuf->m_pack = NULL;
				rxr->split++;
			}
			/*
			** Now create the forward
			** chain so when complete
			** we won't have to.
			*/
			if (eop == 0) {
				/* stash the chain head */
				nbuf->fmp = mh;
				/* Make forward chain */
				if (plen)
					mp->m_next = nbuf->m_pack;
				else
					mh->m_next = nbuf->m_pack;
			} else {
				/* Singlet, prepare to send */
				sendmp = mh;
				if (vtag) {
					sendmp->m_pkthdr.ether_vtag = vtag;
					sendmp->m_flags |= M_VLANTAG;
				}
			}
		} else {
			/*
			** Either no header split, or a
			** secondary piece of a fragmented
			** packet.
			*/
			mp->m_len = plen;
			/*
			** See if there is a stored head
			** that determines what we are.
			*/
			sendmp = rbuf->fmp;
			rbuf->m_pack = rbuf->fmp = NULL;

			if (sendmp != NULL) /* secondary frag */
				sendmp->m_pkthdr.len += mp->m_len;
			else {
				/* first desc of a non-ps chain */
				sendmp = mp;
				sendmp->m_flags |= M_PKTHDR;
				sendmp->m_pkthdr.len = mp->m_len;
				if (vtag) {
					sendmp->m_pkthdr.ether_vtag = vtag;
					sendmp->m_flags |= M_VLANTAG;
				}
			}
			/* Pass the head pointer on */
			if (eop == 0) {
				nbuf->fmp = sendmp;
				sendmp = NULL;
				mp->m_next = nbuf->m_pack;
			}
		}
		++processed;
		/* Sending this frame? */
		if (eop) {
			sendmp->m_pkthdr.rcvif = ifp;
			/* gather stats */
			rxr->rx_packets++;
			rxr->rx_bytes += sendmp->m_pkthdr.len;
			/* capture data for dynamic ITR adjustment */
			rxr->packets++;
			rxr->bytes += sendmp->m_pkthdr.len;
			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
				ixl_rx_checksum(sendmp, status, error, ptype);
#ifdef RSS
			sendmp->m_pkthdr.flowid =
			    le32toh(cur->wb.qword0.hi_dword.rss);
			M_HASHTYPE_SET(sendmp, ixl_ptype_to_hash(ptype));
#else
			sendmp->m_pkthdr.flowid = que->msix;
			M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE);
#endif
		}
next_desc:
		bus_dmamap_sync(rxr->dma.tag, rxr->dma.map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Advance our pointers to the next descriptor. */
		if (++i == que->num_desc)
			i = 0;

		/* Now send to the stack or do LRO */
		if (sendmp != NULL) {
			rxr->next_check = i;
			ixl_rx_input(rxr, ifp, sendmp, ptype);
			i = rxr->next_check;
		}

		/* Every 8 descriptors we go to refresh mbufs */
		if (processed == 8) {
			ixl_refresh_mbufs(que, i);
			processed = 0;
		}
	}

	/* Refresh any remaining buf structs */
	if (ixl_rx_unrefreshed(que))
		ixl_refresh_mbufs(que, i);
	rxr->next_check = i;

#if defined(INET6) || defined(INET)
	/*
	** Flush any outstanding LRO work
	*/
	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&lro->lro_active, next);
		tcp_lro_flush(lro, queued);
	}
#endif

	IXL_RX_UNLOCK(rxr);
	return (FALSE);
}

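/*
 * Illustrative worked example, not part of the original driver: unpacking
 * the writeback qword parsed at the top of the loop above.  A hypothetical
 * completion of a 66-byte frame with DD and EOF set would decode as:
 */
#if 0
	u64 qword = ((u64)66 << I40E_RXD_QW1_LENGTH_PBUF_SHIFT)
	    | (1 << I40E_RX_DESC_STATUS_DD_SHIFT)
	    | (1 << I40E_RX_DESC_STATUS_EOF_SHIFT);
	u16 plen = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK)
	    >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT;		/* 66 */
#endif
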
/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack about the status of the checksum so that it
 *  doesn't spend time verifying it.
 *
 *********************************************************************/
static void
ixl_rx_checksum(struct mbuf * mp, u32 status, u32 error, u8 ptype)
{
	struct i40e_rx_ptype_decoded decoded;

	decoded = decode_rx_desc_ptype(ptype);

	/* Errors? */
	if (error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
	    (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))) {
		mp->m_pkthdr.csum_flags = 0;
		return;
	}

	/* IPv6 with extension headers likely has a bad csum */
	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
	    decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
		if (status &
		    (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) {
			mp->m_pkthdr.csum_flags = 0;
			return;
		}

	/* IP Checksum Good */
	mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
	mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;

	if (status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)) {
		mp->m_pkthdr.csum_flags |=
		    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
		mp->m_pkthdr.csum_data |= htons(0xffff);
	}
	return;
}

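/*
 * Illustrative note, not part of the original driver: what the flags set
 * above mean to the stack.  CSUM_IP_CHECKED|CSUM_IP_VALID reports a
 * hardware-verified IP header checksum; CSUM_DATA_VALID|CSUM_PSEUDO_HDR
 * with csum_data = 0xffff lets TCP/UDP input skip software verification.
 */
#if 0
	if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
		/* consumer uses m->m_pkthdr.csum_data, computes nothing */
	}
#endif
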
#if __FreeBSD_version >= 1100000
uint64_t
ixl_get_counter(if_t ifp, ift_counter cnt)
{
	struct ixl_vsi *vsi;

	vsi = if_getsoftc(ifp);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (vsi->ipackets);
	case IFCOUNTER_IERRORS:
		return (vsi->ierrors);
	case IFCOUNTER_OPACKETS:
		return (vsi->opackets);
	case IFCOUNTER_OERRORS:
		return (vsi->oerrors);
	case IFCOUNTER_COLLISIONS:
		/* Collisions are by standard impossible in 40G/10G Ethernet */
		return (0);
	case IFCOUNTER_IBYTES:
		return (vsi->ibytes);
	case IFCOUNTER_OBYTES:
		return (vsi->obytes);
	case IFCOUNTER_IMCASTS:
		return (vsi->imcasts);
	case IFCOUNTER_OMCASTS:
		return (vsi->omcasts);
	case IFCOUNTER_IQDROPS:
		return (vsi->iqdrops);
	case IFCOUNTER_OQDROPS:
		return (vsi->oqdrops);
	case IFCOUNTER_NOPROTO:
		return (vsi->noproto);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}
#endif