/******************************************************************************

  Copyright (c) 2013-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*
** IXL driver TX/RX Routines:
**  This was separated to allow usage by
**  both the BASE and the VF drivers.
*/
#ifndef IXL_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
#endif

#include "ixl.h"

#ifdef RSS
#include <net/rss_config.h>
#endif
/* Local Prototypes */
static void	ixl_rx_checksum(struct mbuf *, u32, u32, u8);
static void	ixl_refresh_mbufs(struct ixl_queue *, int);
static int	ixl_xmit(struct ixl_queue *, struct mbuf **);
static int	ixl_tx_setup_offload(struct ixl_queue *,
		    struct mbuf *, u32 *, u32 *);
static bool	ixl_tso_setup(struct ixl_queue *, struct mbuf *);

static __inline void ixl_rx_discard(struct rx_ring *, int);
static __inline void ixl_rx_input(struct rx_ring *, struct ifnet *,
		    struct mbuf *, u8);
#ifdef DEV_NETMAP
#include <dev/netmap/if_ixl_netmap.h>
#endif /* DEV_NETMAP */
/*
** Multiqueue Transmit driver
*/
int
ixl_mq_start(struct ifnet *ifp, struct mbuf *m)
{
	struct ixl_vsi		*vsi = ifp->if_softc;
	struct ixl_queue	*que;
	struct tx_ring		*txr;
	int			err, i;
#ifdef RSS
	u32			bucket_id;
#endif

	/*
	** Which queue to use:
	**
	** When doing RSS, map it to the same outbound
	** queue as the incoming flow would be mapped to.
	** If everything is set up correctly, it should be
	** the same bucket the current CPU maps to.
	*/
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
#ifdef RSS
		if (rss_hash2bucket(m->m_pkthdr.flowid,
		    M_HASHTYPE_GET(m), &bucket_id) == 0) {
			i = bucket_id % vsi->num_queues;
		} else
#endif
			i = m->m_pkthdr.flowid % vsi->num_queues;
	} else
		i = curcpu % vsi->num_queues;
	/*
	** This may not be perfect, but until something
	** better comes along it will keep from scheduling
	** on stalled queues.
	*/
	if (((1 << i) & vsi->active_queues) == 0)
		i = ffsl(vsi->active_queues) - 1; /* ffsl() is one-based */

	que = &vsi->queues[i];
	txr = &que->txr;

	err = drbr_enqueue(ifp, txr->br, m);
	if (err)
		return (err);
	if (IXL_TX_TRYLOCK(txr)) {
		ixl_mq_start_locked(ifp, txr);
		IXL_TX_UNLOCK(txr);
	} else
		taskqueue_enqueue(que->tq, &que->tx_task);

	return (0);
}
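/*
** Note on the pattern above: the frame is always enqueued to the
** buf_ring first; if the TX lock is contended, the drain is deferred
** to the per-queue taskqueue (ixl_deferred_mq_start) instead of
** blocking the caller on a busy queue.
*/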
int
ixl_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
{
	struct ixl_queue	*que = txr->que;
	struct ixl_vsi		*vsi = que->vsi;
	struct mbuf		*next;
	int			err = 0;

	if (((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) ||
	    vsi->link_active == 0)
		return (ENETDOWN);

	/* Process the transmit queue */
	while ((next = drbr_peek(ifp, txr->br)) != NULL) {
		if ((err = ixl_xmit(que, &next)) != 0) {
			if (next == NULL)
				drbr_advance(ifp, txr->br);
			else
				drbr_putback(ifp, txr->br, next);
			break;
		}
		drbr_advance(ifp, txr->br);
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, next);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
	}

	if (txr->avail < IXL_TX_CLEANUP_THRESHOLD)
		ixl_txeof(que);

	return (err);
}
/*
 * Called from a taskqueue to drain queued transmit packets.
 */
void
ixl_deferred_mq_start(void *arg, int pending)
{
	struct ixl_queue	*que = arg;
	struct tx_ring		*txr = &que->txr;
	struct ixl_vsi		*vsi = que->vsi;
	struct ifnet		*ifp = vsi->ifp;

	IXL_TX_LOCK(txr);
	if (!drbr_empty(ifp, txr->br))
		ixl_mq_start_locked(ifp, txr);
	IXL_TX_UNLOCK(txr);
}
/*
** Flush all queue ring buffers
*/
void
ixl_qflush(struct ifnet *ifp)
{
	struct ixl_vsi	*vsi = ifp->if_softc;

	for (int i = 0; i < vsi->num_queues; i++) {
		struct ixl_queue *que = &vsi->queues[i];
		struct tx_ring	*txr = &que->txr;
		struct mbuf	*m;

		IXL_TX_LOCK(txr);
		while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
			m_freem(m);
		IXL_TX_UNLOCK(txr);
	}
	if_qflush(ifp);
}
/*
** Find mbuf chains passed to the driver
** that are 'sparse', using more than 8
** mbufs to deliver an mss-size chunk of data
*/
static bool
ixl_tso_detect_sparse(struct mbuf *mp)
{
	struct mbuf	*m;
	int		num = 0, mss;
	bool		ret = FALSE;

	mss = mp->m_pkthdr.tso_segsz;
	for (m = mp->m_next; m != NULL; m = m->m_next) {
		num++;
		mss -= m->m_len;
		if (mss < 1)
			break;
		if (m->m_next == NULL)
			break;
	}
	if (num > IXL_SPARSE_CHAIN)
		ret = TRUE;

	return (ret);
}
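/*
** Example: with an MSS (tso_segsz) of 1448 bytes, a chain that needs
** more than IXL_SPARSE_CHAIN (8) mbufs to cover a single MSS worth of
** data is flagged sparse; ixl_xmit() then m_defrag()s it so one
** segment never consumes more descriptors than the hardware allows.
*/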
/*********************************************************************
 *
 *  This routine maps the mbufs to tx descriptors, allowing the
 *  TX engine to transmit the packets.
 *	- return 0 on success, positive on failure
 *
 **********************************************************************/
#define IXL_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
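/*
** EOP marks the final descriptor of a frame; RS asks the hardware to
** report completion for it. ixl_txeof() later observes completion via
** the head writeback location rather than per-descriptor done bits.
*/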
static int
ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp)
{
	struct ixl_vsi		*vsi = que->vsi;
	struct i40e_hw		*hw = vsi->hw;
	struct tx_ring		*txr = &que->txr;
	struct ixl_tx_buf	*buf;
	struct i40e_tx_desc	*txd = NULL;
	struct mbuf		*m_head, *m;
	int			i, j, error, nsegs, maxsegs;
	int			first, last = 0;
	u16			vtag = 0;
	u32			cmd, off;
	bus_dmamap_t		map;
	bus_dma_tag_t		tag;
	bus_dma_segment_t	segs[IXL_MAX_TSO_SEGS];

	cmd = off = 0;
	m_head = *m_headp;

	/*
	 * Important to capture the first descriptor
	 * used because it will contain the index of
	 * the one we tell the hardware to report back
	 */
	first = txr->next_avail;
	buf = &txr->buffers[first];
	map = buf->map;
	tag = txr->tx_tag;
	maxsegs = IXL_MAX_TX_SEGS;

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		/* Use larger mapping for TSO */
		tag = txr->tso_tag;
		maxsegs = IXL_MAX_TSO_SEGS;
		if (ixl_tso_detect_sparse(m_head)) {
			m = m_defrag(m_head, M_NOWAIT);
			if (m == NULL) {
				m_freem(*m_headp);
				*m_headp = NULL;
				return (ENOBUFS);
			}
			*m_headp = m;
		}
	}

	/*
	 * Map the packet for DMA.
	 */
	error = bus_dmamap_load_mbuf_sg(tag, map,
	    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

	if (error == EFBIG) {
		/* Too many segments; try to collapse the chain once */
		m = m_collapse(*m_headp, M_NOWAIT, maxsegs);
		if (m == NULL) {
			que->mbuf_defrag_failed++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (ENOBUFS);
		}
		*m_headp = m;

		/* Try it again */
		error = bus_dmamap_load_mbuf_sg(tag, map,
		    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

		if (error == ENOMEM) {
			que->tx_dma_setup++;
			return (error);
		} else if (error != 0) {
			que->tx_dma_setup++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (error);
		}
	} else if (error == ENOMEM) {
		que->tx_dma_setup++;
		return (error);
	} else if (error != 0) {
		que->tx_dma_setup++;
		m_freem(*m_headp);
		*m_headp = NULL;
		return (error);
	}

	/* Make certain there are enough descriptors */
	if (nsegs > txr->avail - 2) {
		txr->no_desc++;
		error = ENOBUFS;
		goto xmit_fail;
	}
	m_head = *m_headp;

	/* Set up the TSO/CSUM offload */
	if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) {
		error = ixl_tx_setup_offload(que, m_head, &cmd, &off);
		if (error)
			goto xmit_fail;
	}

	cmd |= I40E_TX_DESC_CMD_ICRC;
	/* Grab the VLAN tag */
	if (m_head->m_flags & M_VLANTAG) {
		cmd |= I40E_TX_DESC_CMD_IL2TAG1;
		vtag = htole16(m_head->m_pkthdr.ether_vtag);
	}

	i = txr->next_avail;
	for (j = 0; j < nsegs; j++) {
		bus_size_t seglen;

		buf = &txr->buffers[i];
		buf->tag = tag; /* Keep track of the type tag */
		txd = &txr->base[i];
		seglen = segs[j].ds_len;

		txd->buffer_addr = htole64(segs[j].ds_addr);
		txd->cmd_type_offset_bsz =
		    htole64(I40E_TX_DESC_DTYPE_DATA
		    | ((u64)cmd << I40E_TXD_QW1_CMD_SHIFT)
		    | ((u64)off << I40E_TXD_QW1_OFFSET_SHIFT)
		    | ((u64)seglen << I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
		    | ((u64)vtag << I40E_TXD_QW1_L2TAG1_SHIFT));

		last = i; /* descriptor that will get completion IRQ */

		if (++i == que->num_desc)
			i = 0;

		buf->m_head = NULL;
		buf->eop_index = -1;
	}
	/* Set the last descriptor for report */
	txd->cmd_type_offset_bsz |=
	    htole64(((u64)IXL_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT));
	txr->avail -= nsegs;
	txr->next_avail = i;

	buf->m_head = m_head;
	/* Swap the dma map between the first and last descriptor */
	txr->buffers[first].map = buf->map;
	buf->map = map;
	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE);

	/* Set the index of the descriptor that will be marked done */
	buf = &txr->buffers[first];
	buf->eop_index = last;

	bus_dmamap_sync(txr->dma.tag, txr->dma.map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/*
	 * Advance the Transmit Descriptor Tail (Tdt), this tells the
	 * hardware that this frame is available to transmit.
	 */
	++txr->total_packets;
	wr32(hw, txr->tail, i);

	/* Mark outstanding work */
	if (que->busy == 0)
		que->busy = 1;
	return (0);

xmit_fail:
	bus_dmamap_unload(tag, buf->map);
	return (error);
}
/*********************************************************************
 *
 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
 *  the information needed to transmit a packet on the wire. This is
 *  called only once at attach, setup is done every reset.
 *
 **********************************************************************/
int
ixl_allocate_tx_data(struct ixl_queue *que)
{
	struct tx_ring		*txr = &que->txr;
	struct ixl_vsi		*vsi = que->vsi;
	device_t		dev = vsi->dev;
	struct ixl_tx_buf	*buf;
	int			error = 0;

	/*
	 * Setup DMA descriptor areas.
	 */
	if ((error = bus_dma_tag_create(NULL,		/* parent */
			   1, 0,			/* alignment, bounds */
			   BUS_SPACE_MAXADDR,		/* lowaddr */
			   BUS_SPACE_MAXADDR,		/* highaddr */
			   NULL, NULL,			/* filter, filterarg */
			   IXL_TSO_SIZE,		/* maxsize */
			   IXL_MAX_TX_SEGS,		/* nsegments */
			   PAGE_SIZE,			/* maxsegsize */
			   0,				/* flags */
			   NULL,			/* lockfunc */
			   NULL,			/* lockfuncarg */
			   &txr->tx_tag))) {
		device_printf(dev,"Unable to allocate TX DMA tag\n");
		goto fail;
	}

	/* Make a special tag for TSO */
	if ((error = bus_dma_tag_create(NULL,		/* parent */
			   1, 0,			/* alignment, bounds */
			   BUS_SPACE_MAXADDR,		/* lowaddr */
			   BUS_SPACE_MAXADDR,		/* highaddr */
			   NULL, NULL,			/* filter, filterarg */
			   IXL_TSO_SIZE,		/* maxsize */
			   IXL_MAX_TSO_SEGS,		/* nsegments */
			   PAGE_SIZE,			/* maxsegsize */
			   0,				/* flags */
			   NULL,			/* lockfunc */
			   NULL,			/* lockfuncarg */
			   &txr->tso_tag))) {
		device_printf(dev,"Unable to allocate TX TSO DMA tag\n");
		goto fail;
	}

	if (!(txr->buffers =
	    (struct ixl_tx_buf *) malloc(sizeof(struct ixl_tx_buf) *
	    que->num_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate tx_buffer memory\n");
		error = ENOMEM;
		goto fail;
	}

	/* Create the descriptor buffer default dma maps */
	buf = txr->buffers;
	for (int i = 0; i < que->num_desc; i++, buf++) {
		buf->tag = txr->tx_tag;
		error = bus_dmamap_create(buf->tag, 0, &buf->map);
		if (error != 0) {
			device_printf(dev, "Unable to create TX DMA map\n");
			goto fail;
		}
	}
fail:
	return (error);
}
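/*
** Two tags are kept because a TSO frame may map to many more segments
** (IXL_MAX_TSO_SEGS) than a plain frame (IXL_MAX_TX_SEGS); ixl_xmit()
** selects the tag per packet based on CSUM_TSO.
*/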
/*********************************************************************
 *
 *  (Re)Initialize a queue transmit ring.
 *	- called by init, it clears the descriptor ring,
 *	  and frees any stale mbufs
 *
 **********************************************************************/
void
ixl_init_tx_ring(struct ixl_queue *que)
{
	struct tx_ring		*txr = &que->txr;
	struct ixl_tx_buf	*buf;
#ifdef DEV_NETMAP
	struct netmap_adapter *na = NA(que->vsi->ifp);
	struct netmap_slot *slot;
#endif /* DEV_NETMAP */

	/* Clear the old ring contents */
	IXL_TX_LOCK(txr);
#ifdef DEV_NETMAP
	/*
	 * (under lock): if in netmap mode, do some consistency
	 * checks and set slot to entry 0 of the netmap ring.
	 */
	slot = netmap_reset(na, NR_TX, que->me, 0);
#endif /* DEV_NETMAP */

	bzero((void *)txr->base,
	    (sizeof(struct i40e_tx_desc)) * que->num_desc);

	/* Reset indices */
	txr->next_avail = 0;
	txr->next_to_clean = 0;

#ifdef IXL_FDIR
	/* Initialize flow director */
	txr->atr_rate = ixl_atr_rate;
	txr->atr_count = 0;
#endif

	/* Free any existing tx mbufs. */
	buf = txr->buffers;
	for (int i = 0; i < que->num_desc; i++, buf++) {
		if (buf->m_head != NULL) {
			bus_dmamap_sync(buf->tag, buf->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(buf->tag, buf->map);
			m_freem(buf->m_head);
			buf->m_head = NULL;
		}
#ifdef DEV_NETMAP
		/*
		 * In netmap mode, set the map for the packet buffer.
		 * NOTE: Some drivers (not this one) also need to set
		 * the physical buffer address in the NIC ring.
		 * netmap_idx_n2k() maps a nic index, i, into the corresponding
		 * netmap slot index, si
		 */
		if (slot) {
			int si = netmap_idx_n2k(&na->tx_rings[que->me], i);
			netmap_load_map(na, buf->tag, buf->map, NMB(na, slot + si));
		}
#endif /* DEV_NETMAP */
		/* Clear the EOP index */
		buf->eop_index = -1;
	}

	/* Set number of descriptors available */
	txr->avail = que->num_desc;

	bus_dmamap_sync(txr->dma.tag, txr->dma.map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	IXL_TX_UNLOCK(txr);
}
/*********************************************************************
 *
 *  Free transmit ring related data structures.
 *
 **********************************************************************/
void
ixl_free_que_tx(struct ixl_queue *que)
{
	struct tx_ring		*txr = &que->txr;
	struct ixl_tx_buf	*buf;

	INIT_DBG_IF(que->vsi->ifp, "queue %d: begin", que->me);

	for (int i = 0; i < que->num_desc; i++) {
		buf = &txr->buffers[i];
		if (buf->m_head != NULL) {
			bus_dmamap_sync(buf->tag, buf->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(buf->tag,
			    buf->map);
			m_freem(buf->m_head);
			buf->m_head = NULL;
			if (buf->map != NULL) {
				bus_dmamap_destroy(buf->tag,
				    buf->map);
				buf->map = NULL;
			}
		} else if (buf->map != NULL) {
			bus_dmamap_unload(buf->tag,
			    buf->map);
			bus_dmamap_destroy(buf->tag,
			    buf->map);
			buf->map = NULL;
		}
	}
	if (txr->br != NULL)
		buf_ring_free(txr->br, M_DEVBUF);
	if (txr->buffers != NULL) {
		free(txr->buffers, M_DEVBUF);
		txr->buffers = NULL;
	}
	if (txr->tx_tag != NULL) {
		bus_dma_tag_destroy(txr->tx_tag);
		txr->tx_tag = NULL;
	}
	if (txr->tso_tag != NULL) {
		bus_dma_tag_destroy(txr->tso_tag);
		txr->tso_tag = NULL;
	}

	INIT_DBG_IF(que->vsi->ifp, "queue %d: end", que->me);
	return;
}
/*********************************************************************
 *
 *  Setup descriptor for hw offloads
 *
 **********************************************************************/

static int
ixl_tx_setup_offload(struct ixl_queue *que,
    struct mbuf *mp, u32 *cmd, u32 *off)
{
	struct ether_vlan_header	*eh;
#ifdef INET
	struct ip			*ip = NULL;
#endif
	struct tcphdr			*th = NULL;
#ifdef INET6
	struct ip6_hdr			*ip6;
#endif
	int				elen, ip_hlen = 0, tcp_hlen;
	u16				etype;
	u8				ipproto = 0;
	bool				tso = FALSE;

	/* Set up the TSO context descriptor if required */
	if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
		tso = ixl_tso_setup(que, mp);
		if (tso)
			++que->tso;
		else
			return (ENXIO);
	}

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present,
	 * helpful for QinQ too.
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		elen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		elen = ETHER_HDR_LEN;
	}

	switch (etype) {
#ifdef INET
	case ETHERTYPE_IP:
		ip = (struct ip *)(mp->m_data + elen);
		ip_hlen = ip->ip_hl << 2;
		ipproto = ip->ip_p;
		th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
		/* The IP checksum must be recalculated with TSO */
		if (tso)
			*cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
		else
			*cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
		break;
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mp->m_data + elen);
		ip_hlen = sizeof(struct ip6_hdr);
		ipproto = ip6->ip6_nxt;
		th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
		*cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
		break;
#endif
	default:
		break;
	}

	*off |= (elen >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
	*off |= (ip_hlen >> 2) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;

	switch (ipproto) {
	case IPPROTO_TCP:
		tcp_hlen = th->th_off << 2;
		if (mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_TCP_IPV6)) {
			*cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
			*off |= (tcp_hlen >> 2) <<
			    I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		}
#ifdef IXL_FDIR
		ixl_atr(que, th, etype);
#endif
		break;
	case IPPROTO_UDP:
		if (mp->m_pkthdr.csum_flags & (CSUM_UDP|CSUM_UDP_IPV6)) {
			*cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
			*off |= (sizeof(struct udphdr) >> 2) <<
			    I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		}
		break;
	case IPPROTO_SCTP:
		if (mp->m_pkthdr.csum_flags & (CSUM_SCTP|CSUM_SCTP_IPV6)) {
			*cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
			*off |= (sizeof(struct sctphdr) >> 2) <<
			    I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		}
		/* Fall Thru */
	default:
		break;
	}

	return (0);
}
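/*
** Worked example of the offset packing above for an untagged IPv4/TCP
** frame with no options: MACLEN = 14 >> 1 = 7 (2-byte words),
** IPLEN = 20 >> 2 = 5 (4-byte dwords), L4LEN = 20 >> 2 = 5 (dwords).
*/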
/**********************************************************************
 *
 *  Setup context for hardware segmentation offload (TSO)
 *
 **********************************************************************/
static bool
ixl_tso_setup(struct ixl_queue *que, struct mbuf *mp)
{
	struct tx_ring			*txr = &que->txr;
	struct i40e_tx_context_desc	*TXD;
	struct ixl_tx_buf		*buf;
	u32				cmd, mss, type, tsolen;
	u16				etype;
	int				idx, elen, ip_hlen, tcp_hlen;
	struct ether_vlan_header	*eh;
#ifdef INET
	struct ip			*ip;
#endif
#ifdef INET6
	struct ip6_hdr			*ip6;
#endif
#if defined(INET6) || defined(INET)
	struct tcphdr			*th;
#endif
	u64				type_cmd_tso_mss;

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		elen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		etype = eh->evl_proto;
	} else {
		elen = ETHER_HDR_LEN;
		etype = eh->evl_encap_proto;
	}

	switch (ntohs(etype)) {
#ifdef INET6
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mp->m_data + elen);
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return FALSE;
		ip_hlen = sizeof(struct ip6_hdr);
		th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
		th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
		tcp_hlen = th->th_off << 2;
		break;
#endif
#ifdef INET
	case ETHERTYPE_IP:
		ip = (struct ip *)(mp->m_data + elen);
		if (ip->ip_p != IPPROTO_TCP)
			return FALSE;
		ip->ip_sum = 0;
		ip_hlen = ip->ip_hl << 2;
		th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
		th->th_sum = in_pseudo(ip->ip_src.s_addr,
		    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
		tcp_hlen = th->th_off << 2;
		break;
#endif
	default:
		printf("%s: CSUM_TSO but no supported IP version (0x%04x)",
		    __func__, ntohs(etype));
		return FALSE;
	}

	/* Ensure we have at least the IP+TCP header in the first mbuf. */
	if (mp->m_len < elen + ip_hlen + sizeof(struct tcphdr))
		return FALSE;

	idx = txr->next_avail;
	buf = &txr->buffers[idx];
	TXD = (struct i40e_tx_context_desc *) &txr->base[idx];
	tsolen = mp->m_pkthdr.len - (elen + ip_hlen + tcp_hlen);

	type = I40E_TX_DESC_DTYPE_CONTEXT;
	cmd = I40E_TX_CTX_DESC_TSO;
	mss = mp->m_pkthdr.tso_segsz;

	type_cmd_tso_mss = ((u64)type << I40E_TXD_CTX_QW1_DTYPE_SHIFT) |
	    ((u64)cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
	    ((u64)tsolen << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
	    ((u64)mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
	TXD->type_cmd_tso_mss = htole64(type_cmd_tso_mss);

	TXD->tunneling_params = htole32(0);
	buf->m_head = NULL;
	buf->eop_index = -1;

	if (++idx == que->num_desc)
		idx = 0;

	txr->avail--;
	txr->next_avail = idx;

	return TRUE;
}
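/*
** The TSO context descriptor consumes a ring entry of its own (hence
** the txr->avail-- above); the data descriptors carrying the payload
** are filled in afterwards by ixl_xmit().
*/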
/*
** ixl_get_tx_head - Retrieve the value from the
**    location the HW records its HEAD index
*/
static inline u32
ixl_get_tx_head(struct ixl_queue *que)
{
	struct tx_ring	*txr = &que->txr;
	void *head = &txr->base[que->num_desc];

	return LE32_TO_CPU(*(volatile __le32 *)head);
}
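/*
** The descriptor ring is sized with one extra slot past que->num_desc;
** with head writeback enabled the hardware DMAs its current HEAD index
** into that slot, so the cleanup path never needs a register read.
*/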
/**********************************************************************
 *
 *  Examine each tx_buffer in the used queue. If the hardware is done
 *  processing the packet then free associated resources. The
 *  tx_buffer is put back on the free queue.
 *
 **********************************************************************/
bool
ixl_txeof(struct ixl_queue *que)
{
	struct tx_ring		*txr = &que->txr;
	u32			first, last, head, done, processed;
	struct ixl_tx_buf	*buf;
	struct i40e_tx_desc	*tx_desc, *eop_desc;

	mtx_assert(&txr->mtx, MA_OWNED);

#ifdef DEV_NETMAP
	// XXX todo: implement moderation
	if (netmap_tx_irq(que->vsi->ifp, que->me))
		return FALSE;
#endif /* DEV_NETMAP */

	/* These are not the descriptors you seek, move along :) */
	if (txr->avail == que->num_desc) {
		que->busy = 0;
		return FALSE;
	}

	processed = 0;
	first = txr->next_to_clean;
	buf = &txr->buffers[first];
	tx_desc = (struct i40e_tx_desc *)&txr->base[first];
	last = buf->eop_index;
	if (last == -1)
		return FALSE;
	eop_desc = (struct i40e_tx_desc *)&txr->base[last];

	/* Get the Head WB value */
	head = ixl_get_tx_head(que);

	/*
	** Get the index of the first descriptor
	** BEYOND the EOP and call that 'done'.
	** I do this so the comparison in the
	** inner while loop below can be simple
	*/
	if (++last == que->num_desc) last = 0;
	done = last;

	bus_dmamap_sync(txr->dma.tag, txr->dma.map,
	    BUS_DMASYNC_POSTREAD);
	/*
	** The HEAD index of the ring is written in a
	** defined location, this rather than a done bit
	** is what is used to keep track of what must be
	** 'cleaned'.
	*/
	while (first != head) {
		/* We clean the range of the packet */
		while (first != done) {
			++txr->avail;
			++processed;

			if (buf->m_head) {
				txr->bytes += /* for ITR adjustment */
				    buf->m_head->m_pkthdr.len;
				txr->tx_bytes += /* for TX stats */
				    buf->m_head->m_pkthdr.len;
				bus_dmamap_sync(buf->tag,
				    buf->map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(buf->tag,
				    buf->map);
				m_freem(buf->m_head);
				buf->m_head = NULL;
				buf->map = NULL;
			}
			buf->eop_index = -1;

			if (++first == que->num_desc)
				first = 0;

			buf = &txr->buffers[first];
			tx_desc = &txr->base[first];
		}
		++txr->packets;
		/* See if there is more work now */
		last = buf->eop_index;
		if (last != -1) {
			eop_desc = &txr->base[last];
			/* Get next done point */
			if (++last == que->num_desc) last = 0;
			done = last;
		} else
			break;
	}
	bus_dmamap_sync(txr->dma.tag, txr->dma.map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	txr->next_to_clean = first;

	/*
	** Hang detection: we know there's work outstanding
	** or the first return above would have been taken,
	** so indicate an unsuccessful pass; in local_timer,
	** if the value grows too large the queue will be
	** considered hung. If anything has been cleaned
	** then reset the state.
	*/
	if ((processed == 0) && (que->busy != IXL_QUEUE_HUNG))
		++que->busy;

	if (processed)
		que->busy = 1; /* Note this turns off HUNG */

	/*
	 * If there are no pending descriptors, clear the timeout.
	 */
	if (txr->avail == que->num_desc) {
		que->busy = 0;
		return FALSE;
	}

	return TRUE;
}
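/*
** Cleanup above is driven by the head writeback value rather than a
** per-descriptor DD bit: everything from next_to_clean up to (but not
** including) the reported HEAD is done, released one EOP range at a time.
*/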
/*********************************************************************
 *
 *  Refresh mbuf buffers for RX descriptor rings
 *   - now keeps its own state so discards due to resource
 *     exhaustion are unnecessary, if an mbuf cannot be obtained
 *     it just returns, keeping its placeholder, thus it can simply
 *     be recalled to try again.
 *
 **********************************************************************/
static void
ixl_refresh_mbufs(struct ixl_queue *que, int limit)
{
	struct ixl_vsi		*vsi = que->vsi;
	struct rx_ring		*rxr = &que->rxr;
	bus_dma_segment_t	hseg[1];
	bus_dma_segment_t	pseg[1];
	struct ixl_rx_buf	*buf;
	struct mbuf		*mh, *mp;
	int			i, j, nsegs, error;
	bool			refreshed = FALSE;

	i = j = rxr->next_refresh;
	/* Control the loop with one beyond */
	if (++j == que->num_desc)
		j = 0;

	while (j != limit) {
		buf = &rxr->buffers[i];
		if (rxr->hdr_split == FALSE)
			goto no_split;

		if (buf->m_head == NULL) {
			mh = m_gethdr(M_NOWAIT, MT_DATA);
			if (mh == NULL)
				goto update;
		} else
			mh = buf->m_head;

		mh->m_pkthdr.len = mh->m_len = MHLEN;
		mh->m_flags |= M_PKTHDR;
		/* Get the memory mapping */
		error = bus_dmamap_load_mbuf_sg(rxr->htag,
		    buf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("Refresh mbufs: hdr dmamap load"
			    " failure - %d\n", error);
			m_free(mh);
			buf->m_head = NULL;
			goto update;
		}
		buf->m_head = mh;
		bus_dmamap_sync(rxr->htag, buf->hmap,
		    BUS_DMASYNC_PREREAD);
		rxr->base[i].read.hdr_addr =
		    htole64(hseg[0].ds_addr);

no_split:
		if (buf->m_pack == NULL) {
			mp = m_getjcl(M_NOWAIT, MT_DATA,
			    M_PKTHDR, rxr->mbuf_sz);
			if (mp == NULL)
				goto update;
		} else
			mp = buf->m_pack;

		mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
		/* Get the memory mapping */
		error = bus_dmamap_load_mbuf_sg(rxr->ptag,
		    buf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("Refresh mbufs: payload dmamap load"
			    " failure - %d\n", error);
			m_free(mp);
			buf->m_pack = NULL;
			goto update;
		}
		buf->m_pack = mp;
		bus_dmamap_sync(rxr->ptag, buf->pmap,
		    BUS_DMASYNC_PREREAD);
		rxr->base[i].read.pkt_addr =
		    htole64(pseg[0].ds_addr);
		/* Used only when doing header split */
		rxr->base[i].read.hdr_addr = 0;

		refreshed = TRUE;
		/* Next is precalculated */
		i = j;
		rxr->next_refresh = i;
		if (++j == que->num_desc)
			j = 0;
	}
update:
	if (refreshed) /* Update hardware tail index */
		wr32(vsi->hw, rxr->tail, rxr->next_refresh);
	return;
}
/*********************************************************************
 *
 *  Allocate memory for rx_buffer structures. Since we use one
 *  rx_buffer per descriptor, the maximum number of rx_buffer's
 *  that we'll need is equal to the number of receive descriptors
 *  that we've defined.
 *
 **********************************************************************/
int
ixl_allocate_rx_data(struct ixl_queue *que)
{
	struct rx_ring		*rxr = &que->rxr;
	struct ixl_vsi		*vsi = que->vsi;
	device_t		dev = vsi->dev;
	struct ixl_rx_buf	*buf;
	int			i, bsize, error;

	bsize = sizeof(struct ixl_rx_buf) * que->num_desc;
	if (!(rxr->buffers =
	    (struct ixl_rx_buf *) malloc(bsize,
	    M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate rx_buffer memory\n");
		error = ENOMEM;
		return (error);
	}

	if ((error = bus_dma_tag_create(NULL,		/* parent */
			   1, 0,			/* alignment, bounds */
			   BUS_SPACE_MAXADDR,		/* lowaddr */
			   BUS_SPACE_MAXADDR,		/* highaddr */
			   NULL, NULL,			/* filter, filterarg */
			   MSIZE,			/* maxsize */
			   1,				/* nsegments */
			   MSIZE,			/* maxsegsize */
			   0,				/* flags */
			   NULL,			/* lockfunc */
			   NULL,			/* lockfuncarg */
			   &rxr->htag))) {
		device_printf(dev, "Unable to create RX DMA htag\n");
		return (error);
	}

	if ((error = bus_dma_tag_create(NULL,		/* parent */
			   1, 0,			/* alignment, bounds */
			   BUS_SPACE_MAXADDR,		/* lowaddr */
			   BUS_SPACE_MAXADDR,		/* highaddr */
			   NULL, NULL,			/* filter, filterarg */
			   MJUM16BYTES,			/* maxsize */
			   1,				/* nsegments */
			   MJUM16BYTES,			/* maxsegsize */
			   0,				/* flags */
			   NULL,			/* lockfunc */
			   NULL,			/* lockfuncarg */
			   &rxr->ptag))) {
		device_printf(dev, "Unable to create RX DMA ptag\n");
		return (error);
	}

	for (i = 0; i < que->num_desc; i++) {
		buf = &rxr->buffers[i];
		error = bus_dmamap_create(rxr->htag,
		    BUS_DMA_NOWAIT, &buf->hmap);
		if (error) {
			device_printf(dev, "Unable to create RX head map\n");
			break;
		}
		error = bus_dmamap_create(rxr->ptag,
		    BUS_DMA_NOWAIT, &buf->pmap);
		if (error) {
			device_printf(dev, "Unable to create RX pkt map\n");
			break;
		}
	}

	return (error);
}
/*********************************************************************
 *
 *  (Re)Initialize the queue receive ring and its buffers.
 *
 **********************************************************************/
int
ixl_init_rx_ring(struct ixl_queue *que)
{
	struct rx_ring		*rxr = &que->rxr;
	struct ixl_vsi		*vsi = que->vsi;
#if defined(INET6) || defined(INET)
	struct ifnet		*ifp = vsi->ifp;
	struct lro_ctrl		*lro = &rxr->lro;
#endif
	struct ixl_rx_buf	*buf;
	bus_dma_segment_t	pseg[1], hseg[1];
	int			rsize, nsegs, error = 0;
#ifdef DEV_NETMAP
	struct netmap_adapter *na = NA(que->vsi->ifp);
	struct netmap_slot *slot;
#endif /* DEV_NETMAP */

	IXL_RX_LOCK(rxr);
#ifdef DEV_NETMAP
	/* same as in ixl_init_tx_ring() */
	slot = netmap_reset(na, NR_RX, que->me, 0);
#endif /* DEV_NETMAP */
	/* Clear the ring contents */
	rsize = roundup2(que->num_desc *
	    sizeof(union i40e_rx_desc), DBA_ALIGN);
	bzero((void *)rxr->base, rsize);
	/* Cleanup any existing buffers */
	for (int i = 0; i < que->num_desc; i++) {
		buf = &rxr->buffers[i];
		if (buf->m_head != NULL) {
			bus_dmamap_sync(rxr->htag, buf->hmap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(rxr->htag, buf->hmap);
			buf->m_head->m_flags |= M_PKTHDR;
			m_freem(buf->m_head);
		}
		if (buf->m_pack != NULL) {
			bus_dmamap_sync(rxr->ptag, buf->pmap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(rxr->ptag, buf->pmap);
			buf->m_pack->m_flags |= M_PKTHDR;
			m_freem(buf->m_pack);
		}
		buf->m_head = NULL;
		buf->m_pack = NULL;
	}

	/* header split is off */
	rxr->hdr_split = FALSE;

	/* Now replenish the mbufs */
	for (int j = 0; j != que->num_desc; ++j) {
		struct mbuf	*mh, *mp;

		buf = &rxr->buffers[j];
#ifdef DEV_NETMAP
		/*
		 * In netmap mode, fill the map and set the buffer
		 * address in the NIC ring, considering the offset
		 * between the netmap and NIC rings (see comment in
		 * ixgbe_setup_transmit_ring() ). No need to allocate
		 * an mbuf, so end the block with a continue;
		 */
		if (slot) {
			int sj = netmap_idx_n2k(&na->rx_rings[que->me], j);
			uint64_t paddr;
			void *addr;

			addr = PNMB(na, slot + sj, &paddr);
			netmap_load_map(na, rxr->dma.tag, buf->pmap, addr);
			/* Update descriptor and the cached value */
			rxr->base[j].read.pkt_addr = htole64(paddr);
			rxr->base[j].read.hdr_addr = 0;
			continue;
		}
#endif /* DEV_NETMAP */
		/*
		** Don't allocate mbufs if not
		** doing header split, it's wasteful
		*/
		if (rxr->hdr_split == FALSE)
			goto skip_head;

		/* First the header */
		buf->m_head = m_gethdr(M_NOWAIT, MT_DATA);
		if (buf->m_head == NULL) {
			error = ENOBUFS;
			goto fail;
		}
		m_adj(buf->m_head, ETHER_ALIGN);
		mh = buf->m_head;
		mh->m_len = mh->m_pkthdr.len = MHLEN;
		mh->m_flags |= M_PKTHDR;
		/* Get the memory mapping */
		error = bus_dmamap_load_mbuf_sg(rxr->htag,
		    buf->hmap, buf->m_head, hseg,
		    &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) /* Nothing elegant to do here */
			goto fail;
		bus_dmamap_sync(rxr->htag,
		    buf->hmap, BUS_DMASYNC_PREREAD);
		/* Update descriptor */
		rxr->base[j].read.hdr_addr = htole64(hseg[0].ds_addr);

skip_head:
		/* Now the payload cluster */
		buf->m_pack = m_getjcl(M_NOWAIT, MT_DATA,
		    M_PKTHDR, rxr->mbuf_sz);
		if (buf->m_pack == NULL) {
			error = ENOBUFS;
			goto fail;
		}
		mp = buf->m_pack;
		mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
		/* Get the memory mapping */
		error = bus_dmamap_load_mbuf_sg(rxr->ptag,
		    buf->pmap, mp, pseg,
		    &nsegs, BUS_DMA_NOWAIT);
		if (error != 0)
			goto fail;
		bus_dmamap_sync(rxr->ptag,
		    buf->pmap, BUS_DMASYNC_PREREAD);
		/* Update descriptor */
		rxr->base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
		rxr->base[j].read.hdr_addr = 0;
	}

	/* Setup our descriptor indices */
	rxr->next_check = 0;
	rxr->next_refresh = 0;
	rxr->lro_enabled = FALSE;
	rxr->split = 0;
	rxr->bytes = 0;
	rxr->discard = FALSE;

	wr32(vsi->hw, rxr->tail, que->num_desc - 1);
	ixl_flush(vsi->hw);

#if defined(INET6) || defined(INET)
	/*
	** Now set up the LRO interface:
	*/
	if (ifp->if_capenable & IFCAP_LRO) {
		int err = tcp_lro_init(lro);
		if (err) {
			if_printf(ifp, "queue %d: LRO Initialization failed!\n", que->me);
			goto fail;
		}
		INIT_DBG_IF(ifp, "queue %d: RX Soft LRO Initialized", que->me);
		rxr->lro_enabled = TRUE;
		lro->ifp = vsi->ifp;
	}
#endif

	bus_dmamap_sync(rxr->dma.tag, rxr->dma.map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

fail:
	IXL_RX_UNLOCK(rxr);
	return (error);
}
/*********************************************************************
 *
 *  Free station receive ring data structures
 *
 **********************************************************************/
void
ixl_free_que_rx(struct ixl_queue *que)
{
	struct rx_ring		*rxr = &que->rxr;
	struct ixl_rx_buf	*buf;

	INIT_DBG_IF(que->vsi->ifp, "queue %d: begin", que->me);

	/* Cleanup any existing buffers */
	if (rxr->buffers != NULL) {
		for (int i = 0; i < que->num_desc; i++) {
			buf = &rxr->buffers[i];
			if (buf->m_head != NULL) {
				bus_dmamap_sync(rxr->htag, buf->hmap,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(rxr->htag, buf->hmap);
				buf->m_head->m_flags |= M_PKTHDR;
				m_freem(buf->m_head);
			}
			if (buf->m_pack != NULL) {
				bus_dmamap_sync(rxr->ptag, buf->pmap,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(rxr->ptag, buf->pmap);
				buf->m_pack->m_flags |= M_PKTHDR;
				m_freem(buf->m_pack);
			}
			buf->m_head = NULL;
			buf->m_pack = NULL;
			if (buf->hmap != NULL) {
				bus_dmamap_destroy(rxr->htag, buf->hmap);
				buf->hmap = NULL;
			}
			if (buf->pmap != NULL) {
				bus_dmamap_destroy(rxr->ptag, buf->pmap);
				buf->pmap = NULL;
			}
		}
		free(rxr->buffers, M_DEVBUF);
		rxr->buffers = NULL;
	}

	if (rxr->htag != NULL) {
		bus_dma_tag_destroy(rxr->htag);
		rxr->htag = NULL;
	}
	if (rxr->ptag != NULL) {
		bus_dma_tag_destroy(rxr->ptag);
		rxr->ptag = NULL;
	}

	INIT_DBG_IF(que->vsi->ifp, "queue %d: end", que->me);
	return;
}
static __inline void
ixl_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u8 ptype)
{

#if defined(INET6) || defined(INET)
	/*
	 * At the moment LRO is only for IPv4/TCP packets, and the TCP
	 * checksum of the packet should be computed by hardware. Also it
	 * should not have a VLAN tag in its ethernet header.
	 */
	if (rxr->lro_enabled &&
	    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
	    (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
	    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
		/*
		 * Send to the stack if:
		 *  - LRO not enabled, or
		 *  - no LRO resources, or
		 *  - lro enqueue fails
		 */
		if (rxr->lro.lro_cnt != 0)
			if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
				return;
	}
#endif
	IXL_RX_UNLOCK(rxr);
	(*ifp->if_input)(ifp, m);
	IXL_RX_LOCK(rxr);
}
static __inline void
ixl_rx_discard(struct rx_ring *rxr, int i)
{
	struct ixl_rx_buf	*rbuf;

	rbuf = &rxr->buffers[i];

	if (rbuf->fmp != NULL) {/* Partial chain ? */
		rbuf->fmp->m_flags |= M_PKTHDR;
		m_freem(rbuf->fmp);
		rbuf->fmp = NULL;
	}

	/*
	** With advanced descriptors the writeback
	** clobbers the buffer addrs, so it's easier
	** to just free the existing mbufs and take
	** the normal refresh path to get new buffers
	** and mapping.
	*/
	if (rbuf->m_head) {
		m_free(rbuf->m_head);
		rbuf->m_head = NULL;
	}

	if (rbuf->m_pack) {
		m_free(rbuf->m_pack);
		rbuf->m_pack = NULL;
	}

	return;
}
#ifdef RSS
/*
** ixl_ptype_to_hash: parse the packet type
** to determine the appropriate hash.
*/
static inline int
ixl_ptype_to_hash(u8 ptype)
{
	struct i40e_rx_ptype_decoded	decoded;
	u8				ex = 0;

	decoded = decode_rx_desc_ptype(ptype);
	ex = decoded.outer_frag;

	if (!decoded.known)
		return M_HASHTYPE_OPAQUE;

	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_L2)
		return M_HASHTYPE_OPAQUE;

	/* Note: anything that gets to this point is IP */
	if (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6) {
		switch (decoded.inner_prot) {
		case I40E_RX_PTYPE_INNER_PROT_TCP:
			if (ex)
				return M_HASHTYPE_RSS_TCP_IPV6_EX;
			else
				return M_HASHTYPE_RSS_TCP_IPV6;
		case I40E_RX_PTYPE_INNER_PROT_UDP:
			if (ex)
				return M_HASHTYPE_RSS_UDP_IPV6_EX;
			else
				return M_HASHTYPE_RSS_UDP_IPV6;
		default:
			if (ex)
				return M_HASHTYPE_RSS_IPV6_EX;
			else
				return M_HASHTYPE_RSS_IPV6;
		}
	}
	if (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4) {
		switch (decoded.inner_prot) {
		case I40E_RX_PTYPE_INNER_PROT_TCP:
			return M_HASHTYPE_RSS_TCP_IPV4;
		case I40E_RX_PTYPE_INNER_PROT_UDP:
			if (ex)
				return M_HASHTYPE_RSS_UDP_IPV4_EX;
			else
				return M_HASHTYPE_RSS_UDP_IPV4;
		default:
			return M_HASHTYPE_RSS_IPV4;
		}
	}
	/* We should never get here! */
	return M_HASHTYPE_OPAQUE;
}
#endif /* RSS */
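/*
** The decoded ptype mirrors the headers the hardware hashed over, so
** the M_HASHTYPE_* value reported here matches the 32-bit RSS hash
** that ixl_rxeof() copies from qword0.hi_dword.rss into the flowid.
*/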
/*********************************************************************
 *
 *  This routine executes in interrupt context. It replenishes
 *  the mbufs in the descriptor and sends data which has been
 *  dma'ed into host memory to upper layer.
 *
 *  We loop at most count times if count is > 0, or until done if
 *  count < 0.
 *
 *  Return TRUE for more work, FALSE for all clean.
 *********************************************************************/
bool
ixl_rxeof(struct ixl_queue *que, int count)
{
	struct ixl_vsi		*vsi = que->vsi;
	struct rx_ring		*rxr = &que->rxr;
	struct ifnet		*ifp = vsi->ifp;
#if defined(INET6) || defined(INET)
	struct lro_ctrl		*lro = &rxr->lro;
	struct lro_entry	*queued;
#endif
	int			i, nextp, processed = 0;
	union i40e_rx_desc	*cur;
	struct ixl_rx_buf	*rbuf, *nbuf;

	IXL_RX_LOCK(rxr);

#ifdef DEV_NETMAP
	if (netmap_rx_irq(ifp, que->me, &count)) {
		IXL_RX_UNLOCK(rxr);
		return (FALSE);
	}
#endif /* DEV_NETMAP */

	for (i = rxr->next_check; count != 0;) {
		struct mbuf	*sendmp, *mh, *mp;
		u32		rsc, status, error;
		u16		hlen, plen, vtag;
		u64		qword;
		u8		ptype;
		bool		eop;

		/* Sync the ring. */
		bus_dmamap_sync(rxr->dma.tag, rxr->dma.map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur = &rxr->base[i];
		qword = le64toh(cur->wb.qword1.status_error_len);
		status = (qword & I40E_RXD_QW1_STATUS_MASK)
		    >> I40E_RXD_QW1_STATUS_SHIFT;
		error = (qword & I40E_RXD_QW1_ERROR_MASK)
		    >> I40E_RXD_QW1_ERROR_SHIFT;
		plen = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK)
		    >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
		hlen = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK)
		    >> I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
		ptype = (qword & I40E_RXD_QW1_PTYPE_MASK)
		    >> I40E_RXD_QW1_PTYPE_SHIFT;

		if ((status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) == 0) {
			++rxr->not_done;
			break;
		}
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;

		count--;
		sendmp = NULL;
		nbuf = NULL;
		rsc = 0;
		cur->wb.qword1.status_error_len = 0;
		rbuf = &rxr->buffers[i];
		mh = rbuf->m_head;
		mp = rbuf->m_pack;
		eop = (status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT));
		if (status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT))
			vtag = le16toh(cur->wb.qword0.lo_dword.l2tag1);
		else
			vtag = 0;

		/*
		** Make sure bad packets are discarded,
		** note that only EOP descriptor has valid
		** error results.
		*/
		if (eop && (error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
			rxr->discarded++;
			ixl_rx_discard(rxr, i);
			goto next_desc;
		}

		/* Prefetch the next buffer */
		if (!eop) {
			nextp = i + 1;
			if (nextp == que->num_desc)
				nextp = 0;
			nbuf = &rxr->buffers[nextp];
			prefetch(nbuf);
		}

		/*
		** The header mbuf is ONLY used when header
		** split is enabled, otherwise we get normal
		** behavior, ie, both header and payload
		** are DMA'd into the payload buffer.
		**
		** Rather than using the fmp/lmp global pointers
		** we now keep the head of a packet chain in the
		** buffer struct and pass this along from one
		** descriptor to the next, until we get EOP.
		*/
		if (rxr->hdr_split && (rbuf->fmp == NULL)) {
			if (hlen > IXL_RX_HDR)
				hlen = IXL_RX_HDR;
			mh->m_len = hlen;
			mh->m_flags |= M_PKTHDR;
			mh->m_next = NULL;
			mh->m_pkthdr.len = mh->m_len;
			/* Null buf pointer so it is refreshed */
			rbuf->m_head = NULL;
			/*
			** Check the payload length, this
			** could be zero if it's a small
			** packet.
			*/
			if (plen > 0) {
				mp->m_len = plen;
				mp->m_next = NULL;
				mp->m_flags &= ~M_PKTHDR;
				mh->m_next = mp;
				mh->m_pkthdr.len += mp->m_len;
				/* Null buf pointer so it is refreshed */
				rbuf->m_pack = NULL;
				rxr->split++;
			}
			/*
			** Now create the forward
			** chain so when complete
			** we won't have to.
			*/
			if (eop == 0) {
				/* stash the chain head */
				nbuf->fmp = mh;
				/* Make forward chain */
				if (plen)
					mp->m_next = nbuf->m_pack;
				else
					mh->m_next = nbuf->m_pack;
			} else {
				/* Singlet, prepare to send */
				sendmp = mh;
				if (vtag) {
					sendmp->m_pkthdr.ether_vtag = vtag;
					sendmp->m_flags |= M_VLANTAG;
				}
			}
		} else {
			/*
			** Either no header split, or a
			** secondary piece of a fragmented
			** split packet.
			*/
			mp->m_len = plen;
			/*
			** See if there is a stored head
			** that determines what we are
			*/
			sendmp = rbuf->fmp;
			rbuf->m_pack = rbuf->fmp = NULL;

			if (sendmp != NULL) /* secondary frag */
				sendmp->m_pkthdr.len += mp->m_len;
			else {
				/* first desc of a non-ps chain */
				sendmp = mp;
				sendmp->m_flags |= M_PKTHDR;
				sendmp->m_pkthdr.len = mp->m_len;
				if (vtag) {
					sendmp->m_pkthdr.ether_vtag = vtag;
					sendmp->m_flags |= M_VLANTAG;
				}
			}
			/* Pass the head pointer on */
			if (eop == 0) {
				nbuf->fmp = sendmp;
				sendmp = NULL;
				mp->m_next = nbuf->m_pack;
			}
		}
		++processed;
		/* Sending this frame? */
		if (eop) {
			sendmp->m_pkthdr.rcvif = ifp;
			/* gather stats */
			rxr->rx_packets++;
			rxr->rx_bytes += sendmp->m_pkthdr.len;
			/* capture data for dynamic ITR adjustment */
			rxr->packets++;
			rxr->bytes += sendmp->m_pkthdr.len;
			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
				ixl_rx_checksum(sendmp, status, error, ptype);
#ifdef RSS
			sendmp->m_pkthdr.flowid =
			    le32toh(cur->wb.qword0.hi_dword.rss);
			M_HASHTYPE_SET(sendmp, ixl_ptype_to_hash(ptype));
#else
			sendmp->m_pkthdr.flowid = que->msix;
			M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE);
#endif
		}
next_desc:
		bus_dmamap_sync(rxr->dma.tag, rxr->dma.map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Advance our pointers to the next descriptor. */
		if (++i == que->num_desc)
			i = 0;

		/* Now send to the stack or do LRO */
		if (sendmp != NULL) {
			rxr->next_check = i;
			ixl_rx_input(rxr, ifp, sendmp, ptype);
			i = rxr->next_check;
		}

		/* Every 8 descriptors we go to refresh mbufs */
		if (processed == 8) {
			ixl_refresh_mbufs(que, i);
			processed = 0;
		}
	}

	/* Refresh any remaining buf structs */
	if (ixl_rx_unrefreshed(que))
		ixl_refresh_mbufs(que, i);

	rxr->next_check = i;

#if defined(INET6) || defined(INET)
	/*
	** Flush any outstanding LRO work
	*/
	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&lro->lro_active, next);
		tcp_lro_flush(lro, queued);
	}
#endif

	IXL_RX_UNLOCK(rxr);
	return (FALSE);
}
/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack about the status of checksum so that stack
 *  doesn't spend time verifying the checksum.
 *
 *********************************************************************/
static void
ixl_rx_checksum(struct mbuf *mp, u32 status, u32 error, u8 ptype)
{
	struct i40e_rx_ptype_decoded decoded;

	decoded = decode_rx_desc_ptype(ptype);

	/* Errors? */
	if (error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
	    (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))) {
		mp->m_pkthdr.csum_flags = 0;
		return;
	}

	/* IPv6 with extension headers likely have bad csum */
	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
	    decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
		if (status &
		    (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) {
			mp->m_pkthdr.csum_flags = 0;
			return;
		}

	/* IP Checksum Good */
	mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
	mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;

	if (status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)) {
		mp->m_pkthdr.csum_flags |=
		    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
		mp->m_pkthdr.csum_data |= htons(0xffff);
	}
	return;
}
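/*
** CSUM_DATA_VALID | CSUM_PSEUDO_HDR with a csum_data of 0xffff tells
** the stack the L4 checksum verified clean including the pseudo-header,
** so no software checksum pass is needed.
*/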
#if __FreeBSD_version >= 1100000
uint64_t
ixl_get_counter(if_t ifp, ift_counter cnt)
{
	struct ixl_vsi *vsi;

	vsi = if_getsoftc(ifp);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (vsi->ipackets);
	case IFCOUNTER_IERRORS:
		return (vsi->ierrors);
	case IFCOUNTER_OPACKETS:
		return (vsi->opackets);
	case IFCOUNTER_OERRORS:
		return (vsi->oerrors);
	case IFCOUNTER_COLLISIONS:
		/* Collisions are by standard impossible in 40G/10G Ethernet */
		return (0);
	case IFCOUNTER_IBYTES:
		return (vsi->ibytes);
	case IFCOUNTER_OBYTES:
		return (vsi->obytes);
	case IFCOUNTER_IMCASTS:
		return (vsi->imcasts);
	case IFCOUNTER_OMCASTS:
		return (vsi->omcasts);
	case IFCOUNTER_IQDROPS:
		return (vsi->iqdrops);
	case IFCOUNTER_OQDROPS:
		return (vsi->oqdrops);
	case IFCOUNTER_NOPROTO:
		return (vsi->noproto);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}
#endif