/******************************************************************************

  Copyright (c) 2013-2014, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/

/*
** IXL driver TX/RX Routines:
**  This was separated to allow usage by
**  both the BASE and the VF drivers.
*/

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
#include "ixl.h"

#ifdef RSS
#include <net/rss_config.h>
#endif

/* Local Prototypes */
static void ixl_rx_checksum(struct mbuf *, u32, u32, u8);
static void ixl_refresh_mbufs(struct ixl_queue *, int);
static int  ixl_xmit(struct ixl_queue *, struct mbuf **);
static int  ixl_tx_setup_offload(struct ixl_queue *,
    struct mbuf *, u32 *, u32 *);
static bool ixl_tso_setup(struct ixl_queue *, struct mbuf *);

static __inline void ixl_rx_discard(struct rx_ring *, int);
static __inline void ixl_rx_input(struct rx_ring *, struct ifnet *,
    struct mbuf *, u8);

/*
** Multiqueue Transmit driver
*/
int
ixl_mq_start(struct ifnet *ifp, struct mbuf *m)
{
    struct ixl_vsi *vsi = ifp->if_softc;
    struct ixl_queue *que;
    struct tx_ring *txr;
    int err, i;
#ifdef RSS
    u32 bucket_id;
#endif

    /*
    ** Which queue to use:
    **
    ** When doing RSS, map it to the same outbound
    ** queue as the incoming flow would be mapped to.
    ** If everything is setup correctly, it should be
    ** the same bucket that the current CPU we're on is.
    */
    if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
#ifdef RSS
        if (rss_hash2bucket(m->m_pkthdr.flowid,
            M_HASHTYPE_GET(m), &bucket_id) == 0) {
            i = bucket_id % vsi->num_queues;
        } else
#endif
            i = m->m_pkthdr.flowid % vsi->num_queues;
    } else
        i = curcpu % vsi->num_queues;
    /*
    ** This may not be perfect, but until something
    ** better comes along it will keep from scheduling
    ** on stalled queues.
    */
    if (((1 << i) & vsi->active_queues) == 0)
        i = ffsl(vsi->active_queues);

    que = &vsi->queues[i];
    txr = &que->txr;

    err = drbr_enqueue(ifp, txr->br, m);
    if (err)
        return (err);
    if (IXL_TX_TRYLOCK(txr)) {
        ixl_mq_start_locked(ifp, txr);
        IXL_TX_UNLOCK(txr);
    } else
        taskqueue_enqueue(que->tq, &que->tx_task);

    return (0);
}

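/*
** Drain the buf_ring for this queue; called with the TX lock held.
** Returns early when the interface is not running or the link is
** down, and triggers a cleanup pass when free descriptors run low.
*/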
int
ixl_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
{
    struct ixl_queue *que = txr->que;
    struct ixl_vsi *vsi = que->vsi;
    struct mbuf *next;
    int err = 0;

    if (((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) ||
        vsi->link_active == 0)
        return (ENETDOWN);

    /* Process the transmit queue */
    while ((next = drbr_peek(ifp, txr->br)) != NULL) {
        if ((err = ixl_xmit(que, &next)) != 0) {
            if (next == NULL)
                drbr_advance(ifp, txr->br);
            else
                drbr_putback(ifp, txr->br, next);
            break;
        }
        drbr_advance(ifp, txr->br);
        /* Send a copy of the frame to the BPF listener */
        ETHER_BPF_MTAP(ifp, next);
        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
            break;
    }

    if (txr->avail < IXL_TX_CLEANUP_THRESHOLD)
        ixl_txeof(que);

    return (err);
}

/*
 * Called from a taskqueue to drain queued transmit packets.
 */
void
ixl_deferred_mq_start(void *arg, int pending)
{
    struct ixl_queue *que = arg;
    struct tx_ring *txr = &que->txr;
    struct ixl_vsi *vsi = que->vsi;
    struct ifnet *ifp = vsi->ifp;

    IXL_TX_LOCK(txr);
    if (!drbr_empty(ifp, txr->br))
        ixl_mq_start_locked(ifp, txr);
    IXL_TX_UNLOCK(txr);
}

/*
** Flush all queue ring buffers
*/
void
ixl_qflush(struct ifnet *ifp)
{
    struct ixl_vsi *vsi = ifp->if_softc;

    for (int i = 0; i < vsi->num_queues; i++) {
        struct ixl_queue *que = &vsi->queues[i];
        struct tx_ring *txr = &que->txr;
        struct mbuf *m;

        IXL_TX_LOCK(txr);
        while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
            m_freem(m);
        IXL_TX_UNLOCK(txr);
    }
    if_qflush(ifp);
}

/*
** Find mbuf chains passed to the driver
** that are 'sparse', using more than 8
** mbufs to deliver an mss-size chunk of data
*/
static bool
ixl_tso_detect_sparse(struct mbuf *mp)
{
    struct mbuf *m;
    int num = 0, mss;
    bool ret = FALSE;

    mss = mp->m_pkthdr.tso_segsz;
    for (m = mp->m_next; m != NULL; m = m->m_next) {
        num++;
        mss -= m->m_len;
        if (mss < 1)
            break;
        if (m->m_next == NULL)
            break;
    }
    if (num > IXL_SPARSE_CHAIN)
        ret = TRUE;

    return (ret);
}

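/*
** Note: IXL_SPARSE_CHAIN presumably reflects the hardware limit on
** how many data descriptors may describe one TSO segment; chains
** flagged here are defragmented in ixl_xmit() before being mapped.
*/
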
/*********************************************************************
 *
 *  This routine maps the mbufs to tx descriptors, allowing the
 *  TX engine to transmit the packets.
 *	- return 0 on success, positive on failure
 *
 **********************************************************************/
#define IXL_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)

static int
ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp)
{
    struct ixl_vsi *vsi = que->vsi;
    struct i40e_hw *hw = vsi->hw;
    struct tx_ring *txr = &que->txr;
    struct ixl_tx_buf *buf;
    struct i40e_tx_desc *txd = NULL;
    struct mbuf *m_head, *m;
    int i, j, error, nsegs, maxsegs;
    int first, last = 0;
    u16 vtag = 0;
    u32 cmd, off;
    bus_dmamap_t map;
    bus_dma_tag_t tag;
    bus_dma_segment_t segs[IXL_MAX_TSO_SEGS];

    cmd = off = 0;
    m_head = *m_headp;

    /*
     * Important to capture the first descriptor
     * used because it will contain the index of
     * the one we tell the hardware to report back
     */
    first = txr->next_avail;
    buf = &txr->buffers[first];
    map = buf->map;
    tag = txr->tx_tag;
    maxsegs = IXL_MAX_TX_SEGS;

    if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
        /* Use larger mapping for TSO */
        tag = txr->tso_tag;
        maxsegs = IXL_MAX_TSO_SEGS;
        if (ixl_tso_detect_sparse(m_head)) {
            m = m_defrag(m_head, M_NOWAIT);
            if (m == NULL) {
                m_freem(*m_headp);
                *m_headp = NULL;
                return (ENOBUFS);
            }
            *m_headp = m;
            m_head = m;
        }
    }

    /*
     * Map the packet for DMA.
     */
    error = bus_dmamap_load_mbuf_sg(tag, map,
        *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

    if (error == EFBIG) {
        m = m_collapse(*m_headp, M_NOWAIT, maxsegs);
        if (m == NULL) {
            que->mbuf_defrag_failed++;
            m_freem(*m_headp);
            *m_headp = NULL;
            return (ENOBUFS);
        }
        *m_headp = m;

        /* Try it again */
        error = bus_dmamap_load_mbuf_sg(tag, map,
            *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

        if (error == ENOMEM) {
            que->tx_dma_setup++;
            return (error);
        } else if (error != 0) {
            que->tx_dma_setup++;
            m_freem(*m_headp);
            *m_headp = NULL;
            return (error);
        }
    } else if (error == ENOMEM) {
        que->tx_dma_setup++;
        return (error);
    } else if (error != 0) {
        que->tx_dma_setup++;
        m_freem(*m_headp);
        *m_headp = NULL;
        return (error);
    }

    /* Make certain there are enough descriptors */
    if (nsegs > txr->avail - 2) {
        txr->no_desc++;
        error = ENOBUFS;
        goto xmit_fail;
    }

    /* Set up the TSO/CSUM offload */
    if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) {
        error = ixl_tx_setup_offload(que, m_head, &cmd, &off);
        if (error)
            goto xmit_fail;
    }

    cmd |= I40E_TX_DESC_CMD_ICRC;
    /* Grab the VLAN tag */
    if (m_head->m_flags & M_VLANTAG) {
        cmd |= I40E_TX_DESC_CMD_IL2TAG1;
        vtag = htole16(m_head->m_pkthdr.ether_vtag);
    }

    i = txr->next_avail;
    for (j = 0; j < nsegs; j++) {
        bus_size_t seglen;

        buf = &txr->buffers[i];
        buf->tag = tag; /* Keep track of the type tag */
        txd = &txr->base[i];
        seglen = segs[j].ds_len;

        txd->buffer_addr = htole64(segs[j].ds_addr);
        txd->cmd_type_offset_bsz =
            htole64(I40E_TX_DESC_DTYPE_DATA
            | ((u64)cmd << I40E_TXD_QW1_CMD_SHIFT)
            | ((u64)off << I40E_TXD_QW1_OFFSET_SHIFT)
            | ((u64)seglen << I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
            | ((u64)vtag << I40E_TXD_QW1_L2TAG1_SHIFT));

        last = i; /* descriptor that will get completion IRQ */

        if (++i == que->num_desc)
            i = 0;

        buf->m_head = NULL;
        buf->eop_index = -1;
    }
    /* Set the last descriptor for report */
    txd->cmd_type_offset_bsz |=
        htole64(((u64)IXL_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT));
    txr->avail -= nsegs;
    txr->next_avail = i;

    buf->m_head = m_head;
    /* Swap the dma map between the first and last descriptor */
    txr->buffers[first].map = buf->map;
    buf->map = map;
    bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE);

    /* Set the index of the descriptor that will be marked done */
    buf = &txr->buffers[first];
    buf->eop_index = last;

    bus_dmamap_sync(txr->dma.tag, txr->dma.map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    /*
     * Advance the Transmit Descriptor Tail (Tdt), this tells the
     * hardware that this frame is available to transmit.
     */
    ++txr->total_packets;
    wr32(hw, txr->tail, i);

    ixl_flush(hw);
    /* Mark outstanding work */
    if (que->busy == 0)
        que->busy = 1;
    return (0);

xmit_fail:
    bus_dmamap_unload(tag, buf->map);
    return (error);
}

/*********************************************************************
 *
 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
 *  the information needed to transmit a packet on the wire. This is
 *  called only once at attach, setup is done every reset.
 *
 **********************************************************************/
int
ixl_allocate_tx_data(struct ixl_queue *que)
{
    struct tx_ring *txr = &que->txr;
    struct ixl_vsi *vsi = que->vsi;
    device_t dev = vsi->dev;
    struct ixl_tx_buf *buf;
    int error = 0;

    /*
     * Setup DMA descriptor areas.
     */
    if ((error = bus_dma_tag_create(NULL,	/* parent */
        1, 0,			/* alignment, bounds */
        BUS_SPACE_MAXADDR,	/* lowaddr */
        BUS_SPACE_MAXADDR,	/* highaddr */
        NULL, NULL,		/* filter, filterarg */
        IXL_TSO_SIZE,		/* maxsize */
        IXL_MAX_TX_SEGS,	/* nsegments */
        PAGE_SIZE,		/* maxsegsize */
        0,			/* flags */
        NULL,			/* lockfunc */
        NULL,			/* lockfuncarg */
        &txr->tx_tag))) {
        device_printf(dev, "Unable to allocate TX DMA tag\n");
        goto fail;
    }

    /* Make a special tag for TSO */
    if ((error = bus_dma_tag_create(NULL,	/* parent */
        1, 0,			/* alignment, bounds */
        BUS_SPACE_MAXADDR,	/* lowaddr */
        BUS_SPACE_MAXADDR,	/* highaddr */
        NULL, NULL,		/* filter, filterarg */
        IXL_TSO_SIZE,		/* maxsize */
        IXL_MAX_TSO_SEGS,	/* nsegments */
        PAGE_SIZE,		/* maxsegsize */
        0,			/* flags */
        NULL,			/* lockfunc */
        NULL,			/* lockfuncarg */
        &txr->tso_tag))) {
        device_printf(dev, "Unable to allocate TX TSO DMA tag\n");
        goto fail;
    }

    if (!(txr->buffers =
        (struct ixl_tx_buf *) malloc(sizeof(struct ixl_tx_buf) *
        que->num_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
        device_printf(dev, "Unable to allocate tx_buffer memory\n");
        error = ENOMEM;
        goto fail;
    }

    /* Create the descriptor buffer default dma maps */
    buf = txr->buffers;
    for (int i = 0; i < que->num_desc; i++, buf++) {
        buf->tag = txr->tx_tag;
        error = bus_dmamap_create(buf->tag, 0, &buf->map);
        if (error != 0) {
            device_printf(dev, "Unable to create TX DMA map\n");
            goto fail;
        }
    }
fail:
    return (error);
}

/*********************************************************************
 *
 *  (Re)Initialize a queue transmit ring.
 *	- called by init, it clears the descriptor ring,
 *	  and frees any stale mbufs
 *
 **********************************************************************/
void
ixl_init_tx_ring(struct ixl_queue *que)
{
    struct tx_ring *txr = &que->txr;
    struct ixl_tx_buf *buf;

    /* Clear the old ring contents */
    IXL_TX_LOCK(txr);
    bzero((void *)txr->base,
        (sizeof(struct i40e_tx_desc)) * que->num_desc);

    /* Reset indices */
    txr->next_avail = 0;
    txr->next_to_clean = 0;

#ifdef IXL_FDIR
    /* Initialize flow director */
    txr->atr_rate = ixl_atr_rate;
    txr->atr_count = 0;
#endif

    /* Free any existing tx mbufs. */
    buf = txr->buffers;
    for (int i = 0; i < que->num_desc; i++, buf++) {
        if (buf->m_head != NULL) {
            bus_dmamap_sync(buf->tag, buf->map,
                BUS_DMASYNC_POSTWRITE);
            bus_dmamap_unload(buf->tag, buf->map);
            m_freem(buf->m_head);
            buf->m_head = NULL;
        }
        /* Clear the EOP index */
        buf->eop_index = -1;
    }

    /* Set number of descriptors available */
    txr->avail = que->num_desc;

    bus_dmamap_sync(txr->dma.tag, txr->dma.map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    IXL_TX_UNLOCK(txr);
}

/*********************************************************************
 *
 *  Free transmit ring related data structures.
 *
 **********************************************************************/
void
ixl_free_que_tx(struct ixl_queue *que)
{
    struct tx_ring *txr = &que->txr;
    struct ixl_tx_buf *buf;

    INIT_DBG_IF(que->vsi->ifp, "queue %d: begin", que->me);

    for (int i = 0; i < que->num_desc; i++) {
        buf = &txr->buffers[i];
        if (buf->m_head != NULL) {
            bus_dmamap_sync(buf->tag, buf->map,
                BUS_DMASYNC_POSTWRITE);
            bus_dmamap_unload(buf->tag, buf->map);
            m_freem(buf->m_head);
            buf->m_head = NULL;
            if (buf->map != NULL) {
                bus_dmamap_destroy(buf->tag, buf->map);
                buf->map = NULL;
            }
        } else if (buf->map != NULL) {
            bus_dmamap_unload(buf->tag, buf->map);
            bus_dmamap_destroy(buf->tag, buf->map);
            buf->map = NULL;
        }
    }
    if (txr->br != NULL)
        buf_ring_free(txr->br, M_DEVBUF);
    if (txr->buffers != NULL) {
        free(txr->buffers, M_DEVBUF);
        txr->buffers = NULL;
    }
    if (txr->tx_tag != NULL) {
        bus_dma_tag_destroy(txr->tx_tag);
        txr->tx_tag = NULL;
    }
    if (txr->tso_tag != NULL) {
        bus_dma_tag_destroy(txr->tso_tag);
        txr->tso_tag = NULL;
    }

    INIT_DBG_IF(que->vsi->ifp, "queue %d: end", que->me);
    return;
}

/*********************************************************************
 *
 *  Setup descriptor for hw offloads
 *
 **********************************************************************/
static int
ixl_tx_setup_offload(struct ixl_queue *que,
    struct mbuf *mp, u32 *cmd, u32 *off)
{
    struct ether_vlan_header *eh;
#ifdef INET
    struct ip *ip = NULL;
#endif
    struct tcphdr *th = NULL;
#ifdef INET6
    struct ip6_hdr *ip6;
#endif
    int elen, ip_hlen = 0, tcp_hlen;
    u16 etype;
    u8 ipproto = 0;
    bool tso = FALSE;

    /* Set up the TSO context descriptor if required */
    if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
        tso = ixl_tso_setup(que, mp);
        if (tso)
            ++que->tso;
        else
            return (ENXIO);
    }

    /*
     * Determine where frame payload starts.
     * Jump over vlan headers if already present,
     * helpful for QinQ too.
     */
    eh = mtod(mp, struct ether_vlan_header *);
    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
        etype = ntohs(eh->evl_proto);
        elen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
    } else {
        etype = ntohs(eh->evl_encap_proto);
        elen = ETHER_HDR_LEN;
    }

    switch (etype) {
#ifdef INET
    case ETHERTYPE_IP:
        ip = (struct ip *)(mp->m_data + elen);
        ip_hlen = ip->ip_hl << 2;
        ipproto = ip->ip_p;
        th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
        /* The IP checksum must be recalculated with TSO */
        if (tso)
            *cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
        else
            *cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
        break;
#endif
#ifdef INET6
    case ETHERTYPE_IPV6:
        ip6 = (struct ip6_hdr *)(mp->m_data + elen);
        ip_hlen = sizeof(struct ip6_hdr);
        ipproto = ip6->ip6_nxt;
        th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
        *cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
        break;
#endif
    default:
        break;
    }

    *off |= (elen >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
    *off |= (ip_hlen >> 2) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;

    switch (ipproto) {
    case IPPROTO_TCP:
        tcp_hlen = th->th_off << 2;
        if (mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_TCP_IPV6)) {
            *cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
            *off |= (tcp_hlen >> 2) <<
                I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
        }
#ifdef IXL_FDIR
        ixl_atr(que, th, etype);
#endif
        break;
    case IPPROTO_UDP:
        if (mp->m_pkthdr.csum_flags & (CSUM_UDP|CSUM_UDP_IPV6)) {
            *cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
            *off |= (sizeof(struct udphdr) >> 2) <<
                I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
        }
        break;
    case IPPROTO_SCTP:
        if (mp->m_pkthdr.csum_flags & (CSUM_SCTP|CSUM_SCTP_IPV6)) {
            *cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
            *off |= (sizeof(struct sctphdr) >> 2) <<
                I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
        }
        /* Fall Thru */
    default:
        break;
    }

    return (0);
}

/**********************************************************************
 *
 *  Setup context for hardware segmentation offload (TSO)
 *
 **********************************************************************/
static bool
ixl_tso_setup(struct ixl_queue *que, struct mbuf *mp)
{
    struct tx_ring *txr = &que->txr;
    struct i40e_tx_context_desc *TXD;
    struct ixl_tx_buf *buf;
    u32 cmd, mss, type, tsolen;
    u16 etype;
    int idx, elen, ip_hlen, tcp_hlen;
    struct ether_vlan_header *eh;
#ifdef INET
    struct ip *ip;
#endif
#ifdef INET6
    struct ip6_hdr *ip6;
#endif
#if defined(INET6) || defined(INET)
    struct tcphdr *th;
#endif
    u64 type_cmd_tso_mss;

    /*
     * Determine where frame payload starts.
     * Jump over vlan headers if already present
     */
    eh = mtod(mp, struct ether_vlan_header *);
    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
        elen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
        etype = eh->evl_proto;
    } else {
        elen = ETHER_HDR_LEN;
        etype = eh->evl_encap_proto;
    }

    switch (ntohs(etype)) {
#ifdef INET6
    case ETHERTYPE_IPV6:
        ip6 = (struct ip6_hdr *)(mp->m_data + elen);
        if (ip6->ip6_nxt != IPPROTO_TCP)
            return (FALSE);
        ip_hlen = sizeof(struct ip6_hdr);
        th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
        th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
        tcp_hlen = th->th_off << 2;
        break;
#endif
#ifdef INET
    case ETHERTYPE_IP:
        ip = (struct ip *)(mp->m_data + elen);
        if (ip->ip_p != IPPROTO_TCP)
            return (FALSE);
        ip->ip_sum = 0;
        ip_hlen = ip->ip_hl << 2;
        th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
        th->th_sum = in_pseudo(ip->ip_src.s_addr,
            ip->ip_dst.s_addr, htons(IPPROTO_TCP));
        tcp_hlen = th->th_off << 2;
        break;
#endif
    default:
        printf("%s: CSUM_TSO but no supported IP version (0x%04x)",
            __func__, ntohs(etype));
        return (FALSE);
    }

    /* Ensure we have at least the IP+TCP header in the first mbuf. */
    if (mp->m_len < elen + ip_hlen + sizeof(struct tcphdr))
        return (FALSE);

    idx = txr->next_avail;
    buf = &txr->buffers[idx];
    TXD = (struct i40e_tx_context_desc *) &txr->base[idx];
    tsolen = mp->m_pkthdr.len - (elen + ip_hlen + tcp_hlen);

    type = I40E_TX_DESC_DTYPE_CONTEXT;
    cmd = I40E_TX_CTX_DESC_TSO;
    mss = mp->m_pkthdr.tso_segsz;

    type_cmd_tso_mss = ((u64)type << I40E_TXD_CTX_QW1_DTYPE_SHIFT) |
        ((u64)cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
        ((u64)tsolen << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
        ((u64)mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
    TXD->type_cmd_tso_mss = htole64(type_cmd_tso_mss);

    TXD->tunneling_params = htole32(0);
    buf->m_head = NULL;
    buf->eop_index = -1;

    if (++idx == que->num_desc)
        idx = 0;

    txr->avail--;
    txr->next_avail = idx;

    return (TRUE);
}

/*
** ixl_get_tx_head - Retrieve the value from the
** location the HW records its HEAD index
*/
static inline u32
ixl_get_tx_head(struct ixl_queue *que)
{
    struct tx_ring *txr = &que->txr;
    /* The HW writes its head index into the slot just past the ring. */
    void *head = &txr->base[que->num_desc];
    return LE32_TO_CPU(*(volatile __le32 *)head);
}

/**********************************************************************
 *
 *  Examine each tx_buffer in the used queue. If the hardware is done
 *  processing the packet then free associated resources. The
 *  tx_buffer is put back on the free queue.
 *
 **********************************************************************/
bool
ixl_txeof(struct ixl_queue *que)
{
    struct tx_ring *txr = &que->txr;
    u32 first, last, head, done, processed;
    struct ixl_tx_buf *buf;
    struct i40e_tx_desc *tx_desc, *eop_desc;

    mtx_assert(&txr->mtx, MA_OWNED);

    /* These are not the descriptors you seek, move along :) */
    if (txr->avail == que->num_desc) {
        que->busy = 0;
        return (FALSE);
    }

    processed = 0;
    first = txr->next_to_clean;
    buf = &txr->buffers[first];
    tx_desc = (struct i40e_tx_desc *)&txr->base[first];
    last = buf->eop_index;
    if (last == -1)
        return (FALSE);
    eop_desc = (struct i40e_tx_desc *)&txr->base[last];

    /* Get the Head WB value */
    head = ixl_get_tx_head(que);

    /*
    ** Get the index of the first descriptor
    ** BEYOND the EOP and call that 'done'.
    ** I do this so the comparison in the
    ** inner while loop below can be simple
    */
    if (++last == que->num_desc) last = 0;
    done = last;

    bus_dmamap_sync(txr->dma.tag, txr->dma.map,
        BUS_DMASYNC_POSTREAD);
    /*
    ** The HEAD index of the ring is written in a
    ** defined location, this rather than a done bit
    ** is what is used to keep track of what must be
    ** 'cleaned'.
    */
    while (first != head) {
        /* We clean the range of the packet */
        while (first != done) {
            ++txr->avail;
            ++processed;

            if (buf->m_head) {
                txr->bytes += /* for ITR adjustment */
                    buf->m_head->m_pkthdr.len;
                txr->tx_bytes += /* for TX stats */
                    buf->m_head->m_pkthdr.len;
                bus_dmamap_sync(buf->tag, buf->map,
                    BUS_DMASYNC_POSTWRITE);
                bus_dmamap_unload(buf->tag, buf->map);
                m_freem(buf->m_head);
                buf->m_head = NULL;
                buf->map = NULL;
            }
            buf->eop_index = -1;

            if (++first == que->num_desc)
                first = 0;

            buf = &txr->buffers[first];
            tx_desc = &txr->base[first];
        }
        ++txr->packets;
        /* See if there is more work now */
        last = buf->eop_index;
        if (last != -1) {
            eop_desc = &txr->base[last];
            /* Get next done point */
            if (++last == que->num_desc) last = 0;
            done = last;
        } else
            break;
    }
    bus_dmamap_sync(txr->dma.tag, txr->dma.map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    txr->next_to_clean = first;

    /*
    ** Hang detection, we know there's
    ** work outstanding or the first return
    ** would have been taken, so indicate an
    ** unsuccessful pass, in local_timer if
    ** the value is too great the queue will
    ** be considered hung. If anything has been
    ** cleaned then reset the state.
    */
    if ((processed == 0) && (que->busy != IXL_QUEUE_HUNG))
        ++que->busy;

    if (processed)
        que->busy = 1; /* Note this turns off HUNG */

    /*
     * If there are no pending descriptors, clear the timeout.
     */
    if (txr->avail == que->num_desc) {
        que->busy = 0;
        return (FALSE);
    }

    return (TRUE);
}

/*********************************************************************
 *
 *  Refresh mbuf buffers for RX descriptor rings
 *   - now keeps its own state so discards due to resource
 *     exhaustion are unnecessary, if an mbuf cannot be obtained
 *     it just returns, keeping its placeholder, thus it can simply
 *     be recalled to try again.
 *
 **********************************************************************/
static void
ixl_refresh_mbufs(struct ixl_queue *que, int limit)
{
    struct ixl_vsi *vsi = que->vsi;
    struct rx_ring *rxr = &que->rxr;
    bus_dma_segment_t hseg[1];
    bus_dma_segment_t pseg[1];
    struct ixl_rx_buf *buf;
    struct mbuf *mh, *mp;
    int i, j, nsegs, error;
    bool refreshed = FALSE;

    i = j = rxr->next_refresh;
    /* Control the loop with one beyond */
    if (++j == que->num_desc)
        j = 0;

    while (j != limit) {
        buf = &rxr->buffers[i];
        if (rxr->hdr_split == FALSE)
            goto no_split;

        if (buf->m_head == NULL) {
            mh = m_gethdr(M_NOWAIT, MT_DATA);
            if (mh == NULL)
                goto update;
        } else
            mh = buf->m_head;

        mh->m_pkthdr.len = mh->m_len = MHLEN;
        mh->m_flags |= M_PKTHDR;
        /* Get the memory mapping */
        error = bus_dmamap_load_mbuf_sg(rxr->htag,
            buf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
        if (error != 0) {
            printf("Refresh mbufs: hdr dmamap load"
                " failure - %d\n", error);
            m_free(mh);
            buf->m_head = NULL;
            goto update;
        }
        buf->m_head = mh;
        bus_dmamap_sync(rxr->htag, buf->hmap,
            BUS_DMASYNC_PREREAD);
        rxr->base[i].read.hdr_addr =
            htole64(hseg[0].ds_addr);

no_split:
        if (buf->m_pack == NULL) {
            mp = m_getjcl(M_NOWAIT, MT_DATA,
                M_PKTHDR, rxr->mbuf_sz);
            if (mp == NULL)
                goto update;
        } else
            mp = buf->m_pack;

        mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
        /* Get the memory mapping */
        error = bus_dmamap_load_mbuf_sg(rxr->ptag,
            buf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
        if (error != 0) {
            printf("Refresh mbufs: payload dmamap load"
                " failure - %d\n", error);
            m_free(mp);
            buf->m_pack = NULL;
            goto update;
        }
        buf->m_pack = mp;
        bus_dmamap_sync(rxr->ptag, buf->pmap,
            BUS_DMASYNC_PREREAD);
        rxr->base[i].read.pkt_addr =
            htole64(pseg[0].ds_addr);
        /* Used only when doing header split */
        rxr->base[i].read.hdr_addr = 0;

        refreshed = TRUE;
        /* Next is precalculated */
        i = j;
        rxr->next_refresh = i;
        if (++j == que->num_desc)
            j = 0;
    }
update:
    if (refreshed) /* Update hardware tail index */
        wr32(vsi->hw, rxr->tail, rxr->next_refresh);
    return;
}

/*********************************************************************
 *
 *  Allocate memory for rx_buffer structures. Since we use one
 *  rx_buffer per descriptor, the maximum number of rx_buffer's
 *  that we'll need is equal to the number of receive descriptors
 *  that we've defined.
 *
 **********************************************************************/
int
ixl_allocate_rx_data(struct ixl_queue *que)
{
    struct rx_ring *rxr = &que->rxr;
    struct ixl_vsi *vsi = que->vsi;
    device_t dev = vsi->dev;
    struct ixl_rx_buf *buf;
    int i, bsize, error;

    bsize = sizeof(struct ixl_rx_buf) * que->num_desc;
    if (!(rxr->buffers =
        (struct ixl_rx_buf *) malloc(bsize,
        M_DEVBUF, M_NOWAIT | M_ZERO))) {
        device_printf(dev, "Unable to allocate rx_buffer memory\n");
        error = ENOMEM;
        return (error);
    }

    if ((error = bus_dma_tag_create(NULL,	/* parent */
        1, 0,			/* alignment, bounds */
        BUS_SPACE_MAXADDR,	/* lowaddr */
        BUS_SPACE_MAXADDR,	/* highaddr */
        NULL, NULL,		/* filter, filterarg */
        MSIZE,			/* maxsize */
        1,			/* nsegments */
        MSIZE,			/* maxsegsize */
        0,			/* flags */
        NULL,			/* lockfunc */
        NULL,			/* lockfuncarg */
        &rxr->htag))) {
        device_printf(dev, "Unable to create RX DMA htag\n");
        return (error);
    }

    if ((error = bus_dma_tag_create(NULL,	/* parent */
        1, 0,			/* alignment, bounds */
        BUS_SPACE_MAXADDR,	/* lowaddr */
        BUS_SPACE_MAXADDR,	/* highaddr */
        NULL, NULL,		/* filter, filterarg */
        MJUM16BYTES,		/* maxsize */
        1,			/* nsegments */
        MJUM16BYTES,		/* maxsegsize */
        0,			/* flags */
        NULL,			/* lockfunc */
        NULL,			/* lockfuncarg */
        &rxr->ptag))) {
        device_printf(dev, "Unable to create RX DMA ptag\n");
        return (error);
    }

    for (i = 0; i < que->num_desc; i++) {
        buf = &rxr->buffers[i];
        error = bus_dmamap_create(rxr->htag,
            BUS_DMA_NOWAIT, &buf->hmap);
        if (error) {
            device_printf(dev, "Unable to create RX head map\n");
            break;
        }
        error = bus_dmamap_create(rxr->ptag,
            BUS_DMA_NOWAIT, &buf->pmap);
        if (error) {
            device_printf(dev, "Unable to create RX pkt map\n");
            break;
        }
    }

    return (error);
}

/*********************************************************************
 *
 *  (Re)Initialize the queue receive ring and its buffers.
 *
 **********************************************************************/
int
ixl_init_rx_ring(struct ixl_queue *que)
{
    struct rx_ring *rxr = &que->rxr;
    struct ixl_vsi *vsi = que->vsi;
#if defined(INET6) || defined(INET)
    struct ifnet *ifp = vsi->ifp;
    struct lro_ctrl *lro = &rxr->lro;
#endif
    struct ixl_rx_buf *buf;
    bus_dma_segment_t pseg[1], hseg[1];
    int rsize, nsegs, error = 0;

    IXL_RX_LOCK(rxr);
    /* Clear the ring contents */
    rsize = roundup2(que->num_desc *
        sizeof(union i40e_rx_desc), DBA_ALIGN);
    bzero((void *)rxr->base, rsize);
    /* Cleanup any existing buffers */
    for (int i = 0; i < que->num_desc; i++) {
        buf = &rxr->buffers[i];
        if (buf->m_head != NULL) {
            bus_dmamap_sync(rxr->htag, buf->hmap,
                BUS_DMASYNC_POSTREAD);
            bus_dmamap_unload(rxr->htag, buf->hmap);
            buf->m_head->m_flags |= M_PKTHDR;
            m_freem(buf->m_head);
        }
        if (buf->m_pack != NULL) {
            bus_dmamap_sync(rxr->ptag, buf->pmap,
                BUS_DMASYNC_POSTREAD);
            bus_dmamap_unload(rxr->ptag, buf->pmap);
            buf->m_pack->m_flags |= M_PKTHDR;
            m_freem(buf->m_pack);
        }
        buf->m_head = NULL;
        buf->m_pack = NULL;
    }

    /* header split is off */
    rxr->hdr_split = FALSE;

    /* Now replenish the mbufs */
    for (int j = 0; j != que->num_desc; ++j) {
        struct mbuf *mh, *mp;

        buf = &rxr->buffers[j];
        /*
        ** Don't allocate mbufs if not
        ** doing header split, its wasteful
        */
        if (rxr->hdr_split == FALSE)
            goto skip_head;

        /* First the header */
        buf->m_head = m_gethdr(M_NOWAIT, MT_DATA);
        if (buf->m_head == NULL) {
            error = ENOBUFS;
            goto fail;
        }
        m_adj(buf->m_head, ETHER_ALIGN);
        mh = buf->m_head;
        mh->m_len = mh->m_pkthdr.len = MHLEN;
        mh->m_flags |= M_PKTHDR;
        /* Get the memory mapping */
        error = bus_dmamap_load_mbuf_sg(rxr->htag,
            buf->hmap, buf->m_head, hseg,
            &nsegs, BUS_DMA_NOWAIT);
        if (error != 0) /* Nothing elegant to do here */
            goto fail;
        bus_dmamap_sync(rxr->htag,
            buf->hmap, BUS_DMASYNC_PREREAD);
        /* Update descriptor */
        rxr->base[j].read.hdr_addr = htole64(hseg[0].ds_addr);

skip_head:
        /* Now the payload cluster */
        buf->m_pack = m_getjcl(M_NOWAIT, MT_DATA,
            M_PKTHDR, rxr->mbuf_sz);
        if (buf->m_pack == NULL) {
            error = ENOBUFS;
            goto fail;
        }
        mp = buf->m_pack;
        mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
        /* Get the memory mapping */
        error = bus_dmamap_load_mbuf_sg(rxr->ptag,
            buf->pmap, mp, pseg,
            &nsegs, BUS_DMA_NOWAIT);
        if (error != 0)
            goto fail;
        bus_dmamap_sync(rxr->ptag,
            buf->pmap, BUS_DMASYNC_PREREAD);
        /* Update descriptor */
        rxr->base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
        rxr->base[j].read.hdr_addr = 0;
    }

    /* Setup our descriptor indices */
    rxr->next_check = 0;
    rxr->next_refresh = 0;
    rxr->lro_enabled = FALSE;
    rxr->split = 0;
    rxr->bytes = 0;
    rxr->discard = FALSE;

    wr32(vsi->hw, rxr->tail, que->num_desc - 1);

#if defined(INET6) || defined(INET)
    /*
    ** Now set up the LRO interface:
    */
    if (ifp->if_capenable & IFCAP_LRO) {
        int err = tcp_lro_init(lro);
        if (err) {
            if_printf(ifp, "queue %d: LRO Initialization failed!\n", que->me);
            goto fail;
        }
        INIT_DBG_IF(ifp, "queue %d: RX Soft LRO Initialized", que->me);
        rxr->lro_enabled = TRUE;
        lro->ifp = vsi->ifp;
    }
#endif

    bus_dmamap_sync(rxr->dma.tag, rxr->dma.map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

fail:
    IXL_RX_UNLOCK(rxr);
    return (error);
}

/*********************************************************************
 *
 *  Free station receive ring data structures
 *
 **********************************************************************/
void
ixl_free_que_rx(struct ixl_queue *que)
{
    struct rx_ring *rxr = &que->rxr;
    struct ixl_rx_buf *buf;

    INIT_DBG_IF(que->vsi->ifp, "queue %d: begin", que->me);

    /* Cleanup any existing buffers */
    if (rxr->buffers != NULL) {
        for (int i = 0; i < que->num_desc; i++) {
            buf = &rxr->buffers[i];
            if (buf->m_head != NULL) {
                bus_dmamap_sync(rxr->htag, buf->hmap,
                    BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(rxr->htag, buf->hmap);
                buf->m_head->m_flags |= M_PKTHDR;
                m_freem(buf->m_head);
            }
            if (buf->m_pack != NULL) {
                bus_dmamap_sync(rxr->ptag, buf->pmap,
                    BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(rxr->ptag, buf->pmap);
                buf->m_pack->m_flags |= M_PKTHDR;
                m_freem(buf->m_pack);
            }
            buf->m_head = NULL;
            buf->m_pack = NULL;
            if (buf->hmap != NULL) {
                bus_dmamap_destroy(rxr->htag, buf->hmap);
                buf->hmap = NULL;
            }
            if (buf->pmap != NULL) {
                bus_dmamap_destroy(rxr->ptag, buf->pmap);
                buf->pmap = NULL;
            }
        }
        if (rxr->buffers != NULL) {
            free(rxr->buffers, M_DEVBUF);
            rxr->buffers = NULL;
        }
    }

    if (rxr->htag != NULL) {
        bus_dma_tag_destroy(rxr->htag);
        rxr->htag = NULL;
    }
    if (rxr->ptag != NULL) {
        bus_dma_tag_destroy(rxr->ptag);
        rxr->ptag = NULL;
    }

    INIT_DBG_IF(que->vsi->ifp, "queue %d: end", que->me);
    return;
}

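/*
** ixl_rx_input - deliver a received frame to the stack, first
** offering it to the software LRO engine when LRO is enabled and
** the hardware has already validated the TCP checksum.
*/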
static __inline void
ixl_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u8 ptype)
{
#if defined(INET6) || defined(INET)
    /*
     * ATM LRO is only for IPv4/TCP packets and TCP checksum of the
     * packet should be computed by hardware. Also it should not have
     * a VLAN tag in the ethernet header.
     */
    if (rxr->lro_enabled &&
        (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
        (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
        (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
        /*
         * Send to the stack if:
         **  - LRO not enabled, or
         **  - no LRO resources, or
         **  - lro enqueue fails
         */
        if (rxr->lro.lro_cnt != 0)
            if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
                return;
    }
#endif
    IXL_RX_UNLOCK(rxr);
    (*ifp->if_input)(ifp, m);
    IXL_RX_LOCK(rxr);
}

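/*
** ixl_rx_discard - drop a bad or incomplete frame: free any partially
** assembled chain plus the current header/payload mbufs, and let the
** normal refresh path allocate and map replacements.
*/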
static __inline void
ixl_rx_discard(struct rx_ring *rxr, int i)
{
    struct ixl_rx_buf *rbuf;

    rbuf = &rxr->buffers[i];

    if (rbuf->fmp != NULL) { /* Partial chain ? */
        rbuf->fmp->m_flags |= M_PKTHDR;
        m_freem(rbuf->fmp);
        rbuf->fmp = NULL;
    }

    /*
    ** With advanced descriptors the writeback
    ** clobbers the buffer addrs, so its easier
    ** to just free the existing mbufs and take
    ** the normal refresh path to get new buffers
    ** and mapping.
    */
    if (rbuf->m_head) {
        m_free(rbuf->m_head);
        rbuf->m_head = NULL;
    }

    if (rbuf->m_pack) {
        m_free(rbuf->m_pack);
        rbuf->m_pack = NULL;
    }

    return;
}

/*
** ixl_ptype_to_hash: parse the packet type
** to determine the appropriate hash.
*/
static inline int
ixl_ptype_to_hash(u8 ptype)
{
    struct i40e_rx_ptype_decoded decoded;
    u8 ex = 0;

    decoded = decode_rx_desc_ptype(ptype);
    ex = decoded.outer_frag;

    if (!decoded.known)
        return M_HASHTYPE_OPAQUE;

    if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_L2)
        return M_HASHTYPE_OPAQUE;

    /* Note: anything that gets to this point is IP */
    if (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6) {
        switch (decoded.inner_prot) {
        case I40E_RX_PTYPE_INNER_PROT_TCP:
            if (ex)
                return M_HASHTYPE_RSS_TCP_IPV6_EX;
            else
                return M_HASHTYPE_RSS_TCP_IPV6;
        case I40E_RX_PTYPE_INNER_PROT_UDP:
            if (ex)
                return M_HASHTYPE_RSS_UDP_IPV6_EX;
            else
                return M_HASHTYPE_RSS_UDP_IPV6;
        default:
            if (ex)
                return M_HASHTYPE_RSS_IPV6_EX;
            else
                return M_HASHTYPE_RSS_IPV6;
        }
    }
    if (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4) {
        switch (decoded.inner_prot) {
        case I40E_RX_PTYPE_INNER_PROT_TCP:
            return M_HASHTYPE_RSS_TCP_IPV4;
        case I40E_RX_PTYPE_INNER_PROT_UDP:
            if (ex)
                return M_HASHTYPE_RSS_UDP_IPV4_EX;
            else
                return M_HASHTYPE_RSS_UDP_IPV4;
        default:
            return M_HASHTYPE_RSS_IPV4;
        }
    }
    /* We should never get here!! */
    return M_HASHTYPE_OPAQUE;
}

/*********************************************************************
 *
 *  This routine executes in interrupt context. It replenishes
 *  the mbufs in the descriptor and sends data which has been
 *  dma'ed into host memory to upper layer.
 *
 *  We loop at most count times if count is > 0, or until done if
 *  count < 0.
 *
 *  Return TRUE for more work, FALSE for all clean.
 *********************************************************************/
bool
ixl_rxeof(struct ixl_queue *que, int count)
{
    struct ixl_vsi *vsi = que->vsi;
    struct rx_ring *rxr = &que->rxr;
    struct ifnet *ifp = vsi->ifp;
#if defined(INET6) || defined(INET)
    struct lro_ctrl *lro = &rxr->lro;
    struct lro_entry *queued;
#endif
    int i, nextp, processed = 0;
    union i40e_rx_desc *cur;
    struct ixl_rx_buf *rbuf, *nbuf;

    IXL_RX_LOCK(rxr);

    for (i = rxr->next_check; count != 0;) {
        struct mbuf *sendmp, *mh, *mp;
        u32 rsc, status, error;
        u16 hlen, plen, vtag;
        u64 qword;
        u8 ptype;
        bool eop;

        /* Sync the ring. */
        bus_dmamap_sync(rxr->dma.tag, rxr->dma.map,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

        cur = &rxr->base[i];
        qword = le64toh(cur->wb.qword1.status_error_len);
        status = (qword & I40E_RXD_QW1_STATUS_MASK)
            >> I40E_RXD_QW1_STATUS_SHIFT;
        error = (qword & I40E_RXD_QW1_ERROR_MASK)
            >> I40E_RXD_QW1_ERROR_SHIFT;
        plen = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK)
            >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
        hlen = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK)
            >> I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
        ptype = (qword & I40E_RXD_QW1_PTYPE_MASK)
            >> I40E_RXD_QW1_PTYPE_SHIFT;

        if ((status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) == 0) {
            ++rxr->not_done;
            break;
        }
        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
            break;

        count--;
        sendmp = NULL;
        nbuf = NULL;
        rsc = 0;
        cur->wb.qword1.status_error_len = 0;
        rbuf = &rxr->buffers[i];
        mh = rbuf->m_head;
        mp = rbuf->m_pack;
        eop = (status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT));
        if (status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT))
            vtag = le16toh(cur->wb.qword0.lo_dword.l2tag1);
        else
            vtag = 0;

        /*
        ** Make sure bad packets are discarded,
        ** note that only EOP descriptor has valid
        ** error results.
        */
        if (eop && (error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
            rxr->discarded++;
            ixl_rx_discard(rxr, i);
            goto next_desc;
        }

        /* Prefetch the next buffer */
        if (!eop) {
            nextp = i + 1;
            if (nextp == que->num_desc)
                nextp = 0;
            nbuf = &rxr->buffers[nextp];
            prefetch(nbuf);
        }

        /*
        ** The header mbuf is ONLY used when header
        ** split is enabled, otherwise we get normal
        ** behavior, ie, both header and payload
        ** are DMA'd into the payload buffer.
        **
        ** Rather than using the fmp/lmp global pointers
        ** we now keep the head of a packet chain in the
        ** buffer struct and pass this along from one
        ** descriptor to the next, until we get EOP.
        */
        if (rxr->hdr_split && (rbuf->fmp == NULL)) {
            if (hlen > IXL_RX_HDR)
                hlen = MHLEN;
            mh->m_len = hlen;
            mh->m_flags |= M_PKTHDR;
            mh->m_next = NULL;
            mh->m_pkthdr.len = mh->m_len;
            /* Null buf pointer so it is refreshed */
            rbuf->m_head = NULL;
            /*
            ** Check the payload length, this
            ** could be zero if its a small
            ** packet.
            */
            if (plen > 0) {
                mp->m_len = plen;
                mp->m_next = NULL;
                mp->m_flags &= ~M_PKTHDR;
                mh->m_next = mp;
                mh->m_pkthdr.len += mp->m_len;
                /* Null buf pointer so it is refreshed */
                rbuf->m_pack = NULL;
                rxr->split++;
            }
            /*
            ** Now create the forward
            ** chain so when complete
            ** we wont have to.
            */
            if (eop == 0) {
                /* stash the chain head */
                nbuf->fmp = mh;
                /* Make forward chain */
                if (plen)
                    mp->m_next = nbuf->m_pack;
                else
                    mh->m_next = nbuf->m_pack;
            } else {
                /* Singlet, prepare to send */
                sendmp = mh;
                if (vtag) {
                    sendmp->m_pkthdr.ether_vtag = vtag;
                    sendmp->m_flags |= M_VLANTAG;
                }
            }
        } else {
            /*
            ** Either no header split, or a
            ** secondary piece of a fragmented
            ** split packet.
            */
            mp->m_len = plen;
            /*
            ** See if there is a stored head
            ** that determines what we are
            */
            sendmp = rbuf->fmp;
            rbuf->m_pack = rbuf->fmp = NULL;

            if (sendmp != NULL) /* secondary frag */
                sendmp->m_pkthdr.len += mp->m_len;
            else {
                /* first desc of a non-ps chain */
                sendmp = mp;
                sendmp->m_flags |= M_PKTHDR;
                sendmp->m_pkthdr.len = mp->m_len;
                if (vtag) {
                    sendmp->m_pkthdr.ether_vtag = vtag;
                    sendmp->m_flags |= M_VLANTAG;
                }
            }
            /* Pass the head pointer on */
            if (eop == 0) {
                nbuf->fmp = sendmp;
                sendmp = NULL;
                mp->m_next = nbuf->m_pack;
            }
        }
        ++processed;
        /* Sending this frame? */
        if (eop) {
            sendmp->m_pkthdr.rcvif = ifp;
            /* gather stats */
            rxr->rx_packets++;
            rxr->rx_bytes += sendmp->m_pkthdr.len;
            /* capture data for dynamic ITR adjustment */
            rxr->packets++;
            rxr->bytes += sendmp->m_pkthdr.len;
            if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
                ixl_rx_checksum(sendmp, status, error, ptype);
#ifdef RSS
            sendmp->m_pkthdr.flowid =
                le32toh(cur->wb.qword0.hi_dword.rss);
            M_HASHTYPE_SET(sendmp, ixl_ptype_to_hash(ptype));
#else
            sendmp->m_pkthdr.flowid = que->msix;
            M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE);
#endif
        }
next_desc:
        bus_dmamap_sync(rxr->dma.tag, rxr->dma.map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        /* Advance our pointers to the next descriptor. */
        if (++i == que->num_desc)
            i = 0;

        /* Now send to the stack or do LRO */
        if (sendmp != NULL) {
            rxr->next_check = i;
            ixl_rx_input(rxr, ifp, sendmp, ptype);
            i = rxr->next_check;
        }

        /* Every 8 descriptors we go to refresh mbufs */
        if (processed == 8) {
            ixl_refresh_mbufs(que, i);
            processed = 0;
        }
    }

    /* Refresh any remaining buf structs */
    if (ixl_rx_unrefreshed(que))
        ixl_refresh_mbufs(que, i);

    rxr->next_check = i;

#if defined(INET6) || defined(INET)
    /*
     * Flush any outstanding LRO work
     */
    while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
        SLIST_REMOVE_HEAD(&lro->lro_active, next);
        tcp_lro_flush(lro, queued);
    }
#endif

    IXL_RX_UNLOCK(rxr);
    return (FALSE);
}

/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack about the status of checksum so that stack
 *  doesn't spend time verifying the checksum.
 *
 *********************************************************************/
static void
ixl_rx_checksum(struct mbuf * mp, u32 status, u32 error, u8 ptype)
{
    struct i40e_rx_ptype_decoded decoded;

    decoded = decode_rx_desc_ptype(ptype);

    /* Errors? */
    if (error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
        (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))) {
        mp->m_pkthdr.csum_flags = 0;
        return;
    }

    /* IPv6 with extension headers likely have bad csum */
    if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
        decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
        if (status &
            (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) {
            mp->m_pkthdr.csum_flags = 0;
            return;
        }

    /* IP Checksum Good */
    mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
    mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;

    if (status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)) {
        mp->m_pkthdr.csum_flags |=
            (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
        mp->m_pkthdr.csum_data |= htons(0xffff);
    }
    return;
}

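/*
** ixl_get_counter - if_get_counter() handler; reports interface
** statistics from the software counters kept in the VSI rather
** than querying hardware registers on every call.
*/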
#if __FreeBSD_version >= 1100000
uint64_t
ixl_get_counter(if_t ifp, ift_counter cnt)
{
    struct ixl_vsi *vsi;

    vsi = if_getsoftc(ifp);

    switch (cnt) {
    case IFCOUNTER_IPACKETS:
        return (vsi->ipackets);
    case IFCOUNTER_IERRORS:
        return (vsi->ierrors);
    case IFCOUNTER_OPACKETS:
        return (vsi->opackets);
    case IFCOUNTER_OERRORS:
        return (vsi->oerrors);
    case IFCOUNTER_COLLISIONS:
        /* Collisions are by standard impossible in 40G/10G Ethernet */
        return (0);
    case IFCOUNTER_IBYTES:
        return (vsi->ibytes);
    case IFCOUNTER_OBYTES:
        return (vsi->obytes);
    case IFCOUNTER_IMCASTS:
        return (vsi->imcasts);
    case IFCOUNTER_OMCASTS:
        return (vsi->omcasts);
    case IFCOUNTER_IQDROPS:
        return (vsi->iqdrops);
    case IFCOUNTER_OQDROPS:
        return (vsi->oqdrops);
    case IFCOUNTER_NOPROTO:
        return (vsi->noproto);
    default:
        return (if_get_counter_default(ifp, cnt));
    }
}
#endif