2 * Copyright (C) 2015 Cavium Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
33 #include "opt_inet6.h"
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/bitset.h>
38 #include <sys/bitstring.h>
39 #include <sys/buf_ring.h>
41 #include <sys/endian.h>
42 #include <sys/kernel.h>
43 #include <sys/malloc.h>
44 #include <sys/module.h>
46 #include <sys/pciio.h>
49 #include <sys/sockio.h>
50 #include <sys/socket.h>
51 #include <sys/stdatomic.h>
52 #include <sys/cpuset.h>
54 #include <sys/mutex.h>
56 #include <sys/taskqueue.h>
61 #include <machine/bus.h>
62 #include <machine/vmparam.h>
65 #include <net/if_var.h>
66 #include <net/if_media.h>
69 #include <net/ethernet.h>
71 #include <netinet/in_systm.h>
72 #include <netinet/in.h>
73 #include <netinet/if_ether.h>
74 #include <netinet/ip.h>
75 #include <netinet/ip6.h>
76 #include <netinet/sctp.h>
77 #include <netinet/tcp.h>
78 #include <netinet/tcp_lro.h>
79 #include <netinet/udp.h>
81 #include <netinet6/ip6_var.h>
83 #include <dev/pci/pcireg.h>
84 #include <dev/pci/pcivar.h>
86 #include "thunder_bgx.h"
90 #include "nicvf_queues.h"
96 #define dprintf(dev, fmt, ...) device_printf(dev, fmt, ##__VA_ARGS__)
98 #define dprintf(dev, fmt, ...)
101 MALLOC_DECLARE(M_NICVF);
103 static void nicvf_free_snd_queue(struct nicvf *, struct snd_queue *);
104 static struct mbuf * nicvf_get_rcv_mbuf(struct nicvf *, struct cqe_rx_t *);
105 static void nicvf_sq_disable(struct nicvf *, int);
106 static void nicvf_sq_enable(struct nicvf *, struct snd_queue *, int);
107 static void nicvf_put_sq_desc(struct snd_queue *, int);
108 static void nicvf_cmp_queue_config(struct nicvf *, struct queue_set *, int,
110 static void nicvf_sq_free_used_descs(struct nicvf *, struct snd_queue *, int);
112 static int nicvf_tx_mbuf_locked(struct snd_queue *, struct mbuf **);
114 static void nicvf_rbdr_task(void *, int);
115 static void nicvf_rbdr_task_nowait(void *, int);
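/*
 * Each receive buffer stores a struct rbuf_info in the 128-byte metadata area
 * that precedes its cache-aligned data address; GET_RBUF_INFO() steps back
 * over that area to recover the metadata from a buffer address.
 */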
123 #define GET_RBUF_INFO(x) ((struct rbuf_info *)((x) - NICVF_RCV_BUF_ALIGN_BYTES))
125 /* Poll a register for a specific value */
126 static int nicvf_poll_reg(struct nicvf *nic, int qidx,
127 uint64_t reg, int bit_pos, int bits, int val)
133 bit_mask = (1UL << bits) - 1;
134 bit_mask = (bit_mask << bit_pos);
137 reg_val = nicvf_queue_reg_read(nic, reg, qidx);
138 if (((reg_val & bit_mask) >> bit_pos) == val)
144 device_printf(nic->dev, "Poll on reg 0x%lx failed\n", reg);
148 /* Callback for bus_dmamap_load() */
150 nicvf_dmamap_q_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
154 KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
156 *paddr = segs->ds_addr;
159 /* Allocate memory for a queue's descriptors */
161 nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
162 int q_len, int desc_size, int align_bytes)
166 /* Create DMA tag first */
167 err = bus_dma_tag_create(
168 bus_get_dma_tag(nic->dev), /* parent tag */
169 align_bytes, /* alignment */
171 BUS_SPACE_MAXADDR, /* lowaddr */
172 BUS_SPACE_MAXADDR, /* highaddr */
173 NULL, NULL, /* filtfunc, filtfuncarg */
174 (q_len * desc_size), /* maxsize */
176 (q_len * desc_size), /* maxsegsize */
178 NULL, NULL, /* lockfunc, lockfuncarg */
179 &dmem->dmat); /* dmat */
182 device_printf(nic->dev,
183 "Failed to create busdma tag for descriptors ring\n");
187 /* Allocate a segment of contiguous, DMA-safe memory */
188 err = bus_dmamem_alloc(
189 dmem->dmat, /* DMA tag */
190 &dmem->base, /* virtual address */
191 (BUS_DMA_NOWAIT | BUS_DMA_ZERO), /* flags */
192 &dmem->dmap); /* DMA map */
194 device_printf(nic->dev, "Failed to allocate DMA safe memory for "
195 "descriptors ring\n");
199 err = bus_dmamap_load(
203 (q_len * desc_size), /* allocation size */
204 nicvf_dmamap_q_cb, /* map to DMA address cb. */
205 &dmem->phys_base, /* physical address */
208 device_printf(nic->dev,
209 "Cannot load DMA map of descriptors ring\n");
214 dmem->size = (desc_size * q_len);
219 bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap);
222 err_dmat = bus_dma_tag_destroy(dmem->dmat);
224 KASSERT(err_dmat == 0,
225 ("%s: Trying to destroy BUSY DMA tag", __func__));
230 /* Free queue's descriptor memory */
232 nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
236 if ((dmem == NULL) || (dmem->base == NULL))
240 bus_dmamap_sync(dmem->dmat, dmem->dmap, BUS_DMASYNC_POSTREAD);
241 bus_dmamap_unload(dmem->dmat, dmem->dmap);
242 /* Free DMA memory */
243 bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap);
244 /* Destroy DMA tag */
245 err = bus_dma_tag_destroy(dmem->dmat);
248 ("%s: Trying to destroy BUSY DMA tag", __func__));
255 * Allocate a buffer for packet reception.
256 * The HW returns the memory address to which the packet was DMA'ed, not a
257 * pointer into the RBDR ring, so save the buffer address at the start of the
258 * fragment and align the start address to a cache-aligned address.
261 nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
262 bus_dmamap_t dmap, int mflags, uint32_t buf_len, bus_addr_t *rbuf)
265 struct rbuf_info *rinfo;
266 bus_dma_segment_t segs[1];
270 mbuf = m_getjcl(mflags, MT_DATA, M_PKTHDR, MCLBYTES);
275 * The length is equal to the actual length + one 128-byte line
276 * used as room for the rbuf_info structure.
278 mbuf->m_len = mbuf->m_pkthdr.len = buf_len;
280 err = bus_dmamap_load_mbuf_sg(rbdr->rbdr_buff_dmat, dmap, mbuf, segs,
281 &nsegs, BUS_DMA_NOWAIT);
283 device_printf(nic->dev,
284 "Failed to map mbuf into DMA visible memory, err: %d\n",
287 bus_dmamap_destroy(rbdr->rbdr_buff_dmat, dmap);
291 panic("Unexpected number of DMA segments for RB: %d", nsegs);
293 * Now use the room for the rbuf_info structure
294 * and adjust the mbuf data pointer and length.
296 rinfo = (struct rbuf_info *)mbuf->m_data;
297 m_adj(mbuf, NICVF_RCV_BUF_ALIGN_BYTES);
299 rinfo->dmat = rbdr->rbdr_buff_dmat;
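/*
 * Report an address just past the rbuf_info metadata area; this is what gets
 * programmed into the RBDR, and GET_RBUF_INFO() later recovers the metadata
 * from the address the hardware hands back.
 */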
303 *rbuf = segs[0].ds_addr + NICVF_RCV_BUF_ALIGN_BYTES;
308 /* Retrieve mbuf for received packet */
310 nicvf_rb_ptr_to_mbuf(struct nicvf *nic, bus_addr_t rb_ptr)
313 struct rbuf_info *rinfo;
315 /* Get buffer start address and alignment offset */
316 rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(rb_ptr));
318 /* Now retrieve mbuf to give to stack */
320 if (__predict_false(mbuf == NULL)) {
321 panic("%s: Received packet fragment with NULL mbuf",
322 device_get_nameunit(nic->dev));
325 * Clear the mbuf in the descriptor to indicate
326 * that this slot is processed and free to use.
330 bus_dmamap_sync(rinfo->dmat, rinfo->dmap, BUS_DMASYNC_POSTREAD);
331 bus_dmamap_unload(rinfo->dmat, rinfo->dmap);
336 /* Allocate RBDR ring and populate receive buffers */
338 nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr, int ring_len,
339 int buf_size, int qidx)
343 struct rbdr_entry_t *desc;
347 /* Allocate rbdr descriptors ring */
348 err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
349 sizeof(struct rbdr_entry_t), NICVF_RCV_BUF_ALIGN_BYTES);
351 device_printf(nic->dev,
352 "Failed to create RBDR descriptors ring\n");
356 rbdr->desc = rbdr->dmem.base;
358 * Buffer size has to be a multiple of 128 bytes.
359 * Make room for metadata the size of one cache line (128 bytes).
361 rbdr->dma_size = buf_size - NICVF_RCV_BUF_ALIGN_BYTES;
363 rbdr->thresh = RBDR_THRESH;
368 * Create DMA tag for Rx buffers.
369 * Each map created using this tag is intended to store Rx payload for
370 * one fragment and one header structure containing rbuf_info (thus
371 * additional 128 byte line since RB must be a multiple of 128 byte
374 if (buf_size > MCLBYTES) {
375 device_printf(nic->dev,
376 "Buffer size to large for mbuf cluster\n");
379 err = bus_dma_tag_create(
380 bus_get_dma_tag(nic->dev), /* parent tag */
381 NICVF_RCV_BUF_ALIGN_BYTES, /* alignment */
383 DMAP_MAX_PHYSADDR, /* lowaddr */
384 DMAP_MIN_PHYSADDR, /* highaddr */
385 NULL, NULL, /* filtfunc, filtfuncarg */
386 roundup2(buf_size, MCLBYTES), /* maxsize */
388 roundup2(buf_size, MCLBYTES), /* maxsegsize */
390 NULL, NULL, /* lockfunc, lockfuncarg */
391 &rbdr->rbdr_buff_dmat); /* dmat */
394 device_printf(nic->dev,
395 "Failed to create busdma tag for RBDR buffers\n");
399 rbdr->rbdr_buff_dmaps = malloc(sizeof(*rbdr->rbdr_buff_dmaps) *
400 ring_len, M_NICVF, (M_WAITOK | M_ZERO));
402 for (idx = 0; idx < ring_len; idx++) {
403 err = bus_dmamap_create(rbdr->rbdr_buff_dmat, 0, &dmap);
405 device_printf(nic->dev,
406 "Failed to create DMA map for RB\n");
409 rbdr->rbdr_buff_dmaps[idx] = dmap;
411 err = nicvf_alloc_rcv_buffer(nic, rbdr, dmap, M_WAITOK,
412 DMA_BUFFER_LEN, &rbuf);
416 desc = GET_RBDR_DESC(rbdr, idx);
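/*
 * Descriptors store the buffer address right-shifted by NICVF_RCV_BUF_ALIGN;
 * the dropped low bits are zero since buffers are 128-byte aligned.
 */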
417 desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN);
420 /* Allocate taskqueue */
421 TASK_INIT(&rbdr->rbdr_task, 0, nicvf_rbdr_task, rbdr);
422 TASK_INIT(&rbdr->rbdr_task_nowait, 0, nicvf_rbdr_task_nowait, rbdr);
423 rbdr->rbdr_taskq = taskqueue_create_fast("nicvf_rbdr_taskq", M_WAITOK,
424 taskqueue_thread_enqueue, &rbdr->rbdr_taskq);
425 taskqueue_start_threads(&rbdr->rbdr_taskq, 1, PI_NET, "%s: rbdr_taskq",
426 device_get_nameunit(nic->dev));
431 /* Free RBDR ring and its receive buffers */
433 nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
436 struct queue_set *qs;
437 struct rbdr_entry_t *desc;
438 struct rbuf_info *rinfo;
445 if ((qs == NULL) || (rbdr == NULL))
448 rbdr->enable = FALSE;
449 if (rbdr->rbdr_taskq != NULL) {
451 while (taskqueue_cancel(rbdr->rbdr_taskq,
452 &rbdr->rbdr_task_nowait, NULL) != 0) {
453 /* Finish the nowait task first */
454 taskqueue_drain(rbdr->rbdr_taskq,
455 &rbdr->rbdr_task_nowait);
457 taskqueue_free(rbdr->rbdr_taskq);
458 rbdr->rbdr_taskq = NULL;
460 while (taskqueue_cancel(taskqueue_thread,
461 &rbdr->rbdr_task, NULL) != 0) {
462 /* Now finish the sleepable task */
463 taskqueue_drain(taskqueue_thread, &rbdr->rbdr_task);
468 * Free all of the memory under the RB descriptors.
469 * There are assumptions here:
470 * 1. Corresponding RBDR is disabled
471 * - it is safe to operate using head and tail indexes
472 * 2. All buffers that were received are properly freed by
473 * the receive handler
474 * - there is no need to unload the DMA map and free the mbuf for
475 * descriptors other than the unused ones
477 if (rbdr->rbdr_buff_dmat != NULL) {
480 while (head != tail) {
481 desc = GET_RBDR_DESC(rbdr, head);
482 buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
483 rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(buf_addr));
484 bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap);
486 /* This will destroy everything including rinfo! */
489 head &= (rbdr->dmem.q_len - 1);
491 /* Free tail descriptor */
492 desc = GET_RBDR_DESC(rbdr, tail);
493 buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
494 rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(buf_addr));
495 bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap);
497 /* This will destroy everything including rinfo! */
500 /* Destroy DMA maps */
501 for (idx = 0; idx < qs->rbdr_len; idx++) {
502 if (rbdr->rbdr_buff_dmaps[idx] == NULL)
504 err = bus_dmamap_destroy(rbdr->rbdr_buff_dmat,
505 rbdr->rbdr_buff_dmaps[idx]);
507 ("%s: Could not destroy DMA map for RB, desc: %d",
509 rbdr->rbdr_buff_dmaps[idx] = NULL;
512 /* Now destroy the tag */
513 err = bus_dma_tag_destroy(rbdr->rbdr_buff_dmat);
515 ("%s: Trying to destroy BUSY DMA tag", __func__));
522 nicvf_free_q_desc_mem(nic, &rbdr->dmem);
526 * Refill receive buffer descriptors with new buffers.
529 nicvf_refill_rbdr(struct rbdr *rbdr, int mflags)
532 struct queue_set *qs;
536 struct rbdr_entry_t *desc;
539 boolean_t rb_alloc_fail;
542 rb_alloc_fail = TRUE;
546 rbdr_idx = rbdr->idx;
548 /* Check if it's enabled */
552 /* Get the number of descriptors to be refilled */
553 qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
555 /* Doorbell can be rung with a max of ring size minus 1 */
556 if (qcount >= (qs->rbdr_len - 1)) {
557 rb_alloc_fail = FALSE;
560 refill_rb_cnt = qs->rbdr_len - qcount - 1;
562 /* Start filling descs from tail */
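/* Convert the HW tail value to an entry index; RBDR entries are 8 bytes, hence the shift by 3. */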
563 tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
564 while (refill_rb_cnt) {
566 tail &= (rbdr->dmem.q_len - 1);
568 dmap = rbdr->rbdr_buff_dmaps[tail];
569 if (nicvf_alloc_rcv_buffer(nic, rbdr, dmap, mflags,
570 DMA_BUFFER_LEN, &rbuf)) {
571 /* Something went wrong. Give up. */
574 desc = GET_RBDR_DESC(rbdr, tail);
575 desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN);
580 /* Make sure all memory stores are done before ringing the doorbell */
583 /* Check if buffer allocation failed */
584 if (refill_rb_cnt == 0)
585 rb_alloc_fail = FALSE;
588 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
591 if (!rb_alloc_fail) {
593 * Re-enable RBDR interrupts only
594 * if buffer allocation succeeded.
596 nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);
604 /* Refill RBs even if sleep is needed to reclaim memory */
606 nicvf_rbdr_task(void *arg, int pending)
611 rbdr = (struct rbdr *)arg;
613 err = nicvf_refill_rbdr(rbdr, M_WAITOK);
614 if (__predict_false(err != 0)) {
615 panic("%s: Failed to refill RBs even when sleep enabled",
620 /* Refill RBs as soon as possible without waiting */
622 nicvf_rbdr_task_nowait(void *arg, int pending)
627 rbdr = (struct rbdr *)arg;
629 err = nicvf_refill_rbdr(rbdr, M_NOWAIT);
632 * Schedule another, sleepable kernel thread
633 * that is guaranteed to refill the buffers.
635 taskqueue_enqueue(taskqueue_thread, &rbdr->rbdr_task);
640 nicvf_rcv_pkt_handler(struct nicvf *nic, struct cmp_queue *cq,
641 struct cqe_rx_t *cqe_rx, int cqe_type)
644 struct rcv_queue *rq;
648 rq_idx = cqe_rx->rq_idx;
649 rq = &nic->qs->rq[rq_idx];
651 /* Check for errors */
652 err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
653 if (err && !cqe_rx->rb_cnt)
656 mbuf = nicvf_get_rcv_mbuf(nic, cqe_rx);
658 dprintf(nic->dev, "Packet not received\n");
662 /* If error packet */
668 if (rq->lro_enabled &&
669 ((cqe_rx->l3_type == L3TYPE_IPV4) && (cqe_rx->l4_type == L4TYPE_TCP)) &&
670 (mbuf->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
671 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
673 * At this point it is known that there are no errors in the
674 * packet. Attempt to LRO enqueue. Send to stack if no resources
677 if ((rq->lro.lro_cnt != 0) &&
678 (tcp_lro_rx(&rq->lro, mbuf, 0) == 0))
682 * Push this packet to the stack later to avoid
683 * unlocking completion task in the middle of work.
685 err = buf_ring_enqueue(cq->rx_br, mbuf);
688 * Failed to enqueue this mbuf.
689 * We don't drop it, just schedule another task.
698 nicvf_snd_pkt_handler(struct nicvf *nic, struct cmp_queue *cq,
699 struct cqe_send_t *cqe_tx, int cqe_type)
703 struct snd_queue *sq;
704 struct sq_hdr_subdesc *hdr;
707 sq = &nic->qs->sq[cqe_tx->sq_idx];
709 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
710 if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
714 "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n",
715 __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
716 cqe_tx->sqe_ptr, hdr->subdesc_cnt);
718 dmap = (bus_dmamap_t)sq->snd_buff[cqe_tx->sqe_ptr].dmap;
719 bus_dmamap_unload(sq->snd_buff_dmat, dmap);
721 mbuf = (struct mbuf *)sq->snd_buff[cqe_tx->sqe_ptr].mbuf;
724 sq->snd_buff[cqe_tx->sqe_ptr].mbuf = NULL;
725 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
728 nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
732 nicvf_cq_intr_handler(struct nicvf *nic, uint8_t cq_idx)
736 int processed_cqe, work_done = 0, tx_done = 0;
737 int cqe_count, cqe_head;
738 struct queue_set *qs = nic->qs;
739 struct cmp_queue *cq = &qs->cq[cq_idx];
740 struct snd_queue *sq = &qs->sq[cq_idx];
741 struct rcv_queue *rq;
742 struct cqe_rx_t *cq_desc;
743 struct lro_ctrl *lro;
750 /* Get the number of valid CQ entries to process */
751 cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
752 cqe_count &= CQ_CQE_COUNT;
756 /* Get head of the valid CQ entries */
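/* Convert HEAD to a CQE index; each completion entry slot is 512 bytes, hence the shift by 9. */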
757 cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
760 dprintf(nic->dev, "%s CQ%d cqe_count %d cqe_head %d\n",
761 __func__, cq_idx, cqe_count, cqe_head);
762 while (processed_cqe < cqe_count) {
763 /* Get the CQ descriptor */
764 cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
766 cqe_head &= (cq->dmem.q_len - 1);
767 /* Prefetch next CQ descriptor */
768 __builtin_prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));
770 dprintf(nic->dev, "CQ%d cq_desc->cqe_type %d\n", cq_idx,
772 switch (cq_desc->cqe_type) {
774 cmp_err = nicvf_rcv_pkt_handler(nic, cq, cq_desc,
776 if (__predict_false(cmp_err != 0)) {
778 * Oops. Cannot finish now.
779 * Let's try again later.
786 nicvf_snd_pkt_handler(nic, cq, (void *)cq_desc,
790 case CQE_TYPE_INVALID:
791 case CQE_TYPE_RX_SPLIT:
792 case CQE_TYPE_RX_TCP:
793 case CQE_TYPE_SEND_PTP:
801 "%s CQ%d processed_cqe %d work_done %d\n",
802 __func__, cq_idx, processed_cqe, work_done);
804 /* Ring doorbell to inform H/W to reuse processed CQEs */
805 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR, cq_idx, processed_cqe);
808 ((if_getdrvflags(nic->ifp) & IFF_DRV_RUNNING) != 0)) {
809 /* Re-enable TXQ if it was stopped earlier because the SQ was full */
810 if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
811 taskqueue_enqueue(sq->snd_taskq, &sq->snd_task);
815 * Flush any outstanding LRO work
818 rq = &nic->qs->rq[rq_idx];
820 tcp_lro_flush_all(lro);
822 NICVF_CMP_UNLOCK(cq);
825 /* Push received MBUFs to the stack */
826 while (!buf_ring_empty(cq->rx_br)) {
827 mbuf = buf_ring_dequeue_mc(cq->rx_br);
828 if (__predict_true(mbuf != NULL))
829 (*ifp->if_input)(ifp, mbuf);
836 * Qset error interrupt handler
838 * As of now only CQ errors are handled
841 nicvf_qs_err_task(void *arg, int pending)
844 struct queue_set *qs;
847 boolean_t enable = TRUE;
849 nic = (struct nicvf *)arg;
852 /* Deactivate network interface */
853 if_setdrvflagbits(nic->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
855 /* Check if it is CQ err */
856 for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
857 status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
859 if ((status & CQ_ERR_MASK) == 0)
861 /* Process already queued CQEs and reconfig CQ */
862 nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
863 nicvf_sq_disable(nic, qidx);
864 (void)nicvf_cq_intr_handler(nic, qidx);
865 nicvf_cmp_queue_config(nic, qs, qidx, enable);
866 nicvf_sq_free_used_descs(nic, &qs->sq[qidx], qidx);
867 nicvf_sq_enable(nic, &qs->sq[qidx], qidx);
868 nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
871 if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
872 /* Re-enable Qset error interrupt */
873 nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
877 nicvf_cmp_task(void *arg, int pending)
879 struct cmp_queue *cq;
883 cq = (struct cmp_queue *)arg;
886 /* Handle CQ descriptors */
887 cmp_err = nicvf_cq_intr_handler(nic, cq->idx);
888 if (__predict_false(cmp_err != 0)) {
890 * Schedule another thread here since we did not
891 * process the entire CQ due to Tx or Rx CQ parse error.
893 taskqueue_enqueue(cq->cmp_taskq, &cq->cmp_task);
897 nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx);
898 /* Re-enable interrupt (previously disabled in nicvf_intr_handler()) */
899 nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->idx);
903 /* Initialize completion queue */
905 nicvf_init_cmp_queue(struct nicvf *nic, struct cmp_queue *cq, int q_len,
910 /* Initialize lock */
911 snprintf(cq->mtx_name, sizeof(cq->mtx_name), "%s: CQ(%d) lock",
912 device_get_nameunit(nic->dev), qidx);
913 mtx_init(&cq->mtx, cq->mtx_name, NULL, MTX_DEF);
915 err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
916 NICVF_CQ_BASE_ALIGN_BYTES);
919 device_printf(nic->dev,
920 "Could not allocate DMA memory for CQ\n");
924 cq->desc = cq->dmem.base;
925 cq->thresh = pass1_silicon(nic->dev) ? 0 : CMP_QUEUE_CQE_THRESH;
928 nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;
930 cq->rx_br = buf_ring_alloc(CMP_QUEUE_LEN * 8, M_DEVBUF, M_WAITOK,
933 /* Allocate taskqueue */
934 NET_TASK_INIT(&cq->cmp_task, 0, nicvf_cmp_task, cq);
935 cq->cmp_taskq = taskqueue_create_fast("nicvf_cmp_taskq", M_WAITOK,
936 taskqueue_thread_enqueue, &cq->cmp_taskq);
937 taskqueue_start_threads(&cq->cmp_taskq, 1, PI_NET, "%s: cmp_taskq(%d)",
938 device_get_nameunit(nic->dev), qidx);
944 nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
950 * The completion queue itself should be disabled by now
951 * (ref. nicvf_snd_queue_config()).
952 * Ensure that it is disabled; panic otherwise.
955 panic("%s: Trying to free working CQ(%d)", __func__, cq->idx);
957 if (cq->cmp_taskq != NULL) {
959 while (taskqueue_cancel(cq->cmp_taskq, &cq->cmp_task, NULL) != 0)
960 taskqueue_drain(cq->cmp_taskq, &cq->cmp_task);
962 taskqueue_free(cq->cmp_taskq);
963 cq->cmp_taskq = NULL;
966 * The completion task may have re-enabled the interrupt,
967 * so disable it now that we have finished processing the
968 * completion task. It is safe to do so since the corresponding CQ
969 * was already disabled.
971 nicvf_disable_intr(nic, NICVF_INTR_CQ, cq->idx);
972 nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx);
975 nicvf_free_q_desc_mem(nic, &cq->dmem);
976 drbr_free(cq->rx_br, M_DEVBUF);
977 NICVF_CMP_UNLOCK(cq);
978 mtx_destroy(&cq->mtx);
979 memset(cq->mtx_name, 0, sizeof(cq->mtx_name));
983 nicvf_xmit_locked(struct snd_queue *sq)
990 NICVF_TX_LOCK_ASSERT(sq);
996 while ((next = drbr_peek(ifp, sq->br)) != NULL) {
997 /* Send a copy of the frame to the BPF listener */
998 ETHER_BPF_MTAP(ifp, next);
1000 err = nicvf_tx_mbuf_locked(sq, &next);
1003 drbr_advance(ifp, sq->br);
1005 drbr_putback(ifp, sq->br, next);
1009 drbr_advance(ifp, sq->br);
1015 nicvf_snd_task(void *arg, int pending)
1017 struct snd_queue *sq = (struct snd_queue *)arg;
1026 * Skip sending anything if the driver is not running,
1027 * the SQ is full, or the link is down.
1029 if (((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1030 IFF_DRV_RUNNING) || !nic->link_up)
1034 err = nicvf_xmit_locked(sq);
1035 NICVF_TX_UNLOCK(sq);
1038 taskqueue_enqueue(sq->snd_taskq, &sq->snd_task);
1041 /* Initialize transmit queue */
1043 nicvf_init_snd_queue(struct nicvf *nic, struct snd_queue *sq, int q_len,
1049 /* Initialize TX lock for this queue */
1050 snprintf(sq->mtx_name, sizeof(sq->mtx_name), "%s: SQ(%d) lock",
1051 device_get_nameunit(nic->dev), qidx);
1052 mtx_init(&sq->mtx, sq->mtx_name, NULL, MTX_DEF);
1055 /* Allocate buffer ring */
1056 sq->br = buf_ring_alloc(q_len / MIN_SQ_DESC_PER_PKT_XMIT, M_DEVBUF,
1057 M_NOWAIT, &sq->mtx);
1058 if (sq->br == NULL) {
1059 device_printf(nic->dev,
1060 "ERROR: Could not set up buf ring for SQ(%d)\n", qidx);
1065 /* Allocate DMA memory for Tx descriptors */
1066 err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
1067 NICVF_SQ_BASE_ALIGN_BYTES);
1069 device_printf(nic->dev,
1070 "Could not allocate DMA memory for SQ\n");
1074 sq->desc = sq->dmem.base;
1075 sq->head = sq->tail = 0;
1076 atomic_store_rel_int(&sq->free_cnt, q_len - 1);
1077 sq->thresh = SND_QUEUE_THRESH;
1082 * Allocate DMA maps for Tx buffers
1085 /* Create DMA tag first */
1086 err = bus_dma_tag_create(
1087 bus_get_dma_tag(nic->dev), /* parent tag */
1090 BUS_SPACE_MAXADDR, /* lowaddr */
1091 BUS_SPACE_MAXADDR, /* highaddr */
1092 NULL, NULL, /* filtfunc, filtfuncarg */
1093 NICVF_TSO_MAXSIZE, /* maxsize */
1094 NICVF_TSO_NSEGS, /* nsegments */
1095 MCLBYTES, /* maxsegsize */
1097 NULL, NULL, /* lockfunc, lockfuncarg */
1098 &sq->snd_buff_dmat); /* dmat */
1101 device_printf(nic->dev,
1102 "Failed to create busdma tag for Tx buffers\n");
1106 /* Allocate send buffers array */
1107 sq->snd_buff = malloc(sizeof(*sq->snd_buff) * q_len, M_NICVF,
1108 (M_NOWAIT | M_ZERO));
1109 if (sq->snd_buff == NULL) {
1110 device_printf(nic->dev,
1111 "Could not allocate memory for Tx buffers array\n");
1116 /* Now populate maps */
1117 for (i = 0; i < q_len; i++) {
1118 err = bus_dmamap_create(sq->snd_buff_dmat, 0,
1119 &sq->snd_buff[i].dmap);
1121 device_printf(nic->dev,
1122 "Failed to create DMA maps for Tx buffers\n");
1126 NICVF_TX_UNLOCK(sq);
1128 /* Allocate taskqueue */
1129 TASK_INIT(&sq->snd_task, 0, nicvf_snd_task, sq);
1130 sq->snd_taskq = taskqueue_create_fast("nicvf_snd_taskq", M_WAITOK,
1131 taskqueue_thread_enqueue, &sq->snd_taskq);
1132 taskqueue_start_threads(&sq->snd_taskq, 1, PI_NET, "%s: snd_taskq(%d)",
1133 device_get_nameunit(nic->dev), qidx);
1137 NICVF_TX_UNLOCK(sq);
1142 nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
1144 struct queue_set *qs = nic->qs;
1151 if (sq->snd_taskq != NULL) {
1153 while (taskqueue_cancel(sq->snd_taskq, &sq->snd_task, NULL) != 0)
1154 taskqueue_drain(sq->snd_taskq, &sq->snd_task);
1156 taskqueue_free(sq->snd_taskq);
1157 sq->snd_taskq = NULL;
1161 if (sq->snd_buff_dmat != NULL) {
1162 if (sq->snd_buff != NULL) {
1163 for (i = 0; i < qs->sq_len; i++) {
1164 m_freem(sq->snd_buff[i].mbuf);
1165 sq->snd_buff[i].mbuf = NULL;
1167 bus_dmamap_unload(sq->snd_buff_dmat,
1168 sq->snd_buff[i].dmap);
1169 err = bus_dmamap_destroy(sq->snd_buff_dmat,
1170 sq->snd_buff[i].dmap);
1172 * If bus_dmamap_destroy fails it can cause
1173 * a random panic later if the tag is also
1174 * destroyed in the process.
1177 ("%s: Could not destroy DMA map for SQ",
1182 free(sq->snd_buff, M_NICVF);
1184 err = bus_dma_tag_destroy(sq->snd_buff_dmat);
1186 ("%s: Trying to destroy BUSY DMA tag", __func__));
1189 /* Free private driver ring for this send queue */
1191 drbr_free(sq->br, M_DEVBUF);
1193 if (sq->dmem.base != NULL)
1194 nicvf_free_q_desc_mem(nic, &sq->dmem);
1196 NICVF_TX_UNLOCK(sq);
1197 /* Destroy Tx lock */
1198 mtx_destroy(&sq->mtx);
1199 memset(sq->mtx_name, 0, sizeof(sq->mtx_name));
1203 nicvf_reclaim_snd_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
1206 /* Disable send queue */
1207 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
1208 /* Check if SQ is stopped */
1209 if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
1211 /* Reset send queue */
1212 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
1216 nicvf_reclaim_rcv_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
1218 union nic_mbx mbx = {};
1220 /* Make sure all packets in the pipeline are written back into mem */
1221 mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
1222 nicvf_send_msg_to_pf(nic, &mbx);
1226 nicvf_reclaim_cmp_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
1229 /* Disable timer threshold (doesn't get reset upon CQ reset) */
1230 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
1231 /* Disable completion queue */
1232 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
1233 /* Reset completion queue */
1234 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
1238 nicvf_reclaim_rbdr(struct nicvf *nic, struct rbdr *rbdr, int qidx)
1240 uint64_t tmp, fifo_state;
1243 /* Save head and tail pointers for freeing up buffers */
1245 nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, qidx) >> 3;
1247 nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, qidx) >> 3;
1250 * If RBDR FIFO is in 'FAIL' state then do a reset first
1253 fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
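/* The RBDR FIFO state is kept in bits [63:62] of STATUS0; 0x3 means FAIL. */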
1254 if (((fifo_state >> 62) & 0x03) == 0x3) {
1255 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
1256 qidx, NICVF_RBDR_RESET);
1260 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
1261 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
1264 tmp = nicvf_queue_reg_read(nic,
1265 NIC_QSET_RBDR_0_1_PREFETCH_STATUS, qidx);
1266 if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
1272 device_printf(nic->dev,
1273 "Failed polling on prefetch status\n");
1277 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
1280 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
1282 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
1283 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
1287 /* Configures receive queue */
1289 nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
1290 int qidx, bool enable)
1292 union nic_mbx mbx = {};
1293 struct rcv_queue *rq;
1294 struct rq_cfg rq_cfg;
1296 struct lro_ctrl *lro;
1301 rq->enable = enable;
1305 /* Disable receive queue */
1306 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
1309 nicvf_reclaim_rcv_queue(nic, qs, qidx);
1310 /* Free LRO memory */
1312 rq->lro_enabled = FALSE;
1316 /* Configure LRO if enabled */
1317 rq->lro_enabled = FALSE;
1318 if ((if_getcapenable(ifp) & IFCAP_LRO) != 0) {
1319 if (tcp_lro_init(lro) != 0) {
1320 device_printf(nic->dev,
1321 "Failed to initialize LRO for RXQ%d\n", qidx);
1323 rq->lro_enabled = TRUE;
1324 lro->ifp = nic->ifp;
1328 rq->cq_qs = qs->vnic_id;
1330 rq->start_rbdr_qs = qs->vnic_id;
1331 rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
1332 rq->cont_rbdr_qs = qs->vnic_id;
1333 rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
1334 /* All writes of RBDR data to be loaded into L2 cache as well */
1337 /* Send a mailbox msg to PF to config RQ */
1338 mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
1339 mbx.rq.qs_num = qs->vnic_id;
1340 mbx.rq.rq_num = qidx;
1341 mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
1342 (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
1343 (rq->cont_qs_rbdr_idx << 8) | (rq->start_rbdr_qs << 1) |
1344 (rq->start_qs_rbdr_idx);
1345 nicvf_send_msg_to_pf(nic, &mbx);
1347 mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
1348 mbx.rq.cfg = (1UL << 63) | (1UL << 62) | (qs->vnic_id << 0);
1349 nicvf_send_msg_to_pf(nic, &mbx);
1353 * Enable CQ drop to reserve sufficient CQEs for all tx packets
1355 mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
1356 mbx.rq.cfg = (1UL << 62) | (RQ_CQ_DROP << 8);
1357 nicvf_send_msg_to_pf(nic, &mbx);
1359 nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);
1361 /* Enable Receive queue */
1364 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx,
1365 *(uint64_t *)&rq_cfg);
1368 /* Configures completion queue */
1370 nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
1371 int qidx, boolean_t enable)
1373 struct cmp_queue *cq;
1374 struct cq_cfg cq_cfg;
1377 cq->enable = enable;
1380 nicvf_reclaim_cmp_queue(nic, qs, qidx);
1384 /* Reset completion queue */
1385 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
1387 /* Set completion queue base address */
1388 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, qidx,
1389 (uint64_t)(cq->dmem.phys_base));
1391 /* Enable Completion queue */
1395 cq_cfg.qsize = CMP_QSIZE;
1397 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(uint64_t *)&cq_cfg);
1399 /* Set threshold value for interrupt generation */
1400 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
1401 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx,
1402 nic->cq_coalesce_usecs);
1405 /* Configures transmit queue */
1407 nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs, int qidx,
1410 union nic_mbx mbx = {};
1411 struct snd_queue *sq;
1412 struct sq_cfg sq_cfg;
1415 sq->enable = enable;
1418 nicvf_reclaim_snd_queue(nic, qs, qidx);
1422 /* Reset send queue */
1423 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
1425 sq->cq_qs = qs->vnic_id;
1428 /* Send a mailbox msg to PF to config SQ */
1429 mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
1430 mbx.sq.qs_num = qs->vnic_id;
1431 mbx.sq.sq_num = qidx;
1432 mbx.sq.sqs_mode = nic->sqs_mode;
1433 mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
1434 nicvf_send_msg_to_pf(nic, &mbx);
1436 /* Set queue base address */
1437 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, qidx,
1438 (uint64_t)(sq->dmem.phys_base));
1440 /* Enable send queue & set queue size */
1444 sq_cfg.qsize = SND_QSIZE;
1445 sq_cfg.tstmp_bgx_intf = 0;
1446 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(uint64_t *)&sq_cfg);
1448 /* Set threshold value for interrupt generation */
1449 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);
1452 /* Configures receive buffer descriptor ring */
1454 nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs, int qidx,
1458 struct rbdr_cfg rbdr_cfg;
1460 rbdr = &qs->rbdr[qidx];
1461 nicvf_reclaim_rbdr(nic, rbdr, qidx);
1465 /* Set descriptor base address */
1466 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, qidx,
1467 (uint64_t)(rbdr->dmem.phys_base));
1469 /* Enable RBDR & set queue size */
1470 /* Buffer size should be in multiples of 128 bytes */
1474 rbdr_cfg.qsize = RBDR_SIZE;
1475 rbdr_cfg.avg_con = 0;
1476 rbdr_cfg.lines = rbdr->dma_size / 128;
1477 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
1478 *(uint64_t *)&rbdr_cfg);
1481 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, qidx,
1484 /* Set threshold value for interrupt generation */
1485 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH, qidx,
1489 /* Requests PF to assign and enable Qset */
1491 nicvf_qset_config(struct nicvf *nic, boolean_t enable)
1493 union nic_mbx mbx = {};
1494 struct queue_set *qs;
1495 struct qs_cfg *qs_cfg;
1499 device_printf(nic->dev,
1500 "Qset is still not allocated, don't init queues\n");
1504 qs->enable = enable;
1505 qs->vnic_id = nic->vf_id;
1507 /* Send a mailbox msg to PF to config Qset */
1508 mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
1509 mbx.qs.num = qs->vnic_id;
1512 qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
1515 qs_cfg->vnic = qs->vnic_id;
1517 nicvf_send_msg_to_pf(nic, &mbx);
1521 nicvf_free_resources(struct nicvf *nic)
1524 struct queue_set *qs;
1528 * Remove QS error task first since it has to be dead
1529 * to safely free completion queue tasks.
1531 if (qs->qs_err_taskq != NULL) {
1532 /* Shut down QS error tasks */
1533 while (taskqueue_cancel(qs->qs_err_taskq,
1534 &qs->qs_err_task, NULL) != 0) {
1535 taskqueue_drain(qs->qs_err_taskq, &qs->qs_err_task);
1538 taskqueue_free(qs->qs_err_taskq);
1539 qs->qs_err_taskq = NULL;
1541 /* Free receive buffer descriptor ring */
1542 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
1543 nicvf_free_rbdr(nic, &qs->rbdr[qidx]);
1545 /* Free completion queue */
1546 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
1547 nicvf_free_cmp_queue(nic, &qs->cq[qidx]);
1549 /* Free send queue */
1550 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
1551 nicvf_free_snd_queue(nic, &qs->sq[qidx]);
1555 nicvf_alloc_resources(struct nicvf *nic)
1557 struct queue_set *qs = nic->qs;
1560 /* Alloc receive buffer descriptor ring */
1561 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
1562 if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
1563 DMA_BUFFER_LEN, qidx))
1567 /* Alloc send queue */
1568 for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
1569 if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len, qidx))
1573 /* Alloc completion queue */
1574 for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
1575 if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len, qidx))
1579 /* Allocate QS error taskqueue */
1580 NET_TASK_INIT(&qs->qs_err_task, 0, nicvf_qs_err_task, nic);
1581 qs->qs_err_taskq = taskqueue_create_fast("nicvf_qs_err_taskq", M_WAITOK,
1582 taskqueue_thread_enqueue, &qs->qs_err_taskq);
1583 taskqueue_start_threads(&qs->qs_err_taskq, 1, PI_NET, "%s: qs_taskq",
1584 device_get_nameunit(nic->dev));
1588 nicvf_free_resources(nic);
1593 nicvf_set_qset_resources(struct nicvf *nic)
1595 struct queue_set *qs;
1597 qs = malloc(sizeof(*qs), M_NICVF, (M_ZERO | M_WAITOK));
1600 /* Set count of each queue */
1601 qs->rbdr_cnt = RBDR_CNT;
1602 qs->rq_cnt = RCV_QUEUE_CNT;
1604 qs->sq_cnt = SND_QUEUE_CNT;
1605 qs->cq_cnt = CMP_QUEUE_CNT;
1607 /* Set queue lengths */
1608 qs->rbdr_len = RCV_BUF_COUNT;
1609 qs->sq_len = SND_QUEUE_LEN;
1610 qs->cq_len = CMP_QUEUE_LEN;
1612 nic->rx_queues = qs->rq_cnt;
1613 nic->tx_queues = qs->sq_cnt;
1619 nicvf_config_data_transfer(struct nicvf *nic, boolean_t enable)
1621 boolean_t disable = FALSE;
1622 struct queue_set *qs;
1630 if (nicvf_alloc_resources(nic) != 0)
1633 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
1634 nicvf_snd_queue_config(nic, qs, qidx, enable);
1635 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
1636 nicvf_cmp_queue_config(nic, qs, qidx, enable);
1637 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
1638 nicvf_rbdr_config(nic, qs, qidx, enable);
1639 for (qidx = 0; qidx < qs->rq_cnt; qidx++)
1640 nicvf_rcv_queue_config(nic, qs, qidx, enable);
1642 for (qidx = 0; qidx < qs->rq_cnt; qidx++)
1643 nicvf_rcv_queue_config(nic, qs, qidx, disable);
1644 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
1645 nicvf_rbdr_config(nic, qs, qidx, disable);
1646 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
1647 nicvf_snd_queue_config(nic, qs, qidx, disable);
1648 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
1649 nicvf_cmp_queue_config(nic, qs, qidx, disable);
1651 nicvf_free_resources(nic);
1658 * Get a free descriptor from the SQ;
1659 * returns the descriptor pointer & descriptor number
1662 nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
1667 atomic_subtract_int(&sq->free_cnt, desc_cnt);
1668 sq->tail += desc_cnt;
1669 sq->tail &= (sq->dmem.q_len - 1);
1674 /* Free descriptor back to SQ for future use */
1676 nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
1679 atomic_add_int(&sq->free_cnt, desc_cnt);
1680 sq->head += desc_cnt;
1681 sq->head &= (sq->dmem.q_len - 1);
1685 nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
1688 qentry &= (sq->dmem.q_len - 1);
1693 nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
1697 sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
1698 sq_cfg |= NICVF_SQ_EN;
1699 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
1700 /* Ring doorbell so that H/W restarts processing SQEs */
1701 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
1705 nicvf_sq_disable(struct nicvf *nic, int qidx)
1709 sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
1710 sq_cfg &= ~NICVF_SQ_EN;
1711 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
1715 nicvf_sq_free_used_descs(struct nicvf *nic, struct snd_queue *sq, int qidx)
1718 struct snd_buff *snd_buff;
1719 struct sq_hdr_subdesc *hdr;
1722 head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
1723 while (sq->head != head) {
1724 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
1725 if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
1726 nicvf_put_sq_desc(sq, 1);
1729 snd_buff = &sq->snd_buff[sq->head];
1730 if (snd_buff->mbuf != NULL) {
1731 bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
1732 m_freem(snd_buff->mbuf);
1733 sq->snd_buff[sq->head].mbuf = NULL;
1735 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
1737 NICVF_TX_UNLOCK(sq);
1741 * Add SQ HEADER subdescriptor.
1742 * First subdescriptor for every send descriptor.
1745 nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
1746 int subdesc_cnt, struct mbuf *mbuf, int len)
1749 struct sq_hdr_subdesc *hdr;
1750 struct ether_vlan_header *eh;
1756 int ehdrlen, iphlen, poff, proto;
1760 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
1761 sq->snd_buff[qentry].mbuf = mbuf;
1763 memset(hdr, 0, SND_QUEUE_DESC_SIZE);
1764 hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
1765 /* Enable notification via CQE after processing SQE */
1767 /* Number of subdescriptors following this one */
1768 hdr->subdesc_cnt = subdesc_cnt;
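/*
 * Parse the Ethernet and IP headers below to fill in the L3/L4 offsets and
 * the checksum/TSO fields consumed by the hardware offload.
 */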
1771 eh = mtod(mbuf, struct ether_vlan_header *);
1772 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1773 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1774 etype = ntohs(eh->evl_proto);
1776 ehdrlen = ETHER_HDR_LEN;
1777 etype = ntohs(eh->evl_encap_proto);
1783 case ETHERTYPE_IPV6:
1784 if (mbuf->m_len < ehdrlen + sizeof(struct ip6_hdr)) {
1785 mbuf = m_pullup(mbuf, ehdrlen + sizeof(struct ip6_hdr));
1786 sq->snd_buff[qentry].mbuf = mbuf;
1790 poff = ip6_lasthdr(mbuf, ehdrlen, IPPROTO_IPV6, &proto);
1798 if (mbuf->m_len < ehdrlen + sizeof(struct ip)) {
1799 mbuf = m_pullup(mbuf, ehdrlen + sizeof(struct ip));
1800 sq->snd_buff[qentry].mbuf = mbuf;
1804 if (mbuf->m_pkthdr.csum_flags & CSUM_IP)
1805 hdr->csum_l3 = 1; /* Enable IP csum calculation */
1807 ip = (struct ip *)(mbuf->m_data + ehdrlen);
1808 iphlen = ip->ip_hl << 2;
1809 poff = ehdrlen + iphlen;
1815 #if defined(INET6) || defined(INET)
1816 if (poff > 0 && mbuf->m_pkthdr.csum_flags != 0) {
1819 if ((mbuf->m_pkthdr.csum_flags & CSUM_TCP) == 0)
1822 if (mbuf->m_len < (poff + sizeof(struct tcphdr))) {
1823 mbuf = m_pullup(mbuf, poff + sizeof(struct tcphdr));
1824 sq->snd_buff[qentry].mbuf = mbuf;
1828 hdr->csum_l4 = SEND_L4_CSUM_TCP;
1831 if ((mbuf->m_pkthdr.csum_flags & CSUM_UDP) == 0)
1834 if (mbuf->m_len < (poff + sizeof(struct udphdr))) {
1835 mbuf = m_pullup(mbuf, poff + sizeof(struct udphdr));
1836 sq->snd_buff[qentry].mbuf = mbuf;
1840 hdr->csum_l4 = SEND_L4_CSUM_UDP;
1843 if ((mbuf->m_pkthdr.csum_flags & CSUM_SCTP) == 0)
1846 if (mbuf->m_len < (poff + sizeof(struct sctphdr))) {
1847 mbuf = m_pullup(mbuf, poff + sizeof(struct sctphdr));
1848 sq->snd_buff[qentry].mbuf = mbuf;
1852 hdr->csum_l4 = SEND_L4_CSUM_SCTP;
1857 hdr->l3_offset = ehdrlen;
1858 hdr->l4_offset = poff;
1861 if ((mbuf->m_pkthdr.tso_segsz != 0) && nic->hw_tso) {
1862 th = (struct tcphdr *)((caddr_t)(mbuf->m_data + poff));
1865 hdr->tso_start = poff + (th->th_off * 4);
1866 hdr->tso_max_paysize = mbuf->m_pkthdr.tso_segsz;
1867 hdr->inner_l3_offset = ehdrlen - 2;
1868 nic->drv_stats.tx_tso++;
1876 * SQ GATHER subdescriptor
1877 * Must follow HDR descriptor
1879 static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
1880 int size, uint64_t data)
1882 struct sq_gather_subdesc *gather;
1884 qentry &= (sq->dmem.q_len - 1);
1885 gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);
1887 memset(gather, 0, SND_QUEUE_DESC_SIZE);
1888 gather->subdesc_type = SQ_DESC_TYPE_GATHER;
1889 gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
1890 gather->size = size;
1891 gather->addr = data;
1894 /* Put an mbuf onto an SQ for packet transfer. */
1896 nicvf_tx_mbuf_locked(struct snd_queue *sq, struct mbuf **mbufp)
1898 bus_dma_segment_t segs[256];
1899 struct snd_buff *snd_buff;
1905 NICVF_TX_LOCK_ASSERT(sq);
1907 if (sq->free_cnt == 0)
1910 snd_buff = &sq->snd_buff[sq->tail];
1912 err = bus_dmamap_load_mbuf_sg(sq->snd_buff_dmat, snd_buff->dmap,
1913 *mbufp, segs, &nsegs, BUS_DMA_NOWAIT);
1914 if (__predict_false(err != 0)) {
1915 /* ARM64TODO: Add mbuf defragmenting if we lack maps */
1921 /* Set how many subdescriptors are required */
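/* One HDR subdescriptor plus one GATHER subdescriptor per DMA segment. */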
1922 subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT + nsegs - 1;
1923 if (subdesc_cnt > sq->free_cnt) {
1924 /* ARM64TODO: Add mbuf defragmentation if we lack descriptors */
1925 bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
1929 qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
1931 /* Add SQ header subdesc */
1932 err = nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, *mbufp,
1933 (*mbufp)->m_pkthdr.len);
1935 nicvf_put_sq_desc(sq, subdesc_cnt);
1936 bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
1937 if (err == ENOBUFS) {
1944 /* Add SQ gather subdescs */
1945 for (seg = 0; seg < nsegs; seg++) {
1946 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1947 nicvf_sq_add_gather_subdesc(sq, qentry, segs[seg].ds_len,
1951 /* Make sure all memory stores are done before ringing the doorbell */
1952 bus_dmamap_sync(sq->dmem.dmat, sq->dmem.dmap, BUS_DMASYNC_PREWRITE);
1954 dprintf(sq->nic->dev, "%s: sq->idx: %d, subdesc_cnt: %d\n",
1955 __func__, sq->idx, subdesc_cnt);
1956 /* Inform HW to xmit new packet */
1957 nicvf_queue_reg_write(sq->nic, NIC_QSET_SQ_0_7_DOOR,
1958 sq->idx, subdesc_cnt);
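/*
 * The per-fragment lengths in the CQE are 16-bit fields packed into 64-bit
 * words; frag_num() adjusts the index so big-endian hosts read the entries in
 * the order the hardware wrote them.
 */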
1962 static __inline u_int
1965 #if BYTE_ORDER == BIG_ENDIAN
1966 return ((i & ~3) + 3 - (i & 3));
1972 /* Returns MBUF for a received packet */
1974 nicvf_get_rcv_mbuf(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
1977 int payload_len = 0;
1979 struct mbuf *mbuf_frag;
1980 uint16_t *rb_lens = NULL;
1981 uint64_t *rb_ptrs = NULL;
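/*
 * Fragment lengths start at the 4th 64-bit word of the CQE and the buffer
 * pointers at the 7th (see the cqe_rx_t layout).
 */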
1984 rb_lens = (uint16_t *)((uint8_t *)cqe_rx + (3 * sizeof(uint64_t)));
1985 rb_ptrs = (uint64_t *)((uint8_t *)cqe_rx + (6 * sizeof(uint64_t)));
1987 dprintf(nic->dev, "%s rb_cnt %d rb0_ptr %lx rb0_sz %d\n",
1988 __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);
1990 for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
1991 payload_len = rb_lens[frag_num(frag)];
1993 /* First fragment */
1994 mbuf = nicvf_rb_ptr_to_mbuf(nic,
1995 (*rb_ptrs - cqe_rx->align_pad));
1996 mbuf->m_len = payload_len;
1997 mbuf->m_data += cqe_rx->align_pad;
1998 if_setrcvif(mbuf, nic->ifp);
2001 mbuf_frag = nicvf_rb_ptr_to_mbuf(nic, *rb_ptrs);
2002 m_append(mbuf, payload_len, mbuf_frag->m_data);
2005 /* Next buffer pointer */
2009 if (__predict_true(mbuf != NULL)) {
2011 mbuf->m_pkthdr.flowid = cqe_rx->rq_idx;
2012 M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE);
2013 if (__predict_true((if_getcapenable(nic->ifp) & IFCAP_RXCSUM) != 0)) {
2015 * HW by default verifies IP & TCP/UDP/SCTP checksums
2017 if (__predict_true(cqe_rx->l3_type == L3TYPE_IPV4)) {
2018 mbuf->m_pkthdr.csum_flags =
2019 (CSUM_IP_CHECKED | CSUM_IP_VALID);
2022 switch (cqe_rx->l4_type) {
2024 case L4TYPE_TCP: /* fall through */
2025 mbuf->m_pkthdr.csum_flags |=
2026 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2027 mbuf->m_pkthdr.csum_data = 0xffff;
2030 mbuf->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
2041 /* Enable interrupt */
2043 nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
2047 reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
2051 reg_val |= ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
2054 reg_val |= ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
2056 case NICVF_INTR_RBDR:
2057 reg_val |= ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
2059 case NICVF_INTR_PKT_DROP:
2060 reg_val |= (1UL << NICVF_INTR_PKT_DROP_SHIFT);
2062 case NICVF_INTR_TCP_TIMER:
2063 reg_val |= (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
2065 case NICVF_INTR_MBOX:
2066 reg_val |= (1UL << NICVF_INTR_MBOX_SHIFT);
2068 case NICVF_INTR_QS_ERR:
2069 reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT);
2072 device_printf(nic->dev,
2073 "Failed to enable interrupt: unknown type\n");
2077 nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val);
2080 /* Disable interrupt */
2082 nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
2084 uint64_t reg_val = 0;
2088 reg_val |= ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
2091 reg_val |= ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
2093 case NICVF_INTR_RBDR:
2094 reg_val |= ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
2096 case NICVF_INTR_PKT_DROP:
2097 reg_val |= (1UL << NICVF_INTR_PKT_DROP_SHIFT);
2099 case NICVF_INTR_TCP_TIMER:
2100 reg_val |= (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
2102 case NICVF_INTR_MBOX:
2103 reg_val |= (1UL << NICVF_INTR_MBOX_SHIFT);
2105 case NICVF_INTR_QS_ERR:
2106 reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT);
2109 device_printf(nic->dev,
2110 "Failed to disable interrupt: unknown type\n");
2114 nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val);
2117 /* Clear interrupt */
2119 nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
2121 uint64_t reg_val = 0;
2125 reg_val = ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
2128 reg_val = ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
2130 case NICVF_INTR_RBDR:
2131 reg_val = ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
2133 case NICVF_INTR_PKT_DROP:
2134 reg_val = (1UL << NICVF_INTR_PKT_DROP_SHIFT);
2136 case NICVF_INTR_TCP_TIMER:
2137 reg_val = (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
2139 case NICVF_INTR_MBOX:
2140 reg_val = (1UL << NICVF_INTR_MBOX_SHIFT);
2142 case NICVF_INTR_QS_ERR:
2143 reg_val = (1UL << NICVF_INTR_QS_ERR_SHIFT);
2146 device_printf(nic->dev,
2147 "Failed to clear interrupt: unknown type\n");
2151 nicvf_reg_write(nic, NIC_VF_INT, reg_val);
2154 /* Check if interrupt is enabled */
2156 nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
2159 uint64_t mask = 0xff;
2161 reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
2165 mask = ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
2168 mask = ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
2170 case NICVF_INTR_RBDR:
2171 mask = ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
2173 case NICVF_INTR_PKT_DROP:
2174 mask = NICVF_INTR_PKT_DROP_MASK;
2176 case NICVF_INTR_TCP_TIMER:
2177 mask = NICVF_INTR_TCP_TIMER_MASK;
2179 case NICVF_INTR_MBOX:
2180 mask = NICVF_INTR_MBOX_MASK;
2182 case NICVF_INTR_QS_ERR:
2183 mask = NICVF_INTR_QS_ERR_MASK;
2186 device_printf(nic->dev,
2187 "Failed to check interrupt enable: unknown type\n");
2191 return (reg_val & mask);
2195 nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
2197 struct rcv_queue *rq;
2199 #define GET_RQ_STATS(reg) \
2200 nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
2201 (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
2203 rq = &nic->qs->rq[rq_idx];
2204 rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
2205 rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
2209 nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
2211 struct snd_queue *sq;
2213 #define GET_SQ_STATS(reg) \
2214 nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
2215 (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
2217 sq = &nic->qs->sq[sq_idx];
2218 sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
2219 sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
2222 /* Check for errors in the receive completion queue entry */
2224 nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cmp_queue *cq,
2225 struct cqe_rx_t *cqe_rx)
2227 struct nicvf_hw_stats *stats = &nic->hw_stats;
2228 struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
2230 if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
2231 drv_stats->rx_frames_ok++;
2235 switch (cqe_rx->err_opcode) {
2236 case CQ_RX_ERROP_RE_PARTIAL:
2237 stats->rx_bgx_truncated_pkts++;
2239 case CQ_RX_ERROP_RE_JABBER:
2240 stats->rx_jabber_errs++;
2242 case CQ_RX_ERROP_RE_FCS:
2243 stats->rx_fcs_errs++;
2245 case CQ_RX_ERROP_RE_RX_CTL:
2246 stats->rx_bgx_errs++;
2248 case CQ_RX_ERROP_PREL2_ERR:
2249 stats->rx_prel2_errs++;
2251 case CQ_RX_ERROP_L2_MAL:
2252 stats->rx_l2_hdr_malformed++;
2254 case CQ_RX_ERROP_L2_OVERSIZE:
2255 stats->rx_oversize++;
2257 case CQ_RX_ERROP_L2_UNDERSIZE:
2258 stats->rx_undersize++;
2260 case CQ_RX_ERROP_L2_LENMISM:
2261 stats->rx_l2_len_mismatch++;
2263 case CQ_RX_ERROP_L2_PCLP:
2264 stats->rx_l2_pclp++;
2266 case CQ_RX_ERROP_IP_NOT:
2267 stats->rx_ip_ver_errs++;
2269 case CQ_RX_ERROP_IP_CSUM_ERR:
2270 stats->rx_ip_csum_errs++;
2272 case CQ_RX_ERROP_IP_MAL:
2273 stats->rx_ip_hdr_malformed++;
2275 case CQ_RX_ERROP_IP_MALD:
2276 stats->rx_ip_payload_malformed++;
2278 case CQ_RX_ERROP_IP_HOP:
2279 stats->rx_ip_ttl_errs++;
2281 case CQ_RX_ERROP_L3_PCLP:
2282 stats->rx_l3_pclp++;
2284 case CQ_RX_ERROP_L4_MAL:
2285 stats->rx_l4_malformed++;
2287 case CQ_RX_ERROP_L4_CHK:
2288 stats->rx_l4_csum_errs++;
2290 case CQ_RX_ERROP_UDP_LEN:
2291 stats->rx_udp_len_errs++;
2293 case CQ_RX_ERROP_L4_PORT:
2294 stats->rx_l4_port_errs++;
2296 case CQ_RX_ERROP_TCP_FLAG:
2297 stats->rx_tcp_flag_errs++;
2299 case CQ_RX_ERROP_TCP_OFFSET:
2300 stats->rx_tcp_offset_errs++;
2302 case CQ_RX_ERROP_L4_PCLP:
2303 stats->rx_l4_pclp++;
2305 case CQ_RX_ERROP_RBDR_TRUNC:
2306 stats->rx_truncated_pkts++;
2313 /* Check for errors in the send completion queue entry */
2315 nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cmp_queue *cq,
2316 struct cqe_send_t *cqe_tx)
2318 struct cmp_queue_stats *stats = &cq->stats;
2320 switch (cqe_tx->send_status) {
2321 case CQ_TX_ERROP_GOOD:
2324 case CQ_TX_ERROP_DESC_FAULT:
2325 stats->tx.desc_fault++;
2327 case CQ_TX_ERROP_HDR_CONS_ERR:
2328 stats->tx.hdr_cons_err++;
2330 case CQ_TX_ERROP_SUBDC_ERR:
2331 stats->tx.subdesc_err++;
2333 case CQ_TX_ERROP_IMM_SIZE_OFLOW:
2334 stats->tx.imm_size_oflow++;
2336 case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
2337 stats->tx.data_seq_err++;
2339 case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
2340 stats->tx.mem_seq_err++;
2342 case CQ_TX_ERROP_LOCK_VIOL:
2343 stats->tx.lock_viol++;
2345 case CQ_TX_ERROP_DATA_FAULT:
2346 stats->tx.data_fault++;
2348 case CQ_TX_ERROP_TSTMP_CONFLICT:
2349 stats->tx.tstmp_conflict++;
2351 case CQ_TX_ERROP_TSTMP_TIMEOUT:
2352 stats->tx.tstmp_timeout++;
2354 case CQ_TX_ERROP_MEM_FAULT:
2355 stats->tx.mem_fault++;
2357 case CQ_TX_ERROP_CK_OVERLAP:
2358 stats->tx.csum_overlap++;
2360 case CQ_TX_ERROP_CK_OFLOW:
2361 stats->tx.csum_overflow++;