2 * Copyright (C) 2015 Cavium Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
33 #include "opt_inet6.h"
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/bitset.h>
38 #include <sys/bitstring.h>
39 #include <sys/buf_ring.h>
41 #include <sys/endian.h>
42 #include <sys/kernel.h>
43 #include <sys/malloc.h>
44 #include <sys/module.h>
46 #include <sys/pciio.h>
49 #include <sys/sockio.h>
50 #include <sys/socket.h>
51 #include <sys/cpuset.h>
53 #include <sys/mutex.h>
55 #include <sys/taskqueue.h>
60 #include <machine/bus.h>
61 #include <machine/vmparam.h>
64 #include <net/if_var.h>
65 #include <net/if_media.h>
68 #include <net/ethernet.h>
70 #include <netinet/in_systm.h>
71 #include <netinet/in.h>
72 #include <netinet/if_ether.h>
73 #include <netinet/ip.h>
74 #include <netinet/ip6.h>
75 #include <netinet/sctp.h>
76 #include <netinet/tcp.h>
77 #include <netinet/tcp_lro.h>
78 #include <netinet/udp.h>
80 #include <dev/pci/pcireg.h>
81 #include <dev/pci/pcivar.h>
83 #include "thunder_bgx.h"
87 #include "nicvf_queues.h"
93 #define dprintf(dev, fmt, ...) device_printf(dev, fmt, ##__VA_ARGS__)
95 #define dprintf(dev, fmt, ...)
98 MALLOC_DECLARE(M_NICVF);
100 static void nicvf_free_snd_queue(struct nicvf *, struct snd_queue *);
101 static struct mbuf * nicvf_get_rcv_mbuf(struct nicvf *, struct cqe_rx_t *);
102 static void nicvf_sq_disable(struct nicvf *, int);
103 static void nicvf_sq_enable(struct nicvf *, struct snd_queue *, int);
104 static void nicvf_put_sq_desc(struct snd_queue *, int);
105 static void nicvf_cmp_queue_config(struct nicvf *, struct queue_set *, int,
107 static void nicvf_sq_free_used_descs(struct nicvf *, struct snd_queue *, int);
109 static int nicvf_tx_mbuf_locked(struct snd_queue *, struct mbuf **);
111 static void nicvf_rbdr_task(void *, int);
112 static void nicvf_rbdr_task_nowait(void *, int);
120 #define GET_RBUF_INFO(x) ((struct rbuf_info *)((x) - NICVF_RCV_BUF_ALIGN_BYTES))
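/*
 * Illustrative sketch (not part of the driver): the receive buffer is laid
 * out so that struct rbuf_info occupies the 128-byte line directly before
 * the payload handed to the HW.  Given the DMAP'ed virtual address of the
 * payload, the metadata is recovered by stepping back one alignment unit:
 *
 *	uint8_t *payload = (uint8_t *)PHYS_TO_DMAP(rb_ptr);
 *	struct rbuf_info *ri = GET_RBUF_INFO(payload);
 *	// ri == (struct rbuf_info *)(payload - NICVF_RCV_BUF_ALIGN_BYTES)
 */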
122 /* Poll a register for a specific value */
123 static int nicvf_poll_reg(struct nicvf *nic, int qidx,
124 uint64_t reg, int bit_pos, int bits, int val)
130 bit_mask = (1UL << bits) - 1;
131 bit_mask = (bit_mask << bit_pos);
134 reg_val = nicvf_queue_reg_read(nic, reg, qidx);
135 if (((reg_val & bit_mask) >> bit_pos) == val)
141 device_printf(nic->dev, "Poll on reg 0x%lx failed\n", reg);
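/*
 * Worked example (illustrative): the RBDR reclaim path below polls bits
 * <63:62> of NIC_QSET_RBDR_0_1_STATUS0 with bit_pos = 62 and bits = 2, so
 * bit_mask = ((1UL << 2) - 1) << 62 = 0xC000000000000000UL and the poll
 * succeeds once (reg_val & bit_mask) >> 62 equals the expected value.
 */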
145 /* Callback for bus_dmamap_load() */
147 nicvf_dmamap_q_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
151 KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
153 *paddr = segs->ds_addr;
156 /* Allocate memory for a queue's descriptors */
158 nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
159 int q_len, int desc_size, int align_bytes)
163 /* Create DMA tag first */
164 err = bus_dma_tag_create(
165 bus_get_dma_tag(nic->dev), /* parent tag */
166 align_bytes, /* alignment */
168 BUS_SPACE_MAXADDR, /* lowaddr */
169 BUS_SPACE_MAXADDR, /* highaddr */
170 NULL, NULL, /* filtfunc, filtfuncarg */
171 (q_len * desc_size), /* maxsize */
173 (q_len * desc_size), /* maxsegsize */
175 NULL, NULL, /* lockfunc, lockfuncarg */
176 &dmem->dmat); /* dmat */
179 device_printf(nic->dev,
180 "Failed to create busdma tag for descriptors ring\n");
184 /* Allocate a segment of contiguous DMA-safe memory */
185 err = bus_dmamem_alloc(
186 dmem->dmat, /* DMA tag */
187 &dmem->base, /* virtual address */
188 (BUS_DMA_NOWAIT | BUS_DMA_ZERO), /* flags */
189 &dmem->dmap); /* DMA map */
191 device_printf(nic->dev, "Failed to allocate DMA safe memory for "
192 "descriptor ring\n");
196 err = bus_dmamap_load(
200 (q_len * desc_size), /* allocation size */
201 nicvf_dmamap_q_cb, /* map to DMA address cb. */
202 &dmem->phys_base, /* physical address */
205 device_printf(nic->dev,
206 "Cannot load DMA map of descriptors ring\n");
211 dmem->size = (desc_size * q_len);
216 bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap);
219 err_dmat = bus_dma_tag_destroy(dmem->dmat);
221 KASSERT(err_dmat == 0,
222 ("%s: Trying to destroy BUSY DMA tag", __func__));
227 /* Free queue's descriptor memory */
229 nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
233 if ((dmem == NULL) || (dmem->base == NULL))
237 bus_dmamap_sync(dmem->dmat, dmem->dmap, BUS_DMASYNC_POSTREAD);
238 bus_dmamap_unload(dmem->dmat, dmem->dmap);
239 /* Free DMA memory */
240 bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap);
241 /* Destroy DMA tag */
242 err = bus_dma_tag_destroy(dmem->dmat);
245 ("%s: Trying to destroy BUSY DMA tag", __func__));
252 * Allocate a buffer for packet reception.
253 * HW returns the memory address where the packet is DMA'ed, not a pointer
254 * into the RBDR ring, so save the buffer address at the start of the
255 * fragment and align the start address to a cache-aligned address.
258 nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
259 bus_dmamap_t dmap, int mflags, uint32_t buf_len, bus_addr_t *rbuf)
262 struct rbuf_info *rinfo;
263 bus_dma_segment_t segs[1];
267 mbuf = m_getjcl(mflags, MT_DATA, M_PKTHDR, MCLBYTES);
272 * The length is equal to the actual length + one 128-byte line
273 * used as room for the rbuf_info structure.
275 mbuf->m_len = mbuf->m_pkthdr.len = buf_len;
277 err = bus_dmamap_load_mbuf_sg(rbdr->rbdr_buff_dmat, dmap, mbuf, segs,
278 &nsegs, BUS_DMA_NOWAIT);
280 device_printf(nic->dev,
281 "Failed to map mbuf into DMA visible memory, err: %d\n",
284 bus_dmamap_destroy(rbdr->rbdr_buff_dmat, dmap);
288 panic("Unexpected number of DMA segments for RB: %d", nsegs);
290 * Now use the room for the rbuf_info structure
291 * and adjust the mbuf data pointer and length.
293 rinfo = (struct rbuf_info *)mbuf->m_data;
294 m_adj(mbuf, NICVF_RCV_BUF_ALIGN_BYTES);
296 rinfo->dmat = rbdr->rbdr_buff_dmat;
300 *rbuf = segs[0].ds_addr + NICVF_RCV_BUF_ALIGN_BYTES;
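/*
 * Resulting buffer layout (illustrative):
 *
 *	segs[0].ds_addr                     *rbuf (address given to HW)
 *	|                                   |
 *	+-----------------------------------+--------------------------+
 *	| rbuf_info (one 128-byte line)     | packet payload           |
 *	+-----------------------------------+--------------------------+
 *	                                    ^ m_data after m_adj()
 */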
305 /* Retrieve mbuf for received packet */
307 nicvf_rb_ptr_to_mbuf(struct nicvf *nic, bus_addr_t rb_ptr)
310 struct rbuf_info *rinfo;
312 /* Get buffer start address and alignment offset */
313 rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(rb_ptr));
315 /* Now retrieve mbuf to give to stack */
317 if (__predict_false(mbuf == NULL)) {
318 panic("%s: Received packet fragment with NULL mbuf",
319 device_get_nameunit(nic->dev));
322 * Clear the mbuf in the descriptor to indicate
323 * that this slot is processed and free to use.
327 bus_dmamap_sync(rinfo->dmat, rinfo->dmap, BUS_DMASYNC_POSTREAD);
328 bus_dmamap_unload(rinfo->dmat, rinfo->dmap);
333 /* Allocate RBDR ring and populate receive buffers */
335 nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr, int ring_len,
336 int buf_size, int qidx)
340 struct rbdr_entry_t *desc;
344 /* Allocate rbdr descriptors ring */
345 err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
346 sizeof(struct rbdr_entry_t), NICVF_RCV_BUF_ALIGN_BYTES);
348 device_printf(nic->dev,
349 "Failed to create RBDR descriptors ring\n");
353 rbdr->desc = rbdr->dmem.base;
355 * Buffer size has to be a multiple of 128 bytes.
356 * Make room for metadata the size of one line (128 bytes).
358 rbdr->dma_size = buf_size - NICVF_RCV_BUF_ALIGN_BYTES;
360 rbdr->thresh = RBDR_THRESH;
365 * Create DMA tag for Rx buffers.
366 * Each map created using this tag is intended to store Rx payload for
367 * one fragment and one header structure containing rbuf_info (thus the
368 * additional 128-byte line, since the RB must be a multiple of 128 bytes).
371 if (buf_size > MCLBYTES) {
372 device_printf(nic->dev,
373 "Buffer size to large for mbuf cluster\n");
376 err = bus_dma_tag_create(
377 bus_get_dma_tag(nic->dev), /* parent tag */
378 NICVF_RCV_BUF_ALIGN_BYTES, /* alignment */
380 DMAP_MAX_PHYSADDR, /* lowaddr */
381 DMAP_MIN_PHYSADDR, /* highaddr */
382 NULL, NULL, /* filtfunc, filtfuncarg */
383 roundup2(buf_size, MCLBYTES), /* maxsize */
385 roundup2(buf_size, MCLBYTES), /* maxsegsize */
387 NULL, NULL, /* lockfunc, lockfuncarg */
388 &rbdr->rbdr_buff_dmat); /* dmat */
391 device_printf(nic->dev,
392 "Failed to create busdma tag for RBDR buffers\n");
396 rbdr->rbdr_buff_dmaps = malloc(sizeof(*rbdr->rbdr_buff_dmaps) *
397 ring_len, M_NICVF, (M_WAITOK | M_ZERO));
399 for (idx = 0; idx < ring_len; idx++) {
400 err = bus_dmamap_create(rbdr->rbdr_buff_dmat, 0, &dmap);
402 device_printf(nic->dev,
403 "Failed to create DMA map for RB\n");
406 rbdr->rbdr_buff_dmaps[idx] = dmap;
408 err = nicvf_alloc_rcv_buffer(nic, rbdr, dmap, M_WAITOK,
409 DMA_BUFFER_LEN, &rbuf);
413 desc = GET_RBDR_DESC(rbdr, idx);
414 desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN);
417 /* Allocate taskqueue */
418 TASK_INIT(&rbdr->rbdr_task, 0, nicvf_rbdr_task, rbdr);
419 TASK_INIT(&rbdr->rbdr_task_nowait, 0, nicvf_rbdr_task_nowait, rbdr);
420 rbdr->rbdr_taskq = taskqueue_create_fast("nicvf_rbdr_taskq", M_WAITOK,
421 taskqueue_thread_enqueue, &rbdr->rbdr_taskq);
422 taskqueue_start_threads(&rbdr->rbdr_taskq, 1, PI_NET, "%s: rbdr_taskq",
423 device_get_nameunit(nic->dev));
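/*
 * Note (illustrative): an RBDR entry stores the buffer address shifted
 * right by NICVF_RCV_BUF_ALIGN, which is lossless because every buffer is
 * NICVF_RCV_BUF_ALIGN_BYTES-aligned.  The reclaim path below reverses it:
 *
 *	desc->buf_addr = rbuf >> NICVF_RCV_BUF_ALIGN;      // store
 *	buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;  // recover
 */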
428 /* Free RBDR ring and its receive buffers */
430 nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
433 struct queue_set *qs;
434 struct rbdr_entry_t *desc;
435 struct rbuf_info *rinfo;
442 if ((qs == NULL) || (rbdr == NULL))
445 rbdr->enable = FALSE;
446 if (rbdr->rbdr_taskq != NULL) {
448 while (taskqueue_cancel(rbdr->rbdr_taskq,
449 &rbdr->rbdr_task_nowait, NULL) != 0) {
450 /* Finish the nowait task first */
451 taskqueue_drain(rbdr->rbdr_taskq,
452 &rbdr->rbdr_task_nowait);
454 taskqueue_free(rbdr->rbdr_taskq);
455 rbdr->rbdr_taskq = NULL;
457 while (taskqueue_cancel(taskqueue_thread,
458 &rbdr->rbdr_task, NULL) != 0) {
459 /* Now finish the sleepable task */
460 taskqueue_drain(taskqueue_thread, &rbdr->rbdr_task);
465 * Free all of the memory under the RB descriptors.
466 * There are assumptions here:
467 * 1. Corresponding RBDR is disabled
468 * - it is safe to operate using head and tail indexes
469 * 2. All buffers that were received are properly freed by
470 * the receive handler
471 * - there is no need to unload the DMA map and free the mbuf for
472 * descriptors other than the unused ones
474 if (rbdr->rbdr_buff_dmat != NULL) {
477 while (head != tail) {
478 desc = GET_RBDR_DESC(rbdr, head);
479 buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
480 rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(buf_addr));
481 bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap);
483 /* This will destroy everything including rinfo! */
486 head &= (rbdr->dmem.q_len - 1);
488 /* Free tail descriptor */
489 desc = GET_RBDR_DESC(rbdr, tail);
490 buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
491 rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(buf_addr));
492 bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap);
494 /* This will destroy everything including rinfo! */
497 /* Destroy DMA maps */
498 for (idx = 0; idx < qs->rbdr_len; idx++) {
499 if (rbdr->rbdr_buff_dmaps[idx] == NULL)
501 err = bus_dmamap_destroy(rbdr->rbdr_buff_dmat,
502 rbdr->rbdr_buff_dmaps[idx]);
504 ("%s: Could not destroy DMA map for RB, desc: %d",
506 rbdr->rbdr_buff_dmaps[idx] = NULL;
509 /* Now destroy the tag */
510 err = bus_dma_tag_destroy(rbdr->rbdr_buff_dmat);
512 ("%s: Trying to destroy BUSY DMA tag", __func__));
519 nicvf_free_q_desc_mem(nic, &rbdr->dmem);
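/*
 * The cancel-then-drain loops above are the usual taskqueue(9) teardown
 * idiom for a task that may be running or may re-enqueue itself:
 * taskqueue_cancel() returns non-zero while the task is executing, in
 * which case taskqueue_drain() waits for the current run to complete
 * before cancellation is retried.  Generic sketch:
 *
 *	while (taskqueue_cancel(tq, &task, NULL) != 0)
 *		taskqueue_drain(tq, &task);
 */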
523 * Refill receive buffer descriptors with new buffers.
526 nicvf_refill_rbdr(struct rbdr *rbdr, int mflags)
529 struct queue_set *qs;
533 struct rbdr_entry_t *desc;
536 boolean_t rb_alloc_fail;
539 rb_alloc_fail = TRUE;
543 rbdr_idx = rbdr->idx;
545 /* Check if it's enabled */
549 /* Get the number of descriptors to be refilled */
550 qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
552 /* Doorbell can be rung with a max of ring size minus 1 */
553 if (qcount >= (qs->rbdr_len - 1)) {
554 rb_alloc_fail = FALSE;
557 refill_rb_cnt = qs->rbdr_len - qcount - 1;
559 /* Start filling descs from tail */
560 tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
561 while (refill_rb_cnt) {
563 tail &= (rbdr->dmem.q_len - 1);
565 dmap = rbdr->rbdr_buff_dmaps[tail];
566 if (nicvf_alloc_rcv_buffer(nic, rbdr, dmap, mflags,
567 DMA_BUFFER_LEN, &rbuf)) {
568 /* Something went wrong. Give up. */
571 desc = GET_RBDR_DESC(rbdr, tail);
572 desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN);
577 /* make sure all memory stores are done before ringing doorbell */
580 /* Check if buffer allocation failed */
581 if (refill_rb_cnt == 0)
582 rb_alloc_fail = FALSE;
585 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
588 if (!rb_alloc_fail) {
590 * Re-enable RBDR interrupts only
591 * if buffer allocation was successful.
593 nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);
601 /* Refill RBs even if sleep is needed to reclaim memory */
603 nicvf_rbdr_task(void *arg, int pending)
608 rbdr = (struct rbdr *)arg;
610 err = nicvf_refill_rbdr(rbdr, M_WAITOK);
611 if (__predict_false(err != 0)) {
612 panic("%s: Failed to refill RBs even when sleep enabled",
617 /* Refill RBs as soon as possible without waiting */
619 nicvf_rbdr_task_nowait(void *arg, int pending)
624 rbdr = (struct rbdr *)arg;
626 err = nicvf_refill_rbdr(rbdr, M_NOWAIT);
629 * Schedule another, sleepable kernel thread
630 * that is guaranteed to refill the buffers.
632 taskqueue_enqueue(taskqueue_thread, &rbdr->rbdr_task);
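/*
 * Refill strategy in short (as implemented by the two handlers above):
 * the fast per-RBDR task tries an M_NOWAIT refill first and, only if
 * allocation fails, falls back to the sleepable task on
 * taskqueue_thread, where waiting for memory is allowed and failure is
 * fatal:
 *
 *	if (nicvf_refill_rbdr(rbdr, M_NOWAIT) != 0)
 *		taskqueue_enqueue(taskqueue_thread, &rbdr->rbdr_task);
 */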
637 nicvf_rcv_pkt_handler(struct nicvf *nic, struct cmp_queue *cq,
638 struct cqe_rx_t *cqe_rx, int cqe_type)
641 struct rcv_queue *rq;
645 rq_idx = cqe_rx->rq_idx;
646 rq = &nic->qs->rq[rq_idx];
648 /* Check for errors */
649 err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
650 if (err && !cqe_rx->rb_cnt)
653 mbuf = nicvf_get_rcv_mbuf(nic, cqe_rx);
655 dprintf(nic->dev, "Packet not received\n");
659 /* If error packet */
665 if (rq->lro_enabled &&
666 ((cqe_rx->l3_type == L3TYPE_IPV4) && (cqe_rx->l4_type == L4TYPE_TCP)) &&
667 (mbuf->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
668 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
670 * At this point it is known that there are no errors in the
671 * packet. Attempt to LRO enqueue. Send to stack if no resources
674 if ((rq->lro.lro_cnt != 0) &&
675 (tcp_lro_rx(&rq->lro, mbuf, 0) == 0))
679 * Push this packet to the stack later to avoid
680 * unlocking completion task in the middle of work.
682 err = buf_ring_enqueue(cq->rx_br, mbuf);
685 * Failed to enqueue this mbuf.
686 * We don't drop it, just schedule another task.
695 nicvf_snd_pkt_handler(struct nicvf *nic, struct cmp_queue *cq,
696 struct cqe_send_t *cqe_tx, int cqe_type)
700 struct snd_queue *sq;
701 struct sq_hdr_subdesc *hdr;
704 sq = &nic->qs->sq[cqe_tx->sq_idx];
705 /* Avoid blocking here since we hold a non-sleepable NICVF_CMP_LOCK */
706 if (NICVF_TX_TRYLOCK(sq) == 0)
709 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
710 if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
716 "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n",
717 __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
718 cqe_tx->sqe_ptr, hdr->subdesc_cnt);
720 dmap = (bus_dmamap_t)sq->snd_buff[cqe_tx->sqe_ptr].dmap;
721 bus_dmamap_unload(sq->snd_buff_dmat, dmap);
723 mbuf = (struct mbuf *)sq->snd_buff[cqe_tx->sqe_ptr].mbuf;
726 sq->snd_buff[cqe_tx->sqe_ptr].mbuf = NULL;
727 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
730 nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
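/*
 * Completion accounting sketch: cqe_tx->sqe_ptr points at the HEADER
 * subdescriptor of the finished packet and hdr->subdesc_cnt counts the
 * subdescriptors that follow it, so the whole packet's worth of SQ slots
 * is returned in one call:
 *
 *	nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);	// header + rest
 */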
737 nicvf_cq_intr_handler(struct nicvf *nic, uint8_t cq_idx)
741 int processed_cqe, work_done = 0, tx_done = 0;
742 int cqe_count, cqe_head;
743 struct queue_set *qs = nic->qs;
744 struct cmp_queue *cq = &qs->cq[cq_idx];
745 struct snd_queue *sq = &qs->sq[cq_idx];
746 struct rcv_queue *rq;
747 struct cqe_rx_t *cq_desc;
748 struct lro_ctrl *lro;
755 /* Get the number of valid CQ entries to process */
756 cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
757 cqe_count &= CQ_CQE_COUNT;
761 /* Get head of the valid CQ entries */
762 cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
765 dprintf(nic->dev, "%s CQ%d cqe_count %d cqe_head %d\n",
766 __func__, cq_idx, cqe_count, cqe_head);
767 while (processed_cqe < cqe_count) {
768 /* Get the CQ descriptor */
769 cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
771 cqe_head &= (cq->dmem.q_len - 1);
772 /* Prefetch next CQ descriptor */
773 __builtin_prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));
775 dprintf(nic->dev, "CQ%d cq_desc->cqe_type %d\n", cq_idx,
777 switch (cq_desc->cqe_type) {
779 cmp_err = nicvf_rcv_pkt_handler(nic, cq, cq_desc,
781 if (__predict_false(cmp_err != 0)) {
783 * Oops. Cannot finish now.
784 * Let's try again later.
791 cmp_err = nicvf_snd_pkt_handler(nic, cq,
792 (void *)cq_desc, CQE_TYPE_SEND);
793 if (__predict_false(cmp_err != 0)) {
795 * Oops. Cannot finish now.
796 * Let's try again later.
803 case CQE_TYPE_INVALID:
804 case CQE_TYPE_RX_SPLIT:
805 case CQE_TYPE_RX_TCP:
806 case CQE_TYPE_SEND_PTP:
814 "%s CQ%d processed_cqe %d work_done %d\n",
815 __func__, cq_idx, processed_cqe, work_done);
817 /* Ring doorbell to inform H/W to reuse processed CQEs */
818 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR, cq_idx, processed_cqe);
821 ((if_getdrvflags(nic->ifp) & IFF_DRV_RUNNING) != 0)) {
822 /* Re-enable TXQ if it was stopped earlier due to the SQ being full */
823 if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
824 taskqueue_enqueue(sq->snd_taskq, &sq->snd_task);
828 * Flush any outstanding LRO work
831 rq = &nic->qs->rq[rq_idx];
833 tcp_lro_flush_all(lro);
835 NICVF_CMP_UNLOCK(cq);
838 /* Push received MBUFs to the stack */
839 while (!buf_ring_empty(cq->rx_br)) {
840 mbuf = buf_ring_dequeue_mc(cq->rx_br);
841 if (__predict_true(mbuf != NULL))
842 (*ifp->if_input)(ifp, mbuf);
849 * Qset error interrupt handler
851 * As of now only CQ errors are handled
854 nicvf_qs_err_task(void *arg, int pending)
857 struct queue_set *qs;
860 boolean_t enable = TRUE;
862 nic = (struct nicvf *)arg;
865 /* Deactivate network interface */
866 if_setdrvflagbits(nic->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
868 /* Check if it is a CQ error */
869 for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
870 status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
872 if ((status & CQ_ERR_MASK) == 0)
874 /* Process already queued CQEs and reconfig CQ */
875 nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
876 nicvf_sq_disable(nic, qidx);
877 (void)nicvf_cq_intr_handler(nic, qidx);
878 nicvf_cmp_queue_config(nic, qs, qidx, enable);
879 nicvf_sq_free_used_descs(nic, &qs->sq[qidx], qidx);
880 nicvf_sq_enable(nic, &qs->sq[qidx], qidx);
881 nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
884 if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
885 /* Re-enable Qset error interrupt */
886 nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
890 nicvf_cmp_task(void *arg, int pending)
892 struct cmp_queue *cq;
896 cq = (struct cmp_queue *)arg;
899 /* Handle CQ descriptors */
900 cmp_err = nicvf_cq_intr_handler(nic, cq->idx);
901 if (__predict_false(cmp_err != 0)) {
903 * Schedule another thread here since we did not
904 * process the entire CQ due to Tx or Rx CQ parse error.
906 taskqueue_enqueue(cq->cmp_taskq, &cq->cmp_task);
910 nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx);
911 /* Re-enable interrupt (previously disabled in nicvf_intr_handler()) */
912 nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->idx);
916 /* Initialize completion queue */
918 nicvf_init_cmp_queue(struct nicvf *nic, struct cmp_queue *cq, int q_len,
923 /* Initialize lock */
924 snprintf(cq->mtx_name, sizeof(cq->mtx_name), "%s: CQ(%d) lock",
925 device_get_nameunit(nic->dev), qidx);
926 mtx_init(&cq->mtx, cq->mtx_name, NULL, MTX_DEF);
928 err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
929 NICVF_CQ_BASE_ALIGN_BYTES);
932 device_printf(nic->dev,
933 "Could not allocate DMA memory for CQ\n");
937 cq->desc = cq->dmem.base;
938 cq->thresh = pass1_silicon(nic->dev) ? 0 : CMP_QUEUE_CQE_THRESH;
941 nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;
943 cq->rx_br = buf_ring_alloc(CMP_QUEUE_LEN * 8, M_DEVBUF, M_WAITOK,
946 /* Allocate taskqueue */
947 TASK_INIT(&cq->cmp_task, 0, nicvf_cmp_task, cq);
948 cq->cmp_taskq = taskqueue_create_fast("nicvf_cmp_taskq", M_WAITOK,
949 taskqueue_thread_enqueue, &cq->cmp_taskq);
950 taskqueue_start_threads(&cq->cmp_taskq, 1, PI_NET, "%s: cmp_taskq(%d)",
951 device_get_nameunit(nic->dev), qidx);
957 nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
963 * The completion queue itself should be disabled by now
964 * (ref. nicvf_snd_queue_config()).
965 * Ensure that it is safe to free it, or panic.
968 panic("%s: Trying to free working CQ(%d)", __func__, cq->idx);
970 if (cq->cmp_taskq != NULL) {
972 while (taskqueue_cancel(cq->cmp_taskq, &cq->cmp_task, NULL) != 0)
973 taskqueue_drain(cq->cmp_taskq, &cq->cmp_task);
975 taskqueue_free(cq->cmp_taskq);
976 cq->cmp_taskq = NULL;
979 * The completion interrupt will possibly enable interrupts again,
980 * so disable them now, after we have finished processing the
981 * completion task. It is safe to do so since the corresponding CQ
982 * was already disabled.
984 nicvf_disable_intr(nic, NICVF_INTR_CQ, cq->idx);
985 nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx);
988 nicvf_free_q_desc_mem(nic, &cq->dmem);
989 drbr_free(cq->rx_br, M_DEVBUF);
990 NICVF_CMP_UNLOCK(cq);
991 mtx_destroy(&cq->mtx);
992 memset(cq->mtx_name, 0, sizeof(cq->mtx_name));
996 nicvf_xmit_locked(struct snd_queue *sq)
1003 NICVF_TX_LOCK_ASSERT(sq);
1009 while ((next = drbr_peek(ifp, sq->br)) != NULL) {
1010 err = nicvf_tx_mbuf_locked(sq, &next);
1013 drbr_advance(ifp, sq->br);
1015 drbr_putback(ifp, sq->br, next);
1019 drbr_advance(ifp, sq->br);
1020 /* Send a copy of the frame to the BPF listener */
1021 ETHER_BPF_MTAP(ifp, next);
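/*
 * The loop above follows the standard drbr(9) transmit pattern:
 * drbr_peek() inspects the head of the ring without consuming it,
 * drbr_advance() commits the dequeue once the frame has been accepted,
 * and drbr_putback() re-queues the (possibly replaced) mbuf when the SQ
 * temporarily lacks descriptors.
 */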
1027 nicvf_snd_task(void *arg, int pending)
1029 struct snd_queue *sq = (struct snd_queue *)arg;
1038 * Skip sending anything if the driver is not running,
1039 * the SQ is full, or the link is down.
1041 if (((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1042 IFF_DRV_RUNNING) || !nic->link_up)
1046 err = nicvf_xmit_locked(sq);
1047 NICVF_TX_UNLOCK(sq);
1050 taskqueue_enqueue(sq->snd_taskq, &sq->snd_task);
1053 /* Initialize transmit queue */
1055 nicvf_init_snd_queue(struct nicvf *nic, struct snd_queue *sq, int q_len,
1061 /* Initialize the TX lock for this queue */
1062 snprintf(sq->mtx_name, sizeof(sq->mtx_name), "%s: SQ(%d) lock",
1063 device_get_nameunit(nic->dev), qidx);
1064 mtx_init(&sq->mtx, sq->mtx_name, NULL, MTX_DEF);
1067 /* Allocate buffer ring */
1068 sq->br = buf_ring_alloc(q_len / MIN_SQ_DESC_PER_PKT_XMIT, M_DEVBUF,
1069 M_NOWAIT, &sq->mtx);
1070 if (sq->br == NULL) {
1071 device_printf(nic->dev,
1072 "ERROR: Could not set up buf ring for SQ(%d)\n", qidx);
1077 /* Allocate DMA memory for Tx descriptors */
1078 err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
1079 NICVF_SQ_BASE_ALIGN_BYTES);
1081 device_printf(nic->dev,
1082 "Could not allocate DMA memory for SQ\n");
1086 sq->desc = sq->dmem.base;
1087 sq->head = sq->tail = 0;
1088 sq->free_cnt = q_len - 1;
1089 sq->thresh = SND_QUEUE_THRESH;
1094 * Allocate DMA maps for Tx buffers
1097 /* Create DMA tag first */
1098 err = bus_dma_tag_create(
1099 bus_get_dma_tag(nic->dev), /* parent tag */
1102 BUS_SPACE_MAXADDR, /* lowaddr */
1103 BUS_SPACE_MAXADDR, /* highaddr */
1104 NULL, NULL, /* filtfunc, filtfuncarg */
1105 NICVF_TSO_MAXSIZE, /* maxsize */
1106 NICVF_TSO_NSEGS, /* nsegments */
1107 MCLBYTES, /* maxsegsize */
1109 NULL, NULL, /* lockfunc, lockfuncarg */
1110 &sq->snd_buff_dmat); /* dmat */
1113 device_printf(nic->dev,
1114 "Failed to create busdma tag for Tx buffers\n");
1118 /* Allocate send buffers array */
1119 sq->snd_buff = malloc(sizeof(*sq->snd_buff) * q_len, M_NICVF,
1120 (M_NOWAIT | M_ZERO));
1121 if (sq->snd_buff == NULL) {
1122 device_printf(nic->dev,
1123 "Could not allocate memory for Tx buffers array\n");
1128 /* Now populate maps */
1129 for (i = 0; i < q_len; i++) {
1130 err = bus_dmamap_create(sq->snd_buff_dmat, 0,
1131 &sq->snd_buff[i].dmap);
1133 device_printf(nic->dev,
1134 "Failed to create DMA maps for Tx buffers\n");
1138 NICVF_TX_UNLOCK(sq);
1140 /* Allocate taskqueue */
1141 TASK_INIT(&sq->snd_task, 0, nicvf_snd_task, sq);
1142 sq->snd_taskq = taskqueue_create_fast("nicvf_snd_taskq", M_WAITOK,
1143 taskqueue_thread_enqueue, &sq->snd_taskq);
1144 taskqueue_start_threads(&sq->snd_taskq, 1, PI_NET, "%s: snd_taskq(%d)",
1145 device_get_nameunit(nic->dev), qidx);
1149 NICVF_TX_UNLOCK(sq);
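/*
 * Sizing note (illustrative): every packet consumes at least
 * MIN_SQ_DESC_PER_PKT_XMIT SQ descriptors (a header subdescriptor plus
 * at least one gather subdescriptor), so a queue of q_len descriptors
 * can hold at most q_len / MIN_SQ_DESC_PER_PKT_XMIT packets, which is
 * exactly the depth chosen for sq->br above.
 */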
1154 nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
1156 struct queue_set *qs = nic->qs;
1163 if (sq->snd_taskq != NULL) {
1165 while (taskqueue_cancel(sq->snd_taskq, &sq->snd_task, NULL) != 0)
1166 taskqueue_drain(sq->snd_taskq, &sq->snd_task);
1168 taskqueue_free(sq->snd_taskq);
1169 sq->snd_taskq = NULL;
1173 if (sq->snd_buff_dmat != NULL) {
1174 if (sq->snd_buff != NULL) {
1175 for (i = 0; i < qs->sq_len; i++) {
1176 m_freem(sq->snd_buff[i].mbuf);
1177 sq->snd_buff[i].mbuf = NULL;
1179 bus_dmamap_unload(sq->snd_buff_dmat,
1180 sq->snd_buff[i].dmap);
1181 err = bus_dmamap_destroy(sq->snd_buff_dmat,
1182 sq->snd_buff[i].dmap);
1184 * If bus_dmamap_destroy() fails it can cause
1185 * a random panic later if the tag is also
1186 * destroyed in the process.
1189 ("%s: Could not destroy DMA map for SQ",
1194 free(sq->snd_buff, M_NICVF);
1196 err = bus_dma_tag_destroy(sq->snd_buff_dmat);
1198 ("%s: Trying to destroy BUSY DMA tag", __func__));
1201 /* Free private driver ring for this send queue */
1203 drbr_free(sq->br, M_DEVBUF);
1205 if (sq->dmem.base != NULL)
1206 nicvf_free_q_desc_mem(nic, &sq->dmem);
1208 NICVF_TX_UNLOCK(sq);
1209 /* Destroy Tx lock */
1210 mtx_destroy(&sq->mtx);
1211 memset(sq->mtx_name, 0, sizeof(sq->mtx_name));
1215 nicvf_reclaim_snd_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
1218 /* Disable send queue */
1219 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
1220 /* Check if SQ is stopped */
1221 if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
1223 /* Reset send queue */
1224 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
1228 nicvf_reclaim_rcv_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
1230 union nic_mbx mbx = {};
1232 /* Make sure all packets in the pipeline are written back into mem */
1233 mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
1234 nicvf_send_msg_to_pf(nic, &mbx);
1238 nicvf_reclaim_cmp_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
1241 /* Disable timer threshold (doesn't get reset upon CQ reset) */
1242 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
1243 /* Disable completion queue */
1244 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
1245 /* Reset completion queue */
1246 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
1250 nicvf_reclaim_rbdr(struct nicvf *nic, struct rbdr *rbdr, int qidx)
1252 uint64_t tmp, fifo_state;
1255 /* Save head and tail pointers for freeing up buffers */
1257 nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, qidx) >> 3;
1259 nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, qidx) >> 3;
1262 * If RBDR FIFO is in 'FAIL' state then do a reset first
1265 fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
1266 if (((fifo_state >> 62) & 0x03) == 0x3) {
1267 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
1268 qidx, NICVF_RBDR_RESET);
1272 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
1273 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
1276 tmp = nicvf_queue_reg_read(nic,
1277 NIC_QSET_RBDR_0_1_PREFETCH_STATUS, qidx);
1278 if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
1284 device_printf(nic->dev,
1285 "Failed polling on prefetch status\n");
1289 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
1292 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
1294 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
1295 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
1299 /* Configures receive queue */
1301 nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
1302 int qidx, bool enable)
1304 union nic_mbx mbx = {};
1305 struct rcv_queue *rq;
1306 struct rq_cfg rq_cfg;
1308 struct lro_ctrl *lro;
1313 rq->enable = enable;
1317 /* Disable receive queue */
1318 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
1321 nicvf_reclaim_rcv_queue(nic, qs, qidx);
1322 /* Free LRO memory */
1324 rq->lro_enabled = FALSE;
1328 /* Configure LRO if enabled */
1329 rq->lro_enabled = FALSE;
1330 if ((if_getcapenable(ifp) & IFCAP_LRO) != 0) {
1331 if (tcp_lro_init(lro) != 0) {
1332 device_printf(nic->dev,
1333 "Failed to initialize LRO for RXQ%d\n", qidx);
1335 rq->lro_enabled = TRUE;
1336 lro->ifp = nic->ifp;
1340 rq->cq_qs = qs->vnic_id;
1342 rq->start_rbdr_qs = qs->vnic_id;
1343 rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
1344 rq->cont_rbdr_qs = qs->vnic_id;
1345 rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
1346 /* All writes of RBDR data to be loaded into L2 cache as well */
1349 /* Send a mailbox msg to PF to config RQ */
1350 mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
1351 mbx.rq.qs_num = qs->vnic_id;
1352 mbx.rq.rq_num = qidx;
1353 mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
1354 (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
1355 (rq->cont_qs_rbdr_idx << 8) | (rq->start_rbdr_qs << 1) |
1356 (rq->start_qs_rbdr_idx);
1357 nicvf_send_msg_to_pf(nic, &mbx);
1359 mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
1360 mbx.rq.cfg = (1UL << 63) | (1UL << 62) | (qs->vnic_id << 0);
1361 nicvf_send_msg_to_pf(nic, &mbx);
1365 * Enable CQ drop to reserve sufficient CQEs for all tx packets
1367 mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
1368 mbx.rq.cfg = (1UL << 62) | (RQ_CQ_DROP << 8);
1369 nicvf_send_msg_to_pf(nic, &mbx);
1371 nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);
1373 /* Enable Receive queue */
1376 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx,
1377 *(uint64_t *)&rq_cfg);
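/*
 * Worked example of the RQ_CFG mailbox word packing (hypothetical field
 * values, bit positions as in the code above): with caching = 1 and all
 * queue/index fields equal to 0, cfg = (1 << 26) = 0x4000000; each field
 * occupies its own range starting at bits 26, 19, 16, 9, 8, 1 and 0.
 */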
1380 /* Configures completion queue */
1382 nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
1383 int qidx, boolean_t enable)
1385 struct cmp_queue *cq;
1386 struct cq_cfg cq_cfg;
1389 cq->enable = enable;
1392 nicvf_reclaim_cmp_queue(nic, qs, qidx);
1396 /* Reset completion queue */
1397 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
1399 /* Set completion queue base address */
1400 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, qidx,
1401 (uint64_t)(cq->dmem.phys_base));
1403 /* Enable Completion queue */
1407 cq_cfg.qsize = CMP_QSIZE;
1409 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(uint64_t *)&cq_cfg);
1411 /* Set threshold value for interrupt generation */
1412 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
1413 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx,
1414 nic->cq_coalesce_usecs);
1417 /* Configures transmit queue */
1419 nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs, int qidx,
1422 union nic_mbx mbx = {};
1423 struct snd_queue *sq;
1424 struct sq_cfg sq_cfg;
1427 sq->enable = enable;
1430 nicvf_reclaim_snd_queue(nic, qs, qidx);
1434 /* Reset send queue */
1435 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
1437 sq->cq_qs = qs->vnic_id;
1440 /* Send a mailbox msg to PF to config SQ */
1441 mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
1442 mbx.sq.qs_num = qs->vnic_id;
1443 mbx.sq.sq_num = qidx;
1444 mbx.sq.sqs_mode = nic->sqs_mode;
1445 mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
1446 nicvf_send_msg_to_pf(nic, &mbx);
1448 /* Set queue base address */
1449 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, qidx,
1450 (uint64_t)(sq->dmem.phys_base));
1452 /* Enable send queue & set queue size */
1456 sq_cfg.qsize = SND_QSIZE;
1457 sq_cfg.tstmp_bgx_intf = 0;
1458 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(uint64_t *)&sq_cfg);
1460 /* Set threshold value for interrupt generation */
1461 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);
1464 /* Configures receive buffer descriptor ring */
1466 nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs, int qidx,
1470 struct rbdr_cfg rbdr_cfg;
1472 rbdr = &qs->rbdr[qidx];
1473 nicvf_reclaim_rbdr(nic, rbdr, qidx);
1477 /* Set descriptor base address */
1478 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, qidx,
1479 (uint64_t)(rbdr->dmem.phys_base));
1481 /* Enable RBDR & set queue size */
1482 /* Buffer size should be in multiples of 128 bytes */
1486 rbdr_cfg.qsize = RBDR_SIZE;
1487 rbdr_cfg.avg_con = 0;
1488 rbdr_cfg.lines = rbdr->dma_size / 128;
1489 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
1490 *(uint64_t *)&rbdr_cfg);
1493 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, qidx,
1496 /* Set threshold value for interrupt generation */
1497 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH, qidx,
1501 /* Requests PF to assign and enable Qset */
1503 nicvf_qset_config(struct nicvf *nic, boolean_t enable)
1505 union nic_mbx mbx = {};
1506 struct queue_set *qs;
1507 struct qs_cfg *qs_cfg;
1511 device_printf(nic->dev,
1512 "Qset is still not allocated, don't init queues\n");
1516 qs->enable = enable;
1517 qs->vnic_id = nic->vf_id;
1519 /* Send a mailbox msg to PF to config Qset */
1520 mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
1521 mbx.qs.num = qs->vnic_id;
1524 qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
1527 qs_cfg->vnic = qs->vnic_id;
1529 nicvf_send_msg_to_pf(nic, &mbx);
1533 nicvf_free_resources(struct nicvf *nic)
1536 struct queue_set *qs;
1540 * Remove QS error task first since it has to be dead
1541 * to safely free completion queue tasks.
1543 if (qs->qs_err_taskq != NULL) {
1544 /* Shut down QS error tasks */
1545 while (taskqueue_cancel(qs->qs_err_taskq,
1546 &qs->qs_err_task, NULL) != 0) {
1547 taskqueue_drain(qs->qs_err_taskq, &qs->qs_err_task);
1550 taskqueue_free(qs->qs_err_taskq);
1551 qs->qs_err_taskq = NULL;
1553 /* Free receive buffer descriptor ring */
1554 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
1555 nicvf_free_rbdr(nic, &qs->rbdr[qidx]);
1557 /* Free completion queue */
1558 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
1559 nicvf_free_cmp_queue(nic, &qs->cq[qidx]);
1561 /* Free send queue */
1562 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
1563 nicvf_free_snd_queue(nic, &qs->sq[qidx]);
1567 nicvf_alloc_resources(struct nicvf *nic)
1569 struct queue_set *qs = nic->qs;
1572 /* Alloc receive buffer descriptor ring */
1573 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
1574 if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
1575 DMA_BUFFER_LEN, qidx))
1579 /* Alloc send queue */
1580 for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
1581 if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len, qidx))
1585 /* Alloc completion queue */
1586 for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
1587 if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len, qidx))
1591 /* Allocate QS error taskqueue */
1592 TASK_INIT(&qs->qs_err_task, 0, nicvf_qs_err_task, nic);
1593 qs->qs_err_taskq = taskqueue_create_fast("nicvf_qs_err_taskq", M_WAITOK,
1594 taskqueue_thread_enqueue, &qs->qs_err_taskq);
1595 taskqueue_start_threads(&qs->qs_err_taskq, 1, PI_NET, "%s: qs_taskq",
1596 device_get_nameunit(nic->dev));
1600 nicvf_free_resources(nic);
1605 nicvf_set_qset_resources(struct nicvf *nic)
1607 struct queue_set *qs;
1609 qs = malloc(sizeof(*qs), M_NICVF, (M_ZERO | M_WAITOK));
1612 /* Set count of each queue */
1613 qs->rbdr_cnt = RBDR_CNT;
1614 /* With no RSS we stay with a single RQ */
1617 qs->sq_cnt = SND_QUEUE_CNT;
1618 qs->cq_cnt = CMP_QUEUE_CNT;
1620 /* Set queue lengths */
1621 qs->rbdr_len = RCV_BUF_COUNT;
1622 qs->sq_len = SND_QUEUE_LEN;
1623 qs->cq_len = CMP_QUEUE_LEN;
1625 nic->rx_queues = qs->rq_cnt;
1626 nic->tx_queues = qs->sq_cnt;
1632 nicvf_config_data_transfer(struct nicvf *nic, boolean_t enable)
1634 boolean_t disable = FALSE;
1635 struct queue_set *qs;
1643 if (nicvf_alloc_resources(nic) != 0)
1646 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
1647 nicvf_snd_queue_config(nic, qs, qidx, enable);
1648 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
1649 nicvf_cmp_queue_config(nic, qs, qidx, enable);
1650 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
1651 nicvf_rbdr_config(nic, qs, qidx, enable);
1652 for (qidx = 0; qidx < qs->rq_cnt; qidx++)
1653 nicvf_rcv_queue_config(nic, qs, qidx, enable);
1655 for (qidx = 0; qidx < qs->rq_cnt; qidx++)
1656 nicvf_rcv_queue_config(nic, qs, qidx, disable);
1657 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
1658 nicvf_rbdr_config(nic, qs, qidx, disable);
1659 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
1660 nicvf_snd_queue_config(nic, qs, qidx, disable);
1661 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
1662 nicvf_cmp_queue_config(nic, qs, qidx, disable);
1664 nicvf_free_resources(nic);
1671 * Get a free descriptor from the SQ;
1672 * returns descriptor pointer & descriptor number.
1675 nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
1680 sq->free_cnt -= desc_cnt;
1681 sq->tail += desc_cnt;
1682 sq->tail &= (sq->dmem.q_len - 1);
1687 /* Free descriptor back to SQ for future use */
1689 nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
1692 sq->free_cnt += desc_cnt;
1693 sq->head += desc_cnt;
1694 sq->head &= (sq->dmem.q_len - 1);
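/*
 * The index arithmetic above assumes dmem.q_len is a power of two, so
 * "x &= (q_len - 1)" acts as a cheap modulo.  Worked example with
 * q_len = 8: head = 7 and desc_cnt = 3 yield (7 + 3) & 7 = 2, wrapping
 * back to the start of the ring.
 */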
1698 nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
1701 qentry &= (sq->dmem.q_len - 1);
1706 nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
1710 sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
1711 sq_cfg |= NICVF_SQ_EN;
1712 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
1713 /* Ring doorbell so that H/W restarts processing SQEs */
1714 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
1718 nicvf_sq_disable(struct nicvf *nic, int qidx)
1722 sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
1723 sq_cfg &= ~NICVF_SQ_EN;
1724 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
1728 nicvf_sq_free_used_descs(struct nicvf *nic, struct snd_queue *sq, int qidx)
1730 uint64_t head, tail;
1731 struct snd_buff *snd_buff;
1732 struct sq_hdr_subdesc *hdr;
1735 head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
1736 tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
1737 while (sq->head != head) {
1738 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
1739 if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
1740 nicvf_put_sq_desc(sq, 1);
1743 snd_buff = &sq->snd_buff[sq->head];
1744 if (snd_buff->mbuf != NULL) {
1745 bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
1746 m_freem(snd_buff->mbuf);
1747 sq->snd_buff[sq->head].mbuf = NULL;
1749 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
1751 NICVF_TX_UNLOCK(sq);
1755 * Add SQ HEADER subdescriptor.
1756 * First subdescriptor for every send descriptor.
1759 nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
1760 int subdesc_cnt, struct mbuf *mbuf, int len)
1763 struct sq_hdr_subdesc *hdr;
1764 struct ether_vlan_header *eh;
1770 int ehdrlen, iphlen, poff;
1774 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
1775 sq->snd_buff[qentry].mbuf = mbuf;
1777 memset(hdr, 0, SND_QUEUE_DESC_SIZE);
1778 hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
1779 /* Enable notification via CQE after processing SQE */
1781 /* Number of subdescriptors following this one */
1782 hdr->subdesc_cnt = subdesc_cnt;
1785 eh = mtod(mbuf, struct ether_vlan_header *);
1786 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1787 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1788 etype = ntohs(eh->evl_proto);
1790 ehdrlen = ETHER_HDR_LEN;
1791 etype = ntohs(eh->evl_encap_proto);
1796 case ETHERTYPE_IPV6:
1797 /* ARM64TODO: Add support for IPv6 */
1799 sq->snd_buff[qentry].mbuf = NULL;
1804 if (mbuf->m_len < ehdrlen + sizeof(struct ip)) {
1805 mbuf = m_pullup(mbuf, ehdrlen + sizeof(struct ip));
1806 sq->snd_buff[qentry].mbuf = mbuf;
1811 ip = (struct ip *)(mbuf->m_data + ehdrlen);
1812 iphlen = ip->ip_hl << 2;
1813 poff = ehdrlen + iphlen;
1815 if (mbuf->m_pkthdr.csum_flags != 0) {
1816 hdr->csum_l3 = 1; /* Enable IP csum calculation */
1819 if ((mbuf->m_pkthdr.csum_flags & CSUM_TCP) == 0)
1822 if (mbuf->m_len < (poff + sizeof(struct tcphdr))) {
1823 mbuf = m_pullup(mbuf, poff + sizeof(struct tcphdr));
1824 sq->snd_buff[qentry].mbuf = mbuf;
1828 hdr->csum_l4 = SEND_L4_CSUM_TCP;
1831 if ((mbuf->m_pkthdr.csum_flags & CSUM_UDP) == 0)
1834 if (mbuf->m_len < (poff + sizeof(struct udphdr))) {
1835 mbuf = m_pullup(mbuf, poff + sizeof(struct udphdr));
1836 sq->snd_buff[qentry].mbuf = mbuf;
1840 hdr->csum_l4 = SEND_L4_CSUM_UDP;
1843 if ((mbuf->m_pkthdr.csum_flags & CSUM_SCTP) == 0)
1846 if (mbuf->m_len < (poff + sizeof(struct sctphdr))) {
1847 mbuf = m_pullup(mbuf, poff + sizeof(struct sctphdr));
1848 sq->snd_buff[qentry].mbuf = mbuf;
1852 hdr->csum_l4 = SEND_L4_CSUM_SCTP;
1857 hdr->l3_offset = ehdrlen;
1858 hdr->l4_offset = ehdrlen + iphlen;
1861 if ((mbuf->m_pkthdr.tso_segsz != 0) && nic->hw_tso) {
1863 * Extract ip again as m_data could have been modified.
1865 ip = (struct ip *)(mbuf->m_data + ehdrlen);
1866 th = (struct tcphdr *)((caddr_t)ip + iphlen);
1869 hdr->tso_start = ehdrlen + iphlen + (th->th_off * 4);
1870 hdr->tso_max_paysize = mbuf->m_pkthdr.tso_segsz;
1871 hdr->inner_l3_offset = ehdrlen - 2;
1872 nic->drv_stats.tx_tso++;
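/*
 * TSO example (illustrative): for an untagged IPv4/TCP frame with
 * ehdrlen = 14, iphlen = 20 and th_off = 5, the payload begins at byte
 * 14 + 20 + 5 * 4 = 54, which is the value written to hdr->tso_start
 * above; tso_max_paysize then caps every generated segment at the
 * mbuf's tso_segsz.
 */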
1884 * SQ GATHER subdescriptor
1885 * Must follow HDR descriptor
1887 static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
1888 int size, uint64_t data)
1890 struct sq_gather_subdesc *gather;
1892 qentry &= (sq->dmem.q_len - 1);
1893 gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);
1895 memset(gather, 0, SND_QUEUE_DESC_SIZE);
1896 gather->subdesc_type = SQ_DESC_TYPE_GATHER;
1897 gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
1898 gather->size = size;
1899 gather->addr = data;
1902 /* Put an mbuf on an SQ for packet transfer. */
1904 nicvf_tx_mbuf_locked(struct snd_queue *sq, struct mbuf **mbufp)
1906 bus_dma_segment_t segs[256];
1908 struct snd_buff *snd_buff;
1914 NICVF_TX_LOCK_ASSERT(sq);
1916 if (sq->free_cnt == 0)
1919 snd_buff = &sq->snd_buff[sq->tail];
1921 err = bus_dmamap_load_mbuf_sg(sq->snd_buff_dmat, snd_buff->dmap,
1922 *mbufp, segs, &nsegs, BUS_DMA_NOWAIT);
1923 if (__predict_false(err != 0)) {
1924 /* ARM64TODO: Add mbuf defragmenting if we lack maps */
1930 /* Set how many subdescriptors are required */
1932 if ((*mbufp)->m_pkthdr.tso_segsz != 0 && nic->hw_tso)
1933 subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;
1935 subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT + nsegs - 1;
1937 if (subdesc_cnt > sq->free_cnt) {
1938 /* ARM64TODO: Add mbuf defragmentation if we lack descriptors */
1939 bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
1943 qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
1945 /* Add SQ header subdesc */
1946 err = nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, *mbufp,
1947 (*mbufp)->m_pkthdr.len);
1949 nicvf_put_sq_desc(sq, subdesc_cnt);
1950 bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
1951 if (err == ENOBUFS) {
1958 /* Add SQ gather subdescs */
1959 for (seg = 0; seg < nsegs; seg++) {
1960 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1961 nicvf_sq_add_gather_subdesc(sq, qentry, segs[seg].ds_len,
1965 /* make sure all memory stores are done before ringing doorbell */
1966 bus_dmamap_sync(sq->dmem.dmat, sq->dmem.dmap, BUS_DMASYNC_PREWRITE);
1968 dprintf(sq->nic->dev, "%s: sq->idx: %d, subdesc_cnt: %d\n",
1969 __func__, sq->idx, subdesc_cnt);
1970 /* Inform HW to xmit new packet */
1971 nicvf_queue_reg_write(sq->nic, NIC_QSET_SQ_0_7_DOOR,
1972 sq->idx, subdesc_cnt);
1976 static __inline u_int
1979 #if BYTE_ORDER == BIG_ENDIAN
1980 return ((i & ~3) + 3 - (i & 3));
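/*
 * Worked example (illustrative): on a big-endian host the expression
 * above swizzles indices within each aligned group of four 16-bit
 * rb_lens entries, so i = 0, 1, 2, 3, 4, 5 maps to 3, 2, 1, 0, 7, 6.
 */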
1986 /* Returns MBUF for a received packet */
1988 nicvf_get_rcv_mbuf(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
1991 int payload_len = 0;
1993 struct mbuf *mbuf_frag;
1994 uint16_t *rb_lens = NULL;
1995 uint64_t *rb_ptrs = NULL;
1998 rb_lens = (uint16_t *)((uint8_t *)cqe_rx + (3 * sizeof(uint64_t)));
1999 rb_ptrs = (uint64_t *)((uint8_t *)cqe_rx + (6 * sizeof(uint64_t)));
2001 dprintf(nic->dev, "%s rb_cnt %d rb0_ptr %lx rb0_sz %d\n",
2002 __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);
2004 for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
2005 payload_len = rb_lens[frag_num(frag)];
2007 /* First fragment */
2008 mbuf = nicvf_rb_ptr_to_mbuf(nic,
2009 (*rb_ptrs - cqe_rx->align_pad));
2010 mbuf->m_len = payload_len;
2011 mbuf->m_data += cqe_rx->align_pad;
2012 if_setrcvif(mbuf, nic->ifp);
2015 mbuf_frag = nicvf_rb_ptr_to_mbuf(nic, *rb_ptrs);
2016 m_append(mbuf, payload_len, mbuf_frag->m_data);
2019 /* Next buffer pointer */
2023 if (__predict_true(mbuf != NULL)) {
2025 mbuf->m_pkthdr.flowid = cqe_rx->rq_idx;
2026 M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE);
2027 if (__predict_true((if_getcapenable(nic->ifp) & IFCAP_RXCSUM) != 0)) {
2029 * HW by default verifies IP & TCP/UDP/SCTP checksums
2031 if (__predict_true(cqe_rx->l3_type == L3TYPE_IPV4)) {
2032 mbuf->m_pkthdr.csum_flags =
2033 (CSUM_IP_CHECKED | CSUM_IP_VALID);
2036 switch (cqe_rx->l4_type) {
2038 case L4TYPE_TCP: /* fall through */
2039 mbuf->m_pkthdr.csum_flags |=
2040 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2041 mbuf->m_pkthdr.csum_data = 0xffff;
2044 mbuf->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
2055 /* Enable interrupt */
2057 nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
2061 reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
2065 reg_val |= ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
2068 reg_val |= ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
2070 case NICVF_INTR_RBDR:
2071 reg_val |= ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
2073 case NICVF_INTR_PKT_DROP:
2074 reg_val |= (1UL << NICVF_INTR_PKT_DROP_SHIFT);
2076 case NICVF_INTR_TCP_TIMER:
2077 reg_val |= (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
2079 case NICVF_INTR_MBOX:
2080 reg_val |= (1UL << NICVF_INTR_MBOX_SHIFT);
2082 case NICVF_INTR_QS_ERR:
2083 reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT);
2086 device_printf(nic->dev,
2087 "Failed to enable interrupt: unknown type\n");
2091 nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val);
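/*
 * Register idiom: NIC_VF_ENA_W1S is a "write 1 to set" register and
 * NIC_VF_ENA_W1C (used by nicvf_disable_intr() below) is its
 * "write 1 to clear" counterpart; bits written as zero are left
 * untouched, so a single mask write flips only the intended enable bit.
 */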
2094 /* Disable interrupt */
2096 nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
2098 uint64_t reg_val = 0;
2102 reg_val |= ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
2105 reg_val |= ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
2107 case NICVF_INTR_RBDR:
2108 reg_val |= ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
2110 case NICVF_INTR_PKT_DROP:
2111 reg_val |= (1UL << NICVF_INTR_PKT_DROP_SHIFT);
2113 case NICVF_INTR_TCP_TIMER:
2114 reg_val |= (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
2116 case NICVF_INTR_MBOX:
2117 reg_val |= (1UL << NICVF_INTR_MBOX_SHIFT);
2119 case NICVF_INTR_QS_ERR:
2120 reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT);
2123 device_printf(nic->dev,
2124 "Failed to disable interrupt: unknown type\n");
2128 nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val);
2131 /* Clear interrupt */
2133 nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
2135 uint64_t reg_val = 0;
2139 reg_val = ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
2142 reg_val = ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
2144 case NICVF_INTR_RBDR:
2145 reg_val = ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
2147 case NICVF_INTR_PKT_DROP:
2148 reg_val = (1UL << NICVF_INTR_PKT_DROP_SHIFT);
2150 case NICVF_INTR_TCP_TIMER:
2151 reg_val = (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
2153 case NICVF_INTR_MBOX:
2154 reg_val = (1UL << NICVF_INTR_MBOX_SHIFT);
2156 case NICVF_INTR_QS_ERR:
2157 reg_val = (1UL << NICVF_INTR_QS_ERR_SHIFT);
2160 device_printf(nic->dev,
2161 "Failed to clear interrupt: unknown type\n");
2165 nicvf_reg_write(nic, NIC_VF_INT, reg_val);
2168 /* Check if interrupt is enabled */
2170 nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
2173 uint64_t mask = 0xff;
2175 reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
2179 mask = ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
2182 mask = ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
2184 case NICVF_INTR_RBDR:
2185 mask = ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
2187 case NICVF_INTR_PKT_DROP:
2188 mask = NICVF_INTR_PKT_DROP_MASK;
2190 case NICVF_INTR_TCP_TIMER:
2191 mask = NICVF_INTR_TCP_TIMER_MASK;
2193 case NICVF_INTR_MBOX:
2194 mask = NICVF_INTR_MBOX_MASK;
2196 case NICVF_INTR_QS_ERR:
2197 mask = NICVF_INTR_QS_ERR_MASK;
2200 device_printf(nic->dev,
2201 "Failed to check interrupt enable: unknown type\n");
2205 return (reg_val & mask);
2209 nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
2211 struct rcv_queue *rq;
2213 #define GET_RQ_STATS(reg) \
2214 nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
2215 (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
2217 rq = &nic->qs->rq[rq_idx];
2218 rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
2219 rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
2223 nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
2225 struct snd_queue *sq;
2227 #define GET_SQ_STATS(reg) \
2228 nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
2229 (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
2231 sq = &nic->qs->sq[sq_idx];
2232 sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
2233 sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
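/*
 * Address composition example (illustrative, assuming RQ_SQ_STATS_PKTS
 * is 1): for sq_idx = 1 the GET_SQ_STATS(RQ_SQ_STATS_PKTS) macro reads
 *
 *	NIC_QSET_SQ_0_7_STAT_0_1 | (1 << NIC_Q_NUM_SHIFT) | (1 << 3)
 *
 * i.e. register base | queue number field | 8-byte register index.
 */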
2236 /* Check for errors in the receive completion queue entry */
2238 nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cmp_queue *cq,
2239 struct cqe_rx_t *cqe_rx)
2241 struct nicvf_hw_stats *stats = &nic->hw_stats;
2242 struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
2244 if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
2245 drv_stats->rx_frames_ok++;
2249 switch (cqe_rx->err_opcode) {
2250 case CQ_RX_ERROP_RE_PARTIAL:
2251 stats->rx_bgx_truncated_pkts++;
2253 case CQ_RX_ERROP_RE_JABBER:
2254 stats->rx_jabber_errs++;
2256 case CQ_RX_ERROP_RE_FCS:
2257 stats->rx_fcs_errs++;
2259 case CQ_RX_ERROP_RE_RX_CTL:
2260 stats->rx_bgx_errs++;
2262 case CQ_RX_ERROP_PREL2_ERR:
2263 stats->rx_prel2_errs++;
2265 case CQ_RX_ERROP_L2_MAL:
2266 stats->rx_l2_hdr_malformed++;
2268 case CQ_RX_ERROP_L2_OVERSIZE:
2269 stats->rx_oversize++;
2271 case CQ_RX_ERROP_L2_UNDERSIZE:
2272 stats->rx_undersize++;
2274 case CQ_RX_ERROP_L2_LENMISM:
2275 stats->rx_l2_len_mismatch++;
2277 case CQ_RX_ERROP_L2_PCLP:
2278 stats->rx_l2_pclp++;
2280 case CQ_RX_ERROP_IP_NOT:
2281 stats->rx_ip_ver_errs++;
2283 case CQ_RX_ERROP_IP_CSUM_ERR:
2284 stats->rx_ip_csum_errs++;
2286 case CQ_RX_ERROP_IP_MAL:
2287 stats->rx_ip_hdr_malformed++;
2289 case CQ_RX_ERROP_IP_MALD:
2290 stats->rx_ip_payload_malformed++;
2292 case CQ_RX_ERROP_IP_HOP:
2293 stats->rx_ip_ttl_errs++;
2295 case CQ_RX_ERROP_L3_PCLP:
2296 stats->rx_l3_pclp++;
2298 case CQ_RX_ERROP_L4_MAL:
2299 stats->rx_l4_malformed++;
2301 case CQ_RX_ERROP_L4_CHK:
2302 stats->rx_l4_csum_errs++;
2304 case CQ_RX_ERROP_UDP_LEN:
2305 stats->rx_udp_len_errs++;
2307 case CQ_RX_ERROP_L4_PORT:
2308 stats->rx_l4_port_errs++;
2310 case CQ_RX_ERROP_TCP_FLAG:
2311 stats->rx_tcp_flag_errs++;
2313 case CQ_RX_ERROP_TCP_OFFSET:
2314 stats->rx_tcp_offset_errs++;
2316 case CQ_RX_ERROP_L4_PCLP:
2317 stats->rx_l4_pclp++;
2319 case CQ_RX_ERROP_RBDR_TRUNC:
2320 stats->rx_truncated_pkts++;
2327 /* Check for errors in the send completion queue entry */
2329 nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cmp_queue *cq,
2330 struct cqe_send_t *cqe_tx)
2332 struct cmp_queue_stats *stats = &cq->stats;
2334 switch (cqe_tx->send_status) {
2335 case CQ_TX_ERROP_GOOD:
2338 case CQ_TX_ERROP_DESC_FAULT:
2339 stats->tx.desc_fault++;
2341 case CQ_TX_ERROP_HDR_CONS_ERR:
2342 stats->tx.hdr_cons_err++;
2344 case CQ_TX_ERROP_SUBDC_ERR:
2345 stats->tx.subdesc_err++;
2347 case CQ_TX_ERROP_IMM_SIZE_OFLOW:
2348 stats->tx.imm_size_oflow++;
2350 case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
2351 stats->tx.data_seq_err++;
2353 case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
2354 stats->tx.mem_seq_err++;
2356 case CQ_TX_ERROP_LOCK_VIOL:
2357 stats->tx.lock_viol++;
2359 case CQ_TX_ERROP_DATA_FAULT:
2360 stats->tx.data_fault++;
2362 case CQ_TX_ERROP_TSTMP_CONFLICT:
2363 stats->tx.tstmp_conflict++;
2365 case CQ_TX_ERROP_TSTMP_TIMEOUT:
2366 stats->tx.tstmp_timeout++;
2368 case CQ_TX_ERROP_MEM_FAULT:
2369 stats->tx.mem_fault++;
2371 case CQ_TX_ERROP_CK_OVERLAP:
2372 stats->tx.csum_overlap++;
2374 case CQ_TX_ERROP_CK_OFLOW:
2375 stats->tx.csum_overflow++;