2 * Copyright (C) 2015 Cavium Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
33 #include "opt_inet6.h"
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/bitset.h>
38 #include <sys/bitstring.h>
39 #include <sys/buf_ring.h>
41 #include <sys/endian.h>
42 #include <sys/kernel.h>
43 #include <sys/malloc.h>
44 #include <sys/module.h>
46 #include <sys/pciio.h>
49 #include <sys/sockio.h>
50 #include <sys/socket.h>
51 #include <sys/stdatomic.h>
52 #include <sys/cpuset.h>
54 #include <sys/mutex.h>
56 #include <sys/taskqueue.h>
61 #include <machine/bus.h>
62 #include <machine/vmparam.h>
65 #include <net/if_var.h>
66 #include <net/if_media.h>
69 #include <net/ethernet.h>
71 #include <netinet/in_systm.h>
72 #include <netinet/in.h>
73 #include <netinet/if_ether.h>
74 #include <netinet/ip.h>
75 #include <netinet/ip6.h>
76 #include <netinet/sctp.h>
77 #include <netinet/tcp.h>
78 #include <netinet/tcp_lro.h>
79 #include <netinet/udp.h>
81 #include <dev/pci/pcireg.h>
82 #include <dev/pci/pcivar.h>
84 #include "thunder_bgx.h"
88 #include "nicvf_queues.h"
94 #define dprintf(dev, fmt, ...) device_printf(dev, fmt, ##__VA_ARGS__)
96 #define dprintf(dev, fmt, ...)
99 MALLOC_DECLARE(M_NICVF);
101 static void nicvf_free_snd_queue(struct nicvf *, struct snd_queue *);
102 static struct mbuf * nicvf_get_rcv_mbuf(struct nicvf *, struct cqe_rx_t *);
103 static void nicvf_sq_disable(struct nicvf *, int);
104 static void nicvf_sq_enable(struct nicvf *, struct snd_queue *, int);
105 static void nicvf_put_sq_desc(struct snd_queue *, int);
106 static void nicvf_cmp_queue_config(struct nicvf *, struct queue_set *, int,
108 static void nicvf_sq_free_used_descs(struct nicvf *, struct snd_queue *, int);
110 static int nicvf_tx_mbuf_locked(struct snd_queue *, struct mbuf **);
112 static void nicvf_rbdr_task(void *, int);
113 static void nicvf_rbdr_task_nowait(void *, int);
121 #define GET_RBUF_INFO(x) ((struct rbuf_info *)((x) - NICVF_RCV_BUF_ALIGN_BYTES))
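/*
 * The rbuf_info metadata lives in the 128-byte line immediately preceding the
 * cache-aligned buffer start, so GET_RBUF_INFO() steps back by
 * NICVF_RCV_BUF_ALIGN_BYTES from the buffer's direct-map address to recover it.
 */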
123 /* Poll a register for a specific value */
124 static int nicvf_poll_reg(struct nicvf *nic, int qidx,
125 uint64_t reg, int bit_pos, int bits, int val)
131 bit_mask = (1UL << bits) - 1;
132 bit_mask = (bit_mask << bit_pos);
135 reg_val = nicvf_queue_reg_read(nic, reg, qidx);
136 if (((reg_val & bit_mask) >> bit_pos) == val)
142 device_printf(nic->dev, "Poll on reg 0x%lx failed\n", reg);
146 /* Callback for bus_dmamap_load() */
148 nicvf_dmamap_q_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
152 KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
154 *paddr = segs->ds_addr;
157 /* Allocate memory for a queue's descriptors */
159 nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
160 int q_len, int desc_size, int align_bytes)
164 /* Create DMA tag first */
165 err = bus_dma_tag_create(
166 bus_get_dma_tag(nic->dev), /* parent tag */
167 align_bytes, /* alignment */
169 BUS_SPACE_MAXADDR, /* lowaddr */
170 BUS_SPACE_MAXADDR, /* highaddr */
171 NULL, NULL, /* filtfunc, filtfuncarg */
172 (q_len * desc_size), /* maxsize */
174 (q_len * desc_size), /* maxsegsize */
176 NULL, NULL, /* lockfunc, lockfuncarg */
177 &dmem->dmat); /* dmat */
180 device_printf(nic->dev,
181 "Failed to create busdma tag for descriptors ring\n");
185 	/* Allocate a segment of contiguous DMA safe memory */
186 err = bus_dmamem_alloc(
187 dmem->dmat, /* DMA tag */
188 &dmem->base, /* virtual address */
189 (BUS_DMA_NOWAIT | BUS_DMA_ZERO), /* flags */
190 &dmem->dmap); /* DMA map */
192 		device_printf(nic->dev, "Failed to allocate DMA safe memory for "
193 		    "descriptors ring\n");
197 err = bus_dmamap_load(
201 (q_len * desc_size), /* allocation size */
202 nicvf_dmamap_q_cb, /* map to DMA address cb. */
203 &dmem->phys_base, /* physical address */
206 device_printf(nic->dev,
207 "Cannot load DMA map of descriptors ring\n");
212 dmem->size = (desc_size * q_len);
217 bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap);
220 err_dmat = bus_dma_tag_destroy(dmem->dmat);
222 KASSERT(err_dmat == 0,
223 ("%s: Trying to destroy BUSY DMA tag", __func__));
228 /* Free queue's descriptor memory */
230 nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
234 if ((dmem == NULL) || (dmem->base == NULL))
238 bus_dmamap_sync(dmem->dmat, dmem->dmap, BUS_DMASYNC_POSTREAD);
239 bus_dmamap_unload(dmem->dmat, dmem->dmap);
240 /* Free DMA memory */
241 bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap);
242 /* Destroy DMA tag */
243 err = bus_dma_tag_destroy(dmem->dmat);
246 ("%s: Trying to destroy BUSY DMA tag", __func__));
253  * Allocate a buffer for packet reception.
254  * HW returns the memory address where the packet is DMA'ed, not a pointer
255  * into the RBDR ring, so save the buffer address at the start of the
256  * fragment and align the start address to a cache-line boundary.
259 nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
260 bus_dmamap_t dmap, int mflags, uint32_t buf_len, bus_addr_t *rbuf)
263 struct rbuf_info *rinfo;
264 bus_dma_segment_t segs[1];
268 mbuf = m_getjcl(mflags, MT_DATA, M_PKTHDR, MCLBYTES);
273 	 * The length is equal to the actual length plus one 128-byte line
274 	 * used as room for the rbuf_info structure.
276 mbuf->m_len = mbuf->m_pkthdr.len = buf_len;
278 err = bus_dmamap_load_mbuf_sg(rbdr->rbdr_buff_dmat, dmap, mbuf, segs,
279 &nsegs, BUS_DMA_NOWAIT);
281 device_printf(nic->dev,
282 "Failed to map mbuf into DMA visible memory, err: %d\n",
285 bus_dmamap_destroy(rbdr->rbdr_buff_dmat, dmap);
289 panic("Unexpected number of DMA segments for RB: %d", nsegs);
291 * Now use the room for rbuf_info structure
292 * and adjust mbuf data and length.
294 rinfo = (struct rbuf_info *)mbuf->m_data;
295 m_adj(mbuf, NICVF_RCV_BUF_ALIGN_BYTES);
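	/* m_data now points past the metadata line, so the stack never sees rbuf_info */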
297 rinfo->dmat = rbdr->rbdr_buff_dmat;
301 *rbuf = segs[0].ds_addr + NICVF_RCV_BUF_ALIGN_BYTES;
306 /* Retrieve mbuf for received packet */
308 nicvf_rb_ptr_to_mbuf(struct nicvf *nic, bus_addr_t rb_ptr)
311 struct rbuf_info *rinfo;
313 /* Get buffer start address and alignment offset */
314 rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(rb_ptr));
316 /* Now retrieve mbuf to give to stack */
318 if (__predict_false(mbuf == NULL)) {
319 panic("%s: Received packet fragment with NULL mbuf",
320 device_get_nameunit(nic->dev));
323 * Clear the mbuf in the descriptor to indicate
324 * that this slot is processed and free to use.
328 bus_dmamap_sync(rinfo->dmat, rinfo->dmap, BUS_DMASYNC_POSTREAD);
329 bus_dmamap_unload(rinfo->dmat, rinfo->dmap);
334 /* Allocate RBDR ring and populate receive buffers */
336 nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr, int ring_len,
337 int buf_size, int qidx)
341 struct rbdr_entry_t *desc;
345 /* Allocate rbdr descriptors ring */
346 err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
347 sizeof(struct rbdr_entry_t), NICVF_RCV_BUF_ALIGN_BYTES);
349 device_printf(nic->dev,
350 "Failed to create RBDR descriptors ring\n");
354 rbdr->desc = rbdr->dmem.base;
356 	 * Buffer size has to be a multiple of 128 bytes.
357 	 * Make room for metadata the size of one line (128 bytes).
359 rbdr->dma_size = buf_size - NICVF_RCV_BUF_ALIGN_BYTES;
361 rbdr->thresh = RBDR_THRESH;
366 * Create DMA tag for Rx buffers.
367 * Each map created using this tag is intended to store Rx payload for
368 	 * one fragment plus one header structure containing rbuf_info (thus an
369 	 * additional 128-byte line, since the RB must be a multiple of 128 bytes).
372 if (buf_size > MCLBYTES) {
373 device_printf(nic->dev,
374 		    "Buffer size too large for mbuf cluster\n");
377 err = bus_dma_tag_create(
378 bus_get_dma_tag(nic->dev), /* parent tag */
379 NICVF_RCV_BUF_ALIGN_BYTES, /* alignment */
381 DMAP_MAX_PHYSADDR, /* lowaddr */
382 DMAP_MIN_PHYSADDR, /* highaddr */
383 NULL, NULL, /* filtfunc, filtfuncarg */
384 roundup2(buf_size, MCLBYTES), /* maxsize */
386 roundup2(buf_size, MCLBYTES), /* maxsegsize */
388 NULL, NULL, /* lockfunc, lockfuncarg */
389 &rbdr->rbdr_buff_dmat); /* dmat */
392 device_printf(nic->dev,
393 "Failed to create busdma tag for RBDR buffers\n");
397 rbdr->rbdr_buff_dmaps = malloc(sizeof(*rbdr->rbdr_buff_dmaps) *
398 ring_len, M_NICVF, (M_WAITOK | M_ZERO));
400 for (idx = 0; idx < ring_len; idx++) {
401 err = bus_dmamap_create(rbdr->rbdr_buff_dmat, 0, &dmap);
403 device_printf(nic->dev,
404 "Failed to create DMA map for RB\n");
407 rbdr->rbdr_buff_dmaps[idx] = dmap;
409 err = nicvf_alloc_rcv_buffer(nic, rbdr, dmap, M_WAITOK,
410 DMA_BUFFER_LEN, &rbuf);
414 desc = GET_RBDR_DESC(rbdr, idx);
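		/*
		 * The descriptor stores the buffer address shifted right by
		 * NICVF_RCV_BUF_ALIGN; it is shifted back left when the buffer
		 * is reclaimed in nicvf_free_rbdr().
		 */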
415 desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN);
418 /* Allocate taskqueue */
419 TASK_INIT(&rbdr->rbdr_task, 0, nicvf_rbdr_task, rbdr);
420 TASK_INIT(&rbdr->rbdr_task_nowait, 0, nicvf_rbdr_task_nowait, rbdr);
421 rbdr->rbdr_taskq = taskqueue_create_fast("nicvf_rbdr_taskq", M_WAITOK,
422 taskqueue_thread_enqueue, &rbdr->rbdr_taskq);
423 taskqueue_start_threads(&rbdr->rbdr_taskq, 1, PI_NET, "%s: rbdr_taskq",
424 device_get_nameunit(nic->dev));
429 /* Free RBDR ring and its receive buffers */
431 nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
434 struct queue_set *qs;
435 struct rbdr_entry_t *desc;
436 struct rbuf_info *rinfo;
443 if ((qs == NULL) || (rbdr == NULL))
446 rbdr->enable = FALSE;
447 if (rbdr->rbdr_taskq != NULL) {
449 while (taskqueue_cancel(rbdr->rbdr_taskq,
450 &rbdr->rbdr_task_nowait, NULL) != 0) {
451 /* Finish the nowait task first */
452 taskqueue_drain(rbdr->rbdr_taskq,
453 &rbdr->rbdr_task_nowait);
455 taskqueue_free(rbdr->rbdr_taskq);
456 rbdr->rbdr_taskq = NULL;
458 while (taskqueue_cancel(taskqueue_thread,
459 &rbdr->rbdr_task, NULL) != 0) {
460 /* Now finish the sleepable task */
461 taskqueue_drain(taskqueue_thread, &rbdr->rbdr_task);
466 * Free all of the memory under the RB descriptors.
467 * There are assumptions here:
468 * 1. Corresponding RBDR is disabled
469 * - it is safe to operate using head and tail indexes
470 	 * 2. All buffers that were received are properly freed by
471 	 *    the receive handler
472 	 *    - there is no need to unload the DMA map and free the mbuf for
473 	 *      descriptors other than the unused ones
475 if (rbdr->rbdr_buff_dmat != NULL) {
478 while (head != tail) {
479 desc = GET_RBDR_DESC(rbdr, head);
480 buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
481 rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(buf_addr));
482 bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap);
484 /* This will destroy everything including rinfo! */
487 head &= (rbdr->dmem.q_len - 1);
489 /* Free tail descriptor */
490 desc = GET_RBDR_DESC(rbdr, tail);
491 buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
492 rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(buf_addr));
493 bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap);
495 /* This will destroy everything including rinfo! */
498 /* Destroy DMA maps */
499 for (idx = 0; idx < qs->rbdr_len; idx++) {
500 if (rbdr->rbdr_buff_dmaps[idx] == NULL)
502 err = bus_dmamap_destroy(rbdr->rbdr_buff_dmat,
503 rbdr->rbdr_buff_dmaps[idx]);
505 ("%s: Could not destroy DMA map for RB, desc: %d",
507 rbdr->rbdr_buff_dmaps[idx] = NULL;
510 /* Now destroy the tag */
511 err = bus_dma_tag_destroy(rbdr->rbdr_buff_dmat);
513 ("%s: Trying to destroy BUSY DMA tag", __func__));
520 nicvf_free_q_desc_mem(nic, &rbdr->dmem);
524 * Refill receive buffer descriptors with new buffers.
527 nicvf_refill_rbdr(struct rbdr *rbdr, int mflags)
530 struct queue_set *qs;
534 struct rbdr_entry_t *desc;
537 boolean_t rb_alloc_fail;
540 rb_alloc_fail = TRUE;
544 rbdr_idx = rbdr->idx;
546 /* Check if it's enabled */
550 	/* Get the number of descriptors to be refilled */
551 qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
553 	/* The doorbell can be rung with at most ring size minus 1 */
554 if (qcount >= (qs->rbdr_len - 1)) {
555 rb_alloc_fail = FALSE;
558 refill_rb_cnt = qs->rbdr_len - qcount - 1;
560 /* Start filling descs from tail */
561 tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
562 while (refill_rb_cnt) {
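		/* Ring lengths are powers of two, so masking with (q_len - 1) wraps the index */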
564 tail &= (rbdr->dmem.q_len - 1);
566 dmap = rbdr->rbdr_buff_dmaps[tail];
567 if (nicvf_alloc_rcv_buffer(nic, rbdr, dmap, mflags,
568 DMA_BUFFER_LEN, &rbuf)) {
569 			/* Something went wrong. Give up. */
572 desc = GET_RBDR_DESC(rbdr, tail);
573 desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN);
578 /* make sure all memory stores are done before ringing doorbell */
581 /* Check if buffer allocation failed */
582 if (refill_rb_cnt == 0)
583 rb_alloc_fail = FALSE;
586 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
589 if (!rb_alloc_fail) {
591 * Re-enable RBDR interrupts only
592 		 * if buffer allocation succeeded.
594 nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);
602 /* Refill RBs even if sleep is needed to reclaim memory */
604 nicvf_rbdr_task(void *arg, int pending)
609 rbdr = (struct rbdr *)arg;
611 err = nicvf_refill_rbdr(rbdr, M_WAITOK);
612 if (__predict_false(err != 0)) {
613 panic("%s: Failed to refill RBs even when sleep enabled",
618 /* Refill RBs as soon as possible without waiting */
620 nicvf_rbdr_task_nowait(void *arg, int pending)
625 rbdr = (struct rbdr *)arg;
627 err = nicvf_refill_rbdr(rbdr, M_NOWAIT);
630 * Schedule another, sleepable kernel thread
631 		 * that is guaranteed to refill the buffers.
633 taskqueue_enqueue(taskqueue_thread, &rbdr->rbdr_task);
638 nicvf_rcv_pkt_handler(struct nicvf *nic, struct cmp_queue *cq,
639 struct cqe_rx_t *cqe_rx, int cqe_type)
642 struct rcv_queue *rq;
646 rq_idx = cqe_rx->rq_idx;
647 rq = &nic->qs->rq[rq_idx];
649 /* Check for errors */
650 err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
651 if (err && !cqe_rx->rb_cnt)
654 mbuf = nicvf_get_rcv_mbuf(nic, cqe_rx);
656 dprintf(nic->dev, "Packet not received\n");
660 /* If error packet */
666 if (rq->lro_enabled &&
667 ((cqe_rx->l3_type == L3TYPE_IPV4) && (cqe_rx->l4_type == L4TYPE_TCP)) &&
668 (mbuf->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
669 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
671 * At this point it is known that there are no errors in the
672 * packet. Attempt to LRO enqueue. Send to stack if no resources
675 if ((rq->lro.lro_cnt != 0) &&
676 (tcp_lro_rx(&rq->lro, mbuf, 0) == 0))
680 * Push this packet to the stack later to avoid
681 * unlocking completion task in the middle of work.
683 err = buf_ring_enqueue(cq->rx_br, mbuf);
686 * Failed to enqueue this mbuf.
687 * We don't drop it, just schedule another task.
696 nicvf_snd_pkt_handler(struct nicvf *nic, struct cmp_queue *cq,
697 struct cqe_send_t *cqe_tx, int cqe_type)
701 struct snd_queue *sq;
702 struct sq_hdr_subdesc *hdr;
705 sq = &nic->qs->sq[cqe_tx->sq_idx];
707 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
708 if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
712 "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n",
713 __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
714 cqe_tx->sqe_ptr, hdr->subdesc_cnt);
716 dmap = (bus_dmamap_t)sq->snd_buff[cqe_tx->sqe_ptr].dmap;
717 bus_dmamap_unload(sq->snd_buff_dmat, dmap);
719 mbuf = (struct mbuf *)sq->snd_buff[cqe_tx->sqe_ptr].mbuf;
722 sq->snd_buff[cqe_tx->sqe_ptr].mbuf = NULL;
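	/* +1 accounts for the header subdescriptor itself, which is not included in subdesc_cnt */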
723 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
726 nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
730 nicvf_cq_intr_handler(struct nicvf *nic, uint8_t cq_idx)
734 int processed_cqe, work_done = 0, tx_done = 0;
735 int cqe_count, cqe_head;
736 struct queue_set *qs = nic->qs;
737 struct cmp_queue *cq = &qs->cq[cq_idx];
738 struct snd_queue *sq = &qs->sq[cq_idx];
739 struct rcv_queue *rq;
740 struct cqe_rx_t *cq_desc;
741 struct lro_ctrl *lro;
748 	/* Get the number of valid CQ entries to process */
749 cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
750 cqe_count &= CQ_CQE_COUNT;
754 /* Get head of the valid CQ entries */
755 cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
758 dprintf(nic->dev, "%s CQ%d cqe_count %d cqe_head %d\n",
759 __func__, cq_idx, cqe_count, cqe_head);
760 while (processed_cqe < cqe_count) {
761 /* Get the CQ descriptor */
762 cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
764 cqe_head &= (cq->dmem.q_len - 1);
765 /* Prefetch next CQ descriptor */
766 __builtin_prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));
768 dprintf(nic->dev, "CQ%d cq_desc->cqe_type %d\n", cq_idx,
770 switch (cq_desc->cqe_type) {
772 cmp_err = nicvf_rcv_pkt_handler(nic, cq, cq_desc,
774 if (__predict_false(cmp_err != 0)) {
776 				 * Oops. Cannot finish now.
777 * Let's try again later.
784 nicvf_snd_pkt_handler(nic, cq, (void *)cq_desc,
788 case CQE_TYPE_INVALID:
789 case CQE_TYPE_RX_SPLIT:
790 case CQE_TYPE_RX_TCP:
791 case CQE_TYPE_SEND_PTP:
799 "%s CQ%d processed_cqe %d work_done %d\n",
800 __func__, cq_idx, processed_cqe, work_done);
802 /* Ring doorbell to inform H/W to reuse processed CQEs */
803 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR, cq_idx, processed_cqe);
806 ((if_getdrvflags(nic->ifp) & IFF_DRV_RUNNING) != 0)) {
807 		/* Re-enable the TXQ if it was stopped earlier because the SQ was full */
808 if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
809 taskqueue_enqueue(sq->snd_taskq, &sq->snd_task);
813 * Flush any outstanding LRO work
816 rq = &nic->qs->rq[rq_idx];
818 tcp_lro_flush_all(lro);
820 NICVF_CMP_UNLOCK(cq);
823 /* Push received MBUFs to the stack */
824 while (!buf_ring_empty(cq->rx_br)) {
825 mbuf = buf_ring_dequeue_mc(cq->rx_br);
826 if (__predict_true(mbuf != NULL))
827 (*ifp->if_input)(ifp, mbuf);
834 * Qset error interrupt handler
836 * As of now only CQ errors are handled
839 nicvf_qs_err_task(void *arg, int pending)
842 struct queue_set *qs;
845 boolean_t enable = TRUE;
847 nic = (struct nicvf *)arg;
850 /* Deactivate network interface */
851 if_setdrvflagbits(nic->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
853 /* Check if it is CQ err */
854 for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
855 status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
857 if ((status & CQ_ERR_MASK) == 0)
859 /* Process already queued CQEs and reconfig CQ */
860 nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
861 nicvf_sq_disable(nic, qidx);
862 (void)nicvf_cq_intr_handler(nic, qidx);
863 nicvf_cmp_queue_config(nic, qs, qidx, enable);
864 nicvf_sq_free_used_descs(nic, &qs->sq[qidx], qidx);
865 nicvf_sq_enable(nic, &qs->sq[qidx], qidx);
866 nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
869 if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
870 /* Re-enable Qset error interrupt */
871 nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
875 nicvf_cmp_task(void *arg, int pending)
877 struct cmp_queue *cq;
881 cq = (struct cmp_queue *)arg;
884 /* Handle CQ descriptors */
885 cmp_err = nicvf_cq_intr_handler(nic, cq->idx);
886 if (__predict_false(cmp_err != 0)) {
888 * Schedule another thread here since we did not
889 * process the entire CQ due to Tx or Rx CQ parse error.
891 taskqueue_enqueue(cq->cmp_taskq, &cq->cmp_task);
895 nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx);
896 	/* Re-enable the interrupt (previously disabled in nicvf_intr_handler()) */
897 nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->idx);
901 /* Initialize completion queue */
903 nicvf_init_cmp_queue(struct nicvf *nic, struct cmp_queue *cq, int q_len,
908 	/* Initialize lock */
909 snprintf(cq->mtx_name, sizeof(cq->mtx_name), "%s: CQ(%d) lock",
910 device_get_nameunit(nic->dev), qidx);
911 mtx_init(&cq->mtx, cq->mtx_name, NULL, MTX_DEF);
913 err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
914 NICVF_CQ_BASE_ALIGN_BYTES);
917 device_printf(nic->dev,
918 "Could not allocate DMA memory for CQ\n");
922 cq->desc = cq->dmem.base;
923 cq->thresh = pass1_silicon(nic->dev) ? 0 : CMP_QUEUE_CQE_THRESH;
926 nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;
928 cq->rx_br = buf_ring_alloc(CMP_QUEUE_LEN * 8, M_DEVBUF, M_WAITOK,
931 /* Allocate taskqueue */
932 TASK_INIT(&cq->cmp_task, 0, nicvf_cmp_task, cq);
933 cq->cmp_taskq = taskqueue_create_fast("nicvf_cmp_taskq", M_WAITOK,
934 taskqueue_thread_enqueue, &cq->cmp_taskq);
935 taskqueue_start_threads(&cq->cmp_taskq, 1, PI_NET, "%s: cmp_taskq(%d)",
936 device_get_nameunit(nic->dev), qidx);
942 nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
948 * The completion queue itself should be disabled by now
949 * (ref. nicvf_snd_queue_config()).
950 	 * Ensure that it is safe to free it, or panic otherwise.
953 panic("%s: Trying to free working CQ(%d)", __func__, cq->idx);
955 if (cq->cmp_taskq != NULL) {
957 while (taskqueue_cancel(cq->cmp_taskq, &cq->cmp_task, NULL) != 0)
958 taskqueue_drain(cq->cmp_taskq, &cq->cmp_task);
960 taskqueue_free(cq->cmp_taskq);
961 cq->cmp_taskq = NULL;
964 	 * The completion task could possibly re-enable the completion interrupt,
965 	 * so disable it now that the completion task has finished
966 	 * processing. It is safe to do so since the corresponding CQ
967 	 * was already disabled.
969 nicvf_disable_intr(nic, NICVF_INTR_CQ, cq->idx);
970 nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx);
973 nicvf_free_q_desc_mem(nic, &cq->dmem);
974 drbr_free(cq->rx_br, M_DEVBUF);
975 NICVF_CMP_UNLOCK(cq);
976 mtx_destroy(&cq->mtx);
977 memset(cq->mtx_name, 0, sizeof(cq->mtx_name));
981 nicvf_xmit_locked(struct snd_queue *sq)
988 NICVF_TX_LOCK_ASSERT(sq);
994 while ((next = drbr_peek(ifp, sq->br)) != NULL) {
995 err = nicvf_tx_mbuf_locked(sq, &next);
998 drbr_advance(ifp, sq->br);
1000 drbr_putback(ifp, sq->br, next);
1004 drbr_advance(ifp, sq->br);
1005 /* Send a copy of the frame to the BPF listener */
1006 ETHER_BPF_MTAP(ifp, next);
1012 nicvf_snd_task(void *arg, int pending)
1014 struct snd_queue *sq = (struct snd_queue *)arg;
1023 * Skip sending anything if the driver is not running,
1024 	 * the SQ is full, or the link is down.
1026 if (((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1027 IFF_DRV_RUNNING) || !nic->link_up)
1031 err = nicvf_xmit_locked(sq);
1032 NICVF_TX_UNLOCK(sq);
1035 taskqueue_enqueue(sq->snd_taskq, &sq->snd_task);
1038 /* Initialize transmit queue */
1040 nicvf_init_snd_queue(struct nicvf *nic, struct snd_queue *sq, int q_len,
1046 	/* Initialize TX lock for this queue */
1047 snprintf(sq->mtx_name, sizeof(sq->mtx_name), "%s: SQ(%d) lock",
1048 device_get_nameunit(nic->dev), qidx);
1049 mtx_init(&sq->mtx, sq->mtx_name, NULL, MTX_DEF);
1052 /* Allocate buffer ring */
1053 sq->br = buf_ring_alloc(q_len / MIN_SQ_DESC_PER_PKT_XMIT, M_DEVBUF,
1054 M_NOWAIT, &sq->mtx);
1055 if (sq->br == NULL) {
1056 device_printf(nic->dev,
1057 "ERROR: Could not set up buf ring for SQ(%d)\n", qidx);
1062 /* Allocate DMA memory for Tx descriptors */
1063 err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
1064 NICVF_SQ_BASE_ALIGN_BYTES);
1066 device_printf(nic->dev,
1067 "Could not allocate DMA memory for SQ\n");
1071 sq->desc = sq->dmem.base;
1072 sq->head = sq->tail = 0;
1073 atomic_store_rel_int(&sq->free_cnt, q_len - 1);
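	/*
	 * One descriptor is held back (hence q_len - 1), presumably so that a
	 * completely full ring can be told apart from an empty one.
	 */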
1074 sq->thresh = SND_QUEUE_THRESH;
1079 * Allocate DMA maps for Tx buffers
1082 /* Create DMA tag first */
1083 err = bus_dma_tag_create(
1084 bus_get_dma_tag(nic->dev), /* parent tag */
1087 BUS_SPACE_MAXADDR, /* lowaddr */
1088 BUS_SPACE_MAXADDR, /* highaddr */
1089 NULL, NULL, /* filtfunc, filtfuncarg */
1090 NICVF_TSO_MAXSIZE, /* maxsize */
1091 NICVF_TSO_NSEGS, /* nsegments */
1092 MCLBYTES, /* maxsegsize */
1094 NULL, NULL, /* lockfunc, lockfuncarg */
1095 &sq->snd_buff_dmat); /* dmat */
1098 device_printf(nic->dev,
1099 "Failed to create busdma tag for Tx buffers\n");
1103 /* Allocate send buffers array */
1104 sq->snd_buff = malloc(sizeof(*sq->snd_buff) * q_len, M_NICVF,
1105 (M_NOWAIT | M_ZERO));
1106 if (sq->snd_buff == NULL) {
1107 device_printf(nic->dev,
1108 "Could not allocate memory for Tx buffers array\n");
1113 /* Now populate maps */
1114 for (i = 0; i < q_len; i++) {
1115 err = bus_dmamap_create(sq->snd_buff_dmat, 0,
1116 &sq->snd_buff[i].dmap);
1118 device_printf(nic->dev,
1119 "Failed to create DMA maps for Tx buffers\n");
1123 NICVF_TX_UNLOCK(sq);
1125 /* Allocate taskqueue */
1126 TASK_INIT(&sq->snd_task, 0, nicvf_snd_task, sq);
1127 sq->snd_taskq = taskqueue_create_fast("nicvf_snd_taskq", M_WAITOK,
1128 taskqueue_thread_enqueue, &sq->snd_taskq);
1129 taskqueue_start_threads(&sq->snd_taskq, 1, PI_NET, "%s: snd_taskq(%d)",
1130 device_get_nameunit(nic->dev), qidx);
1134 NICVF_TX_UNLOCK(sq);
1139 nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
1141 struct queue_set *qs = nic->qs;
1148 if (sq->snd_taskq != NULL) {
1150 while (taskqueue_cancel(sq->snd_taskq, &sq->snd_task, NULL) != 0)
1151 taskqueue_drain(sq->snd_taskq, &sq->snd_task);
1153 taskqueue_free(sq->snd_taskq);
1154 sq->snd_taskq = NULL;
1158 if (sq->snd_buff_dmat != NULL) {
1159 if (sq->snd_buff != NULL) {
1160 for (i = 0; i < qs->sq_len; i++) {
1161 m_freem(sq->snd_buff[i].mbuf);
1162 sq->snd_buff[i].mbuf = NULL;
1164 bus_dmamap_unload(sq->snd_buff_dmat,
1165 sq->snd_buff[i].dmap);
1166 err = bus_dmamap_destroy(sq->snd_buff_dmat,
1167 sq->snd_buff[i].dmap);
1169 * If bus_dmamap_destroy fails it can cause
1170 				 * a random panic later if the tag is also
1171 * destroyed in the process.
1174 ("%s: Could not destroy DMA map for SQ",
1179 free(sq->snd_buff, M_NICVF);
1181 err = bus_dma_tag_destroy(sq->snd_buff_dmat);
1183 ("%s: Trying to destroy BUSY DMA tag", __func__));
1186 /* Free private driver ring for this send queue */
1188 drbr_free(sq->br, M_DEVBUF);
1190 if (sq->dmem.base != NULL)
1191 nicvf_free_q_desc_mem(nic, &sq->dmem);
1193 NICVF_TX_UNLOCK(sq);
1194 /* Destroy Tx lock */
1195 mtx_destroy(&sq->mtx);
1196 memset(sq->mtx_name, 0, sizeof(sq->mtx_name));
1200 nicvf_reclaim_snd_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
1203 /* Disable send queue */
1204 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
1205 /* Check if SQ is stopped */
1206 if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
1208 /* Reset send queue */
1209 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
1213 nicvf_reclaim_rcv_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
1215 union nic_mbx mbx = {};
1217 /* Make sure all packets in the pipeline are written back into mem */
1218 mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
1219 nicvf_send_msg_to_pf(nic, &mbx);
1223 nicvf_reclaim_cmp_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
1226 	/* Disable timer threshold (doesn't get reset upon CQ reset) */
1227 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
1228 /* Disable completion queue */
1229 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
1230 /* Reset completion queue */
1231 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
1235 nicvf_reclaim_rbdr(struct nicvf *nic, struct rbdr *rbdr, int qidx)
1237 uint64_t tmp, fifo_state;
1240 	/* Save head and tail pointers for freeing up buffers */
1242 nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, qidx) >> 3;
1244 nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, qidx) >> 3;
1247 * If RBDR FIFO is in 'FAIL' state then do a reset first
1250 fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
1251 if (((fifo_state >> 62) & 0x03) == 0x3) {
1252 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
1253 qidx, NICVF_RBDR_RESET);
1257 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
1258 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
1261 tmp = nicvf_queue_reg_read(nic,
1262 NIC_QSET_RBDR_0_1_PREFETCH_STATUS, qidx);
1263 if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
1269 device_printf(nic->dev,
1270 "Failed polling on prefetch status\n");
1274 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
1277 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
1279 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
1280 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
1284 /* Configures receive queue */
1286 nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
1287 int qidx, bool enable)
1289 union nic_mbx mbx = {};
1290 struct rcv_queue *rq;
1291 struct rq_cfg rq_cfg;
1293 struct lro_ctrl *lro;
1298 rq->enable = enable;
1302 /* Disable receive queue */
1303 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
1306 nicvf_reclaim_rcv_queue(nic, qs, qidx);
1307 /* Free LRO memory */
1309 rq->lro_enabled = FALSE;
1313 /* Configure LRO if enabled */
1314 rq->lro_enabled = FALSE;
1315 if ((if_getcapenable(ifp) & IFCAP_LRO) != 0) {
1316 if (tcp_lro_init(lro) != 0) {
1317 device_printf(nic->dev,
1318 "Failed to initialize LRO for RXQ%d\n", qidx);
1320 rq->lro_enabled = TRUE;
1321 lro->ifp = nic->ifp;
1325 rq->cq_qs = qs->vnic_id;
1327 rq->start_rbdr_qs = qs->vnic_id;
1328 rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
1329 rq->cont_rbdr_qs = qs->vnic_id;
1330 rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
1331 	/* All writes of RBDR data to be loaded into the L2 cache as well */
1334 /* Send a mailbox msg to PF to config RQ */
1335 mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
1336 mbx.rq.qs_num = qs->vnic_id;
1337 mbx.rq.rq_num = qidx;
1338 mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
1339 (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
1340 (rq->cont_qs_rbdr_idx << 8) | (rq->start_rbdr_qs << 1) |
1341 (rq->start_qs_rbdr_idx);
1342 nicvf_send_msg_to_pf(nic, &mbx);
1344 mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
1345 mbx.rq.cfg = (1UL << 63) | (1UL << 62) | (qs->vnic_id << 0);
1346 nicvf_send_msg_to_pf(nic, &mbx);
1350 * Enable CQ drop to reserve sufficient CQEs for all tx packets
1352 mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
1353 mbx.rq.cfg = (1UL << 62) | (RQ_CQ_DROP << 8);
1354 nicvf_send_msg_to_pf(nic, &mbx);
1356 nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);
1358 /* Enable Receive queue */
1361 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx,
1362 *(uint64_t *)&rq_cfg);
1365 /* Configures completion queue */
1367 nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
1368 int qidx, boolean_t enable)
1370 struct cmp_queue *cq;
1371 struct cq_cfg cq_cfg;
1374 cq->enable = enable;
1377 nicvf_reclaim_cmp_queue(nic, qs, qidx);
1381 /* Reset completion queue */
1382 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
1384 /* Set completion queue base address */
1385 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, qidx,
1386 (uint64_t)(cq->dmem.phys_base));
1388 /* Enable Completion queue */
1392 cq_cfg.qsize = CMP_QSIZE;
1394 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(uint64_t *)&cq_cfg);
1396 /* Set threshold value for interrupt generation */
1397 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
1398 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx,
1399 nic->cq_coalesce_usecs);
1402 /* Configures transmit queue */
1404 nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs, int qidx,
1407 union nic_mbx mbx = {};
1408 struct snd_queue *sq;
1409 struct sq_cfg sq_cfg;
1412 sq->enable = enable;
1415 nicvf_reclaim_snd_queue(nic, qs, qidx);
1419 /* Reset send queue */
1420 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
1422 sq->cq_qs = qs->vnic_id;
1425 /* Send a mailbox msg to PF to config SQ */
1426 mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
1427 mbx.sq.qs_num = qs->vnic_id;
1428 mbx.sq.sq_num = qidx;
1429 mbx.sq.sqs_mode = nic->sqs_mode;
1430 mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
1431 nicvf_send_msg_to_pf(nic, &mbx);
1433 /* Set queue base address */
1434 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, qidx,
1435 (uint64_t)(sq->dmem.phys_base));
1437 /* Enable send queue & set queue size */
1441 sq_cfg.qsize = SND_QSIZE;
1442 sq_cfg.tstmp_bgx_intf = 0;
1443 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(uint64_t *)&sq_cfg);
1445 /* Set threshold value for interrupt generation */
1446 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);
1449 /* Configures receive buffer descriptor ring */
1451 nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs, int qidx,
1455 struct rbdr_cfg rbdr_cfg;
1457 rbdr = &qs->rbdr[qidx];
1458 nicvf_reclaim_rbdr(nic, rbdr, qidx);
1462 /* Set descriptor base address */
1463 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, qidx,
1464 (uint64_t)(rbdr->dmem.phys_base));
1466 /* Enable RBDR & set queue size */
1467 /* Buffer size should be in multiples of 128 bytes */
1471 rbdr_cfg.qsize = RBDR_SIZE;
1472 rbdr_cfg.avg_con = 0;
1473 rbdr_cfg.lines = rbdr->dma_size / 128;
1474 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
1475 *(uint64_t *)&rbdr_cfg);
1478 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, qidx,
1481 /* Set threshold value for interrupt generation */
1482 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH, qidx,
1486 /* Requests PF to assign and enable Qset */
1488 nicvf_qset_config(struct nicvf *nic, boolean_t enable)
1490 union nic_mbx mbx = {};
1491 struct queue_set *qs;
1492 struct qs_cfg *qs_cfg;
1496 device_printf(nic->dev,
1497 "Qset is still not allocated, don't init queues\n");
1501 qs->enable = enable;
1502 qs->vnic_id = nic->vf_id;
1504 /* Send a mailbox msg to PF to config Qset */
1505 mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
1506 mbx.qs.num = qs->vnic_id;
1509 qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
1512 qs_cfg->vnic = qs->vnic_id;
1514 nicvf_send_msg_to_pf(nic, &mbx);
1518 nicvf_free_resources(struct nicvf *nic)
1521 struct queue_set *qs;
1525 * Remove QS error task first since it has to be dead
1526 * to safely free completion queue tasks.
1528 if (qs->qs_err_taskq != NULL) {
1529 /* Shut down QS error tasks */
1530 while (taskqueue_cancel(qs->qs_err_taskq,
1531 &qs->qs_err_task, NULL) != 0) {
1532 taskqueue_drain(qs->qs_err_taskq, &qs->qs_err_task);
1535 taskqueue_free(qs->qs_err_taskq);
1536 qs->qs_err_taskq = NULL;
1538 /* Free receive buffer descriptor ring */
1539 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
1540 nicvf_free_rbdr(nic, &qs->rbdr[qidx]);
1542 /* Free completion queue */
1543 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
1544 nicvf_free_cmp_queue(nic, &qs->cq[qidx]);
1546 /* Free send queue */
1547 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
1548 nicvf_free_snd_queue(nic, &qs->sq[qidx]);
1552 nicvf_alloc_resources(struct nicvf *nic)
1554 struct queue_set *qs = nic->qs;
1557 /* Alloc receive buffer descriptor ring */
1558 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
1559 if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
1560 DMA_BUFFER_LEN, qidx))
1564 /* Alloc send queue */
1565 for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
1566 if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len, qidx))
1570 /* Alloc completion queue */
1571 for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
1572 if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len, qidx))
1576 /* Allocate QS error taskqueue */
1577 TASK_INIT(&qs->qs_err_task, 0, nicvf_qs_err_task, nic);
1578 qs->qs_err_taskq = taskqueue_create_fast("nicvf_qs_err_taskq", M_WAITOK,
1579 taskqueue_thread_enqueue, &qs->qs_err_taskq);
1580 taskqueue_start_threads(&qs->qs_err_taskq, 1, PI_NET, "%s: qs_taskq",
1581 device_get_nameunit(nic->dev));
1585 nicvf_free_resources(nic);
1590 nicvf_set_qset_resources(struct nicvf *nic)
1592 struct queue_set *qs;
1594 qs = malloc(sizeof(*qs), M_NICVF, (M_ZERO | M_WAITOK));
1597 /* Set count of each queue */
1598 qs->rbdr_cnt = RBDR_CNT;
1599 qs->rq_cnt = RCV_QUEUE_CNT;
1601 qs->sq_cnt = SND_QUEUE_CNT;
1602 qs->cq_cnt = CMP_QUEUE_CNT;
1604 /* Set queue lengths */
1605 qs->rbdr_len = RCV_BUF_COUNT;
1606 qs->sq_len = SND_QUEUE_LEN;
1607 qs->cq_len = CMP_QUEUE_LEN;
1609 nic->rx_queues = qs->rq_cnt;
1610 nic->tx_queues = qs->sq_cnt;
1616 nicvf_config_data_transfer(struct nicvf *nic, boolean_t enable)
1618 boolean_t disable = FALSE;
1619 struct queue_set *qs;
1627 if (nicvf_alloc_resources(nic) != 0)
1630 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
1631 nicvf_snd_queue_config(nic, qs, qidx, enable);
1632 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
1633 nicvf_cmp_queue_config(nic, qs, qidx, enable);
1634 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
1635 nicvf_rbdr_config(nic, qs, qidx, enable);
1636 for (qidx = 0; qidx < qs->rq_cnt; qidx++)
1637 nicvf_rcv_queue_config(nic, qs, qidx, enable);
1639 for (qidx = 0; qidx < qs->rq_cnt; qidx++)
1640 nicvf_rcv_queue_config(nic, qs, qidx, disable);
1641 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
1642 nicvf_rbdr_config(nic, qs, qidx, disable);
1643 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
1644 nicvf_snd_queue_config(nic, qs, qidx, disable);
1645 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
1646 nicvf_cmp_queue_config(nic, qs, qidx, disable);
1648 nicvf_free_resources(nic);
1655 * Get a free desc from SQ
1656  * returns descriptor pointer & descriptor number
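 * Descriptors are taken from the tail and released at the head; free_cnt is
 * updated atomically since the transmit path and the completion handler can
 * run concurrently.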
1659 nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
1664 atomic_subtract_int(&sq->free_cnt, desc_cnt);
1665 sq->tail += desc_cnt;
1666 sq->tail &= (sq->dmem.q_len - 1);
1671 /* Free descriptor back to SQ for future use */
1673 nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
1676 atomic_add_int(&sq->free_cnt, desc_cnt);
1677 sq->head += desc_cnt;
1678 sq->head &= (sq->dmem.q_len - 1);
1682 nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
1685 qentry &= (sq->dmem.q_len - 1);
1690 nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
1694 sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
1695 sq_cfg |= NICVF_SQ_EN;
1696 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
1697 /* Ring doorbell so that H/W restarts processing SQEs */
1698 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
1702 nicvf_sq_disable(struct nicvf *nic, int qidx)
1706 sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
1707 sq_cfg &= ~NICVF_SQ_EN;
1708 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
1712 nicvf_sq_free_used_descs(struct nicvf *nic, struct snd_queue *sq, int qidx)
1714 uint64_t head, tail;
1715 struct snd_buff *snd_buff;
1716 struct sq_hdr_subdesc *hdr;
1719 head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
1720 tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
1721 while (sq->head != head) {
1722 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
1723 if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
1724 nicvf_put_sq_desc(sq, 1);
1727 snd_buff = &sq->snd_buff[sq->head];
1728 if (snd_buff->mbuf != NULL) {
1729 bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
1730 m_freem(snd_buff->mbuf);
1731 sq->snd_buff[sq->head].mbuf = NULL;
1733 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
1735 NICVF_TX_UNLOCK(sq);
1739 * Add SQ HEADER subdescriptor.
1740 * First subdescriptor for every send descriptor.
1743 nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
1744 int subdesc_cnt, struct mbuf *mbuf, int len)
1747 struct sq_hdr_subdesc *hdr;
1748 struct ether_vlan_header *eh;
1754 int ehdrlen, iphlen, poff;
1758 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
1759 sq->snd_buff[qentry].mbuf = mbuf;
1761 memset(hdr, 0, SND_QUEUE_DESC_SIZE);
1762 hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
1763 /* Enable notification via CQE after processing SQE */
1765 	/* Number of subdescriptors following this one */
1766 hdr->subdesc_cnt = subdesc_cnt;
1769 eh = mtod(mbuf, struct ether_vlan_header *);
1770 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1771 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1772 etype = ntohs(eh->evl_proto);
1774 ehdrlen = ETHER_HDR_LEN;
1775 etype = ntohs(eh->evl_encap_proto);
1780 case ETHERTYPE_IPV6:
1781 /* ARM64TODO: Add support for IPv6 */
1783 sq->snd_buff[qentry].mbuf = NULL;
1788 if (mbuf->m_len < ehdrlen + sizeof(struct ip)) {
1789 mbuf = m_pullup(mbuf, ehdrlen + sizeof(struct ip));
1790 sq->snd_buff[qentry].mbuf = mbuf;
1795 ip = (struct ip *)(mbuf->m_data + ehdrlen);
1796 iphlen = ip->ip_hl << 2;
1797 poff = ehdrlen + iphlen;
1799 if (mbuf->m_pkthdr.csum_flags != 0) {
1800 hdr->csum_l3 = 1; /* Enable IP csum calculation */
1803 if ((mbuf->m_pkthdr.csum_flags & CSUM_TCP) == 0)
1806 if (mbuf->m_len < (poff + sizeof(struct tcphdr))) {
1807 mbuf = m_pullup(mbuf, poff + sizeof(struct tcphdr));
1808 sq->snd_buff[qentry].mbuf = mbuf;
1812 hdr->csum_l4 = SEND_L4_CSUM_TCP;
1815 if ((mbuf->m_pkthdr.csum_flags & CSUM_UDP) == 0)
1818 if (mbuf->m_len < (poff + sizeof(struct udphdr))) {
1819 mbuf = m_pullup(mbuf, poff + sizeof(struct udphdr));
1820 sq->snd_buff[qentry].mbuf = mbuf;
1824 hdr->csum_l4 = SEND_L4_CSUM_UDP;
1827 if ((mbuf->m_pkthdr.csum_flags & CSUM_SCTP) == 0)
1830 if (mbuf->m_len < (poff + sizeof(struct sctphdr))) {
1831 mbuf = m_pullup(mbuf, poff + sizeof(struct sctphdr));
1832 sq->snd_buff[qentry].mbuf = mbuf;
1836 hdr->csum_l4 = SEND_L4_CSUM_SCTP;
1841 hdr->l3_offset = ehdrlen;
1842 hdr->l4_offset = ehdrlen + iphlen;
1845 if ((mbuf->m_pkthdr.tso_segsz != 0) && nic->hw_tso) {
1847 * Extract ip again as m_data could have been modified.
1849 ip = (struct ip *)(mbuf->m_data + ehdrlen);
1850 th = (struct tcphdr *)((caddr_t)ip + iphlen);
1853 hdr->tso_start = ehdrlen + iphlen + (th->th_off * 4);
1854 hdr->tso_max_paysize = mbuf->m_pkthdr.tso_segsz;
1855 hdr->inner_l3_offset = ehdrlen - 2;
1856 nic->drv_stats.tx_tso++;
1868 * SQ GATHER subdescriptor
1869 * Must follow HDR descriptor
1871 static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
1872 int size, uint64_t data)
1874 struct sq_gather_subdesc *gather;
1876 qentry &= (sq->dmem.q_len - 1);
1877 gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);
1879 memset(gather, 0, SND_QUEUE_DESC_SIZE);
1880 gather->subdesc_type = SQ_DESC_TYPE_GATHER;
1881 gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
1882 gather->size = size;
1883 gather->addr = data;
1886 /* Put an mbuf to a SQ for packet transfer. */
1888 nicvf_tx_mbuf_locked(struct snd_queue *sq, struct mbuf **mbufp)
1890 bus_dma_segment_t segs[256];
1891 struct snd_buff *snd_buff;
1897 NICVF_TX_LOCK_ASSERT(sq);
1899 if (sq->free_cnt == 0)
1902 snd_buff = &sq->snd_buff[sq->tail];
1904 err = bus_dmamap_load_mbuf_sg(sq->snd_buff_dmat, snd_buff->dmap,
1905 *mbufp, segs, &nsegs, BUS_DMA_NOWAIT);
1906 if (__predict_false(err != 0)) {
1907 /* ARM64TODO: Add mbuf defragmenting if we lack maps */
1913 	/* Compute how many subdescriptors are required */
1914 subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT + nsegs - 1;
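	/* One header subdescriptor plus one gather subdescriptor per DMA segment */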
1915 if (subdesc_cnt > sq->free_cnt) {
1916 /* ARM64TODO: Add mbuf defragmentation if we lack descriptors */
1917 bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
1921 qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
1923 /* Add SQ header subdesc */
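	/*
	 * The header's subdesc_cnt field counts only the subdescriptors that
	 * follow it, hence subdesc_cnt - 1.
	 */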
1924 err = nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, *mbufp,
1925 (*mbufp)->m_pkthdr.len);
1927 nicvf_put_sq_desc(sq, subdesc_cnt);
1928 bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
1929 if (err == ENOBUFS) {
1936 /* Add SQ gather subdescs */
1937 for (seg = 0; seg < nsegs; seg++) {
1938 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1939 nicvf_sq_add_gather_subdesc(sq, qentry, segs[seg].ds_len,
1943 /* make sure all memory stores are done before ringing doorbell */
1944 bus_dmamap_sync(sq->dmem.dmat, sq->dmem.dmap, BUS_DMASYNC_PREWRITE);
1946 dprintf(sq->nic->dev, "%s: sq->idx: %d, subdesc_cnt: %d\n",
1947 __func__, sq->idx, subdesc_cnt);
1948 /* Inform HW to xmit new packet */
1949 nicvf_queue_reg_write(sq->nic, NIC_QSET_SQ_0_7_DOOR,
1950 sq->idx, subdesc_cnt);
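/*
 * The rb_lens array in the CQE holds 16-bit lengths packed four per 64-bit
 * word; on big-endian hosts the order within each word is reversed, so
 * frag_num() swaps the index within each group of four.
 */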
1954 static __inline u_int
1957 #if BYTE_ORDER == BIG_ENDIAN
1958 return ((i & ~3) + 3 - (i & 3));
1964 /* Returns MBUF for a received packet */
1966 nicvf_get_rcv_mbuf(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
1969 int payload_len = 0;
1971 struct mbuf *mbuf_frag;
1972 uint16_t *rb_lens = NULL;
1973 uint64_t *rb_ptrs = NULL;
1976 rb_lens = (uint16_t *)((uint8_t *)cqe_rx + (3 * sizeof(uint64_t)));
1977 rb_ptrs = (uint64_t *)((uint8_t *)cqe_rx + (6 * sizeof(uint64_t)));
1979 dprintf(nic->dev, "%s rb_cnt %d rb0_ptr %lx rb0_sz %d\n",
1980 __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);
1982 for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
1983 payload_len = rb_lens[frag_num(frag)];
1985 /* First fragment */
1986 mbuf = nicvf_rb_ptr_to_mbuf(nic,
1987 (*rb_ptrs - cqe_rx->align_pad));
1988 mbuf->m_len = payload_len;
1989 mbuf->m_data += cqe_rx->align_pad;
1990 if_setrcvif(mbuf, nic->ifp);
1993 mbuf_frag = nicvf_rb_ptr_to_mbuf(nic, *rb_ptrs);
1994 m_append(mbuf, payload_len, mbuf_frag->m_data);
1997 /* Next buffer pointer */
2001 if (__predict_true(mbuf != NULL)) {
2003 mbuf->m_pkthdr.flowid = cqe_rx->rq_idx;
2004 M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE);
2005 if (__predict_true((if_getcapenable(nic->ifp) & IFCAP_RXCSUM) != 0)) {
2007 * HW by default verifies IP & TCP/UDP/SCTP checksums
2009 if (__predict_true(cqe_rx->l3_type == L3TYPE_IPV4)) {
2010 mbuf->m_pkthdr.csum_flags =
2011 (CSUM_IP_CHECKED | CSUM_IP_VALID);
2014 switch (cqe_rx->l4_type) {
2016 case L4TYPE_TCP: /* fall through */
2017 mbuf->m_pkthdr.csum_flags |=
2018 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2019 mbuf->m_pkthdr.csum_data = 0xffff;
2022 mbuf->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
2033 /* Enable interrupt */
2035 nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
2039 reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
2043 reg_val |= ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
2046 reg_val |= ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
2048 case NICVF_INTR_RBDR:
2049 reg_val |= ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
2051 case NICVF_INTR_PKT_DROP:
2052 reg_val |= (1UL << NICVF_INTR_PKT_DROP_SHIFT);
2054 case NICVF_INTR_TCP_TIMER:
2055 reg_val |= (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
2057 case NICVF_INTR_MBOX:
2058 reg_val |= (1UL << NICVF_INTR_MBOX_SHIFT);
2060 case NICVF_INTR_QS_ERR:
2061 reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT);
2064 device_printf(nic->dev,
2065 "Failed to enable interrupt: unknown type\n");
2069 nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val);
2072 /* Disable interrupt */
2074 nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
2076 uint64_t reg_val = 0;
2080 reg_val |= ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
2083 reg_val |= ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
2085 case NICVF_INTR_RBDR:
2086 reg_val |= ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
2088 case NICVF_INTR_PKT_DROP:
2089 reg_val |= (1UL << NICVF_INTR_PKT_DROP_SHIFT);
2091 case NICVF_INTR_TCP_TIMER:
2092 reg_val |= (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
2094 case NICVF_INTR_MBOX:
2095 reg_val |= (1UL << NICVF_INTR_MBOX_SHIFT);
2097 case NICVF_INTR_QS_ERR:
2098 reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT);
2101 device_printf(nic->dev,
2102 "Failed to disable interrupt: unknown type\n");
2106 nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val);
2109 /* Clear interrupt */
2111 nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
2113 uint64_t reg_val = 0;
2117 reg_val = ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
2120 reg_val = ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
2122 case NICVF_INTR_RBDR:
2123 reg_val = ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
2125 case NICVF_INTR_PKT_DROP:
2126 reg_val = (1UL << NICVF_INTR_PKT_DROP_SHIFT);
2128 case NICVF_INTR_TCP_TIMER:
2129 reg_val = (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
2131 case NICVF_INTR_MBOX:
2132 reg_val = (1UL << NICVF_INTR_MBOX_SHIFT);
2134 case NICVF_INTR_QS_ERR:
2135 reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT);
2138 device_printf(nic->dev,
2139 "Failed to clear interrupt: unknown type\n");
2143 nicvf_reg_write(nic, NIC_VF_INT, reg_val);
2146 /* Check if interrupt is enabled */
2148 nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
2151 uint64_t mask = 0xff;
2153 reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
2157 mask = ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
2160 mask = ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
2162 case NICVF_INTR_RBDR:
2163 mask = ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
2165 case NICVF_INTR_PKT_DROP:
2166 mask = NICVF_INTR_PKT_DROP_MASK;
2168 case NICVF_INTR_TCP_TIMER:
2169 mask = NICVF_INTR_TCP_TIMER_MASK;
2171 case NICVF_INTR_MBOX:
2172 mask = NICVF_INTR_MBOX_MASK;
2174 case NICVF_INTR_QS_ERR:
2175 mask = NICVF_INTR_QS_ERR_MASK;
2178 device_printf(nic->dev,
2179 "Failed to check interrupt enable: unknown type\n");
2183 return (reg_val & mask);
2187 nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
2189 struct rcv_queue *rq;
2191 #define GET_RQ_STATS(reg) \
2192 nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
2193 (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
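/*
 * Per-queue statistics registers are selected by the queue number shifted
 * into the address, with individual 8-byte counters indexed by 'reg'.
 */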
2195 rq = &nic->qs->rq[rq_idx];
2196 rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
2197 rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
2201 nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
2203 struct snd_queue *sq;
2205 #define GET_SQ_STATS(reg) \
2206 nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
2207 (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
2209 sq = &nic->qs->sq[sq_idx];
2210 sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
2211 sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
2214 /* Check for errors in the receive completion queue entry */
2216 nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cmp_queue *cq,
2217 struct cqe_rx_t *cqe_rx)
2219 struct nicvf_hw_stats *stats = &nic->hw_stats;
2220 struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
2222 if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
2223 drv_stats->rx_frames_ok++;
2227 switch (cqe_rx->err_opcode) {
2228 case CQ_RX_ERROP_RE_PARTIAL:
2229 stats->rx_bgx_truncated_pkts++;
2231 case CQ_RX_ERROP_RE_JABBER:
2232 stats->rx_jabber_errs++;
2234 case CQ_RX_ERROP_RE_FCS:
2235 stats->rx_fcs_errs++;
2237 case CQ_RX_ERROP_RE_RX_CTL:
2238 stats->rx_bgx_errs++;
2240 case CQ_RX_ERROP_PREL2_ERR:
2241 stats->rx_prel2_errs++;
2243 case CQ_RX_ERROP_L2_MAL:
2244 stats->rx_l2_hdr_malformed++;
2246 case CQ_RX_ERROP_L2_OVERSIZE:
2247 stats->rx_oversize++;
2249 case CQ_RX_ERROP_L2_UNDERSIZE:
2250 stats->rx_undersize++;
2252 case CQ_RX_ERROP_L2_LENMISM:
2253 stats->rx_l2_len_mismatch++;
2255 case CQ_RX_ERROP_L2_PCLP:
2256 stats->rx_l2_pclp++;
2258 case CQ_RX_ERROP_IP_NOT:
2259 stats->rx_ip_ver_errs++;
2261 case CQ_RX_ERROP_IP_CSUM_ERR:
2262 stats->rx_ip_csum_errs++;
2264 case CQ_RX_ERROP_IP_MAL:
2265 stats->rx_ip_hdr_malformed++;
2267 case CQ_RX_ERROP_IP_MALD:
2268 stats->rx_ip_payload_malformed++;
2270 case CQ_RX_ERROP_IP_HOP:
2271 stats->rx_ip_ttl_errs++;
2273 case CQ_RX_ERROP_L3_PCLP:
2274 stats->rx_l3_pclp++;
2276 case CQ_RX_ERROP_L4_MAL:
2277 stats->rx_l4_malformed++;
2279 case CQ_RX_ERROP_L4_CHK:
2280 stats->rx_l4_csum_errs++;
2282 case CQ_RX_ERROP_UDP_LEN:
2283 stats->rx_udp_len_errs++;
2285 case CQ_RX_ERROP_L4_PORT:
2286 stats->rx_l4_port_errs++;
2288 case CQ_RX_ERROP_TCP_FLAG:
2289 stats->rx_tcp_flag_errs++;
2291 case CQ_RX_ERROP_TCP_OFFSET:
2292 stats->rx_tcp_offset_errs++;
2294 case CQ_RX_ERROP_L4_PCLP:
2295 stats->rx_l4_pclp++;
2297 case CQ_RX_ERROP_RBDR_TRUNC:
2298 stats->rx_truncated_pkts++;
2305 /* Check for errors in the send completion queue entry */
2307 nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cmp_queue *cq,
2308 struct cqe_send_t *cqe_tx)
2310 struct cmp_queue_stats *stats = &cq->stats;
2312 switch (cqe_tx->send_status) {
2313 case CQ_TX_ERROP_GOOD:
2316 case CQ_TX_ERROP_DESC_FAULT:
2317 stats->tx.desc_fault++;
2319 case CQ_TX_ERROP_HDR_CONS_ERR:
2320 stats->tx.hdr_cons_err++;
2322 case CQ_TX_ERROP_SUBDC_ERR:
2323 stats->tx.subdesc_err++;
2325 case CQ_TX_ERROP_IMM_SIZE_OFLOW:
2326 stats->tx.imm_size_oflow++;
2328 case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
2329 stats->tx.data_seq_err++;
2331 case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
2332 stats->tx.mem_seq_err++;
2334 case CQ_TX_ERROP_LOCK_VIOL:
2335 stats->tx.lock_viol++;
2337 case CQ_TX_ERROP_DATA_FAULT:
2338 stats->tx.data_fault++;
2340 case CQ_TX_ERROP_TSTMP_CONFLICT:
2341 stats->tx.tstmp_conflict++;
2343 case CQ_TX_ERROP_TSTMP_TIMEOUT:
2344 stats->tx.tstmp_timeout++;
2346 case CQ_TX_ERROP_MEM_FAULT:
2347 stats->tx.mem_fault++;
2349 case CQ_TX_ERROP_CK_OVERLAP:
2350 stats->tx.csum_overlap++;
2352 case CQ_TX_ERROP_CK_OFLOW:
2353 stats->tx.csum_overflow++;