/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Author : David C Somayajulu, Cavium, Inc., San Jose, CA 95131.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "qlnx_os.h"
#include "bcm_osal.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore.h"
#include "ecore_chain.h"
#include "ecore_status.h"
#include "ecore_hw.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_int.h"
#include "ecore_cxt.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"
#include "ecore_dev_api.h"
#include "ecore_l2_api.h"
#include "ecore_mcp.h"
#include "ecore_hw_defs.h"
#include "mcp_public.h"
#include "ecore_iro.h"
#include "ecore_dbg_fw_funcs.h"

#include "qlnx_ioctl.h"
#include "qlnx_def.h"
#include "qlnx_ver.h"
/*
 * ioctl related functions
 */
static void qlnx_add_sysctls(qlnx_host_t *ha);

static void qlnx_release(qlnx_host_t *ha);
static void qlnx_fp_isr(void *arg);
static void qlnx_init_ifnet(device_t dev, qlnx_host_t *ha);
static void qlnx_init(void *arg);
static void qlnx_init_locked(qlnx_host_t *ha);
static int qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi);
static int qlnx_set_promisc(qlnx_host_t *ha);
static int qlnx_set_allmulti(qlnx_host_t *ha);
static int qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int qlnx_media_change(struct ifnet *ifp);
static void qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
static void qlnx_stop(qlnx_host_t *ha);
static int qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp,
		struct mbuf **m_headp);
static int qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha);
static uint32_t qlnx_get_optics(qlnx_host_t *ha,
		struct qlnx_link_output *if_link);
static int qlnx_transmit(struct ifnet *ifp, struct mbuf *mp);
static void qlnx_qflush(struct ifnet *ifp);

static int qlnx_alloc_parent_dma_tag(qlnx_host_t *ha);
static void qlnx_free_parent_dma_tag(qlnx_host_t *ha);
static int qlnx_alloc_tx_dma_tag(qlnx_host_t *ha);
static void qlnx_free_tx_dma_tag(qlnx_host_t *ha);
static int qlnx_alloc_rx_dma_tag(qlnx_host_t *ha);
static void qlnx_free_rx_dma_tag(qlnx_host_t *ha);

static int qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver);
static int qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size);

static int qlnx_nic_setup(struct ecore_dev *cdev,
		struct ecore_pf_params *func_params);
static int qlnx_nic_start(struct ecore_dev *cdev);
static int qlnx_slowpath_start(qlnx_host_t *ha);
static int qlnx_slowpath_stop(qlnx_host_t *ha);
static int qlnx_init_hw(qlnx_host_t *ha);
static void qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
		char ver_str[VER_SIZE]);
static void qlnx_unload(qlnx_host_t *ha);
static int qlnx_load(qlnx_host_t *ha);
static void qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
		uint32_t add_multi);
static void qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf,
		uint32_t len);
static int qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq);
static void qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq);
static void qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn,
		struct qlnx_rx_queue *rxq);
static int qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter);
static int qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords,
		int hwfn_index);
static int qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords,
		int hwfn_index);
static void qlnx_timer(void *arg);
static int qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
static void qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
static void qlnx_trigger_dump(qlnx_host_t *ha);
static void qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
		struct qlnx_tx_queue *txq);
static int qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
		int lro_enable);
static void qlnx_fp_taskqueue(void *context, int pending);
static void qlnx_sample_storm_stats(qlnx_host_t *ha);
static int qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
		struct qlnx_agg_info *tpa);
static void qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa);

#if __FreeBSD_version >= 1100000
static uint64_t qlnx_get_counter(if_t ifp, ift_counter cnt);
#endif
/*
 * Hooks to the Operating Systems
 */
static int qlnx_pci_probe (device_t);
static int qlnx_pci_attach (device_t);
static int qlnx_pci_detach (device_t);

static device_method_t qlnx_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, qlnx_pci_probe),
	DEVMETHOD(device_attach, qlnx_pci_attach),
	DEVMETHOD(device_detach, qlnx_pci_detach),
	{ 0, 0 }
};

static driver_t qlnx_pci_driver = {
	"ql", qlnx_pci_methods, sizeof (qlnx_host_t),
};

static devclass_t qlnx_devclass;

MODULE_VERSION(if_qlnxe, 1);
DRIVER_MODULE(if_qlnxe, pci, qlnx_pci_driver, qlnx_devclass, 0, 0);

MODULE_DEPEND(if_qlnxe, pci, 1, 1, 1);
MODULE_DEPEND(if_qlnxe, ether, 1, 1, 1);

MALLOC_DEFINE(M_QLNXBUF, "qlnxbuf", "Buffers for qlnx driver");

char qlnx_dev_str[64];
char qlnx_ver_str[VER_SIZE];
char qlnx_name_str[NAME_SIZE];
/*
 * Some PCI Configuration Space Related Defines
 */

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC		0x1077
#endif

/* 40G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1634
#define QLOGIC_PCI_DEVICE_ID_1634	0x1634
#endif

/* 100G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1644
#define QLOGIC_PCI_DEVICE_ID_1644	0x1644
#endif

/* 25G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1656
#define QLOGIC_PCI_DEVICE_ID_1656	0x1656
#endif

/* 50G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1654
#define QLOGIC_PCI_DEVICE_ID_1654	0x1654
#endif
static int
qlnx_valid_device(device_t dev)
{
	uint16_t	device_id;

	device_id = pci_get_device(dev);

	if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
		(device_id == QLOGIC_PCI_DEVICE_ID_1644) ||
		(device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
		(device_id == QLOGIC_PCI_DEVICE_ID_1654))
		return (0);

	return (-1);
}
/*
 * Name: qlnx_pci_probe
 * Function: Validate the PCI device to be a QLE45xxx device
 */
static int
qlnx_pci_probe(device_t dev)
{
	snprintf(qlnx_ver_str, sizeof(qlnx_ver_str), "v%d.%d.%d",
		QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, QLNX_VERSION_BUILD);
	snprintf(qlnx_name_str, sizeof(qlnx_name_str), "qlnx");

	if (pci_get_vendor(dev) != PCI_VENDOR_QLOGIC) {
		return (ENXIO);
	}

	switch (pci_get_device(dev)) {

	case QLOGIC_PCI_DEVICE_ID_1644:
		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
			"Qlogic 100GbE PCI CNA Adapter-Ethernet Function",
			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
			QLNX_VERSION_BUILD);
		device_set_desc_copy(dev, qlnx_dev_str);

		break;

	case QLOGIC_PCI_DEVICE_ID_1634:
		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
			"Qlogic 40GbE PCI CNA Adapter-Ethernet Function",
			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
			QLNX_VERSION_BUILD);
		device_set_desc_copy(dev, qlnx_dev_str);

		break;

	case QLOGIC_PCI_DEVICE_ID_1656:
		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
			"Qlogic 25GbE PCI CNA Adapter-Ethernet Function",
			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
			QLNX_VERSION_BUILD);
		device_set_desc_copy(dev, qlnx_dev_str);

		break;

	case QLOGIC_PCI_DEVICE_ID_1654:
		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
			"Qlogic 50GbE PCI CNA Adapter-Ethernet Function",
			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
			QLNX_VERSION_BUILD);
		device_set_desc_copy(dev, qlnx_dev_str);

		break;

	default:
		return (ENXIO);
	}

	return (BUS_PROBE_DEFAULT);
}
static void
qlnx_sp_intr(void *arg)
{
	struct ecore_hwfn	*p_hwfn;
	qlnx_host_t		*ha;
	int			i;

	p_hwfn = arg;

	if (p_hwfn == NULL) {
		printf("%s: spurious slowpath intr\n", __func__);
		return;
	}

	ha = (qlnx_host_t *)p_hwfn->p_dev;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		if (&ha->cdev.hwfns[i] == p_hwfn) {
			taskqueue_enqueue(ha->sp_taskqueue[i], &ha->sp_task[i]);
			break;
		}
	}

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));

	return;
}
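
/*
 * Note: the slowpath ISR above only identifies which hardware function
 * raised the interrupt and defers all processing to that hwfn's taskqueue;
 * keeping the ISR this thin avoids doing mailbox/event-queue work in
 * interrupt context.
 */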
static void
qlnx_sp_taskqueue(void *context, int pending)
{
	struct ecore_hwfn	*p_hwfn;

	p_hwfn = context;

	if (p_hwfn != NULL) {
		/* service pending slowpath events for this hwfn */
		qlnx_sp_isr(p_hwfn);
	}
	return;
}

static int
qlnx_create_sp_taskqueues(qlnx_host_t *ha)
{
	int	i;
	char	tq_name[32];

	for (i = 0; i < ha->cdev.num_hwfns; i++) {

		struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];

		bzero(tq_name, sizeof (tq_name));
		snprintf(tq_name, sizeof (tq_name), "ql_sp_tq_%d", i);

		TASK_INIT(&ha->sp_task[i], 0, qlnx_sp_taskqueue, p_hwfn);

		ha->sp_taskqueue[i] = taskqueue_create_fast(tq_name, M_NOWAIT,
			taskqueue_thread_enqueue, &ha->sp_taskqueue[i]);

		if (ha->sp_taskqueue[i] == NULL)
			return (-1);

		taskqueue_start_threads(&ha->sp_taskqueue[i], 1, PI_NET, "%s",
			tq_name);

		QL_DPRINT1(ha, (ha->pci_dev, "%s: %p\n", __func__,
			ha->sp_taskqueue[i]));
	}

	return (0);
}

static void
qlnx_destroy_sp_taskqueues(qlnx_host_t *ha)
{
	int	i;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		if (ha->sp_taskqueue[i] != NULL) {
			taskqueue_drain(ha->sp_taskqueue[i], &ha->sp_task[i]);
			taskqueue_free(ha->sp_taskqueue[i]);
		}
	}
	return;
}
static void
qlnx_fp_taskqueue(void *context, int pending)
{
	struct qlnx_fastpath	*fp;
	qlnx_host_t		*ha;
	struct ifnet		*ifp;
	struct mbuf		*mp;
	int			ret = 0;

	fp = context;

	if (fp == NULL)
		return;

	ha = (qlnx_host_t *)fp->edev;

	ifp = ha->ifp;

	mtx_lock(&fp->tx_mtx);

	if (((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
		IFF_DRV_RUNNING) || (!ha->link_up)) {

		mtx_unlock(&fp->tx_mtx);
		goto qlnx_fp_taskqueue_exit;
	}

	(void)qlnx_tx_int(ha, fp, fp->txq[0]);

	mp = drbr_peek(ifp, fp->tx_br);

	while (mp != NULL) {

		ret = qlnx_send(ha, fp, &mp);

		if (ret) {
			if (mp != NULL) {
				drbr_putback(ifp, fp->tx_br, mp);
			} else {
				fp->tx_pkts_processed++;
				drbr_advance(ifp, fp->tx_br);
			}

			mtx_unlock(&fp->tx_mtx);

			goto qlnx_fp_taskqueue_exit;
		} else {
			drbr_advance(ifp, fp->tx_br);
			fp->tx_pkts_transmitted++;
			fp->tx_pkts_processed++;
		}

		mp = drbr_peek(ifp, fp->tx_br);
	}

	(void)qlnx_tx_int(ha, fp, fp->txq[0]);

	mtx_unlock(&fp->tx_mtx);

qlnx_fp_taskqueue_exit:

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret));

	return;
}
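
/*
 * The transmit loop above follows the drbr (buf_ring) contract: a packet is
 * drbr_peek()'ed rather than dequeued, and only drbr_advance()'d once
 * qlnx_send() has accepted it. On failure the mbuf is drbr_putback() if
 * qlnx_send() left it intact, or advanced past if it was consumed (mp ==
 * NULL), so no packet is ever leaked or transmitted twice.
 */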
static int
qlnx_create_fp_taskqueues(qlnx_host_t *ha)
{
	int			i;
	char			tq_name[32];
	struct qlnx_fastpath	*fp;

	for (i = 0; i < ha->num_rss; i++) {

		fp = &ha->fp_array[i];

		bzero(tq_name, sizeof (tq_name));
		snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i);

		TASK_INIT(&fp->fp_task, 0, qlnx_fp_taskqueue, fp);

		fp->fp_taskqueue = taskqueue_create_fast(tq_name, M_NOWAIT,
					taskqueue_thread_enqueue,
					&fp->fp_taskqueue);

		if (fp->fp_taskqueue == NULL)
			return (-1);

		taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
			tq_name);

		QL_DPRINT1(ha, (ha->pci_dev, "%s: %p\n", __func__,
			fp->fp_taskqueue));
	}

	return (0);
}

static void
qlnx_destroy_fp_taskqueues(qlnx_host_t *ha)
{
	int			i;
	struct qlnx_fastpath	*fp;

	for (i = 0; i < ha->num_rss; i++) {

		fp = &ha->fp_array[i];

		if (fp->fp_taskqueue != NULL) {

			taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
			taskqueue_free(fp->fp_taskqueue);
			fp->fp_taskqueue = NULL;
		}
	}
	return;
}

static void
qlnx_drain_fp_taskqueues(qlnx_host_t *ha)
{
	int			i;
	struct qlnx_fastpath	*fp;

	for (i = 0; i < ha->num_rss; i++) {
		fp = &ha->fp_array[i];

		if (fp->fp_taskqueue != NULL) {
			taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
		}
	}
	return;
}
/*
 * Name: qlnx_pci_attach
 * Function: attaches the device to the operating system
 */
static int
qlnx_pci_attach(device_t dev)
{
	qlnx_host_t	*ha = NULL;
	uint32_t	rsrc_len_reg = 0;
	uint32_t	rsrc_len_dbells = 0;
	uint32_t	rsrc_len_msix = 0;
	int		i;
	uint32_t	mfw_ver;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "cannot get softc\n");
		return (ENOMEM);
	}

	memset(ha, 0, sizeof (qlnx_host_t));

	if (qlnx_valid_device(dev) != 0) {
		device_printf(dev, "device is not a valid device\n");
		return (ENXIO);
	}
	ha->pci_func = pci_get_function(dev);

	ha->pci_dev = dev;

	mtx_init(&ha->hw_lock, "qlnx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
	mtx_init(&ha->tx_lock, "qlnx_tx_lock", MTX_NETWORK_LOCK, MTX_DEF);

	ha->flags.lock_init = 1;

	pci_enable_busmaster(dev);

	/*
	 * map the PCI BARs
	 */
	ha->reg_rid = PCIR_BAR(0);
	ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
				RF_ACTIVE);

	if (ha->pci_reg == NULL) {
		device_printf(dev, "unable to map BAR0\n");
		goto qlnx_pci_attach_err;
	}

	rsrc_len_reg = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
				ha->reg_rid);

	ha->dbells_rid = PCIR_BAR(2);
	ha->pci_dbells = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
				&ha->dbells_rid, RF_ACTIVE);

	if (ha->pci_dbells == NULL) {
		device_printf(dev, "unable to map BAR1\n");
		goto qlnx_pci_attach_err;
	}

	rsrc_len_dbells = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
				ha->dbells_rid);

	ha->dbells_phys_addr = (uint64_t)
		bus_get_resource_start(dev, SYS_RES_MEMORY, ha->dbells_rid);
	ha->dbells_size = rsrc_len_dbells;

	ha->msix_rid = PCIR_BAR(4);
	ha->msix_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
				&ha->msix_rid, RF_ACTIVE);

	if (ha->msix_bar == NULL) {
		device_printf(dev, "unable to map BAR2\n");
		goto qlnx_pci_attach_err;
	}

	rsrc_len_msix = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
				ha->msix_rid);

	/*
	 * allocate dma tags
	 */
	if (qlnx_alloc_parent_dma_tag(ha))
		goto qlnx_pci_attach_err;

	if (qlnx_alloc_tx_dma_tag(ha))
		goto qlnx_pci_attach_err;

	if (qlnx_alloc_rx_dma_tag(ha))
		goto qlnx_pci_attach_err;

	if (qlnx_init_hw(ha) != 0)
		goto qlnx_pci_attach_err;

	/*
	 * Allocate MSI-x vectors
	 */
	ha->num_rss = QLNX_MAX_RSS;
	ha->num_tc = QLNX_MAX_TC;

	ha->msix_count = pci_msix_count(dev);

	if (ha->msix_count > (mp_ncpus + ha->cdev.num_hwfns))
		ha->msix_count = mp_ncpus + ha->cdev.num_hwfns;

	if (!ha->msix_count ||
		(ha->msix_count < (ha->cdev.num_hwfns + 1))) {
		device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
			ha->msix_count);
		goto qlnx_pci_attach_err;
	}

	if (ha->msix_count > (ha->num_rss + ha->cdev.num_hwfns))
		ha->msix_count = ha->num_rss + ha->cdev.num_hwfns;

	ha->num_rss = ha->msix_count - ha->cdev.num_hwfns;
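
	/*
	 * MSI-X vector budget: one vector per hardware function for the
	 * slowpath plus one vector per RSS ring for the fastpath. For
	 * example, with 2 hwfns and 8 CPUs on an adapter advertising 64
	 * vectors, msix_count is first clamped to mp_ncpus + num_hwfns = 10,
	 * and num_rss becomes 10 - 2 = 8, i.e. one fastpath ring per CPU.
	 */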
	QL_DPRINT1(ha, (dev, "%s:\n\t\t\tpci_reg [%p, 0x%08x 0x%08x]"
		"\n\t\t\tdbells [%p, 0x%08x 0x%08x]"
		"\n\t\t\tmsix [%p, 0x%08x 0x%08x 0x%x 0x%x]"
		"\n\t\t\t[ncpus = %d][num_rss = 0x%x] [num_tc = 0x%x]\n",
		__func__, ha->pci_reg, rsrc_len_reg,
		ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
		ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
		ha->msix_count, mp_ncpus, ha->num_rss, ha->num_tc));

	if (pci_alloc_msix(dev, &ha->msix_count)) {
		device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
			ha->msix_count);
		ha->msix_count = 0;
		goto qlnx_pci_attach_err;
	}

	/*
	 * Initialize slow path interrupt and task queue
	 */
	if (qlnx_create_sp_taskqueues(ha) != 0)
		goto qlnx_pci_attach_err;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {

		struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];

		ha->sp_irq_rid[i] = i + 1;
		ha->sp_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ,
				&ha->sp_irq_rid[i],
				(RF_ACTIVE | RF_SHAREABLE));
		if (ha->sp_irq[i] == NULL) {
			device_printf(dev,
				"could not allocate mbx interrupt\n");
			goto qlnx_pci_attach_err;
		}

		if (bus_setup_intr(dev, ha->sp_irq[i],
				(INTR_TYPE_NET | INTR_MPSAFE), NULL,
				qlnx_sp_intr, p_hwfn, &ha->sp_handle[i])) {
			device_printf(dev,
				"could not setup slow path interrupt\n");
			goto qlnx_pci_attach_err;
		}

		QL_DPRINT1(ha, (dev, "%s: p_hwfn [%p] sp_irq_rid %d"
			" sp_irq %p sp_handle %p\n", __func__, p_hwfn,
			ha->sp_irq_rid[i], ha->sp_irq[i], ha->sp_handle[i]));
	}
	/*
	 * initialize fast path interrupt
	 */
	if (qlnx_create_fp_taskqueues(ha) != 0)
		goto qlnx_pci_attach_err;

	for (i = 0; i < ha->num_rss; i++) {
		ha->irq_vec[i].rss_idx = i;
		ha->irq_vec[i].ha = ha;
		ha->irq_vec[i].irq_rid = (1 + ha->cdev.num_hwfns) + i;

		ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
				&ha->irq_vec[i].irq_rid,
				(RF_ACTIVE | RF_SHAREABLE));

		if (ha->irq_vec[i].irq == NULL) {
			device_printf(dev,
				"could not allocate interrupt[%d]\n", i);
			goto qlnx_pci_attach_err;
		}

		if (qlnx_alloc_tx_br(ha, &ha->fp_array[i])) {
			device_printf(dev, "could not allocate tx_br[%d]\n", i);
			goto qlnx_pci_attach_err;
		}
	}

	callout_init(&ha->qlnx_callout, 1);
	ha->flags.callout_init = 1;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {

		if (qlnx_grc_dumpsize(ha, &ha->grcdump_size[i], i) != 0)
			goto qlnx_pci_attach_err;
		if (ha->grcdump_size[i] == 0)
			goto qlnx_pci_attach_err;

		/* the firmware reports dump sizes in dwords; convert to bytes */
		ha->grcdump_size[i] = ha->grcdump_size[i] << 2;
		QL_DPRINT1(ha, (dev, "grcdump_size[%d] = 0x%08x\n",
			i, ha->grcdump_size[i]));

		ha->grcdump[i] = qlnx_zalloc(ha->grcdump_size[i]);
		if (ha->grcdump[i] == NULL) {
			device_printf(dev, "grcdump alloc[%d] failed\n", i);
			goto qlnx_pci_attach_err;
		}

		if (qlnx_idle_chk_size(ha, &ha->idle_chk_size[i], i) != 0)
			goto qlnx_pci_attach_err;
		if (ha->idle_chk_size[i] == 0)
			goto qlnx_pci_attach_err;

		ha->idle_chk_size[i] = ha->idle_chk_size[i] << 2;
		QL_DPRINT1(ha, (dev, "idle_chk_size[%d] = 0x%08x\n",
			i, ha->idle_chk_size[i]));

		ha->idle_chk[i] = qlnx_zalloc(ha->idle_chk_size[i]);

		if (ha->idle_chk[i] == NULL) {
			device_printf(dev, "idle_chk alloc failed\n");
			goto qlnx_pci_attach_err;
		}
	}
	if (qlnx_slowpath_start(ha) != 0) {

		qlnx_mdelay(__func__, 1000);
		qlnx_trigger_dump(ha);

		goto qlnx_pci_attach_err0;
	}
	ha->flags.slowpath_start = 1;

	if (qlnx_get_flash_size(ha, &ha->flash_size) != 0) {
		qlnx_mdelay(__func__, 1000);
		qlnx_trigger_dump(ha);

		goto qlnx_pci_attach_err0;
	}

	if (qlnx_get_mfw_version(ha, &mfw_ver) != 0) {
		qlnx_mdelay(__func__, 1000);
		qlnx_trigger_dump(ha);

		goto qlnx_pci_attach_err0;
	}
	snprintf(ha->mfw_ver, sizeof(ha->mfw_ver), "%d.%d.%d.%d",
		((mfw_ver >> 24) & 0xFF), ((mfw_ver >> 16) & 0xFF),
		((mfw_ver >> 8) & 0xFF), (mfw_ver & 0xFF));
	snprintf(ha->stormfw_ver, sizeof(ha->stormfw_ver), "%d.%d.%d.%d",
		FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
		FW_ENGINEERING_VERSION);

	QL_DPRINT1(ha, (dev, "%s: STORM_FW version %s MFW version %s\n",
		__func__, ha->stormfw_ver, ha->mfw_ver));
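
	/*
	 * The 32-bit management FW version word packs one version component
	 * per byte, most significant byte first; e.g. mfw_ver == 0x08210306
	 * is decoded by the snprintf() above as "8.33.3.6".
	 */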
	qlnx_init_ifnet(dev, ha);

	/*
	 * add sysctls
	 */
	qlnx_add_sysctls(ha);

qlnx_pci_attach_err0:
	/*
	 * create ioctl device interface
	 */
	if (qlnx_make_cdev(ha)) {
		device_printf(dev, "%s: ql_make_cdev failed\n", __func__);
		goto qlnx_pci_attach_err;
	}

	QL_DPRINT2(ha, (dev, "%s: success\n", __func__));

	return (0);

qlnx_pci_attach_err:

	qlnx_release(ha);

	return (ENXIO);
}
/*
 * Name: qlnx_pci_detach
 * Function: Unhooks the device from the operating system
 */
static int
qlnx_pci_detach(device_t dev)
{
	qlnx_host_t	*ha = NULL;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "cannot get softc\n");
		return (ENOMEM);
	}

	QLNX_LOCK(ha);
	qlnx_stop(ha);
	QLNX_UNLOCK(ha);

	qlnx_release(ha);

	return (0);
}
static int
qlnx_init_hw(qlnx_host_t *ha)
{
	int				rval = 0;
	struct ecore_hw_prepare_params	params;

	ecore_init_struct(&ha->cdev);

	/* ha->dp_module = ECORE_MSG_PROBE | ...
	   ha->dp_level = ECORE_LEVEL_VERBOSE; */
	ha->dp_level = ECORE_LEVEL_NOTICE;

	ecore_init_dp(&ha->cdev, ha->dp_module, ha->dp_level, ha->pci_dev);

	ha->cdev.regview = ha->pci_reg;
	ha->cdev.doorbells = ha->pci_dbells;
	ha->cdev.db_phys_addr = ha->dbells_phys_addr;
	ha->cdev.db_size = ha->dbells_size;

	bzero(&params, sizeof (struct ecore_hw_prepare_params));

	ha->personality = ECORE_PCI_DEFAULT;

	params.personality = ha->personality;

	params.drv_resc_alloc = false;
	params.chk_reg_fifo = false;
	params.initiate_pf_flr = true;

	ecore_hw_prepare(&ha->cdev, &params);

	qlnx_set_id(&ha->cdev, qlnx_name_str, qlnx_ver_str);

	return (rval);
}
static void
qlnx_release(qlnx_host_t *ha)
{
	device_t	dev;
	int		i;

	dev = ha->pci_dev;

	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

	for (i = 0; i < QLNX_MAX_HW_FUNCS; i++) {
		if (ha->idle_chk[i] != NULL) {
			free(ha->idle_chk[i], M_QLNXBUF);
			ha->idle_chk[i] = NULL;
		}

		if (ha->grcdump[i] != NULL) {
			free(ha->grcdump[i], M_QLNXBUF);
			ha->grcdump[i] = NULL;
		}
	}

	if (ha->flags.callout_init)
		callout_drain(&ha->qlnx_callout);

	if (ha->flags.slowpath_start) {
		qlnx_slowpath_stop(ha);
	}

	ecore_hw_remove(&ha->cdev);

	qlnx_del_cdev(ha);

	if (ha->ifp != NULL)
		ether_ifdetach(ha->ifp);

	qlnx_free_tx_dma_tag(ha);

	qlnx_free_rx_dma_tag(ha);

	qlnx_free_parent_dma_tag(ha);

	for (i = 0; i < ha->num_rss; i++) {

		struct qlnx_fastpath *fp = &ha->fp_array[i];

		if (ha->irq_vec[i].handle) {
			(void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
				ha->irq_vec[i].handle);
		}

		if (ha->irq_vec[i].irq) {
			(void)bus_release_resource(dev, SYS_RES_IRQ,
				ha->irq_vec[i].irq_rid,
				ha->irq_vec[i].irq);
		}

		qlnx_free_tx_br(ha, fp);
	}
	qlnx_destroy_fp_taskqueues(ha);

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		if (ha->sp_handle[i])
			(void)bus_teardown_intr(dev, ha->sp_irq[i],
				ha->sp_handle[i]);

		if (ha->sp_irq[i])
			(void) bus_release_resource(dev, SYS_RES_IRQ,
				ha->sp_irq_rid[i], ha->sp_irq[i]);
	}

	qlnx_destroy_sp_taskqueues(ha);

	if (ha->msix_count)
		pci_release_msi(dev);

	if (ha->flags.lock_init) {
		mtx_destroy(&ha->tx_lock);
		mtx_destroy(&ha->hw_lock);
	}

	if (ha->pci_reg)
		(void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
				ha->pci_reg);

	if (ha->pci_dbells)
		(void) bus_release_resource(dev, SYS_RES_MEMORY, ha->dbells_rid,
				ha->pci_dbells);

	if (ha->msix_bar)
		(void) bus_release_resource(dev, SYS_RES_MEMORY, ha->msix_rid,
				ha->msix_bar);

	QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));

	return;
}
static void
qlnx_trigger_dump(qlnx_host_t *ha)
{
	int	i;

	if (ha->ifp != NULL)
		ha->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);

	QL_DPRINT2(ha, (ha->pci_dev, "%s: start\n", __func__));

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		qlnx_grc_dump(ha, &ha->grcdump_dwords[i], i);
		qlnx_idle_chk(ha, &ha->idle_chk_dwords[i], i);
	}

	QL_DPRINT2(ha, (ha->pci_dev, "%s: end\n", __func__));

	return;
}

static int
qlnx_trigger_dump_sysctl(SYSCTL_HANDLER_ARGS)
{
	int		err, ret = 0;
	qlnx_host_t	*ha;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	if (err || !req->newptr)
		return (err);

	ha = (qlnx_host_t *)arg1;
	qlnx_trigger_dump(ha);

	return (err);
}
static int
qlnx_set_tx_coalesce(SYSCTL_HANDLER_ARGS)
{
	int			err, i, ret = 0, usecs = 0;
	qlnx_host_t		*ha;
	struct ecore_hwfn	*p_hwfn;
	struct qlnx_fastpath	*fp;

	err = sysctl_handle_int(oidp, &usecs, 0, req);

	if (err || !req->newptr || !usecs || (usecs > 255))
		return (err);

	ha = (qlnx_host_t *)arg1;

	for (i = 0; i < ha->num_rss; i++) {

		p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];

		fp = &ha->fp_array[i];

		if (fp->txq[0]->handle != NULL) {
			ret = ecore_set_queue_coalesce(p_hwfn, 0,
				(uint16_t)usecs, fp->txq[0]->handle);
		}
	}

	if (!ret)
		ha->tx_coalesce_usecs = (uint8_t)usecs;

	return (err);
}

static int
qlnx_set_rx_coalesce(SYSCTL_HANDLER_ARGS)
{
	int			err, i, ret = 0, usecs = 0;
	qlnx_host_t		*ha;
	struct ecore_hwfn	*p_hwfn;
	struct qlnx_fastpath	*fp;

	err = sysctl_handle_int(oidp, &usecs, 0, req);

	if (err || !req->newptr || !usecs || (usecs > 255))
		return (err);

	ha = (qlnx_host_t *)arg1;

	for (i = 0; i < ha->num_rss; i++) {

		p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];

		fp = &ha->fp_array[i];

		if (fp->rxq->handle != NULL) {
			ret = ecore_set_queue_coalesce(p_hwfn, (uint16_t)usecs,
				0, fp->rxq->handle);
		}
	}

	if (!ret)
		ha->rx_coalesce_usecs = (uint8_t)usecs;

	return (err);
}
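
/*
 * Both coalesce handlers above accept 1..255 microseconds (the value is
 * stored in a uint8_t); zero and out-of-range writes are silently ignored.
 * A typical invocation (unit number assumed) would be:
 *
 *	sysctl dev.ql.0.set_rx_coalesce_usecs=64
 */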
static void
qlnx_add_sp_stats_sysctls(qlnx_host_t *ha)
{
	struct sysctl_ctx_list	*ctx;
	struct sysctl_oid_list	*children;
	struct sysctl_oid	*ctx_oid;

	ctx = device_get_sysctl_ctx(ha->pci_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

	ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "spstat",
			CTLFLAG_RD, NULL, "spstat");
	children = SYSCTL_CHILDREN(ctx_oid);

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "sp_interrupts",
		CTLFLAG_RD, &ha->sp_interrupts,
		"No. of slowpath interrupts");

	return;
}
static void
qlnx_add_fp_stats_sysctls(qlnx_host_t *ha)
{
	struct sysctl_ctx_list	*ctx;
	struct sysctl_oid_list	*children;
	struct sysctl_oid_list	*node_children;
	struct sysctl_oid	*ctx_oid;
	int			i, j;
	char			name_str[16];

	ctx = device_get_sysctl_ctx(ha->pci_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

	ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fpstat",
			CTLFLAG_RD, NULL, "fpstat");
	children = SYSCTL_CHILDREN(ctx_oid);

	for (i = 0; i < ha->num_rss; i++) {

		bzero(name_str, sizeof(name_str));
		snprintf(name_str, sizeof(name_str), "%d", i);

		ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
				CTLFLAG_RD, NULL, name_str);
		node_children = SYSCTL_CHILDREN(ctx_oid);

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_processed",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_processed,
			"No. of packets processed for transmission");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_freed",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_freed,
			"No. of freed packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_transmitted",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_transmitted,
			"No. of transmitted packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_completed",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_completed,
			"No. of transmit completions");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_lso_wnd_min_len",
			CTLFLAG_RD, &ha->fp_array[i].tx_lso_wnd_min_len,
			"tx_lso_wnd_min_len");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_defrag",
			CTLFLAG_RD, &ha->fp_array[i].tx_defrag,
			"tx_defrag");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_nsegs_gt_elem_left",
			CTLFLAG_RD, &ha->fp_array[i].tx_nsegs_gt_elem_left,
			"tx_nsegs_gt_elem_left");

		SYSCTL_ADD_UINT(ctx, node_children,
			OID_AUTO, "tx_tso_max_nsegs",
			CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_nsegs,
			ha->fp_array[i].tx_tso_max_nsegs, "tx_tso_max_nsegs");

		SYSCTL_ADD_UINT(ctx, node_children,
			OID_AUTO, "tx_tso_min_nsegs",
			CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_nsegs,
			ha->fp_array[i].tx_tso_min_nsegs, "tx_tso_min_nsegs");

		SYSCTL_ADD_UINT(ctx, node_children,
			OID_AUTO, "tx_tso_max_pkt_len",
			CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_pkt_len,
			ha->fp_array[i].tx_tso_max_pkt_len,
			"tx_tso_max_pkt_len");

		SYSCTL_ADD_UINT(ctx, node_children,
			OID_AUTO, "tx_tso_min_pkt_len",
			CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_pkt_len,
			ha->fp_array[i].tx_tso_min_pkt_len,
			"tx_tso_min_pkt_len");

		for (j = 0; j < QLNX_FP_MAX_SEGS; j++) {

			bzero(name_str, sizeof(name_str));
			snprintf(name_str, sizeof(name_str),
				"tx_pkts_nseg_%02d", (j+1));

			SYSCTL_ADD_QUAD(ctx, node_children,
				OID_AUTO, name_str, CTLFLAG_RD,
				&ha->fp_array[i].tx_pkts[j], name_str);
		}
		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_nsegs_gt_elem_left",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_nsegs_gt_elem_left,
			"err_tx_nsegs_gt_elem_left");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_dmamap_create",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_create,
			"err_tx_dmamap_create");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_defrag_dmamap_load",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag_dmamap_load,
			"err_tx_defrag_dmamap_load");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_non_tso_max_seg",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_non_tso_max_seg,
			"err_tx_non_tso_max_seg");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_dmamap_load",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_load,
			"err_tx_dmamap_load");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_defrag",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag,
			"err_tx_defrag");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_free_pkt_null",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_free_pkt_null,
			"err_tx_free_pkt_null");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_cons_idx_conflict",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_cons_idx_conflict,
			"err_tx_cons_idx_conflict");

#ifdef QLNX_TRACE_LRO_CNT
		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "lro_cnt_64",
			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_64,
			"lro_cnt_64");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "lro_cnt_128",
			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_128,
			"lro_cnt_128");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "lro_cnt_256",
			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_256,
			"lro_cnt_256");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "lro_cnt_512",
			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_512,
			"lro_cnt_512");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "lro_cnt_1024",
			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_1024,
			"lro_cnt_1024");
#endif /* #ifdef QLNX_TRACE_LRO_CNT */

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "rx_pkts",
			CTLFLAG_RD, &ha->fp_array[i].rx_pkts,
			"No. of received packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tpa_start",
			CTLFLAG_RD, &ha->fp_array[i].tpa_start,
			"No. of tpa_start packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tpa_cont",
			CTLFLAG_RD, &ha->fp_array[i].tpa_cont,
			"No. of tpa_cont packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tpa_end",
			CTLFLAG_RD, &ha->fp_array[i].tpa_end,
			"No. of tpa_end packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_m_getcl",
			CTLFLAG_RD, &ha->fp_array[i].err_m_getcl,
			"err_m_getcl");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_m_getjcl",
			CTLFLAG_RD, &ha->fp_array[i].err_m_getjcl,
			"err_m_getjcl");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_rx_hw_errors",
			CTLFLAG_RD, &ha->fp_array[i].err_rx_hw_errors,
			"err_rx_hw_errors");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_rx_alloc_errors",
			CTLFLAG_RD, &ha->fp_array[i].err_rx_alloc_errors,
			"err_rx_alloc_errors");
	}

	return;
}
static void
qlnx_add_hw_stats_sysctls(qlnx_host_t *ha)
{
	struct sysctl_ctx_list	*ctx;
	struct sysctl_oid_list	*children;
	struct sysctl_oid	*ctx_oid;

	ctx = device_get_sysctl_ctx(ha->pci_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

	ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "hwstat",
			CTLFLAG_RD, NULL, "hwstat");
	children = SYSCTL_CHILDREN(ctx_oid);

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "no_buff_discards",
		CTLFLAG_RD, &ha->hw_stats.common.no_buff_discards,
		"No. of packets discarded due to lack of buffer");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "packet_too_big_discard",
		CTLFLAG_RD, &ha->hw_stats.common.packet_too_big_discard,
		"No. of packets discarded because packet was too big");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "ttl0_discard",
		CTLFLAG_RD, &ha->hw_stats.common.ttl0_discard,
		"ttl0_discard");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_ucast_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_bytes,
		"rx_ucast_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_mcast_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_bytes,
		"rx_mcast_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_bcast_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_bytes,
		"rx_bcast_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_ucast_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_pkts,
		"rx_ucast_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_mcast_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_pkts,
		"rx_mcast_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_bcast_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_pkts,
		"rx_bcast_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "mftag_filter_discards",
		CTLFLAG_RD, &ha->hw_stats.common.mftag_filter_discards,
		"mftag_filter_discards");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "mac_filter_discards",
		CTLFLAG_RD, &ha->hw_stats.common.mac_filter_discards,
		"mac_filter_discards");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_ucast_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_bytes,
		"tx_ucast_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_mcast_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_bytes,
		"tx_mcast_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_bcast_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_bytes,
		"tx_bcast_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_ucast_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_pkts,
		"tx_ucast_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_mcast_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_pkts,
		"tx_mcast_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_bcast_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_pkts,
		"tx_bcast_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_err_drop_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.tx_err_drop_pkts,
		"tx_err_drop_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tpa_coalesced_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_pkts,
		"tpa_coalesced_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tpa_coalesced_events",
		CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_events,
		"tpa_coalesced_events");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tpa_aborts_num",
		CTLFLAG_RD, &ha->hw_stats.common.tpa_aborts_num,
		"tpa_aborts_num");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tpa_not_coalesced_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.tpa_not_coalesced_pkts,
		"tpa_not_coalesced_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tpa_coalesced_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_bytes,
		"tpa_coalesced_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_64_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_64_byte_packets,
		"rx_64_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_65_to_127_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_65_to_127_byte_packets,
		"rx_65_to_127_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_128_to_255_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_128_to_255_byte_packets,
		"rx_128_to_255_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_256_to_511_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_256_to_511_byte_packets,
		"rx_256_to_511_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_512_to_1023_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_512_to_1023_byte_packets,
		"rx_512_to_1023_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_1024_to_1518_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_1024_to_1518_byte_packets,
		"rx_1024_to_1518_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_1519_to_1522_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_1522_byte_packets,
		"rx_1519_to_1522_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_1523_to_2047_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_2047_byte_packets,
		"rx_1523_to_2047_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_2048_to_4095_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.rx_2048_to_4095_byte_packets,
		"rx_2048_to_4095_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_4096_to_9216_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.rx_4096_to_9216_byte_packets,
		"rx_4096_to_9216_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_9217_to_16383_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.rx_9217_to_16383_byte_packets,
		"rx_9217_to_16383_byte_packets");
	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_crc_errors",
		CTLFLAG_RD, &ha->hw_stats.common.rx_crc_errors,
		"rx_crc_errors");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_mac_crtl_frames",
		CTLFLAG_RD, &ha->hw_stats.common.rx_mac_crtl_frames,
		"rx_mac_crtl_frames");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_pause_frames",
		CTLFLAG_RD, &ha->hw_stats.common.rx_pause_frames,
		"rx_pause_frames");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_pfc_frames",
		CTLFLAG_RD, &ha->hw_stats.common.rx_pfc_frames,
		"rx_pfc_frames");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_align_errors",
		CTLFLAG_RD, &ha->hw_stats.common.rx_align_errors,
		"rx_align_errors");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_carrier_errors",
		CTLFLAG_RD, &ha->hw_stats.common.rx_carrier_errors,
		"rx_carrier_errors");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_oversize_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_oversize_packets,
		"rx_oversize_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_jabbers",
		CTLFLAG_RD, &ha->hw_stats.common.rx_jabbers,
		"rx_jabbers");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_undersize_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_undersize_packets,
		"rx_undersize_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_fragments",
		CTLFLAG_RD, &ha->hw_stats.common.rx_fragments,
		"rx_fragments");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_64_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_64_byte_packets,
		"tx_64_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_65_to_127_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_65_to_127_byte_packets,
		"tx_65_to_127_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_128_to_255_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_128_to_255_byte_packets,
		"tx_128_to_255_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_256_to_511_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_256_to_511_byte_packets,
		"tx_256_to_511_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_512_to_1023_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_512_to_1023_byte_packets,
		"tx_512_to_1023_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_1024_to_1518_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_1024_to_1518_byte_packets,
		"tx_1024_to_1518_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_1519_to_2047_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.tx_1519_to_2047_byte_packets,
		"tx_1519_to_2047_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_2048_to_4095_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.tx_2048_to_4095_byte_packets,
		"tx_2048_to_4095_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_4096_to_9216_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.tx_4096_to_9216_byte_packets,
		"tx_4096_to_9216_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_9217_to_16383_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.tx_9217_to_16383_byte_packets,
		"tx_9217_to_16383_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_pause_frames",
		CTLFLAG_RD, &ha->hw_stats.common.tx_pause_frames,
		"tx_pause_frames");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_pfc_frames",
		CTLFLAG_RD, &ha->hw_stats.common.tx_pfc_frames,
		"tx_pfc_frames");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_lpi_entry_count",
		CTLFLAG_RD, &ha->hw_stats.bb.tx_lpi_entry_count,
		"tx_lpi_entry_count");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_total_collisions",
		CTLFLAG_RD, &ha->hw_stats.bb.tx_total_collisions,
		"tx_total_collisions");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "brb_truncates",
		CTLFLAG_RD, &ha->hw_stats.common.brb_truncates,
		"brb_truncates");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "brb_discards",
		CTLFLAG_RD, &ha->hw_stats.common.brb_discards,
		"brb_discards");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_mac_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bytes,
		"rx_mac_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_mac_uc_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_mac_uc_packets,
		"rx_mac_uc_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_mac_mc_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_mac_mc_packets,
		"rx_mac_mc_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_mac_bc_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bc_packets,
		"rx_mac_bc_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_mac_frames_ok",
		CTLFLAG_RD, &ha->hw_stats.common.rx_mac_frames_ok,
		"rx_mac_frames_ok");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_mac_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bytes,
		"tx_mac_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_mac_uc_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_mac_uc_packets,
		"tx_mac_uc_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_mac_mc_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_mac_mc_packets,
		"tx_mac_mc_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_mac_bc_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bc_packets,
		"tx_mac_bc_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_mac_ctrl_frames",
		CTLFLAG_RD, &ha->hw_stats.common.tx_mac_ctrl_frames,
		"tx_mac_ctrl_frames");

	return;
}
static void
qlnx_add_sysctls(qlnx_host_t *ha)
{
	device_t		dev = ha->pci_dev;
	struct sysctl_ctx_list	*ctx;
	struct sysctl_oid_list	*children;

	ctx = device_get_sysctl_ctx(dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

	qlnx_add_fp_stats_sysctls(ha);
	qlnx_add_sp_stats_sysctls(ha);
	qlnx_add_hw_stats_sysctls(ha);

	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Driver_Version",
		CTLFLAG_RD, qlnx_ver_str, 0,
		"Driver Version");

	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "STORMFW_Version",
		CTLFLAG_RD, ha->stormfw_ver, 0,
		"STORM Firmware Version");

	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "MFW_Version",
		CTLFLAG_RD, ha->mfw_ver, 0,
		"Management Firmware Version");

	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "personality", CTLFLAG_RD,
		&ha->personality, ha->personality,
		"\tpersonality = 0 => Ethernet Only\n"
		"\tpersonality = 3 => Ethernet and RoCE\n"
		"\tpersonality = 4 => Ethernet and iWARP\n"
		"\tpersonality = 6 => Default in Shared Memory\n");

	ha->dbg_level = 0;
	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "debug", CTLFLAG_RW,
		&ha->dbg_level, ha->dbg_level, "Debug Level");

	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "dp_level", CTLFLAG_RW,
		&ha->dp_level, ha->dp_level, "DP Level");

	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "dp_module", CTLFLAG_RW,
		&ha->dp_module, ha->dp_module, "DP Module");

	ha->err_inject = 0;
	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "err_inject", CTLFLAG_RW,
		&ha->err_inject, ha->err_inject, "Error Inject");

	ha->storm_stats_enable = 0;

	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "storm_stats_enable", CTLFLAG_RW,
		&ha->storm_stats_enable, ha->storm_stats_enable,
		"Enable Storm Statistics Gathering");

	ha->storm_stats_index = 0;

	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "storm_stats_index", CTLFLAG_RD,
		&ha->storm_stats_index, ha->storm_stats_index,
		"Enable Storm Statistics Gathering Current Index");

	ha->grcdump_taken = 0;
	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "grcdump_taken", CTLFLAG_RD,
		&ha->grcdump_taken, ha->grcdump_taken, "grcdump_taken");

	ha->idle_chk_taken = 0;
	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "idle_chk_taken", CTLFLAG_RD,
		&ha->idle_chk_taken, ha->idle_chk_taken, "idle_chk_taken");

	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "rx_coalesce_usecs", CTLFLAG_RD,
		&ha->rx_coalesce_usecs, ha->rx_coalesce_usecs,
		"rx_coalesce_usecs");

	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "tx_coalesce_usecs", CTLFLAG_RD,
		&ha->tx_coalesce_usecs, ha->tx_coalesce_usecs,
		"tx_coalesce_usecs");

	ha->rx_pkt_threshold = 32;
	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "rx_pkt_threshold", CTLFLAG_RW,
		&ha->rx_pkt_threshold, ha->rx_pkt_threshold,
		"No. of Rx Pkts to process at a time");

	ha->rx_jumbo_buf_eq_mtu = 0;
	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "rx_jumbo_buf_eq_mtu", CTLFLAG_RW,
		&ha->rx_jumbo_buf_eq_mtu, ha->rx_jumbo_buf_eq_mtu,
		"== 0 => Rx Jumbo buffers are capped to 4Kbytes\n"
		"otherwise Rx Jumbo buffers are set to >= MTU size\n");

	SYSCTL_ADD_PROC(ctx, children,
		OID_AUTO, "trigger_dump", CTLTYPE_INT | CTLFLAG_RW,
		(void *)ha, 0,
		qlnx_trigger_dump_sysctl, "I", "trigger_dump");

	SYSCTL_ADD_PROC(ctx, children,
		OID_AUTO, "set_rx_coalesce_usecs", CTLTYPE_INT | CTLFLAG_RW,
		(void *)ha, 0,
		qlnx_set_rx_coalesce, "I",
		"rx interrupt coalesce period microseconds");

	SYSCTL_ADD_PROC(ctx, children,
		OID_AUTO, "set_tx_coalesce_usecs", CTLTYPE_INT | CTLFLAG_RW,
		(void *)ha, 0,
		qlnx_set_tx_coalesce, "I",
		"tx interrupt coalesce period microseconds");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "err_illegal_intr", CTLFLAG_RD,
		&ha->err_illegal_intr, "err_illegal_intr");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "err_fp_null", CTLFLAG_RD,
		&ha->err_fp_null, "err_fp_null");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "err_get_proto_invalid_type", CTLFLAG_RD,
		&ha->err_get_proto_invalid_type, "err_get_proto_invalid_type");

	return;
}
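
/*
 * All of the nodes above hang off the per-device sysctl tree; assuming
 * unit 0, for example:
 *
 *	sysctl dev.ql.0.debug=1		# enable driver debug prints
 *	sysctl dev.ql.0.trigger_dump=1	# force a grcdump/idle-check capture
 *	sysctl -n dev.ql.0.MFW_Version	# report management firmware version
 */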
/*****************************************************************************
 * Operating System Network Interface Functions
 *****************************************************************************/

static void
qlnx_init_ifnet(device_t dev, qlnx_host_t *ha)
{
	uint16_t	device_id;
	struct ifnet	*ifp;

	ifp = ha->ifp = if_alloc(IFT_ETHER);

	if (ifp == NULL)
		panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	device_id = pci_get_device(ha->pci_dev);

#if __FreeBSD_version >= 1000000

	if (device_id == QLOGIC_PCI_DEVICE_ID_1634)
		ifp->if_baudrate = IF_Gbps(40);
	else if (device_id == QLOGIC_PCI_DEVICE_ID_1656)
		ifp->if_baudrate = IF_Gbps(25);
	else if (device_id == QLOGIC_PCI_DEVICE_ID_1654)
		ifp->if_baudrate = IF_Gbps(50);
	else if (device_id == QLOGIC_PCI_DEVICE_ID_1644)
		ifp->if_baudrate = IF_Gbps(100);

	ifp->if_capabilities = IFCAP_LINKSTATE;
#else
	ifp->if_mtu = ETHERMTU;
	ifp->if_baudrate = (1 * 1000 * 1000 * 1000);

#endif /* #if __FreeBSD_version >= 1000000 */

	ifp->if_init = qlnx_init;
	ifp->if_softc = ha;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = qlnx_ioctl;
	ifp->if_transmit = qlnx_transmit;
	ifp->if_qflush = qlnx_qflush;

	IFQ_SET_MAXLEN(&ifp->if_snd, qlnx_get_ifq_snd_maxlen(ha));
	ifp->if_snd.ifq_drv_maxlen = qlnx_get_ifq_snd_maxlen(ha);
	IFQ_SET_READY(&ifp->if_snd);

#if __FreeBSD_version >= 1100036
	if_setgetcounterfn(ifp, qlnx_get_counter);
#endif

	ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	memcpy(ha->primary_mac, qlnx_get_mac_addr(ha), ETH_ALEN);
	ether_ifattach(ifp, ha->primary_mac);
	bcopy(IF_LLADDR(ha->ifp), ha->primary_mac, ETHER_ADDR_LEN);

	ifp->if_capabilities |= IFCAP_HWCSUM;
	ifp->if_capabilities |= IFCAP_JUMBO_MTU;

	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
	ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
	ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
	ifp->if_capabilities |= IFCAP_TSO4;
	ifp->if_capabilities |= IFCAP_TSO6;
	ifp->if_capabilities |= IFCAP_LRO;

	ifp->if_capenable = ifp->if_capabilities;

	ifp->if_hwassist = CSUM_IP;
	ifp->if_hwassist |= CSUM_TCP | CSUM_UDP;
	ifp->if_hwassist |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6;
	ifp->if_hwassist |= CSUM_TSO;

	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	ifmedia_init(&ha->media, IFM_IMASK, qlnx_media_change,
		qlnx_media_status);

	if (device_id == QLOGIC_PCI_DEVICE_ID_1634) {
		ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_LR4), 0, NULL);
		ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_SR4), 0, NULL);
		ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_CR4), 0, NULL);
	} else if (device_id == QLOGIC_PCI_DEVICE_ID_1656) {
		ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_SR), 0, NULL);
		ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_CR), 0, NULL);
	} else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) {
		ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_KR2), 0, NULL);
		ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_CR2), 0, NULL);
	} else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) {
		ifmedia_add(&ha->media,
			(IFM_ETHER | QLNX_IFM_100G_LR4), 0, NULL);
		ifmedia_add(&ha->media,
			(IFM_ETHER | QLNX_IFM_100G_SR4), 0, NULL);
		ifmedia_add(&ha->media,
			(IFM_ETHER | QLNX_IFM_100G_CR4), 0, NULL);
	}

	ifmedia_add(&ha->media, (IFM_ETHER | IFM_FDX), 0, NULL);
	ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);

	ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));

	QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));

	return;
}
static void
qlnx_init_locked(qlnx_host_t *ha)
{
	struct ifnet	*ifp = ha->ifp;

	qlnx_stop(ha);

	if (qlnx_load(ha) == 0) {
		ifp->if_drv_flags |= IFF_DRV_RUNNING;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	}

	return;
}

static void
qlnx_init(void *arg)
{
	qlnx_host_t	*ha;

	ha = (qlnx_host_t *)arg;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	QLNX_LOCK(ha);
	qlnx_init_locked(ha);
	QLNX_UNLOCK(ha);

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));

	return;
}
static int
qlnx_config_mcast_mac_addr(qlnx_host_t *ha, uint8_t *mac_addr, uint32_t add_mac)
{
	struct ecore_filter_mcast	*mcast;
	struct ecore_dev		*cdev;
	int				rc;

	cdev = &ha->cdev;

	mcast = &ha->ecore_mcast;
	bzero(mcast, sizeof(struct ecore_filter_mcast));

	if (add_mac)
		mcast->opcode = ECORE_FILTER_ADD;
	else
		mcast->opcode = ECORE_FILTER_REMOVE;

	mcast->num_mc_addrs = 1;
	memcpy(mcast->mac, mac_addr, ETH_ALEN);

	rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);

	return (rc);
}

static int
qlnx_hw_add_mcast(qlnx_host_t *ha, uint8_t *mta)
{
	int	i;

	for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {

		if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0)
			return (0); /* it has already been added */
	}

	for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {

		if ((ha->mcast[i].addr[0] == 0) &&
			(ha->mcast[i].addr[1] == 0) &&
			(ha->mcast[i].addr[2] == 0) &&
			(ha->mcast[i].addr[3] == 0) &&
			(ha->mcast[i].addr[4] == 0) &&
			(ha->mcast[i].addr[5] == 0)) {

			if (qlnx_config_mcast_mac_addr(ha, mta, 1))
				return (-1);

			bcopy(mta, ha->mcast[i].addr, ETH_ALEN);
			ha->nmcast++;

			return (0);
		}
	}
	return (-1);
}

static int
qlnx_hw_del_mcast(qlnx_host_t *ha, uint8_t *mta)
{
	int	i;

	for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
		if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) {

			if (qlnx_config_mcast_mac_addr(ha, mta, 0))
				return (-1);

			ha->mcast[i].addr[0] = 0;
			ha->mcast[i].addr[1] = 0;
			ha->mcast[i].addr[2] = 0;
			ha->mcast[i].addr[3] = 0;
			ha->mcast[i].addr[4] = 0;
			ha->mcast[i].addr[5] = 0;

			ha->nmcast--;

			return (0);
		}
	}
	return (0);
}
/*
 * Name: qlnx_hw_set_multi
 * Function: Sets the multicast addresses provided by the host O.S. into the
 *	hardware (for the given interface)
 */
static void
qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
	uint32_t add_multi)
{
	int	i;

	for (i = 0; i < mcnt; i++) {
		if (add_multi) {
			if (qlnx_hw_add_mcast(ha, mta))
				break;
		} else {
			if (qlnx_hw_del_mcast(ha, mta))
				break;
		}

		mta += ETHER_HDR_LEN;
	}
	return;
}
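
/*
 * Note: the multicast table 'mta' is packed at ETHER_HDR_LEN (14 byte)
 * stride rather than ETHER_ADDR_LEN (6) -- qlnx_set_multi() below copies
 * each link-layer address into 14-byte slots -- so the walk above must
 * advance by ETHER_HDR_LEN to stay in step with the table layout.
 */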
#define QLNX_MCAST_ADDRS_SIZE (QLNX_MAX_NUM_MULTICAST_ADDRS * ETHER_HDR_LEN)
static int
qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi)
{
	uint8_t			mta[QLNX_MCAST_ADDRS_SIZE];
	struct ifmultiaddr	*ifma;
	int			mcnt = 0;
	struct ifnet		*ifp = ha->ifp;
	int			ret = 0;

	if_maddr_rlock(ifp);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == QLNX_MAX_NUM_MULTICAST_ADDRS)
			break;

		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
			&mta[mcnt * ETHER_HDR_LEN], ETHER_HDR_LEN);

		mcnt++;
	}

	if_maddr_runlock(ifp);

	QLNX_LOCK(ha);
	qlnx_hw_set_multi(ha, mta, mcnt, add_multi);
	QLNX_UNLOCK(ha);

	return (ret);
}

static int
qlnx_set_promisc(qlnx_host_t *ha)
{
	int	rc = 0;
	uint8_t	filter;

	filter = ha->filter;
	filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
	filter |= ECORE_ACCEPT_UCAST_UNMATCHED;

	rc = qlnx_set_rx_accept_filter(ha, filter);
	return (rc);
}

static int
qlnx_set_allmulti(qlnx_host_t *ha)
{
	int	rc = 0;
	uint8_t	filter;

	filter = ha->filter;
	filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
	rc = qlnx_set_rx_accept_filter(ha, filter);

	return (rc);
}
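
/*
 * ECORE_ACCEPT_UCAST_UNMATCHED opens the Rx filter to unicast frames whose
 * destination MAC matches no configured address (promiscuous mode), and
 * ECORE_ACCEPT_MCAST_UNMATCHED does the same for multicast (allmulti).
 * Both helpers above OR the new bits into the currently active filter value
 * in ha->filter before handing it to qlnx_set_rx_accept_filter().
 */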
static int
qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	int		ret = 0, mask;
	struct ifreq	*ifr = (struct ifreq *)data;
	struct ifaddr	*ifa = (struct ifaddr *)data;
	qlnx_host_t	*ha;

	ha = (qlnx_host_t *)ifp->if_softc;

	switch (cmd) {
	case SIOCSIFADDR:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n",
			__func__, cmd));

		if (ifa->ifa_addr->sa_family == AF_INET) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				QLNX_LOCK(ha);
				qlnx_init_locked(ha);
				QLNX_UNLOCK(ha);
			}
			QL_DPRINT4(ha, (ha->pci_dev,
				"%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
				__func__, cmd,
				ntohl(IA_SIN(ifa)->sin_addr.s_addr)));

			arp_ifinit(ifp, ifa);
		} else {
			ether_ioctl(ifp, cmd, data);
		}
		break;

	case SIOCSIFMTU:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n",
			__func__, cmd));

		if (ifr->ifr_mtu > QLNX_MAX_MTU) {
			ret = EINVAL;
		} else {
			QLNX_LOCK(ha);
			ifp->if_mtu = ifr->ifr_mtu;
			ha->max_frame_size =
				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				qlnx_init_locked(ha);
			}
			QLNX_UNLOCK(ha);
		}

		break;

	case SIOCSIFFLAGS:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n",
			__func__, cmd));

		QLNX_LOCK(ha);

		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				if ((ifp->if_flags ^ ha->if_flags) &
					IFF_PROMISC) {
					ret = qlnx_set_promisc(ha);
				} else if ((ifp->if_flags ^ ha->if_flags) &
					IFF_ALLMULTI) {
					ret = qlnx_set_allmulti(ha);
				}
			} else {
				ha->max_frame_size = ifp->if_mtu +
					ETHER_HDR_LEN + ETHER_CRC_LEN;
				qlnx_init_locked(ha);
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				qlnx_stop(ha);
			ha->if_flags = ifp->if_flags;
		}

		QLNX_UNLOCK(ha);
		break;

	case SIOCADDMULTI:
		QL_DPRINT4(ha, (ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd));

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			if (qlnx_set_multi(ha, 1))
				ret = EINVAL;
		}
		break;

	case SIOCDELMULTI:
		QL_DPRINT4(ha, (ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd));

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			if (qlnx_set_multi(ha, 0))
				ret = EINVAL;
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		QL_DPRINT4(ha, (ha->pci_dev,
			"%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n",
			__func__, cmd));
		ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
		break;

	case SIOCSIFCAP:

		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n",
			__func__, cmd));

		if (mask & IFCAP_HWCSUM)
			ifp->if_capenable ^= IFCAP_HWCSUM;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_TSO6)
			ifp->if_capenable ^= IFCAP_TSO6;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;

		if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
			qlnx_init(ha);

		VLAN_CAPABILITIES(ifp);
		break;
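
	/*
	 * SIOCGI2C: read the transceiver EEPROM over I2C. Only the standard
	 * SFP/QSFP device addresses are accepted -- 0xA0 (identification
	 * page) and 0xA2 (diagnostics page) -- and the request length is
	 * capped to the size of the ifi2creq data buffer.
	 */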
#if (__FreeBSD_version >= 1100101)

	case SIOCGI2C:
	{
		struct ifi2creq i2c;
		struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0];
		struct ecore_ptt *p_ptt;

		ret = copyin(ifr->ifr_data, &i2c, sizeof(i2c));

		if (ret)
			break;

		if ((i2c.len > sizeof (i2c.data)) ||
			(i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2)) {
			ret = EINVAL;
			break;
		}

		p_ptt = ecore_ptt_acquire(p_hwfn);

		if (!p_ptt) {
			QL_DPRINT1(ha, (ha->pci_dev, "%s :"
				" ecore_ptt_acquire failed\n", __func__));
			ret = -1;
			break;
		}

		ret = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt,
			(ha->pci_func & 0x1), i2c.dev_addr, i2c.offset,
			i2c.len, &i2c.data[0]);

		ecore_ptt_release(p_hwfn, p_ptt);

		if (ret) {
			ret = -1;
			break;
		}

		ret = copyout(&i2c, ifr->ifr_data, sizeof(i2c));

		QL_DPRINT8(ha, (ha->pci_dev, "SIOCGI2C copyout ret = %d"
			" len = %d addr = 0x%02x offset = 0x%04x"
			" data[0..7]=0x%02x 0x%02x 0x%02x 0x%02x 0x%02x"
			" 0x%02x 0x%02x 0x%02x\n",
			ret, i2c.len, i2c.dev_addr, i2c.offset,
			i2c.data[0], i2c.data[1], i2c.data[2], i2c.data[3],
			i2c.data[4], i2c.data[5], i2c.data[6], i2c.data[7]));
		break;
	}
#endif /* #if (__FreeBSD_version >= 1100101) */

	default:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: default (0x%lx)\n",
			__func__, cmd));
		ret = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (ret);
}
static int
qlnx_media_change(struct ifnet *ifp)
{
	qlnx_host_t	*ha;
	struct ifmedia	*ifm;
	int		ret = 0;

	ha = (qlnx_host_t *)ifp->if_softc;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	ifm = &ha->media;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		ret = EINVAL;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));

	return (ret);
}

static void
qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	qlnx_host_t	*ha;

	ha = (qlnx_host_t *)ifp->if_softc;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (ha->link_up) {
		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |=
			(IFM_FDX | qlnx_get_optics(ha, &ha->if_link));

		if (ha->if_link.link_partner_caps &
			(QLNX_LINK_CAP_Pause | QLNX_LINK_CAP_Asym_Pause))
			ifmr->ifm_active |=
				(IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE);
	}

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit (%s)\n", __func__,
		(ha->link_up ? "link_up" : "link_down")));

	return;
}
static void
qlnx_free_tx_pkt(qlnx_host_t *ha, struct qlnx_fastpath *fp,
	struct qlnx_tx_queue *txq)
{
	uint16_t		idx;
	struct mbuf		*mp;
	bus_dmamap_t		map;
	int			i;
	int			nbds = 0;
	struct eth_tx_bd	*tx_data_bd;
	struct eth_tx_1st_bd	*first_bd;

	idx = txq->sw_tx_cons;
	mp = txq->sw_tx_ring[idx].mp;
	map = txq->sw_tx_ring[idx].map;

	if ((mp == NULL) || QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL)){

		QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL);

		QL_DPRINT1(ha, (ha->pci_dev, "%s: (mp == NULL) "
			" tx_idx = 0x%x"
			" ecore_prod_idx = 0x%x"
			" ecore_cons_idx = 0x%x"
			" hw_bd_cons = 0x%x"
			" txq_db_last = 0x%x"
			" elem_left = 0x%x\n",
			__func__,
			fp->rss_id,
			ecore_chain_get_prod_idx(&txq->tx_pbl),
			ecore_chain_get_cons_idx(&txq->tx_pbl),
			le16toh(*txq->hw_cons_ptr),
			txq->tx_db.raw,
			ecore_chain_get_elem_left(&txq->tx_pbl)));

		fp->err_tx_free_pkt_null++;

		qlnx_trigger_dump(ha);

		return;
	}

	QLNX_INC_OPACKETS((ha->ifp));
	QLNX_INC_OBYTES((ha->ifp), (mp->m_pkthdr.len));

	bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ha->tx_tag, map);

	fp->tx_pkts_freed++;
	fp->tx_pkts_completed++;

	m_freem(mp);

	first_bd = (struct eth_tx_1st_bd *)ecore_chain_consume(&txq->tx_pbl);
	nbds = first_bd->data.nbds;

//	BD_SET_UNMAP_ADDR_LEN(first_bd, 0, 0);

	for (i = 1; i < nbds; i++) {
		tx_data_bd = ecore_chain_consume(&txq->tx_pbl);
//		BD_SET_UNMAP_ADDR_LEN(tx_data_bd, 0, 0);
	}
	txq->sw_tx_ring[idx].flags = 0;
	txq->sw_tx_ring[idx].mp = NULL;
	txq->sw_tx_ring[idx].map = (bus_dmamap_t)0;

	return;
}
2445 qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
2446 struct qlnx_tx_queue *txq)
2452 hw_bd_cons = le16toh(*txq->hw_cons_ptr);
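/*
 * Reclaim completed TX packets: advance the chain consumer index up to
 * the consumer index reported by hardware. Both indices are 16 bits
 * wide and wrap, so the distance is computed modulo 2^16; a distance
 * larger than TX_RING_SIZE indicates a corrupted index.
 */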
2454 while (hw_bd_cons !=
2455 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
2457 if (hw_bd_cons < ecore_cons_idx) {
2458 diff = (1 << 16) - (ecore_cons_idx - hw_bd_cons);
2460 diff = hw_bd_cons - ecore_cons_idx;
2462 if ((diff > TX_RING_SIZE) ||
2463 QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF)) {
2465 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF);
2467 QL_DPRINT1(ha, (ha->pci_dev, "%s: (diff = 0x%x) "
2469 " ecore_prod_idx = 0x%x"
2470 " ecore_cons_idx = 0x%x"
2471 " hw_bd_cons = 0x%x"
2472 " txq_db_last = 0x%x"
2473 " elem_left = 0x%x\n",
2476 ecore_chain_get_prod_idx(&txq->tx_pbl),
2477 ecore_chain_get_cons_idx(&txq->tx_pbl),
2478 le16toh(*txq->hw_cons_ptr),
2480 ecore_chain_get_elem_left(&txq->tx_pbl)));
2482 fp->err_tx_cons_idx_conflict++;
2485 qlnx_trigger_dump(ha);
2488 qlnx_free_tx_pkt(ha, fp, txq);
2490 txq->sw_tx_cons = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1);
2496 qlnx_transmit(struct ifnet *ifp, struct mbuf *mp)
2498 qlnx_host_t *ha = (qlnx_host_t *)ifp->if_softc;
2499 struct qlnx_fastpath *fp;
2500 int rss_id = 0, ret = 0;
2502 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
2504 #if __FreeBSD_version >= 1100000
2505 if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE)
2507 if (mp->m_flags & M_FLOWID)
2509 rss_id = (mp->m_pkthdr.flowid % ECORE_RSS_IND_TABLE_SIZE) %
2512 fp = &ha->fp_array[rss_id];
2514 if (fp->tx_br == NULL) {
2516 goto qlnx_transmit_exit;
2520 ret = drbr_enqueue(ifp, fp->tx_br, mp);
2523 if (fp->fp_taskqueue != NULL)
2524 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
2530 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret));
2535 qlnx_qflush(struct ifnet *ifp)
2538 struct qlnx_fastpath *fp;
2542 ha = (qlnx_host_t *)ifp->if_softc;
2544 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
2546 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
2548 fp = &ha->fp_array[rss_id];
2554 mtx_lock(&fp->tx_mtx);
2556 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
2557 fp->tx_pkts_freed++;
2560 mtx_unlock(&fp->tx_mtx);
2563 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
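/*
 * Post a TX doorbell: translate the mapped doorbell address into an
 * offset within the doorbell BAR, write the 32-bit producer value and
 * issue barriers on both register windows to flush the posted write.
 */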
2569 qlnx_txq_doorbell_wr32(qlnx_host_t *ha, void *reg_addr, uint32_t value)
2571 struct ecore_dev *cdev;
2576 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)cdev->doorbells);
2578 bus_write_4(ha->pci_dbells, offset, value);
2579 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_READ);
2580 bus_barrier(ha->pci_dbells, 0, 0, BUS_SPACE_BARRIER_READ);
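/*
 * Return the byte offset at which the TCP payload begins (Ethernet +
 * optional VLAN tag + IP/IPv6 header + TCP header). The TSO transmit
 * path uses this to split header BDs from payload BDs.
 */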
2586 qlnx_tcp_offset(qlnx_host_t *ha, struct mbuf *mp)
2588 struct ether_vlan_header *eh = NULL;
2589 struct ip *ip = NULL;
2590 struct ip6_hdr *ip6 = NULL;
2591 struct tcphdr *th = NULL;
2592 uint32_t ehdrlen = 0, ip_hlen = 0, offset = 0;
2595 uint8_t buf[sizeof(struct ip6_hdr)];
2599 eh = mtod(mp, struct ether_vlan_header *);
2601 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2602 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2603 etype = ntohs(eh->evl_proto);
2605 ehdrlen = ETHER_HDR_LEN;
2606 etype = ntohs(eh->evl_encap_proto);
2612 ip = (struct ip *)(mp->m_data + ehdrlen);
2614 ip_hlen = sizeof (struct ip);
2616 if (mp->m_len < (ehdrlen + ip_hlen)) {
2617 m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
2618 ip = (struct ip *)buf;
2621 th = (struct tcphdr *)(ip + 1);
2622 offset = ip_hlen + ehdrlen + (th->th_off << 2);
2625 case ETHERTYPE_IPV6:
2626 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2628 ip_hlen = sizeof(struct ip6_hdr);
2630 if (mp->m_len < (ehdrlen + ip_hlen)) {
2631 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
2633 ip6 = (struct ip6_hdr *)buf;
2635 th = (struct tcphdr *)(ip6 + 1);
2636 offset = ip_hlen + ehdrlen + (th->th_off << 2);
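/*
 * Firmware LSO window check: every window of ETH_TX_LSO_WINDOW_BDS_NUM
 * BDs must carry at least ETH_TX_LSO_WINDOW_MIN_LEN bytes of payload.
 * A nonzero return makes the caller defragment the mbuf chain.
 */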
2647 qlnx_tso_check(struct qlnx_fastpath *fp, bus_dma_segment_t *segs, int nsegs,
2651 uint32_t sum, nbds_in_hdr = 1;
2652 bus_dma_segment_t *t_segs = segs;
2654 /* count the number of segments spanned by the TCP header */
2657 while ((i < nsegs) && (offset > t_segs->ds_len)) {
2659 offset = offset - t_segs->ds_len;
2664 while (nsegs >= QLNX_MAX_SEGMENTS_NON_TSO) {
2668 for (i = 0; i < (ETH_TX_LSO_WINDOW_BDS_NUM - nbds_in_hdr); i++) {
2669 sum += segs->ds_len;
2673 if (sum < ETH_TX_LSO_WINDOW_MIN_LEN) {
2674 fp->tx_lso_wnd_min_len++;
2678 nsegs -= QLNX_MAX_SEGMENTS_NON_TSO;
2685 qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct mbuf **m_headp)
2687 bus_dma_segment_t *segs;
2688 bus_dmamap_t map = 0;
2691 struct mbuf *m_head = *m_headp;
2696 struct qlnx_tx_queue *txq;
2698 struct eth_tx_1st_bd *first_bd;
2699 struct eth_tx_2nd_bd *second_bd;
2700 struct eth_tx_3rd_bd *third_bd;
2701 struct eth_tx_bd *tx_data_bd;
2704 uint32_t nbds_in_hdr = 0;
2705 uint32_t offset = 0;
2707 QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__));
2718 idx = txq->sw_tx_prod;
2720 map = txq->sw_tx_ring[idx].map;
2723 ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
2726 #ifdef QLNX_TRACE_TSO_PKT_LEN
2728 if (!fp->tx_tso_min_pkt_len) {
2729 fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len;
2730 fp->tx_tso_max_pkt_len = m_head->m_pkthdr.len;
2732 if (fp->tx_tso_min_pkt_len > m_head->m_pkthdr.len)
2733 fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len;
2734 if (fp->tx_tso_max_pkt_len < m_head->m_pkthdr.len)
2735 fp->tx_tso_max_pkt_len = m_head->m_pkthdr.len;
2738 #endif /* #ifdef QLNX_TRACE_TSO_PKT_LEN */
2740 if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
2741 offset = qlnx_tcp_offset(ha, m_head);
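/*
 * Defragment the mbuf chain if the DMA load failed with EFBIG, or if
 * it produced more segments than a non-TSO frame may use, or if a TSO
 * frame fails the firmware LSO window check.
 */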
2743 if ((ret == EFBIG) ||
2744 ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && (
2745 (!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) ||
2746 ((m_head->m_pkthdr.csum_flags & CSUM_TSO) &&
2747 qlnx_tso_check(fp, segs, nsegs, offset))))) {
2751 QL_DPRINT8(ha, (ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
2752 m_head->m_pkthdr.len));
2756 m = m_defrag(m_head, M_NOWAIT);
2758 fp->err_tx_defrag++;
2759 fp->tx_pkts_freed++;
2762 QL_DPRINT1(ha, (ha->pci_dev,
2763 "%s: m_defrag() = NULL [%d]\n",
2771 if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
2772 segs, &nsegs, BUS_DMA_NOWAIT))) {
2774 fp->err_tx_defrag_dmamap_load++;
2776 QL_DPRINT1(ha, (ha->pci_dev,
2777 "%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n",
2778 __func__, ret, m_head->m_pkthdr.len));
2780 fp->tx_pkts_freed++;
2787 if ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) &&
2788 !(m_head->m_pkthdr.csum_flags & CSUM_TSO)) {
2790 fp->err_tx_non_tso_max_seg++;
2792 QL_DPRINT1(ha, (ha->pci_dev,
2793 "%s: (%d) nsegs too many for non-TSO[%d, %d]\n",
2794 __func__, ret, nsegs, m_head->m_pkthdr.len));
2796 fp->tx_pkts_freed++;
2802 if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
2803 offset = qlnx_tcp_offset(ha, m_head);
2807 fp->err_tx_dmamap_load++;
2809 QL_DPRINT1(ha, (ha->pci_dev,
2810 "%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n",
2811 __func__, ret, m_head->m_pkthdr.len));
2813 fp->tx_pkts_freed++;
2819 QL_ASSERT(ha, (nsegs != 0), ("qlnx_send: empty packet"));
2821 #ifdef QLNX_TRACE_TSO_PKT_LEN
2823 if (nsegs < QLNX_FP_MAX_SEGS)
2824 fp->tx_pkts[(nsegs - 1)]++;
2826 fp->tx_pkts[(QLNX_FP_MAX_SEGS - 1)]++;
2828 #endif /* #ifdef QLNX_TRACE_TSO_PKT_LEN */
2830 if ((nsegs + QLNX_TX_ELEM_RESERVE) >
2831 (int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))) {
2833 QL_DPRINT1(ha, (ha->pci_dev, "%s: (%d, 0x%x) insufficient BDs"
2834 " in chain[%d] trying to free packets\n",
2835 __func__, nsegs, elem_left, fp->rss_id));
2837 fp->tx_nsegs_gt_elem_left++;
2839 (void)qlnx_tx_int(ha, fp, txq);
2841 if ((nsegs + QLNX_TX_ELEM_RESERVE) > (int)(elem_left =
2842 ecore_chain_get_elem_left(&txq->tx_pbl))) {
2844 QL_DPRINT1(ha, (ha->pci_dev,
2845 "%s: (%d, 0x%x) insuffient BDs in chain[%d]\n",
2846 __func__, nsegs, elem_left, fp->rss_id));
2848 fp->err_tx_nsegs_gt_elem_left++;
2849 ha->storm_stats_enable = 1;
2854 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);
2856 txq->sw_tx_ring[idx].mp = m_head;
2858 first_bd = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
2860 memset(first_bd, 0, sizeof(*first_bd));
2862 first_bd->data.bd_flags.bitfields =
2863 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
2865 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, segs->ds_len);
2869 if (m_head->m_pkthdr.csum_flags & CSUM_IP) {
2870 first_bd->data.bd_flags.bitfields |=
2871 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
2874 if (m_head->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_TCP)) {
2875 first_bd->data.bd_flags.bitfields |=
2876 (1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT);
2879 if (m_head->m_flags & M_VLANTAG) {
2880 first_bd->data.vlan = m_head->m_pkthdr.ether_vtag;
2881 first_bd->data.bd_flags.bitfields |=
2882 (1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT);
2885 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
2887 first_bd->data.bd_flags.bitfields |=
2888 (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
2889 first_bd->data.bd_flags.bitfields |=
2890 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
2894 if (offset == segs->ds_len) {
2895 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
2899 second_bd = (struct eth_tx_2nd_bd *)
2900 ecore_chain_produce(&txq->tx_pbl);
2901 memset(second_bd, 0, sizeof(*second_bd));
2904 if (seg_idx < nsegs) {
2905 BD_SET_UNMAP_ADDR_LEN(second_bd, \
2906 (segs->ds_addr), (segs->ds_len));
2911 third_bd = (struct eth_tx_3rd_bd *)
2912 ecore_chain_produce(&txq->tx_pbl);
2913 memset(third_bd, 0, sizeof(*third_bd));
2914 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
2915 third_bd->data.bitfields |=
2916 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
2919 if (seg_idx < nsegs) {
2920 BD_SET_UNMAP_ADDR_LEN(third_bd, \
2921 (segs->ds_addr), (segs->ds_len));
2926 for (; seg_idx < nsegs; seg_idx++) {
2927 tx_data_bd = (struct eth_tx_bd *)
2928 ecore_chain_produce(&txq->tx_pbl);
2929 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
2930 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \
2937 } else if (offset < segs->ds_len) {
2938 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
2940 second_bd = (struct eth_tx_2nd_bd *)
2941 ecore_chain_produce(&txq->tx_pbl);
2942 memset(second_bd, 0, sizeof(*second_bd));
2943 BD_SET_UNMAP_ADDR_LEN(second_bd, \
2944 (segs->ds_addr + offset),\
2945 (segs->ds_len - offset));
2949 third_bd = (struct eth_tx_3rd_bd *)
2950 ecore_chain_produce(&txq->tx_pbl);
2951 memset(third_bd, 0, sizeof(*third_bd));
2953 BD_SET_UNMAP_ADDR_LEN(third_bd, \
2956 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
2957 third_bd->data.bitfields |=
2958 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
2962 for (seg_idx = 2; seg_idx < nsegs; seg_idx++) {
2963 tx_data_bd = (struct eth_tx_bd *)
2964 ecore_chain_produce(&txq->tx_pbl);
2965 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
2966 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \
2974 offset = offset - segs->ds_len;
2977 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) {
2982 tx_data_bd = (struct eth_tx_bd *)
2983 ecore_chain_produce(&txq->tx_pbl);
2984 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
2986 if (second_bd == NULL) {
2987 second_bd = (struct eth_tx_2nd_bd *)
2989 } else if (third_bd == NULL) {
2990 third_bd = (struct eth_tx_3rd_bd *)
2994 if (offset && (offset < segs->ds_len)) {
2995 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
2996 segs->ds_addr, offset);
2998 tx_data_bd = (struct eth_tx_bd *)
2999 ecore_chain_produce(&txq->tx_pbl);
3001 memset(tx_data_bd, 0,
3002 sizeof(*tx_data_bd));
3004 if (second_bd == NULL) {
3006 (struct eth_tx_2nd_bd *)tx_data_bd;
3007 } else if (third_bd == NULL) {
3009 (struct eth_tx_3rd_bd *)tx_data_bd;
3011 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3012 (segs->ds_addr + offset), \
3013 (segs->ds_len - offset));
3018 offset = offset - segs->ds_len;
3019 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3020 segs->ds_addr, segs->ds_len);
3026 if (third_bd == NULL) {
3027 third_bd = (struct eth_tx_3rd_bd *)
3028 ecore_chain_produce(&txq->tx_pbl);
3029 memset(third_bd, 0, sizeof(*third_bd));
3032 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3033 third_bd->data.bitfields |=
3034 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3038 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) {
3039 tx_data_bd = (struct eth_tx_bd *)
3040 ecore_chain_produce(&txq->tx_pbl);
3041 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3042 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, segs->ds_addr,\
3047 first_bd->data.bitfields =
3048 (m_head->m_pkthdr.len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
3049 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
3050 first_bd->data.bitfields =
3051 htole16(first_bd->data.bitfields);
3055 first_bd->data.nbds = nbd;
3057 #ifdef QLNX_TRACE_TSO_PKT_LEN
3059 if (fp->tx_tso_max_nsegs < nsegs)
3060 fp->tx_tso_max_nsegs = nsegs;
3062 if ((nsegs < fp->tx_tso_min_nsegs) || (!fp->tx_tso_min_nsegs))
3063 fp->tx_tso_min_nsegs = nsegs;
3065 #endif /* #ifdef QLNX_TRACE_TSO_PKT_LEN */
3067 txq->sw_tx_ring[idx].nsegs = nsegs;
3068 txq->sw_tx_prod = (txq->sw_tx_prod + 1) & (TX_RING_SIZE - 1);
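/* Ring the doorbell with the new chain producer index so the NIC
 * starts fetching the BDs just posted.
 */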
3070 txq->tx_db.data.bd_prod =
3071 htole16(ecore_chain_get_prod_idx(&txq->tx_pbl));
3073 qlnx_txq_doorbell_wr32(ha, txq->doorbell_addr, txq->tx_db.raw);
3075 QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__));
3080 qlnx_stop(qlnx_host_t *ha)
3082 struct ifnet *ifp = ha->ifp;
3088 ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
3091 * We simply lock and unlock each fp->tx_mtx to propagate
3092 * the if_drv_flags state to each tx thread.
3095 if (ha->state == QLNX_STATE_OPEN) {
3096 for (i = 0; i < ha->num_rss; i++) {
3097 struct qlnx_fastpath *fp = &ha->fp_array[i];
3099 mtx_lock(&fp->tx_mtx);
3100 mtx_unlock(&fp->tx_mtx);
3102 if (fp->fp_taskqueue != NULL)
3103 taskqueue_enqueue(fp->fp_taskqueue,
3114 qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha)
3116 return(TX_RING_SIZE - 1);
3120 qlnx_get_mac_addr(qlnx_host_t *ha)
3122 struct ecore_hwfn *p_hwfn;
3124 p_hwfn = &ha->cdev.hwfns[0];
3125 return (p_hwfn->hw_info.hw_mac_addr);
3129 qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link)
3131 uint32_t ifm_type = 0;
3133 switch (if_link->media_type) {
3135 case MEDIA_MODULE_FIBER:
3136 case MEDIA_UNSPECIFIED:
3137 if (if_link->speed == (100 * 1000))
3138 ifm_type = QLNX_IFM_100G_SR4;
3139 else if (if_link->speed == (40 * 1000))
3140 ifm_type = IFM_40G_SR4;
3141 else if (if_link->speed == (25 * 1000))
3142 ifm_type = QLNX_IFM_25G_SR;
3145 case MEDIA_DA_TWINAX:
3146 if (if_link->speed == (100 * 1000))
3147 ifm_type = QLNX_IFM_100G_CR4;
3148 else if (if_link->speed == (40 * 1000))
3149 ifm_type = IFM_40G_CR4;
3150 else if (if_link->speed == (25 * 1000))
3151 ifm_type = QLNX_IFM_25G_CR;
3155 ifm_type = IFM_UNKNOWN;
3163 /*****************************************************************************
3164 * Interrupt Service Functions
3165 *****************************************************************************/
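/*
 * A frame larger than one RX buffer spans multiple BDs. Pull the
 * remaining "len" bytes from successive RX buffers, append them as
 * mbufs to mp_head and replenish each consumed ring slot; if a
 * replacement buffer cannot be allocated the slot is recycled and the
 * caller drops the frame.
 */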
3168 qlnx_rx_jumbo_chain(qlnx_host_t *ha, struct qlnx_fastpath *fp,
3169 struct mbuf *mp_head, uint16_t len)
3171 struct mbuf *mp, *mpf, *mpl;
3172 struct sw_rx_data *sw_rx_data;
3173 struct qlnx_rx_queue *rxq;
3174 uint16_t len_in_buffer;
3177 mpf = mpl = mp = NULL;
3181 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3183 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3184 mp = sw_rx_data->data;
3187 QL_DPRINT1(ha, (ha->pci_dev, "%s: mp = NULL\n",
3189 fp->err_rx_mp_null++;
3191 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3198 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3199 BUS_DMASYNC_POSTREAD);
3201 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3203 QL_DPRINT1(ha, (ha->pci_dev,
3204 "%s: New buffer allocation failed, dropping"
3205 " incoming packet and reusing its buffer\n",
3208 qlnx_reuse_rx_data(rxq);
3209 fp->err_rx_alloc_errors++;
3216 ecore_chain_consume(&rxq->rx_bd_ring);
3218 if (len > rxq->rx_buf_size)
3219 len_in_buffer = rxq->rx_buf_size;
3221 len_in_buffer = len;
3223 len = len - len_in_buffer;
3225 mp->m_flags &= ~M_PKTHDR;
3227 mp->m_len = len_in_buffer;
3238 mp_head->m_next = mpf;
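/*
 * Hardware TPA (LRO) support. A TPA_START CQE opens an aggregation:
 * the current RX buffer becomes the head of the aggregated packet, the
 * pre-allocated tpa_info buffer is posted to the ring in its place and
 * any buffers listed in ext_bd_len_list are chained on as the first
 * continuation segments.
 */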
3244 qlnx_tpa_start(qlnx_host_t *ha,
3245 struct qlnx_fastpath *fp,
3246 struct qlnx_rx_queue *rxq,
3247 struct eth_fast_path_rx_tpa_start_cqe *cqe)
3250 struct ifnet *ifp = ha->ifp;
3252 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL;
3253 struct sw_rx_data *sw_rx_data;
3256 struct eth_rx_bd *rx_bd;
3259 #if __FreeBSD_version >= 1100000
3261 #endif /* #if __FreeBSD_version >= 1100000 */
3264 agg_index = cqe->tpa_agg_index;
3266 QL_DPRINT7(ha, (dev, "%s[%d]: enter\n "
3268 "\t bitfields = 0x%x\n"
3269 "\t seg_len = 0x%x\n"
3270 "\t pars_flags = 0x%x\n"
3271 "\t vlan_tag = 0x%x\n"
3272 "\t rss_hash = 0x%x\n"
3273 "\t len_on_first_bd = 0x%x\n"
3274 "\t placement_offset = 0x%x\n"
3275 "\t tpa_agg_index = 0x%x\n"
3276 "\t header_len = 0x%x\n"
3277 "\t ext_bd_len_list[0] = 0x%x\n"
3278 "\t ext_bd_len_list[1] = 0x%x\n"
3279 "\t ext_bd_len_list[2] = 0x%x\n"
3280 "\t ext_bd_len_list[3] = 0x%x\n"
3281 "\t ext_bd_len_list[4] = 0x%x\n",
3282 __func__, fp->rss_id, cqe->type, cqe->bitfields, cqe->seg_len,
3283 cqe->pars_flags.flags, cqe->vlan_tag,
3284 cqe->rss_hash, cqe->len_on_first_bd, cqe->placement_offset,
3285 cqe->tpa_agg_index, cqe->header_len,
3286 cqe->ext_bd_len_list[0], cqe->ext_bd_len_list[1],
3287 cqe->ext_bd_len_list[2], cqe->ext_bd_len_list[3],
3288 cqe->ext_bd_len_list[4]));
3290 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
3291 fp->err_rx_tpa_invalid_agg_num++;
3295 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3296 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD);
3297 mp = sw_rx_data->data;
3299 QL_DPRINT7(ha, (dev, "%s[%d]: mp = %p \n ", __func__, fp->rss_id, mp));
3302 QL_DPRINT7(ha, (dev, "%s[%d]: mp = NULL\n", __func__,
3304 fp->err_rx_mp_null++;
3305 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3310 if ((le16toh(cqe->pars_flags.flags)) & CQE_FLAGS_ERR) {
3312 QL_DPRINT7(ha, (dev, "%s[%d]: CQE in CONS = %u has error,"
3313 " flags = %x, dropping incoming packet\n", __func__,
3314 fp->rss_id, rxq->sw_rx_cons,
3315 le16toh(cqe->pars_flags.flags)));
3317 fp->err_rx_hw_errors++;
3319 qlnx_reuse_rx_data(rxq);
3321 QLNX_INC_IERRORS(ifp);
3326 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3328 QL_DPRINT7(ha, (dev, "%s[%d]: New buffer allocation failed,"
3329 " dropping incoming packet and reusing its buffer\n",
3330 __func__, fp->rss_id));
3332 fp->err_rx_alloc_errors++;
3333 QLNX_INC_IQDROPS(ifp);
3336 * Load the tpa mbuf into the rx ring and save the
3340 map = sw_rx_data->map;
3341 addr = sw_rx_data->dma_addr;
3343 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
3345 sw_rx_data->data = rxq->tpa_info[agg_index].rx_buf.data;
3346 sw_rx_data->dma_addr = rxq->tpa_info[agg_index].rx_buf.dma_addr;
3347 sw_rx_data->map = rxq->tpa_info[agg_index].rx_buf.map;
3349 rxq->tpa_info[agg_index].rx_buf.data = mp;
3350 rxq->tpa_info[agg_index].rx_buf.dma_addr = addr;
3351 rxq->tpa_info[agg_index].rx_buf.map = map;
3353 rx_bd = (struct eth_rx_bd *)
3354 ecore_chain_produce(&rxq->rx_bd_ring);
3356 rx_bd->addr.hi = htole32(U64_HI(sw_rx_data->dma_addr));
3357 rx_bd->addr.lo = htole32(U64_LO(sw_rx_data->dma_addr));
3359 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3360 BUS_DMASYNC_PREREAD);
3362 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
3363 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3365 ecore_chain_consume(&rxq->rx_bd_ring);
3367 /* Now reuse any buffers posted in ext_bd_len_list */
3368 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
3370 if (cqe->ext_bd_len_list[i] == 0)
3373 qlnx_reuse_rx_data(rxq);
3376 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
3380 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
3382 QL_DPRINT7(ha, (dev, "%s[%d]: invalid aggregation state,"
3383 " dropping incoming packet and reusing its buffer\n",
3384 __func__, fp->rss_id));
3386 QLNX_INC_IQDROPS(ifp);
3388 /* if we already have mbuf head in aggregation free it */
3389 if (rxq->tpa_info[agg_index].mpf) {
3390 m_freem(rxq->tpa_info[agg_index].mpf);
3391 rxq->tpa_info[agg_index].mpl = NULL;
3393 rxq->tpa_info[agg_index].mpf = mp;
3394 rxq->tpa_info[agg_index].mpl = NULL;
3396 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3397 ecore_chain_consume(&rxq->rx_bd_ring);
3399 /* Now reuse any buffers posted in ext_bd_len_list */
3400 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
3402 if (cqe->ext_bd_len_list[i] == 0)
3405 qlnx_reuse_rx_data(rxq);
3407 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
3413 * First process the ext_bd_len_list;
3414 * if this fails then we simply drop the packet.
3416 ecore_chain_consume(&rxq->rx_bd_ring);
3417 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3419 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
3421 QL_DPRINT7(ha, (dev, "%s[%d]: 4\n ", __func__, fp->rss_id));
3423 if (cqe->ext_bd_len_list[i] == 0)
3426 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3427 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3428 BUS_DMASYNC_POSTREAD);
3430 mpc = sw_rx_data->data;
3433 QL_DPRINT7(ha, (ha->pci_dev, "%s[%d]: mpc = NULL\n",
3434 __func__, fp->rss_id));
3435 fp->err_rx_mp_null++;
3439 rxq->tpa_info[agg_index].agg_state =
3440 QLNX_AGG_STATE_ERROR;
3441 ecore_chain_consume(&rxq->rx_bd_ring);
3443 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3447 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3448 QL_DPRINT7(ha, (dev,
3449 "%s[%d]: New buffer allocation failed, dropping"
3450 " incoming packet and reusing its buffer\n",
3451 __func__, fp->rss_id));
3453 qlnx_reuse_rx_data(rxq);
3459 rxq->tpa_info[agg_index].agg_state =
3460 QLNX_AGG_STATE_ERROR;
3462 ecore_chain_consume(&rxq->rx_bd_ring);
3464 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3469 mpc->m_flags &= ~M_PKTHDR;
3471 mpc->m_len = cqe->ext_bd_len_list[i];
3477 mpl->m_len = ha->rx_buf_size;
3482 ecore_chain_consume(&rxq->rx_bd_ring);
3484 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3487 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
3489 QL_DPRINT7(ha, (dev, "%s[%d]: invalid aggregation state,"
3490 " dropping incoming packet and reusing its buffer\n",
3491 __func__, fp->rss_id));
3493 QLNX_INC_IQDROPS(ifp);
3495 rxq->tpa_info[agg_index].mpf = mp;
3496 rxq->tpa_info[agg_index].mpl = NULL;
3501 rxq->tpa_info[agg_index].placement_offset = cqe->placement_offset;
3504 mp->m_len = ha->rx_buf_size;
3506 rxq->tpa_info[agg_index].mpf = mp;
3507 rxq->tpa_info[agg_index].mpl = mpl;
3509 mp->m_len = cqe->len_on_first_bd + cqe->placement_offset;
3510 rxq->tpa_info[agg_index].mpf = mp;
3511 rxq->tpa_info[agg_index].mpl = mp;
3515 mp->m_flags |= M_PKTHDR;
3517 /* assign the packet to this interface */
3518 mp->m_pkthdr.rcvif = ifp;
3520 /* assume no hardware checksum has been completed */
3521 mp->m_pkthdr.csum_flags = 0;
3523 //mp->m_pkthdr.flowid = fp->rss_id;
3524 mp->m_pkthdr.flowid = cqe->rss_hash;
3526 #if __FreeBSD_version >= 1100000
3528 hash_type = cqe->bitfields &
3529 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
3530 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);
3532 switch (hash_type) {
3534 case RSS_HASH_TYPE_IPV4:
3535 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
3538 case RSS_HASH_TYPE_TCP_IPV4:
3539 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
3542 case RSS_HASH_TYPE_IPV6:
3543 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
3546 case RSS_HASH_TYPE_TCP_IPV6:
3547 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
3551 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
3556 mp->m_flags |= M_FLOWID;
3559 mp->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID |
3560 CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3562 mp->m_pkthdr.csum_data = 0xFFFF;
3564 if (CQE_HAS_VLAN(cqe->pars_flags.flags)) {
3565 mp->m_pkthdr.ether_vtag = le16toh(cqe->vlan_tag);
3566 mp->m_flags |= M_VLANTAG;
3569 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_START;
3571 QL_DPRINT7(ha, (dev, "%s[%d]: 5\n" "\tagg_state = %d\n"
3572 "\t mpf = %p mpl = %p\n", __func__, fp->rss_id,
3573 rxq->tpa_info[agg_index].agg_state,
3574 rxq->tpa_info[agg_index].mpf, rxq->tpa_info[agg_index].mpl));
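/*
 * A TPA_CONT CQE appends the buffers named in len_list to the
 * aggregation opened by TPA_START, extending the mbuf chain at
 * tpa_info[].mpl.
 */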
3580 qlnx_tpa_cont(qlnx_host_t *ha, struct qlnx_fastpath *fp,
3581 struct qlnx_rx_queue *rxq,
3582 struct eth_fast_path_rx_tpa_cont_cqe *cqe)
3584 struct sw_rx_data *sw_rx_data;
3586 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL;
3593 QL_DPRINT7(ha, (dev, "%s[%d]: enter\n "
3595 "\t tpa_agg_index = 0x%x\n"
3596 "\t len_list[0] = 0x%x\n"
3597 "\t len_list[1] = 0x%x\n"
3598 "\t len_list[2] = 0x%x\n"
3599 "\t len_list[3] = 0x%x\n"
3600 "\t len_list[4] = 0x%x\n"
3601 "\t len_list[5] = 0x%x\n",
3602 __func__, fp->rss_id, cqe->type, cqe->tpa_agg_index,
3603 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
3604 cqe->len_list[3], cqe->len_list[4], cqe->len_list[5]));
3606 agg_index = cqe->tpa_agg_index;
3608 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
3609 QL_DPRINT7(ha, (dev, "%s[%d]: 0\n ", __func__, fp->rss_id));
3610 fp->err_rx_tpa_invalid_agg_num++;
3615 for (i = 0; i < ETH_TPA_CQE_CONT_LEN_LIST_SIZE; i++) {
3617 QL_DPRINT7(ha, (dev, "%s[%d]: 1\n ", __func__, fp->rss_id));
3619 if (cqe->len_list[i] == 0)
3622 if (rxq->tpa_info[agg_index].agg_state !=
3623 QLNX_AGG_STATE_START) {
3624 qlnx_reuse_rx_data(rxq);
3628 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3629 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3630 BUS_DMASYNC_POSTREAD);
3632 mpc = sw_rx_data->data;
3636 QL_DPRINT7(ha, (dev, "%s[%d]: mpc = NULL\n",
3637 __func__, fp->rss_id));
3639 fp->err_rx_mp_null++;
3643 rxq->tpa_info[agg_index].agg_state =
3644 QLNX_AGG_STATE_ERROR;
3645 ecore_chain_consume(&rxq->rx_bd_ring);
3647 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3651 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3653 QL_DPRINT7(ha, (dev,
3654 "%s[%d]: New buffer allocation failed, dropping"
3655 " incoming packet and reusing its buffer\n",
3656 __func__, fp->rss_id));
3658 qlnx_reuse_rx_data(rxq);
3664 rxq->tpa_info[agg_index].agg_state =
3665 QLNX_AGG_STATE_ERROR;
3667 ecore_chain_consume(&rxq->rx_bd_ring);
3669 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3674 mpc->m_flags &= ~M_PKTHDR;
3676 mpc->m_len = cqe->len_list[i];
3682 mpl->m_len = ha->rx_buf_size;
3687 ecore_chain_consume(&rxq->rx_bd_ring);
3689 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3692 QL_DPRINT7(ha, (dev, "%s[%d]: 2\n" "\tmpf = %p mpl = %p\n",
3693 __func__, fp->rss_id, mpf, mpl));
3696 mp = rxq->tpa_info[agg_index].mpl;
3697 mp->m_len = ha->rx_buf_size;
3699 rxq->tpa_info[agg_index].mpl = mpl;
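/*
 * A TPA_END CQE closes an aggregation: consume any trailing buffers in
 * len_list, fix up the head mbuf's packet length, hand the aggregated
 * packet to the stack and return the number of coalesced segments so
 * the caller can charge them against its RX budget.
 */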
3706 qlnx_tpa_end(qlnx_host_t *ha, struct qlnx_fastpath *fp,
3707 struct qlnx_rx_queue *rxq,
3708 struct eth_fast_path_rx_tpa_end_cqe *cqe)
3710 struct sw_rx_data *sw_rx_data;
3712 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL;
3716 struct ifnet *ifp = ha->ifp;
3721 QL_DPRINT7(ha, (dev, "%s[%d]: enter\n "
3723 "\t tpa_agg_index = 0x%x\n"
3724 "\t total_packet_len = 0x%x\n"
3725 "\t num_of_bds = 0x%x\n"
3726 "\t end_reason = 0x%x\n"
3727 "\t num_of_coalesced_segs = 0x%x\n"
3728 "\t ts_delta = 0x%x\n"
3729 "\t len_list[0] = 0x%x\n"
3730 "\t len_list[1] = 0x%x\n"
3731 "\t len_list[2] = 0x%x\n"
3732 "\t len_list[3] = 0x%x\n",
3733 __func__, fp->rss_id, cqe->type, cqe->tpa_agg_index,
3734 cqe->total_packet_len, cqe->num_of_bds,
3735 cqe->end_reason, cqe->num_of_coalesced_segs, cqe->ts_delta,
3736 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
3739 agg_index = cqe->tpa_agg_index;
3741 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
3743 QL_DPRINT7(ha, (dev, "%s[%d]: 0\n ", __func__, fp->rss_id));
3745 fp->err_rx_tpa_invalid_agg_num++;
3750 for (i = 0; i < ETH_TPA_CQE_END_LEN_LIST_SIZE; i++) {
3752 QL_DPRINT7(ha, (dev, "%s[%d]: 1\n ", __func__, fp->rss_id));
3754 if (cqe->len_list[i] == 0)
3757 if (rxq->tpa_info[agg_index].agg_state !=
3758 QLNX_AGG_STATE_START) {
3760 QL_DPRINT7(ha, (dev, "%s[%d]: 2\n ", __func__,
3763 qlnx_reuse_rx_data(rxq);
3767 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3768 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3769 BUS_DMASYNC_POSTREAD);
3771 mpc = sw_rx_data->data;
3775 QL_DPRINT7(ha, (dev, "%s[%d]: mpc = NULL\n",
3776 __func__, fp->rss_id));
3778 fp->err_rx_mp_null++;
3782 rxq->tpa_info[agg_index].agg_state =
3783 QLNX_AGG_STATE_ERROR;
3784 ecore_chain_consume(&rxq->rx_bd_ring);
3786 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3790 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3791 QL_DPRINT7(ha, (dev,
3792 "%s[%d]: New buffer allocation failed, dropping"
3793 " incoming packet and reusing its buffer\n",
3794 __func__, fp->rss_id));
3796 qlnx_reuse_rx_data(rxq);
3802 rxq->tpa_info[agg_index].agg_state =
3803 QLNX_AGG_STATE_ERROR;
3805 ecore_chain_consume(&rxq->rx_bd_ring);
3807 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3812 mpc->m_flags &= ~M_PKTHDR;
3814 mpc->m_len = cqe->len_list[i];
3820 mpl->m_len = ha->rx_buf_size;
3825 ecore_chain_consume(&rxq->rx_bd_ring);
3827 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3830 QL_DPRINT7(ha, (dev, "%s[%d]: 5\n ", __func__, fp->rss_id));
3834 QL_DPRINT7(ha, (dev, "%s[%d]: 6\n ", __func__, fp->rss_id));
3836 mp = rxq->tpa_info[agg_index].mpl;
3837 mp->m_len = ha->rx_buf_size;
3841 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_START) {
3843 QL_DPRINT7(ha, (dev, "%s[%d]: 7\n ", __func__, fp->rss_id));
3845 if (rxq->tpa_info[agg_index].mpf != NULL)
3846 m_freem(rxq->tpa_info[agg_index].mpf);
3847 rxq->tpa_info[agg_index].mpf = NULL;
3848 rxq->tpa_info[agg_index].mpl = NULL;
3849 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
3853 mp = rxq->tpa_info[agg_index].mpf;
3854 m_adj(mp, rxq->tpa_info[agg_index].placement_offset);
3855 mp->m_pkthdr.len = cqe->total_packet_len;
3857 if (mp->m_next == NULL)
3858 mp->m_len = mp->m_pkthdr.len;
3860 /* compute the total packet length */
3862 while (mpf != NULL) {
3867 if (cqe->total_packet_len > len) {
3868 mpl = rxq->tpa_info[agg_index].mpl;
3869 mpl->m_len += (cqe->total_packet_len - len);
3873 QLNX_INC_IPACKETS(ifp);
3874 QLNX_INC_IBYTES(ifp, (cqe->total_packet_len));
3876 QL_DPRINT7(ha, (dev, "%s[%d]: 8 csum_data = 0x%x csum_flags = 0x%lx\n "
3877 "m_len = 0x%x m_pkthdr_len = 0x%x\n",
3878 __func__, fp->rss_id, mp->m_pkthdr.csum_data,
3879 mp->m_pkthdr.csum_flags, mp->m_len, mp->m_pkthdr.len));
3881 (*ifp->if_input)(ifp, mp);
3883 rxq->tpa_info[agg_index].mpf = NULL;
3884 rxq->tpa_info[agg_index].mpl = NULL;
3885 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
3887 return (cqe->num_of_coalesced_segs);
3891 qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
3894 uint16_t hw_comp_cons, sw_comp_cons;
3896 struct qlnx_rx_queue *rxq = fp->rxq;
3897 struct ifnet *ifp = ha->ifp;
3898 struct ecore_dev *cdev = &ha->cdev;
3899 struct ecore_hwfn *p_hwfn;
3901 #ifdef QLNX_SOFT_LRO
3902 struct lro_ctrl *lro;
3905 #endif /* #ifdef QLNX_SOFT_LRO */
3907 hw_comp_cons = le16toh(*rxq->hw_cons_ptr);
3908 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
3910 p_hwfn = &ha->cdev.hwfns[(fp->rss_id % cdev->num_hwfns)];
3912 /* Memory barrier to prevent the CPU from speculatively reading the
3913 * CQE/BD in the while-loop before reading hw_comp_cons. If the CQE
3914 * were read before FW writes it, FW could then write the CQE and SB,
3915 * and when the CPU reads hw_comp_cons it would still use the stale CQE.
3918 /* Loop to complete all indicated BDs */
3919 while (sw_comp_cons != hw_comp_cons) {
3920 union eth_rx_cqe *cqe;
3921 struct eth_fast_path_rx_reg_cqe *fp_cqe;
3922 struct sw_rx_data *sw_rx_data;
3923 register struct mbuf *mp;
3924 enum eth_rx_cqe_type cqe_type;
3925 uint16_t len, pad, len_on_first_bd;
3927 #if __FreeBSD_version >= 1100000
3929 #endif /* #if __FreeBSD_version >= 1100000 */
3931 /* Get the CQE from the completion ring */
3932 cqe = (union eth_rx_cqe *)
3933 ecore_chain_consume(&rxq->rx_comp_ring);
3934 cqe_type = cqe->fast_path_regular.type;
3936 if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) {
3937 QL_DPRINT3(ha, (ha->pci_dev, "Got a slowpath CQE\n"));
3939 ecore_eth_cqe_completion(p_hwfn,
3940 (struct eth_slow_path_rx_cqe *)cqe);
3944 if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) {
3948 case ETH_RX_CQE_TYPE_TPA_START:
3949 qlnx_tpa_start(ha, fp, rxq,
3950 &cqe->fast_path_tpa_start);
3954 case ETH_RX_CQE_TYPE_TPA_CONT:
3955 qlnx_tpa_cont(ha, fp, rxq,
3956 &cqe->fast_path_tpa_cont);
3960 case ETH_RX_CQE_TYPE_TPA_END:
3961 rx_pkt += qlnx_tpa_end(ha, fp, rxq,
3962 &cqe->fast_path_tpa_end);
3973 /* Get the data from the SW ring */
3974 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3975 mp = sw_rx_data->data;
3978 QL_DPRINT1(ha, (ha->pci_dev, "%s: mp = NULL\n",
3980 fp->err_rx_mp_null++;
3982 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3985 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3986 BUS_DMASYNC_POSTREAD);
3989 fp_cqe = &cqe->fast_path_regular;/* MK CR TPA check assembly */
3990 len = le16toh(fp_cqe->pkt_len);
3991 pad = fp_cqe->placement_offset;
3994 (ha->pci_dev, "CQE type = %x, flags = %x, vlan = %x,"
3995 " len %u, parsing flags = %d pad = %d\n",
3996 cqe_type, fp_cqe->bitfields,
3997 le16toh(fp_cqe->vlan_tag),
3998 len, le16toh(fp_cqe->pars_flags.flags), pad));
4000 data = mtod(mp, uint8_t *);
4004 qlnx_dump_buf8(ha, __func__, data, len);
4006 /* For every Rx BD consumed, we allocate a new BD so the BD ring
4007 * always has a fixed size. If allocation fails, we take the
4008 * consumed BD and return it to the ring in the PROD position.
4009 * The packet that was received on that BD will be dropped (and
4010 * not passed to the upper stack).
4012 /* If this is an error packet then drop it */
4013 if ((le16toh(cqe->fast_path_regular.pars_flags.flags)) &
4016 QL_DPRINT1(ha, (ha->pci_dev,
4017 "CQE in CONS = %u has error, flags = %x,"
4018 " dropping incoming packet\n", sw_comp_cons,
4019 le16toh(cqe->fast_path_regular.pars_flags.flags)));
4021 fp->err_rx_hw_errors++;
4023 qlnx_reuse_rx_data(rxq);
4025 QLNX_INC_IERRORS(ifp);
4030 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4032 QL_DPRINT1(ha, (ha->pci_dev,
4033 "New buffer allocation failed, dropping"
4034 " incoming packet and reusing its buffer\n"));
4036 qlnx_reuse_rx_data(rxq);
4038 fp->err_rx_alloc_errors++;
4040 QLNX_INC_IQDROPS(ifp);
4045 ecore_chain_consume(&rxq->rx_bd_ring);
4047 len_on_first_bd = fp_cqe->len_on_first_bd;
4049 mp->m_pkthdr.len = len;
4052 (ha->pci_dev, "%s: len = %d len_on_first_bd = %d\n",
4053 __func__, len, len_on_first_bd));
4055 if ((len > 60) && (len > len_on_first_bd)) {
4057 mp->m_len = len_on_first_bd;
4059 if (qlnx_rx_jumbo_chain(ha, fp, mp,
4060 (len - len_on_first_bd)) != 0) {
4064 QLNX_INC_IQDROPS(ifp);
4069 } else if (len_on_first_bd < len) {
4070 fp->err_rx_jumbo_chain_pkts++;
4075 mp->m_flags |= M_PKTHDR;
4077 /* assign the packet to this interface */
4078 mp->m_pkthdr.rcvif = ifp;
4080 /* assume no hardware checksum has been completed */
4081 mp->m_pkthdr.csum_flags = 0;
4083 mp->m_pkthdr.flowid = fp_cqe->rss_hash;
4085 #if __FreeBSD_version >= 1100000
4087 hash_type = fp_cqe->bitfields &
4088 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
4089 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);
4091 switch (hash_type) {
4093 case RSS_HASH_TYPE_IPV4:
4094 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
4097 case RSS_HASH_TYPE_TCP_IPV4:
4098 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
4101 case RSS_HASH_TYPE_IPV6:
4102 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
4105 case RSS_HASH_TYPE_TCP_IPV6:
4106 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
4110 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
4115 mp->m_flags |= M_FLOWID;
4118 if (CQE_L3_PACKET(fp_cqe->pars_flags.flags)) {
4119 mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
4122 if (!(CQE_IP_HDR_ERR(fp_cqe->pars_flags.flags))) {
4123 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4126 if (CQE_L4_HAS_CSUM(fp_cqe->pars_flags.flags)) {
4127 mp->m_pkthdr.csum_data = 0xFFFF;
4128 mp->m_pkthdr.csum_flags |=
4129 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4132 if (CQE_HAS_VLAN(fp_cqe->pars_flags.flags)) {
4133 mp->m_pkthdr.ether_vtag = le16toh(fp_cqe->vlan_tag);
4134 mp->m_flags |= M_VLANTAG;
4137 QLNX_INC_IPACKETS(ifp);
4138 QLNX_INC_IBYTES(ifp, len);
4140 #ifdef QLNX_SOFT_LRO
4144 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
4146 tcp_lro_queue_mbuf(lro, mp);
4150 if (tcp_lro_rx(lro, mp, 0))
4151 (*ifp->if_input)(ifp, mp);
4153 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
4156 (*ifp->if_input)(ifp, mp);
4160 (*ifp->if_input)(ifp, mp);
4162 #endif /* #ifdef QLNX_SOFT_LRO */
4166 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4168 next_cqe: /* don't consume bd rx buffer */
4169 ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
4170 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
4172 /* CR TPA - revisit how to handle the budget in TPA; perhaps
4173 increase it on "end" */
4174 if (rx_pkt == budget)
4176 } /* repeat while sw_comp_cons != hw_comp_cons... */
4178 /* Update producers */
4179 qlnx_update_rx_prod(p_hwfn, rxq);
4185 * fast path interrupt
4189 qlnx_fp_isr(void *arg)
4191 qlnx_ivec_t *ivec = arg;
4193 struct qlnx_fastpath *fp = NULL;
4194 int idx, lro_enable, tc;
4195 int rx_int = 0, total_rx_count = 0;
4198 lro_enable = ha->ifp->if_capenable & IFCAP_LRO;
4200 if (ha->state != QLNX_STATE_OPEN) {
4204 idx = ivec->rss_idx;
4206 if ((idx = ivec->rss_idx) >= ha->num_rss) {
4207 QL_DPRINT1(ha, (ha->pci_dev, "%s: illegal interrupt[%d]\n",
4209 ha->err_illegal_intr++;
4212 fp = &ha->fp_array[idx];
4215 QL_DPRINT1(ha, (ha->pci_dev, "%s: fp_array[%d] NULL\n",
4219 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
4222 for (tc = 0; tc < ha->num_tc; tc++) {
4223 if (mtx_trylock(&fp->tx_mtx)) {
4224 qlnx_tx_int(ha, fp, fp->txq[tc]);
4225 mtx_unlock(&fp->tx_mtx);
4229 rx_int = qlnx_rx_int(ha, fp, ha->rx_pkt_threshold,
4233 fp->rx_pkts += rx_int;
4234 total_rx_count += rx_int;
4240 #ifdef QLNX_SOFT_LRO
4242 struct lro_ctrl *lro;
4244 lro = &fp->rxq->lro;
4246 if (lro_enable && total_rx_count) {
4248 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
4250 #ifdef QLNX_TRACE_LRO_CNT
4251 if (lro->lro_mbuf_count & ~1023)
4253 else if (lro->lro_mbuf_count & ~511)
4255 else if (lro->lro_mbuf_count & ~255)
4257 else if (lro->lro_mbuf_count & ~127)
4259 else if (lro->lro_mbuf_count & ~63)
4261 #endif /* #ifdef QLNX_TRACE_LRO_CNT */
4263 tcp_lro_flush_all(lro);
4266 struct lro_entry *queued;
4268 while ((!SLIST_EMPTY(&lro->lro_active))) {
4269 queued = SLIST_FIRST(&lro->lro_active);
4270 SLIST_REMOVE_HEAD(&lro->lro_active, \
4272 tcp_lro_flush(lro, queued);
4274 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
4277 #endif /* #ifdef QLNX_SOFT_LRO */
4279 if (fp->fp_taskqueue != NULL)
4280 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
4282 ecore_sb_update_sb_idx(fp->sb_info);
4284 ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
4294 * slow path interrupt processing function
4295 * can be invoked in polled mode or in interrupt mode via taskqueue.
4298 qlnx_sp_isr(void *arg)
4300 struct ecore_hwfn *p_hwfn;
4305 ha = (qlnx_host_t *)p_hwfn->p_dev;
4307 ha->sp_interrupts++;
4309 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
4311 ecore_int_sp_dpc(p_hwfn);
4313 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
4318 /*****************************************************************************
4319 * Support Functions for DMA'able Memory
4320 *****************************************************************************/
4323 qlnx_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
4325 *((bus_addr_t *)arg) = 0;
4328 printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
4332 *((bus_addr_t *)arg) = segs[0].ds_addr;
4338 qlnx_alloc_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
4346 ret = bus_dma_tag_create(
4347 ha->parent_tag,/* parent */
4349 ((bus_size_t)(1ULL << 32)),/* boundary */
4350 BUS_SPACE_MAXADDR, /* lowaddr */
4351 BUS_SPACE_MAXADDR, /* highaddr */
4352 NULL, NULL, /* filter, filterarg */
4353 dma_buf->size, /* maxsize */
4355 dma_buf->size, /* maxsegsize */
4357 NULL, NULL, /* lockfunc, lockarg */
4362 (dev, "%s: could not create dma tag\n", __func__));
4363 goto qlnx_alloc_dmabuf_exit;
4365 ret = bus_dmamem_alloc(dma_buf->dma_tag,
4366 (void **)&dma_buf->dma_b,
4367 (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
4370 bus_dma_tag_destroy(dma_buf->dma_tag);
4372 (dev, "%s: bus_dmamem_alloc failed\n", __func__));
4373 goto qlnx_alloc_dmabuf_exit;
4376 ret = bus_dmamap_load(dma_buf->dma_tag,
4380 qlnx_dmamap_callback,
4381 &b_addr, BUS_DMA_NOWAIT);
4383 if (ret || !b_addr) {
4384 bus_dma_tag_destroy(dma_buf->dma_tag);
4385 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
4388 goto qlnx_alloc_dmabuf_exit;
4391 dma_buf->dma_addr = b_addr;
4393 qlnx_alloc_dmabuf_exit:
4399 qlnx_free_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
4401 bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
4402 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
4403 bus_dma_tag_destroy(dma_buf->dma_tag);
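/*
 * ecore's OSAL free callback only passes back the virtual and physical
 * addresses, so qlnx_dma_alloc_coherent() allocates one extra page and
 * stashes a copy of the qlnx_dma_t (tag, map, ...) right after the
 * requested region; qlnx_dma_free_coherent() recovers it from there.
 */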
4408 qlnx_dma_alloc_coherent(void *ecore_dev, bus_addr_t *phys, uint32_t size)
4415 ha = (qlnx_host_t *)ecore_dev;
4418 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
4420 memset(&dma_buf, 0, sizeof (qlnx_dma_t));
4422 dma_buf.size = size + PAGE_SIZE;
4423 dma_buf.alignment = 8;
4425 if (qlnx_alloc_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf) != 0)
4427 bzero((uint8_t *)dma_buf.dma_b, dma_buf.size);
4429 *phys = dma_buf.dma_addr;
4431 dma_p = (qlnx_dma_t *)((uint8_t *)dma_buf.dma_b + size);
4433 memcpy(dma_p, &dma_buf, sizeof(qlnx_dma_t));
4435 QL_DPRINT5(ha, (dev, "%s: [%p %p %p %p 0x%08x ]\n", __func__,
4436 (void *)dma_buf.dma_map, (void *)dma_buf.dma_tag,
4437 dma_buf.dma_b, (void *)dma_buf.dma_addr, size));
4439 return (dma_buf.dma_b);
4443 qlnx_dma_free_coherent(void *ecore_dev, void *v_addr, bus_addr_t phys,
4446 qlnx_dma_t dma_buf, *dma_p;
4450 ha = (qlnx_host_t *)ecore_dev;
4456 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
4458 dma_p = (qlnx_dma_t *)((uint8_t *)v_addr + size);
4460 QL_DPRINT5(ha, (dev, "%s: [%p %p %p %p 0x%08x ]\n", __func__,
4461 (void *)dma_p->dma_map, (void *)dma_p->dma_tag,
4462 dma_p->dma_b, (void *)dma_p->dma_addr, size));
4466 qlnx_free_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf);
4471 qlnx_alloc_parent_dma_tag(qlnx_host_t *ha)
4479 * Allocate parent DMA Tag
4481 ret = bus_dma_tag_create(
4482 bus_get_dma_tag(dev), /* parent */
4483 1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
4484 BUS_SPACE_MAXADDR, /* lowaddr */
4485 BUS_SPACE_MAXADDR, /* highaddr */
4486 NULL, NULL, /* filter, filterarg */
4487 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
4489 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
4491 NULL, NULL, /* lockfunc, lockarg */
4495 QL_DPRINT1(ha, (dev, "%s: could not create parent dma tag\n",
4500 ha->flags.parent_tag = 1;
4506 qlnx_free_parent_dma_tag(qlnx_host_t *ha)
4508 if (ha->parent_tag != NULL) {
4509 bus_dma_tag_destroy(ha->parent_tag);
4510 ha->parent_tag = NULL;
4516 qlnx_alloc_tx_dma_tag(qlnx_host_t *ha)
4518 if (bus_dma_tag_create(NULL, /* parent */
4519 1, 0, /* alignment, bounds */
4520 BUS_SPACE_MAXADDR, /* lowaddr */
4521 BUS_SPACE_MAXADDR, /* highaddr */
4522 NULL, NULL, /* filter, filterarg */
4523 QLNX_MAX_TSO_FRAME_SIZE, /* maxsize */
4524 QLNX_MAX_SEGMENTS, /* nsegments */
4525 (PAGE_SIZE * 4), /* maxsegsize */
4526 BUS_DMA_ALLOCNOW, /* flags */
4527 NULL, /* lockfunc */
4528 NULL, /* lockfuncarg */
4531 QL_DPRINT1(ha, (ha->pci_dev, "%s: tx_tag alloc failed\n",
4540 qlnx_free_tx_dma_tag(qlnx_host_t *ha)
4542 if (ha->tx_tag != NULL) {
4543 bus_dma_tag_destroy(ha->tx_tag);
4550 qlnx_alloc_rx_dma_tag(qlnx_host_t *ha)
4552 if (bus_dma_tag_create(NULL, /* parent */
4553 1, 0, /* alignment, bounds */
4554 BUS_SPACE_MAXADDR, /* lowaddr */
4555 BUS_SPACE_MAXADDR, /* highaddr */
4556 NULL, NULL, /* filter, filterarg */
4557 MJUM9BYTES, /* maxsize */
4559 MJUM9BYTES, /* maxsegsize */
4560 BUS_DMA_ALLOCNOW, /* flags */
4561 NULL, /* lockfunc */
4562 NULL, /* lockfuncarg */
4565 QL_DPRINT1(ha, (ha->pci_dev, "%s: rx_tag alloc failed\n",
4574 qlnx_free_rx_dma_tag(qlnx_host_t *ha)
4576 if (ha->rx_tag != NULL) {
4577 bus_dma_tag_destroy(ha->rx_tag);
4583 /*********************************
4584 * Exported functions
4585 *********************************/
4587 qlnx_pci_bus_get_bar_size(void *ecore_dev, uint8_t bar_id)
4591 bar_id = bar_id * 2;
4593 bar_size = bus_get_resource_count(((qlnx_host_t *)ecore_dev)->pci_dev,
4601 qlnx_pci_read_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t *reg_value)
4603 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
4609 qlnx_pci_read_config_word(void *ecore_dev, uint32_t pci_reg,
4610 uint16_t *reg_value)
4612 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
4618 qlnx_pci_read_config_dword(void *ecore_dev, uint32_t pci_reg,
4619 uint32_t *reg_value)
4621 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
4627 qlnx_pci_write_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t reg_value)
4629 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
4630 pci_reg, reg_value, 1);
4635 qlnx_pci_write_config_word(void *ecore_dev, uint32_t pci_reg,
4638 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
4639 pci_reg, reg_value, 2);
4644 qlnx_pci_write_config_dword(void *ecore_dev, uint32_t pci_reg,
4647 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
4648 pci_reg, reg_value, 4);
4654 qlnx_pci_find_capability(void *ecore_dev, int cap)
4658 if (pci_find_cap(((qlnx_host_t *)ecore_dev)->pci_dev, PCIY_EXPRESS,
4662 QL_DPRINT1(((qlnx_host_t *)ecore_dev),
4663 (((qlnx_host_t *)ecore_dev)->pci_dev,
4664 "%s: failed\n", __func__));
4670 qlnx_reg_rd32(void *hwfn, uint32_t reg_addr)
4673 struct ecore_dev *cdev;
4674 struct ecore_hwfn *p_hwfn;
4678 cdev = p_hwfn->p_dev;
4680 reg_addr = (uint32_t)((uint8_t *)(p_hwfn->regview) -
4681 (uint8_t *)(cdev->regview)) + reg_addr;
4683 data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, reg_addr);
4689 qlnx_reg_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
4691 struct ecore_dev *cdev;
4692 struct ecore_hwfn *p_hwfn;
4696 cdev = p_hwfn->p_dev;
4698 reg_addr = (uint32_t)((uint8_t *)(p_hwfn->regview) -
4699 (uint8_t *)(cdev->regview)) + reg_addr;
4701 bus_write_4(((qlnx_host_t *)cdev)->pci_reg, reg_addr, value);
4707 qlnx_reg_wr16(void *hwfn, uint32_t reg_addr, uint16_t value)
4709 struct ecore_dev *cdev;
4710 struct ecore_hwfn *p_hwfn;
4714 cdev = p_hwfn->p_dev;
4716 reg_addr = (uint32_t)((uint8_t *)(p_hwfn->regview) -
4717 (uint8_t *)(cdev->regview)) + reg_addr;
4719 bus_write_2(((qlnx_host_t *)cdev)->pci_reg, reg_addr, value);
4725 qlnx_dbell_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
4727 struct ecore_dev *cdev;
4728 struct ecore_hwfn *p_hwfn;
4732 cdev = p_hwfn->p_dev;
4734 reg_addr = (uint32_t)((uint8_t *)(p_hwfn->doorbells) -
4735 (uint8_t *)(cdev->doorbells)) + reg_addr;
4737 bus_write_4(((qlnx_host_t *)cdev)->pci_dbells, reg_addr, value);
4743 qlnx_direct_reg_rd32(void *p_hwfn, uint32_t *reg_addr)
4747 struct ecore_dev *cdev;
4749 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
4750 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
4752 data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, offset);
4758 qlnx_direct_reg_wr32(void *p_hwfn, void *reg_addr, uint32_t value)
4761 struct ecore_dev *cdev;
4763 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
4764 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
4766 bus_write_4(((qlnx_host_t *)cdev)->pci_reg, offset, value);
4772 qlnx_zalloc(uint32_t size)
4776 va = malloc((unsigned long)size, M_QLNXBUF, M_NOWAIT);
4778 return ((void *)va);
4782 qlnx_barrier(void *p_hwfn)
4786 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
4787 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_WRITE);
4791 qlnx_link_update(void *p_hwfn)
4794 int prev_link_state;
4796 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
4798 qlnx_fill_link(p_hwfn, &ha->if_link);
4800 prev_link_state = ha->link_up;
4801 ha->link_up = ha->if_link.link_up;
4803 if (prev_link_state != ha->link_up) {
4805 if_link_state_change(ha->ifp, LINK_STATE_UP);
4807 if_link_state_change(ha->ifp, LINK_STATE_DOWN);
4814 qlnx_fill_link(struct ecore_hwfn *hwfn, struct qlnx_link_output *if_link)
4816 struct ecore_mcp_link_params link_params;
4817 struct ecore_mcp_link_state link_state;
4819 memset(if_link, 0, sizeof(*if_link));
4820 memset(&link_params, 0, sizeof(struct ecore_mcp_link_params));
4821 memset(&link_state, 0, sizeof(struct ecore_mcp_link_state));
4823 /* Prepare source inputs */
4824 /* we only deal with physical functions */
4825 memcpy(&link_params, ecore_mcp_get_link_params(hwfn),
4826 sizeof(link_params));
4827 memcpy(&link_state, ecore_mcp_get_link_state(hwfn),
4828 sizeof(link_state));
4830 ecore_mcp_get_media_type(hwfn->p_dev, &if_link->media_type);
4832 /* Set the link parameters to pass to the protocol driver */
4833 if (link_state.link_up) {
4834 if_link->link_up = true;
4835 if_link->speed = link_state.speed;
4838 if_link->supported_caps = QLNX_LINK_CAP_FIBRE;
4840 if (link_params.speed.autoneg)
4841 if_link->supported_caps |= QLNX_LINK_CAP_Autoneg;
4843 if (link_params.pause.autoneg ||
4844 (link_params.pause.forced_rx && link_params.pause.forced_tx))
4845 if_link->supported_caps |= QLNX_LINK_CAP_Asym_Pause;
4847 if (link_params.pause.autoneg || link_params.pause.forced_rx ||
4848 link_params.pause.forced_tx)
4849 if_link->supported_caps |= QLNX_LINK_CAP_Pause;
4851 if (link_params.speed.advertised_speeds &
4852 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
4853 if_link->supported_caps |= QLNX_LINK_CAP_1000baseT_Half |
4854 QLNX_LINK_CAP_1000baseT_Full;
4856 if (link_params.speed.advertised_speeds &
4857 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
4858 if_link->supported_caps |= QLNX_LINK_CAP_10000baseKR_Full;
4860 if (link_params.speed.advertised_speeds &
4861 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
4862 if_link->supported_caps |= QLNX_LINK_CAP_25000baseKR_Full;
4864 if (link_params.speed.advertised_speeds &
4865 NVM_CFG1_PORT_DRV_LINK_SPEED_40G)
4866 if_link->supported_caps |= QLNX_LINK_CAP_40000baseLR4_Full;
4868 if (link_params.speed.advertised_speeds &
4869 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
4870 if_link->supported_caps |= QLNX_LINK_CAP_50000baseKR2_Full;
4872 if (link_params.speed.advertised_speeds &
4873 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
4874 if_link->supported_caps |= QLNX_LINK_CAP_100000baseKR4_Full;
4876 if_link->advertised_caps = if_link->supported_caps;
4878 if_link->autoneg = link_params.speed.autoneg;
4879 if_link->duplex = QLNX_LINK_DUPLEX;
4881 /* Link partner capabilities */
4883 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_HD)
4884 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Half;
4886 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_FD)
4887 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Full;
4889 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_10G)
4890 if_link->link_partner_caps |= QLNX_LINK_CAP_10000baseKR_Full;
4892 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_25G)
4893 if_link->link_partner_caps |= QLNX_LINK_CAP_25000baseKR_Full;
4895 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_40G)
4896 if_link->link_partner_caps |= QLNX_LINK_CAP_40000baseLR4_Full;
4898 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_50G)
4899 if_link->link_partner_caps |= QLNX_LINK_CAP_50000baseKR2_Full;
4901 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_100G)
4902 if_link->link_partner_caps |= QLNX_LINK_CAP_100000baseKR4_Full;
4904 if (link_state.an_complete)
4905 if_link->link_partner_caps |= QLNX_LINK_CAP_Autoneg;
4907 if (link_state.partner_adv_pause)
4908 if_link->link_partner_caps |= QLNX_LINK_CAP_Pause;
4910 if ((link_state.partner_adv_pause ==
4911 ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE) ||
4912 (link_state.partner_adv_pause ==
4913 ECORE_LINK_PARTNER_BOTH_PAUSE))
4914 if_link->link_partner_caps |= QLNX_LINK_CAP_Asym_Pause;
4920 qlnx_nic_setup(struct ecore_dev *cdev, struct ecore_pf_params *func_params)
4924 for (i = 0; i < cdev->num_hwfns; i++) {
4925 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
4926 p_hwfn->pf_params = *func_params;
4929 rc = ecore_resc_alloc(cdev);
4931 goto qlnx_nic_setup_exit;
4933 ecore_resc_setup(cdev);
4935 qlnx_nic_setup_exit:
4941 qlnx_nic_start(struct ecore_dev *cdev)
4944 struct ecore_hw_init_params params;
4946 bzero(¶ms, sizeof (struct ecore_hw_init_params));
4948 params.p_tunn = NULL;
4949 params.b_hw_start = true;
4950 params.int_mode = cdev->int_mode;
4951 params.allow_npar_tx_switch = true;
4952 params.bin_fw_data = NULL;
4954 rc = ecore_hw_init(cdev, &params);
4956 ecore_resc_free(cdev);
4964 qlnx_slowpath_start(qlnx_host_t *ha)
4966 struct ecore_dev *cdev;
4967 struct ecore_pf_params pf_params;
4970 memset(&pf_params, 0, sizeof(struct ecore_pf_params));
4971 pf_params.eth_pf_params.num_cons =
4972 (ha->num_rss) * (ha->num_tc + 1);
4976 rc = qlnx_nic_setup(cdev, &pf_params);
4978 goto qlnx_slowpath_start_exit;
4980 cdev->int_mode = ECORE_INT_MODE_MSIX;
4981 cdev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE;
4983 #ifdef QLNX_MAX_COALESCE
4984 cdev->rx_coalesce_usecs = 255;
4985 cdev->tx_coalesce_usecs = 255;
4988 rc = qlnx_nic_start(cdev);
4990 ha->rx_coalesce_usecs = cdev->rx_coalesce_usecs;
4991 ha->tx_coalesce_usecs = cdev->tx_coalesce_usecs;
4993 qlnx_slowpath_start_exit:
4999 qlnx_slowpath_stop(qlnx_host_t *ha)
5001 struct ecore_dev *cdev;
5002 device_t dev = ha->pci_dev;
5007 ecore_hw_stop(cdev);
5009 for (i = 0; i < ha->cdev.num_hwfns; i++) {
5011 if (ha->sp_handle[i])
5012 (void)bus_teardown_intr(dev, ha->sp_irq[i],
5015 ha->sp_handle[i] = NULL;
5018 (void) bus_release_resource(dev, SYS_RES_IRQ,
5019 ha->sp_irq_rid[i], ha->sp_irq[i]);
5020 ha->sp_irq[i] = NULL;
5023 ecore_resc_free(cdev);
5029 qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
5030 char ver_str[VER_SIZE])
5034 memcpy(cdev->name, name, NAME_SIZE);
5036 for_each_hwfn(cdev, i) {
5037 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
5040 cdev->drv_type = DRV_ID_DRV_TYPE_FREEBSD;
5046 qlnx_get_protocol_stats(void *cdev, int proto_type, void *proto_stats)
5048 enum ecore_mcp_protocol_type type;
5049 union ecore_mcp_protocol_stats *stats;
5050 struct ecore_eth_stats eth_stats;
5053 dev = ((qlnx_host_t *)cdev)->pci_dev;
5054 stats = proto_stats;
5058 case ECORE_MCP_LAN_STATS:
5059 ecore_get_vport_stats((struct ecore_dev *)cdev, &eth_stats);
5060 stats->lan_stats.ucast_rx_pkts = eth_stats.common.rx_ucast_pkts;
5061 stats->lan_stats.ucast_tx_pkts = eth_stats.common.tx_ucast_pkts;
5062 stats->lan_stats.fcs_err = -1;
5066 ((qlnx_host_t *)cdev)->err_get_proto_invalid_type++;
5068 QL_DPRINT1(((qlnx_host_t *)cdev),
5069 (dev, "%s: invalid protocol type 0x%x\n", __func__,
5077 qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver)
5079 struct ecore_hwfn *p_hwfn;
5080 struct ecore_ptt *p_ptt;
5082 p_hwfn = &ha->cdev.hwfns[0];
5083 p_ptt = ecore_ptt_acquire(p_hwfn);
5085 if (p_ptt == NULL) {
5086 QL_DPRINT1(ha, (ha->pci_dev,
5087 "%s : ecore_ptt_acquire failed\n", __func__));
5090 ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, mfw_ver, NULL);
5092 ecore_ptt_release(p_hwfn, p_ptt);
5098 qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size)
5100 struct ecore_hwfn *p_hwfn;
5101 struct ecore_ptt *p_ptt;
5103 p_hwfn = &ha->cdev.hwfns[0];
5104 p_ptt = ecore_ptt_acquire(p_hwfn);
5106 if (p_ptt == NULL) {
5107 QL_DPRINT1(ha, (ha->pci_dev,
5108 "%s : ecore_ptt_acquire failed\n", __func__));
5111 ecore_mcp_get_flash_size(p_hwfn, p_ptt, flash_size);
5113 ecore_ptt_release(p_hwfn, p_ptt);
5119 qlnx_alloc_mem_arrays(qlnx_host_t *ha)
5121 struct ecore_dev *cdev;
5125 bzero(&ha->txq_array[0], (sizeof(struct qlnx_tx_queue) * QLNX_MAX_RSS));
5126 bzero(&ha->rxq_array[0], (sizeof(struct qlnx_rx_queue) * QLNX_MAX_RSS));
5127 bzero(&ha->sb_array[0], (sizeof(struct ecore_sb_info) * QLNX_MAX_RSS));
static void
qlnx_init_fp(qlnx_host_t *ha)
{
	int rss_id, txq_array_index, tc;

	for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {

		struct qlnx_fastpath *fp = &ha->fp_array[rss_id];

		fp->rss_id = rss_id;
		fp->sb_info = &ha->sb_array[rss_id];
		fp->rxq = &ha->rxq_array[rss_id];
		fp->rxq->rxq_id = rss_id;

		for (tc = 0; tc < ha->num_tc; tc++) {
			txq_array_index = tc * ha->num_rss + rss_id;
			fp->txq[tc] = &ha->txq_array[txq_array_index];
			fp->txq[tc]->index = txq_array_index;
		}

		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", qlnx_name_str,
			rss_id);

		/* reset all the statistics counters */

		fp->tx_pkts_processed = 0;
		fp->tx_pkts_freed = 0;
		fp->tx_pkts_transmitted = 0;
		fp->tx_pkts_completed = 0;
		fp->tx_lso_wnd_min_len = 0;
		fp->tx_nsegs_gt_elem_left = 0;
		fp->tx_tso_max_nsegs = 0;
		fp->tx_tso_min_nsegs = 0;
		fp->err_tx_nsegs_gt_elem_left = 0;
		fp->err_tx_dmamap_create = 0;
		fp->err_tx_defrag_dmamap_load = 0;
		fp->err_tx_non_tso_max_seg = 0;
		fp->err_tx_dmamap_load = 0;
		fp->err_tx_defrag = 0;
		fp->err_tx_free_pkt_null = 0;
		fp->err_tx_cons_idx_conflict = 0;

		fp->err_m_getcl = 0;
		fp->err_m_getjcl = 0;
	}
	return;
}
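/*
 * The Tx queue array is laid out TC-major: the txq_array index is
 * (tc * num_rss + rss_id).  For example, with num_rss = 4 and num_tc = 2,
 * fastpath 2 owns txq_array[2] for TC 0 and txq_array[6] for TC 1.
 */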
static void
qlnx_free_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info)
{
	struct ecore_dev *cdev;

	cdev = &ha->cdev;

	if (sb_info->sb_virt) {
		OSAL_DMA_FREE_COHERENT(cdev, ((void *)sb_info->sb_virt),
			(sb_info->sb_phys), (sizeof(*sb_info->sb_virt)));
		sb_info->sb_virt = NULL;
	}
	return;
}
static int
qlnx_sb_init(struct ecore_dev *cdev, struct ecore_sb_info *sb_info,
	void *sb_virt_addr, bus_addr_t sb_phy_addr, u16 sb_id)
{
	struct ecore_hwfn *p_hwfn;
	int hwfn_index, rc;
	u16 rel_sb_id;

	hwfn_index = sb_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	rel_sb_id = sb_id / cdev->num_hwfns;

	QL_DPRINT2(((qlnx_host_t *)cdev), (((qlnx_host_t *)cdev)->pci_dev,
		"%s: hwfn_index = %d p_hwfn = %p sb_id = 0x%x rel_sb_id = 0x%x "
		"sb_info = %p sb_virt_addr = %p sb_phy_addr = %p\n",
		__func__, hwfn_index, p_hwfn, sb_id, rel_sb_id, sb_info,
		sb_virt_addr, (void *)sb_phy_addr));

	rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
			sb_virt_addr, sb_phy_addr, rel_sb_id);

	return (rc);
}
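/*
 * On CMT (2-engine) devices status blocks are spread round-robin across the
 * hw-functions: sb_id selects the engine via (sb_id % num_hwfns) and the
 * index within that engine via (sb_id / num_hwfns).  With two hwfns, sb_id 5
 * therefore lands on hwfn 1 as relative status block 2.
 */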
/* This function allocates fast-path status block memory */
static int
qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info, u16 sb_id)
{
	struct status_block *sb_virt;
	bus_addr_t sb_phys;
	int rc;
	uint32_t size;
	struct ecore_dev *cdev;

	cdev = &ha->cdev;

	size = sizeof(*sb_virt);
	sb_virt = OSAL_DMA_ALLOC_COHERENT(cdev, (&sb_phys), size);

	if (!sb_virt) {
		QL_DPRINT1(ha, (ha->pci_dev,
			"%s: Status block allocation failed\n", __func__));
		return (-ENOMEM);
	}

	rc = qlnx_sb_init(cdev, sb_info, sb_virt, sb_phys, sb_id);
	if (rc) {
		QL_DPRINT1(ha, (ha->pci_dev, "%s: failed\n", __func__));
		OSAL_DMA_FREE_COHERENT(cdev, sb_virt, sb_phys, size);
	}

	return (rc);
}
static void
qlnx_free_rx_buffers(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
{
	int i;
	struct sw_rx_data *rx_buf;

	for (i = 0; i < rxq->num_rx_buffers; i++) {

		rx_buf = &rxq->sw_rx_ring[i];

		if (rx_buf->data != NULL) {
			if (rx_buf->map != NULL) {
				bus_dmamap_unload(ha->rx_tag, rx_buf->map);
				bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
				rx_buf->map = NULL;
			}
			m_freem(rx_buf->data);
			rx_buf->data = NULL;
		}
	}
	return;
}
static void
qlnx_free_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
{
	struct ecore_dev *cdev;
	int i;

	cdev = &ha->cdev;

	qlnx_free_rx_buffers(ha, rxq);

	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
		qlnx_free_tpa_mbuf(ha, &rxq->tpa_info[i]);
		if (rxq->tpa_info[i].mpf != NULL)
			m_freem(rxq->tpa_info[i].mpf);
	}

	bzero((void *)&rxq->sw_rx_ring[0],
		(sizeof (struct sw_rx_data) * RX_RING_SIZE));

	/* Free the real RQ ring used by FW */
	if (rxq->rx_bd_ring.p_virt_addr) {
		ecore_chain_free(cdev, &rxq->rx_bd_ring);
		rxq->rx_bd_ring.p_virt_addr = NULL;
	}

	/* Free the real completion ring used by FW */
	if (rxq->rx_comp_ring.p_virt_addr &&
			rxq->rx_comp_ring.pbl_sp.p_virt_table) {
		ecore_chain_free(cdev, &rxq->rx_comp_ring);
		rxq->rx_comp_ring.p_virt_addr = NULL;
		rxq->rx_comp_ring.pbl_sp.p_virt_table = NULL;
	}

#ifdef QLNX_SOFT_LRO
	{
		struct lro_ctrl *lro;

		lro = &rxq->lro;
		tcp_lro_free(lro);
	}
#endif /* #ifdef QLNX_SOFT_LRO */
	return;
}
static int
qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
{
	register struct mbuf *mp;
	uint16_t rx_buf_size;
	struct sw_rx_data *sw_rx_data;
	struct eth_rx_bd *rx_bd;
	dma_addr_t dma_addr;
	bus_dmamap_t map;
	bus_dma_segment_t segs[1];
	int nsegs;
	int ret;
	struct ecore_dev *cdev;

	cdev = &ha->cdev;

	rx_buf_size = rxq->rx_buf_size;

	mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);

	if (mp == NULL) {
		QL_DPRINT1(ha, (ha->pci_dev,
			"%s : Failed to allocate Rx data\n", __func__));
		return (-ENOMEM);
	}

	mp->m_len = mp->m_pkthdr.len = rx_buf_size;

	map = (bus_dmamap_t)0;

	ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
			BUS_DMA_NOWAIT);
	dma_addr = segs[0].ds_addr;

	if (ret || !dma_addr || (nsegs != 1)) {
		m_freem(mp);
		QL_DPRINT1(ha, (ha->pci_dev,
			"%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
			__func__, ret, (long long unsigned int)dma_addr,
			nsegs));
		return (-ENOMEM);
	}

	sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
	sw_rx_data->data = mp;
	sw_rx_data->dma_addr = dma_addr;
	sw_rx_data->map = map;

	/* Advance PROD and get BD pointer */
	rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
	rx_bd->addr.hi = htole32(U64_HI(dma_addr));
	rx_bd->addr.lo = htole32(U64_LO(dma_addr));
	bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);

	rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);

	return (0);
}
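/*
 * The software producer index wraps with a mask rather than a modulo, which
 * assumes RX_RING_SIZE is a power of two; (prod + 1) & (RX_RING_SIZE - 1)
 * is then equivalent to (prod + 1) % RX_RING_SIZE.
 */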
static int
qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
	struct qlnx_agg_info *tpa)
{
	struct mbuf *mp;
	dma_addr_t dma_addr;
	bus_dmamap_t map;
	bus_dma_segment_t segs[1];
	int nsegs;
	int ret;
	struct sw_rx_data *rx_buf;

	mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);

	if (mp == NULL) {
		QL_DPRINT1(ha, (ha->pci_dev,
			"%s : Failed to allocate Rx data\n", __func__));
		return (-ENOMEM);
	}

	mp->m_len = mp->m_pkthdr.len = rx_buf_size;

	map = (bus_dmamap_t)0;

	ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
			BUS_DMA_NOWAIT);
	dma_addr = segs[0].ds_addr;

	if (ret || !dma_addr || (nsegs != 1)) {
		m_freem(mp);
		QL_DPRINT1(ha, (ha->pci_dev,
			"%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
			__func__, ret, (long long unsigned int)dma_addr,
			nsegs));
		return (-ENOMEM);
	}

	rx_buf = &tpa->rx_buf;

	memset(rx_buf, 0, sizeof (struct sw_rx_data));

	rx_buf->data = mp;
	rx_buf->dma_addr = dma_addr;
	rx_buf->map = map;

	bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);

	return (0);
}
static void
qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa)
{
	struct sw_rx_data *rx_buf;

	rx_buf = &tpa->rx_buf;

	if (rx_buf->data != NULL) {
		if (rx_buf->map != NULL) {
			bus_dmamap_unload(ha->rx_tag, rx_buf->map);
			bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
			rx_buf->map = NULL;
		}
		m_freem(rx_buf->data);
		rx_buf->data = NULL;
	}
	return;
}
/* This function allocates all memory needed per Rx queue */
static int
qlnx_alloc_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
{
	int i, rc, num_allocated;
	struct ifnet *ifp;
	struct ecore_dev *cdev;

	cdev = &ha->cdev;
	ifp = ha->ifp;

	rxq->num_rx_buffers = RX_RING_SIZE;

	rxq->rx_buf_size = ha->rx_buf_size;

	/* Allocate the parallel driver ring for Rx buffers */
	bzero((void *)&rxq->sw_rx_ring[0],
		(sizeof (struct sw_rx_data) * RX_RING_SIZE));

	/* Allocate FW Rx ring */

	rc = ecore_chain_alloc(cdev,
			ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
			ECORE_CHAIN_MODE_NEXT_PTR,
			ECORE_CHAIN_CNT_TYPE_U16,
			RX_RING_SIZE,
			sizeof(struct eth_rx_bd),
			&rxq->rx_bd_ring, NULL);

	if (rc)
		goto err;

	/* Allocate FW completion ring */
	rc = ecore_chain_alloc(cdev,
			ECORE_CHAIN_USE_TO_CONSUME,
			ECORE_CHAIN_MODE_PBL,
			ECORE_CHAIN_CNT_TYPE_U16,
			RX_RING_SIZE,
			sizeof(union eth_rx_cqe),
			&rxq->rx_comp_ring, NULL);

	if (rc)
		goto err;

	/* Allocate buffers for the Rx ring */

	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
		rc = qlnx_alloc_tpa_mbuf(ha, rxq->rx_buf_size,
			&rxq->tpa_info[i]);
		if (rc)
			break;
	}

	for (i = 0; i < rxq->num_rx_buffers; i++) {
		rc = qlnx_alloc_rx_buffer(ha, rxq);
		if (rc)
			break;
	}
	num_allocated = i;
	if (!num_allocated) {
		QL_DPRINT1(ha, (ha->pci_dev,
			"%s: Rx buffers allocation failed\n", __func__));
		goto err;
	} else if (num_allocated < rxq->num_rx_buffers) {
		QL_DPRINT1(ha, (ha->pci_dev,
			"%s: Allocated less buffers than"
			" desired (%d allocated)\n", __func__, num_allocated));
	}

#ifdef QLNX_SOFT_LRO
	{
		struct lro_ctrl *lro;

		lro = &rxq->lro;

#if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
		if (tcp_lro_init_args(lro, ifp, 0, rxq->num_rx_buffers)) {
			QL_DPRINT1(ha, (ha->pci_dev,
				"%s: tcp_lro_init[%d] failed\n",
				__func__, rxq->rxq_id));
			goto err;
		}
#else
		if (tcp_lro_init(lro)) {
			QL_DPRINT1(ha, (ha->pci_dev,
				"%s: tcp_lro_init[%d] failed\n",
				__func__, rxq->rxq_id));
			goto err;
		}
#endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
	}
#endif /* #ifdef QLNX_SOFT_LRO */

	return (0);

err:
	qlnx_free_mem_rxq(ha, rxq);
	return (-ENOMEM);
}
static void
qlnx_free_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
	struct qlnx_tx_queue *txq)
{
	struct ecore_dev *cdev;

	cdev = &ha->cdev;

	bzero((void *)&txq->sw_tx_ring[0],
		(sizeof (struct sw_tx_bd) * TX_RING_SIZE));

	/* Free the real Tx ring used by FW */
	if (txq->tx_pbl.p_virt_addr) {
		ecore_chain_free(cdev, &txq->tx_pbl);
		txq->tx_pbl.p_virt_addr = NULL;
	}
	return;
}
/* This function allocates all memory needed per Tx queue */
static int
qlnx_alloc_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
	struct qlnx_tx_queue *txq)
{
	int ret = ECORE_SUCCESS;
	union eth_tx_bd_types *p_virt;
	struct ecore_dev *cdev;

	cdev = &ha->cdev;

	bzero((void *)&txq->sw_tx_ring[0],
		(sizeof (struct sw_tx_bd) * TX_RING_SIZE));

	/* Allocate the real Tx ring to be used by FW */
	ret = ecore_chain_alloc(cdev,
			ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
			ECORE_CHAIN_MODE_PBL,
			ECORE_CHAIN_CNT_TYPE_U16,
			TX_RING_SIZE,
			sizeof(*p_virt),
			&txq->tx_pbl, NULL);

	if (ret != ECORE_SUCCESS) {
		goto err;
	}

	txq->num_tx_buffers = TX_RING_SIZE;

	return (0);

err:
	qlnx_free_mem_txq(ha, fp, txq);
	return (-ENOMEM);
}
static void
qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
{
	struct mbuf *mp;
	struct ifnet *ifp = ha->ifp;

	if (mtx_initialized(&fp->tx_mtx)) {

		if (fp->tx_br != NULL) {

			mtx_lock(&fp->tx_mtx);

			while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
				fp->tx_pkts_freed++;
				m_freem(mp);
			}

			mtx_unlock(&fp->tx_mtx);

			buf_ring_free(fp->tx_br, M_DEVBUF);
			fp->tx_br = NULL;
		}
		mtx_destroy(&fp->tx_mtx);
	}
	return;
}
static void
qlnx_free_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
{
	int tc;

	qlnx_free_mem_sb(ha, fp->sb_info);

	qlnx_free_mem_rxq(ha, fp->rxq);

	for (tc = 0; tc < ha->num_tc; tc++)
		qlnx_free_mem_txq(ha, fp, fp->txq[tc]);

	return;
}
static int
qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
{
	snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
		"qlnx%d_fp%d_tx_mq_lock", ha->dev_unit, fp->rss_id);

	mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);

	fp->tx_br = buf_ring_alloc(TX_RING_SIZE, M_DEVBUF,
				M_NOWAIT, &fp->tx_mtx);
	if (fp->tx_br == NULL) {
		QL_DPRINT1(ha, (ha->pci_dev, "buf_ring_alloc failed for "
			" fp[%d, %d]\n", ha->dev_unit, fp->rss_id));
		return (-ENOMEM);
	}
	return (0);
}
static int
qlnx_alloc_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
{
	int rc, tc;

	rc = qlnx_alloc_mem_sb(ha, fp->sb_info, fp->rss_id);
	if (rc)
		goto err;

	if (ha->rx_jumbo_buf_eq_mtu) {
		if (ha->max_frame_size <= MCLBYTES)
			ha->rx_buf_size = MCLBYTES;
		else if (ha->max_frame_size <= MJUMPAGESIZE)
			ha->rx_buf_size = MJUMPAGESIZE;
		else if (ha->max_frame_size <= MJUM9BYTES)
			ha->rx_buf_size = MJUM9BYTES;
		else if (ha->max_frame_size <= MJUM16BYTES)
			ha->rx_buf_size = MJUM16BYTES;
	} else {
		if (ha->max_frame_size <= MCLBYTES)
			ha->rx_buf_size = MCLBYTES;
		else
			ha->rx_buf_size = MJUMPAGESIZE;
	}

	rc = qlnx_alloc_mem_rxq(ha, fp->rxq);
	if (rc)
		goto err;

	for (tc = 0; tc < ha->num_tc; tc++) {
		rc = qlnx_alloc_mem_txq(ha, fp, fp->txq[tc]);
		if (rc)
			goto err;
	}

	return (0);

err:
	qlnx_free_mem_fp(ha, fp);
	return (-ENOMEM);
}
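/*
 * Rx buffer sizing: when rx_jumbo_buf_eq_mtu is set, the mbuf cluster size
 * is matched to the maximum frame size (2K/4K/9K/16K clusters); otherwise
 * the buffer is capped at a page-sized (4K) cluster.
 */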
static void
qlnx_free_mem_load(qlnx_host_t *ha)
{
	int i;
	struct ecore_dev *cdev;

	cdev = &ha->cdev;

	for (i = 0; i < ha->num_rss; i++) {
		struct qlnx_fastpath *fp = &ha->fp_array[i];

		qlnx_free_mem_fp(ha, fp);
	}
	return;
}
static int
qlnx_alloc_mem_load(qlnx_host_t *ha)
{
	int rc = 0, rss_id;

	for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
		struct qlnx_fastpath *fp = &ha->fp_array[rss_id];

		rc = qlnx_alloc_mem_fp(ha, fp);
		if (rc)
			break;
	}
	return (rc);
}
static int
qlnx_start_vport(struct ecore_dev *cdev,
		u8 vport_id,
		u16 mtu,
		u8 drop_ttl0_flg,
		u8 inner_vlan_removal_en_flg,
		u8 tx_switching,
		u8 hw_lro_enable)
{
	int rc, i;
	struct ecore_sp_vport_start_params vport_start_params = { 0 };
	qlnx_host_t *ha;

	ha = (qlnx_host_t *)cdev;

	vport_start_params.remove_inner_vlan = inner_vlan_removal_en_flg;
	vport_start_params.tx_switching = 0;
	vport_start_params.handle_ptp_pkts = 0;
	vport_start_params.only_untagged = 0;
	vport_start_params.drop_ttl0 = drop_ttl0_flg;

	vport_start_params.tpa_mode =
		(hw_lro_enable ? ECORE_TPA_MODE_RSC : ECORE_TPA_MODE_NONE);
	vport_start_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;

	vport_start_params.vport_id = vport_id;
	vport_start_params.mtu = mtu;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: setting mtu to %d\n", __func__, mtu));

	for_each_hwfn(cdev, i) {
		struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];

		vport_start_params.concrete_fid = p_hwfn->hw_info.concrete_fid;
		vport_start_params.opaque_fid = p_hwfn->hw_info.opaque_fid;

		rc = ecore_sp_vport_start(p_hwfn, &vport_start_params);

		if (rc) {
			QL_DPRINT1(ha, (ha->pci_dev,
				"%s: Failed to start V-PORT %d "
				"with MTU %d\n", __func__, vport_id, mtu));
			return (rc);
		}

		ecore_hw_start_fastpath(p_hwfn);

		QL_DPRINT2(ha, (ha->pci_dev,
			"%s: Started V-PORT %d with MTU %d\n",
			__func__, vport_id, mtu));
	}
	return (0);
}
static int
qlnx_update_vport(struct ecore_dev *cdev,
	struct qlnx_update_vport_params *params)
{
	struct ecore_sp_vport_update_params sp_params;
	int rc, i, j, fp_index;
	struct ecore_hwfn *p_hwfn;
	struct ecore_rss_params *rss;
	qlnx_host_t *ha = (qlnx_host_t *)cdev;
	struct qlnx_fastpath *fp;

	memset(&sp_params, 0, sizeof(sp_params));

	/* Translate protocol params into sp params */
	sp_params.vport_id = params->vport_id;

	sp_params.update_vport_active_rx_flg =
		params->update_vport_active_rx_flg;
	sp_params.vport_active_rx_flg = params->vport_active_rx_flg;

	sp_params.update_vport_active_tx_flg =
		params->update_vport_active_tx_flg;
	sp_params.vport_active_tx_flg = params->vport_active_tx_flg;

	sp_params.update_inner_vlan_removal_flg =
		params->update_inner_vlan_removal_flg;
	sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg;

	sp_params.sge_tpa_params = params->sge_tpa_params;

	/* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns.
	 * We need to re-fix the rss values per engine for CMT.
	 */
	sp_params.rss_params = params->rss_params;

	for_each_hwfn(cdev, i) {

		p_hwfn = &cdev->hwfns[i];

		if ((cdev->num_hwfns > 1) &&
			params->rss_params->update_rss_config &&
			params->rss_params->rss_enable) {

			rss = params->rss_params;

			for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE; j++) {

				fp_index = ((cdev->num_hwfns * j) + i) %
						ha->num_rss;

				fp = &ha->fp_array[fp_index];
				rss->rss_ind_table[j] = fp->rxq->handle;
			}

			for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE;) {
				QL_DPRINT3(ha, (ha->pci_dev,
					"%p %p %p %p %p %p %p %p \n",
					rss->rss_ind_table[j],
					rss->rss_ind_table[j+1],
					rss->rss_ind_table[j+2],
					rss->rss_ind_table[j+3],
					rss->rss_ind_table[j+4],
					rss->rss_ind_table[j+5],
					rss->rss_ind_table[j+6],
					rss->rss_ind_table[j+7]));
				j += 8;
			}
		}

		sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &sp_params,
				ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			QL_DPRINT1(ha, (ha->pci_dev,
				"%s:Failed to update VPORT\n", __func__));
			return (rc);
		}

		QL_DPRINT2(ha, (ha->pci_dev,
			"%s: Updated V-PORT %d: tx_active_flag %d, "
			"rx_active_flag %d [tx_update %d], [rx_update %d]\n",
			__func__,
			params->vport_id, params->vport_active_tx_flg,
			params->vport_active_rx_flg,
			params->update_vport_active_tx_flg,
			params->update_vport_active_rx_flg));
	}

	return (0);
}
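/*
 * For CMT the RSS indirection table is rebuilt per engine: entry j points
 * at the Rx queue whose index is ((num_hwfns * j) + i) modulo num_rss, so
 * consecutive table entries alternate between the queues owned by each
 * hw-function.
 */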
static void
qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq)
{
	struct eth_rx_bd *rx_bd_cons =
			ecore_chain_consume(&rxq->rx_bd_ring);
	struct eth_rx_bd *rx_bd_prod =
			ecore_chain_produce(&rxq->rx_bd_ring);
	struct sw_rx_data *sw_rx_data_cons =
			&rxq->sw_rx_ring[rxq->sw_rx_cons];
	struct sw_rx_data *sw_rx_data_prod =
			&rxq->sw_rx_ring[rxq->sw_rx_prod];

	sw_rx_data_prod->data = sw_rx_data_cons->data;
	memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd));

	rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
	rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);

	return;
}
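/*
 * qlnx_reuse_rx_data() recycles the buffer at the consumer position back
 * onto the producer position of the BD ring (same mbuf, same DMA address),
 * so the hardware never sees a hole in the ring, e.g. when a replacement
 * mbuf allocation fails.
 */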
static void
qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, struct qlnx_rx_queue *rxq)
{
	uint16_t	bd_prod;
	uint16_t	cqe_prod;
	struct eth_rx_prod_data rx_prods = {0};

	bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
	cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);

	/* Update producers */
	rx_prods.bd_prod = htole16(bd_prod);
	rx_prods.cqe_prod = htole16(cqe_prod);

	/* Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 */
	//bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_READ);
	//bus_barrier(ha->pci_dbells, 0, 0, BUS_SPACE_BARRIER_READ);

	internal_ram_wr(p_hwfn, rxq->hw_rxq_prod_addr,
		sizeof(rx_prods), (u32 *)&rx_prods);

	/* mmiowb is needed to synchronize doorbell writes from more than one
	 * processor. It guarantees that the write arrives to the device before
	 * the napi lock is released and another qlnx_poll is called (possibly
	 * on another CPU). Without this barrier, the next doorbell can bypass
	 * this doorbell. This is applicable to IA64/Altix systems.
	 */

	return;
}
static uint32_t qlnx_hash_key[] = {
	((0x6d << 24)|(0x5a << 16)|(0x56 << 8)|0xda),
	((0x25 << 24)|(0x5b << 16)|(0x0e << 8)|0xc2),
	((0x41 << 24)|(0x67 << 16)|(0x25 << 8)|0x3d),
	((0x43 << 24)|(0xa3 << 16)|(0x8f << 8)|0xb0),
	((0xd0 << 24)|(0xca << 16)|(0x2b << 8)|0xcb),
	((0xae << 24)|(0x7b << 16)|(0x30 << 8)|0xb4),
	((0x77 << 24)|(0xcb << 16)|(0x2d << 8)|0xa3),
	((0x80 << 24)|(0x30 << 16)|(0xf2 << 8)|0x0c),
	((0x6a << 24)|(0x42 << 16)|(0xb7 << 8)|0x3b),
	((0xbe << 24)|(0xac << 16)|(0x01 << 8)|0xfa)};
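/*
 * qlnx_hash_key packs the de-facto standard 40-byte Toeplitz RSS key (the
 * same default key used by many drivers and documented in the Microsoft
 * RSS specification) into ten big-endian 32-bit words.
 */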
static int
qlnx_start_queues(qlnx_host_t *ha)
{
	int rc, tc, i, vport_id = 0,
		drop_ttl0_flg = 1, vlan_removal_en = 1,
		tx_switching = 0, hw_lro_enable = 0;
	struct ecore_dev *cdev = &ha->cdev;
	struct ecore_rss_params *rss_params = &ha->rss_params;
	struct qlnx_update_vport_params vport_update_params;
	struct ifnet *ifp;
	struct ecore_hwfn *p_hwfn;
	struct ecore_sge_tpa_params tpa_params;
	struct ecore_queue_start_common_params qparams;
	struct qlnx_fastpath *fp;

	ifp = ha->ifp;

	if (!ha->num_rss) {
		QL_DPRINT1(ha, (ha->pci_dev,
			"%s: Cannot update V-PORT as active as there"
			" are no Rx queues\n", __func__));
		return (-EINVAL);
	}

#ifndef QLNX_SOFT_LRO
	hw_lro_enable = ifp->if_capenable & IFCAP_LRO;
#endif /* #ifndef QLNX_SOFT_LRO */

	rc = qlnx_start_vport(cdev, vport_id, ifp->if_mtu, drop_ttl0_flg,
			vlan_removal_en, tx_switching, hw_lro_enable);

	if (rc) {
		QL_DPRINT1(ha, (ha->pci_dev,
			"%s: Start V-PORT failed %d\n", __func__, rc));
		return (rc);
	}

	QL_DPRINT2(ha, (ha->pci_dev,
		"%s: Start vport ramrod passed,"
		" vport_id = %d, MTU = %d, vlan_removal_en = %d\n", __func__,
		vport_id, (int)(ifp->if_mtu + 0xe), vlan_removal_en));

	for (i = 0; i < ha->num_rss; i++) {
		struct ecore_rxq_start_ret_params rx_ret_params;
		struct ecore_txq_start_ret_params tx_ret_params;

		fp = &ha->fp_array[i];
		p_hwfn = &cdev->hwfns[(fp->rss_id % cdev->num_hwfns)];

		bzero(&qparams, sizeof(struct ecore_queue_start_common_params));
		bzero(&rx_ret_params,
			sizeof (struct ecore_rxq_start_ret_params));

		qparams.queue_id = i;
		qparams.vport_id = vport_id;
		qparams.stats_id = vport_id;
		qparams.p_sb = fp->sb_info;
		qparams.sb_idx = RX_PI;

		rc = ecore_eth_rx_queue_start(p_hwfn,
			p_hwfn->hw_info.opaque_fid,
			&qparams,
			fp->rxq->rx_buf_size,	/* bd_max_bytes */
			/* bd_chain_phys_addr */
			fp->rxq->rx_bd_ring.p_phys_addr,
			/* cqe_pbl_addr */
			ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring),
			/* cqe_pbl_size */
			ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring),
			&rx_ret_params);

		if (rc) {
			QL_DPRINT1(ha, (ha->pci_dev,
				"%s: Start RXQ #%d failed %d\n", __func__,
				i, rc));
			return (rc);
		}

		fp->rxq->hw_rxq_prod_addr = rx_ret_params.p_prod;
		fp->rxq->handle = rx_ret_params.p_handle;
		fp->rxq->hw_cons_ptr =
				&fp->sb_info->sb_virt->pi_array[RX_PI];

		qlnx_update_rx_prod(p_hwfn, fp->rxq);

		for (tc = 0; tc < ha->num_tc; tc++) {
			struct qlnx_tx_queue *txq = fp->txq[tc];

			bzero(&qparams,
				sizeof(struct ecore_queue_start_common_params));
			bzero(&tx_ret_params,
				sizeof (struct ecore_txq_start_ret_params));

			qparams.queue_id = txq->index / cdev->num_hwfns;
			qparams.vport_id = vport_id;
			qparams.stats_id = vport_id;
			qparams.p_sb = fp->sb_info;
			qparams.sb_idx = TX_PI(tc);

			rc = ecore_eth_tx_queue_start(p_hwfn,
				p_hwfn->hw_info.opaque_fid,
				&qparams, tc,
				/* bd_chain_phys_addr */
				ecore_chain_get_pbl_phys(&txq->tx_pbl),
				ecore_chain_get_page_cnt(&txq->tx_pbl),
				&tx_ret_params);

			if (rc) {
				QL_DPRINT1(ha, (ha->pci_dev,
					"%s: Start TXQ #%d failed %d\n",
					__func__, txq->index, rc));
				return (rc);
			}

			txq->doorbell_addr = tx_ret_params.p_doorbell;
			txq->handle = tx_ret_params.p_handle;

			txq->hw_cons_ptr =
				&fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
			SET_FIELD(txq->tx_db.data.params,
					ETH_DB_DATA_DEST, DB_DEST_XCM);
			SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
					DB_AGG_CMD_SET);
			SET_FIELD(txq->tx_db.data.params,
					ETH_DB_DATA_AGG_VAL_SEL,
					DQ_XCM_ETH_TX_BD_PROD_CMD);

			txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
		}
	}

	/* Fill struct with RSS params */
	if (ha->num_rss > 1) {

		rss_params->update_rss_config = 1;
		rss_params->rss_enable = 1;
		rss_params->update_rss_capabilities = 1;
		rss_params->update_rss_ind_table = 1;
		rss_params->update_rss_key = 1;
		rss_params->rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 |
				ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP;
		rss_params->rss_table_size_log = 7; /* 2^7 = 128 */

		for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
			fp = &ha->fp_array[(i % ha->num_rss)];
			rss_params->rss_ind_table[i] = fp->rxq->handle;
		}

		for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
			rss_params->rss_key[i] = (__le32)qlnx_hash_key[i];

	} else {
		memset(rss_params, 0, sizeof(*rss_params));
	}

	/* Prepare and send the vport enable */
	memset(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = vport_id;
	vport_update_params.update_vport_active_tx_flg = 1;
	vport_update_params.vport_active_tx_flg = 1;
	vport_update_params.update_vport_active_rx_flg = 1;
	vport_update_params.vport_active_rx_flg = 1;
	vport_update_params.rss_params = rss_params;
	vport_update_params.update_inner_vlan_removal_flg = 1;
	vport_update_params.inner_vlan_removal_flg = 1;

	if (hw_lro_enable) {
		memset(&tpa_params, 0, sizeof (struct ecore_sge_tpa_params));

		tpa_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;

		tpa_params.update_tpa_en_flg = 1;
		tpa_params.tpa_ipv4_en_flg = 1;
		tpa_params.tpa_ipv6_en_flg = 1;

		tpa_params.update_tpa_param_flg = 1;
		tpa_params.tpa_pkt_split_flg = 0;
		tpa_params.tpa_hdr_data_split_flg = 0;
		tpa_params.tpa_gro_consistent_flg = 0;
		tpa_params.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
		tpa_params.tpa_max_size = (uint16_t)(-1);
		tpa_params.tpa_min_size_to_start = ifp->if_mtu / 2;
		tpa_params.tpa_min_size_to_cont = ifp->if_mtu / 2;

		vport_update_params.sge_tpa_params = &tpa_params;
	}

	rc = qlnx_update_vport(cdev, &vport_update_params);
	if (rc) {
		QL_DPRINT1(ha, (ha->pci_dev,
			"%s: Update V-PORT failed %d\n", __func__, rc));
		return (rc);
	}

	return (0);
}
static int
qlnx_drain_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
	struct qlnx_tx_queue *txq)
{
	uint16_t hw_bd_cons;
	uint16_t ecore_cons_idx;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	hw_bd_cons = le16toh(*txq->hw_cons_ptr);

	while (hw_bd_cons !=
		(ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {

		mtx_lock(&fp->tx_mtx);

		(void)qlnx_tx_int(ha, fp, txq);

		mtx_unlock(&fp->tx_mtx);

		qlnx_mdelay(__func__, 2);

		hw_bd_cons = le16toh(*txq->hw_cons_ptr);
	}

	QL_DPRINT2(ha, (ha->pci_dev, "%s[%d, %d]: done\n", __func__,
		fp->rss_id, txq->index));

	return (0);
}
static int
qlnx_stop_queues(qlnx_host_t *ha)
{
	struct qlnx_update_vport_params vport_update_params;
	struct ecore_dev *cdev;
	struct qlnx_fastpath *fp;
	int rc, tc, i;

	cdev = &ha->cdev;

	/* Disable the vport */

	memset(&vport_update_params, 0, sizeof(vport_update_params));

	vport_update_params.vport_id = 0;
	vport_update_params.update_vport_active_tx_flg = 1;
	vport_update_params.vport_active_tx_flg = 0;
	vport_update_params.update_vport_active_rx_flg = 1;
	vport_update_params.vport_active_rx_flg = 0;
	vport_update_params.rss_params = &ha->rss_params;
	vport_update_params.rss_params->update_rss_config = 0;
	vport_update_params.rss_params->rss_enable = 0;
	vport_update_params.update_inner_vlan_removal_flg = 0;
	vport_update_params.inner_vlan_removal_flg = 0;

	rc = qlnx_update_vport(cdev, &vport_update_params);
	if (rc) {
		QL_DPRINT1(ha, (ha->pci_dev, "%s:Failed to update vport\n",
			__func__));
		return (rc);
	}

	/* Flush Tx queues. If needed, request drain from MCP */
	for (i = 0; i < ha->num_rss; i++) {
		fp = &ha->fp_array[i];

		for (tc = 0; tc < ha->num_tc; tc++) {
			struct qlnx_tx_queue *txq = fp->txq[tc];

			rc = qlnx_drain_txq(ha, fp, txq);
			if (rc)
				return (rc);
		}
	}

	/* Stop all Queues in reverse order */
	for (i = ha->num_rss - 1; i >= 0; i--) {

		struct ecore_hwfn *p_hwfn = &cdev->hwfns[(i % cdev->num_hwfns)];

		fp = &ha->fp_array[i];

		/* Stop the Tx Queue(s) */
		for (tc = 0; tc < ha->num_tc; tc++) {
			int tx_queue_id;

			tx_queue_id = tc * ha->num_rss + i;
			rc = ecore_eth_tx_queue_stop(p_hwfn,
					fp->txq[tc]->handle);

			if (rc) {
				QL_DPRINT1(ha, (ha->pci_dev,
					"%s: Failed to stop TXQ #%d\n",
					__func__, tx_queue_id));
				return (rc);
			}
		}

		/* Stop the Rx Queue */
		rc = ecore_eth_rx_queue_stop(p_hwfn, fp->rxq->handle, false,
				false);
		if (rc) {
			QL_DPRINT1(ha, (ha->pci_dev,
				"%s: Failed to stop RXQ #%d\n", __func__, i));
			return (rc);
		}
	}

	/* Stop the vport */
	for_each_hwfn(cdev, i) {

		struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];

		rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 0);

		if (rc) {
			QL_DPRINT1(ha, (ha->pci_dev,
				"%s: Failed to stop VPORT\n", __func__));
			return (rc);
		}
	}

	return (rc);
}
static int
qlnx_set_ucast_rx_mac(qlnx_host_t *ha,
	enum ecore_filter_opcode opcode,
	unsigned char mac[ETH_ALEN])
{
	struct ecore_filter_ucast ucast;
	struct ecore_dev *cdev;
	int rc;

	cdev = &ha->cdev;

	bzero(&ucast, sizeof(struct ecore_filter_ucast));

	ucast.opcode = opcode;
	ucast.type = ECORE_FILTER_MAC;
	ucast.is_rx_filter = 1;
	ucast.vport_to_add_to = 0;
	memcpy(&ucast.mac[0], mac, ETH_ALEN);

	rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);

	return (rc);
}
static int
qlnx_remove_all_ucast_mac(qlnx_host_t *ha)
{
	struct ecore_filter_ucast ucast;
	struct ecore_dev *cdev;
	int rc;

	bzero(&ucast, sizeof(struct ecore_filter_ucast));

	ucast.opcode = ECORE_FILTER_REPLACE;
	ucast.type = ECORE_FILTER_MAC;
	ucast.is_rx_filter = 1;

	cdev = &ha->cdev;

	rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);

	return (rc);
}
static int
qlnx_remove_all_mcast_mac(qlnx_host_t *ha)
{
	struct ecore_filter_mcast *mcast;
	struct ecore_dev *cdev;
	int rc, i;

	cdev = &ha->cdev;

	mcast = &ha->ecore_mcast;
	bzero(mcast, sizeof(struct ecore_filter_mcast));

	mcast->opcode = ECORE_FILTER_REMOVE;

	for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {

		if (ha->mcast[i].addr[0] || ha->mcast[i].addr[1] ||
			ha->mcast[i].addr[2] || ha->mcast[i].addr[3] ||
			ha->mcast[i].addr[4] || ha->mcast[i].addr[5]) {

			memcpy(&mcast->mac[i][0], &ha->mcast[i].addr[0],
				ETH_ALEN);
			mcast->num_mc_addrs++;
		}
	}
	mcast = &ha->ecore_mcast;

	rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);

	bzero(ha->mcast, (sizeof(qlnx_mcast_t) * QLNX_MAX_NUM_MULTICAST_ADDRS));

	return (rc);
}
static int
qlnx_clean_filters(qlnx_host_t *ha)
{
	int rc = 0;

	/* Remove all unicast macs */
	rc = qlnx_remove_all_ucast_mac(ha);
	if (rc)
		return (rc);

	/* Remove all multicast macs */
	rc = qlnx_remove_all_mcast_mac(ha);
	if (rc)
		return (rc);

	rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_FLUSH, ha->primary_mac);

	return (rc);
}
static int
qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter)
{
	struct ecore_filter_accept_flags accept;
	int rc = 0;
	struct ecore_dev *cdev;

	cdev = &ha->cdev;

	bzero(&accept, sizeof(struct ecore_filter_accept_flags));

	accept.update_rx_mode_config = 1;
	accept.rx_accept_filter = filter;

	accept.update_tx_mode_config = 1;
	accept.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
			ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST;

	rc = ecore_filter_accept_cmd(cdev, 0, accept, false, false,
			ECORE_SPQ_MODE_CB, NULL);

	return (rc);
}
static int
qlnx_set_rx_mode(qlnx_host_t *ha)
{
	int	rc = 0;
	uint8_t	filter;

	rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, ha->primary_mac);
	if (rc)
		return (rc);

	rc = qlnx_remove_all_mcast_mac(ha);
	if (rc)
		return (rc);

	filter = ECORE_ACCEPT_UCAST_MATCHED |
			ECORE_ACCEPT_MCAST_MATCHED |
			ECORE_ACCEPT_BCAST;
	ha->filter = filter;

	rc = qlnx_set_rx_accept_filter(ha, filter);

	return (rc);
}
static int
qlnx_set_link(qlnx_host_t *ha, bool link_up)
{
	int i, rc = 0;
	struct ecore_dev *cdev;
	struct ecore_hwfn *hwfn;
	struct ecore_ptt *ptt;

	cdev = &ha->cdev;

	for_each_hwfn(cdev, i) {

		hwfn = &cdev->hwfns[i];

		ptt = ecore_ptt_acquire(hwfn);
		if (ptt == NULL)
			return (-1);

		rc = ecore_mcp_set_link(hwfn, ptt, link_up);

		ecore_ptt_release(hwfn, ptt);

		if (rc)
			return (rc);
	}
	return (rc);
}
#if __FreeBSD_version >= 1100000
static uint64_t
qlnx_get_counter(if_t ifp, ift_counter cnt)
{
	qlnx_host_t *ha;
	uint64_t count;

	ha = (qlnx_host_t *)if_getsoftc(ifp);

	switch (cnt) {

	case IFCOUNTER_IPACKETS:
		count = ha->hw_stats.common.rx_ucast_pkts +
			ha->hw_stats.common.rx_mcast_pkts +
			ha->hw_stats.common.rx_bcast_pkts;
		break;

	case IFCOUNTER_IERRORS:
		count = ha->hw_stats.common.rx_crc_errors +
			ha->hw_stats.common.rx_align_errors +
			ha->hw_stats.common.rx_oversize_packets +
			ha->hw_stats.common.rx_undersize_packets;
		break;

	case IFCOUNTER_OPACKETS:
		count = ha->hw_stats.common.tx_ucast_pkts +
			ha->hw_stats.common.tx_mcast_pkts +
			ha->hw_stats.common.tx_bcast_pkts;
		break;

	case IFCOUNTER_OERRORS:
		count = ha->hw_stats.common.tx_err_drop_pkts;
		break;

	case IFCOUNTER_COLLISIONS:
		return (0);

	case IFCOUNTER_IBYTES:
		count = ha->hw_stats.common.rx_ucast_bytes +
			ha->hw_stats.common.rx_mcast_bytes +
			ha->hw_stats.common.rx_bcast_bytes;
		break;

	case IFCOUNTER_OBYTES:
		count = ha->hw_stats.common.tx_ucast_bytes +
			ha->hw_stats.common.tx_mcast_bytes +
			ha->hw_stats.common.tx_bcast_bytes;
		break;

	case IFCOUNTER_IMCASTS:
		count = ha->hw_stats.common.rx_mcast_pkts;
		break;

	case IFCOUNTER_OMCASTS:
		count = ha->hw_stats.common.tx_mcast_pkts;
		break;

	case IFCOUNTER_IQDROPS:
	case IFCOUNTER_OQDROPS:
	case IFCOUNTER_NOPROTO:

	default:
		return (if_get_counter_default(ifp, cnt));
	}
	return (count);
}
#endif /* #if __FreeBSD_version >= 1100000 */
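/*
 * The counters above are served from the ha->hw_stats snapshot that
 * qlnx_timer() refreshes once per second via ecore_get_vport_stats(), so
 * if_get_counter() never touches the hardware directly.
 */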
static void
qlnx_timer(void *arg)
{
	qlnx_host_t *ha;

	ha = (qlnx_host_t *)arg;

	ecore_get_vport_stats(&ha->cdev, &ha->hw_stats);

	if (ha->storm_stats_enable)
		qlnx_sample_storm_stats(ha);

	callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);

	return;
}
static int
qlnx_load(qlnx_host_t *ha)
{
	int i;
	int rc = 0;
	struct ecore_dev *cdev;
	device_t dev;

	cdev = &ha->cdev;
	dev = ha->pci_dev;

	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

	rc = qlnx_alloc_mem_arrays(ha);
	if (rc)
		goto qlnx_load_exit0;

	qlnx_init_fp(ha);

	rc = qlnx_alloc_mem_load(ha);
	if (rc)
		goto qlnx_load_exit1;

	QL_DPRINT2(ha, (dev, "%s: Allocated %d RSS queues on %d TC/s\n",
		__func__, ha->num_rss, ha->num_tc));

	for (i = 0; i < ha->num_rss; i++) {

		if ((rc = bus_setup_intr(dev, ha->irq_vec[i].irq,
				(INTR_TYPE_NET | INTR_MPSAFE),
				NULL, qlnx_fp_isr, &ha->irq_vec[i],
				&ha->irq_vec[i].handle))) {

			QL_DPRINT1(ha, (dev, "could not setup interrupt\n"));

			goto qlnx_load_exit2;
		}

		QL_DPRINT2(ha, (dev, "%s: rss_id = %d irq_rid %d"
			" irq %p handle %p\n", __func__, i,
			ha->irq_vec[i].irq_rid,
			ha->irq_vec[i].irq, ha->irq_vec[i].handle));

		bus_bind_intr(dev, ha->irq_vec[i].irq, (i % mp_ncpus));
	}

	rc = qlnx_start_queues(ha);
	if (rc)
		goto qlnx_load_exit2;

	QL_DPRINT2(ha, (dev, "%s: Start VPORT, RXQ and TXQ succeeded\n",
		__func__));

	/* Add primary mac and set Rx filters */
	rc = qlnx_set_rx_mode(ha);
	if (rc)
		goto qlnx_load_exit2;

	/* Ask for link-up using current configuration */
	qlnx_set_link(ha, true);

	ha->state = QLNX_STATE_OPEN;

	bzero(&ha->hw_stats, sizeof(struct ecore_eth_stats));

	if (ha->flags.callout_init)
		callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);

	goto qlnx_load_exit0;

qlnx_load_exit2:
	qlnx_free_mem_load(ha);

qlnx_load_exit1:
	ha->num_rss = 0;

qlnx_load_exit0:
	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit [%d]\n", __func__, rc));
	return (rc);
}
static void
qlnx_drain_soft_lro(qlnx_host_t *ha)
{
#ifdef QLNX_SOFT_LRO

	struct ifnet *ifp;
	int i;

	ifp = ha->ifp;

	if (ifp->if_capenable & IFCAP_LRO) {

		for (i = 0; i < ha->num_rss; i++) {

			struct qlnx_fastpath *fp = &ha->fp_array[i];
			struct lro_ctrl *lro;

			lro = &fp->rxq->lro;

#if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)

			tcp_lro_flush_all(lro);

#else
			struct lro_entry *queued;

			while ((!SLIST_EMPTY(&lro->lro_active))) {
				queued = SLIST_FIRST(&lro->lro_active);
				SLIST_REMOVE_HEAD(&lro->lro_active, next);
				tcp_lro_flush(lro, queued);
			}

#endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
		}
	}

#endif /* #ifdef QLNX_SOFT_LRO */

	return;
}
static void
qlnx_unload(qlnx_host_t *ha)
{
	struct ecore_dev *cdev;
	device_t dev;
	int i;

	cdev = &ha->cdev;
	dev = ha->pci_dev;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	if (ha->state == QLNX_STATE_OPEN) {

		qlnx_set_link(ha, false);
		qlnx_clean_filters(ha);
		qlnx_stop_queues(ha);
		ecore_hw_stop_fastpath(cdev);

		for (i = 0; i < ha->num_rss; i++) {
			if (ha->irq_vec[i].handle) {
				(void)bus_teardown_intr(dev,
					ha->irq_vec[i].irq,
					ha->irq_vec[i].handle);
				ha->irq_vec[i].handle = NULL;
			}
		}

		qlnx_drain_fp_taskqueues(ha);
		qlnx_drain_soft_lro(ha);
		qlnx_free_mem_load(ha);
	}

	if (ha->flags.callout_init)
		callout_drain(&ha->qlnx_callout);

	qlnx_mdelay(__func__, 1000);

	ha->state = QLNX_STATE_CLOSED;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
	return;
}
static int
qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
{
	int rval = -1;
	struct ecore_hwfn *p_hwfn;
	struct ecore_ptt *p_ptt;

	ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());

	p_hwfn = &ha->cdev.hwfns[hwfn_index];
	p_ptt = ecore_ptt_acquire(p_hwfn);

	if (!p_ptt) {
		QL_DPRINT1(ha, (ha->pci_dev, "%s: ecore_ptt_acquire failed\n",
			__func__));
		return (rval);
	}

	rval = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);

	if (rval == DBG_STATUS_OK)
		rval = 0;
	else {
		QL_DPRINT1(ha, (ha->pci_dev,
			"%s : ecore_dbg_grc_get_dump_buf_size failed [0x%x]\n",
			__func__, rval));
	}

	ecore_ptt_release(p_hwfn, p_ptt);

	return (rval);
}
static int
qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
{
	int rval = -1;
	struct ecore_hwfn *p_hwfn;
	struct ecore_ptt *p_ptt;

	ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());

	p_hwfn = &ha->cdev.hwfns[hwfn_index];
	p_ptt = ecore_ptt_acquire(p_hwfn);

	if (!p_ptt) {
		QL_DPRINT1(ha, (ha->pci_dev, "%s: ecore_ptt_acquire failed\n",
			__func__));
		return (rval);
	}

	rval = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);

	if (rval == DBG_STATUS_OK)
		rval = 0;
	else {
		QL_DPRINT1(ha, (ha->pci_dev, "%s : "
			"ecore_dbg_idle_chk_get_dump_buf_size failed [0x%x]\n",
			__func__, rval));
	}

	ecore_ptt_release(p_hwfn, p_ptt);

	return (rval);
}
static void
qlnx_sample_storm_stats(qlnx_host_t *ha)
{
	int i, index;
	struct ecore_dev *cdev;
	qlnx_storm_stats_t *s_stats;
	uint32_t reg;
	struct ecore_ptt *p_ptt;
	struct ecore_hwfn *hwfn;

	if (ha->storm_stats_index >= QLNX_STORM_STATS_SAMPLES_PER_HWFN) {
		ha->storm_stats_enable = 0;
		return;
	}

	cdev = &ha->cdev;

	for_each_hwfn(cdev, i) {

		hwfn = &cdev->hwfns[i];

		p_ptt = ecore_ptt_acquire(hwfn);
		if (!p_ptt)
			return;

		index = ha->storm_stats_index +
				(i * QLNX_STORM_STATS_SAMPLES_PER_HWFN);

		s_stats = &ha->storm_stats[index];

		/* XSTORM */
		reg = XSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
		s_stats->xstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = XSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
		s_stats->xstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = XSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
		s_stats->xstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = XSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
		s_stats->xstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

		/* YSTORM */
		reg = YSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
		s_stats->ystorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = YSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
		s_stats->ystorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = YSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
		s_stats->ystorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = YSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
		s_stats->ystorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

		/* PSTORM */
		reg = PSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
		s_stats->pstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = PSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
		s_stats->pstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = PSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
		s_stats->pstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = PSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
		s_stats->pstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

		/* TSTORM */
		reg = TSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
		s_stats->tstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = TSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
		s_stats->tstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = TSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
		s_stats->tstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = TSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
		s_stats->tstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

		/* MSTORM */
		reg = MSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
		s_stats->mstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = MSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
		s_stats->mstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = MSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
		s_stats->mstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = MSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
		s_stats->mstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

		/* USTORM */
		reg = USEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
		s_stats->ustorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = USEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
		s_stats->ustorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = USEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
		s_stats->ustorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = USEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
		s_stats->ustorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

		ecore_ptt_release(hwfn, p_ptt);
	}

	ha->storm_stats_index++;

	return;
}
/*
 * Name: qlnx_dump_buf8
 * Function: dumps a buffer as bytes
 */
static void
qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, uint32_t len)
{
	device_t	dev;
	uint32_t	i = 0;
	uint8_t		*buf;

	dev = ha->pci_dev;
	buf = dbuf;

	device_printf(dev, "%s: %s 0x%x dump start\n", __func__, msg, len);

	while (len >= 16) {
		device_printf(dev,"0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x %02x"
			" %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3],
			buf[4], buf[5], buf[6], buf[7],
			buf[8], buf[9], buf[10], buf[11],
			buf[12], buf[13], buf[14], buf[15]);
		i += 16;
		len -= 16;
		buf += 16;
	}

	switch (len) {
	case 1:
		device_printf(dev,"0x%08x: %02x\n", i, buf[0]);
		break;
	case 2:
		device_printf(dev,"0x%08x: %02x %02x\n", i, buf[0], buf[1]);
		break;
	case 3:
		device_printf(dev,"0x%08x: %02x %02x %02x\n",
			i, buf[0], buf[1], buf[2]);
		break;
	case 4:
		device_printf(dev,"0x%08x: %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3]);
		break;
	case 5:
		device_printf(dev,"0x%08x:"
			" %02x %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4]);
		break;
	case 6:
		device_printf(dev,"0x%08x:"
			" %02x %02x %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
		break;
	case 7:
		device_printf(dev,"0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]);
		break;
	case 8:
		device_printf(dev,"0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
			buf[7]);
		break;
	case 9:
		device_printf(dev,"0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x %02x"
			" %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
			buf[7], buf[8]);
		break;
	case 10:
		device_printf(dev,"0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x %02x"
			" %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
			buf[7], buf[8], buf[9]);
		break;
	case 11:
		device_printf(dev,"0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x %02x"
			" %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
			buf[7], buf[8], buf[9], buf[10]);
		break;
	case 12:
		device_printf(dev,"0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x %02x"
			" %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
			buf[7], buf[8], buf[9], buf[10], buf[11]);
		break;
	case 13:
		device_printf(dev,"0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x %02x"
			" %02x %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
			buf[7], buf[8], buf[9], buf[10], buf[11], buf[12]);
		break;
	case 14:
		device_printf(dev,"0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x %02x"
			" %02x %02x %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
			buf[7], buf[8], buf[9], buf[10], buf[11], buf[12],
			buf[13]);
		break;
	case 15:
		device_printf(dev,"0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x %02x"
			" %02x %02x %02x %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
			buf[7], buf[8], buf[9], buf[10], buf[11], buf[12],
			buf[13], buf[14]);
		break;
	default:
		break;
	}

	device_printf(dev, "%s: %s dump end\n", __func__, msg);

	return;
}