/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Author : David C Somayajulu, Cavium, Inc., San Jose, CA 95131.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "ecore_gtt_reg_addr.h"
#include "ecore_chain.h"
#include "ecore_status.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_int.h"
#include "ecore_cxt.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"
#include "ecore_dev_api.h"
#include "ecore_l2_api.h"
#include "ecore_mcp.h"
#include "ecore_hw_defs.h"
#include "mcp_public.h"
#include "ecore_iro.h"
#include "ecore_dev_api.h"
#include "ecore_dbg_fw_funcs.h"

#include "qlnx_ioctl.h"
/*
 * ioctl related functions
 */
static void qlnx_add_sysctls(qlnx_host_t *ha);

static void qlnx_release(qlnx_host_t *ha);
static void qlnx_fp_isr(void *arg);
static void qlnx_init_ifnet(device_t dev, qlnx_host_t *ha);
static void qlnx_init(void *arg);
static void qlnx_init_locked(qlnx_host_t *ha);
static int qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi);
static int qlnx_set_promisc(qlnx_host_t *ha);
static int qlnx_set_allmulti(qlnx_host_t *ha);
static int qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int qlnx_media_change(struct ifnet *ifp);
static void qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
static void qlnx_stop(qlnx_host_t *ha);
static int qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp,
        struct mbuf **m_headp);
static int qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha);
static uint32_t qlnx_get_optics(qlnx_host_t *ha,
        struct qlnx_link_output *if_link);
static int qlnx_transmit(struct ifnet *ifp, struct mbuf *mp);
static void qlnx_qflush(struct ifnet *ifp);

static int qlnx_alloc_parent_dma_tag(qlnx_host_t *ha);
static void qlnx_free_parent_dma_tag(qlnx_host_t *ha);
static int qlnx_alloc_tx_dma_tag(qlnx_host_t *ha);
static void qlnx_free_tx_dma_tag(qlnx_host_t *ha);
static int qlnx_alloc_rx_dma_tag(qlnx_host_t *ha);
static void qlnx_free_rx_dma_tag(qlnx_host_t *ha);

static int qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver);
static int qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size);

static int qlnx_nic_setup(struct ecore_dev *cdev,
        struct ecore_pf_params *func_params);
static int qlnx_nic_start(struct ecore_dev *cdev);
static int qlnx_slowpath_start(qlnx_host_t *ha);
static int qlnx_slowpath_stop(qlnx_host_t *ha);
static int qlnx_init_hw(qlnx_host_t *ha);
static void qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
        char ver_str[VER_SIZE]);
static void qlnx_unload(qlnx_host_t *ha);
static int qlnx_load(qlnx_host_t *ha);
static void qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
        uint32_t add_mac);
static void qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf,
        uint32_t len);
static int qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq);
static void qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq);
static void qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn,
        struct qlnx_rx_queue *rxq);
static int qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter);
static int qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords,
        int hwfn_index);
static int qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords,
        int hwfn_index);
static void qlnx_timer(void *arg);
static int qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
static void qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
static void qlnx_trigger_dump(qlnx_host_t *ha);
static void qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
        struct qlnx_tx_queue *txq);
static int qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
        int lro_enable);
static void qlnx_fp_taskqueue(void *context, int pending);
static void qlnx_sample_storm_stats(qlnx_host_t *ha);
static int qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
        struct qlnx_agg_info *tpa);
static void qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa);

#if __FreeBSD_version >= 1100000
static uint64_t qlnx_get_counter(if_t ifp, ift_counter cnt);
#endif
/*
 * Hooks to the Operating Systems
 */
static int qlnx_pci_probe (device_t);
static int qlnx_pci_attach (device_t);
static int qlnx_pci_detach (device_t);

static device_method_t qlnx_pci_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe, qlnx_pci_probe),
    DEVMETHOD(device_attach, qlnx_pci_attach),
    DEVMETHOD(device_detach, qlnx_pci_detach),
    { 0, 0 }
};

static driver_t qlnx_pci_driver = {
    "ql", qlnx_pci_methods, sizeof (qlnx_host_t),
};

static devclass_t qlnx_devclass;

MODULE_VERSION(if_qlnxe, 1);
DRIVER_MODULE(if_qlnxe, pci, qlnx_pci_driver, qlnx_devclass, 0, 0);

MODULE_DEPEND(if_qlnxe, pci, 1, 1, 1);
MODULE_DEPEND(if_qlnxe, ether, 1, 1, 1);
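
/*
 * Usage note (editorial, not from the original source): with the module
 * glue above, the driver can be loaded at runtime with "kldload if_qlnxe",
 * or at boot by adding if_qlnxe_load="YES" to /boot/loader.conf.
 */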
MALLOC_DEFINE(M_QLNXBUF, "qlnxbuf", "Buffers for qlnx driver");

char qlnx_dev_str[64];
char qlnx_ver_str[VER_SIZE];
char qlnx_name_str[NAME_SIZE];

/*
 * Some PCI Configuration Space Related Defines
 */

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC              0x1077
#endif

/* 40G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1634
#define QLOGIC_PCI_DEVICE_ID_1634      0x1634
#endif

/* 100G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1644
#define QLOGIC_PCI_DEVICE_ID_1644      0x1644
#endif

/* 25G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1656
#define QLOGIC_PCI_DEVICE_ID_1656      0x1656
#endif

/* 50G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1654
#define QLOGIC_PCI_DEVICE_ID_1654      0x1654
#endif
static int
qlnx_valid_device(device_t dev)
{
    uint16_t device_id;

    device_id = pci_get_device(dev);

    if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
        (device_id == QLOGIC_PCI_DEVICE_ID_1644) ||
        (device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
        (device_id == QLOGIC_PCI_DEVICE_ID_1654))
        return (0);

    return (-1);
}
/*
 * Name: qlnx_pci_probe
 * Function: Validate the PCI device to be a QLogic QLE45xxx device
 */
static int
qlnx_pci_probe(device_t dev)
{
    snprintf(qlnx_ver_str, sizeof(qlnx_ver_str), "v%d.%d.%d",
        QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, QLNX_VERSION_BUILD);
    snprintf(qlnx_name_str, sizeof(qlnx_name_str), "qlnx");

    if (pci_get_vendor(dev) != PCI_VENDOR_QLOGIC) {
        return (ENXIO);
    }

    switch (pci_get_device(dev)) {

    case QLOGIC_PCI_DEVICE_ID_1644:
        snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
            "Qlogic 100GbE PCI CNA Adapter-Ethernet Function",
            QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
            QLNX_VERSION_BUILD);
        device_set_desc_copy(dev, qlnx_dev_str);
        break;

    case QLOGIC_PCI_DEVICE_ID_1634:
        snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
            "Qlogic 40GbE PCI CNA Adapter-Ethernet Function",
            QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
            QLNX_VERSION_BUILD);
        device_set_desc_copy(dev, qlnx_dev_str);
        break;

    case QLOGIC_PCI_DEVICE_ID_1656:
        snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
            "Qlogic 25GbE PCI CNA Adapter-Ethernet Function",
            QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
            QLNX_VERSION_BUILD);
        device_set_desc_copy(dev, qlnx_dev_str);
        break;

    case QLOGIC_PCI_DEVICE_ID_1654:
        snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
            "Qlogic 50GbE PCI CNA Adapter-Ethernet Function",
            QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
            QLNX_VERSION_BUILD);
        device_set_desc_copy(dev, qlnx_dev_str);
        break;

    default:
        return (ENXIO);
    }

    return (BUS_PROBE_DEFAULT);
}
static void
qlnx_sp_intr(void *arg)
{
    struct ecore_hwfn *p_hwfn;
    qlnx_host_t *ha;
    int i;

    p_hwfn = arg;

    if (p_hwfn == NULL) {
        printf("%s: spurious slowpath intr\n", __func__);
        return;
    }

    ha = (qlnx_host_t *)p_hwfn->p_dev;

    QL_DPRINT2(ha, "enter\n");

    for (i = 0; i < ha->cdev.num_hwfns; i++) {
        if (&ha->cdev.hwfns[i] == p_hwfn) {
            taskqueue_enqueue(ha->sp_taskqueue[i], &ha->sp_task[i]);
            break;
        }
    }

    QL_DPRINT2(ha, "exit\n");

    return;
}
static void
qlnx_sp_taskqueue(void *context, int pending)
{
    struct ecore_hwfn *p_hwfn;

    p_hwfn = context;

    if (p_hwfn != NULL) {
        qlnx_sp_isr(p_hwfn);
    }

    return;
}
static int
qlnx_create_sp_taskqueues(qlnx_host_t *ha)
{
    int i;
    uint8_t tq_name[32];

    for (i = 0; i < ha->cdev.num_hwfns; i++) {

        struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];

        bzero(tq_name, sizeof (tq_name));
        snprintf(tq_name, sizeof (tq_name), "ql_sp_tq_%d", i);

        TASK_INIT(&ha->sp_task[i], 0, qlnx_sp_taskqueue, p_hwfn);

        ha->sp_taskqueue[i] = taskqueue_create_fast(tq_name, M_NOWAIT,
                taskqueue_thread_enqueue, &ha->sp_taskqueue[i]);
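
        /*
         * Editorial note: taskqueue_thread_enqueue(9) requires its context
         * argument to be a pointer to the variable that holds the taskqueue
         * pointer (here &ha->sp_taskqueue[i]), which it dereferences on
         * every enqueue.
         */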
        if (ha->sp_taskqueue[i] == NULL)
            return (-1);

        taskqueue_start_threads(&ha->sp_taskqueue[i], 1, PI_NET, "%s",
            tq_name);

        QL_DPRINT1(ha, "%p\n", ha->sp_taskqueue[i]);
    }

    return (0);
}

static void
qlnx_destroy_sp_taskqueues(qlnx_host_t *ha)
{
    int i;

    for (i = 0; i < ha->cdev.num_hwfns; i++) {

        if (ha->sp_taskqueue[i] != NULL) {
            taskqueue_drain(ha->sp_taskqueue[i], &ha->sp_task[i]);
            taskqueue_free(ha->sp_taskqueue[i]);
        }
    }

    return;
}
static void
qlnx_fp_taskqueue(void *context, int pending)
{
    struct qlnx_fastpath *fp;
    qlnx_host_t *ha;
    struct ifnet *ifp;
    struct mbuf *mp;
    int ret = -1;
    int lro_enable, tc;
    int rx_int = 0, total_rx_count = 0;
    struct thread *cthread;

    fp = context;

    cthread = curthread;

    thread_lock(cthread);

    if (!sched_is_bound(cthread))
        sched_bind(cthread, fp->rss_id);

    thread_unlock(cthread);

    ha = (qlnx_host_t *)fp->edev;

    ifp = ha->ifp;

    lro_enable = ha->ifp->if_capenable & IFCAP_LRO;

    rx_int = qlnx_rx_int(ha, fp, ha->rx_pkt_threshold, lro_enable);

    if (rx_int) {
        fp->rx_pkts += rx_int;
        total_rx_count += rx_int;
    }

#ifdef QLNX_SOFT_LRO
    {
        struct lro_ctrl *lro;

        lro = &fp->rxq->lro;

        if (lro_enable && total_rx_count) {

#if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)

            /* histogram of LRO batch sizes; ~N masks test count >= N+1 */
            if (ha->dbg_trace_lro_cnt) {
                if (lro->lro_mbuf_count & ~1023)
                    fp->lro_cnt_1024++;
                else if (lro->lro_mbuf_count & ~511)
                    fp->lro_cnt_512++;
                else if (lro->lro_mbuf_count & ~255)
                    fp->lro_cnt_256++;
                else if (lro->lro_mbuf_count & ~127)
                    fp->lro_cnt_128++;
                else if (lro->lro_mbuf_count & ~63)
                    fp->lro_cnt_64++;
            }

            tcp_lro_flush_all(lro);
#else
            struct lro_entry *queued;

            while ((!SLIST_EMPTY(&lro->lro_active))) {
                queued = SLIST_FIRST(&lro->lro_active);
                SLIST_REMOVE_HEAD(&lro->lro_active, next);
                tcp_lro_flush(lro, queued);
            }
#endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
        }
    }
#endif /* #ifdef QLNX_SOFT_LRO */
    ecore_sb_update_sb_idx(fp->sb_info);

    mtx_lock(&fp->tx_mtx);

    if (((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
        IFF_DRV_RUNNING) || (!ha->link_up)) {

        mtx_unlock(&fp->tx_mtx);
        goto qlnx_fp_taskqueue_exit;
    }

    for (tc = 0; tc < ha->num_tc; tc++) {
        (void)qlnx_tx_int(ha, fp, fp->txq[tc]);
    }

    mp = drbr_peek(ifp, fp->tx_br);

    while (mp != NULL) {

        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
            ret = qlnx_send(ha, fp, &mp);
        } else {
            ret = -1;
        }

        if (ret) {
            if (mp != NULL)
                drbr_putback(ifp, fp->tx_br, mp);
            else {
                fp->tx_pkts_processed++;
                drbr_advance(ifp, fp->tx_br);
            }

            mtx_unlock(&fp->tx_mtx);

            goto qlnx_fp_taskqueue_exit;
        } else {
            drbr_advance(ifp, fp->tx_br);
            fp->tx_pkts_transmitted++;
            fp->tx_pkts_processed++;
        }

        if (fp->tx_ring_full)
            break;

        mp = drbr_peek(ifp, fp->tx_br);
    }

    for (tc = 0; tc < ha->num_tc; tc++) {
        (void)qlnx_tx_int(ha, fp, fp->txq[tc]);
    }

    mtx_unlock(&fp->tx_mtx);
qlnx_fp_taskqueue_exit:

    if (fp->fp_taskqueue != NULL)
        taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);

    if (fp->tx_ring_full) {
        qlnx_mdelay(__func__, 100);
    }
    ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);

    QL_DPRINT2(ha, "exit ret = %d\n", ret);

    return;
}
static int
qlnx_create_fp_taskqueues(qlnx_host_t *ha)
{
    int i;
    uint8_t tq_name[32];
    struct qlnx_fastpath *fp;

    for (i = 0; i < ha->num_rss; i++) {

        fp = &ha->fp_array[i];

        bzero(tq_name, sizeof (tq_name));
        snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i);

        TASK_INIT(&fp->fp_task, 0, qlnx_fp_taskqueue, fp);

        fp->fp_taskqueue = taskqueue_create_fast(tq_name, M_NOWAIT,
                    taskqueue_thread_enqueue,
                    &fp->fp_taskqueue);

        if (fp->fp_taskqueue == NULL)
            return (-1);

        taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
            tq_name);

        QL_DPRINT1(ha, "%p\n", fp->fp_taskqueue);
    }

    return (0);
}
static void
qlnx_destroy_fp_taskqueues(qlnx_host_t *ha)
{
    int i;
    struct qlnx_fastpath *fp;

    for (i = 0; i < ha->num_rss; i++) {

        fp = &ha->fp_array[i];

        if (fp->fp_taskqueue != NULL) {

            taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
            taskqueue_free(fp->fp_taskqueue);
            fp->fp_taskqueue = NULL;
        }
    }

    return;
}
static void
qlnx_drain_fp_taskqueues(qlnx_host_t *ha)
{
    int i;
    struct qlnx_fastpath *fp;

    for (i = 0; i < ha->num_rss; i++) {

        fp = &ha->fp_array[i];

        if (fp->fp_taskqueue != NULL) {

            taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
        }
    }

    return;
}
/*
 * Name: qlnx_pci_attach
 * Function: attaches the device to the operating system
 */
static int
qlnx_pci_attach(device_t dev)
{
    qlnx_host_t *ha = NULL;
    uint32_t rsrc_len_reg = 0;
    uint32_t rsrc_len_dbells = 0;
    uint32_t rsrc_len_msix = 0;
    int i;
    uint32_t mfw_ver;

    if ((ha = device_get_softc(dev)) == NULL) {
        device_printf(dev, "cannot get softc\n");
        return (ENOMEM);
    }

    memset(ha, 0, sizeof (qlnx_host_t));

    if (qlnx_valid_device(dev) != 0) {
        device_printf(dev, "device is not valid device\n");
        return (ENXIO);
    }
    ha->pci_func = pci_get_function(dev);

    ha->pci_dev = dev;

    mtx_init(&ha->hw_lock, "qlnx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);

    ha->flags.lock_init = 1;

    pci_enable_busmaster(dev);
    ha->reg_rid = PCIR_BAR(0);
    ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
                RF_ACTIVE);

    if (ha->pci_reg == NULL) {
        device_printf(dev, "unable to map BAR0\n");
        goto qlnx_pci_attach_err;
    }

    rsrc_len_reg = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
                ha->reg_rid);

    ha->dbells_rid = PCIR_BAR(2);
    ha->pci_dbells = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
                &ha->dbells_rid, RF_ACTIVE);

    if (ha->pci_dbells == NULL) {
        device_printf(dev, "unable to map BAR1\n");
        goto qlnx_pci_attach_err;
    }

    rsrc_len_dbells = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
                ha->dbells_rid);

    ha->dbells_phys_addr = (uint64_t)
        bus_get_resource_start(dev, SYS_RES_MEMORY, ha->dbells_rid);
    ha->dbells_size = rsrc_len_dbells;

    ha->msix_rid = PCIR_BAR(4);
    ha->msix_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
                &ha->msix_rid, RF_ACTIVE);

    if (ha->msix_bar == NULL) {
        device_printf(dev, "unable to map BAR2\n");
        goto qlnx_pci_attach_err;
    }

    rsrc_len_msix = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
                ha->msix_rid);
    if (qlnx_alloc_parent_dma_tag(ha))
        goto qlnx_pci_attach_err;

    if (qlnx_alloc_tx_dma_tag(ha))
        goto qlnx_pci_attach_err;

    if (qlnx_alloc_rx_dma_tag(ha))
        goto qlnx_pci_attach_err;

    if (qlnx_init_hw(ha) != 0)
        goto qlnx_pci_attach_err;
    /*
     * Allocate MSI-x vectors
     */
    ha->num_rss = QLNX_MAX_RSS;
    ha->num_tc = QLNX_MAX_TC;

    ha->msix_count = pci_msix_count(dev);

    if (ha->msix_count > (mp_ncpus + ha->cdev.num_hwfns))
        ha->msix_count = mp_ncpus + ha->cdev.num_hwfns;

    if (!ha->msix_count ||
        (ha->msix_count < (ha->cdev.num_hwfns + 1 ))) {
        device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
            ha->msix_count);
        goto qlnx_pci_attach_err;
    }

    if (ha->msix_count > (ha->num_rss + ha->cdev.num_hwfns ))
        ha->msix_count = ha->num_rss + ha->cdev.num_hwfns;

    ha->num_rss = ha->msix_count - ha->cdev.num_hwfns;
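
    /*
     * Worked example of the MSI-X budgeting above (illustrative numbers,
     * not from the source): with mp_ncpus = 8 and num_hwfns = 2 the
     * request is capped at 10 vectors; at least num_hwfns + 1 = 3 must
     * be available, and after reserving one slowpath vector per hwfn the
     * remainder, 10 - 2 = 8, becomes the number of RSS fastpath queues.
     */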
    QL_DPRINT1(ha, "\n\t\t\tpci_reg [%p, 0x%08x 0x%08x]"
        "\n\t\t\tdbells [%p, 0x%08x 0x%08x]"
        "\n\t\t\tmsix [%p, 0x%08x 0x%08x 0x%x 0x%x]"
        "\n\t\t\t[ncpus = %d][num_rss = 0x%x] [num_tc = 0x%x]\n",
        ha->pci_reg, rsrc_len_reg,
        ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
        ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
        ha->msix_count, mp_ncpus, ha->num_rss, ha->num_tc);

    if (pci_alloc_msix(dev, &ha->msix_count)) {
        device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
            ha->msix_count);
        goto qlnx_pci_attach_err;
    }
    /*
     * Initialize slow path interrupt and task queue
     */
    if (qlnx_create_sp_taskqueues(ha) != 0)
        goto qlnx_pci_attach_err;

    for (i = 0; i < ha->cdev.num_hwfns; i++) {

        struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];

        ha->sp_irq_rid[i] = i + 1;
        ha->sp_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ,
                &ha->sp_irq_rid[i],
                (RF_ACTIVE | RF_SHAREABLE));
        if (ha->sp_irq[i] == NULL) {
            device_printf(dev,
                "could not allocate mbx interrupt\n");
            goto qlnx_pci_attach_err;
        }

        if (bus_setup_intr(dev, ha->sp_irq[i],
                (INTR_TYPE_NET | INTR_MPSAFE), NULL,
                qlnx_sp_intr, p_hwfn, &ha->sp_handle[i])) {
            device_printf(dev,
                "could not setup slow path interrupt\n");
            goto qlnx_pci_attach_err;
        }

        QL_DPRINT1(ha, "p_hwfn [%p] sp_irq_rid %d"
            " sp_irq %p sp_handle %p\n", p_hwfn,
            ha->sp_irq_rid[i], ha->sp_irq[i], ha->sp_handle[i]);
    }
    /*
     * initialize fast path interrupt
     */
    if (qlnx_create_fp_taskqueues(ha) != 0)
        goto qlnx_pci_attach_err;

    for (i = 0; i < ha->num_rss; i++) {
        ha->irq_vec[i].rss_idx = i;
        ha->irq_vec[i].ha = ha;
        ha->irq_vec[i].irq_rid = (1 + ha->cdev.num_hwfns) + i;

        ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
                &ha->irq_vec[i].irq_rid,
                (RF_ACTIVE | RF_SHAREABLE));

        if (ha->irq_vec[i].irq == NULL) {
            device_printf(dev,
                "could not allocate interrupt[%d]\n", i);
            goto qlnx_pci_attach_err;
        }

        if (qlnx_alloc_tx_br(ha, &ha->fp_array[i])) {
            device_printf(dev, "could not allocate tx_br[%d]\n", i);
            goto qlnx_pci_attach_err;
        }
    }
    callout_init(&ha->qlnx_callout, 1);
    ha->flags.callout_init = 1;

    for (i = 0; i < ha->cdev.num_hwfns; i++) {

        if (qlnx_grc_dumpsize(ha, &ha->grcdump_size[i], i) != 0)
            goto qlnx_pci_attach_err;
        if (ha->grcdump_size[i] == 0)
            goto qlnx_pci_attach_err;

        ha->grcdump_size[i] = ha->grcdump_size[i] << 2;
        QL_DPRINT1(ha, "grcdump_size[%d] = 0x%08x\n",
            i, ha->grcdump_size[i]);

        ha->grcdump[i] = qlnx_zalloc(ha->grcdump_size[i]);
        if (ha->grcdump[i] == NULL) {
            device_printf(dev, "grcdump alloc[%d] failed\n", i);
            goto qlnx_pci_attach_err;
        }

        if (qlnx_idle_chk_size(ha, &ha->idle_chk_size[i], i) != 0)
            goto qlnx_pci_attach_err;
        if (ha->idle_chk_size[i] == 0)
            goto qlnx_pci_attach_err;

        ha->idle_chk_size[i] = ha->idle_chk_size[i] << 2;
        QL_DPRINT1(ha, "idle_chk_size[%d] = 0x%08x\n",
            i, ha->idle_chk_size[i]);

        ha->idle_chk[i] = qlnx_zalloc(ha->idle_chk_size[i]);

        if (ha->idle_chk[i] == NULL) {
            device_printf(dev, "idle_chk alloc failed\n");
            goto qlnx_pci_attach_err;
        }
    }
    if (qlnx_slowpath_start(ha) != 0) {

        qlnx_mdelay(__func__, 1000);
        qlnx_trigger_dump(ha);

        goto qlnx_pci_attach_err0;
    } else
        ha->flags.slowpath_start = 1;

    if (qlnx_get_flash_size(ha, &ha->flash_size) != 0) {
        qlnx_mdelay(__func__, 1000);
        qlnx_trigger_dump(ha);

        goto qlnx_pci_attach_err0;
    }

    if (qlnx_get_mfw_version(ha, &mfw_ver) != 0) {
        qlnx_mdelay(__func__, 1000);
        qlnx_trigger_dump(ha);

        goto qlnx_pci_attach_err0;
    }
    snprintf(ha->mfw_ver, sizeof(ha->mfw_ver), "%d.%d.%d.%d",
        ((mfw_ver >> 24) & 0xFF), ((mfw_ver >> 16) & 0xFF),
        ((mfw_ver >> 8) & 0xFF), (mfw_ver & 0xFF));
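
    /*
     * Editorial note: mfw_ver packs one version component per byte, most
     * significant first; e.g. an illustrative value 0x08210a00 decodes as
     * "8.33.10.0" under the shifts and masks above.
     */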
    snprintf(ha->stormfw_ver, sizeof(ha->stormfw_ver), "%d.%d.%d.%d",
        FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
        FW_ENGINEERING_VERSION);

    QL_DPRINT1(ha, "STORM_FW version %s MFW version %s\n",
        ha->stormfw_ver, ha->mfw_ver);

    qlnx_init_ifnet(dev, ha);

    /*
     * add sysctls
     */
    qlnx_add_sysctls(ha);

qlnx_pci_attach_err0:
    /*
     * create ioctl device interface
     */
    if (qlnx_make_cdev(ha)) {
        device_printf(dev, "%s: ql_make_cdev failed\n", __func__);
        goto qlnx_pci_attach_err;
    }

    QL_DPRINT2(ha, "success\n");

    return (0);

qlnx_pci_attach_err:

    qlnx_release(ha);

    return (ENXIO);
}
/*
 * Name: qlnx_pci_detach
 * Function: Unhooks the device from the operating system
 */
static int
qlnx_pci_detach(device_t dev)
{
    qlnx_host_t *ha = NULL;

    if ((ha = device_get_softc(dev)) == NULL) {
        device_printf(dev, "cannot get softc\n");
        return (ENOMEM);
    }

    QLNX_LOCK(ha);
    qlnx_stop(ha);
    QLNX_UNLOCK(ha);

    qlnx_release(ha);

    return (0);
}
static int
qlnx_init_hw(qlnx_host_t *ha)
{
    int rval = 0;
    struct ecore_hw_prepare_params params;

    ecore_init_struct(&ha->cdev);

    /* ha->dp_module = ECORE_MSG_PROBE |
        ...
    ha->dp_level = ECORE_LEVEL_VERBOSE; */
    ha->dp_level = ECORE_LEVEL_NOTICE;

    ecore_init_dp(&ha->cdev, ha->dp_module, ha->dp_level, ha->pci_dev);

    ha->cdev.regview = ha->pci_reg;
    ha->cdev.doorbells = ha->pci_dbells;
    ha->cdev.db_phys_addr = ha->dbells_phys_addr;
    ha->cdev.db_size = ha->dbells_size;

    bzero(&params, sizeof (struct ecore_hw_prepare_params));

    ha->personality = ECORE_PCI_DEFAULT;

    params.personality = ha->personality;

    params.drv_resc_alloc = false;
    params.chk_reg_fifo = false;
    params.initiate_pf_flr = true;

    ecore_hw_prepare(&ha->cdev, &params);

    qlnx_set_id(&ha->cdev, qlnx_name_str, qlnx_ver_str);

    return (rval);
}
static void
qlnx_release(qlnx_host_t *ha)
{
    device_t dev;
    int i;

    dev = ha->pci_dev;

    QL_DPRINT2(ha, "enter\n");

    for (i = 0; i < QLNX_MAX_HW_FUNCS; i++) {
        if (ha->idle_chk[i] != NULL) {
            free(ha->idle_chk[i], M_QLNXBUF);
            ha->idle_chk[i] = NULL;
        }

        if (ha->grcdump[i] != NULL) {
            free(ha->grcdump[i], M_QLNXBUF);
            ha->grcdump[i] = NULL;
        }
    }

    if (ha->flags.callout_init)
        callout_drain(&ha->qlnx_callout);

    if (ha->flags.slowpath_start) {
        qlnx_slowpath_stop(ha);
    }

    ecore_hw_remove(&ha->cdev);

    if (ha->ifp != NULL)
        ether_ifdetach(ha->ifp);

    qlnx_free_tx_dma_tag(ha);

    qlnx_free_rx_dma_tag(ha);

    qlnx_free_parent_dma_tag(ha);

    for (i = 0; i < ha->num_rss; i++) {
        struct qlnx_fastpath *fp = &ha->fp_array[i];

        if (ha->irq_vec[i].handle) {
            (void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
                    ha->irq_vec[i].handle);
        }

        if (ha->irq_vec[i].irq) {
            (void)bus_release_resource(dev, SYS_RES_IRQ,
                ha->irq_vec[i].irq_rid,
                ha->irq_vec[i].irq);
        }

        qlnx_free_tx_br(ha, fp);
    }
    qlnx_destroy_fp_taskqueues(ha);

    for (i = 0; i < ha->cdev.num_hwfns; i++) {
        if (ha->sp_handle[i])
            (void)bus_teardown_intr(dev, ha->sp_irq[i],
                    ha->sp_handle[i]);

        if (ha->sp_irq[i])
            (void) bus_release_resource(dev, SYS_RES_IRQ,
                ha->sp_irq_rid[i], ha->sp_irq[i]);
    }

    qlnx_destroy_sp_taskqueues(ha);

    if (ha->msix_count)
        pci_release_msi(dev);

    if (ha->flags.lock_init) {
        mtx_destroy(&ha->hw_lock);
    }

    if (ha->pci_reg)
        (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
                ha->pci_reg);

    if (ha->pci_dbells)
        (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->dbells_rid,
                ha->pci_dbells);

    if (ha->msix_bar)
        (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->msix_rid,
                ha->msix_bar);

    QL_DPRINT2(ha, "exit\n");

    return;
}
static void
qlnx_trigger_dump(qlnx_host_t *ha)
{
    int i;

    if (ha->ifp != NULL)
        ha->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);

    QL_DPRINT2(ha, "enter\n");

    for (i = 0; i < ha->cdev.num_hwfns; i++) {
        qlnx_grc_dump(ha, &ha->grcdump_dwords[i], i);
        qlnx_idle_chk(ha, &ha->idle_chk_dwords[i], i);
    }

    QL_DPRINT2(ha, "exit\n");

    return;
}
static int
qlnx_trigger_dump_sysctl(SYSCTL_HANDLER_ARGS)
{
    int err, ret = 0;
    qlnx_host_t *ha;

    err = sysctl_handle_int(oidp, &ret, 0, req);

    if (err || !req->newptr)
        return (err);

    if (ret == 1) {
        ha = (qlnx_host_t *)arg1;
        qlnx_trigger_dump(ha);
    }
    return (err);
}
static int
qlnx_set_tx_coalesce(SYSCTL_HANDLER_ARGS)
{
    int err, i, ret = 0, usecs = 0;
    qlnx_host_t *ha;
    struct ecore_hwfn *p_hwfn;
    struct qlnx_fastpath *fp;

    err = sysctl_handle_int(oidp, &usecs, 0, req);

    if (err || !req->newptr || !usecs || (usecs > 255))
        return (err);

    ha = (qlnx_host_t *)arg1;

    for (i = 0; i < ha->num_rss; i++) {

        p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];

        fp = &ha->fp_array[i];

        if (fp->txq[0]->handle != NULL) {
            ret = ecore_set_queue_coalesce(p_hwfn, 0,
                    (uint16_t)usecs, fp->txq[0]->handle);
        }
    }

    if (!ret)
        ha->tx_coalesce_usecs = (uint8_t)usecs;

    return (err);
}
static int
qlnx_set_rx_coalesce(SYSCTL_HANDLER_ARGS)
{
    int err, i, ret = 0, usecs = 0;
    qlnx_host_t *ha;
    struct ecore_hwfn *p_hwfn;
    struct qlnx_fastpath *fp;

    err = sysctl_handle_int(oidp, &usecs, 0, req);

    if (err || !req->newptr || !usecs || (usecs > 255))
        return (err);

    ha = (qlnx_host_t *)arg1;

    for (i = 0; i < ha->num_rss; i++) {

        p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];

        fp = &ha->fp_array[i];

        if (fp->rxq->handle != NULL) {
            ret = ecore_set_queue_coalesce(p_hwfn, (uint16_t)usecs,
                    0, fp->rxq->handle);
        }
    }

    if (!ret)
        ha->rx_coalesce_usecs = (uint8_t)usecs;

    return (err);
}
static void
qlnx_add_sp_stats_sysctls(qlnx_host_t *ha)
{
    struct sysctl_ctx_list *ctx;
    struct sysctl_oid_list *children;
    struct sysctl_oid *ctx_oid;

    ctx = device_get_sysctl_ctx(ha->pci_dev);
    children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

    ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "spstat",
            CTLFLAG_RD, NULL, "spstat");
    children = SYSCTL_CHILDREN(ctx_oid);

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "sp_interrupts",
        CTLFLAG_RD, &ha->sp_interrupts,
        "No. of slowpath interrupts");

    return;
}
static void
qlnx_add_fp_stats_sysctls(qlnx_host_t *ha)
{
    struct sysctl_ctx_list *ctx;
    struct sysctl_oid_list *children;
    struct sysctl_oid_list *node_children;
    struct sysctl_oid *ctx_oid;
    int i, j;
    uint8_t name_str[16];

    ctx = device_get_sysctl_ctx(ha->pci_dev);
    children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

    ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fpstat",
            CTLFLAG_RD, NULL, "fpstat");
    children = SYSCTL_CHILDREN(ctx_oid);

    for (i = 0; i < ha->num_rss; i++) {

        bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
        snprintf(name_str, sizeof(name_str), "%d", i);

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
            CTLFLAG_RD, NULL, name_str);
        node_children = SYSCTL_CHILDREN(ctx_oid);

        /* Tx related */

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tx_pkts_processed",
            CTLFLAG_RD, &ha->fp_array[i].tx_pkts_processed,
            "No. of packets processed for transmission");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tx_pkts_freed",
            CTLFLAG_RD, &ha->fp_array[i].tx_pkts_freed,
            "No. of freed packets");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tx_pkts_transmitted",
            CTLFLAG_RD, &ha->fp_array[i].tx_pkts_transmitted,
            "No. of transmitted packets");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tx_pkts_completed",
            CTLFLAG_RD, &ha->fp_array[i].tx_pkts_completed,
            "No. of transmit completions");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tx_lso_wnd_min_len",
            CTLFLAG_RD, &ha->fp_array[i].tx_lso_wnd_min_len,
            "tx_lso_wnd_min_len");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tx_defrag",
            CTLFLAG_RD, &ha->fp_array[i].tx_defrag,
            "tx_defrag");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tx_nsegs_gt_elem_left",
            CTLFLAG_RD, &ha->fp_array[i].tx_nsegs_gt_elem_left,
            "tx_nsegs_gt_elem_left");

        SYSCTL_ADD_UINT(ctx, node_children,
            OID_AUTO, "tx_tso_max_nsegs",
            CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_nsegs,
            ha->fp_array[i].tx_tso_max_nsegs, "tx_tso_max_nsegs");

        SYSCTL_ADD_UINT(ctx, node_children,
            OID_AUTO, "tx_tso_min_nsegs",
            CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_nsegs,
            ha->fp_array[i].tx_tso_min_nsegs, "tx_tso_min_nsegs");

        SYSCTL_ADD_UINT(ctx, node_children,
            OID_AUTO, "tx_tso_max_pkt_len",
            CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_pkt_len,
            ha->fp_array[i].tx_tso_max_pkt_len,
            "tx_tso_max_pkt_len");

        SYSCTL_ADD_UINT(ctx, node_children,
            OID_AUTO, "tx_tso_min_pkt_len",
            CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_pkt_len,
            ha->fp_array[i].tx_tso_min_pkt_len,
            "tx_tso_min_pkt_len");
        for (j = 0; j < QLNX_FP_MAX_SEGS; j++) {

            bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
            snprintf(name_str, sizeof(name_str),
                "tx_pkts_nseg_%02d", (j+1));

            SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, name_str, CTLFLAG_RD,
                &ha->fp_array[i].tx_pkts[j], name_str);
        }

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_tx_nsegs_gt_elem_left",
            CTLFLAG_RD, &ha->fp_array[i].err_tx_nsegs_gt_elem_left,
            "err_tx_nsegs_gt_elem_left");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_tx_dmamap_create",
            CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_create,
            "err_tx_dmamap_create");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_tx_defrag_dmamap_load",
            CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag_dmamap_load,
            "err_tx_defrag_dmamap_load");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_tx_non_tso_max_seg",
            CTLFLAG_RD, &ha->fp_array[i].err_tx_non_tso_max_seg,
            "err_tx_non_tso_max_seg");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_tx_dmamap_load",
            CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_load,
            "err_tx_dmamap_load");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_tx_defrag",
            CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag,
            "err_tx_defrag");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_tx_free_pkt_null",
            CTLFLAG_RD, &ha->fp_array[i].err_tx_free_pkt_null,
            "err_tx_free_pkt_null");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_tx_cons_idx_conflict",
            CTLFLAG_RD, &ha->fp_array[i].err_tx_cons_idx_conflict,
            "err_tx_cons_idx_conflict");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "lro_cnt_64",
            CTLFLAG_RD, &ha->fp_array[i].lro_cnt_64,
            "lro_cnt_64");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "lro_cnt_128",
            CTLFLAG_RD, &ha->fp_array[i].lro_cnt_128,
            "lro_cnt_128");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "lro_cnt_256",
            CTLFLAG_RD, &ha->fp_array[i].lro_cnt_256,
            "lro_cnt_256");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "lro_cnt_512",
            CTLFLAG_RD, &ha->fp_array[i].lro_cnt_512,
            "lro_cnt_512");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "lro_cnt_1024",
            CTLFLAG_RD, &ha->fp_array[i].lro_cnt_1024,
            "lro_cnt_1024");

        /* Rx related */

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "rx_pkts",
            CTLFLAG_RD, &ha->fp_array[i].rx_pkts,
            "No. of received packets");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tpa_start",
            CTLFLAG_RD, &ha->fp_array[i].tpa_start,
            "No. of tpa_start packets");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tpa_cont",
            CTLFLAG_RD, &ha->fp_array[i].tpa_cont,
            "No. of tpa_cont packets");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tpa_end",
            CTLFLAG_RD, &ha->fp_array[i].tpa_end,
            "No. of tpa_end packets");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_m_getcl",
            CTLFLAG_RD, &ha->fp_array[i].err_m_getcl,
            "err_m_getcl");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_m_getjcl",
            CTLFLAG_RD, &ha->fp_array[i].err_m_getjcl,
            "err_m_getjcl");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_rx_hw_errors",
            CTLFLAG_RD, &ha->fp_array[i].err_rx_hw_errors,
            "err_rx_hw_errors");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_rx_alloc_errors",
            CTLFLAG_RD, &ha->fp_array[i].err_rx_alloc_errors,
            "err_rx_alloc_errors");
    }

    return;
}
static void
qlnx_add_hw_stats_sysctls(qlnx_host_t *ha)
{
    struct sysctl_ctx_list *ctx;
    struct sysctl_oid_list *children;
    struct sysctl_oid *ctx_oid;

    ctx = device_get_sysctl_ctx(ha->pci_dev);
    children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

    ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "hwstat",
            CTLFLAG_RD, NULL, "hwstat");
    children = SYSCTL_CHILDREN(ctx_oid);

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "no_buff_discards",
        CTLFLAG_RD, &ha->hw_stats.common.no_buff_discards,
        "No. of packets discarded due to lack of buffer");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "packet_too_big_discard",
        CTLFLAG_RD, &ha->hw_stats.common.packet_too_big_discard,
        "No. of packets discarded because packet was too big");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "ttl0_discard",
        CTLFLAG_RD, &ha->hw_stats.common.ttl0_discard,
        "ttl0_discard");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_ucast_bytes",
        CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_bytes,
        "rx_ucast_bytes");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_mcast_bytes",
        CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_bytes,
        "rx_mcast_bytes");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_bcast_bytes",
        CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_bytes,
        "rx_bcast_bytes");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_ucast_pkts",
        CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_pkts,
        "rx_ucast_pkts");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_mcast_pkts",
        CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_pkts,
        "rx_mcast_pkts");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_bcast_pkts",
        CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_pkts,
        "rx_bcast_pkts");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "mftag_filter_discards",
        CTLFLAG_RD, &ha->hw_stats.common.mftag_filter_discards,
        "mftag_filter_discards");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "mac_filter_discards",
        CTLFLAG_RD, &ha->hw_stats.common.mac_filter_discards,
        "mac_filter_discards");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_ucast_bytes",
        CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_bytes,
        "tx_ucast_bytes");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_mcast_bytes",
        CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_bytes,
        "tx_mcast_bytes");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_bcast_bytes",
        CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_bytes,
        "tx_bcast_bytes");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_ucast_pkts",
        CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_pkts,
        "tx_ucast_pkts");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_mcast_pkts",
        CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_pkts,
        "tx_mcast_pkts");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_bcast_pkts",
        CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_pkts,
        "tx_bcast_pkts");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_err_drop_pkts",
        CTLFLAG_RD, &ha->hw_stats.common.tx_err_drop_pkts,
        "tx_err_drop_pkts");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tpa_coalesced_pkts",
        CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_pkts,
        "tpa_coalesced_pkts");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tpa_coalesced_events",
        CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_events,
        "tpa_coalesced_events");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tpa_aborts_num",
        CTLFLAG_RD, &ha->hw_stats.common.tpa_aborts_num,
        "tpa_aborts_num");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tpa_not_coalesced_pkts",
        CTLFLAG_RD, &ha->hw_stats.common.tpa_not_coalesced_pkts,
        "tpa_not_coalesced_pkts");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tpa_coalesced_bytes",
        CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_bytes,
        "tpa_coalesced_bytes");
    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_64_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.common.rx_64_byte_packets,
        "rx_64_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_65_to_127_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.common.rx_65_to_127_byte_packets,
        "rx_65_to_127_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_128_to_255_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.common.rx_128_to_255_byte_packets,
        "rx_128_to_255_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_256_to_511_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.common.rx_256_to_511_byte_packets,
        "rx_256_to_511_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_512_to_1023_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.common.rx_512_to_1023_byte_packets,
        "rx_512_to_1023_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_1024_to_1518_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.common.rx_1024_to_1518_byte_packets,
        "rx_1024_to_1518_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_1519_to_1522_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_1522_byte_packets,
        "rx_1519_to_1522_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_1523_to_2047_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_2047_byte_packets,
        "rx_1523_to_2047_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_2048_to_4095_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.bb.rx_2048_to_4095_byte_packets,
        "rx_2048_to_4095_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_4096_to_9216_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.bb.rx_4096_to_9216_byte_packets,
        "rx_4096_to_9216_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_9217_to_16383_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.bb.rx_9217_to_16383_byte_packets,
        "rx_9217_to_16383_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_crc_errors",
        CTLFLAG_RD, &ha->hw_stats.common.rx_crc_errors,
        "rx_crc_errors");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_mac_crtl_frames",
        CTLFLAG_RD, &ha->hw_stats.common.rx_mac_crtl_frames,
        "rx_mac_crtl_frames");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_pause_frames",
        CTLFLAG_RD, &ha->hw_stats.common.rx_pause_frames,
        "rx_pause_frames");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_pfc_frames",
        CTLFLAG_RD, &ha->hw_stats.common.rx_pfc_frames,
        "rx_pfc_frames");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_align_errors",
        CTLFLAG_RD, &ha->hw_stats.common.rx_align_errors,
        "rx_align_errors");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_carrier_errors",
        CTLFLAG_RD, &ha->hw_stats.common.rx_carrier_errors,
        "rx_carrier_errors");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_oversize_packets",
        CTLFLAG_RD, &ha->hw_stats.common.rx_oversize_packets,
        "rx_oversize_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_jabbers",
        CTLFLAG_RD, &ha->hw_stats.common.rx_jabbers,
        "rx_jabbers");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_undersize_packets",
        CTLFLAG_RD, &ha->hw_stats.common.rx_undersize_packets,
        "rx_undersize_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_fragments",
        CTLFLAG_RD, &ha->hw_stats.common.rx_fragments,
        "rx_fragments");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_64_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.common.tx_64_byte_packets,
        "tx_64_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_65_to_127_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.common.tx_65_to_127_byte_packets,
        "tx_65_to_127_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_128_to_255_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.common.tx_128_to_255_byte_packets,
        "tx_128_to_255_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_256_to_511_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.common.tx_256_to_511_byte_packets,
        "tx_256_to_511_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_512_to_1023_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.common.tx_512_to_1023_byte_packets,
        "tx_512_to_1023_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_1024_to_1518_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.common.tx_1024_to_1518_byte_packets,
        "tx_1024_to_1518_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_1519_to_2047_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.bb.tx_1519_to_2047_byte_packets,
        "tx_1519_to_2047_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_2048_to_4095_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.bb.tx_2048_to_4095_byte_packets,
        "tx_2048_to_4095_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_4096_to_9216_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.bb.tx_4096_to_9216_byte_packets,
        "tx_4096_to_9216_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_9217_to_16383_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.bb.tx_9217_to_16383_byte_packets,
        "tx_9217_to_16383_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_pause_frames",
        CTLFLAG_RD, &ha->hw_stats.common.tx_pause_frames,
        "tx_pause_frames");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_pfc_frames",
        CTLFLAG_RD, &ha->hw_stats.common.tx_pfc_frames,
        "tx_pfc_frames");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_lpi_entry_count",
        CTLFLAG_RD, &ha->hw_stats.bb.tx_lpi_entry_count,
        "tx_lpi_entry_count");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_total_collisions",
        CTLFLAG_RD, &ha->hw_stats.bb.tx_total_collisions,
        "tx_total_collisions");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "brb_truncates",
        CTLFLAG_RD, &ha->hw_stats.common.brb_truncates,
        "brb_truncates");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "brb_discards",
        CTLFLAG_RD, &ha->hw_stats.common.brb_discards,
        "brb_discards");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_mac_bytes",
        CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bytes,
        "rx_mac_bytes");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_mac_uc_packets",
        CTLFLAG_RD, &ha->hw_stats.common.rx_mac_uc_packets,
        "rx_mac_uc_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_mac_mc_packets",
        CTLFLAG_RD, &ha->hw_stats.common.rx_mac_mc_packets,
        "rx_mac_mc_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_mac_bc_packets",
        CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bc_packets,
        "rx_mac_bc_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_mac_frames_ok",
        CTLFLAG_RD, &ha->hw_stats.common.rx_mac_frames_ok,
        "rx_mac_frames_ok");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_mac_bytes",
        CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bytes,
        "tx_mac_bytes");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_mac_uc_packets",
        CTLFLAG_RD, &ha->hw_stats.common.tx_mac_uc_packets,
        "tx_mac_uc_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_mac_mc_packets",
        CTLFLAG_RD, &ha->hw_stats.common.tx_mac_mc_packets,
        "tx_mac_mc_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_mac_bc_packets",
        CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bc_packets,
        "tx_mac_bc_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_mac_ctrl_frames",
        CTLFLAG_RD, &ha->hw_stats.common.tx_mac_ctrl_frames,
        "tx_mac_ctrl_frames");

    return;
}
static void
qlnx_add_sysctls(qlnx_host_t *ha)
{
    device_t dev = ha->pci_dev;
    struct sysctl_ctx_list *ctx;
    struct sysctl_oid_list *children;

    ctx = device_get_sysctl_ctx(dev);
    children = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

    qlnx_add_fp_stats_sysctls(ha);
    qlnx_add_sp_stats_sysctls(ha);
    qlnx_add_hw_stats_sysctls(ha);

    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Driver_Version",
        CTLFLAG_RD, qlnx_ver_str, 0,
        "Driver Version");

    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "STORMFW_Version",
        CTLFLAG_RD, ha->stormfw_ver, 0,
        "STORM Firmware Version");

    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "MFW_Version",
        CTLFLAG_RD, ha->mfw_ver, 0,
        "Management Firmware Version");

    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "personality", CTLFLAG_RD,
        &ha->personality, ha->personality,
        "\tpersonality = 0 => Ethernet Only\n"
        "\tpersonality = 3 => Ethernet and RoCE\n"
        "\tpersonality = 4 => Ethernet and iWARP\n"
        "\tpersonality = 6 => Default in Shared Memory\n");

    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "debug", CTLFLAG_RW,
        &ha->dbg_level, ha->dbg_level, "Debug Level");

    ha->dp_level = 0x01;
    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "dp_level", CTLFLAG_RW,
        &ha->dp_level, ha->dp_level, "DP Level");

    ha->dbg_trace_lro_cnt = 0;
    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "dbg_trace_lro_cnt", CTLFLAG_RW,
        &ha->dbg_trace_lro_cnt, ha->dbg_trace_lro_cnt,
        "Trace LRO Counts");

    ha->dbg_trace_tso_pkt_len = 0;
    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "dbg_trace_tso_pkt_len", CTLFLAG_RW,
        &ha->dbg_trace_tso_pkt_len, ha->dbg_trace_tso_pkt_len,
        "Trace TSO packet lengths");

    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "dp_module", CTLFLAG_RW,
        &ha->dp_module, ha->dp_module, "DP Module");

    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "err_inject", CTLFLAG_RW,
        &ha->err_inject, ha->err_inject, "Error Inject");

    ha->storm_stats_enable = 0;

    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "storm_stats_enable", CTLFLAG_RW,
        &ha->storm_stats_enable, ha->storm_stats_enable,
        "Enable Storm Statistics Gathering");

    ha->storm_stats_index = 0;

    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "storm_stats_index", CTLFLAG_RD,
        &ha->storm_stats_index, ha->storm_stats_index,
        "Enable Storm Statistics Gathering Current Index");

    ha->grcdump_taken = 0;
    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "grcdump_taken", CTLFLAG_RD,
        &ha->grcdump_taken, ha->grcdump_taken, "grcdump_taken");

    ha->idle_chk_taken = 0;
    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "idle_chk_taken", CTLFLAG_RD,
        &ha->idle_chk_taken, ha->idle_chk_taken, "idle_chk_taken");

    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "rx_coalesce_usecs", CTLFLAG_RD,
        &ha->rx_coalesce_usecs, ha->rx_coalesce_usecs,
        "rx_coalesce_usecs");

    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "tx_coalesce_usecs", CTLFLAG_RD,
        &ha->tx_coalesce_usecs, ha->tx_coalesce_usecs,
        "tx_coalesce_usecs");

    ha->rx_pkt_threshold = 128;
    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "rx_pkt_threshold", CTLFLAG_RW,
        &ha->rx_pkt_threshold, ha->rx_pkt_threshold,
        "No. of Rx Pkts to process at a time");

    ha->rx_jumbo_buf_eq_mtu = 0;
    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "rx_jumbo_buf_eq_mtu", CTLFLAG_RW,
        &ha->rx_jumbo_buf_eq_mtu, ha->rx_jumbo_buf_eq_mtu,
        "== 0 => Rx Jumbo buffers are capped to 4Kbytes\n"
        "otherwise Rx Jumbo buffers are set to >= MTU size\n");
    SYSCTL_ADD_PROC(ctx, children,
        OID_AUTO, "trigger_dump", CTLTYPE_INT | CTLFLAG_RW,
        (void *)ha, 0,
        qlnx_trigger_dump_sysctl, "I", "trigger_dump");
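
    /*
     * Usage example (editorial; assumes unit 0 of the "ql" device):
     *     sysctl dev.ql.0.trigger_dump=1
     * forces a GRC dump and idle-check capture via the handler above.
     */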
    SYSCTL_ADD_PROC(ctx, children,
        OID_AUTO, "set_rx_coalesce_usecs", CTLTYPE_INT | CTLFLAG_RW,
        (void *)ha, 0,
        qlnx_set_rx_coalesce, "I",
        "rx interrupt coalesce period microseconds");

    SYSCTL_ADD_PROC(ctx, children,
        OID_AUTO, "set_tx_coalesce_usecs", CTLTYPE_INT | CTLFLAG_RW,
        (void *)ha, 0,
        qlnx_set_tx_coalesce, "I",
        "tx interrupt coalesce period microseconds");
    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "err_illegal_intr", CTLFLAG_RD,
        &ha->err_illegal_intr, "err_illegal_intr");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "err_fp_null", CTLFLAG_RD,
        &ha->err_fp_null, "err_fp_null");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "err_get_proto_invalid_type", CTLFLAG_RD,
        &ha->err_get_proto_invalid_type, "err_get_proto_invalid_type");

    return;
}
/*****************************************************************************
 * Operating System Network Interface Functions
 *****************************************************************************/
static void
qlnx_init_ifnet(device_t dev, qlnx_host_t *ha)
{
    uint16_t device_id;
    struct ifnet *ifp;

    ifp = ha->ifp = if_alloc(IFT_ETHER);

    if (ifp == NULL)
        panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));

    if_initname(ifp, device_get_name(dev), device_get_unit(dev));

    device_id = pci_get_device(ha->pci_dev);

#if __FreeBSD_version >= 1000000

    if (device_id == QLOGIC_PCI_DEVICE_ID_1634)
        ifp->if_baudrate = IF_Gbps(40);
    else if (device_id == QLOGIC_PCI_DEVICE_ID_1656)
        ifp->if_baudrate = IF_Gbps(25);
    else if (device_id == QLOGIC_PCI_DEVICE_ID_1654)
        ifp->if_baudrate = IF_Gbps(50);
    else if (device_id == QLOGIC_PCI_DEVICE_ID_1644)
        ifp->if_baudrate = IF_Gbps(100);

    ifp->if_capabilities = IFCAP_LINKSTATE;
#else
    ifp->if_mtu = ETHERMTU;
    ifp->if_baudrate = (1 * 1000 * 1000 * 1000);

#endif /* #if __FreeBSD_version >= 1000000 */

    ifp->if_init = qlnx_init;
    ifp->if_softc = ha;
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifp->if_ioctl = qlnx_ioctl;
    ifp->if_transmit = qlnx_transmit;
    ifp->if_qflush = qlnx_qflush;

    IFQ_SET_MAXLEN(&ifp->if_snd, qlnx_get_ifq_snd_maxlen(ha));
    ifp->if_snd.ifq_drv_maxlen = qlnx_get_ifq_snd_maxlen(ha);
    IFQ_SET_READY(&ifp->if_snd);

#if __FreeBSD_version >= 1100036
    if_setgetcounterfn(ifp, qlnx_get_counter);
#endif

    ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
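
    /*
     * Editorial note: with the default MTU this works out to
     * 1500 + 14 + 4 = 1518 bytes (payload + Ethernet header + CRC).
     */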
    memcpy(ha->primary_mac, qlnx_get_mac_addr(ha), ETH_ALEN);
    ether_ifattach(ifp, ha->primary_mac);
    bcopy(IF_LLADDR(ha->ifp), ha->primary_mac, ETHER_ADDR_LEN);

    ifp->if_capabilities = IFCAP_HWCSUM;
    ifp->if_capabilities |= IFCAP_JUMBO_MTU;

    ifp->if_capabilities |= IFCAP_VLAN_MTU;
    ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
    ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
    ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
    ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
    ifp->if_capabilities |= IFCAP_TSO4;
    ifp->if_capabilities |= IFCAP_TSO6;
    ifp->if_capabilities |= IFCAP_LRO;

    ifp->if_capenable = ifp->if_capabilities;

    ifp->if_hwassist = CSUM_IP;
    ifp->if_hwassist |= CSUM_TCP | CSUM_UDP;
    ifp->if_hwassist |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6;
    ifp->if_hwassist |= CSUM_TSO;

    ifp->if_hdrlen = sizeof(struct ether_vlan_header);

    ifmedia_init(&ha->media, IFM_IMASK, qlnx_media_change,
        qlnx_media_status);

    if (device_id == QLOGIC_PCI_DEVICE_ID_1634) {
        ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_LR4), 0, NULL);
        ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_SR4), 0, NULL);
        ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_CR4), 0, NULL);
    } else if (device_id == QLOGIC_PCI_DEVICE_ID_1656) {
        ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_SR), 0, NULL);
        ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_CR), 0, NULL);
    } else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) {
        ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_KR2), 0, NULL);
        ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_CR2), 0, NULL);
    } else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) {
        ifmedia_add(&ha->media,
            (IFM_ETHER | QLNX_IFM_100G_LR4), 0, NULL);
        ifmedia_add(&ha->media,
            (IFM_ETHER | QLNX_IFM_100G_SR4), 0, NULL);
        ifmedia_add(&ha->media,
            (IFM_ETHER | QLNX_IFM_100G_CR4), 0, NULL);
    }

    ifmedia_add(&ha->media, (IFM_ETHER | IFM_FDX), 0, NULL);
    ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);

    ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));

    QL_DPRINT2(ha, "exit\n");

    return;
}
static void
qlnx_init_locked(qlnx_host_t *ha)
{
    struct ifnet *ifp = ha->ifp;

    QL_DPRINT1(ha, "Driver Initialization start \n");

    qlnx_stop(ha);

    if (qlnx_load(ha) == 0) {
        ifp->if_drv_flags |= IFF_DRV_RUNNING;
        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
    }

    return;
}
static void
qlnx_init(void *arg)
{
    qlnx_host_t *ha;

    ha = (qlnx_host_t *)arg;

    QL_DPRINT2(ha, "enter\n");

    QLNX_LOCK(ha);
    qlnx_init_locked(ha);
    QLNX_UNLOCK(ha);

    QL_DPRINT2(ha, "exit\n");

    return;
}
static int
qlnx_config_mcast_mac_addr(qlnx_host_t *ha, uint8_t *mac_addr, uint32_t add_mac)
{
    struct ecore_filter_mcast *mcast;
    struct ecore_dev *cdev;
    int rc;

    cdev = &ha->cdev;

    mcast = &ha->ecore_mcast;
    bzero(mcast, sizeof(struct ecore_filter_mcast));

    if (add_mac)
        mcast->opcode = ECORE_FILTER_ADD;
    else
        mcast->opcode = ECORE_FILTER_REMOVE;

    mcast->num_mc_addrs = 1;
    memcpy(mcast->mac, mac_addr, ETH_ALEN);

    rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);

    return (rc);
}
static int
qlnx_hw_add_mcast(qlnx_host_t *ha, uint8_t *mta)
{
    int i;

    for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {

        if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0)
            return 0; /* it's already been added */
    }

    for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {

        if ((ha->mcast[i].addr[0] == 0) &&
            (ha->mcast[i].addr[1] == 0) &&
            (ha->mcast[i].addr[2] == 0) &&
            (ha->mcast[i].addr[3] == 0) &&
            (ha->mcast[i].addr[4] == 0) &&
            (ha->mcast[i].addr[5] == 0)) {

            if (qlnx_config_mcast_mac_addr(ha, mta, 1))
                return (-1);

            bcopy(mta, ha->mcast[i].addr, ETH_ALEN);
            ha->nmcast++;

            return 0;
        }
    }
    return 0;
}
static int
qlnx_hw_del_mcast(qlnx_host_t *ha, uint8_t *mta)
{
    int i;

    for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
        if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) {

            if (qlnx_config_mcast_mac_addr(ha, mta, 0))
                return (-1);

            ha->mcast[i].addr[0] = 0;
            ha->mcast[i].addr[1] = 0;
            ha->mcast[i].addr[2] = 0;
            ha->mcast[i].addr[3] = 0;
            ha->mcast[i].addr[4] = 0;
            ha->mcast[i].addr[5] = 0;

            ha->nmcast--;

            return 0;
        }
    }
    return 0;
}
/*
 * Name: qlnx_hw_set_multi
 * Function: Sets the Multicast Addresses provided by the host O.S. into the
 *      hardware (for the given interface)
 */
static void
qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
    uint32_t add_mac)
{
    int i;

    for (i = 0; i < mcnt; i++) {

        if (add_mac) {
            if (qlnx_hw_add_mcast(ha, mta))
                break;
        } else {
            if (qlnx_hw_del_mcast(ha, mta))
                break;
        }

        mta += ETHER_HDR_LEN;
    }
    return;
}
#define QLNX_MCAST_ADDRS_SIZE (QLNX_MAX_NUM_MULTICAST_ADDRS * ETHER_HDR_LEN)
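
/*
 * Editorial note: multicast table entries are strided by ETHER_HDR_LEN
 * (14 bytes) rather than ETHER_ADDR_LEN (6); only the first 6 bytes of
 * each slot carry a MAC address, the remainder is unused padding.
 */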
static int
qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi)
{
    uint8_t mta[QLNX_MCAST_ADDRS_SIZE];
    struct ifmultiaddr *ifma;
    int mcnt = 0;
    struct ifnet *ifp = ha->ifp;
    int ret = 0;

    if_maddr_rlock(ifp);

    TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {

        if (ifma->ifma_addr->sa_family != AF_LINK)
            continue;

        if (mcnt == QLNX_MAX_NUM_MULTICAST_ADDRS)
            break;

        bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
            &mta[mcnt * ETHER_HDR_LEN], ETHER_HDR_LEN);

        mcnt++;
    }

    if_maddr_runlock(ifp);

    QLNX_LOCK(ha);
    qlnx_hw_set_multi(ha, mta, mcnt, add_multi);
    QLNX_UNLOCK(ha);

    return (ret);
}
static int
qlnx_set_promisc(qlnx_host_t *ha)
{
    int rc = 0;
    uint8_t filter;

    filter = ha->filter;
    filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
    filter |= ECORE_ACCEPT_UCAST_UNMATCHED;

    rc = qlnx_set_rx_accept_filter(ha, filter);
    return (rc);
}

static int
qlnx_set_allmulti(qlnx_host_t *ha)
{
    int rc = 0;
    uint8_t filter;

    filter = ha->filter;
    filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
    rc = qlnx_set_rx_accept_filter(ha, filter);

    return (rc);
}
static int
qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
    int ret = 0, mask;
    struct ifreq *ifr = (struct ifreq *)data;
    struct ifaddr *ifa = (struct ifaddr *)data;
    qlnx_host_t *ha;

    ha = (qlnx_host_t *)ifp->if_softc;

    switch (cmd) {
    case SIOCSIFADDR:
        QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx)\n", cmd);

        if (ifa->ifa_addr->sa_family == AF_INET) {
            ifp->if_flags |= IFF_UP;
            if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                QLNX_LOCK(ha);
                qlnx_init_locked(ha);
                QLNX_UNLOCK(ha);
            }
            QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
                cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr));

            arp_ifinit(ifp, ifa);
        } else
            ether_ioctl(ifp, cmd, data);
        break;

    case SIOCSIFMTU:
        QL_DPRINT4(ha, "SIOCSIFMTU (0x%lx)\n", cmd);

        if (ifr->ifr_mtu > QLNX_MAX_MTU) {
            ret = EINVAL;
        } else {
            QLNX_LOCK(ha);
            ifp->if_mtu = ifr->ifr_mtu;
            ha->max_frame_size =
                ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
            if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                qlnx_init_locked(ha);
            }
            QLNX_UNLOCK(ha);
        }
        break;

    case SIOCSIFFLAGS:
        QL_DPRINT4(ha, "SIOCSIFFLAGS (0x%lx)\n", cmd);

        QLNX_LOCK(ha);

        if (ifp->if_flags & IFF_UP) {
            if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                if ((ifp->if_flags ^ ha->if_flags) &
                    IFF_PROMISC) {
                    ret = qlnx_set_promisc(ha);
                } else if ((ifp->if_flags ^ ha->if_flags) &
                    IFF_ALLMULTI) {
                    ret = qlnx_set_allmulti(ha);
                }
            } else {
                ha->max_frame_size = ifp->if_mtu +
                    ETHER_HDR_LEN + ETHER_CRC_LEN;
                qlnx_init_locked(ha);
            }
        } else {
            if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                qlnx_stop(ha);
            ha->if_flags = ifp->if_flags;
        }

        QLNX_UNLOCK(ha);
        break;

    case SIOCADDMULTI:
        QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCADDMULTI", cmd);

        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
            if (qlnx_set_multi(ha, 1))
                ret = EINVAL;
        }
        break;

    case SIOCDELMULTI:
        QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCDELMULTI", cmd);

        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
            if (qlnx_set_multi(ha, 0))
                ret = EINVAL;
        }
        break;

    case SIOCSIFMEDIA:
    case SIOCGIFMEDIA:
        QL_DPRINT4(ha, "SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", cmd);

        ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
        break;

    case SIOCSIFCAP:

        mask = ifr->ifr_reqcap ^ ifp->if_capenable;

        QL_DPRINT4(ha, "SIOCSIFCAP (0x%lx)\n", cmd);

        if (mask & IFCAP_HWCSUM)
            ifp->if_capenable ^= IFCAP_HWCSUM;
        if (mask & IFCAP_TSO4)
            ifp->if_capenable ^= IFCAP_TSO4;
        if (mask & IFCAP_TSO6)
            ifp->if_capenable ^= IFCAP_TSO6;
        if (mask & IFCAP_VLAN_HWTAGGING)
            ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
        if (mask & IFCAP_VLAN_HWTSO)
            ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
        if (mask & IFCAP_LRO)
            ifp->if_capenable ^= IFCAP_LRO;

        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
            qlnx_init(ha);

        VLAN_CAPABILITIES(ifp);
        break;

#if (__FreeBSD_version >= 1100101)

    case SIOCGI2C:
    {
        struct ifi2creq i2c;
        struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0];
        struct ecore_ptt *p_ptt;

        ret = copyin(ifr->ifr_data, &i2c, sizeof(i2c));

        if (ret)
            break;

        if ((i2c.len > sizeof (i2c.data)) ||
            (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2)) {
            ret = EINVAL;
            break;
        }

        p_ptt = ecore_ptt_acquire(p_hwfn);

        if (!p_ptt) {
            QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
            ret = -1;
            break;
        }

        ret = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt,
            (ha->pci_func & 0x1), i2c.dev_addr, i2c.offset,
            i2c.len, &i2c.data[0]);

        ecore_ptt_release(p_hwfn, p_ptt);

        if (ret) {
            ret = -1;
            break;
        }

        ret = copyout(&i2c, ifr->ifr_data, sizeof(i2c));

        QL_DPRINT8(ha, "SIOCGI2C copyout ret = %d \
            len = %d addr = 0x%02x offset = 0x%04x \
            data[0..7]=0x%02x 0x%02x 0x%02x 0x%02x 0x%02x \
            0x%02x 0x%02x 0x%02x\n",
            ret, i2c.len, i2c.dev_addr, i2c.offset,
            i2c.data[0], i2c.data[1], i2c.data[2], i2c.data[3],
            i2c.data[4], i2c.data[5], i2c.data[6], i2c.data[7]);
        break;
    }
#endif /* #if (__FreeBSD_version >= 1100101) */

    default:
        QL_DPRINT4(ha, "default (0x%lx)\n", cmd);

        ret = ether_ioctl(ifp, cmd, data);
        break;
    }

    return (ret);
}
2398 qlnx_media_change(struct ifnet *ifp)
2401 struct ifmedia *ifm;
2404 ha = (qlnx_host_t *)ifp->if_softc;
2406 QL_DPRINT2(ha, "enter\n");
2410 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2413 QL_DPRINT2(ha, "exit\n");
2419 qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2423 ha = (qlnx_host_t *)ifp->if_softc;
2425 QL_DPRINT2(ha, "enter\n");
2427 ifmr->ifm_status = IFM_AVALID;
2428 ifmr->ifm_active = IFM_ETHER;
2431 ifmr->ifm_status |= IFM_ACTIVE;
2433 (IFM_FDX | qlnx_get_optics(ha, &ha->if_link));
2435 if (ha->if_link.link_partner_caps &
2436 (QLNX_LINK_CAP_Pause | QLNX_LINK_CAP_Asym_Pause))
2438 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE);
2441 QL_DPRINT2(ha, "exit (%s)\n", (ha->link_up ? "link_up" : "link_down"));
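/*
 * qlnx_free_tx_pkt() reclaims the completed packet at sw_tx_cons: it
 * syncs and unloads the DMA map, frees the mbuf, consumes the first BD
 * to learn how many BDs (nbds) the packet spanned, consumes the
 * remaining BDs from the PBL chain, and clears the software ring slot.
 */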
2448 qlnx_free_tx_pkt(qlnx_host_t *ha, struct qlnx_fastpath *fp,
2449 struct qlnx_tx_queue *txq)
2455 struct eth_tx_bd *tx_data_bd;
2456 struct eth_tx_1st_bd *first_bd;
2459 idx = txq->sw_tx_cons;
2460 mp = txq->sw_tx_ring[idx].mp;
2461 map = txq->sw_tx_ring[idx].map;
2463 if ((mp == NULL) || QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL)){
2465 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL);
2467 QL_DPRINT1(ha, "(mp == NULL) "
2469 " ecore_prod_idx = 0x%x"
2470 " ecore_cons_idx = 0x%x"
2471 " hw_bd_cons = 0x%x"
2472 " txq_db_last = 0x%x"
2473 " elem_left = 0x%x\n",
2475 ecore_chain_get_prod_idx(&txq->tx_pbl),
2476 ecore_chain_get_cons_idx(&txq->tx_pbl),
2477 le16toh(*txq->hw_cons_ptr),
2479 ecore_chain_get_elem_left(&txq->tx_pbl));
2481 fp->err_tx_free_pkt_null++;
2484 qlnx_trigger_dump(ha);
2489 QLNX_INC_OPACKETS((ha->ifp));
2490 QLNX_INC_OBYTES((ha->ifp), (mp->m_pkthdr.len));
2492 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_POSTWRITE);
2493 bus_dmamap_unload(ha->tx_tag, map);
2495 fp->tx_pkts_freed++;
2496 fp->tx_pkts_completed++;
2501 first_bd = (struct eth_tx_1st_bd *)ecore_chain_consume(&txq->tx_pbl);
2502 nbds = first_bd->data.nbds;
2504 // BD_SET_UNMAP_ADDR_LEN(first_bd, 0, 0);
2506 for (i = 1; i < nbds; i++) {
2507 tx_data_bd = ecore_chain_consume(&txq->tx_pbl);
2508 // BD_SET_UNMAP_ADDR_LEN(tx_data_bd, 0, 0);
2510 txq->sw_tx_ring[idx].flags = 0;
2511 txq->sw_tx_ring[idx].mp = NULL;
2512 txq->sw_tx_ring[idx].map = (bus_dmamap_t)0;
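/*
 * qlnx_tx_int() walks the TX completion path: it compares the HW
 * consumer index from the status block (hw_cons_ptr) against the
 * chain's consumer index and frees one packet per iteration until they
 * match. Both indices are 16 bits and wrap, so when hw_bd_cons has
 * wrapped past ecore_cons_idx the distance is computed modulo 2^16.
 * For example, hw_bd_cons = 0x0002 and ecore_cons_idx = 0xFFF0 gives
 * diff = 0x10000 - (0xFFF0 - 0x0002) = 0x12, i.e. 18 BDs pending. A
 * diff larger than TX_RING_SIZE can only mean a corrupted index.
 */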
2518 qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
2519 struct qlnx_tx_queue *txq)
2525 hw_bd_cons = le16toh(*txq->hw_cons_ptr);
2527 while (hw_bd_cons !=
2528 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
2530 if (hw_bd_cons < ecore_cons_idx) {
2531 diff = (1 << 16) - (ecore_cons_idx - hw_bd_cons);
2533 diff = hw_bd_cons - ecore_cons_idx;
2535 if ((diff > TX_RING_SIZE) ||
2536 QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF)){
2538 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF);
2540 QL_DPRINT1(ha, "(diff = 0x%x) "
2542 " ecore_prod_idx = 0x%x"
2543 " ecore_cons_idx = 0x%x"
2544 " hw_bd_cons = 0x%x"
2545 " txq_db_last = 0x%x"
2546 " elem_left = 0x%x\n",
2549 ecore_chain_get_prod_idx(&txq->tx_pbl),
2550 ecore_chain_get_cons_idx(&txq->tx_pbl),
2551 le16toh(*txq->hw_cons_ptr),
2553 ecore_chain_get_elem_left(&txq->tx_pbl));
2555 fp->err_tx_cons_idx_conflict++;
2558 qlnx_trigger_dump(ha);
2561 qlnx_free_tx_pkt(ha, fp, txq);
2563 txq->sw_tx_cons = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1);
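/*
 * qlnx_transmit() is the if_transmit entry point: it maps the mbuf's
 * flowid onto an RSS queue, enqueues the mbuf on that fastpath's
 * buf_ring (tx_br) and kicks the fastpath taskqueue to drain it.
 */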
2569 qlnx_transmit(struct ifnet *ifp, struct mbuf *mp)
2571 qlnx_host_t *ha = (qlnx_host_t *)ifp->if_softc;
2572 struct qlnx_fastpath *fp;
2573 int rss_id = 0, ret = 0;
2575 QL_DPRINT2(ha, "enter\n");
2577 #if __FreeBSD_version >= 1100000
2578 if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE)
2580 if (mp->m_flags & M_FLOWID)
2582 rss_id = (mp->m_pkthdr.flowid % ECORE_RSS_IND_TABLE_SIZE) %
2585 fp = &ha->fp_array[rss_id];
2587 if (fp->tx_br == NULL) {
2589 goto qlnx_transmit_exit;
2593 ret = drbr_enqueue(ifp, fp->tx_br, mp);
2596 if (fp->fp_taskqueue != NULL)
2597 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
2603 QL_DPRINT2(ha, "exit ret = %d\n", ret);
2608 qlnx_qflush(struct ifnet *ifp)
2611 struct qlnx_fastpath *fp;
2615 ha = (qlnx_host_t *)ifp->if_softc;
2617 QL_DPRINT2(ha, "enter\n");
2619 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
2621 fp = &ha->fp_array[rss_id];
2627 mtx_lock(&fp->tx_mtx);
2629 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
2630 fp->tx_pkts_freed++;
2633 mtx_unlock(&fp->tx_mtx);
2636 QL_DPRINT2(ha, "exit\n");
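/*
 * qlnx_txq_doorbell_wr32() posts a TX producer update to the doorbell
 * BAR: the register offset is derived from the doorbell address
 * relative to cdev->doorbells, and the write is followed by read
 * barriers on both the register and doorbell resources to make sure it
 * has been flushed out before the caller proceeds.
 */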
2642 qlnx_txq_doorbell_wr32(qlnx_host_t *ha, void *reg_addr, uint32_t value)
2644 struct ecore_dev *cdev;
2649 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)cdev->doorbells);
2651 bus_write_4(ha->pci_dbells, offset, value);
2652 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_READ);
2653 bus_barrier(ha->pci_dbells, 0, 0, BUS_SPACE_BARRIER_READ);
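/*
 * qlnx_tcp_offset() returns the byte offset of the TCP payload within
 * the frame (Ethernet header, optional VLAN encapsulation, IPv4/IPv6
 * header, plus the TCP header including options via th_off). When the
 * headers straddle the first mbuf they are copied into a small stack
 * buffer first. qlnx_send() uses this offset to split the LSO header
 * from the payload across BDs.
 */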
2659 qlnx_tcp_offset(qlnx_host_t *ha, struct mbuf *mp)
2661 struct ether_vlan_header *eh = NULL;
2662 struct ip *ip = NULL;
2663 struct ip6_hdr *ip6 = NULL;
2664 struct tcphdr *th = NULL;
2665 uint32_t ehdrlen = 0, ip_hlen = 0, offset = 0;
2668 uint8_t buf[sizeof(struct ip6_hdr)];
2672 eh = mtod(mp, struct ether_vlan_header *);
2674 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2675 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2676 etype = ntohs(eh->evl_proto);
2678 ehdrlen = ETHER_HDR_LEN;
2679 etype = ntohs(eh->evl_encap_proto);
2685 ip = (struct ip *)(mp->m_data + ehdrlen);
2687 ip_hlen = sizeof (struct ip);
2689 if (mp->m_len < (ehdrlen + ip_hlen)) {
2690 m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
2691 ip = (struct ip *)buf;
2694 th = (struct tcphdr *)(ip + 1);
2695 offset = ip_hlen + ehdrlen + (th->th_off << 2);
2698 case ETHERTYPE_IPV6:
2699 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2701 ip_hlen = sizeof(struct ip6_hdr);
2703 if (mp->m_len < (ehdrlen + ip_hlen)) {
2704 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
2706 ip6 = (struct ip6_hdr *)buf;
2708 th = (struct tcphdr *)(ip6 + 1);
2709 offset = ip_hlen + ehdrlen + (th->th_off << 2);
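/*
 * qlnx_tso_check() enforces the firmware's LSO windowing rule: within
 * any window of ETH_TX_LSO_WINDOW_BDS_NUM consecutive BDs (less the
 * BDs spanned by the TCP header) the mapped payload must cover at
 * least ETH_TX_LSO_WINDOW_MIN_LEN bytes. It first counts the segments
 * spanned by the header, then slides the window across the segment
 * list; a non-zero return tells the caller to defragment the chain.
 */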
2720 qlnx_tso_check(struct qlnx_fastpath *fp, bus_dma_segment_t *segs, int nsegs,
2724 uint32_t sum, nbds_in_hdr = 1;
2725 bus_dma_segment_t *t_segs = segs;
2727 /* count the number of segments spanned by TCP header */
2730 while ((i < nsegs) && (offset > t_segs->ds_len)) {
2732 offset = offset - t_segs->ds_len;
2737 while (nsegs >= QLNX_MAX_SEGMENTS_NON_TSO) {
2741 for (i = 0; i < (ETH_TX_LSO_WINDOW_BDS_NUM - nbds_in_hdr); i++){
2742 sum += segs->ds_len;
2746 if (sum < ETH_TX_LSO_WINDOW_MIN_LEN) {
2747 fp->tx_lso_wnd_min_len++;
2751 nsegs -= QLNX_MAX_SEGMENTS_NON_TSO;
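/*
 * qlnx_send() maps the mbuf chain for DMA and builds the BD chain for
 * one packet. If the mapping needs too many segments (EFBIG, more than
 * QLNX_MAX_SEGMENTS_NON_TSO without TSO, or an LSO window violation),
 * the chain is m_defrag()'ed and remapped. The first BD carries the
 * offload flags (IP/L4 checksum, VLAN insertion, LSO); for TSO the
 * header is carved off into the leading BD(s) so the second/third BDs
 * and the data BDs hold only payload. Finally the new producer index
 * is rung through the doorbell.
 */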
2758 qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct mbuf **m_headp)
2760 bus_dma_segment_t *segs;
2761 bus_dmamap_t map = 0;
2764 struct mbuf *m_head = *m_headp;
2769 struct qlnx_tx_queue *txq;
2771 struct eth_tx_1st_bd *first_bd;
2772 struct eth_tx_2nd_bd *second_bd;
2773 struct eth_tx_3rd_bd *third_bd;
2774 struct eth_tx_bd *tx_data_bd;
2777 uint32_t nbds_in_hdr = 0;
2778 uint32_t offset = 0;
2780 QL_DPRINT8(ha, "enter\n");
2792 if (fp->tx_ring_full) {
2793 elem_left = ecore_chain_get_elem_left(&txq->tx_pbl);
2795 if (elem_left < (TX_RING_SIZE >> 4))
2798 fp->tx_ring_full = 0;
2801 idx = txq->sw_tx_prod;
2803 map = txq->sw_tx_ring[idx].map;
2806 ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
2809 if (ha->dbg_trace_tso_pkt_len) {
2810 if (!fp->tx_tso_min_pkt_len) {
2811 fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len;
2812 fp->tx_tso_max_pkt_len = m_head->m_pkthdr.len;
2814 if (fp->tx_tso_min_pkt_len > m_head->m_pkthdr.len)
2815 fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len;
2816 if (fp->tx_tso_max_pkt_len < m_head->m_pkthdr.len)
2817 fp->tx_tso_max_pkt_len = m_head->m_pkthdr.len;
2821 if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
2822 offset = qlnx_tcp_offset(ha, m_head);
2824 if ((ret == EFBIG) ||
2825 ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && (
2826 (!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) ||
2827 ((m_head->m_pkthdr.csum_flags & CSUM_TSO) &&
2828 qlnx_tso_check(fp, segs, nsegs, offset))))) {
2832 QL_DPRINT8(ha, "EFBIG [%d]\n", m_head->m_pkthdr.len);
2836 m = m_defrag(m_head, M_NOWAIT);
2838 fp->err_tx_defrag++;
2839 fp->tx_pkts_freed++;
2842 QL_DPRINT1(ha, "m_defrag() = NULL [%d]\n", ret);
2849 if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
2850 segs, &nsegs, BUS_DMA_NOWAIT))) {
2852 fp->err_tx_defrag_dmamap_load++;
2855 "bus_dmamap_load_mbuf_sg failed0 [%d, %d]\n",
2856 ret, m_head->m_pkthdr.len);
2858 fp->tx_pkts_freed++;
2865 if ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) &&
2866 !(m_head->m_pkthdr.csum_flags & CSUM_TSO)) {
2868 fp->err_tx_non_tso_max_seg++;
2871 "(%d) nsegs too many for non-TSO [%d, %d]\n",
2872 ret, nsegs, m_head->m_pkthdr.len);
2874 fp->tx_pkts_freed++;
2880 if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
2881 offset = qlnx_tcp_offset(ha, m_head);
2885 fp->err_tx_dmamap_load++;
2887 QL_DPRINT1(ha, "bus_dmamap_load_mbuf_sg failed1 [%d, %d]\n",
2888 ret, m_head->m_pkthdr.len);
2889 fp->tx_pkts_freed++;
2895 QL_ASSERT(ha, (nsegs != 0), ("qlnx_send: empty packet"));
2897 if (ha->dbg_trace_tso_pkt_len) {
2898 if (nsegs < QLNX_FP_MAX_SEGS)
2899 fp->tx_pkts[(nsegs - 1)]++;
2901 fp->tx_pkts[(QLNX_FP_MAX_SEGS - 1)]++;
2904 if ((nsegs + QLNX_TX_ELEM_RESERVE) >
2905 (int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))) {
2907 QL_DPRINT1(ha, "(%d, 0x%x) insufficient BDs"
2908 " in chain[%d] trying to free packets\n",
2909 nsegs, elem_left, fp->rss_id);
2911 fp->tx_nsegs_gt_elem_left++;
2913 (void)qlnx_tx_int(ha, fp, txq);
2915 if ((nsegs + QLNX_TX_ELEM_RESERVE) > (int)(elem_left =
2916 ecore_chain_get_elem_left(&txq->tx_pbl))) {
2919 "(%d, 0x%x) insuffient BDs in chain[%d]\n",
2920 nsegs, elem_left, fp->rss_id);
2922 fp->err_tx_nsegs_gt_elem_left++;
2923 fp->tx_ring_full = 1;
2924 ha->storm_stats_enable = 1;
2929 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);
2931 txq->sw_tx_ring[idx].mp = m_head;
2933 first_bd = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
2935 memset(first_bd, 0, sizeof(*first_bd));
2937 first_bd->data.bd_flags.bitfields =
2938 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
2940 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, segs->ds_len);
2944 if (m_head->m_pkthdr.csum_flags & CSUM_IP) {
2945 first_bd->data.bd_flags.bitfields |=
2946 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
2949 if (m_head->m_pkthdr.csum_flags &
2950 (CSUM_UDP | CSUM_TCP | CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) {
2951 first_bd->data.bd_flags.bitfields |=
2952 (1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT);
2955 if (m_head->m_flags & M_VLANTAG) {
2956 first_bd->data.vlan = m_head->m_pkthdr.ether_vtag;
2957 first_bd->data.bd_flags.bitfields |=
2958 (1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT);
2961 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
2963 first_bd->data.bd_flags.bitfields |=
2964 (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
2965 first_bd->data.bd_flags.bitfields |=
2966 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
2970 if (offset == segs->ds_len) {
2971 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
2975 second_bd = (struct eth_tx_2nd_bd *)
2976 ecore_chain_produce(&txq->tx_pbl);
2977 memset(second_bd, 0, sizeof(*second_bd));
2980 if (seg_idx < nsegs) {
2981 BD_SET_UNMAP_ADDR_LEN(second_bd, \
2982 (segs->ds_addr), (segs->ds_len));
2987 third_bd = (struct eth_tx_3rd_bd *)
2988 ecore_chain_produce(&txq->tx_pbl);
2989 memset(third_bd, 0, sizeof(*third_bd));
2990 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
2991 third_bd->data.bitfields |=
2992 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
2995 if (seg_idx < nsegs) {
2996 BD_SET_UNMAP_ADDR_LEN(third_bd, \
2997 (segs->ds_addr), (segs->ds_len));
3002 for (; seg_idx < nsegs; seg_idx++) {
3003 tx_data_bd = (struct eth_tx_bd *)
3004 ecore_chain_produce(&txq->tx_pbl);
3005 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3006 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \
3013 } else if (offset < segs->ds_len) {
3014 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
3016 second_bd = (struct eth_tx_2nd_bd *)
3017 ecore_chain_produce(&txq->tx_pbl);
3018 memset(second_bd, 0, sizeof(*second_bd));
3019 BD_SET_UNMAP_ADDR_LEN(second_bd, \
3020 (segs->ds_addr + offset),\
3021 (segs->ds_len - offset));
3025 third_bd = (struct eth_tx_3rd_bd *)
3026 ecore_chain_produce(&txq->tx_pbl);
3027 memset(third_bd, 0, sizeof(*third_bd));
3029 BD_SET_UNMAP_ADDR_LEN(third_bd, \
3032 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3033 third_bd->data.bitfields |=
3034 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3038 for (seg_idx = 2; seg_idx < nsegs; seg_idx++) {
3039 tx_data_bd = (struct eth_tx_bd *)
3040 ecore_chain_produce(&txq->tx_pbl);
3041 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3042 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \
3050 offset = offset - segs->ds_len;
3053 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) {
3058 tx_data_bd = (struct eth_tx_bd *)
3059 ecore_chain_produce(&txq->tx_pbl);
3060 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3062 if (second_bd == NULL) {
3063 second_bd = (struct eth_tx_2nd_bd *)
3065 } else if (third_bd == NULL) {
3066 third_bd = (struct eth_tx_3rd_bd *)
3070 if (offset && (offset < segs->ds_len)) {
3071 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3072 segs->ds_addr, offset);
3074 tx_data_bd = (struct eth_tx_bd *)
3075 ecore_chain_produce(&txq->tx_pbl);
3077 memset(tx_data_bd, 0,
3078 sizeof(*tx_data_bd));
3080 if (second_bd == NULL) {
3082 (struct eth_tx_2nd_bd *)tx_data_bd;
3083 } else if (third_bd == NULL) {
3085 (struct eth_tx_3rd_bd *)tx_data_bd;
3087 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3088 (segs->ds_addr + offset), \
3089 (segs->ds_len - offset));
3094 offset = offset - segs->ds_len;
3095 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3096 segs->ds_addr, segs->ds_len);
3102 if (third_bd == NULL) {
3103 third_bd = (struct eth_tx_3rd_bd *)
3104 ecore_chain_produce(&txq->tx_pbl);
3105 memset(third_bd, 0, sizeof(*third_bd));
3108 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3109 third_bd->data.bitfields |=
3110 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3114 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) {
3115 tx_data_bd = (struct eth_tx_bd *)
3116 ecore_chain_produce(&txq->tx_pbl);
3117 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3118 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, segs->ds_addr,\
3123 first_bd->data.bitfields =
3124 (m_head->m_pkthdr.len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
3125 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
3126 first_bd->data.bitfields =
3127 htole16(first_bd->data.bitfields);
3131 first_bd->data.nbds = nbd;
3133 if (ha->dbg_trace_tso_pkt_len) {
3134 if (fp->tx_tso_max_nsegs < nsegs)
3135 fp->tx_tso_max_nsegs = nsegs;
3137 if ((nsegs < fp->tx_tso_min_nsegs) || (!fp->tx_tso_min_nsegs))
3138 fp->tx_tso_min_nsegs = nsegs;
3141 txq->sw_tx_ring[idx].nsegs = nsegs;
3142 txq->sw_tx_prod = (txq->sw_tx_prod + 1) & (TX_RING_SIZE - 1);
3144 txq->tx_db.data.bd_prod =
3145 htole16(ecore_chain_get_prod_idx(&txq->tx_pbl));
3147 qlnx_txq_doorbell_wr32(ha, txq->doorbell_addr, txq->tx_db.raw);
3149 QL_DPRINT8(ha, "exit\n");
3154 qlnx_stop(qlnx_host_t *ha)
3156 struct ifnet *ifp = ha->ifp;
3162 ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
3165 * We simply lock and unlock each fp->tx_mtx to
3166 * propagate the if_drv_flags state to each
3167 * tx thread.
3169 QL_DPRINT1(ha, "QLNX STATE = %d\n", ha->state);
3171 if (ha->state == QLNX_STATE_OPEN) {
3172 for (i = 0; i < ha->num_rss; i++) {
3173 struct qlnx_fastpath *fp = &ha->fp_array[i];
3175 mtx_lock(&fp->tx_mtx);
3176 mtx_unlock(&fp->tx_mtx);
3178 if (fp->fp_taskqueue != NULL)
3179 taskqueue_enqueue(fp->fp_taskqueue,
3190 qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha)
3192 return(TX_RING_SIZE - 1);
3196 qlnx_get_mac_addr(qlnx_host_t *ha)
3198 struct ecore_hwfn *p_hwfn;
3200 p_hwfn = &ha->cdev.hwfns[0];
3201 return (p_hwfn->hw_info.hw_mac_addr);
3205 qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link)
3207 uint32_t ifm_type = 0;
3209 switch (if_link->media_type) {
3211 case MEDIA_MODULE_FIBER:
3212 case MEDIA_UNSPECIFIED:
3213 if (if_link->speed == (100 * 1000))
3214 ifm_type = QLNX_IFM_100G_SR4;
3215 else if (if_link->speed == (40 * 1000))
3216 ifm_type = IFM_40G_SR4;
3217 else if (if_link->speed == (25 * 1000))
3218 ifm_type = QLNX_IFM_25G_SR;
3221 case MEDIA_DA_TWINAX:
3222 if (if_link->speed == (100 * 1000))
3223 ifm_type = QLNX_IFM_100G_CR4;
3224 else if (if_link->speed == (40 * 1000))
3225 ifm_type = IFM_40G_CR4;
3226 else if (if_link->speed == (25 * 1000))
3227 ifm_type = QLNX_IFM_25G_CR;
3231 ifm_type = IFM_UNKNOWN;
3239 /*****************************************************************************
3240 * Interrupt Service Functions
3241 *****************************************************************************/
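/*
 * qlnx_rx_jumbo_chain() gathers the continuation buffers of a frame
 * that spans several RX BDs: for each remaining BD it pulls the mbuf
 * from the software ring, posts a replacement buffer (reusing the old
 * one if allocation fails, which drops the packet), trims the length
 * of the last fragment, and links the fragments onto mp_head.
 */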
3244 qlnx_rx_jumbo_chain(qlnx_host_t *ha, struct qlnx_fastpath *fp,
3245 struct mbuf *mp_head, uint16_t len)
3247 struct mbuf *mp, *mpf, *mpl;
3248 struct sw_rx_data *sw_rx_data;
3249 struct qlnx_rx_queue *rxq;
3250 uint16_t len_in_buffer;
3253 mpf = mpl = mp = NULL;
3257 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3259 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3260 mp = sw_rx_data->data;
3263 QL_DPRINT1(ha, "mp = NULL\n");
3264 fp->err_rx_mp_null++;
3266 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3273 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3274 BUS_DMASYNC_POSTREAD);
3276 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3278 QL_DPRINT1(ha, "New buffer allocation failed, dropping"
3279 " incoming packet and reusing its buffer\n");
3281 qlnx_reuse_rx_data(rxq);
3282 fp->err_rx_alloc_errors++;
3289 ecore_chain_consume(&rxq->rx_bd_ring);
3291 if (len > rxq->rx_buf_size)
3292 len_in_buffer = rxq->rx_buf_size;
3294 len_in_buffer = len;
3296 len = len - len_in_buffer;
3298 mp->m_flags &= ~M_PKTHDR;
3300 mp->m_len = len_in_buffer;
3311 mp_head->m_next = mpf;
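/*
 * qlnx_tpa_start() begins an LRO/TPA aggregation for the given
 * agg_index: the CQE's buffer becomes the aggregation head, a
 * pre-allocated buffer from tpa_info is swapped into the RX BD ring in
 * its place, any buffers named in ext_bd_len_list are consumed, and
 * the aggregation moves to QLNX_AGG_STATE_START. Any allocation or
 * state error parks the aggregation in QLNX_AGG_STATE_ERROR so that
 * qlnx_tpa_end() can discard it.
 */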
3317 qlnx_tpa_start(qlnx_host_t *ha,
3318 struct qlnx_fastpath *fp,
3319 struct qlnx_rx_queue *rxq,
3320 struct eth_fast_path_rx_tpa_start_cqe *cqe)
3323 struct ifnet *ifp = ha->ifp;
3325 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL;
3326 struct sw_rx_data *sw_rx_data;
3329 struct eth_rx_bd *rx_bd;
3332 #if __FreeBSD_version >= 1100000
3334 #endif /* #if __FreeBSD_version >= 1100000 */
3337 agg_index = cqe->tpa_agg_index;
3339 QL_DPRINT7(ha, "[rss_id = %d]: enter\n \
3341 \t bitfields = 0x%x\n \
3342 \t seg_len = 0x%x\n \
3343 \t pars_flags = 0x%x\n \
3344 \t vlan_tag = 0x%x\n \
3345 \t rss_hash = 0x%x\n \
3346 \t len_on_first_bd = 0x%x\n \
3347 \t placement_offset = 0x%x\n \
3348 \t tpa_agg_index = 0x%x\n \
3349 \t header_len = 0x%x\n \
3350 \t ext_bd_len_list[0] = 0x%x\n \
3351 \t ext_bd_len_list[1] = 0x%x\n \
3352 \t ext_bd_len_list[2] = 0x%x\n \
3353 \t ext_bd_len_list[3] = 0x%x\n \
3354 \t ext_bd_len_list[4] = 0x%x\n",
3355 fp->rss_id, cqe->type, cqe->bitfields, cqe->seg_len,
3356 cqe->pars_flags.flags, cqe->vlan_tag,
3357 cqe->rss_hash, cqe->len_on_first_bd, cqe->placement_offset,
3358 cqe->tpa_agg_index, cqe->header_len,
3359 cqe->ext_bd_len_list[0], cqe->ext_bd_len_list[1],
3360 cqe->ext_bd_len_list[2], cqe->ext_bd_len_list[3],
3361 cqe->ext_bd_len_list[4]);
3363 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
3364 fp->err_rx_tpa_invalid_agg_num++;
3368 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3369 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD);
3370 mp = sw_rx_data->data;
3372 QL_DPRINT7(ha, "[rss_id = %d]: mp = %p \n ", fp->rss_id, mp);
3375 QL_DPRINT7(ha, "[%d]: mp = NULL\n", fp->rss_id);
3376 fp->err_rx_mp_null++;
3377 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3382 if ((le16toh(cqe->pars_flags.flags)) & CQE_FLAGS_ERR) {
3384 QL_DPRINT7(ha, "[%d]: CQE in CONS = %u has error,"
3385 " flags = %x, dropping incoming packet\n", fp->rss_id,
3386 rxq->sw_rx_cons, le16toh(cqe->pars_flags.flags));
3388 fp->err_rx_hw_errors++;
3390 qlnx_reuse_rx_data(rxq);
3392 QLNX_INC_IERRORS(ifp);
3397 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3399 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
3400 " dropping incoming packet and reusing its buffer\n",
3403 fp->err_rx_alloc_errors++;
3404 QLNX_INC_IQDROPS(ifp);
3407 * Load the tpa mbuf into the rx ring and save the
3411 map = sw_rx_data->map;
3412 addr = sw_rx_data->dma_addr;
3414 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
3416 sw_rx_data->data = rxq->tpa_info[agg_index].rx_buf.data;
3417 sw_rx_data->dma_addr = rxq->tpa_info[agg_index].rx_buf.dma_addr;
3418 sw_rx_data->map = rxq->tpa_info[agg_index].rx_buf.map;
3420 rxq->tpa_info[agg_index].rx_buf.data = mp;
3421 rxq->tpa_info[agg_index].rx_buf.dma_addr = addr;
3422 rxq->tpa_info[agg_index].rx_buf.map = map;
3424 rx_bd = (struct eth_rx_bd *)
3425 ecore_chain_produce(&rxq->rx_bd_ring);
3427 rx_bd->addr.hi = htole32(U64_HI(sw_rx_data->dma_addr));
3428 rx_bd->addr.lo = htole32(U64_LO(sw_rx_data->dma_addr));
3430 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3431 BUS_DMASYNC_PREREAD);
3433 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
3434 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3436 ecore_chain_consume(&rxq->rx_bd_ring);
3438 /* Now reuse any buffers posted in ext_bd_len_list */
3439 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
3441 if (cqe->ext_bd_len_list[i] == 0)
3444 qlnx_reuse_rx_data(rxq);
3447 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
3451 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
3453 QL_DPRINT7(ha, "[%d]: invalid aggregation state,"
3454 " dropping incoming packet and reusing its buffer\n",
3457 QLNX_INC_IQDROPS(ifp);
3459 /* if we already have mbuf head in aggregation free it */
3460 if (rxq->tpa_info[agg_index].mpf) {
3461 m_freem(rxq->tpa_info[agg_index].mpf);
3462 rxq->tpa_info[agg_index].mpl = NULL;
3464 rxq->tpa_info[agg_index].mpf = mp;
3465 rxq->tpa_info[agg_index].mpl = NULL;
3467 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3468 ecore_chain_consume(&rxq->rx_bd_ring);
3470 /* Now reuse any buffers posted in ext_bd_len_list */
3471 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
3473 if (cqe->ext_bd_len_list[i] == 0)
3476 qlnx_reuse_rx_data(rxq);
3478 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
3484 * first process the ext_bd_len_list
3485 * if this fails then we simply drop the packet
3487 ecore_chain_consume(&rxq->rx_bd_ring);
3488 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3490 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
3492 QL_DPRINT7(ha, "[%d]: 4\n ", fp->rss_id);
3494 if (cqe->ext_bd_len_list[i] == 0)
3497 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3498 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3499 BUS_DMASYNC_POSTREAD);
3501 mpc = sw_rx_data->data;
3504 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
3505 fp->err_rx_mp_null++;
3509 rxq->tpa_info[agg_index].agg_state =
3510 QLNX_AGG_STATE_ERROR;
3511 ecore_chain_consume(&rxq->rx_bd_ring);
3513 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3517 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3518 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
3519 " dropping incoming packet and reusing its"
3520 " buffer\n", fp->rss_id);
3522 qlnx_reuse_rx_data(rxq);
3528 rxq->tpa_info[agg_index].agg_state =
3529 QLNX_AGG_STATE_ERROR;
3531 ecore_chain_consume(&rxq->rx_bd_ring);
3533 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3538 mpc->m_flags &= ~M_PKTHDR;
3540 mpc->m_len = cqe->ext_bd_len_list[i];
3546 mpl->m_len = ha->rx_buf_size;
3551 ecore_chain_consume(&rxq->rx_bd_ring);
3553 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3556 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
3558 QL_DPRINT7(ha, "[%d]: invalid aggregation state, dropping"
3559 " incoming packet and reusing its buffer\n",
3562 QLNX_INC_IQDROPS(ifp);
3564 rxq->tpa_info[agg_index].mpf = mp;
3565 rxq->tpa_info[agg_index].mpl = NULL;
3570 rxq->tpa_info[agg_index].placement_offset = cqe->placement_offset;
3573 mp->m_len = ha->rx_buf_size;
3575 rxq->tpa_info[agg_index].mpf = mp;
3576 rxq->tpa_info[agg_index].mpl = mpl;
3578 mp->m_len = cqe->len_on_first_bd + cqe->placement_offset;
3579 rxq->tpa_info[agg_index].mpf = mp;
3580 rxq->tpa_info[agg_index].mpl = mp;
3584 mp->m_flags |= M_PKTHDR;
3586 /* assign the packet to this interface */
3587 mp->m_pkthdr.rcvif = ifp;
3589 /* assume no hardware checksum has completed */
3590 mp->m_pkthdr.csum_flags = 0;
3592 //mp->m_pkthdr.flowid = fp->rss_id;
3593 mp->m_pkthdr.flowid = cqe->rss_hash;
3595 #if __FreeBSD_version >= 1100000
3597 hash_type = cqe->bitfields &
3598 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
3599 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);
3601 switch (hash_type) {
3603 case RSS_HASH_TYPE_IPV4:
3604 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
3607 case RSS_HASH_TYPE_TCP_IPV4:
3608 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
3611 case RSS_HASH_TYPE_IPV6:
3612 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
3615 case RSS_HASH_TYPE_TCP_IPV6:
3616 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
3620 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
3625 mp->m_flags |= M_FLOWID;
3628 mp->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID |
3629 CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3631 mp->m_pkthdr.csum_data = 0xFFFF;
3633 if (CQE_HAS_VLAN(cqe->pars_flags.flags)) {
3634 mp->m_pkthdr.ether_vtag = le16toh(cqe->vlan_tag);
3635 mp->m_flags |= M_VLANTAG;
3638 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_START;
3640 QL_DPRINT7(ha, "[%d]: 5\n\tagg_state = %d\n\t mpf = %p mpl = %p\n",
3641 fp->rss_id, rxq->tpa_info[agg_index].agg_state,
3642 rxq->tpa_info[agg_index].mpf, rxq->tpa_info[agg_index].mpl);
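/*
 * qlnx_tpa_cont() appends the buffers listed in the continuation CQE's
 * len_list to the aggregation's mbuf chain, replenishing the RX ring
 * as it goes; any failure flags the aggregation as
 * QLNX_AGG_STATE_ERROR so the final TPA end CQE drops it.
 */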
3648 qlnx_tpa_cont(qlnx_host_t *ha, struct qlnx_fastpath *fp,
3649 struct qlnx_rx_queue *rxq,
3650 struct eth_fast_path_rx_tpa_cont_cqe *cqe)
3652 struct sw_rx_data *sw_rx_data;
3654 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL;
3661 QL_DPRINT7(ha, "[%d]: enter\n \
3663 \t tpa_agg_index = 0x%x\n \
3664 \t len_list[0] = 0x%x\n \
3665 \t len_list[1] = 0x%x\n \
3666 \t len_list[2] = 0x%x\n \
3667 \t len_list[3] = 0x%x\n \
3668 \t len_list[4] = 0x%x\n \
3669 \t len_list[5] = 0x%x\n",
3670 fp->rss_id, cqe->type, cqe->tpa_agg_index,
3671 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
3672 cqe->len_list[3], cqe->len_list[4], cqe->len_list[5]);
3674 agg_index = cqe->tpa_agg_index;
3676 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
3677 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
3678 fp->err_rx_tpa_invalid_agg_num++;
3683 for (i = 0; i < ETH_TPA_CQE_CONT_LEN_LIST_SIZE; i++) {
3685 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);
3687 if (cqe->len_list[i] == 0)
3690 if (rxq->tpa_info[agg_index].agg_state !=
3691 QLNX_AGG_STATE_START) {
3692 qlnx_reuse_rx_data(rxq);
3696 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3697 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3698 BUS_DMASYNC_POSTREAD);
3700 mpc = sw_rx_data->data;
3704 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
3706 fp->err_rx_mp_null++;
3710 rxq->tpa_info[agg_index].agg_state =
3711 QLNX_AGG_STATE_ERROR;
3712 ecore_chain_consume(&rxq->rx_bd_ring);
3714 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3718 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3720 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
3721 " dropping incoming packet and reusing its"
3722 " buffer\n", fp->rss_id);
3724 qlnx_reuse_rx_data(rxq);
3730 rxq->tpa_info[agg_index].agg_state =
3731 QLNX_AGG_STATE_ERROR;
3733 ecore_chain_consume(&rxq->rx_bd_ring);
3735 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3740 mpc->m_flags &= ~M_PKTHDR;
3742 mpc->m_len = cqe->len_list[i];
3748 mpl->m_len = ha->rx_buf_size;
3753 ecore_chain_consume(&rxq->rx_bd_ring);
3755 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3758 QL_DPRINT7(ha, "[%d]: 2\n" "\tmpf = %p mpl = %p\n",
3759 fp->rss_id, mpf, mpl);
3762 mp = rxq->tpa_info[agg_index].mpl;
3763 mp->m_len = ha->rx_buf_size;
3765 rxq->tpa_info[agg_index].mpl = mpl;
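/*
 * qlnx_tpa_end() completes an aggregation: it consumes any trailing
 * len_list buffers, then (if the aggregation is still in the START
 * state) strips the placement offset, fixes up m_pkthdr.len and the
 * last fragment's m_len to match total_packet_len, hands the packet to
 * if_input(), and returns the number of coalesced segments so the
 * caller can charge them against its budget.
 */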
3772 qlnx_tpa_end(qlnx_host_t *ha, struct qlnx_fastpath *fp,
3773 struct qlnx_rx_queue *rxq,
3774 struct eth_fast_path_rx_tpa_end_cqe *cqe)
3776 struct sw_rx_data *sw_rx_data;
3778 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL;
3782 struct ifnet *ifp = ha->ifp;
3787 QL_DPRINT7(ha, "[%d]: enter\n \
3789 \t tpa_agg_index = 0x%x\n \
3790 \t total_packet_len = 0x%x\n \
3791 \t num_of_bds = 0x%x\n \
3792 \t end_reason = 0x%x\n \
3793 \t num_of_coalesced_segs = 0x%x\n \
3794 \t ts_delta = 0x%x\n \
3795 \t len_list[0] = 0x%x\n \
3796 \t len_list[1] = 0x%x\n \
3797 \t len_list[2] = 0x%x\n \
3798 \t len_list[3] = 0x%x\n",
3799 fp->rss_id, cqe->type, cqe->tpa_agg_index,
3800 cqe->total_packet_len, cqe->num_of_bds,
3801 cqe->end_reason, cqe->num_of_coalesced_segs, cqe->ts_delta,
3802 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
3805 agg_index = cqe->tpa_agg_index;
3807 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
3809 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
3811 fp->err_rx_tpa_invalid_agg_num++;
3816 for (i = 0; i < ETH_TPA_CQE_END_LEN_LIST_SIZE; i++) {
3818 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);
3820 if (cqe->len_list[i] == 0)
3823 if (rxq->tpa_info[agg_index].agg_state !=
3824 QLNX_AGG_STATE_START) {
3826 QL_DPRINT7(ha, "[%d]: 2\n ", fp->rss_id);
3828 qlnx_reuse_rx_data(rxq);
3832 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3833 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3834 BUS_DMASYNC_POSTREAD);
3836 mpc = sw_rx_data->data;
3840 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
3842 fp->err_rx_mp_null++;
3846 rxq->tpa_info[agg_index].agg_state =
3847 QLNX_AGG_STATE_ERROR;
3848 ecore_chain_consume(&rxq->rx_bd_ring);
3850 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3854 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3855 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
3856 " dropping incoming packet and reusing its"
3857 " buffer\n", fp->rss_id);
3859 qlnx_reuse_rx_data(rxq);
3865 rxq->tpa_info[agg_index].agg_state =
3866 QLNX_AGG_STATE_ERROR;
3868 ecore_chain_consume(&rxq->rx_bd_ring);
3870 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3875 mpc->m_flags &= ~M_PKTHDR;
3877 mpc->m_len = cqe->len_list[i];
3883 mpl->m_len = ha->rx_buf_size;
3888 ecore_chain_consume(&rxq->rx_bd_ring);
3890 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3893 QL_DPRINT7(ha, "[%d]: 5\n ", fp->rss_id);
3897 QL_DPRINT7(ha, "[%d]: 6\n ", fp->rss_id);
3899 mp = rxq->tpa_info[agg_index].mpl;
3900 mp->m_len = ha->rx_buf_size;
3904 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_START) {
3906 QL_DPRINT7(ha, "[%d]: 7\n ", fp->rss_id);
3908 if (rxq->tpa_info[agg_index].mpf != NULL)
3909 m_freem(rxq->tpa_info[agg_index].mpf);
3910 rxq->tpa_info[agg_index].mpf = NULL;
3911 rxq->tpa_info[agg_index].mpl = NULL;
3912 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
3916 mp = rxq->tpa_info[agg_index].mpf;
3917 m_adj(mp, rxq->tpa_info[agg_index].placement_offset);
3918 mp->m_pkthdr.len = cqe->total_packet_len;
3920 if (mp->m_next == NULL)
3921 mp->m_len = mp->m_pkthdr.len;
3923 /* compute the total packet length */
3925 while (mpf != NULL) {
3930 if (cqe->total_packet_len > len) {
3931 mpl = rxq->tpa_info[agg_index].mpl;
3932 mpl->m_len += (cqe->total_packet_len - len);
3936 QLNX_INC_IPACKETS(ifp);
3937 QLNX_INC_IBYTES(ifp, (cqe->total_packet_len));
3939 QL_DPRINT7(ha, "[%d]: 8 csum_data = 0x%x csum_flags = 0x%lx\n \
3940 m_len = 0x%x m_pkthdr_len = 0x%x\n",
3941 fp->rss_id, mp->m_pkthdr.csum_data,
3942 mp->m_pkthdr.csum_flags, mp->m_len, mp->m_pkthdr.len);
3944 (*ifp->if_input)(ifp, mp);
3946 rxq->tpa_info[agg_index].mpf = NULL;
3947 rxq->tpa_info[agg_index].mpl = NULL;
3948 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
3950 return (cqe->num_of_coalesced_segs);
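/*
 * qlnx_rx_int() is the RX fastpath poll loop: it consumes completion
 * ring entries until the software consumer catches up with the HW
 * consumer (or the budget is exhausted), dispatching slowpath CQEs to
 * ecore, TPA CQEs to the qlnx_tpa_*() handlers, and regular CQEs to
 * the inline receive path (buffer replenish, checksum/VLAN/RSS
 * annotation, optional soft LRO, if_input), then updates the RX
 * producers.
 */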
3954 qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
3957 uint16_t hw_comp_cons, sw_comp_cons;
3959 struct qlnx_rx_queue *rxq = fp->rxq;
3960 struct ifnet *ifp = ha->ifp;
3961 struct ecore_dev *cdev = &ha->cdev;
3962 struct ecore_hwfn *p_hwfn;
3964 #ifdef QLNX_SOFT_LRO
3965 struct lro_ctrl *lro;
3968 #endif /* #ifdef QLNX_SOFT_LRO */
3970 hw_comp_cons = le16toh(*rxq->hw_cons_ptr);
3971 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
3973 p_hwfn = &ha->cdev.hwfns[(fp->rss_id % cdev->num_hwfns)];
3975 /* Memory barrier to prevent the CPU from speculatively reading a CQE/BD
3976 * in the while-loop below before hw_comp_cons has been read. If the CQE
3977 * were read before the FW writes the CQE and the SB, the CPU would end
3978 * up processing a stale CQE.
3981 /* Loop to complete all indicated BDs */
3982 while (sw_comp_cons != hw_comp_cons) {
3983 union eth_rx_cqe *cqe;
3984 struct eth_fast_path_rx_reg_cqe *fp_cqe;
3985 struct sw_rx_data *sw_rx_data;
3986 register struct mbuf *mp;
3987 enum eth_rx_cqe_type cqe_type;
3988 uint16_t len, pad, len_on_first_bd;
3990 #if __FreeBSD_version >= 1100000
3992 #endif /* #if __FreeBSD_version >= 1100000 */
3994 /* Get the CQE from the completion ring */
3995 cqe = (union eth_rx_cqe *)
3996 ecore_chain_consume(&rxq->rx_comp_ring);
3997 cqe_type = cqe->fast_path_regular.type;
3999 if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) {
4000 QL_DPRINT3(ha, "Got a slowpath CQE\n");
4002 ecore_eth_cqe_completion(p_hwfn,
4003 (struct eth_slow_path_rx_cqe *)cqe);
4007 if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) {
4011 case ETH_RX_CQE_TYPE_TPA_START:
4012 qlnx_tpa_start(ha, fp, rxq,
4013 &cqe->fast_path_tpa_start);
4017 case ETH_RX_CQE_TYPE_TPA_CONT:
4018 qlnx_tpa_cont(ha, fp, rxq,
4019 &cqe->fast_path_tpa_cont);
4023 case ETH_RX_CQE_TYPE_TPA_END:
4024 rx_pkt += qlnx_tpa_end(ha, fp, rxq,
4025 &cqe->fast_path_tpa_end);
4036 /* Get the data from the SW ring */
4037 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4038 mp = sw_rx_data->data;
4041 QL_DPRINT1(ha, "mp = NULL\n");
4042 fp->err_rx_mp_null++;
4044 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4047 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4048 BUS_DMASYNC_POSTREAD);
4051 fp_cqe = &cqe->fast_path_regular;/* MK CR TPA check assembly */
4052 len = le16toh(fp_cqe->pkt_len);
4053 pad = fp_cqe->placement_offset;
4055 QL_DPRINT3(ha, "CQE type = %x, flags = %x, vlan = %x,"
4056 " len %u, parsing flags = %d pad = %d\n",
4057 cqe_type, fp_cqe->bitfields,
4058 le16toh(fp_cqe->vlan_tag),
4059 len, le16toh(fp_cqe->pars_flags.flags), pad);
4061 data = mtod(mp, uint8_t *);
4065 qlnx_dump_buf8(ha, __func__, data, len);
4067 /* For every Rx BD consumed, we allocate a new BD so the BD ring
4068 * always keeps a fixed size. If allocation fails, we take the
4069 * consumed BD and return it to the ring in the PROD position.
4070 * The packet that was received on that BD will be dropped (and
4071 * not passed to the upper stack).
4073 /* If this is an error packet then drop it */
4074 if ((le16toh(cqe->fast_path_regular.pars_flags.flags)) &
4077 QL_DPRINT1(ha, "CQE in CONS = %u has error, flags = %x,"
4078 " dropping incoming packet\n", sw_comp_cons,
4079 le16toh(cqe->fast_path_regular.pars_flags.flags));
4080 fp->err_rx_hw_errors++;
4082 qlnx_reuse_rx_data(rxq);
4084 QLNX_INC_IERRORS(ifp);
4089 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4091 QL_DPRINT1(ha, "New buffer allocation failed, dropping"
4092 " incoming packet and reusing its buffer\n");
4093 qlnx_reuse_rx_data(rxq);
4095 fp->err_rx_alloc_errors++;
4097 QLNX_INC_IQDROPS(ifp);
4102 ecore_chain_consume(&rxq->rx_bd_ring);
4104 len_on_first_bd = fp_cqe->len_on_first_bd;
4106 mp->m_pkthdr.len = len;
4108 QL_DPRINT1(ha, "len = %d len_on_first_bd = %d\n",
4109 len, len_on_first_bd);
4110 if ((len > 60 ) && (len > len_on_first_bd)) {
4112 mp->m_len = len_on_first_bd;
4114 if (qlnx_rx_jumbo_chain(ha, fp, mp,
4115 (len - len_on_first_bd)) != 0) {
4119 QLNX_INC_IQDROPS(ifp);
4124 } else if (len_on_first_bd < len) {
4125 fp->err_rx_jumbo_chain_pkts++;
4130 mp->m_flags |= M_PKTHDR;
4132 /* assign the packet to this interface */
4133 mp->m_pkthdr.rcvif = ifp;
4135 /* assume no hardware checksum has completed */
4136 mp->m_pkthdr.csum_flags = 0;
4138 mp->m_pkthdr.flowid = fp_cqe->rss_hash;
4140 #if __FreeBSD_version >= 1100000
4142 hash_type = fp_cqe->bitfields &
4143 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
4144 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);
4146 switch (hash_type) {
4148 case RSS_HASH_TYPE_IPV4:
4149 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
4152 case RSS_HASH_TYPE_TCP_IPV4:
4153 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
4156 case RSS_HASH_TYPE_IPV6:
4157 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
4160 case RSS_HASH_TYPE_TCP_IPV6:
4161 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
4165 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
4170 mp->m_flags |= M_FLOWID;
4173 if (CQE_L3_PACKET(fp_cqe->pars_flags.flags)) {
4174 mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
4177 if (!(CQE_IP_HDR_ERR(fp_cqe->pars_flags.flags))) {
4178 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4181 if (CQE_L4_HAS_CSUM(fp_cqe->pars_flags.flags)) {
4182 mp->m_pkthdr.csum_data = 0xFFFF;
4183 mp->m_pkthdr.csum_flags |=
4184 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4187 if (CQE_HAS_VLAN(fp_cqe->pars_flags.flags)) {
4188 mp->m_pkthdr.ether_vtag = le16toh(fp_cqe->vlan_tag);
4189 mp->m_flags |= M_VLANTAG;
4192 QLNX_INC_IPACKETS(ifp);
4193 QLNX_INC_IBYTES(ifp, len);
4195 #ifdef QLNX_SOFT_LRO
4199 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
4201 tcp_lro_queue_mbuf(lro, mp);
4205 if (tcp_lro_rx(lro, mp, 0))
4206 (*ifp->if_input)(ifp, mp);
4208 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
4211 (*ifp->if_input)(ifp, mp);
4215 (*ifp->if_input)(ifp, mp);
4217 #endif /* #ifdef QLNX_SOFT_LRO */
4221 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4223 next_cqe: /* don't consume bd rx buffer */
4224 ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
4225 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
4227 /* CR TPA - revisit how to handle the budget for TPA; perhaps
4228 * increase it on "end" */
4229 if (rx_pkt == budget)
4231 } /* repeat while sw_comp_cons != hw_comp_cons... */
4233 /* Update producers */
4234 qlnx_update_rx_prod(p_hwfn, rxq);
4240 * fast path interrupt
4244 qlnx_fp_isr(void *arg)
4246 qlnx_ivec_t *ivec = arg;
4248 struct qlnx_fastpath *fp = NULL;
4253 if (ha->state != QLNX_STATE_OPEN) {
4257 idx = ivec->rss_idx;
4259 if ((idx = ivec->rss_idx) >= ha->num_rss) {
4260 QL_DPRINT1(ha, "illegal interrupt[%d]\n", idx);
4261 ha->err_illegal_intr++;
4264 fp = &ha->fp_array[idx];
4269 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
4270 if (fp->fp_taskqueue != NULL)
4271 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
4279 * slow path interrupt processing function
4280 * can be invoked in polled mode or in interrupt mode via taskqueue.
4283 qlnx_sp_isr(void *arg)
4285 struct ecore_hwfn *p_hwfn;
4290 ha = (qlnx_host_t *)p_hwfn->p_dev;
4292 ha->sp_interrupts++;
4294 QL_DPRINT2(ha, "enter\n");
4296 ecore_int_sp_dpc(p_hwfn);
4298 QL_DPRINT2(ha, "exit\n");
4303 /*****************************************************************************
4304 * Support Functions for DMA'able Memory
4305 *****************************************************************************/
4308 qlnx_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
4310 *((bus_addr_t *)arg) = 0;
4313 printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
4317 *((bus_addr_t *)arg) = segs[0].ds_addr;
4323 qlnx_alloc_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
4331 ret = bus_dma_tag_create(
4332 ha->parent_tag,/* parent */
4334 ((bus_size_t)(1ULL << 32)),/* boundary */
4335 BUS_SPACE_MAXADDR, /* lowaddr */
4336 BUS_SPACE_MAXADDR, /* highaddr */
4337 NULL, NULL, /* filter, filterarg */
4338 dma_buf->size, /* maxsize */
4340 dma_buf->size, /* maxsegsize */
4342 NULL, NULL, /* lockfunc, lockarg */
4346 QL_DPRINT1(ha, "could not create dma tag\n");
4347 goto qlnx_alloc_dmabuf_exit;
4349 ret = bus_dmamem_alloc(dma_buf->dma_tag,
4350 (void **)&dma_buf->dma_b,
4351 (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
4354 bus_dma_tag_destroy(dma_buf->dma_tag);
4355 QL_DPRINT1(ha, "bus_dmamem_alloc failed\n");
4356 goto qlnx_alloc_dmabuf_exit;
4359 ret = bus_dmamap_load(dma_buf->dma_tag,
4363 qlnx_dmamap_callback,
4364 &b_addr, BUS_DMA_NOWAIT);
4366 if (ret || !b_addr) {
4367 bus_dma_tag_destroy(dma_buf->dma_tag);
4368 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
4371 goto qlnx_alloc_dmabuf_exit;
4374 dma_buf->dma_addr = b_addr;
4376 qlnx_alloc_dmabuf_exit:
4382 qlnx_free_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
4384 bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
4385 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
4386 bus_dma_tag_destroy(dma_buf->dma_tag);
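/*
 * qlnx_dma_alloc_coherent()/qlnx_dma_free_coherent() implement ecore's
 * OSAL coherent-memory hooks. The allocator rounds the request up to a
 * page and over-allocates by one extra page so it can stash a copy of
 * the qlnx_dma_t bookkeeping (tag, map, addresses) immediately past
 * the rounded-up buffer; the free routine recovers that structure from
 * v_addr + size, which is why both sides must round the size the same
 * way.
 */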
4391 qlnx_dma_alloc_coherent(void *ecore_dev, bus_addr_t *phys, uint32_t size)
4398 ha = (qlnx_host_t *)ecore_dev;
4401 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
4403 memset(&dma_buf, 0, sizeof (qlnx_dma_t));
4405 dma_buf.size = size + PAGE_SIZE;
4406 dma_buf.alignment = 8;
4408 if (qlnx_alloc_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf) != 0)
4410 bzero((uint8_t *)dma_buf.dma_b, dma_buf.size);
4412 *phys = dma_buf.dma_addr;
4414 dma_p = (qlnx_dma_t *)((uint8_t *)dma_buf.dma_b + size);
4416 memcpy(dma_p, &dma_buf, sizeof(qlnx_dma_t));
4418 QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
4419 (void *)dma_buf.dma_map, (void *)dma_buf.dma_tag,
4420 dma_buf.dma_b, (void *)dma_buf.dma_addr, size);
4422 return (dma_buf.dma_b);
4426 qlnx_dma_free_coherent(void *ecore_dev, void *v_addr, bus_addr_t phys,
4429 qlnx_dma_t dma_buf, *dma_p;
4433 ha = (qlnx_host_t *)ecore_dev;
4439 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
4441 dma_p = (qlnx_dma_t *)((uint8_t *)v_addr + size);
4443 QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
4444 (void *)dma_p->dma_map, (void *)dma_p->dma_tag,
4445 dma_p->dma_b, (void *)dma_p->dma_addr, size);
4449 qlnx_free_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf);
4454 qlnx_alloc_parent_dma_tag(qlnx_host_t *ha)
4462 * Allocate parent DMA Tag
4464 ret = bus_dma_tag_create(
4465 bus_get_dma_tag(dev), /* parent */
4466 1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
4467 BUS_SPACE_MAXADDR, /* lowaddr */
4468 BUS_SPACE_MAXADDR, /* highaddr */
4469 NULL, NULL, /* filter, filterarg */
4470 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
4472 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
4474 NULL, NULL, /* lockfunc, lockarg */
4478 QL_DPRINT1(ha, "could not create parent dma tag\n");
4482 ha->flags.parent_tag = 1;
4488 qlnx_free_parent_dma_tag(qlnx_host_t *ha)
4490 if (ha->parent_tag != NULL) {
4491 bus_dma_tag_destroy(ha->parent_tag);
4492 ha->parent_tag = NULL;
4498 qlnx_alloc_tx_dma_tag(qlnx_host_t *ha)
4500 if (bus_dma_tag_create(NULL, /* parent */
4501 1, 0, /* alignment, bounds */
4502 BUS_SPACE_MAXADDR, /* lowaddr */
4503 BUS_SPACE_MAXADDR, /* highaddr */
4504 NULL, NULL, /* filter, filterarg */
4505 QLNX_MAX_TSO_FRAME_SIZE, /* maxsize */
4506 QLNX_MAX_SEGMENTS, /* nsegments */
4507 (PAGE_SIZE * 4), /* maxsegsize */
4508 BUS_DMA_ALLOCNOW, /* flags */
4509 NULL, /* lockfunc */
4510 NULL, /* lockfuncarg */
4513 QL_DPRINT1(ha, "tx_tag alloc failed\n");
4521 qlnx_free_tx_dma_tag(qlnx_host_t *ha)
4523 if (ha->tx_tag != NULL) {
4524 bus_dma_tag_destroy(ha->tx_tag);
4531 qlnx_alloc_rx_dma_tag(qlnx_host_t *ha)
4533 if (bus_dma_tag_create(NULL, /* parent */
4534 1, 0, /* alignment, bounds */
4535 BUS_SPACE_MAXADDR, /* lowaddr */
4536 BUS_SPACE_MAXADDR, /* highaddr */
4537 NULL, NULL, /* filter, filterarg */
4538 MJUM9BYTES, /* maxsize */
4540 MJUM9BYTES, /* maxsegsize */
4541 BUS_DMA_ALLOCNOW, /* flags */
4542 NULL, /* lockfunc */
4543 NULL, /* lockfuncarg */
4546 QL_DPRINT1(ha, " rx_tag alloc failed\n");
4554 qlnx_free_rx_dma_tag(qlnx_host_t *ha)
4556 if (ha->rx_tag != NULL) {
4557 bus_dma_tag_destroy(ha->rx_tag);
4563 /*********************************
4564 * Exported functions
4565 *********************************/
4567 qlnx_pci_bus_get_bar_size(void *ecore_dev, uint8_t bar_id)
4571 bar_id = bar_id * 2;
4573 bar_size = bus_get_resource_count(((qlnx_host_t *)ecore_dev)->pci_dev,
4581 qlnx_pci_read_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t *reg_value)
4583 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
4589 qlnx_pci_read_config_word(void *ecore_dev, uint32_t pci_reg,
4590 uint16_t *reg_value)
4592 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
4598 qlnx_pci_read_config_dword(void *ecore_dev, uint32_t pci_reg,
4599 uint32_t *reg_value)
4601 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
4607 qlnx_pci_write_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t reg_value)
4609 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
4610 pci_reg, reg_value, 1);
4615 qlnx_pci_write_config_word(void *ecore_dev, uint32_t pci_reg,
4618 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
4619 pci_reg, reg_value, 2);
4624 qlnx_pci_write_config_dword(void *ecore_dev, uint32_t pci_reg,
4627 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
4628 pci_reg, reg_value, 4);
4634 qlnx_pci_find_capability(void *ecore_dev, int cap)
4641 if (pci_find_cap(ha->pci_dev, PCIY_EXPRESS, &reg) == 0)
4644 QL_DPRINT1(ha, "failed\n");
4650 qlnx_reg_rd32(void *hwfn, uint32_t reg_addr)
4653 struct ecore_dev *cdev;
4654 struct ecore_hwfn *p_hwfn;
4658 cdev = p_hwfn->p_dev;
4660 reg_addr = (uint32_t)((uint8_t *)(p_hwfn->regview) -
4661 (uint8_t *)(cdev->regview)) + reg_addr;
4663 data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, reg_addr);
4669 qlnx_reg_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
4671 struct ecore_dev *cdev;
4672 struct ecore_hwfn *p_hwfn;
4676 cdev = p_hwfn->p_dev;
4678 reg_addr = (uint32_t)((uint8_t *)(p_hwfn->regview) -
4679 (uint8_t *)(cdev->regview)) + reg_addr;
4681 bus_write_4(((qlnx_host_t *)cdev)->pci_reg, reg_addr, value);
4687 qlnx_reg_wr16(void *hwfn, uint32_t reg_addr, uint16_t value)
4689 struct ecore_dev *cdev;
4690 struct ecore_hwfn *p_hwfn;
4694 cdev = p_hwfn->p_dev;
4696 reg_addr = (uint32_t)((uint8_t *)(p_hwfn->regview) -
4697 (uint8_t *)(cdev->regview)) + reg_addr;
4699 bus_write_2(((qlnx_host_t *)cdev)->pci_reg, reg_addr, value);
4705 qlnx_dbell_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
4707 struct ecore_dev *cdev;
4708 struct ecore_hwfn *p_hwfn;
4712 cdev = p_hwfn->p_dev;
4714 reg_addr = (uint32_t)((uint8_t *)(p_hwfn->doorbells) -
4715 (uint8_t *)(cdev->doorbells)) + reg_addr;
4717 bus_write_4(((qlnx_host_t *)cdev)->pci_dbells, reg_addr, value);
4723 qlnx_direct_reg_rd32(void *p_hwfn, uint32_t *reg_addr)
4727 struct ecore_dev *cdev;
4729 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
4730 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
4732 data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, offset);
4738 qlnx_direct_reg_wr32(void *p_hwfn, void *reg_addr, uint32_t value)
4741 struct ecore_dev *cdev;
4743 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
4744 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
4746 bus_write_4(((qlnx_host_t *)cdev)->pci_reg, offset, value);
4752 qlnx_zalloc(uint32_t size)
4756 va = malloc((unsigned long)size, M_QLNXBUF, M_NOWAIT);
4758 return ((void *)va);
4762 qlnx_barrier(void *p_hwfn)
4766 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
4767 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_WRITE);
4771 qlnx_link_update(void *p_hwfn)
4774 int prev_link_state;
4776 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
4778 qlnx_fill_link(p_hwfn, &ha->if_link);
4780 prev_link_state = ha->link_up;
4781 ha->link_up = ha->if_link.link_up;
4783 if (prev_link_state != ha->link_up) {
4785 if_link_state_change(ha->ifp, LINK_STATE_UP);
4787 if_link_state_change(ha->ifp, LINK_STATE_DOWN);
4794 qlnx_fill_link(struct ecore_hwfn *hwfn, struct qlnx_link_output *if_link)
4796 struct ecore_mcp_link_params link_params;
4797 struct ecore_mcp_link_state link_state;
4799 memset(if_link, 0, sizeof(*if_link));
4800 memset(&link_params, 0, sizeof(struct ecore_mcp_link_params));
4801 memset(&link_state, 0, sizeof(struct ecore_mcp_link_state));
4803 /* Prepare source inputs */
4804 /* we only deal with physical functions */
4805 memcpy(&link_params, ecore_mcp_get_link_params(hwfn),
4806 sizeof(link_params));
4807 memcpy(&link_state, ecore_mcp_get_link_state(hwfn),
4808 sizeof(link_state));
4810 ecore_mcp_get_media_type(hwfn->p_dev, &if_link->media_type);
4812 /* Set the link parameters to pass to protocol driver */
4813 if (link_state.link_up) {
4814 if_link->link_up = true;
4815 if_link->speed = link_state.speed;
4818 if_link->supported_caps = QLNX_LINK_CAP_FIBRE;
4820 if (link_params.speed.autoneg)
4821 if_link->supported_caps |= QLNX_LINK_CAP_Autoneg;
4823 if (link_params.pause.autoneg ||
4824 (link_params.pause.forced_rx && link_params.pause.forced_tx))
4825 if_link->supported_caps |= QLNX_LINK_CAP_Asym_Pause;
4827 if (link_params.pause.autoneg || link_params.pause.forced_rx ||
4828 link_params.pause.forced_tx)
4829 if_link->supported_caps |= QLNX_LINK_CAP_Pause;
4831 if (link_params.speed.advertised_speeds &
4832 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
4833 if_link->supported_caps |= QLNX_LINK_CAP_1000baseT_Half |
4834 QLNX_LINK_CAP_1000baseT_Full;
4836 if (link_params.speed.advertised_speeds &
4837 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
4838 if_link->supported_caps |= QLNX_LINK_CAP_10000baseKR_Full;
4840 if (link_params.speed.advertised_speeds &
4841 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
4842 if_link->supported_caps |= QLNX_LINK_CAP_25000baseKR_Full;
4844 if (link_params.speed.advertised_speeds &
4845 NVM_CFG1_PORT_DRV_LINK_SPEED_40G)
4846 if_link->supported_caps |= QLNX_LINK_CAP_40000baseLR4_Full;
4848 if (link_params.speed.advertised_speeds &
4849 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
4850 if_link->supported_caps |= QLNX_LINK_CAP_50000baseKR2_Full;
4852 if (link_params.speed.advertised_speeds &
4853 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
4854 if_link->supported_caps |= QLNX_LINK_CAP_100000baseKR4_Full;
4856 if_link->advertised_caps = if_link->supported_caps;
4858 if_link->autoneg = link_params.speed.autoneg;
4859 if_link->duplex = QLNX_LINK_DUPLEX;
4861 /* Link partner capabilities */
4863 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_HD)
4864 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Half;
4866 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_FD)
4867 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Full;
4869 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_10G)
4870 if_link->link_partner_caps |= QLNX_LINK_CAP_10000baseKR_Full;
4872 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_25G)
4873 if_link->link_partner_caps |= QLNX_LINK_CAP_25000baseKR_Full;
4875 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_40G)
4876 if_link->link_partner_caps |= QLNX_LINK_CAP_40000baseLR4_Full;
4878 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_50G)
4879 if_link->link_partner_caps |= QLNX_LINK_CAP_50000baseKR2_Full;
4881 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_100G)
4882 if_link->link_partner_caps |= QLNX_LINK_CAP_100000baseKR4_Full;
4884 if (link_state.an_complete)
4885 if_link->link_partner_caps |= QLNX_LINK_CAP_Autoneg;
4887 if (link_state.partner_adv_pause)
4888 if_link->link_partner_caps |= QLNX_LINK_CAP_Pause;
4890 if ((link_state.partner_adv_pause ==
4891 ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE) ||
4892 (link_state.partner_adv_pause ==
4893 ECORE_LINK_PARTNER_BOTH_PAUSE))
4894 if_link->link_partner_caps |= QLNX_LINK_CAP_Asym_Pause;
4900 qlnx_nic_setup(struct ecore_dev *cdev, struct ecore_pf_params *func_params)
4904 for (i = 0; i < cdev->num_hwfns; i++) {
4905 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
4906 p_hwfn->pf_params = *func_params;
4909 rc = ecore_resc_alloc(cdev);
4911 goto qlnx_nic_setup_exit;
4913 ecore_resc_setup(cdev);
4915 qlnx_nic_setup_exit:
4921 qlnx_nic_start(struct ecore_dev *cdev)
4924 struct ecore_hw_init_params params;
4926 bzero(&params, sizeof (struct ecore_hw_init_params));
4928 params.p_tunn = NULL;
4929 params.b_hw_start = true;
4930 params.int_mode = cdev->int_mode;
4931 params.allow_npar_tx_switch = true;
4932 params.bin_fw_data = NULL;
4934 rc = ecore_hw_init(cdev, &params);
4936 ecore_resc_free(cdev);
4944 qlnx_slowpath_start(qlnx_host_t *ha)
4946 struct ecore_dev *cdev;
4947 struct ecore_pf_params pf_params;
4950 memset(&pf_params, 0, sizeof(struct ecore_pf_params));
4951 pf_params.eth_pf_params.num_cons =
4952 (ha->num_rss) * (ha->num_tc + 1);
4956 rc = qlnx_nic_setup(cdev, &pf_params);
4958 goto qlnx_slowpath_start_exit;
4960 cdev->int_mode = ECORE_INT_MODE_MSIX;
4961 cdev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE;
4963 #ifdef QLNX_MAX_COALESCE
4964 cdev->rx_coalesce_usecs = 255;
4965 cdev->tx_coalesce_usecs = 255;
4968 rc = qlnx_nic_start(cdev);
4970 ha->rx_coalesce_usecs = cdev->rx_coalesce_usecs;
4971 ha->tx_coalesce_usecs = cdev->tx_coalesce_usecs;
4973 qlnx_slowpath_start_exit:
4979 qlnx_slowpath_stop(qlnx_host_t *ha)
4981 struct ecore_dev *cdev;
4982 device_t dev = ha->pci_dev;
4987 ecore_hw_stop(cdev);
4989 for (i = 0; i < ha->cdev.num_hwfns; i++) {
4991 if (ha->sp_handle[i])
4992 (void)bus_teardown_intr(dev, ha->sp_irq[i],
4995 ha->sp_handle[i] = NULL;
4998 (void) bus_release_resource(dev, SYS_RES_IRQ,
4999 ha->sp_irq_rid[i], ha->sp_irq[i]);
5000 ha->sp_irq[i] = NULL;
5003 ecore_resc_free(cdev);
5009 qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
5010 char ver_str[VER_SIZE])
5014 memcpy(cdev->name, name, NAME_SIZE);
5016 for_each_hwfn(cdev, i) {
5017 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
5020 cdev->drv_type = DRV_ID_DRV_TYPE_FREEBSD;
5026 qlnx_get_protocol_stats(void *cdev, int proto_type, void *proto_stats)
5028 enum ecore_mcp_protocol_type type;
5029 union ecore_mcp_protocol_stats *stats;
5030 struct ecore_eth_stats eth_stats;
5034 stats = proto_stats;
5039 case ECORE_MCP_LAN_STATS:
5040 ecore_get_vport_stats((struct ecore_dev *)cdev, ð_stats);
5041 stats->lan_stats.ucast_rx_pkts = eth_stats.common.rx_ucast_pkts;
5042 stats->lan_stats.ucast_tx_pkts = eth_stats.common.tx_ucast_pkts;
5043 stats->lan_stats.fcs_err = -1;
5047 ha->err_get_proto_invalid_type++;
5049 QL_DPRINT1(ha, "invalid protocol type 0x%x\n", type);
5056 qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver)
5058 struct ecore_hwfn *p_hwfn;
5059 struct ecore_ptt *p_ptt;
5061 p_hwfn = &ha->cdev.hwfns[0];
5062 p_ptt = ecore_ptt_acquire(p_hwfn);
5064 if (p_ptt == NULL) {
5065 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
5068 ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, mfw_ver, NULL);
5070 ecore_ptt_release(p_hwfn, p_ptt);
5076 qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size)
5078 struct ecore_hwfn *p_hwfn;
5079 struct ecore_ptt *p_ptt;
5081 p_hwfn = &ha->cdev.hwfns[0];
5082 p_ptt = ecore_ptt_acquire(p_hwfn);
5084 if (p_ptt == NULL) {
5085 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
5088 ecore_mcp_get_flash_size(p_hwfn, p_ptt, flash_size);
5090 ecore_ptt_release(p_hwfn, p_ptt);
5096 qlnx_alloc_mem_arrays(qlnx_host_t *ha)
5098 struct ecore_dev *cdev;
5102 bzero(&ha->txq_array[0], (sizeof(struct qlnx_tx_queue) * QLNX_MAX_RSS));
5103 bzero(&ha->rxq_array[0], (sizeof(struct qlnx_rx_queue) * QLNX_MAX_RSS));
5104 bzero(&ha->sb_array[0], (sizeof(struct ecore_sb_info) * QLNX_MAX_RSS));
5110 qlnx_init_fp(qlnx_host_t *ha)
5112 int rss_id, txq_array_index, tc;
5114 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
5116 struct qlnx_fastpath *fp = &ha->fp_array[rss_id];
5118 fp->rss_id = rss_id;
5120 fp->sb_info = &ha->sb_array[rss_id];
5121 fp->rxq = &ha->rxq_array[rss_id];
5122 fp->rxq->rxq_id = rss_id;
5124 for (tc = 0; tc < ha->num_tc; tc++) {
5125 txq_array_index = tc * ha->num_rss + rss_id;
5126 fp->txq[tc] = &ha->txq_array[txq_array_index];
5127 fp->txq[tc]->index = txq_array_index;
5130 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", qlnx_name_str,
5133 fp->tx_ring_full = 0;
5135 /* reset all the statistics counters */
5137 fp->tx_pkts_processed = 0;
5138 fp->tx_pkts_freed = 0;
5139 fp->tx_pkts_transmitted = 0;
5140 fp->tx_pkts_completed = 0;
5141 fp->tx_lso_wnd_min_len = 0;
5143 fp->tx_nsegs_gt_elem_left = 0;
5144 fp->tx_tso_max_nsegs = 0;
5145 fp->tx_tso_min_nsegs = 0;
5146 fp->err_tx_nsegs_gt_elem_left = 0;
5147 fp->err_tx_dmamap_create = 0;
5148 fp->err_tx_defrag_dmamap_load = 0;
5149 fp->err_tx_non_tso_max_seg = 0;
5150 fp->err_tx_dmamap_load = 0;
5151 fp->err_tx_defrag = 0;
5152 fp->err_tx_free_pkt_null = 0;
5153 fp->err_tx_cons_idx_conflict = 0;
5156 fp->err_m_getcl = 0;
5157 fp->err_m_getjcl = 0;
5163 qlnx_free_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info)
5165 struct ecore_dev *cdev;
5169 if (sb_info->sb_virt) {
5170 OSAL_DMA_FREE_COHERENT(cdev, ((void *)sb_info->sb_virt),
5171 (sb_info->sb_phys), (sizeof(*sb_info->sb_virt)));
5172 sb_info->sb_virt = NULL;
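/*
 * qlnx_sb_init() spreads the flat status-block ids across the device's
 * hw-functions: sb_id % num_hwfns selects the engine and
 * sb_id / num_hwfns is the id relative to that engine. With two hwfns,
 * for example, sb_id 5 lands on hwfn 1 as relative sb 2.
 */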
5177 qlnx_sb_init(struct ecore_dev *cdev, struct ecore_sb_info *sb_info,
5178 void *sb_virt_addr, bus_addr_t sb_phy_addr, u16 sb_id)
5180 struct ecore_hwfn *p_hwfn;
5184 hwfn_index = sb_id % cdev->num_hwfns;
5185 p_hwfn = &cdev->hwfns[hwfn_index];
5186 rel_sb_id = sb_id / cdev->num_hwfns;
5188 QL_DPRINT2(((qlnx_host_t *)cdev),
5189 "hwfn_index = %d p_hwfn = %p sb_id = 0x%x rel_sb_id = 0x%x "
5190 "sb_info = %p sb_virt_addr = %p sb_phy_addr = %p\n",
5191 hwfn_index, p_hwfn, sb_id, rel_sb_id, sb_info,
5192 sb_virt_addr, (void *)sb_phy_addr);
5194 rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
5195 sb_virt_addr, sb_phy_addr, rel_sb_id);
5200 /* This function allocates fast-path status block memory */
5202 qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info, u16 sb_id)
5204 struct status_block *sb_virt;
5208 struct ecore_dev *cdev;
5212 size = sizeof(*sb_virt);
5213 sb_virt = OSAL_DMA_ALLOC_COHERENT(cdev, (&sb_phys), size);
5216 QL_DPRINT1(ha, "Status block allocation failed\n");
5220 rc = qlnx_sb_init(cdev, sb_info, sb_virt, sb_phys, sb_id);
5222 OSAL_DMA_FREE_COHERENT(cdev, sb_virt, sb_phys, size);
5229 qlnx_free_rx_buffers(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
5232 struct sw_rx_data *rx_buf;
5234 for (i = 0; i < rxq->num_rx_buffers; i++) {
5236 rx_buf = &rxq->sw_rx_ring[i];
5238 if (rx_buf->data != NULL) {
5239 if (rx_buf->map != NULL) {
5240 bus_dmamap_unload(ha->rx_tag, rx_buf->map);
5241 bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
5244 m_freem(rx_buf->data);
5245 rx_buf->data = NULL;
5252 qlnx_free_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
5254 struct ecore_dev *cdev;
5259 qlnx_free_rx_buffers(ha, rxq);
5261 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
5262 qlnx_free_tpa_mbuf(ha, &rxq->tpa_info[i]);
5263 if (rxq->tpa_info[i].mpf != NULL)
5264 m_freem(rxq->tpa_info[i].mpf);
5267 bzero((void *)&rxq->sw_rx_ring[0],
5268 (sizeof (struct sw_rx_data) * RX_RING_SIZE));
5270 /* Free the real RQ ring used by FW */
5271 if (rxq->rx_bd_ring.p_virt_addr) {
5272 ecore_chain_free(cdev, &rxq->rx_bd_ring);
5273 rxq->rx_bd_ring.p_virt_addr = NULL;
5276 /* Free the real completion ring used by FW */
5277 if (rxq->rx_comp_ring.p_virt_addr &&
5278 rxq->rx_comp_ring.pbl_sp.p_virt_table) {
5279 ecore_chain_free(cdev, &rxq->rx_comp_ring);
5280 rxq->rx_comp_ring.p_virt_addr = NULL;
5281 rxq->rx_comp_ring.pbl_sp.p_virt_table = NULL;
5284 #ifdef QLNX_SOFT_LRO
5286 struct lro_ctrl *lro;
5291 #endif /* #ifdef QLNX_SOFT_LRO */
5297 qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
5299 struct mbuf *mp;
5300 uint16_t rx_buf_size;
5301 struct sw_rx_data *sw_rx_data;
5302 struct eth_rx_bd *rx_bd;
5303 dma_addr_t dma_addr;
5305 bus_dma_segment_t segs[1];
5308 struct ecore_dev *cdev;
5312 rx_buf_size = rxq->rx_buf_size;
5314 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);
5317 QL_DPRINT1(ha, "Failed to allocate Rx data\n");
5321 mp->m_len = mp->m_pkthdr.len = rx_buf_size;
5323 map = (bus_dmamap_t)0;
5325 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
5327 dma_addr = segs[0].ds_addr;
5329 if (ret || !dma_addr || (nsegs != 1)) {
5331 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
5332 ret, (long long unsigned int)dma_addr, nsegs);
5336 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
5337 sw_rx_data->data = mp;
5338 sw_rx_data->dma_addr = dma_addr;
5339 sw_rx_data->map = map;
5341 /* Advance PROD and get BD pointer */
5342 rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
5343 rx_bd->addr.hi = htole32(U64_HI(dma_addr));
5344 rx_bd->addr.lo = htole32(U64_LO(dma_addr));
5345 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);
5347 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1); /* wrap; assumes RX_RING_SIZE is a power of 2 */
5353 qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
5354 struct qlnx_agg_info *tpa)
5357 dma_addr_t dma_addr;
5359 bus_dma_segment_t segs[1];
5362 struct sw_rx_data *rx_buf;
5364 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);
5367 QL_DPRINT1(ha, "Failed to allocate Rx data\n");
5371 mp->m_len = mp->m_pkthdr.len = rx_buf_size;
5373 map = (bus_dmamap_t)0;
5375 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
5377 dma_addr = segs[0].ds_addr;
5379 if (ret || !dma_addr || (nsegs != 1)) {
5381 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
5382 ret, (long long unsigned int)dma_addr, nsegs);
5386 rx_buf = &tpa->rx_buf;
5388 memset(rx_buf, 0, sizeof (struct sw_rx_data));
5391 rx_buf->dma_addr = dma_addr;
5394 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);
5400 qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa)
5402 struct sw_rx_data *rx_buf;
5404 rx_buf = &tpa->rx_buf;
5406 if (rx_buf->data != NULL) {
5407 if (rx_buf->map != NULL) {
5408 bus_dmamap_unload(ha->rx_tag, rx_buf->map);
5409 bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
5412 m_freem(rx_buf->data);
5413 rx_buf->data = NULL;
5418 /* This function allocates all memory needed per Rx queue */
5420 qlnx_alloc_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
5422 int i, rc, num_allocated;
5424 struct ecore_dev *cdev;
5429 rxq->num_rx_buffers = RX_RING_SIZE;
5431 rxq->rx_buf_size = ha->rx_buf_size;
5433 /* Allocate the parallel driver ring for Rx buffers */
5434 bzero((void *)&rxq->sw_rx_ring[0],
5435 (sizeof (struct sw_rx_data) * RX_RING_SIZE));
5437 /* Allocate FW Rx ring */
5439 rc = ecore_chain_alloc(cdev,
5440 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
5441 ECORE_CHAIN_MODE_NEXT_PTR,
5442 ECORE_CHAIN_CNT_TYPE_U16,
5444 sizeof(struct eth_rx_bd),
5445 &rxq->rx_bd_ring, NULL);
5450 /* Allocate FW completion ring */
5451 rc = ecore_chain_alloc(cdev,
5452 ECORE_CHAIN_USE_TO_CONSUME,
5453 ECORE_CHAIN_MODE_PBL,
5454 ECORE_CHAIN_CNT_TYPE_U16,
5456 sizeof(union eth_rx_cqe),
5457 &rxq->rx_comp_ring, NULL);
5462 /* Allocate buffers for the Rx ring */
5464 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
5465 rc = qlnx_alloc_tpa_mbuf(ha, rxq->rx_buf_size,
5472 for (i = 0; i < rxq->num_rx_buffers; i++) {
5473 rc = qlnx_alloc_rx_buffer(ha, rxq);
5478 if (!num_allocated) {
5479 QL_DPRINT1(ha, "Rx buffers allocation failed\n");
5481 } else if (num_allocated < rxq->num_rx_buffers) {
5482 QL_DPRINT1(ha, "Allocated less buffers than"
5483 " desired (%d allocated)\n", num_allocated);
5486 #ifdef QLNX_SOFT_LRO
5489 struct lro_ctrl *lro;
5493 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
5494 if (tcp_lro_init_args(lro, ifp, 0, rxq->num_rx_buffers)) {
5495 QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n",
5500 if (tcp_lro_init(lro)) {
5501 QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n",
5505 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
5509 #endif /* #ifdef QLNX_SOFT_LRO */
5513 qlnx_free_mem_rxq(ha, rxq);
5519 qlnx_free_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
5520 struct qlnx_tx_queue *txq)
5522 struct ecore_dev *cdev;
5526 bzero((void *)&txq->sw_tx_ring[0],
5527 (sizeof (struct sw_tx_bd) * TX_RING_SIZE));
5529 /* Free the real RQ ring used by FW */
5530 if (txq->tx_pbl.p_virt_addr) {
5531 ecore_chain_free(cdev, &txq->tx_pbl);
5532 txq->tx_pbl.p_virt_addr = NULL;
5537 /* This function allocates all memory needed per Tx queue */
5539 qlnx_alloc_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
5540 struct qlnx_tx_queue *txq)
5542 int ret = ECORE_SUCCESS;
5543 union eth_tx_bd_types *p_virt;
5544 struct ecore_dev *cdev;
5548 bzero((void *)&txq->sw_tx_ring[0],
5549 (sizeof (struct sw_tx_bd) * TX_RING_SIZE));
5551 /* Allocate the real Tx ring to be used by FW */
5552 ret = ecore_chain_alloc(cdev,
5553 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
5554 ECORE_CHAIN_MODE_PBL,
5555 ECORE_CHAIN_CNT_TYPE_U16,
5558 &txq->tx_pbl, NULL);
5560 if (ret != ECORE_SUCCESS) {
5564 txq->num_tx_buffers = TX_RING_SIZE;
5569 qlnx_free_mem_txq(ha, fp, txq);
5574 qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
5577 struct ifnet *ifp = ha->ifp;
5579 if (mtx_initialized(&fp->tx_mtx)) {
5581 if (fp->tx_br != NULL) {
5583 mtx_lock(&fp->tx_mtx);
5585 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
5586 fp->tx_pkts_freed++;
5590 mtx_unlock(&fp->tx_mtx);
5592 buf_ring_free(fp->tx_br, M_DEVBUF);
5595 mtx_destroy(&fp->tx_mtx);
5601 qlnx_free_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
5605 qlnx_free_mem_sb(ha, fp->sb_info);
5607 qlnx_free_mem_rxq(ha, fp->rxq);
5609 for (tc = 0; tc < ha->num_tc; tc++)
5610 qlnx_free_mem_txq(ha, fp, fp->txq[tc]);
5616 qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
5618 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
5619 "qlnx%d_fp%d_tx_mq_lock", ha->dev_unit, fp->rss_id);
5621 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
5623 fp->tx_br = buf_ring_alloc(TX_RING_SIZE, M_DEVBUF,
5624 M_NOWAIT, &fp->tx_mtx);
5625 if (fp->tx_br == NULL) {
5626 QL_DPRINT1(ha, "buf_ring_alloc failed for fp[%d, %d]\n",
5627 ha->dev_unit, fp->rss_id);
5634 qlnx_alloc_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
5638 rc = qlnx_alloc_mem_sb(ha, fp->sb_info, fp->rss_id);
5642 if (ha->rx_jumbo_buf_eq_mtu) {
5643 if (ha->max_frame_size <= MCLBYTES)
5644 ha->rx_buf_size = MCLBYTES;
5645 else if (ha->max_frame_size <= MJUMPAGESIZE)
5646 ha->rx_buf_size = MJUMPAGESIZE;
5647 else if (ha->max_frame_size <= MJUM9BYTES)
5648 ha->rx_buf_size = MJUM9BYTES;
5649 else if (ha->max_frame_size <= MJUM16BYTES)
5650 ha->rx_buf_size = MJUM16BYTES;
5652 if (ha->max_frame_size <= MCLBYTES)
5653 ha->rx_buf_size = MCLBYTES;
5655 ha->rx_buf_size = MJUMPAGESIZE;
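/*
 * The tiers above map the maximum frame size to the standard mbuf
 * cluster sizes: MCLBYTES (2KB), MJUMPAGESIZE (one page, typically
 * 4KB), MJUM9BYTES (9KB) and MJUM16BYTES (16KB).
 */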
5658 rc = qlnx_alloc_mem_rxq(ha, fp->rxq);
5662 for (tc = 0; tc < ha->num_tc; tc++) {
5663 rc = qlnx_alloc_mem_txq(ha, fp, fp->txq[tc]);
5671 qlnx_free_mem_fp(ha, fp);
5676 qlnx_free_mem_load(qlnx_host_t *ha)
5679 struct ecore_dev *cdev;
5683 for (i = 0; i < ha->num_rss; i++) {
5684 struct qlnx_fastpath *fp = &ha->fp_array[i];
5686 qlnx_free_mem_fp(ha, fp);
5692 qlnx_alloc_mem_load(qlnx_host_t *ha)
5696 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
5697 struct qlnx_fastpath *fp = &ha->fp_array[rss_id];
5699 rc = qlnx_alloc_mem_fp(ha, fp);
5707 qlnx_start_vport(struct ecore_dev *cdev,
5711 u8 inner_vlan_removal_en_flg,
5716 struct ecore_sp_vport_start_params vport_start_params = { 0 };
5719 ha = (qlnx_host_t *)cdev;
5721 vport_start_params.remove_inner_vlan = inner_vlan_removal_en_flg;
5722 vport_start_params.tx_switching = 0;
5723 vport_start_params.handle_ptp_pkts = 0;
5724 vport_start_params.only_untagged = 0;
5725 vport_start_params.drop_ttl0 = drop_ttl0_flg;
5727 vport_start_params.tpa_mode =
5728 (hw_lro_enable ? ECORE_TPA_MODE_RSC : ECORE_TPA_MODE_NONE);
5729 vport_start_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;
5731 vport_start_params.vport_id = vport_id;
5732 vport_start_params.mtu = mtu;
5735 QL_DPRINT2(ha, "Setting mtu to %d and VPORT ID = %d\n", mtu, vport_id);
5737 for_each_hwfn(cdev, i) {
5738 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
5740 vport_start_params.concrete_fid = p_hwfn->hw_info.concrete_fid;
5741 vport_start_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
5743 rc = ecore_sp_vport_start(p_hwfn, &vport_start_params);
5746 QL_DPRINT1(ha, "Failed to start V-PORT %d"
5747 " with MTU %d\n", vport_id, mtu);
5751 ecore_hw_start_fastpath(p_hwfn);
5753 QL_DPRINT2(ha, "Started V-PORT %d with MTU %d\n",
5761 qlnx_update_vport(struct ecore_dev *cdev,
5762 struct qlnx_update_vport_params *params)
5764 struct ecore_sp_vport_update_params sp_params;
5765 int rc, i, j, fp_index;
5766 struct ecore_hwfn *p_hwfn;
5767 struct ecore_rss_params *rss;
5768 qlnx_host_t *ha = (qlnx_host_t *)cdev;
5769 struct qlnx_fastpath *fp;
5771 memset(&sp_params, 0, sizeof(sp_params));
5772 /* Translate protocol params into sp params */
5773 sp_params.vport_id = params->vport_id;
5775 sp_params.update_vport_active_rx_flg =
5776 params->update_vport_active_rx_flg;
5777 sp_params.vport_active_rx_flg = params->vport_active_rx_flg;
5779 sp_params.update_vport_active_tx_flg =
5780 params->update_vport_active_tx_flg;
5781 sp_params.vport_active_tx_flg = params->vport_active_tx_flg;
5783 sp_params.update_inner_vlan_removal_flg =
5784 params->update_inner_vlan_removal_flg;
5785 sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg;
5787 sp_params.sge_tpa_params = params->sge_tpa_params;
5789 /* RSS is a bit tricky, since the upper layer isn't familiar with hwfns.
5790 * We need to re-fix the RSS values per engine for CMT.
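 * e.g., with num_hwfns = 2 and num_rss = 4, fp_index below gives engine 0
 * the rxq handles of fps 0,2,0,2,... and engine 1 those of fps 1,3,1,3,...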
5792 if (params->rss_params->update_rss_config)
5793 sp_params.rss_params = params->rss_params;
5795 sp_params.rss_params = NULL;
5797 for_each_hwfn(cdev, i) {
5799 p_hwfn = &cdev->hwfns[i];
5801 if ((cdev->num_hwfns > 1) &&
5802 params->rss_params->update_rss_config &&
5803 params->rss_params->rss_enable) {
5805 rss = params->rss_params;
5807 for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE; j++) {
5809 fp_index = ((cdev->num_hwfns * j) + i) %
5812 fp = &ha->fp_array[fp_index];
5813 rss->rss_ind_table[j] = fp->rxq->handle;
5816 for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE;) {
5817 QL_DPRINT3(ha, "%p %p %p %p %p %p %p %p\n",
5818 rss->rss_ind_table[j],
5819 rss->rss_ind_table[j+1],
5820 rss->rss_ind_table[j+2],
5821 rss->rss_ind_table[j+3],
5822 rss->rss_ind_table[j+4],
5823 rss->rss_ind_table[j+5],
5824 rss->rss_ind_table[j+6],
5825 rss->rss_ind_table[j+7]);
5830 sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
5832 QL_DPRINT1(ha, "Update sp vport ID=%d\n", params->vport_id);
5834 rc = ecore_sp_vport_update(p_hwfn, &sp_params,
5835 ECORE_SPQ_MODE_EBLOCK, NULL);
5837 QL_DPRINT1(ha, "Failed to update VPORT\n");
5841 QL_DPRINT2(ha, "Updated V-PORT %d: tx_active_flag %d, "
5842 "rx_active_flag %d [tx_update %d], [rx_update %d]\n",
5843 params->vport_id, params->vport_active_tx_flg,
5844 params->vport_active_rx_flg,
5845 params->update_vport_active_tx_flg,
5846 params->update_vport_active_rx_flg);
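/*
 * Re-post the BD at the software consumer back at the producer, e.g.
 * when a replacement mbuf could not be allocated, so the Rx ring never
 * runs dry.
 */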
5853 qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq)
5855 struct eth_rx_bd *rx_bd_cons =
5856 ecore_chain_consume(&rxq->rx_bd_ring);
5857 struct eth_rx_bd *rx_bd_prod =
5858 ecore_chain_produce(&rxq->rx_bd_ring);
5859 struct sw_rx_data *sw_rx_data_cons =
5860 &rxq->sw_rx_ring[rxq->sw_rx_cons];
5861 struct sw_rx_data *sw_rx_data_prod =
5862 &rxq->sw_rx_ring[rxq->sw_rx_prod];
5864 sw_rx_data_prod->data = sw_rx_data_cons->data;
5865 memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd));
5867 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
5868 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
5874 qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, struct qlnx_rx_queue *rxq)
5880 struct eth_rx_prod_data rx_prod_data;
5884 bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
5885 cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
5887 /* Update producers */
5888 rx_prods.rx_prod_data.bd_prod = htole16(bd_prod);
5889 rx_prods.rx_prod_data.cqe_prod = htole16(cqe_prod);
5891 /* Make sure that the BD and SGE data is updated before updating the
5892 * producers since FW might read the BD/SGE right after the producer
5897 internal_ram_wr(p_hwfn, rxq->hw_rxq_prod_addr,
5898 sizeof(rx_prods), &rx_prods.data32);
5900 /* mmiowb is needed to synchronize doorbell writes from more than one
5901 * processor. It guarantees that the write arrives to the device before
5902 * the napi lock is released and another qlnx_poll is called (possibly
5903 * on another CPU). Without this barrier, the next doorbell can bypass
5904 * this doorbell. This is applicable to IA64/Altix systems.
5911 static uint32_t qlnx_hash_key[] = {
5912 ((0x6d << 24)|(0x5a << 16)|(0x56 << 8)|0xda),
5913 ((0x25 << 24)|(0x5b << 16)|(0x0e << 8)|0xc2),
5914 ((0x41 << 24)|(0x67 << 16)|(0x25 << 8)|0x3d),
5915 ((0x43 << 24)|(0xa3 << 16)|(0x8f << 8)|0xb0),
5916 ((0xd0 << 24)|(0xca << 16)|(0x2b << 8)|0xcb),
5917 ((0xae << 24)|(0x7b << 16)|(0x30 << 8)|0xb4),
5918 ((0x77 << 24)|(0xcb << 16)|(0x2d << 8)|0xa3),
5919 ((0x80 << 24)|(0x30 << 16)|(0xf2 << 8)|0x0c),
5920 ((0x6a << 24)|(0x42 << 16)|(0xb7 << 8)|0x3b),
5921 ((0xbe << 24)|(0xac << 16)|(0x01 << 8)|0xfa)};
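/*
 * The words above are the big-endian packing of the well-known 40-byte
 * default Toeplitz RSS key (0x6d, 0x5a, 0x56, 0xda, ...); i.e.
 * qlnx_hash_key[i] == (key[4*i] << 24) | (key[4*i+1] << 16) |
 *                     (key[4*i+2] << 8) | key[4*i+3].
 */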
5924 qlnx_start_queues(qlnx_host_t *ha)
5926 int rc, tc, i, vport_id = 0,
5927 drop_ttl0_flg = 1, vlan_removal_en = 1,
5928 tx_switching = 0, hw_lro_enable = 0;
5929 struct ecore_dev *cdev = &ha->cdev;
5930 struct ecore_rss_params *rss_params = &ha->rss_params;
5931 struct qlnx_update_vport_params vport_update_params;
5933 struct ecore_hwfn *p_hwfn;
5934 struct ecore_sge_tpa_params tpa_params;
5935 struct ecore_queue_start_common_params qparams;
5936 struct qlnx_fastpath *fp;
5940 QL_DPRINT1(ha, "Num RSS = %d\n", ha->num_rss);
5943 QL_DPRINT1(ha, "Cannot update V-PORT to active as there"
5944 " are no Rx queues\n");
5948 #ifndef QLNX_SOFT_LRO
5949 hw_lro_enable = ifp->if_capenable & IFCAP_LRO;
5950 #endif /* #ifndef QLNX_SOFT_LRO */
5952 rc = qlnx_start_vport(cdev, vport_id, ifp->if_mtu, drop_ttl0_flg,
5953 vlan_removal_en, tx_switching, hw_lro_enable);
5956 QL_DPRINT1(ha, "Start V-PORT failed %d\n", rc);
5960 QL_DPRINT2(ha, "Start vport ramrod passed, "
5961 "vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
5962 vport_id, (int)(ifp->if_mtu + 0xe), vlan_removal_en);
5965 struct ecore_rxq_start_ret_params rx_ret_params;
5966 struct ecore_txq_start_ret_params tx_ret_params;
5968 fp = &ha->fp_array[i];
5969 p_hwfn = &cdev->hwfns[(fp->rss_id % cdev->num_hwfns)];
5971 bzero(&qparams, sizeof(struct ecore_queue_start_common_params));
5972 bzero(&rx_ret_params,
5973 sizeof (struct ecore_rxq_start_ret_params));
5975 qparams.queue_id = i;
5976 qparams.vport_id = vport_id;
5977 qparams.stats_id = vport_id;
5978 qparams.p_sb = fp->sb_info;
5979 qparams.sb_idx = RX_PI;
5982 rc = ecore_eth_rx_queue_start(p_hwfn,
5983 p_hwfn->hw_info.opaque_fid,
5985 fp->rxq->rx_buf_size, /* bd_max_bytes */
5986 /* bd_chain_phys_addr */
5987 fp->rxq->rx_bd_ring.p_phys_addr,
5989 ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring),
5991 ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring),
5995 QL_DPRINT1(ha, "Start RXQ #%d failed %d\n", i, rc);
5999 fp->rxq->hw_rxq_prod_addr = rx_ret_params.p_prod;
6000 fp->rxq->handle = rx_ret_params.p_handle;
6001 fp->rxq->hw_cons_ptr =
6002 &fp->sb_info->sb_virt->pi_array[RX_PI];
6004 qlnx_update_rx_prod(p_hwfn, fp->rxq);
6006 for (tc = 0; tc < ha->num_tc; tc++) {
6007 struct qlnx_tx_queue *txq = fp->txq[tc];
6010 sizeof(struct ecore_queue_start_common_params));
6011 bzero(&tx_ret_params,
6012 sizeof (struct ecore_txq_start_ret_params));
6014 qparams.queue_id = txq->index / cdev->num_hwfns; /* per-engine (relative) queue id */
6015 qparams.vport_id = vport_id;
6016 qparams.stats_id = vport_id;
6017 qparams.p_sb = fp->sb_info;
6018 qparams.sb_idx = TX_PI(tc);
6020 rc = ecore_eth_tx_queue_start(p_hwfn,
6021 p_hwfn->hw_info.opaque_fid,
6023 /* bd_chain_phys_addr */
6024 ecore_chain_get_pbl_phys(&txq->tx_pbl),
6025 ecore_chain_get_page_cnt(&txq->tx_pbl),
6029 QL_DPRINT1(ha, "Start TXQ #%d failed %d\n",
6034 txq->doorbell_addr = tx_ret_params.p_doorbell;
6035 txq->handle = tx_ret_params.p_handle;
6038 &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
6039 SET_FIELD(txq->tx_db.data.params,
6040 ETH_DB_DATA_DEST, DB_DEST_XCM);
6041 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
6043 SET_FIELD(txq->tx_db.data.params,
6044 ETH_DB_DATA_AGG_VAL_SEL,
6045 DQ_XCM_ETH_TX_BD_PROD_CMD);
6047 txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
6051 /* Fill struct with RSS params */
6052 if (ha->num_rss > 1) {
6054 rss_params->update_rss_config = 1;
6055 rss_params->rss_enable = 1;
6056 rss_params->update_rss_capabilities = 1;
6057 rss_params->update_rss_ind_table = 1;
6058 rss_params->update_rss_key = 1;
6059 rss_params->rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 |
6060 ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP;
6061 rss_params->rss_table_size_log = 7; /* 2^7 = 128 */
6063 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
6064 fp = &ha->fp_array[(i % ha->num_rss)];
6065 rss_params->rss_ind_table[i] = fp->rxq->handle;
6068 for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
6069 rss_params->rss_key[i] = (__le32)qlnx_hash_key[i];
6072 memset(rss_params, 0, sizeof(*rss_params));
6076 /* Prepare and send the vport enable */
6077 memset(&vport_update_params, 0, sizeof(vport_update_params));
6078 vport_update_params.vport_id = vport_id;
6079 vport_update_params.update_vport_active_tx_flg = 1;
6080 vport_update_params.vport_active_tx_flg = 1;
6081 vport_update_params.update_vport_active_rx_flg = 1;
6082 vport_update_params.vport_active_rx_flg = 1;
6083 vport_update_params.rss_params = rss_params;
6084 vport_update_params.update_inner_vlan_removal_flg = 1;
6085 vport_update_params.inner_vlan_removal_flg = 1;
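/*
 * When hardware LRO is enabled, hand the firmware the TPA aggregation
 * parameters along with the vport update below.
 */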
6087 if (hw_lro_enable) {
6088 memset(&tpa_params, 0, sizeof (struct ecore_sge_tpa_params));
6090 tpa_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;
6092 tpa_params.update_tpa_en_flg = 1;
6093 tpa_params.tpa_ipv4_en_flg = 1;
6094 tpa_params.tpa_ipv6_en_flg = 1;
6096 tpa_params.update_tpa_param_flg = 1;
6097 tpa_params.tpa_pkt_split_flg = 0;
6098 tpa_params.tpa_hdr_data_split_flg = 0;
6099 tpa_params.tpa_gro_consistent_flg = 0;
6100 tpa_params.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
6101 tpa_params.tpa_max_size = (uint16_t)(-1); /* 0xFFFF: no cap on aggregated frame size */
6102 tpa_params.tpa_min_size_to_start = ifp->if_mtu/2;
6103 tpa_params.tpa_min_size_to_cont = ifp->if_mtu/2;
6105 vport_update_params.sge_tpa_params = &tpa_params;
6108 rc = qlnx_update_vport(cdev, &vport_update_params);
6110 QL_DPRINT1(ha, "Update V-PORT failed %d\n", rc);
6118 qlnx_drain_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6119 struct qlnx_tx_queue *txq)
6121 uint16_t hw_bd_cons;
6122 uint16_t ecore_cons_idx;
6124 QL_DPRINT2(ha, "enter\n");
6126 hw_bd_cons = le16toh(*txq->hw_cons_ptr);
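/*
 * Poll until the firmware's Tx consumer catches up with the chain's
 * consumer index, reaping completed packets in 2ms steps.
 */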
6128 while (hw_bd_cons !=
6129 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
6131 mtx_lock(&fp->tx_mtx);
6133 (void)qlnx_tx_int(ha, fp, txq);
6135 mtx_unlock(&fp->tx_mtx);
6137 qlnx_mdelay(__func__, 2);
6139 hw_bd_cons = le16toh(*txq->hw_cons_ptr);
6142 QL_DPRINT2(ha, "[%d, %d]: done\n", fp->rss_id, txq->index);
6148 qlnx_stop_queues(qlnx_host_t *ha)
6150 struct qlnx_update_vport_params vport_update_params;
6151 struct ecore_dev *cdev;
6152 struct qlnx_fastpath *fp;
6157 /* Disable the vport */
6159 memset(&vport_update_params, 0, sizeof(vport_update_params));
6161 vport_update_params.vport_id = 0;
6162 vport_update_params.update_vport_active_tx_flg = 1;
6163 vport_update_params.vport_active_tx_flg = 0;
6164 vport_update_params.update_vport_active_rx_flg = 1;
6165 vport_update_params.vport_active_rx_flg = 0;
6166 vport_update_params.rss_params = &ha->rss_params;
6167 vport_update_params.rss_params->update_rss_config = 0;
6168 vport_update_params.rss_params->rss_enable = 0;
6169 vport_update_params.update_inner_vlan_removal_flg = 0;
6170 vport_update_params.inner_vlan_removal_flg = 0;
6172 QL_DPRINT1(ha, "Update vport ID= %d\n", vport_update_params.vport_id);
6174 rc = qlnx_update_vport(cdev, &vport_update_params);
6176 QL_DPRINT1(ha, "Failed to update vport\n");
6180 /* Flush Tx queues. If needed, request drain from MCP */
6182 fp = &ha->fp_array[i];
6184 for (tc = 0; tc < ha->num_tc; tc++) {
6185 struct qlnx_tx_queue *txq = fp->txq[tc];
6187 rc = qlnx_drain_txq(ha, fp, txq);
6193 /* Stop all Queues in reverse order*/
6194 for (i = ha->num_rss - 1; i >= 0; i--) {
6196 struct ecore_hwfn *p_hwfn = &cdev->hwfns[(i % cdev->num_hwfns)];
6198 fp = &ha->fp_array[i];
6200 /* Stop the Tx Queue(s)*/
6201 for (tc = 0; tc < ha->num_tc; tc++) {
6204 tx_queue_id = tc * ha->num_rss + i;
6205 rc = ecore_eth_tx_queue_stop(p_hwfn,
6206 fp->txq[tc]->handle);
6209 QL_DPRINT1(ha, "Failed to stop TXQ #%d\n",
6215 /* Stop the Rx Queue*/
6216 rc = ecore_eth_rx_queue_stop(p_hwfn, fp->rxq->handle, false,
6219 QL_DPRINT1(ha, "Failed to stop RXQ #%d\n", i);
6224 /* Stop the vport */
6225 for_each_hwfn(cdev, i) {
6227 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
6229 rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 0);
6232 QL_DPRINT1(ha, "Failed to stop VPORT\n");
6241 qlnx_set_ucast_rx_mac(qlnx_host_t *ha,
6242 enum ecore_filter_opcode opcode,
6243 unsigned char mac[ETH_ALEN])
6245 struct ecore_filter_ucast ucast;
6246 struct ecore_dev *cdev;
6251 bzero(&ucast, sizeof(struct ecore_filter_ucast));
6253 ucast.opcode = opcode;
6254 ucast.type = ECORE_FILTER_MAC;
6255 ucast.is_rx_filter = 1;
6256 ucast.vport_to_add_to = 0;
6257 memcpy(&ucast.mac[0], mac, ETH_ALEN);
6259 rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);
6265 qlnx_remove_all_ucast_mac(qlnx_host_t *ha)
6267 struct ecore_filter_ucast ucast;
6268 struct ecore_dev *cdev;
6271 bzero(&ucast, sizeof(struct ecore_filter_ucast));
6273 ucast.opcode = ECORE_FILTER_REPLACE;
6274 ucast.type = ECORE_FILTER_MAC;
6275 ucast.is_rx_filter = 1;
6279 rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);
6285 qlnx_remove_all_mcast_mac(qlnx_host_t *ha)
6287 struct ecore_filter_mcast *mcast;
6288 struct ecore_dev *cdev;
6293 mcast = &ha->ecore_mcast;
6294 bzero(mcast, sizeof(struct ecore_filter_mcast));
6296 mcast->opcode = ECORE_FILTER_REMOVE;
6298 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
6300 if (ha->mcast[i].addr[0] || ha->mcast[i].addr[1] ||
6301 ha->mcast[i].addr[2] || ha->mcast[i].addr[3] ||
6302 ha->mcast[i].addr[4] || ha->mcast[i].addr[5]) {
6304 memcpy(&mcast->mac[i], &ha->mcast[i].addr[0], ETH_ALEN);
6305 mcast->num_mc_addrs++;
6308 mcast = &ha->ecore_mcast;
6310 rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);
6312 bzero(ha->mcast, (sizeof(qlnx_mcast_t) * QLNX_MAX_NUM_MULTICAST_ADDRS));
6319 qlnx_clean_filters(qlnx_host_t *ha)
6323 /* Remove all unicast macs */
6324 rc = qlnx_remove_all_ucast_mac(ha);
6328 /* Remove all multicast macs */
6329 rc = qlnx_remove_all_mcast_mac(ha);
6333 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_FLUSH, ha->primary_mac);
6339 qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter)
6341 struct ecore_filter_accept_flags accept;
6343 struct ecore_dev *cdev;
6347 bzero(&accept, sizeof(struct ecore_filter_accept_flags));
6349 accept.update_rx_mode_config = 1;
6350 accept.rx_accept_filter = filter;
6352 accept.update_tx_mode_config = 1;
6353 accept.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
6354 ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST;
6356 rc = ecore_filter_accept_cmd(cdev, 0, accept, false, false,
6357 ECORE_SPQ_MODE_CB, NULL);
6363 qlnx_set_rx_mode(qlnx_host_t *ha)
6368 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, ha->primary_mac);
6372 rc = qlnx_remove_all_mcast_mac(ha);
6376 filter = ECORE_ACCEPT_UCAST_MATCHED |
6377 ECORE_ACCEPT_MCAST_MATCHED |
6379 ha->filter = filter;
6381 rc = qlnx_set_rx_accept_filter(ha, filter);
6387 qlnx_set_link(qlnx_host_t *ha, bool link_up)
6390 struct ecore_dev *cdev;
6391 struct ecore_hwfn *hwfn;
6392 struct ecore_ptt *ptt;
6396 for_each_hwfn(cdev, i) {
6398 hwfn = &cdev->hwfns[i];
6400 ptt = ecore_ptt_acquire(hwfn);
6404 rc = ecore_mcp_set_link(hwfn, ptt, link_up);
6406 ecore_ptt_release(hwfn, ptt);
6414 #if __FreeBSD_version >= 1100000
6416 qlnx_get_counter(if_t ifp, ift_counter cnt)
6421 ha = (qlnx_host_t *)if_getsoftc(ifp);
6425 case IFCOUNTER_IPACKETS:
6426 count = ha->hw_stats.common.rx_ucast_pkts +
6427 ha->hw_stats.common.rx_mcast_pkts +
6428 ha->hw_stats.common.rx_bcast_pkts;
6431 case IFCOUNTER_IERRORS:
6432 count = ha->hw_stats.common.rx_crc_errors +
6433 ha->hw_stats.common.rx_align_errors +
6434 ha->hw_stats.common.rx_oversize_packets +
6435 ha->hw_stats.common.rx_undersize_packets;
6438 case IFCOUNTER_OPACKETS:
6439 count = ha->hw_stats.common.tx_ucast_pkts +
6440 ha->hw_stats.common.tx_mcast_pkts +
6441 ha->hw_stats.common.tx_bcast_pkts;
6444 case IFCOUNTER_OERRORS:
6445 count = ha->hw_stats.common.tx_err_drop_pkts;
6448 case IFCOUNTER_COLLISIONS:
6451 case IFCOUNTER_IBYTES:
6452 count = ha->hw_stats.common.rx_ucast_bytes +
6453 ha->hw_stats.common.rx_mcast_bytes +
6454 ha->hw_stats.common.rx_bcast_bytes;
6457 case IFCOUNTER_OBYTES:
6458 count = ha->hw_stats.common.tx_ucast_bytes +
6459 ha->hw_stats.common.tx_mcast_bytes +
6460 ha->hw_stats.common.tx_bcast_bytes;
6463 case IFCOUNTER_IMCASTS:
6464 count = ha->hw_stats.common.rx_mcast_pkts;
6467 case IFCOUNTER_OMCASTS:
6468 count = ha->hw_stats.common.tx_mcast_pkts;
6471 case IFCOUNTER_IQDROPS:
6472 case IFCOUNTER_OQDROPS:
6473 case IFCOUNTER_NOPROTO:
6476 return (if_get_counter_default(ifp, cnt));
6484 qlnx_timer(void *arg)
6488 ha = (qlnx_host_t *)arg;
6490 ecore_get_vport_stats(&ha->cdev, &ha->hw_stats);
6492 if (ha->storm_stats_enable)
6493 qlnx_sample_storm_stats(ha);
6495 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
6501 qlnx_load(qlnx_host_t *ha)
6505 struct ecore_dev *cdev;
6511 QL_DPRINT2(ha, "enter\n");
6513 rc = qlnx_alloc_mem_arrays(ha);
6515 goto qlnx_load_exit0;
6519 rc = qlnx_alloc_mem_load(ha);
6521 goto qlnx_load_exit1;
6523 QL_DPRINT2(ha, "Allocated %d RSS queues on %d TC/s\n",
6524 ha->num_rss, ha->num_tc);
6526 for (i = 0; i < ha->num_rss; i++) {
6528 if ((rc = bus_setup_intr(dev, ha->irq_vec[i].irq,
6529 (INTR_TYPE_NET | INTR_MPSAFE),
6530 NULL, qlnx_fp_isr, &ha->irq_vec[i],
6531 &ha->irq_vec[i].handle))) {
6533 QL_DPRINT1(ha, "could not setup interrupt\n");
6534 goto qlnx_load_exit2;
6537 QL_DPRINT2(ha, "rss_id = %d irq_rid %d "
6538 "irq %p handle %p\n", i,
6539 ha->irq_vec[i].irq_rid,
6540 ha->irq_vec[i].irq, ha->irq_vec[i].handle);
6542 bus_bind_intr(dev, ha->irq_vec[i].irq, (i % mp_ncpus));
6545 rc = qlnx_start_queues(ha);
6547 goto qlnx_load_exit2;
6549 QL_DPRINT2(ha, "Start VPORT, RXQ and TXQ succeeded\n");
6551 /* Add primary mac and set Rx filters */
6552 rc = qlnx_set_rx_mode(ha);
6554 goto qlnx_load_exit2;
6556 /* Ask for link-up using current configuration */
6557 qlnx_set_link(ha, true);
6559 ha->state = QLNX_STATE_OPEN;
6561 bzero(&ha->hw_stats, sizeof(struct ecore_eth_stats));
6563 if (ha->flags.callout_init)
6564 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
6566 goto qlnx_load_exit0;
6569 qlnx_free_mem_load(ha);
6575 QL_DPRINT2(ha, "exit [%d]\n", rc);
6580 qlnx_drain_soft_lro(qlnx_host_t *ha)
6582 #ifdef QLNX_SOFT_LRO
6590 if (ifp->if_capenable & IFCAP_LRO) {
6592 for (i = 0; i < ha->num_rss; i++) {
6594 struct qlnx_fastpath *fp = &ha->fp_array[i];
6595 struct lro_ctrl *lro;
6597 lro = &fp->rxq->lro;
6599 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
6601 tcp_lro_flush_all(lro);
6604 struct lro_entry *queued;
6606 while (!SLIST_EMPTY(&lro->lro_active)) {
6607 queued = SLIST_FIRST(&lro->lro_active);
6608 SLIST_REMOVE_HEAD(&lro->lro_active, next);
6609 tcp_lro_flush(lro, queued);
6612 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
6617 #endif /* #ifdef QLNX_SOFT_LRO */
6623 qlnx_unload(qlnx_host_t *ha)
6625 struct ecore_dev *cdev;
6632 QL_DPRINT2(ha, "enter\n");
6633 QL_DPRINT1(ha, "QLNX STATE = %d\n", ha->state);
6635 if (ha->state == QLNX_STATE_OPEN) {
6637 qlnx_set_link(ha, false);
6638 qlnx_clean_filters(ha);
6639 qlnx_stop_queues(ha);
6640 ecore_hw_stop_fastpath(cdev);
6642 for (i = 0; i < ha->num_rss; i++) {
6643 if (ha->irq_vec[i].handle) {
6644 (void)bus_teardown_intr(dev,
6646 ha->irq_vec[i].handle);
6647 ha->irq_vec[i].handle = NULL;
6651 qlnx_drain_fp_taskqueues(ha);
6652 qlnx_drain_soft_lro(ha);
6653 qlnx_free_mem_load(ha);
6656 if (ha->flags.callout_init)
6657 callout_drain(&ha->qlnx_callout);
6659 qlnx_mdelay(__func__, 1000);
6661 ha->state = QLNX_STATE_CLOSED;
6663 QL_DPRINT2(ha, "exit\n");
6668 qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
6671 struct ecore_hwfn *p_hwfn;
6672 struct ecore_ptt *p_ptt;
6674 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
6676 p_hwfn = &ha->cdev.hwfns[hwfn_index];
6677 p_ptt = ecore_ptt_acquire(p_hwfn);
6680 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
6684 rval = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);
6686 if (rval == DBG_STATUS_OK)
6689 QL_DPRINT1(ha, "ecore_dbg_grc_get_dump_buf_size failed"
6693 ecore_ptt_release(p_hwfn, p_ptt);
6699 qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
6702 struct ecore_hwfn *p_hwfn;
6703 struct ecore_ptt *p_ptt;
6705 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
6707 p_hwfn = &ha->cdev.hwfns[hwfn_index];
6708 p_ptt = ecore_ptt_acquire(p_hwfn);
6711 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
6715 rval = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);
6717 if (rval == DBG_STATUS_OK)
6720 QL_DPRINT1(ha, "ecore_dbg_idle_chk_get_dump_buf_size failed"
6724 ecore_ptt_release(p_hwfn, p_ptt);
6731 qlnx_sample_storm_stats(qlnx_host_t *ha)
6734 struct ecore_dev *cdev;
6735 qlnx_storm_stats_t *s_stats;
6737 struct ecore_ptt *p_ptt;
6738 struct ecore_hwfn *hwfn;
6740 if (ha->storm_stats_index >= QLNX_STORM_STATS_SAMPLES_PER_HWFN) {
6741 ha->storm_stats_enable = 0;
6747 for_each_hwfn(cdev, i) {
6749 hwfn = &cdev->hwfns[i];
6751 p_ptt = ecore_ptt_acquire(hwfn);
6755 index = ha->storm_stats_index +
6756 (i * QLNX_STORM_STATS_SAMPLES_PER_HWFN);
6758 s_stats = &ha->storm_stats[index];
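/*
 * For each storm processor (X, Y, P, T, M and U) sample the active,
 * stall, sleeping and inactive cycle counters from SEM fast memory.
 */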
6761 reg = XSEM_REG_FAST_MEMORY +
6762 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
6763 s_stats->xstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
6765 reg = XSEM_REG_FAST_MEMORY +
6766 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
6767 s_stats->xstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
6769 reg = XSEM_REG_FAST_MEMORY +
6770 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
6771 s_stats->xstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
6773 reg = XSEM_REG_FAST_MEMORY +
6774 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
6775 s_stats->xstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
6778 reg = YSEM_REG_FAST_MEMORY +
6779 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
6780 s_stats->ystorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
6782 reg = YSEM_REG_FAST_MEMORY +
6783 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
6784 s_stats->ystorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
6786 reg = YSEM_REG_FAST_MEMORY +
6787 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
6788 s_stats->ystorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
6790 reg = YSEM_REG_FAST_MEMORY +
6791 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
6792 s_stats->ystorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
6795 reg = PSEM_REG_FAST_MEMORY +
6796 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
6797 s_stats->pstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
6799 reg = PSEM_REG_FAST_MEMORY +
6800 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
6801 s_stats->pstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
6803 reg = PSEM_REG_FAST_MEMORY +
6804 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
6805 s_stats->pstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
6807 reg = PSEM_REG_FAST_MEMORY +
6808 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
6809 s_stats->pstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
6812 reg = TSEM_REG_FAST_MEMORY +
6813 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
6814 s_stats->tstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
6816 reg = TSEM_REG_FAST_MEMORY +
6817 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
6818 s_stats->tstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
6820 reg = TSEM_REG_FAST_MEMORY +
6821 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
6822 s_stats->tstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
6824 reg = TSEM_REG_FAST_MEMORY +
6825 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
6826 s_stats->tstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
6829 reg = MSEM_REG_FAST_MEMORY +
6830 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
6831 s_stats->mstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
6833 reg = MSEM_REG_FAST_MEMORY +
6834 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
6835 s_stats->mstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
6837 reg = MSEM_REG_FAST_MEMORY +
6838 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
6839 s_stats->mstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
6841 reg = MSEM_REG_FAST_MEMORY +
6842 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
6843 s_stats->mstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
6846 reg = USEM_REG_FAST_MEMORY +
6847 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
6848 s_stats->ustorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
6850 reg = USEM_REG_FAST_MEMORY +
6851 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
6852 s_stats->ustorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
6854 reg = USEM_REG_FAST_MEMORY +
6855 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
6856 s_stats->ustorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
6858 reg = USEM_REG_FAST_MEMORY +
6859 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
6860 s_stats->ustorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
6862 ecore_ptt_release(hwfn, p_ptt);
6865 ha->storm_stats_index++;
6871 * Name: qlnx_dump_buf8
6872 * Function: dumps a buffer as bytes
6875 qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, uint32_t len)
6884 device_printf(dev, "%s: %s 0x%x dump start\n", __func__, msg, len);
6887 device_printf(dev,"0x%08x:"
6888 " %02x %02x %02x %02x %02x %02x %02x %02x"
6889 " %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
6890 buf[0], buf[1], buf[2], buf[3],
6891 buf[4], buf[5], buf[6], buf[7],
6892 buf[8], buf[9], buf[10], buf[11],
6893 buf[12], buf[13], buf[14], buf[15]);
6900 device_printf(dev,"0x%08x: %02x\n", i, buf[0]);
6903 device_printf(dev,"0x%08x: %02x %02x\n", i, buf[0], buf[1]);
6906 device_printf(dev,"0x%08x: %02x %02x %02x\n",
6907 i, buf[0], buf[1], buf[2]);
6910 device_printf(dev,"0x%08x: %02x %02x %02x %02x\n", i,
6911 buf[0], buf[1], buf[2], buf[3]);
6914 device_printf(dev,"0x%08x:"
6915 " %02x %02x %02x %02x %02x\n", i,
6916 buf[0], buf[1], buf[2], buf[3], buf[4]);
6919 device_printf(dev,"0x%08x:"
6920 " %02x %02x %02x %02x %02x %02x\n", i,
6921 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
6924 device_printf(dev,"0x%08x:"
6925 " %02x %02x %02x %02x %02x %02x %02x\n", i,
6926 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]);
6929 device_printf(dev,"0x%08x:"
6930 " %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
6931 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
6935 device_printf(dev,"0x%08x:"
6936 " %02x %02x %02x %02x %02x %02x %02x %02x"
6938 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
6942 device_printf(dev,"0x%08x:"
6943 " %02x %02x %02x %02x %02x %02x %02x %02x"
6945 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
6946 buf[7], buf[8], buf[9]);
6949 device_printf(dev,"0x%08x:"
6950 " %02x %02x %02x %02x %02x %02x %02x %02x"
6951 " %02x %02x %02x\n", i,
6952 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
6953 buf[7], buf[8], buf[9], buf[10]);
6956 device_printf(dev,"0x%08x:"
6957 " %02x %02x %02x %02x %02x %02x %02x %02x"
6958 " %02x %02x %02x %02x\n", i,
6959 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
6960 buf[7], buf[8], buf[9], buf[10], buf[11]);
6963 device_printf(dev,"0x%08x:"
6964 " %02x %02x %02x %02x %02x %02x %02x %02x"
6965 " %02x %02x %02x %02x %02x\n", i,
6966 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
6967 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12]);
6970 device_printf(dev,"0x%08x:"
6971 " %02x %02x %02x %02x %02x %02x %02x %02x"
6972 " %02x %02x %02x %02x %02x %02x\n", i,
6973 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
6974 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12],
6978 device_printf(dev,"0x%08x:"
6979 " %02x %02x %02x %02x %02x %02x %02x %02x"
6980 " %02x %02x %02x %02x %02x %02x %02x\n", i,
6981 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
6982 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12],
6989 device_printf(dev, "%s: %s dump end\n", __func__, msg);