2 * Copyright (c) 2017-2018 Cavium, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
31 * Author : David C Somayajulu, Cavium, Inc., San Jose, CA 95131.
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
40 #include "ecore_gtt_reg_addr.h"
42 #include "ecore_chain.h"
43 #include "ecore_status.h"
45 #include "ecore_rt_defs.h"
46 #include "ecore_init_ops.h"
47 #include "ecore_int.h"
48 #include "ecore_cxt.h"
49 #include "ecore_spq.h"
50 #include "ecore_init_fw_funcs.h"
51 #include "ecore_sp_commands.h"
52 #include "ecore_dev_api.h"
53 #include "ecore_l2_api.h"
54 #include "ecore_mcp.h"
55 #include "ecore_hw_defs.h"
56 #include "mcp_public.h"
57 #include "ecore_iro.h"
59 #include "ecore_dev_api.h"
60 #include "ecore_dbg_fw_funcs.h"
62 #include "qlnx_ioctl.h"
72 * ioctl related functions
74 static void qlnx_add_sysctls(qlnx_host_t *ha);
79 static void qlnx_release(qlnx_host_t *ha);
80 static void qlnx_fp_isr(void *arg);
81 static void qlnx_init_ifnet(device_t dev, qlnx_host_t *ha);
82 static void qlnx_init(void *arg);
83 static void qlnx_init_locked(qlnx_host_t *ha);
84 static int qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi);
85 static int qlnx_set_promisc(qlnx_host_t *ha);
86 static int qlnx_set_allmulti(qlnx_host_t *ha);
87 static int qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
88 static int qlnx_media_change(struct ifnet *ifp);
89 static void qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
90 static void qlnx_stop(qlnx_host_t *ha);
91 static int qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp,
92 struct mbuf **m_headp);
93 static int qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha);
94 static uint32_t qlnx_get_optics(qlnx_host_t *ha,
95 struct qlnx_link_output *if_link);
96 static int qlnx_transmit(struct ifnet *ifp, struct mbuf *mp);
97 static int qlnx_transmit_locked(struct ifnet *ifp, struct qlnx_fastpath *fp,
99 static void qlnx_qflush(struct ifnet *ifp);
101 static int qlnx_alloc_parent_dma_tag(qlnx_host_t *ha);
102 static void qlnx_free_parent_dma_tag(qlnx_host_t *ha);
103 static int qlnx_alloc_tx_dma_tag(qlnx_host_t *ha);
104 static void qlnx_free_tx_dma_tag(qlnx_host_t *ha);
105 static int qlnx_alloc_rx_dma_tag(qlnx_host_t *ha);
106 static void qlnx_free_rx_dma_tag(qlnx_host_t *ha);
108 static int qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver);
109 static int qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size);
111 static int qlnx_nic_setup(struct ecore_dev *cdev,
112 struct ecore_pf_params *func_params);
113 static int qlnx_nic_start(struct ecore_dev *cdev);
114 static int qlnx_slowpath_start(qlnx_host_t *ha);
115 static int qlnx_slowpath_stop(qlnx_host_t *ha);
116 static int qlnx_init_hw(qlnx_host_t *ha);
117 static void qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
118 char ver_str[VER_SIZE]);
119 static void qlnx_unload(qlnx_host_t *ha);
120 static int qlnx_load(qlnx_host_t *ha);
121 static void qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
123 static void qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf,
125 static int qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq);
126 static void qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq);
127 static void qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn,
128 struct qlnx_rx_queue *rxq);
129 static int qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter);
130 static int qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords,
132 static int qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords,
134 static void qlnx_timer(void *arg);
135 static int qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
136 static void qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
137 static void qlnx_trigger_dump(qlnx_host_t *ha);
138 static uint16_t qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp,
139 struct qlnx_tx_queue *txq);
140 static void qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
141 struct qlnx_tx_queue *txq);
142 static int qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
144 static void qlnx_fp_taskqueue(void *context, int pending);
145 static void qlnx_sample_storm_stats(qlnx_host_t *ha);
146 static int qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
147 struct qlnx_agg_info *tpa);
148 static void qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa);
150 #if __FreeBSD_version >= 1100000
151 static uint64_t qlnx_get_counter(if_t ifp, ift_counter cnt);
156 * Hooks to the Operating Systems
158 static int qlnx_pci_probe (device_t);
159 static int qlnx_pci_attach (device_t);
160 static int qlnx_pci_detach (device_t);
/*
 * newbus glue: device method table, driver descriptor and module
 * declarations for the if_qlnxe PCI Ethernet driver.
 * NOTE(review): listing is fragmentary (extraction dropped lines such as
 * DEVMETHOD_END and closing braces); code left byte-identical.
 */
162 static device_method_t qlnx_pci_methods[] = {
163 /* Device interface */
164 DEVMETHOD(device_probe, qlnx_pci_probe),
165 DEVMETHOD(device_attach, qlnx_pci_attach),
166 DEVMETHOD(device_detach, qlnx_pci_detach),
/* driver_t: softc is the per-device qlnx_host_t */
170 static driver_t qlnx_pci_driver = {
171 "ql", qlnx_pci_methods, sizeof (qlnx_host_t),
174 static devclass_t qlnx_devclass;
176 MODULE_VERSION(if_qlnxe,1);
177 DRIVER_MODULE(if_qlnxe, pci, qlnx_pci_driver, qlnx_devclass, 0, 0);
/* the driver requires the pci bus and ether (if_ethersubr) modules */
179 MODULE_DEPEND(if_qlnxe, pci, 1, 1, 1);
180 MODULE_DEPEND(if_qlnxe, ether, 1, 1, 1);
182 MALLOC_DEFINE(M_QLNXBUF, "qlnxbuf", "Buffers for qlnx driver");
185 char qlnx_dev_str[64];
186 char qlnx_ver_str[VER_SIZE];
187 char qlnx_name_str[NAME_SIZE];
190 * Some PCI Configuration Space Related Defines
193 #ifndef PCI_VENDOR_QLOGIC
194 #define PCI_VENDOR_QLOGIC 0x1077
197 /* 40G Adapter QLE45xxx*/
198 #ifndef QLOGIC_PCI_DEVICE_ID_1634
199 #define QLOGIC_PCI_DEVICE_ID_1634 0x1634
202 /* 100G Adapter QLE45xxx*/
203 #ifndef QLOGIC_PCI_DEVICE_ID_1644
204 #define QLOGIC_PCI_DEVICE_ID_1644 0x1644
207 /* 25G Adapter QLE45xxx*/
208 #ifndef QLOGIC_PCI_DEVICE_ID_1656
209 #define QLOGIC_PCI_DEVICE_ID_1656 0x1656
212 /* 50G Adapter QLE45xxx*/
213 #ifndef QLOGIC_PCI_DEVICE_ID_1654
214 #define QLOGIC_PCI_DEVICE_ID_1654 0x1654
217 /* 10G/25G/40G Adapter QLE41xxx*/
218 #ifndef QLOGIC_PCI_DEVICE_ID_8070
219 #define QLOGIC_PCI_DEVICE_ID_8070 0x8070
/* hw.qlnxe sysctl tree; queue_count is a loader tunable (CTLFLAG_RDTUN). */
222 SYSCTL_NODE(_hw, OID_AUTO, qlnxe, CTLFLAG_RD, 0, "qlnxe driver parameters");
223 /* Number of Queues: 0 (Auto) or 1 to 32 (fixed queue number) */
224 static int qlnxe_queue_count = QLNX_DEFAULT_RSS;
225 SYSCTL_INT(_hw_qlnxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
226 &qlnxe_queue_count, 0, "Multi-Queue queue count");
/*
 * qlnx_valid_device
 *	Checks whether the PCI device id matches one of the supported
 *	QLogic QLE45xxx/QLE41xxx adapters (1634/1644/1656/1654/8070).
 * NOTE(review): fragmentary listing — return type, braces and return
 * statements were dropped by extraction; code left byte-identical.
 */
229 qlnx_valid_device(device_t dev)
233 device_id = pci_get_device(dev);
235 if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
236 (device_id == QLOGIC_PCI_DEVICE_ID_1644) ||
237 (device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
238 (device_id == QLOGIC_PCI_DEVICE_ID_1654) ||
239 (device_id == QLOGIC_PCI_DEVICE_ID_8070))
246 * Name: qlnx_pci_probe
247 * Function: Validate the PCI device to be a QLA80XX device
/*
 * qlnx_pci_probe
 *	Validates vendor/device id and sets a human-readable device
 *	description per adapter model.  Returns BUS_PROBE_DEFAULT on match.
 * NOTE(review): fragmentary listing — ENXIO returns, break statements and
 * closing braces were dropped by extraction; code left byte-identical.
 */
250 qlnx_pci_probe(device_t dev)
/* build global version/name strings used in descriptions and logs */
252 snprintf(qlnx_ver_str, sizeof(qlnx_ver_str), "v%d.%d.%d",
253 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, QLNX_VERSION_BUILD);
254 snprintf(qlnx_name_str, sizeof(qlnx_name_str), "qlnx");
256 if (pci_get_vendor(dev) != PCI_VENDOR_QLOGIC) {
260 switch (pci_get_device(dev)) {
262 case QLOGIC_PCI_DEVICE_ID_1644:
263 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
264 "Qlogic 100GbE PCI CNA Adapter-Ethernet Function",
265 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
267 device_set_desc_copy(dev, qlnx_dev_str);
271 case QLOGIC_PCI_DEVICE_ID_1634:
272 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
273 "Qlogic 40GbE PCI CNA Adapter-Ethernet Function",
274 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
276 device_set_desc_copy(dev, qlnx_dev_str);
280 case QLOGIC_PCI_DEVICE_ID_1656:
281 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
282 "Qlogic 25GbE PCI CNA Adapter-Ethernet Function",
283 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
285 device_set_desc_copy(dev, qlnx_dev_str);
289 case QLOGIC_PCI_DEVICE_ID_1654:
290 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
291 "Qlogic 50GbE PCI CNA Adapter-Ethernet Function",
292 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
294 device_set_desc_copy(dev, qlnx_dev_str);
298 case QLOGIC_PCI_DEVICE_ID_8070:
299 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
300 "Qlogic 10GbE/25GbE/40GbE PCI CNA (AH) "
301 "Adapter-Ethernet Function",
302 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
304 device_set_desc_copy(dev, qlnx_dev_str);
312 return (BUS_PROBE_DEFAULT);
/*
 * qlnx_num_tx_compl
 *	Returns the number of pending TX completions on txq, computed as the
 *	difference between the hardware BD consumer (read from the status
 *	block, little-endian) and the driver-side chain consumer index,
 *	accounting for 16-bit wrap-around via (1 << 16).
 * NOTE(review): fragmentary listing — declarations and return dropped.
 */
316 qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp,
317 struct qlnx_tx_queue *txq)
323 hw_bd_cons = le16toh(*txq->hw_cons_ptr);
325 ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl);
/* hw index wrapped past the 16-bit boundary before the sw index did */
326 if (hw_bd_cons < ecore_cons_idx) {
327 diff = (1 << 16) - (ecore_cons_idx - hw_bd_cons);
329 diff = hw_bd_cons - ecore_cons_idx;
/*
 * qlnx_sp_intr
 *	Slowpath interrupt handler.  arg is the ecore_hwfn the vector was
 *	registered with; the handler locates that hwfn's slot and defers the
 *	real work to the matching slowpath taskqueue.
 * NOTE(review): cast of p_hwfn->p_dev to qlnx_host_t assumes the ecore_dev
 *	is embedded first in qlnx_host_t — confirm against qlnx_def.h.
 */
336 qlnx_sp_intr(void *arg)
338 struct ecore_hwfn *p_hwfn;
344 if (p_hwfn == NULL) {
345 printf("%s: spurious slowpath intr\n", __func__);
349 ha = (qlnx_host_t *)p_hwfn->p_dev;
351 QL_DPRINT2(ha, "enter\n");
/* find this hwfn's index and kick its dedicated slowpath task */
353 for (i = 0; i < ha->cdev.num_hwfns; i++) {
354 if (&ha->cdev.hwfns[i] == p_hwfn) {
355 taskqueue_enqueue(ha->sp_taskqueue[i], &ha->sp_task[i]);
359 QL_DPRINT2(ha, "exit\n");
/*
 * qlnx_sp_taskqueue
 *	Slowpath task body; context is the ecore_hwfn enqueued by
 *	qlnx_sp_intr().  Body elided by extraction; code left byte-identical.
 */
365 qlnx_sp_taskqueue(void *context, int pending)
367 struct ecore_hwfn *p_hwfn;
371 if (p_hwfn != NULL) {
/*
 * qlnx_create_sp_taskqueues
 *	Creates one fast taskqueue + thread ("ql_sp_tq_<i>") per hardware
 *	function for deferred slowpath processing.
 * NOTE(review): fragmentary listing — error return and loop close dropped.
 */
378 qlnx_create_sp_taskqueues(qlnx_host_t *ha)
383 for (i = 0; i < ha->cdev.num_hwfns; i++) {
385 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];
387 bzero(tq_name, sizeof (tq_name));
388 snprintf(tq_name, sizeof (tq_name), "ql_sp_tq_%d", i);
/* task context is the hwfn itself; see qlnx_sp_taskqueue() */
390 TASK_INIT(&ha->sp_task[i], 0, qlnx_sp_taskqueue, p_hwfn);
392 ha->sp_taskqueue[i] = taskqueue_create_fast(tq_name, M_NOWAIT,
393 taskqueue_thread_enqueue, &ha->sp_taskqueue[i]);
395 if (ha->sp_taskqueue[i] == NULL)
398 taskqueue_start_threads(&ha->sp_taskqueue[i], 1, PI_NET, "%s",
401 QL_DPRINT1(ha, "%p\n", ha->sp_taskqueue[i]);
/*
 * qlnx_destroy_sp_taskqueues
 *	Drains and frees each per-hwfn slowpath taskqueue created by
 *	qlnx_create_sp_taskqueues().
 */
408 qlnx_destroy_sp_taskqueues(qlnx_host_t *ha)
412 for (i = 0; i < ha->cdev.num_hwfns; i++) {
413 if (ha->sp_taskqueue[i] != NULL) {
/* drain before free so no task runs on a destroyed queue */
414 taskqueue_drain(ha->sp_taskqueue[i], &ha->sp_task[i]);
415 taskqueue_free(ha->sp_taskqueue[i]);
/*
 * qlnx_fp_taskqueue
 *	Fastpath task body for one RSS queue (context = qlnx_fastpath).
 *	With QLNX_RCV_IN_TASKQ: binds the taskqueue thread to the queue's
 *	CPU, polls RX, flushes software LRO, then re-arms/acks the status
 *	block.  Always: drains the buf_ring TX backlog under tx_mtx if the
 *	interface is running.
 * NOTE(review): fragmentary listing — many lines (declarations, braces,
 *	#ifdef QLNX_SOFT_LRO opener, loop bodies) were dropped by
 *	extraction; code left byte-identical.  Intricate #ifdef nesting —
 *	do not restructure without the full file.
 */
422 qlnx_fp_taskqueue(void *context, int pending)
424 struct qlnx_fastpath *fp;
428 #ifdef QLNX_RCV_IN_TASKQ
430 int rx_int = 0, total_rx_count = 0;
431 struct thread *cthread;
432 #endif /* #ifdef QLNX_RCV_IN_TASKQ */
439 ha = (qlnx_host_t *)fp->edev;
443 #ifdef QLNX_RCV_IN_TASKQ
/* pin this taskqueue thread to the queue's RSS CPU (one-time bind) */
447 thread_lock(cthread);
449 if (!sched_is_bound(cthread))
450 sched_bind(cthread, fp->rss_id);
452 thread_unlock(cthread);
454 lro_enable = ifp->if_capenable & IFCAP_LRO;
456 rx_int = qlnx_rx_int(ha, fp, ha->rx_pkt_threshold, lro_enable);
459 fp->rx_pkts += rx_int;
460 total_rx_count += rx_int;
465 struct lro_ctrl *lro;
469 if (lro_enable && total_rx_count) {
471 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
/* bucketed LRO batch-size debug counters (lro_cnt_64..1024) */
473 if (ha->dbg_trace_lro_cnt) {
474 if (lro->lro_mbuf_count & ~1023)
476 else if (lro->lro_mbuf_count & ~511)
478 else if (lro->lro_mbuf_count & ~255)
480 else if (lro->lro_mbuf_count & ~127)
482 else if (lro->lro_mbuf_count & ~63)
485 tcp_lro_flush_all(lro);
/* pre-1100101 fallback: flush active LRO entries one at a time */
488 struct lro_entry *queued;
490 while ((!SLIST_EMPTY(&lro->lro_active))) {
491 queued = SLIST_FIRST(&lro->lro_active);
492 SLIST_REMOVE_HEAD(&lro->lro_active, next);
493 tcp_lro_flush(lro, queued);
495 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
498 #endif /* #ifdef QLNX_SOFT_LRO */
500 ecore_sb_update_sb_idx(fp->sb_info);
503 #endif /* #ifdef QLNX_RCV_IN_TASKQ */
/* opportunistic TX drain: skip if another context holds tx_mtx */
505 if(ifp->if_drv_flags & IFF_DRV_RUNNING) {
507 if (!drbr_empty(ifp, fp->tx_br)) {
509 if(mtx_trylock(&fp->tx_mtx)) {
511 #ifdef QLNX_TRACE_PERF_DATA
512 tx_pkts = fp->tx_pkts_transmitted;
513 tx_compl = fp->tx_pkts_completed;
516 qlnx_transmit_locked(ifp, fp, NULL);
518 #ifdef QLNX_TRACE_PERF_DATA
519 fp->tx_pkts_trans_fp +=
520 (fp->tx_pkts_transmitted - tx_pkts);
521 fp->tx_pkts_compl_fp +=
522 (fp->tx_pkts_completed - tx_compl);
524 mtx_unlock(&fp->tx_mtx);
529 #ifdef QLNX_RCV_IN_TASKQ
/* re-queue ourselves while TX ring is full, else re-enable the IGU intr */
531 if (fp->fp_taskqueue != NULL)
532 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
534 if (fp->tx_ring_full) {
535 qlnx_mdelay(__func__, 100);
537 ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
539 #endif /* #ifdef QLNX_RCV_IN_TASKQ */
541 QL_DPRINT2(ha, "exit \n");
/*
 * qlnx_create_fp_taskqueues
 *	Creates one fast taskqueue + thread ("ql_fp_tq_<i>") per RSS queue,
 *	mirroring qlnx_create_sp_taskqueues() for the fastpath.
 * NOTE(review): fragmentary listing — error return and loop close dropped.
 */
546 qlnx_create_fp_taskqueues(qlnx_host_t *ha)
550 struct qlnx_fastpath *fp;
552 for (i = 0; i < ha->num_rss; i++) {
554 fp = &ha->fp_array[i];
556 bzero(tq_name, sizeof (tq_name));
557 snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i);
/* task context is the fastpath struct; see qlnx_fp_taskqueue() */
559 TASK_INIT(&fp->fp_task, 0, qlnx_fp_taskqueue, fp);
561 fp->fp_taskqueue = taskqueue_create_fast(tq_name, M_NOWAIT,
562 taskqueue_thread_enqueue,
565 if (fp->fp_taskqueue == NULL)
568 taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
571 QL_DPRINT1(ha, "%p\n",fp->fp_taskqueue);
/*
 * qlnx_destroy_fp_taskqueues
 *	Drains, frees and NULLs each per-queue fastpath taskqueue.
 */
578 qlnx_destroy_fp_taskqueues(qlnx_host_t *ha)
581 struct qlnx_fastpath *fp;
583 for (i = 0; i < ha->num_rss; i++) {
585 fp = &ha->fp_array[i];
587 if (fp->fp_taskqueue != NULL) {
589 taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
590 taskqueue_free(fp->fp_taskqueue);
/* clear the pointer so qlnx_fp_taskqueue() won't re-enqueue */
591 fp->fp_taskqueue = NULL;
/*
 * qlnx_drain_fp_taskqueues
 *	Waits for any in-flight fastpath tasks to finish without freeing
 *	the queues (unlike qlnx_destroy_fp_taskqueues()).
 */
598 qlnx_drain_fp_taskqueues(qlnx_host_t *ha)
601 struct qlnx_fastpath *fp;
603 for (i = 0; i < ha->num_rss; i++) {
604 fp = &ha->fp_array[i];
606 if (fp->fp_taskqueue != NULL) {
608 taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
/*
 * qlnx_get_params
 *	Sanity-checks the hw.qlnxe.queue_count tunable; out-of-range values
 *	are reset to 0 (auto).
 */
616 qlnx_get_params(qlnx_host_t *ha)
618 if ((qlnxe_queue_count < 0) || (qlnxe_queue_count > QLNX_MAX_RSS)) {
619 device_printf(ha->pci_dev, "invalid queue_count value (%d)\n",
621 qlnxe_queue_count = 0;
627 * Name: qlnx_pci_attach
628 * Function: attaches the device to the operating system
/*
 * qlnx_pci_attach
 *	Attaches the adapter: maps BARs 0/2/4 (registers, doorbells, MSI-X
 *	table), creates DMA tags, prepares the ecore HW layer, sizes and
 *	allocates MSI-X vectors, wires up slowpath and fastpath interrupts
 *	and taskqueues, allocates debug-dump buffers, starts the slowpath,
 *	and finally creates the ifnet, sysctls and ioctl cdev.
 *	Errors unwind through qlnx_pci_attach_err (full release) or
 *	qlnx_pci_attach_err0 (dump-capable partial failure path).
 * NOTE(review): fragmentary listing — extraction dropped many lines
 *	(declarations, returns, closing braces); visible code is preserved
 *	except for the removal of a stray empty statement (";;") after the
 *	bus_get_resource_start() call below.
 */
631 qlnx_pci_attach(device_t dev)
633 qlnx_host_t *ha = NULL;
634 uint32_t rsrc_len_reg = 0;
635 uint32_t rsrc_len_dbells = 0;
636 uint32_t rsrc_len_msix = 0;
640 if ((ha = device_get_softc(dev)) == NULL) {
641 device_printf(dev, "cannot get softc\n");
645 memset(ha, 0, sizeof (qlnx_host_t));
647 if (qlnx_valid_device(dev) != 0) {
648 device_printf(dev, "device is not valid device\n");
651 ha->pci_func = pci_get_function(dev);
655 mtx_init(&ha->hw_lock, "qlnx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
657 ha->flags.lock_init = 1;
659 pci_enable_busmaster(dev);
/* BAR0: device register window */
665 ha->reg_rid = PCIR_BAR(0);
666 ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
669 if (ha->pci_reg == NULL) {
670 device_printf(dev, "unable to map BAR0\n");
671 goto qlnx_pci_attach_err;
674 rsrc_len_reg = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
/* BAR2 (second 64-bit BAR): doorbell region */
677 ha->dbells_rid = PCIR_BAR(2);
678 ha->pci_dbells = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
679 &ha->dbells_rid, RF_ACTIVE);
681 if (ha->pci_dbells == NULL) {
682 device_printf(dev, "unable to map BAR1\n");
683 goto qlnx_pci_attach_err;
686 rsrc_len_dbells = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
689 ha->dbells_phys_addr = (uint64_t)
690 bus_get_resource_start(dev, SYS_RES_MEMORY, ha->dbells_rid);
691 ha->dbells_size = rsrc_len_dbells;
/* BAR4 (third 64-bit BAR): MSI-X table */
693 ha->msix_rid = PCIR_BAR(4);
694 ha->msix_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
695 &ha->msix_rid, RF_ACTIVE);
697 if (ha->msix_bar == NULL) {
698 device_printf(dev, "unable to map BAR2\n");
699 goto qlnx_pci_attach_err;
702 rsrc_len_msix = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
708 if (qlnx_alloc_parent_dma_tag(ha))
709 goto qlnx_pci_attach_err;
711 if (qlnx_alloc_tx_dma_tag(ha))
712 goto qlnx_pci_attach_err;
714 if (qlnx_alloc_rx_dma_tag(ha))
715 goto qlnx_pci_attach_err;
718 if (qlnx_init_hw(ha) != 0)
719 goto qlnx_pci_attach_err;
/* 100G (1644) defaults to the maximum RSS queue count */
723 if((pci_get_device(dev) == QLOGIC_PCI_DEVICE_ID_1644) &&
724 (qlnxe_queue_count == QLNX_DEFAULT_RSS)) {
725 qlnxe_queue_count = QLNX_MAX_RSS;
729 * Allocate MSI-x vectors
731 if(qlnxe_queue_count == 0)
732 ha->num_rss = QLNX_DEFAULT_RSS;
734 ha->num_rss = qlnxe_queue_count;
736 ha->num_tc = QLNX_MAX_TC;
/* clamp vectors to ncpus + one slowpath vector per hwfn */
738 ha->msix_count = pci_msix_count(dev);
740 if (ha->msix_count > (mp_ncpus + ha->cdev.num_hwfns))
741 ha->msix_count = mp_ncpus + ha->cdev.num_hwfns;
/* need at least one fastpath vector beyond the per-hwfn slowpath ones */
743 if (!ha->msix_count ||
744 (ha->msix_count < (ha->cdev.num_hwfns + 1 ))) {
745 device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
747 goto qlnx_pci_attach_err;
750 if (ha->msix_count > (ha->num_rss + ha->cdev.num_hwfns ))
751 ha->msix_count = ha->num_rss + ha->cdev.num_hwfns;
753 ha->num_rss = ha->msix_count - ha->cdev.num_hwfns;
755 QL_DPRINT1(ha, "\n\t\t\tpci_reg [%p, 0x%08x 0x%08x]"
756 "\n\t\t\tdbells [%p, 0x%08x 0x%08x]"
757 "\n\t\t\tmsix [%p, 0x%08x 0x%08x 0x%x 0x%x]"
758 "\n\t\t\t[ncpus = %d][num_rss = 0x%x] [num_tc = 0x%x]\n",
759 ha->pci_reg, rsrc_len_reg,
760 ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
761 ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
762 ha->msix_count, mp_ncpus, ha->num_rss, ha->num_tc);
763 if (pci_alloc_msix(dev, &ha->msix_count)) {
764 device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
767 goto qlnx_pci_attach_err;
771 * Initialize slow path interrupt and task queue
773 if (qlnx_create_sp_taskqueues(ha) != 0)
774 goto qlnx_pci_attach_err;
776 for (i = 0; i < ha->cdev.num_hwfns; i++) {
778 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];
/* slowpath IRQs use vectors 1..num_hwfns */
780 ha->sp_irq_rid[i] = i + 1;
781 ha->sp_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ,
783 (RF_ACTIVE | RF_SHAREABLE));
784 if (ha->sp_irq[i] == NULL) {
786 "could not allocate mbx interrupt\n");
787 goto qlnx_pci_attach_err;
790 if (bus_setup_intr(dev, ha->sp_irq[i],
791 (INTR_TYPE_NET | INTR_MPSAFE), NULL,
792 qlnx_sp_intr, p_hwfn, &ha->sp_handle[i])) {
794 "could not setup slow path interrupt\n");
795 goto qlnx_pci_attach_err;
798 QL_DPRINT1(ha, "p_hwfn [%p] sp_irq_rid %d"
799 " sp_irq %p sp_handle %p\n", p_hwfn,
800 ha->sp_irq_rid[i], ha->sp_irq[i], ha->sp_handle[i]);
805 * initialize fast path interrupt
807 if (qlnx_create_fp_taskqueues(ha) != 0)
808 goto qlnx_pci_attach_err;
810 for (i = 0; i < ha->num_rss; i++) {
811 ha->irq_vec[i].rss_idx = i;
812 ha->irq_vec[i].ha = ha;
/* fastpath IRQs follow the slowpath vectors */
813 ha->irq_vec[i].irq_rid = (1 + ha->cdev.num_hwfns) + i;
815 ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
816 &ha->irq_vec[i].irq_rid,
817 (RF_ACTIVE | RF_SHAREABLE));
819 if (ha->irq_vec[i].irq == NULL) {
821 "could not allocate interrupt[%d]\n", i);
822 goto qlnx_pci_attach_err;
825 if (qlnx_alloc_tx_br(ha, &ha->fp_array[i])) {
826 device_printf(dev, "could not allocate tx_br[%d]\n", i);
827 goto qlnx_pci_attach_err;
832 callout_init(&ha->qlnx_callout, 1);
833 ha->flags.callout_init = 1;
/* size and allocate per-hwfn GRC-dump and idle-check debug buffers */
835 for (i = 0; i < ha->cdev.num_hwfns; i++) {
837 if (qlnx_grc_dumpsize(ha, &ha->grcdump_size[i], i) != 0)
838 goto qlnx_pci_attach_err;
839 if (ha->grcdump_size[i] == 0)
840 goto qlnx_pci_attach_err;
/* sizes are reported in dwords; convert to bytes */
842 ha->grcdump_size[i] = ha->grcdump_size[i] << 2;
843 QL_DPRINT1(ha, "grcdump_size[%d] = 0x%08x\n",
844 i, ha->grcdump_size[i]);
846 ha->grcdump[i] = qlnx_zalloc(ha->grcdump_size[i]);
847 if (ha->grcdump[i] == NULL) {
848 device_printf(dev, "grcdump alloc[%d] failed\n", i);
849 goto qlnx_pci_attach_err;
852 if (qlnx_idle_chk_size(ha, &ha->idle_chk_size[i], i) != 0)
853 goto qlnx_pci_attach_err;
854 if (ha->idle_chk_size[i] == 0)
855 goto qlnx_pci_attach_err;
857 ha->idle_chk_size[i] = ha->idle_chk_size[i] << 2;
858 QL_DPRINT1(ha, "idle_chk_size[%d] = 0x%08x\n",
859 i, ha->idle_chk_size[i]);
861 ha->idle_chk[i] = qlnx_zalloc(ha->idle_chk_size[i]);
863 if (ha->idle_chk[i] == NULL) {
864 device_printf(dev, "idle_chk alloc failed\n");
865 goto qlnx_pci_attach_err;
/* on slowpath/firmware failures, capture a dump before bailing out */
869 if (qlnx_slowpath_start(ha) != 0) {
871 qlnx_mdelay(__func__, 1000);
872 qlnx_trigger_dump(ha);
874 goto qlnx_pci_attach_err0;
876 ha->flags.slowpath_start = 1;
878 if (qlnx_get_flash_size(ha, &ha->flash_size) != 0) {
879 qlnx_mdelay(__func__, 1000);
880 qlnx_trigger_dump(ha);
882 goto qlnx_pci_attach_err0;
885 if (qlnx_get_mfw_version(ha, &mfw_ver) != 0) {
886 qlnx_mdelay(__func__, 1000);
887 qlnx_trigger_dump(ha);
889 goto qlnx_pci_attach_err0;
891 snprintf(ha->mfw_ver, sizeof(ha->mfw_ver), "%d.%d.%d.%d",
892 ((mfw_ver >> 24) & 0xFF), ((mfw_ver >> 16) & 0xFF),
893 ((mfw_ver >> 8) & 0xFF), (mfw_ver & 0xFF));
894 snprintf(ha->stormfw_ver, sizeof(ha->stormfw_ver), "%d.%d.%d.%d",
895 FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
896 FW_ENGINEERING_VERSION);
898 QL_DPRINT1(ha, "STORM_FW version %s MFW version %s\n",
899 ha->stormfw_ver, ha->mfw_ver);
901 qlnx_init_ifnet(dev, ha);
906 qlnx_add_sysctls(ha);
/* err0: cdev is still created so dumps can be retrieved via ioctl */
908 qlnx_pci_attach_err0:
910 * create ioctl device interface
912 if (qlnx_make_cdev(ha)) {
913 device_printf(dev, "%s: ql_make_cdev failed\n", __func__);
914 goto qlnx_pci_attach_err;
917 QL_DPRINT2(ha, "success\n");
929 * Name: qlnx_pci_detach
930 * Function: Unhooks the device from the operating system
/*
 * qlnx_pci_detach
 *	Detach entry point; retrieves the softc and (in elided code) tears
 *	down the device.  Fragmentary listing — body dropped by extraction.
 */
933 qlnx_pci_detach(device_t dev)
935 qlnx_host_t *ha = NULL;
937 if ((ha = device_get_softc(dev)) == NULL) {
938 device_printf(dev, "cannot get softc\n");
/*
 * qlnx_init_hw
 *	Initializes the ecore (HW abstraction) layer: debug-print level,
 *	register/doorbell mappings, and ecore_hw_prepare() parameters
 *	(default personality, PF FLR on init), then records the driver
 *	name/version strings.
 * NOTE(review): fragmentary listing — return value handling dropped by
 *	extraction.  Fix applied: the two occurrences of the mis-encoded
 *	token "¶ms" (mojibake of "&params" — '&para' collapsed to the
 *	pilcrow character) are restored to "&params".
 */
952 qlnx_init_hw(qlnx_host_t *ha)
955 struct ecore_hw_prepare_params params;
957 ecore_init_struct(&ha->cdev);
959 /* ha->dp_module = ECORE_MSG_PROBE |
965 ha->dp_level = ECORE_LEVEL_VERBOSE;*/
966 ha->dp_level = ECORE_LEVEL_NOTICE;
968 ecore_init_dp(&ha->cdev, ha->dp_module, ha->dp_level, ha->pci_dev);
/* hand the mapped BAR0/doorbell regions to the ecore layer */
970 ha->cdev.regview = ha->pci_reg;
971 ha->cdev.doorbells = ha->pci_dbells;
972 ha->cdev.db_phys_addr = ha->dbells_phys_addr;
973 ha->cdev.db_size = ha->dbells_size;
975 bzero(&params, sizeof (struct ecore_hw_prepare_params));
977 ha->personality = ECORE_PCI_DEFAULT;
979 params.personality = ha->personality;
981 params.drv_resc_alloc = false;
982 params.chk_reg_fifo = false;
/* issue a PF function-level reset so the device starts clean */
983 params.initiate_pf_flr = true;
986 ecore_hw_prepare(&ha->cdev, &params);
988 qlnx_set_id(&ha->cdev, qlnx_name_str, qlnx_ver_str);
/*
 * qlnx_release
 *	Releases everything qlnx_pci_attach() acquired, in reverse order:
 *	debug buffers, callout, slowpath/ecore, ifnet, DMA tags, fastpath
 *	IRQs + buf_rings + taskqueues, slowpath IRQs + taskqueues, MSI-X,
 *	the hw_lock mutex, and the three BAR mappings.
 * NOTE(review): fragmentary listing — declarations, braces and some
 *	arguments were dropped by extraction; code left byte-identical.
 */
994 qlnx_release(qlnx_host_t *ha)
1001 QL_DPRINT2(ha, "enter\n");
/* free per-hwfn idle-check and GRC dump buffers; NULL to avoid reuse */
1003 for (i = 0; i < QLNX_MAX_HW_FUNCS; i++) {
1004 if (ha->idle_chk[i] != NULL) {
1005 free(ha->idle_chk[i], M_QLNXBUF);
1006 ha->idle_chk[i] = NULL;
1009 if (ha->grcdump[i] != NULL) {
1010 free(ha->grcdump[i], M_QLNXBUF);
1011 ha->grcdump[i] = NULL;
1015 if (ha->flags.callout_init)
1016 callout_drain(&ha->qlnx_callout);
1018 if (ha->flags.slowpath_start) {
1019 qlnx_slowpath_stop(ha);
1022 ecore_hw_remove(&ha->cdev);
1026 if (ha->ifp != NULL)
1027 ether_ifdetach(ha->ifp);
1029 qlnx_free_tx_dma_tag(ha);
1031 qlnx_free_rx_dma_tag(ha);
1033 qlnx_free_parent_dma_tag(ha);
/* fastpath: teardown/release each RSS vector and its tx buf_ring */
1035 for (i = 0; i < ha->num_rss; i++) {
1036 struct qlnx_fastpath *fp = &ha->fp_array[i];
1038 if (ha->irq_vec[i].handle) {
1039 (void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
1040 ha->irq_vec[i].handle);
1043 if (ha->irq_vec[i].irq) {
1044 (void)bus_release_resource(dev, SYS_RES_IRQ,
1045 ha->irq_vec[i].irq_rid,
1046 ha->irq_vec[i].irq);
1049 qlnx_free_tx_br(ha, fp);
1051 qlnx_destroy_fp_taskqueues(ha);
/* slowpath: teardown/release per-hwfn vectors and taskqueues */
1053 for (i = 0; i < ha->cdev.num_hwfns; i++) {
1054 if (ha->sp_handle[i])
1055 (void)bus_teardown_intr(dev, ha->sp_irq[i],
1059 (void) bus_release_resource(dev, SYS_RES_IRQ,
1060 ha->sp_irq_rid[i], ha->sp_irq[i]);
1063 qlnx_destroy_sp_taskqueues(ha);
1066 pci_release_msi(dev);
1068 if (ha->flags.lock_init) {
1069 mtx_destroy(&ha->hw_lock);
/* release BAR0 (registers), BAR2 (doorbells) and BAR4 (MSI-X table) */
1073 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
1077 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->dbells_rid,
1081 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->msix_rid,
1084 QL_DPRINT2(ha, "exit\n");
/*
 * qlnx_trigger_dump
 *	Marks the interface down (clears RUNNING/OACTIVE) and captures a
 *	GRC dump plus idle-check dump for every hardware function.
 */
1089 qlnx_trigger_dump(qlnx_host_t *ha)
1093 if (ha->ifp != NULL)
1094 ha->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
1096 QL_DPRINT2(ha, "enter\n");
1098 for (i = 0; i < ha->cdev.num_hwfns; i++) {
1099 qlnx_grc_dump(ha, &ha->grcdump_dwords[i], i);
1100 qlnx_idle_chk(ha, &ha->idle_chk_dwords[i], i);
1103 QL_DPRINT2(ha, "exit\n");
/*
 * qlnx_trigger_dump_sysctl
 *	Sysctl handler: any write triggers a firmware/GRC dump via
 *	qlnx_trigger_dump().  Reads are no-ops (returns before newptr check
 *	fails).  arg1 is the qlnx_host_t.
 */
1109 qlnx_trigger_dump_sysctl(SYSCTL_HANDLER_ARGS)
1114 err = sysctl_handle_int(oidp, &ret, 0, req);
1116 if (err || !req->newptr)
1120 ha = (qlnx_host_t *)arg1;
1121 qlnx_trigger_dump(ha);
/*
 * qlnx_set_tx_coalesce
 *	Sysctl handler: sets the TX interrupt coalescing interval (1..255
 *	microseconds) on txq[0] of every RSS queue, then caches the value
 *	in ha->tx_coalesce_usecs.  arg1 is the qlnx_host_t.
 * NOTE(review): fragmentary listing — error return paths dropped.
 */
1127 qlnx_set_tx_coalesce(SYSCTL_HANDLER_ARGS)
1129 int err, i, ret = 0, usecs = 0;
1131 struct ecore_hwfn *p_hwfn;
1132 struct qlnx_fastpath *fp;
1134 err = sysctl_handle_int(oidp, &usecs, 0, req);
/* reject reads, zero, and values that don't fit in uint8_t */
1136 if (err || !req->newptr || !usecs || (usecs > 255))
1139 ha = (qlnx_host_t *)arg1;
1141 for (i = 0; i < ha->num_rss; i++) {
/* queues are distributed round-robin across hardware functions */
1143 p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];
1145 fp = &ha->fp_array[i];
1147 if (fp->txq[0]->handle != NULL) {
1148 ret = ecore_set_queue_coalesce(p_hwfn, 0,
1149 (uint16_t)usecs, fp->txq[0]->handle);
1154 ha->tx_coalesce_usecs = (uint8_t)usecs;
/*
 * qlnx_set_rx_coalesce
 *	Sysctl handler: RX counterpart of qlnx_set_tx_coalesce() — sets the
 *	RX coalescing interval (1..255 usecs) on every RSS queue's rxq and
 *	caches it in ha->rx_coalesce_usecs.  arg1 is the qlnx_host_t.
 * NOTE(review): fragmentary listing — error return paths dropped.
 */
1160 qlnx_set_rx_coalesce(SYSCTL_HANDLER_ARGS)
1162 int err, i, ret = 0, usecs = 0;
1164 struct ecore_hwfn *p_hwfn;
1165 struct qlnx_fastpath *fp;
1167 err = sysctl_handle_int(oidp, &usecs, 0, req);
/* reject reads, zero, and values that don't fit in uint8_t */
1169 if (err || !req->newptr || !usecs || (usecs > 255))
1172 ha = (qlnx_host_t *)arg1;
1174 for (i = 0; i < ha->num_rss; i++) {
1176 p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];
1178 fp = &ha->fp_array[i];
1180 if (fp->rxq->handle != NULL) {
1181 ret = ecore_set_queue_coalesce(p_hwfn, (uint16_t)usecs,
1182 0, fp->rxq->handle);
1187 ha->rx_coalesce_usecs = (uint8_t)usecs;
/*
 * qlnx_add_sp_stats_sysctls
 *	Registers the "spstat" sysctl node under the device's tree and
 *	exposes the slowpath interrupt counter.
 */
1193 qlnx_add_sp_stats_sysctls(qlnx_host_t *ha)
1195 struct sysctl_ctx_list *ctx;
1196 struct sysctl_oid_list *children;
1197 struct sysctl_oid *ctx_oid;
1199 ctx = device_get_sysctl_ctx(ha->pci_dev);
1200 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
1202 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "spstat",
1203 CTLFLAG_RD, NULL, "spstat");
1204 children = SYSCTL_CHILDREN(ctx_oid);
1206 SYSCTL_ADD_QUAD(ctx, children,
1207 OID_AUTO, "sp_interrupts",
1208 CTLFLAG_RD, &ha->sp_interrupts,
1209 "No. of slowpath interrupts");
1215 qlnx_add_fp_stats_sysctls(qlnx_host_t *ha)
1217 struct sysctl_ctx_list *ctx;
1218 struct sysctl_oid_list *children;
1219 struct sysctl_oid_list *node_children;
1220 struct sysctl_oid *ctx_oid;
1222 uint8_t name_str[16];
1224 ctx = device_get_sysctl_ctx(ha->pci_dev);
1225 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
1227 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fpstat",
1228 CTLFLAG_RD, NULL, "fpstat");
1229 children = SYSCTL_CHILDREN(ctx_oid);
1231 for (i = 0; i < ha->num_rss; i++) {
1233 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1234 snprintf(name_str, sizeof(name_str), "%d", i);
1236 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
1237 CTLFLAG_RD, NULL, name_str);
1238 node_children = SYSCTL_CHILDREN(ctx_oid);
1242 SYSCTL_ADD_QUAD(ctx, node_children,
1243 OID_AUTO, "tx_pkts_processed",
1244 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_processed,
1245 "No. of packets processed for transmission");
1247 SYSCTL_ADD_QUAD(ctx, node_children,
1248 OID_AUTO, "tx_pkts_freed",
1249 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_freed,
1250 "No. of freed packets");
1252 SYSCTL_ADD_QUAD(ctx, node_children,
1253 OID_AUTO, "tx_pkts_transmitted",
1254 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_transmitted,
1255 "No. of transmitted packets");
1257 SYSCTL_ADD_QUAD(ctx, node_children,
1258 OID_AUTO, "tx_pkts_completed",
1259 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_completed,
1260 "No. of transmit completions");
1262 SYSCTL_ADD_QUAD(ctx, node_children,
1263 OID_AUTO, "tx_non_tso_pkts",
1264 CTLFLAG_RD, &ha->fp_array[i].tx_non_tso_pkts,
1265 "No. of non LSO transmited packets");
1267 #ifdef QLNX_TRACE_PERF_DATA
1269 SYSCTL_ADD_QUAD(ctx, node_children,
1270 OID_AUTO, "tx_pkts_trans_ctx",
1271 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_ctx,
1272 "No. of transmitted packets in transmit context");
1274 SYSCTL_ADD_QUAD(ctx, node_children,
1275 OID_AUTO, "tx_pkts_compl_ctx",
1276 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_ctx,
1277 "No. of transmit completions in transmit context");
1279 SYSCTL_ADD_QUAD(ctx, node_children,
1280 OID_AUTO, "tx_pkts_trans_fp",
1281 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_fp,
1282 "No. of transmitted packets in taskqueue");
1284 SYSCTL_ADD_QUAD(ctx, node_children,
1285 OID_AUTO, "tx_pkts_compl_fp",
1286 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_fp,
1287 "No. of transmit completions in taskqueue");
1289 SYSCTL_ADD_QUAD(ctx, node_children,
1290 OID_AUTO, "tx_pkts_compl_intr",
1291 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_intr,
1292 "No. of transmit completions in interrupt ctx");
1295 SYSCTL_ADD_QUAD(ctx, node_children,
1296 OID_AUTO, "tx_tso_pkts",
1297 CTLFLAG_RD, &ha->fp_array[i].tx_tso_pkts,
1298 "No. of LSO transmited packets");
1300 SYSCTL_ADD_QUAD(ctx, node_children,
1301 OID_AUTO, "tx_lso_wnd_min_len",
1302 CTLFLAG_RD, &ha->fp_array[i].tx_lso_wnd_min_len,
1303 "tx_lso_wnd_min_len");
1305 SYSCTL_ADD_QUAD(ctx, node_children,
1306 OID_AUTO, "tx_defrag",
1307 CTLFLAG_RD, &ha->fp_array[i].tx_defrag,
1310 SYSCTL_ADD_QUAD(ctx, node_children,
1311 OID_AUTO, "tx_nsegs_gt_elem_left",
1312 CTLFLAG_RD, &ha->fp_array[i].tx_nsegs_gt_elem_left,
1313 "tx_nsegs_gt_elem_left");
1315 SYSCTL_ADD_UINT(ctx, node_children,
1316 OID_AUTO, "tx_tso_max_nsegs",
1317 CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_nsegs,
1318 ha->fp_array[i].tx_tso_max_nsegs, "tx_tso_max_nsegs");
1320 SYSCTL_ADD_UINT(ctx, node_children,
1321 OID_AUTO, "tx_tso_min_nsegs",
1322 CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_nsegs,
1323 ha->fp_array[i].tx_tso_min_nsegs, "tx_tso_min_nsegs");
1325 SYSCTL_ADD_UINT(ctx, node_children,
1326 OID_AUTO, "tx_tso_max_pkt_len",
1327 CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_pkt_len,
1328 ha->fp_array[i].tx_tso_max_pkt_len,
1329 "tx_tso_max_pkt_len");
1331 SYSCTL_ADD_UINT(ctx, node_children,
1332 OID_AUTO, "tx_tso_min_pkt_len",
1333 CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_pkt_len,
1334 ha->fp_array[i].tx_tso_min_pkt_len,
1335 "tx_tso_min_pkt_len");
1337 for (j = 0; j < QLNX_FP_MAX_SEGS; j++) {
1339 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1340 snprintf(name_str, sizeof(name_str),
1341 "tx_pkts_nseg_%02d", (j+1));
1343 SYSCTL_ADD_QUAD(ctx, node_children,
1344 OID_AUTO, name_str, CTLFLAG_RD,
1345 &ha->fp_array[i].tx_pkts[j], name_str);
1348 #ifdef QLNX_TRACE_PERF_DATA
1349 for (j = 0; j < 18; j++) {
1351 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1352 snprintf(name_str, sizeof(name_str),
1353 "tx_pkts_hist_%02d", (j+1));
1355 SYSCTL_ADD_QUAD(ctx, node_children,
1356 OID_AUTO, name_str, CTLFLAG_RD,
1357 &ha->fp_array[i].tx_pkts_hist[j], name_str);
1359 for (j = 0; j < 5; j++) {
1361 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1362 snprintf(name_str, sizeof(name_str),
1363 "tx_comInt_%02d", (j+1));
1365 SYSCTL_ADD_QUAD(ctx, node_children,
1366 OID_AUTO, name_str, CTLFLAG_RD,
1367 &ha->fp_array[i].tx_comInt[j], name_str);
1369 for (j = 0; j < 18; j++) {
1371 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1372 snprintf(name_str, sizeof(name_str),
1373 "tx_pkts_q_%02d", (j+1));
1375 SYSCTL_ADD_QUAD(ctx, node_children,
1376 OID_AUTO, name_str, CTLFLAG_RD,
1377 &ha->fp_array[i].tx_pkts_q[j], name_str);
1381 SYSCTL_ADD_QUAD(ctx, node_children,
1382 OID_AUTO, "err_tx_nsegs_gt_elem_left",
1383 CTLFLAG_RD, &ha->fp_array[i].err_tx_nsegs_gt_elem_left,
1384 "err_tx_nsegs_gt_elem_left");
1386 SYSCTL_ADD_QUAD(ctx, node_children,
1387 OID_AUTO, "err_tx_dmamap_create",
1388 CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_create,
1389 "err_tx_dmamap_create");
1391 SYSCTL_ADD_QUAD(ctx, node_children,
1392 OID_AUTO, "err_tx_defrag_dmamap_load",
1393 CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag_dmamap_load,
1394 "err_tx_defrag_dmamap_load");
1396 SYSCTL_ADD_QUAD(ctx, node_children,
1397 OID_AUTO, "err_tx_non_tso_max_seg",
1398 CTLFLAG_RD, &ha->fp_array[i].err_tx_non_tso_max_seg,
1399 "err_tx_non_tso_max_seg");
1401 SYSCTL_ADD_QUAD(ctx, node_children,
1402 OID_AUTO, "err_tx_dmamap_load",
1403 CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_load,
1404 "err_tx_dmamap_load");
1406 SYSCTL_ADD_QUAD(ctx, node_children,
1407 OID_AUTO, "err_tx_defrag",
1408 CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag,
1411 SYSCTL_ADD_QUAD(ctx, node_children,
1412 OID_AUTO, "err_tx_free_pkt_null",
1413 CTLFLAG_RD, &ha->fp_array[i].err_tx_free_pkt_null,
1414 "err_tx_free_pkt_null");
1416 SYSCTL_ADD_QUAD(ctx, node_children,
1417 OID_AUTO, "err_tx_cons_idx_conflict",
1418 CTLFLAG_RD, &ha->fp_array[i].err_tx_cons_idx_conflict,
1419 "err_tx_cons_idx_conflict");
1421 SYSCTL_ADD_QUAD(ctx, node_children,
1422 OID_AUTO, "lro_cnt_64",
1423 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_64,
1426 SYSCTL_ADD_QUAD(ctx, node_children,
1427 OID_AUTO, "lro_cnt_128",
1428 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_128,
1431 SYSCTL_ADD_QUAD(ctx, node_children,
1432 OID_AUTO, "lro_cnt_256",
1433 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_256,
1436 SYSCTL_ADD_QUAD(ctx, node_children,
1437 OID_AUTO, "lro_cnt_512",
1438 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_512,
1441 SYSCTL_ADD_QUAD(ctx, node_children,
1442 OID_AUTO, "lro_cnt_1024",
1443 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_1024,
1448 SYSCTL_ADD_QUAD(ctx, node_children,
1449 OID_AUTO, "rx_pkts",
1450 CTLFLAG_RD, &ha->fp_array[i].rx_pkts,
1451 "No. of received packets");
1453 SYSCTL_ADD_QUAD(ctx, node_children,
1454 OID_AUTO, "tpa_start",
1455 CTLFLAG_RD, &ha->fp_array[i].tpa_start,
1456 "No. of tpa_start packets");
1458 SYSCTL_ADD_QUAD(ctx, node_children,
1459 OID_AUTO, "tpa_cont",
1460 CTLFLAG_RD, &ha->fp_array[i].tpa_cont,
1461 "No. of tpa_cont packets");
1463 SYSCTL_ADD_QUAD(ctx, node_children,
1464 OID_AUTO, "tpa_end",
1465 CTLFLAG_RD, &ha->fp_array[i].tpa_end,
1466 "No. of tpa_end packets");
1468 SYSCTL_ADD_QUAD(ctx, node_children,
1469 OID_AUTO, "err_m_getcl",
1470 CTLFLAG_RD, &ha->fp_array[i].err_m_getcl,
1473 SYSCTL_ADD_QUAD(ctx, node_children,
1474 OID_AUTO, "err_m_getjcl",
1475 CTLFLAG_RD, &ha->fp_array[i].err_m_getjcl,
1478 SYSCTL_ADD_QUAD(ctx, node_children,
1479 OID_AUTO, "err_rx_hw_errors",
1480 CTLFLAG_RD, &ha->fp_array[i].err_rx_hw_errors,
1481 "err_rx_hw_errors");
1483 SYSCTL_ADD_QUAD(ctx, node_children,
1484 OID_AUTO, "err_rx_alloc_errors",
1485 CTLFLAG_RD, &ha->fp_array[i].err_rx_alloc_errors,
1486 "err_rx_alloc_errors");
/*
 * qlnx_add_hw_stats_sysctls()
 * Registers read-only sysctl leaves under dev.qlnxN.hwstat that expose the
 * adapter's hardware statistics counters (ha->hw_stats).
 *
 * NOTE(review): this extracted chunk is incomplete — the "static void"
 * return-type line, blank lines, closing braces and several description-string
 * arguments were dropped; the leading integers on each line are original file
 * line numbers left behind by extraction, not code.
 */
1493 qlnx_add_hw_stats_sysctls(qlnx_host_t *ha)
1495 struct sysctl_ctx_list *ctx;
1496 struct sysctl_oid_list *children;
1497 struct sysctl_oid *ctx_oid;
/* Hang a "hwstat" node off this device's sysctl tree; all leaves go under it. */
1499 ctx = device_get_sysctl_ctx(ha->pci_dev);
1500 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
1502 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "hwstat",
1503 CTLFLAG_RD, NULL, "hwstat");
1504 children = SYSCTL_CHILDREN(ctx_oid);
/* Discard counters common to all adapter families. */
1506 SYSCTL_ADD_QUAD(ctx, children,
1507 OID_AUTO, "no_buff_discards",
1508 CTLFLAG_RD, &ha->hw_stats.common.no_buff_discards,
1509 "No. of packets discarded due to lack of buffer");
1511 SYSCTL_ADD_QUAD(ctx, children,
1512 OID_AUTO, "packet_too_big_discard",
1513 CTLFLAG_RD, &ha->hw_stats.common.packet_too_big_discard,
1514 "No. of packets discarded because packet was too big");
1516 SYSCTL_ADD_QUAD(ctx, children,
1517 OID_AUTO, "ttl0_discard",
1518 CTLFLAG_RD, &ha->hw_stats.common.ttl0_discard,
/* NOTE(review): the description-string argument of this (and several later)
 * SYSCTL_ADD_QUAD calls is missing from this view — dropped by extraction. */
/* Per-cast Rx byte/packet counters. */
1521 SYSCTL_ADD_QUAD(ctx, children,
1522 OID_AUTO, "rx_ucast_bytes",
1523 CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_bytes,
1526 SYSCTL_ADD_QUAD(ctx, children,
1527 OID_AUTO, "rx_mcast_bytes",
1528 CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_bytes,
1531 SYSCTL_ADD_QUAD(ctx, children,
1532 OID_AUTO, "rx_bcast_bytes",
1533 CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_bytes,
1536 SYSCTL_ADD_QUAD(ctx, children,
1537 OID_AUTO, "rx_ucast_pkts",
1538 CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_pkts,
1541 SYSCTL_ADD_QUAD(ctx, children,
1542 OID_AUTO, "rx_mcast_pkts",
1543 CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_pkts,
1546 SYSCTL_ADD_QUAD(ctx, children,
1547 OID_AUTO, "rx_bcast_pkts",
1548 CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_pkts,
/* Filter-based discard counters. */
1551 SYSCTL_ADD_QUAD(ctx, children,
1552 OID_AUTO, "mftag_filter_discards",
1553 CTLFLAG_RD, &ha->hw_stats.common.mftag_filter_discards,
1554 "mftag_filter_discards");
1556 SYSCTL_ADD_QUAD(ctx, children,
1557 OID_AUTO, "mac_filter_discards",
1558 CTLFLAG_RD, &ha->hw_stats.common.mac_filter_discards,
1559 "mac_filter_discards");
/* Per-cast Tx byte/packet counters. */
1561 SYSCTL_ADD_QUAD(ctx, children,
1562 OID_AUTO, "tx_ucast_bytes",
1563 CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_bytes,
1566 SYSCTL_ADD_QUAD(ctx, children,
1567 OID_AUTO, "tx_mcast_bytes",
1568 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_bytes,
1571 SYSCTL_ADD_QUAD(ctx, children,
1572 OID_AUTO, "tx_bcast_bytes",
1573 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_bytes,
1576 SYSCTL_ADD_QUAD(ctx, children,
1577 OID_AUTO, "tx_ucast_pkts",
1578 CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_pkts,
1581 SYSCTL_ADD_QUAD(ctx, children,
1582 OID_AUTO, "tx_mcast_pkts",
1583 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_pkts,
1586 SYSCTL_ADD_QUAD(ctx, children,
1587 OID_AUTO, "tx_bcast_pkts",
1588 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_pkts,
1591 SYSCTL_ADD_QUAD(ctx, children,
1592 OID_AUTO, "tx_err_drop_pkts",
1593 CTLFLAG_RD, &ha->hw_stats.common.tx_err_drop_pkts,
1594 "tx_err_drop_pkts");
/* TPA (transparent packet aggregation / hardware LRO) counters. */
1596 SYSCTL_ADD_QUAD(ctx, children,
1597 OID_AUTO, "tpa_coalesced_pkts",
1598 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_pkts,
1599 "tpa_coalesced_pkts");
1601 SYSCTL_ADD_QUAD(ctx, children,
1602 OID_AUTO, "tpa_coalesced_events",
1603 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_events,
1604 "tpa_coalesced_events");
1606 SYSCTL_ADD_QUAD(ctx, children,
1607 OID_AUTO, "tpa_aborts_num",
1608 CTLFLAG_RD, &ha->hw_stats.common.tpa_aborts_num,
1611 SYSCTL_ADD_QUAD(ctx, children,
1612 OID_AUTO, "tpa_not_coalesced_pkts",
1613 CTLFLAG_RD, &ha->hw_stats.common.tpa_not_coalesced_pkts,
1614 "tpa_not_coalesced_pkts");
1616 SYSCTL_ADD_QUAD(ctx, children,
1617 OID_AUTO, "tpa_coalesced_bytes",
1618 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_bytes,
1619 "tpa_coalesced_bytes");
/* Rx frame-size histogram counters. */
1621 SYSCTL_ADD_QUAD(ctx, children,
1622 OID_AUTO, "rx_64_byte_packets",
1623 CTLFLAG_RD, &ha->hw_stats.common.rx_64_byte_packets,
1624 "rx_64_byte_packets");
1626 SYSCTL_ADD_QUAD(ctx, children,
1627 OID_AUTO, "rx_65_to_127_byte_packets",
1628 CTLFLAG_RD, &ha->hw_stats.common.rx_65_to_127_byte_packets,
1629 "rx_65_to_127_byte_packets");
1631 SYSCTL_ADD_QUAD(ctx, children,
1632 OID_AUTO, "rx_128_to_255_byte_packets",
1633 CTLFLAG_RD, &ha->hw_stats.common.rx_128_to_255_byte_packets,
1634 "rx_128_to_255_byte_packets");
1636 SYSCTL_ADD_QUAD(ctx, children,
1637 OID_AUTO, "rx_256_to_511_byte_packets",
1638 CTLFLAG_RD, &ha->hw_stats.common.rx_256_to_511_byte_packets,
1639 "rx_256_to_511_byte_packets");
1641 SYSCTL_ADD_QUAD(ctx, children,
1642 OID_AUTO, "rx_512_to_1023_byte_packets",
1643 CTLFLAG_RD, &ha->hw_stats.common.rx_512_to_1023_byte_packets,
1644 "rx_512_to_1023_byte_packets");
1646 SYSCTL_ADD_QUAD(ctx, children,
1647 OID_AUTO, "rx_1024_to_1518_byte_packets",
1648 CTLFLAG_RD, &ha->hw_stats.common.rx_1024_to_1518_byte_packets,
1649 "rx_1024_to_1518_byte_packets");
/* NOTE(review): the following buckets read hw_stats.bb — presumably only
 * meaningful on the BB adapter family; confirm behavior on AH parts. */
1651 SYSCTL_ADD_QUAD(ctx, children,
1652 OID_AUTO, "rx_1519_to_1522_byte_packets",
1653 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_1522_byte_packets,
1654 "rx_1519_to_1522_byte_packets");
/* NOTE(review): sysctl name says 1523..2047 but the counter field is
 * rx_1519_to_2047 — looks intentional (1519-1522 has its own bucket above),
 * but worth confirming against the firmware stats layout. */
1656 SYSCTL_ADD_QUAD(ctx, children,
1657 OID_AUTO, "rx_1523_to_2047_byte_packets",
1658 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_2047_byte_packets,
1659 "rx_1523_to_2047_byte_packets");
1661 SYSCTL_ADD_QUAD(ctx, children,
1662 OID_AUTO, "rx_2048_to_4095_byte_packets",
1663 CTLFLAG_RD, &ha->hw_stats.bb.rx_2048_to_4095_byte_packets,
1664 "rx_2048_to_4095_byte_packets");
1666 SYSCTL_ADD_QUAD(ctx, children,
1667 OID_AUTO, "rx_4096_to_9216_byte_packets",
1668 CTLFLAG_RD, &ha->hw_stats.bb.rx_4096_to_9216_byte_packets,
1669 "rx_4096_to_9216_byte_packets");
1671 SYSCTL_ADD_QUAD(ctx, children,
1672 OID_AUTO, "rx_9217_to_16383_byte_packets",
1673 CTLFLAG_RD, &ha->hw_stats.bb.rx_9217_to_16383_byte_packets,
1674 "rx_9217_to_16383_byte_packets");
/* Rx error / flow-control counters. */
1676 SYSCTL_ADD_QUAD(ctx, children,
1677 OID_AUTO, "rx_crc_errors",
1678 CTLFLAG_RD, &ha->hw_stats.common.rx_crc_errors,
1681 SYSCTL_ADD_QUAD(ctx, children,
1682 OID_AUTO, "rx_mac_crtl_frames",
1683 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_crtl_frames,
1684 "rx_mac_crtl_frames");
1686 SYSCTL_ADD_QUAD(ctx, children,
1687 OID_AUTO, "rx_pause_frames",
1688 CTLFLAG_RD, &ha->hw_stats.common.rx_pause_frames,
1691 SYSCTL_ADD_QUAD(ctx, children,
1692 OID_AUTO, "rx_pfc_frames",
1693 CTLFLAG_RD, &ha->hw_stats.common.rx_pfc_frames,
1696 SYSCTL_ADD_QUAD(ctx, children,
1697 OID_AUTO, "rx_align_errors",
1698 CTLFLAG_RD, &ha->hw_stats.common.rx_align_errors,
1701 SYSCTL_ADD_QUAD(ctx, children,
1702 OID_AUTO, "rx_carrier_errors",
1703 CTLFLAG_RD, &ha->hw_stats.common.rx_carrier_errors,
1704 "rx_carrier_errors");
1706 SYSCTL_ADD_QUAD(ctx, children,
1707 OID_AUTO, "rx_oversize_packets",
1708 CTLFLAG_RD, &ha->hw_stats.common.rx_oversize_packets,
1709 "rx_oversize_packets");
1711 SYSCTL_ADD_QUAD(ctx, children,
1712 OID_AUTO, "rx_jabbers",
1713 CTLFLAG_RD, &ha->hw_stats.common.rx_jabbers,
1716 SYSCTL_ADD_QUAD(ctx, children,
1717 OID_AUTO, "rx_undersize_packets",
1718 CTLFLAG_RD, &ha->hw_stats.common.rx_undersize_packets,
1719 "rx_undersize_packets");
1721 SYSCTL_ADD_QUAD(ctx, children,
1722 OID_AUTO, "rx_fragments",
1723 CTLFLAG_RD, &ha->hw_stats.common.rx_fragments,
/* Tx frame-size histogram counters. */
1726 SYSCTL_ADD_QUAD(ctx, children,
1727 OID_AUTO, "tx_64_byte_packets",
1728 CTLFLAG_RD, &ha->hw_stats.common.tx_64_byte_packets,
1729 "tx_64_byte_packets");
1731 SYSCTL_ADD_QUAD(ctx, children,
1732 OID_AUTO, "tx_65_to_127_byte_packets",
1733 CTLFLAG_RD, &ha->hw_stats.common.tx_65_to_127_byte_packets,
1734 "tx_65_to_127_byte_packets");
1736 SYSCTL_ADD_QUAD(ctx, children,
1737 OID_AUTO, "tx_128_to_255_byte_packets",
1738 CTLFLAG_RD, &ha->hw_stats.common.tx_128_to_255_byte_packets,
1739 "tx_128_to_255_byte_packets");
1741 SYSCTL_ADD_QUAD(ctx, children,
1742 OID_AUTO, "tx_256_to_511_byte_packets",
1743 CTLFLAG_RD, &ha->hw_stats.common.tx_256_to_511_byte_packets,
1744 "tx_256_to_511_byte_packets");
1746 SYSCTL_ADD_QUAD(ctx, children,
1747 OID_AUTO, "tx_512_to_1023_byte_packets",
1748 CTLFLAG_RD, &ha->hw_stats.common.tx_512_to_1023_byte_packets,
1749 "tx_512_to_1023_byte_packets");
1751 SYSCTL_ADD_QUAD(ctx, children,
1752 OID_AUTO, "tx_1024_to_1518_byte_packets",
1753 CTLFLAG_RD, &ha->hw_stats.common.tx_1024_to_1518_byte_packets,
1754 "tx_1024_to_1518_byte_packets");
/* Large Tx buckets live in the BB-specific stats block (see note above). */
1756 SYSCTL_ADD_QUAD(ctx, children,
1757 OID_AUTO, "tx_1519_to_2047_byte_packets",
1758 CTLFLAG_RD, &ha->hw_stats.bb.tx_1519_to_2047_byte_packets,
1759 "tx_1519_to_2047_byte_packets");
1761 SYSCTL_ADD_QUAD(ctx, children,
1762 OID_AUTO, "tx_2048_to_4095_byte_packets",
1763 CTLFLAG_RD, &ha->hw_stats.bb.tx_2048_to_4095_byte_packets,
1764 "tx_2048_to_4095_byte_packets");
1766 SYSCTL_ADD_QUAD(ctx, children,
1767 OID_AUTO, "tx_4096_to_9216_byte_packets",
1768 CTLFLAG_RD, &ha->hw_stats.bb.tx_4096_to_9216_byte_packets,
1769 "tx_4096_to_9216_byte_packets");
1771 SYSCTL_ADD_QUAD(ctx, children,
1772 OID_AUTO, "tx_9217_to_16383_byte_packets",
1773 CTLFLAG_RD, &ha->hw_stats.bb.tx_9217_to_16383_byte_packets,
1774 "tx_9217_to_16383_byte_packets");
/* Tx flow-control / EEE / collision counters. */
1776 SYSCTL_ADD_QUAD(ctx, children,
1777 OID_AUTO, "tx_pause_frames",
1778 CTLFLAG_RD, &ha->hw_stats.common.tx_pause_frames,
1781 SYSCTL_ADD_QUAD(ctx, children,
1782 OID_AUTO, "tx_pfc_frames",
1783 CTLFLAG_RD, &ha->hw_stats.common.tx_pfc_frames,
1786 SYSCTL_ADD_QUAD(ctx, children,
1787 OID_AUTO, "tx_lpi_entry_count",
1788 CTLFLAG_RD, &ha->hw_stats.bb.tx_lpi_entry_count,
1789 "tx_lpi_entry_count");
1791 SYSCTL_ADD_QUAD(ctx, children,
1792 OID_AUTO, "tx_total_collisions",
1793 CTLFLAG_RD, &ha->hw_stats.bb.tx_total_collisions,
1794 "tx_total_collisions");
/* BRB (buffer ring block) counters. */
1796 SYSCTL_ADD_QUAD(ctx, children,
1797 OID_AUTO, "brb_truncates",
1798 CTLFLAG_RD, &ha->hw_stats.common.brb_truncates,
1801 SYSCTL_ADD_QUAD(ctx, children,
1802 OID_AUTO, "brb_discards",
1803 CTLFLAG_RD, &ha->hw_stats.common.brb_discards,
/* MAC-level aggregate counters. */
1806 SYSCTL_ADD_QUAD(ctx, children,
1807 OID_AUTO, "rx_mac_bytes",
1808 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bytes,
1811 SYSCTL_ADD_QUAD(ctx, children,
1812 OID_AUTO, "rx_mac_uc_packets",
1813 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_uc_packets,
1814 "rx_mac_uc_packets");
1816 SYSCTL_ADD_QUAD(ctx, children,
1817 OID_AUTO, "rx_mac_mc_packets",
1818 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_mc_packets,
1819 "rx_mac_mc_packets");
1821 SYSCTL_ADD_QUAD(ctx, children,
1822 OID_AUTO, "rx_mac_bc_packets",
1823 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bc_packets,
1824 "rx_mac_bc_packets");
1826 SYSCTL_ADD_QUAD(ctx, children,
1827 OID_AUTO, "rx_mac_frames_ok",
1828 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_frames_ok,
1829 "rx_mac_frames_ok");
1831 SYSCTL_ADD_QUAD(ctx, children,
1832 OID_AUTO, "tx_mac_bytes",
1833 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bytes,
1836 SYSCTL_ADD_QUAD(ctx, children,
1837 OID_AUTO, "tx_mac_uc_packets",
1838 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_uc_packets,
1839 "tx_mac_uc_packets");
1841 SYSCTL_ADD_QUAD(ctx, children,
1842 OID_AUTO, "tx_mac_mc_packets",
1843 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_mc_packets,
1844 "tx_mac_mc_packets");
1846 SYSCTL_ADD_QUAD(ctx, children,
1847 OID_AUTO, "tx_mac_bc_packets",
1848 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bc_packets,
1849 "tx_mac_bc_packets");
1851 SYSCTL_ADD_QUAD(ctx, children,
1852 OID_AUTO, "tx_mac_ctrl_frames",
1853 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_ctrl_frames,
1854 "tx_mac_ctrl_frames");
/*
 * qlnx_add_sysctls()
 * Top-level sysctl registration for the device: installs the per-fastpath,
 * slowpath and hardware statistics trees, then a set of driver tunables
 * (debug levels, coalescing, error-injection, dump triggers) directly under
 * the device node. Also seeds the defaults for several tunables (dp_level,
 * rx_pkt_threshold, etc.) before exposing them.
 *
 * NOTE(review): extracted chunk — blank lines, the "static void" line,
 * closing braces and some sysctl-argument lines are missing from this view;
 * leading integers are extraction residue.
 */
1859 qlnx_add_sysctls(qlnx_host_t *ha)
1861 device_t dev = ha->pci_dev;
1862 struct sysctl_ctx_list *ctx;
1863 struct sysctl_oid_list *children;
1865 ctx = device_get_sysctl_ctx(dev);
1866 children = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
/* Sub-trees: per-queue stats, slowpath stats, hardware stats. */
1868 qlnx_add_fp_stats_sysctls(ha);
1869 qlnx_add_sp_stats_sysctls(ha);
1870 qlnx_add_hw_stats_sysctls(ha);
/* Read-only version strings. */
1872 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Driver_Version",
1873 CTLFLAG_RD, qlnx_ver_str, 0,
1876 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "STORMFW_Version",
1877 CTLFLAG_RD, ha->stormfw_ver, 0,
1878 "STORM Firmware Version");
1880 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "MFW_Version",
1881 CTLFLAG_RD, ha->mfw_ver, 0,
1882 "Management Firmware Version");
1884 SYSCTL_ADD_UINT(ctx, children,
1885 OID_AUTO, "personality", CTLFLAG_RD,
1886 &ha->personality, ha->personality,
1887 "\tpersonality = 0 => Ethernet Only\n"
1888 "\tpersonality = 3 => Ethernet and RoCE\n"
1889 "\tpersonality = 4 => Ethernet and iWARP\n"
1890 "\tpersonality = 6 => Default in Shared Memory\n");
/* Writable debug knobs. */
1893 SYSCTL_ADD_UINT(ctx, children,
1894 OID_AUTO, "debug", CTLFLAG_RW,
1895 &ha->dbg_level, ha->dbg_level, "Debug Level");
1897 ha->dp_level = 0x01;
1898 SYSCTL_ADD_UINT(ctx, children,
1899 OID_AUTO, "dp_level", CTLFLAG_RW,
1900 &ha->dp_level, ha->dp_level, "DP Level");
1902 ha->dbg_trace_lro_cnt = 0;
1903 SYSCTL_ADD_UINT(ctx, children,
1904 OID_AUTO, "dbg_trace_lro_cnt", CTLFLAG_RW,
1905 &ha->dbg_trace_lro_cnt, ha->dbg_trace_lro_cnt,
1906 "Trace LRO Counts");
1908 ha->dbg_trace_tso_pkt_len = 0;
1909 SYSCTL_ADD_UINT(ctx, children,
1910 OID_AUTO, "dbg_trace_tso_pkt_len", CTLFLAG_RW,
1911 &ha->dbg_trace_tso_pkt_len, ha->dbg_trace_tso_pkt_len,
1912 "Trace TSO packet lengths");
1915 SYSCTL_ADD_UINT(ctx, children,
1916 OID_AUTO, "dp_module", CTLFLAG_RW,
1917 &ha->dp_module, ha->dp_module, "DP Module");
1921 SYSCTL_ADD_UINT(ctx, children,
1922 OID_AUTO, "err_inject", CTLFLAG_RW,
1923 &ha->err_inject, ha->err_inject, "Error Inject");
/* Storm statistics gathering control/state. */
1925 ha->storm_stats_enable = 0;
1927 SYSCTL_ADD_UINT(ctx, children,
1928 OID_AUTO, "storm_stats_enable", CTLFLAG_RW,
1929 &ha->storm_stats_enable, ha->storm_stats_enable,
1930 "Enable Storm Statistics Gathering");
1932 ha->storm_stats_index = 0;
1934 SYSCTL_ADD_UINT(ctx, children,
1935 OID_AUTO, "storm_stats_index", CTLFLAG_RD,
1936 &ha->storm_stats_index, ha->storm_stats_index,
1937 "Enable Storm Statistics Gathering Current Index");
/* Dump bookkeeping flags (read-only, reset when dump is retrieved). */
1939 ha->grcdump_taken = 0;
1940 SYSCTL_ADD_UINT(ctx, children,
1941 OID_AUTO, "grcdump_taken", CTLFLAG_RD,
1942 &ha->grcdump_taken, ha->grcdump_taken, "grcdump_taken");
1944 ha->idle_chk_taken = 0;
1945 SYSCTL_ADD_UINT(ctx, children,
1946 OID_AUTO, "idle_chk_taken", CTLFLAG_RD,
1947 &ha->idle_chk_taken, ha->idle_chk_taken, "idle_chk_taken");
/* Interrupt coalescing: current values read-only; changed via the
 * set_{rx,tx}_coalesce_usecs proc handlers below. */
1949 SYSCTL_ADD_UINT(ctx, children,
1950 OID_AUTO, "rx_coalesce_usecs", CTLFLAG_RD,
1951 &ha->rx_coalesce_usecs, ha->rx_coalesce_usecs,
1952 "rx_coalesce_usecs");
1954 SYSCTL_ADD_UINT(ctx, children,
1955 OID_AUTO, "tx_coalesce_usecs", CTLFLAG_RD,
1956 &ha->tx_coalesce_usecs, ha->tx_coalesce_usecs,
1957 "tx_coalesce_usecs");
1959 ha->rx_pkt_threshold = 128;
1960 SYSCTL_ADD_UINT(ctx, children,
1961 OID_AUTO, "rx_pkt_threshold", CTLFLAG_RW,
1962 &ha->rx_pkt_threshold, ha->rx_pkt_threshold,
1963 "No. of Rx Pkts to process at a time");
1965 ha->rx_jumbo_buf_eq_mtu = 0;
1966 SYSCTL_ADD_UINT(ctx, children,
1967 OID_AUTO, "rx_jumbo_buf_eq_mtu", CTLFLAG_RW,
1968 &ha->rx_jumbo_buf_eq_mtu, ha->rx_jumbo_buf_eq_mtu,
1969 "== 0 => Rx Jumbo buffers are capped to 4Kbytes\n"
1970 "otherwise Rx Jumbo buffers are set to >= MTU size\n");
/* Proc-style handlers; the (ha, 0) argument lines appear to be missing
 * from this view. */
1972 SYSCTL_ADD_PROC(ctx, children,
1973 OID_AUTO, "trigger_dump", CTLTYPE_INT | CTLFLAG_RW,
1975 qlnx_trigger_dump_sysctl, "I", "trigger_dump");
1977 SYSCTL_ADD_PROC(ctx, children,
1978 OID_AUTO, "set_rx_coalesce_usecs", CTLTYPE_INT | CTLFLAG_RW,
1980 qlnx_set_rx_coalesce, "I",
1981 "rx interrupt coalesce period microseconds");
1983 SYSCTL_ADD_PROC(ctx, children,
1984 OID_AUTO, "set_tx_coalesce_usecs", CTLTYPE_INT | CTLFLAG_RW,
1986 qlnx_set_tx_coalesce, "I",
1987 "tx interrupt coalesce period microseconds");
/* Driver-level error counters. */
1989 SYSCTL_ADD_QUAD(ctx, children,
1990 OID_AUTO, "err_illegal_intr", CTLFLAG_RD,
1991 &ha->err_illegal_intr, "err_illegal_intr");
1993 SYSCTL_ADD_QUAD(ctx, children,
1994 OID_AUTO, "err_fp_null", CTLFLAG_RD,
1995 &ha->err_fp_null, "err_fp_null");
1997 SYSCTL_ADD_QUAD(ctx, children,
1998 OID_AUTO, "err_get_proto_invalid_type", CTLFLAG_RD,
1999 &ha->err_get_proto_invalid_type, "err_get_proto_invalid_type");
2005 /*****************************************************************************
2006 * Operating System Network Interface Functions
2007 *****************************************************************************/
/*
 * qlnx_init_ifnet()
 * Allocates and initializes the network interface (ifnet) for this adapter:
 * sets the baudrate from the PCI device id, installs the init/ioctl/
 * transmit/qflush handlers, attaches the Ethernet layer with the primary
 * MAC address, advertises checksum/TSO/LRO/VLAN capabilities, and populates
 * the ifmedia list according to the device family.
 *
 * NOTE(review): extracted chunk — blank lines, local variable declarations,
 * the "#else" of the __FreeBSD_version block, some "#endif"/braces and a few
 * statements are missing from this view; leading integers are extraction
 * residue.
 */
2010 qlnx_init_ifnet(device_t dev, qlnx_host_t *ha)
2015 ifp = ha->ifp = if_alloc(IFT_ETHER);
2018 panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));
2020 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2022 device_id = pci_get_device(ha->pci_dev);
2024 #if __FreeBSD_version >= 1000000
/* Pick the link baudrate from the PCI device id (40/25/50/100 Gbps). */
2026 if (device_id == QLOGIC_PCI_DEVICE_ID_1634)
2027 ifp->if_baudrate = IF_Gbps(40);
2028 else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
2029 (device_id == QLOGIC_PCI_DEVICE_ID_8070))
2030 ifp->if_baudrate = IF_Gbps(25);
2031 else if (device_id == QLOGIC_PCI_DEVICE_ID_1654)
2032 ifp->if_baudrate = IF_Gbps(50);
2033 else if (device_id == QLOGIC_PCI_DEVICE_ID_1644)
2034 ifp->if_baudrate = IF_Gbps(100);
2036 ifp->if_capabilities = IFCAP_LINKSTATE;
/* NOTE(review): the lines below presumably belong to the #else branch of
 * the version check (pre-10.0 fallback) — the #else line itself is missing
 * from this view. */
2038 ifp->if_mtu = ETHERMTU;
2039 ifp->if_baudrate = (1 * 1000 * 1000 *1000);
2041 #endif /* #if __FreeBSD_version >= 1000000 */
/* Install driver entry points. */
2043 ifp->if_init = qlnx_init;
2045 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2046 ifp->if_ioctl = qlnx_ioctl;
2047 ifp->if_transmit = qlnx_transmit;
2048 ifp->if_qflush = qlnx_qflush;
2050 IFQ_SET_MAXLEN(&ifp->if_snd, qlnx_get_ifq_snd_maxlen(ha));
2051 ifp->if_snd.ifq_drv_maxlen = qlnx_get_ifq_snd_maxlen(ha);
2052 IFQ_SET_READY(&ifp->if_snd);
2054 #if __FreeBSD_version >= 1100036
2055 if_setgetcounterfn(ifp, qlnx_get_counter);
2058 ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
/* Attach with the hardware MAC, then read back the effective LLADDR. */
2060 memcpy(ha->primary_mac, qlnx_get_mac_addr(ha), ETH_ALEN);
2061 ether_ifattach(ifp, ha->primary_mac);
2062 bcopy(IF_LLADDR(ha->ifp), ha->primary_mac, ETHER_ADDR_LEN);
/* Hardware offload capability advertisement. */
2064 ifp->if_capabilities = IFCAP_HWCSUM;
2065 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2067 ifp->if_capabilities |= IFCAP_VLAN_MTU;
2068 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
2069 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2070 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
2071 ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
2072 ifp->if_capabilities |= IFCAP_TSO4;
2073 ifp->if_capabilities |= IFCAP_TSO6;
2074 ifp->if_capabilities |= IFCAP_LRO;
/* TSO limits: one descriptor is reserved for the header BD. */
2076 ifp->if_hw_tsomax = QLNX_MAX_TSO_FRAME_SIZE -
2077 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
2078 ifp->if_hw_tsomaxsegcount = QLNX_MAX_SEGMENTS - 1 /* hdr */;
2079 ifp->if_hw_tsomaxsegsize = QLNX_MAX_TX_MBUF_SIZE;
2082 ifp->if_capenable = ifp->if_capabilities;
2084 ifp->if_hwassist = CSUM_IP;
2085 ifp->if_hwassist |= CSUM_TCP | CSUM_UDP;
2086 ifp->if_hwassist |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6;
2087 ifp->if_hwassist |= CSUM_TSO;
2089 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2091 ifmedia_init(&ha->media, IFM_IMASK, qlnx_media_change,\
/* Media list depends on the device family (40G / 25G / 50G / 100G). */
2094 if (device_id == QLOGIC_PCI_DEVICE_ID_1634) {
2095 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_LR4), 0, NULL);
2096 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_SR4), 0, NULL);
2097 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_CR4), 0, NULL);
2098 } else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
2099 (device_id == QLOGIC_PCI_DEVICE_ID_8070)) {
2100 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_SR), 0, NULL);
2101 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_CR), 0, NULL);
2102 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) {
2103 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_KR2), 0, NULL);
2104 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_CR2), 0, NULL);
2105 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) {
2106 ifmedia_add(&ha->media,
2107 (IFM_ETHER | QLNX_IFM_100G_LR4), 0, NULL);
2108 ifmedia_add(&ha->media,
2109 (IFM_ETHER | QLNX_IFM_100G_SR4), 0, NULL);
2110 ifmedia_add(&ha->media,
2111 (IFM_ETHER | QLNX_IFM_100G_CR4), 0, NULL);
/* Always offer full-duplex and autoselect; default to autoselect. */
2114 ifmedia_add(&ha->media, (IFM_ETHER | IFM_FDX), 0, NULL);
2115 ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);
2118 ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));
2120 QL_DPRINT2(ha, "exit\n");
/*
 * qlnx_init_locked()
 * Core (re)initialization path, expected to run with the driver lock held
 * by the caller; stops then loads the adapter and, on success, marks the
 * interface RUNNING and not OACTIVE.
 *
 * NOTE(review): extracted chunk — the qlnx_stop() call and closing braces
 * appear to be missing from this view; leading integers are extraction
 * residue.
 */
2126 qlnx_init_locked(qlnx_host_t *ha)
2128 struct ifnet *ifp = ha->ifp;
2130 QL_DPRINT1(ha, "Driver Initialization start \n");
/* qlnx_load() returns 0 on success. */
2134 if (qlnx_load(ha) == 0) {
2135 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2136 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
/*
 * qlnx_init()
 * ifnet if_init entry point: takes the softc from the opaque argument and
 * runs the locked init path (lock acquire/release lines are not visible in
 * this extracted view).
 */
2143 qlnx_init(void *arg)
2147 ha = (qlnx_host_t *)arg;
2149 QL_DPRINT2(ha, "enter\n");
2152 qlnx_init_locked(ha);
2155 QL_DPRINT2(ha, "exit\n");
/*
 * qlnx_config_mcast_mac_addr()
 * Programs a single multicast MAC into (add_mac != 0) or out of
 * (add_mac == 0) the hardware filter via an ecore multicast filter command.
 * Returns the ecore status from ecore_filter_mcast_cmd().
 *
 * NOTE(review): extracted chunk — the `cdev = ...` assignment, the
 * if/else around the opcode selection and the return statement are not
 * visible here; leading integers are extraction residue.
 */
2161 qlnx_config_mcast_mac_addr(qlnx_host_t *ha, uint8_t *mac_addr, uint32_t add_mac)
2163 struct ecore_filter_mcast *mcast;
2164 struct ecore_dev *cdev;
/* Reuse the per-softc scratch filter command structure. */
2169 mcast = &ha->ecore_mcast;
2170 bzero(mcast, sizeof(struct ecore_filter_mcast));
2173 mcast->opcode = ECORE_FILTER_ADD;
2175 mcast->opcode = ECORE_FILTER_REMOVE;
2177 mcast->num_mc_addrs = 1;
2178 memcpy(mcast->mac, mac_addr, ETH_ALEN);
/* Callback-mode SPQ completion; NULL cookie. */
2180 rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);
/*
 * qlnx_hw_add_mcast()
 * Adds a multicast address to the software shadow table (ha->mcast[]) and
 * the hardware filter. Returns 0 if already present or added; a nonzero
 * error path is implied by lines not visible in this extracted view.
 */
2186 qlnx_hw_add_mcast(qlnx_host_t *ha, uint8_t *mta)
/* First pass: bail out if the address is already in the shadow table. */
2190 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2192 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0)
2193 return 0; /* its been already added */
/* Second pass: find a free slot (all-zero MAC marks an empty entry). */
2196 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2198 if ((ha->mcast[i].addr[0] == 0) &&
2199 (ha->mcast[i].addr[1] == 0) &&
2200 (ha->mcast[i].addr[2] == 0) &&
2201 (ha->mcast[i].addr[3] == 0) &&
2202 (ha->mcast[i].addr[4] == 0) &&
2203 (ha->mcast[i].addr[5] == 0)) {
/* Program the hardware first; the failure-return line is not visible here. */
2205 if (qlnx_config_mcast_mac_addr(ha, mta, 1))
2208 bcopy(mta, ha->mcast[i].addr, ETH_ALEN);
/*
 * qlnx_hw_del_mcast()
 * Removes a multicast address from the hardware filter and clears its slot
 * in the software shadow table (an all-zero MAC marks the entry free again).
 * Failure-return and tail lines are not visible in this extracted view.
 */
2218 qlnx_hw_del_mcast(qlnx_host_t *ha, uint8_t *mta)
2222 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2223 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) {
/* Remove from hardware first; only clear the shadow slot on success. */
2225 if (qlnx_config_mcast_mac_addr(ha, mta, 0))
2228 ha->mcast[i].addr[0] = 0;
2229 ha->mcast[i].addr[1] = 0;
2230 ha->mcast[i].addr[2] = 0;
2231 ha->mcast[i].addr[3] = 0;
2232 ha->mcast[i].addr[4] = 0;
2233 ha->mcast[i].addr[5] = 0;
2244  * Name: qlnx_hw_set_multi
2245  * Function: Sets the Multicast Addresses provided by the host O.S into the
2246  *	hardware (for the given interface)
/*
 * Walks the flat multicast table `mta` (entries spaced ETHER_HDR_LEN bytes
 * apart — matching QLNX_MCAST_ADDRS_SIZE in qlnx_set_multi) and adds or
 * deletes each address depending on the flag parameter. The if/else glue
 * between the add and delete calls is not visible in this extracted view.
 */
2249 qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
2254 for (i = 0; i < mcnt; i++) {
2256 if (qlnx_hw_add_mcast(ha, mta))
2259 if (qlnx_hw_del_mcast(ha, mta))
/* Stride is ETHER_HDR_LEN (not ETH_ALEN) — entries are padded; this matches
 * how qlnx_set_multi() packs the table. */
2263 mta += ETHER_HDR_LEN;
/* Size of the on-stack multicast table; entries are spaced ETHER_HDR_LEN
 * bytes apart (padded), not ETH_ALEN. */
2269 #define QLNX_MCAST_ADDRS_SIZE (QLNX_MAX_NUM_MULTICAST_ADDRS * ETHER_HDR_LEN)
/*
 * qlnx_set_multi()
 * Collects up to QLNX_MAX_NUM_MULTICAST_ADDRS link-layer multicast addresses
 * from the interface's address list into a local table, then pushes the
 * whole set to hardware via qlnx_hw_set_multi(). The `continue`/`break`
 * inside the loop and the mcnt++ line are not visible in this extracted view.
 */
2271 qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi)
2273 uint8_t mta[QLNX_MCAST_ADDRS_SIZE];
2274 struct ifmultiaddr *ifma;
2276 struct ifnet *ifp = ha->ifp;
/* Snapshot the multicast list under the maddr read lock. */
2279 if_maddr_rlock(ifp);
2281 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2283 if (ifma->ifma_addr->sa_family != AF_LINK)
2286 if (mcnt == QLNX_MAX_NUM_MULTICAST_ADDRS)
2289 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
2290 &mta[mcnt * ETHER_HDR_LEN], ETHER_HDR_LEN);
2295 if_maddr_runlock(ifp);
2298 qlnx_hw_set_multi(ha, mta, mcnt, add_multi);
/*
 * qlnx_set_promisc()
 * Enables promiscuous reception by adding the "accept unmatched unicast and
 * multicast" bits to the current Rx accept filter and re-programming it.
 * Declarations and the return statement are not visible in this extracted
 * view.
 */
2305 qlnx_set_promisc(qlnx_host_t *ha)
2310 filter = ha->filter;
2311 filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
2312 filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
2314 rc = qlnx_set_rx_accept_filter(ha, filter);
/*
 * qlnx_set_allmulti()
 * Enables reception of all multicast by adding only the "accept unmatched
 * multicast" bit to the current Rx accept filter (unicast filtering is left
 * untouched, unlike qlnx_set_promisc()).
 */
2319 qlnx_set_allmulti(qlnx_host_t *ha)
2324 filter = ha->filter;
2325 filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
2326 rc = qlnx_set_rx_accept_filter(ha, filter);
/*
 * qlnx_ioctl()
 * ifnet ioctl handler. Services SIOCSIFADDR, SIOCSIFMTU, SIOCSIFFLAGS,
 * SIOC{ADD,DEL}MULTI, SIOC[SG]IFMEDIA, SIOCSIFCAP and (on new enough
 * FreeBSD) SIOCGI2C for SFP EEPROM reads; everything else falls through to
 * ether_ioctl().
 *
 * NOTE(review): extracted chunk — the `switch (cmd)` line, the `case ...:`
 * labels, lock/unlock calls, `break`s and closing braces are missing from
 * this view; the case boundaries below are inferred from the QL_DPRINT
 * strings. Leading integers are extraction residue.
 */
2333 qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2336 struct ifreq *ifr = (struct ifreq *)data;
2337 struct ifaddr *ifa = (struct ifaddr *)data;
2340 ha = (qlnx_host_t *)ifp->if_softc;
/* --- SIOCSIFADDR: bring the interface up for AF_INET, then arp_ifinit. --- */
2344 QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx)\n", cmd);
2346 if (ifa->ifa_addr->sa_family == AF_INET) {
2347 ifp->if_flags |= IFF_UP;
2348 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2350 qlnx_init_locked(ha);
2353 QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
2354 cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr));
2356 arp_ifinit(ifp, ifa);
2358 ether_ioctl(ifp, cmd, data);
/* --- SIOCSIFMTU: validate, recompute max frame size, re-init if running. --- */
2363 QL_DPRINT4(ha, "SIOCSIFMTU (0x%lx)\n", cmd);
2365 if (ifr->ifr_mtu > QLNX_MAX_MTU) {
2369 ifp->if_mtu = ifr->ifr_mtu;
2370 ha->max_frame_size =
2371 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2372 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2373 qlnx_init_locked(ha);
/* --- SIOCSIFFLAGS: toggle promisc/allmulti or (re)start/stop. --- */
2382 QL_DPRINT4(ha, "SIOCSIFFLAGS (0x%lx)\n", cmd);
2386 if (ifp->if_flags & IFF_UP) {
2387 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
/* Only reprogram filters for the flag that actually changed. */
2388 if ((ifp->if_flags ^ ha->if_flags) &
2390 ret = qlnx_set_promisc(ha);
2391 } else if ((ifp->if_flags ^ ha->if_flags) &
2393 ret = qlnx_set_allmulti(ha);
2396 ha->max_frame_size = ifp->if_mtu +
2397 ETHER_HDR_LEN + ETHER_CRC_LEN;
2398 qlnx_init_locked(ha);
2401 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2403 ha->if_flags = ifp->if_flags;
/* --- SIOCADDMULTI / SIOCDELMULTI: rebuild the multicast filter. --- */
2410 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCADDMULTI", cmd);
2412 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2413 if (qlnx_set_multi(ha, 1))
2419 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCDELMULTI", cmd);
2421 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2422 if (qlnx_set_multi(ha, 0))
/* --- SIOCSIFMEDIA / SIOCGIFMEDIA: delegate to ifmedia. --- */
2429 QL_DPRINT4(ha, "SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", cmd);
2431 ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
/* --- SIOCSIFCAP: toggle only the capability bits that changed. --- */
2436 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2438 QL_DPRINT4(ha, "SIOCSIFCAP (0x%lx)\n", cmd);
2440 if (mask & IFCAP_HWCSUM)
2441 ifp->if_capenable ^= IFCAP_HWCSUM;
2442 if (mask & IFCAP_TSO4)
2443 ifp->if_capenable ^= IFCAP_TSO4;
2444 if (mask & IFCAP_TSO6)
2445 ifp->if_capenable ^= IFCAP_TSO6;
2446 if (mask & IFCAP_VLAN_HWTAGGING)
2447 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2448 if (mask & IFCAP_VLAN_HWTSO)
2449 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
2450 if (mask & IFCAP_LRO)
2451 ifp->if_capenable ^= IFCAP_LRO;
2453 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
2456 VLAN_CAPABILITIES(ifp);
2459 #if (__FreeBSD_version >= 1100101)
/* --- SIOCGI2C: read SFP module EEPROM via the management firmware. --- */
2463 struct ifi2creq i2c;
2464 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0];
2465 struct ecore_ptt *p_ptt;
2467 ret = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
/* Only the standard SFP i2c addresses (0xA0 EEPROM, 0xA2 diagnostics). */
2472 if ((i2c.len > sizeof (i2c.data)) ||
2473 (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2)) {
2478 p_ptt = ecore_ptt_acquire(p_hwfn);
2481 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
2486 ret = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt,
2487 (ha->pci_func & 0x1), i2c.dev_addr, i2c.offset,
2488 i2c.len, &i2c.data[0]);
2490 ecore_ptt_release(p_hwfn, p_ptt);
2497 ret = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
2499 QL_DPRINT8(ha, "SIOCGI2C copyout ret = %d \
2500 len = %d addr = 0x%02x offset = 0x%04x \
2501 data[0..7]=0x%02x 0x%02x 0x%02x 0x%02x 0x%02x \
2502 0x%02x 0x%02x 0x%02x\n",
2503 ret, i2c.len, i2c.dev_addr, i2c.offset,
2504 i2c.data[0], i2c.data[1], i2c.data[2], i2c.data[3],
2505 i2c.data[4], i2c.data[5], i2c.data[6], i2c.data[7]);
2508 #endif /* #if (__FreeBSD_version >= 1100101) */
/* --- default: pass everything else to the generic Ethernet handler. --- */
2511 QL_DPRINT4(ha, "default (0x%lx)\n", cmd);
2512 ret = ether_ioctl(ifp, cmd, data);
/*
 * qlnx_media_change()
 * ifmedia change callback: only accepts IFM_ETHER media types (the
 * error-return for other types is not visible in this extracted view).
 */
2520 qlnx_media_change(struct ifnet *ifp)
2523 struct ifmedia *ifm;
2526 ha = (qlnx_host_t *)ifp->if_softc;
2528 QL_DPRINT2(ha, "enter\n");
/* NOTE(review): `ifm = &ha->media;` assignment is missing from this view. */
2532 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2535 QL_DPRINT2(ha, "exit\n");
/*
 * qlnx_media_status()
 * ifmedia status callback: reports link validity, and when the link is up,
 * the active media (full duplex + speed from qlnx_get_optics()) and any
 * pause/PFC flow-control flags negotiated with the link partner. The
 * `if (ha->link_up)` guard lines are not visible in this extracted view.
 */
2541 qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2545 ha = (qlnx_host_t *)ifp->if_softc;
2547 QL_DPRINT2(ha, "enter\n");
2549 ifmr->ifm_status = IFM_AVALID;
2550 ifmr->ifm_active = IFM_ETHER;
2553 ifmr->ifm_status |= IFM_ACTIVE;
2555 (IFM_FDX | qlnx_get_optics(ha, &ha->if_link));
2557 if (ha->if_link.link_partner_caps &
2558 (QLNX_LINK_CAP_Pause | QLNX_LINK_CAP_Asym_Pause))
2560 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE);
2563 QL_DPRINT2(ha, "exit (%s)\n", (ha->link_up ? "link_up" : "link_down"));
/*
 * qlnx_free_tx_pkt()
 * Completes one transmitted packet at the queue's sw_tx_cons position:
 * updates interface counters, syncs and unloads the DMA map, consumes the
 * packet's BDs from the ecore chain, and clears the software ring entry.
 * A NULL mbuf at the consumer slot indicates a driver/firmware consistency
 * bug; it is counted, logged and (lines not fully visible here) can trigger
 * a firmware dump.
 *
 * NOTE(review): extracted chunk — some declarations, braces and the
 * m_freem() call are not visible; leading integers are extraction residue.
 */
2570 qlnx_free_tx_pkt(qlnx_host_t *ha, struct qlnx_fastpath *fp,
2571 struct qlnx_tx_queue *txq)
2577 struct eth_tx_bd *tx_data_bd;
2578 struct eth_tx_1st_bd *first_bd;
2581 idx = txq->sw_tx_cons;
2582 mp = txq->sw_tx_ring[idx].mp;
2583 map = txq->sw_tx_ring[idx].map;
/* NULL mbuf at consumer slot = ring corruption (or injected test error). */
2585 if ((mp == NULL) || QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL)){
2587 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL);
2589 QL_DPRINT1(ha, "(mp == NULL) "
2591 " ecore_prod_idx = 0x%x"
2592 " ecore_cons_idx = 0x%x"
2593 " hw_bd_cons = 0x%x"
2594 " txq_db_last = 0x%x"
2595 " elem_left = 0x%x\n",
2597 ecore_chain_get_prod_idx(&txq->tx_pbl),
2598 ecore_chain_get_cons_idx(&txq->tx_pbl),
2599 le16toh(*txq->hw_cons_ptr),
2601 ecore_chain_get_elem_left(&txq->tx_pbl));
2603 fp->err_tx_free_pkt_null++;
/* Capture hardware state for postmortem analysis. */
2606 qlnx_trigger_dump(ha);
/* Normal completion: account the packet, then release its DMA resources. */
2611 QLNX_INC_OPACKETS((ha->ifp));
2612 QLNX_INC_OBYTES((ha->ifp), (mp->m_pkthdr.len));
2614 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_POSTWRITE);
2615 bus_dmamap_unload(ha->tx_tag, map);
2617 fp->tx_pkts_freed++;
2618 fp->tx_pkts_completed++;
/* Consume the first BD to learn how many BDs this packet used, then
 * consume the remaining data BDs. */
2623 first_bd = (struct eth_tx_1st_bd *)ecore_chain_consume(&txq->tx_pbl);
2624 nbds = first_bd->data.nbds;
2626 // BD_SET_UNMAP_ADDR_LEN(first_bd, 0, 0);
2628 for (i = 1; i < nbds; i++) {
2629 tx_data_bd = ecore_chain_consume(&txq->tx_pbl);
2630 // BD_SET_UNMAP_ADDR_LEN(tx_data_bd, 0, 0);
/* Release the software ring slot. */
2632 txq->sw_tx_ring[idx].flags = 0;
2633 txq->sw_tx_ring[idx].mp = NULL;
2634 txq->sw_tx_ring[idx].map = (bus_dmamap_t)0;
/*
 * qlnx_tx_int()
 * Tx completion processing: walks the PBL chain from the current software
 * consumer up to the hardware consumer index, freeing one packet per step
 * and advancing sw_tx_cons (mod TX_RING_SIZE). A consumer-index distance
 * larger than the ring indicates a driver/firmware inconsistency; it is
 * counted, logged and (lines not fully visible here) can trigger a dump.
 *
 * NOTE(review): extracted chunk — declarations, some braces and the return
 * are not visible; leading integers are extraction residue.
 */
2640 qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
2641 struct qlnx_tx_queue *txq)
2648 hw_bd_cons = le16toh(*txq->hw_cons_ptr);
2650 while (hw_bd_cons !=
2651 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
/* Distance computed modulo 2^16 to handle index wrap-around. */
2653 if (hw_bd_cons < ecore_cons_idx) {
2654 diff = (1 << 16) - (ecore_cons_idx - hw_bd_cons);
2656 diff = hw_bd_cons - ecore_cons_idx;
2658 if ((diff > TX_RING_SIZE) ||
2659 QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF)){
2661 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF);
2663 QL_DPRINT1(ha, "(diff = 0x%x) "
2665 " ecore_prod_idx = 0x%x"
2666 " ecore_cons_idx = 0x%x"
2667 " hw_bd_cons = 0x%x"
2668 " txq_db_last = 0x%x"
2669 " elem_left = 0x%x\n",
2672 ecore_chain_get_prod_idx(&txq->tx_pbl),
2673 ecore_chain_get_cons_idx(&txq->tx_pbl),
2674 le16toh(*txq->hw_cons_ptr),
2676 ecore_chain_get_elem_left(&txq->tx_pbl));
2678 fp->err_tx_cons_idx_conflict++;
2681 qlnx_trigger_dump(ha);
/* Prefetch the next two ring entries to hide memory latency. */
2684 idx = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1);
2685 idx2 = (txq->sw_tx_cons + 2) & (TX_RING_SIZE - 1);
2686 prefetch(txq->sw_tx_ring[idx].mp);
2687 prefetch(txq->sw_tx_ring[idx2].mp);
2689 qlnx_free_tx_pkt(ha, fp, txq);
2691 txq->sw_tx_cons = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1);
/*
 * qlnx_transmit_locked() - drain the buf_ring for one fastpath while
 * holding fp->tx_mtx (caller must hold it). Enqueues the new mbuf, then
 * peeks/sends packets until the ring is empty or qlnx_send() fails; on
 * failure the mbuf is either put back (retry later) or the slot advanced.
 * Finally runs TX completion processing if enough completions are pending
 * or the descriptor chain is nearly full.
 * NOTE(review): elided listing — the assignment of 'txq' (used at line
 * 2742 below) is on a line not visible here; verify it is set to
 * fp->txq[0] in the full source.
 */
2697 qlnx_transmit_locked(struct ifnet *ifp,struct qlnx_fastpath *fp, struct mbuf *mp)
2700 struct qlnx_tx_queue *txq;
2705 ha = (qlnx_host_t *)fp->edev;
/* Interface down or link down: just queue the mbuf and return. */
2708 if ((!(ifp->if_drv_flags & IFF_DRV_RUNNING)) || (!ha->link_up)) {
2710 ret = drbr_enqueue(ifp, fp->tx_br, mp);
2715 ret = drbr_enqueue(ifp, fp->tx_br, mp);
2717 mp = drbr_peek(ifp, fp->tx_br);
2719 while (mp != NULL) {
2721 if (qlnx_send(ha, fp, &mp)) {
/* qlnx_send may have replaced or freed *mp; putback vs advance
 * depends on whether the mbuf survived (branch lines elided). */
2724 drbr_putback(ifp, fp->tx_br, mp);
2726 fp->tx_pkts_processed++;
2727 drbr_advance(ifp, fp->tx_br);
2729 goto qlnx_transmit_locked_exit;
2732 drbr_advance(ifp, fp->tx_br);
2733 fp->tx_pkts_transmitted++;
2734 fp->tx_pkts_processed++;
2737 mp = drbr_peek(ifp, fp->tx_br);
2740 qlnx_transmit_locked_exit:
/* Reap completions when past the threshold or the chain is nearly full. */
2741 if((qlnx_num_tx_compl(ha,fp, fp->txq[0]) > QLNX_TX_COMPL_THRESH) ||
2742 ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))
2743 < QLNX_TX_ELEM_MAX_THRESH))
2744 (void)qlnx_tx_int(ha, fp, fp->txq[0]);
2746 QL_DPRINT2(ha, "%s: exit ret = %d\n", __func__, ret);
/*
 * qlnx_transmit() - if_transmit entry point. Selects a fastpath by the
 * mbuf's RSS flowid (modulo the indirection table and ring count), then
 * tries to transmit inline under fp->tx_mtx; if the lock is contended,
 * the mbuf is queued on the buf_ring and the fastpath task is scheduled
 * to drain it.
 */
2752 qlnx_transmit(struct ifnet *ifp, struct mbuf *mp)
2754 qlnx_host_t *ha = (qlnx_host_t *)ifp->if_softc;
2755 struct qlnx_fastpath *fp;
2756 int rss_id = 0, ret = 0;
2758 #ifdef QLNX_TRACEPERF_DATA
2759 uint64_t tx_pkts = 0, tx_compl = 0;
2762 QL_DPRINT2(ha, "enter\n");
/* Flow-id check differs by FreeBSD version (pre-11 used M_FLOWID). */
2764 #if __FreeBSD_version >= 1100000
2765 if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE)
2767 if (mp->m_flags & M_FLOWID)
/* Map flowid -> fastpath index; second modulus line is elided here. */
2769 rss_id = (mp->m_pkthdr.flowid % ECORE_RSS_IND_TABLE_SIZE) %
2772 fp = &ha->fp_array[rss_id];
2774 if (fp->tx_br == NULL) {
2776 goto qlnx_transmit_exit;
/* Fast path: got the lock, transmit directly. */
2779 if (mtx_trylock(&fp->tx_mtx)) {
2781 #ifdef QLNX_TRACEPERF_DATA
2782 tx_pkts = fp->tx_pkts_transmitted;
2783 tx_compl = fp->tx_pkts_completed;
2786 ret = qlnx_transmit_locked(ifp, fp, mp);
2788 #ifdef QLNX_TRACEPERF_DATA
2789 fp->tx_pkts_trans_ctx += (fp->tx_pkts_transmitted - tx_pkts);
2790 fp->tx_pkts_compl_ctx += (fp->tx_pkts_completed - tx_compl);
2792 mtx_unlock(&fp->tx_mtx);
/* Slow path: lock contended — queue and let the taskqueue drain. */
2794 if (mp != NULL && (fp->fp_taskqueue != NULL)) {
2795 ret = drbr_enqueue(ifp, fp->tx_br, mp);
2796 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
2802 QL_DPRINT2(ha, "exit ret = %d\n", ret);
/*
 * qlnx_qflush() - if_qflush entry point: drop every mbuf queued on every
 * fastpath's buf_ring, under that fastpath's tx_mtx. The m_freem() call
 * inside the dequeue loop is on an elided line.
 */
2807 qlnx_qflush(struct ifnet *ifp)
2810 struct qlnx_fastpath *fp;
2814 ha = (qlnx_host_t *)ifp->if_softc;
2816 QL_DPRINT2(ha, "enter\n");
2818 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
2820 fp = &ha->fp_array[rss_id];
2826 mtx_lock(&fp->tx_mtx);
2828 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
2829 fp->tx_pkts_freed++;
2832 mtx_unlock(&fp->tx_mtx);
2835 QL_DPRINT2(ha, "exit\n");
/*
 * qlnx_txq_doorbell_wr32() - ring a TX doorbell: write 'value' at the
 * offset of reg_addr within the device's doorbell BAR, then issue read
 * barriers on both the register and doorbell resources to force posting.
 * NOTE(review): the 'cdev = &ha->cdev;' assignment is on an elided line.
 */
2841 qlnx_txq_doorbell_wr32(qlnx_host_t *ha, void *reg_addr, uint32_t value)
2843 struct ecore_dev *cdev;
2848 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)cdev->doorbells);
2850 bus_write_4(ha->pci_dbells, offset, value);
2851 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_READ);
2852 bus_barrier(ha->pci_dbells, 0, 0, BUS_SPACE_BARRIER_READ);
/*
 * qlnx_tcp_offset() - compute the byte offset of the TCP payload within
 * a TSO mbuf: Ethernet (optionally VLAN-tagged) header + IP/IPv6 header
 * + TCP header (th_off in 32-bit words). The switch(etype) header and
 * the IPv4 'case' label are on elided lines.
 * NOTE(review): 'th = (struct tcphdr *)(ip + 1)' assumes a fixed-size
 * IPv4 header (no IP options) even though th_off is honored for TCP —
 * confirm against the full source.
 */
2858 qlnx_tcp_offset(qlnx_host_t *ha, struct mbuf *mp)
2860 struct ether_vlan_header *eh = NULL;
2861 struct ip *ip = NULL;
2862 struct ip6_hdr *ip6 = NULL;
2863 struct tcphdr *th = NULL;
2864 uint32_t ehdrlen = 0, ip_hlen = 0, offset = 0;
/* Scratch buffer for m_copydata when headers are not contiguous. */
2867 uint8_t buf[sizeof(struct ip6_hdr)];
2871 eh = mtod(mp, struct ether_vlan_header *);
2873 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2874 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2875 etype = ntohs(eh->evl_proto);
2877 ehdrlen = ETHER_HDR_LEN;
2878 etype = ntohs(eh->evl_encap_proto);
/* IPv4 branch (case label elided). */
2884 ip = (struct ip *)(mp->m_data + ehdrlen);
2886 ip_hlen = sizeof (struct ip);
/* Header split across mbufs: copy into the stack buffer first. */
2888 if (mp->m_len < (ehdrlen + ip_hlen)) {
2889 m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
2890 ip = (struct ip *)buf;
2893 th = (struct tcphdr *)(ip + 1);
2894 offset = ip_hlen + ehdrlen + (th->th_off << 2);
2897 case ETHERTYPE_IPV6:
2898 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2900 ip_hlen = sizeof(struct ip6_hdr);
2902 if (mp->m_len < (ehdrlen + ip_hlen)) {
2903 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
2905 ip6 = (struct ip6_hdr *)buf;
2907 th = (struct tcphdr *)(ip6 + 1);
2908 offset = ip_hlen + ehdrlen + (th->th_off << 2);
/*
 * qlnx_tso_check() - verify that a TSO segment list satisfies the
 * firmware's sliding-window constraint: every run of
 * ETH_TX_LSO_WINDOW_BDS_NUM buffer descriptors (minus those holding the
 * header) must cover at least ETH_TX_LSO_WINDOW_MIN_LEN payload bytes.
 * Returns nonzero (presumably) when a window is too short, which makes
 * the caller defragment the mbuf; tail of the function is elided.
 */
2919 qlnx_tso_check(struct qlnx_fastpath *fp, bus_dma_segment_t *segs, int nsegs,
2923 uint32_t sum, nbds_in_hdr = 1;
2925 bus_dma_segment_t *s_seg;
2927 /* If the header spans multiple segments, skip those segments */
2929 if (nsegs < ETH_TX_LSO_WINDOW_BDS_NUM)
/* Skip whole segments consumed by the protocol header ('offset'). */
2934 while ((i < nsegs) && (offset >= segs->ds_len)) {
2935 offset = offset - segs->ds_len;
2941 window = ETH_TX_LSO_WINDOW_BDS_NUM - nbds_in_hdr;
/* Slide the window across the remaining payload segments. */
2945 while (nsegs >= window) {
2950 for (i = 0; i < window; i++){
2951 sum += s_seg->ds_len;
2955 if (sum < ETH_TX_LSO_WINDOW_MIN_LEN) {
2956 fp->tx_lso_wnd_min_len++;
/*
 * qlnx_send() - map one mbuf chain for DMA and build its buffer
 * descriptors (1st/2nd/3rd + data BDs) on the TX chain, then ring the
 * doorbell. Handles checksum-offload flags, VLAN insertion and TSO
 * header placement. On mapping failure or excessive segment counts the
 * mbuf is m_defrag()'d and retried once; *m_headp is updated/freed as
 * appropriate (exact branches are on elided lines).
 * NOTE(review): elided listing — braces, else branches, returns and some
 * declarations between the numbered lines are not visible.
 */
2968 qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct mbuf **m_headp)
2970 bus_dma_segment_t *segs;
2971 bus_dmamap_t map = 0;
2974 struct mbuf *m_head = *m_headp;
2979 struct qlnx_tx_queue *txq;
2981 struct eth_tx_1st_bd *first_bd;
2982 struct eth_tx_2nd_bd *second_bd;
2983 struct eth_tx_3rd_bd *third_bd;
2984 struct eth_tx_bd *tx_data_bd;
2987 uint32_t nbds_in_hdr = 0;
2988 uint32_t offset = 0;
/* NOTE(review): spelled QLNX_TRACE_PERF_DATA here but QLNX_TRACEPERF_DATA
 * in qlnx_transmit() — the two ifdef sections are gated independently. */
2990 #ifdef QLNX_TRACE_PERF_DATA
2994 QL_DPRINT8(ha, "enter\n");
/* Early bail-out when the descriptor chain is nearly exhausted. */
3006 if ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl)) <
3007 QLNX_TX_ELEM_MIN_THRESH) {
3009 fp->tx_nsegs_gt_elem_left++;
3010 fp->err_tx_nsegs_gt_elem_left++;
3015 idx = txq->sw_tx_prod;
3017 map = txq->sw_tx_ring[idx].map;
3020 ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
/* Debug-only min/max TSO packet length tracking. */
3023 if (ha->dbg_trace_tso_pkt_len) {
3024 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3025 if (!fp->tx_tso_min_pkt_len) {
3026 fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len;
/* NOTE(review): duplicate assignment — second line likely intended
 * to initialize tx_tso_max_pkt_len; confirm against full source. */
3027 fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len;
3029 if (fp->tx_tso_min_pkt_len > m_head->m_pkthdr.len)
3030 fp->tx_tso_min_pkt_len =
3031 m_head->m_pkthdr.len;
3032 if (fp->tx_tso_max_pkt_len < m_head->m_pkthdr.len)
3033 fp->tx_tso_max_pkt_len =
3034 m_head->m_pkthdr.len;
3039 if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
3040 offset = qlnx_tcp_offset(ha, m_head);
/*
 * Defragment when mapping overflowed (EFBIG), when a non-TSO packet
 * exceeds the segment limit, or when a TSO packet fails the LSO
 * sliding-window check.
 */
3042 if ((ret == EFBIG) ||
3043 ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && (
3044 (!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) ||
3045 ((m_head->m_pkthdr.csum_flags & CSUM_TSO) &&
3046 qlnx_tso_check(fp, segs, nsegs, offset))))) {
3050 QL_DPRINT8(ha, "EFBIG [%d]\n", m_head->m_pkthdr.len);
3054 m = m_defrag(m_head, M_NOWAIT);
3056 fp->err_tx_defrag++;
3057 fp->tx_pkts_freed++;
3060 QL_DPRINT1(ha, "m_defrag() = NULL [%d]\n", ret);
/* Retry DMA mapping of the defragmented chain. */
3067 if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
3068 segs, &nsegs, BUS_DMA_NOWAIT))) {
3070 fp->err_tx_defrag_dmamap_load++;
3073 "bus_dmamap_load_mbuf_sg failed0 [%d, %d]\n",
3074 ret, m_head->m_pkthdr.len);
3076 fp->tx_pkts_freed++;
/* Even after defrag a non-TSO packet may still have too many segments. */
3083 if ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) &&
3084 !(m_head->m_pkthdr.csum_flags & CSUM_TSO)) {
3086 fp->err_tx_non_tso_max_seg++;
3089 "(%d) nsegs too many for non-TSO [%d, %d]\n",
3090 ret, nsegs, m_head->m_pkthdr.len);
3092 fp->tx_pkts_freed++;
/* Recompute the TCP payload offset — defrag moved the headers. */
3098 if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
3099 offset = qlnx_tcp_offset(ha, m_head);
/* Non-EFBIG mapping error path (condition line elided). */
3103 fp->err_tx_dmamap_load++;
3105 QL_DPRINT1(ha, "bus_dmamap_load_mbuf_sg failed1 [%d, %d]\n",
3106 ret, m_head->m_pkthdr.len);
3107 fp->tx_pkts_freed++;
3113 QL_ASSERT(ha, (nsegs != 0), ("qlnx_send: empty packet"));
/* Debug-only histogram of segment counts. */
3115 if (ha->dbg_trace_tso_pkt_len) {
3116 if (nsegs < QLNX_FP_MAX_SEGS)
3117 fp->tx_pkts[(nsegs - 1)]++;
3119 fp->tx_pkts[(QLNX_FP_MAX_SEGS - 1)]++;
/*
 * Debug-only TSO packet-length and ring-occupancy histograms.
 * NOTE(review): several bucket bounds look like typos carried from
 * upstream (11288 vs 12288, 16394 vs 16384, 28762 vs 28672, 49512 vs
 * 49152) and the 53248-57344 range appears twice (buckets 14 and 15),
 * so bucket 15 is unreachable. The unbalanced parenthesis at line 3163
 * may be a listing artifact — confirm against the full source.
 */
3122 #ifdef QLNX_TRACE_PERF_DATA
3123 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3124 if(m_head->m_pkthdr.len <= 2048)
3125 fp->tx_pkts_hist[0]++;
3126 else if((m_head->m_pkthdr.len > 2048) &&
3127 (m_head->m_pkthdr.len <= 4096))
3128 fp->tx_pkts_hist[1]++;
3129 else if((m_head->m_pkthdr.len > 4096) &&
3130 (m_head->m_pkthdr.len <= 8192))
3131 fp->tx_pkts_hist[2]++;
3132 else if((m_head->m_pkthdr.len > 8192) &&
3133 (m_head->m_pkthdr.len <= 12288 ))
3134 fp->tx_pkts_hist[3]++;
3135 else if((m_head->m_pkthdr.len > 11288) &&
3136 (m_head->m_pkthdr.len <= 16394))
3137 fp->tx_pkts_hist[4]++;
3138 else if((m_head->m_pkthdr.len > 16384) &&
3139 (m_head->m_pkthdr.len <= 20480))
3140 fp->tx_pkts_hist[5]++;
3141 else if((m_head->m_pkthdr.len > 20480) &&
3142 (m_head->m_pkthdr.len <= 24576))
3143 fp->tx_pkts_hist[6]++;
3144 else if((m_head->m_pkthdr.len > 24576) &&
3145 (m_head->m_pkthdr.len <= 28672))
3146 fp->tx_pkts_hist[7]++;
3147 else if((m_head->m_pkthdr.len > 28762) &&
3148 (m_head->m_pkthdr.len <= 32768))
3149 fp->tx_pkts_hist[8]++;
3150 else if((m_head->m_pkthdr.len > 32768) &&
3151 (m_head->m_pkthdr.len <= 36864))
3152 fp->tx_pkts_hist[9]++;
3153 else if((m_head->m_pkthdr.len > 36864) &&
3154 (m_head->m_pkthdr.len <= 40960))
3155 fp->tx_pkts_hist[10]++;
3156 else if((m_head->m_pkthdr.len > 40960) &&
3157 (m_head->m_pkthdr.len <= 45056))
3158 fp->tx_pkts_hist[11]++;
3159 else if((m_head->m_pkthdr.len > 45056) &&
3160 (m_head->m_pkthdr.len <= 49152))
3161 fp->tx_pkts_hist[12]++;
3162 else if((m_head->m_pkthdr.len > 49512) &&
3163 m_head->m_pkthdr.len <= 53248))
3164 fp->tx_pkts_hist[13]++;
3165 else if((m_head->m_pkthdr.len > 53248) &&
3166 (m_head->m_pkthdr.len <= 57344))
3167 fp->tx_pkts_hist[14]++;
3168 else if((m_head->m_pkthdr.len > 53248) &&
3169 (m_head->m_pkthdr.len <= 57344))
3170 fp->tx_pkts_hist[15]++;
3171 else if((m_head->m_pkthdr.len > 57344) &&
3172 (m_head->m_pkthdr.len <= 61440))
3173 fp->tx_pkts_hist[16]++;
3175 fp->tx_pkts_hist[17]++;
/* Debug-only histogram of BD-ring occupancy at send time. */
3178 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3180 elem_left = ecore_chain_get_elem_left(&txq->tx_pbl);
3181 bd_used = TX_RING_SIZE - elem_left;
3185 else if((bd_used > 100) && (bd_used <= 500))
3187 else if((bd_used > 500) && (bd_used <= 1000))
3189 else if((bd_used > 1000) && (bd_used <= 2000))
3191 else if((bd_used > 3000) && (bd_used <= 4000))
3193 else if((bd_used > 4000) && (bd_used <= 5000))
3195 else if((bd_used > 6000) && (bd_used <= 7000))
3197 else if((bd_used > 7000) && (bd_used <= 8000))
3199 else if((bd_used > 8000) && (bd_used <= 9000))
3201 else if((bd_used > 9000) && (bd_used <= 10000))
3203 else if((bd_used > 10000) && (bd_used <= 11000))
3204 fp->tx_pkts_q[10]++;
3205 else if((bd_used > 11000) && (bd_used <= 12000))
3206 fp->tx_pkts_q[11]++;
3207 else if((bd_used > 12000) && (bd_used <= 13000))
3208 fp->tx_pkts_q[12]++;
3209 else if((bd_used > 13000) && (bd_used <= 14000))
3210 fp->tx_pkts_q[13]++;
3211 else if((bd_used > 14000) && (bd_used <= 15000))
3212 fp->tx_pkts_q[14]++;
3213 else if((bd_used > 15000) && (bd_used <= 16000))
3214 fp->tx_pkts_q[15]++;
3216 fp->tx_pkts_q[16]++;
3219 #endif /* end of QLNX_TRACE_PERF_DATA */
/*
 * Not enough BDs for this packet plus reserve: try reaping completions
 * once, then give up and mark the ring full.
 */
3221 if ((nsegs + QLNX_TX_ELEM_RESERVE) >
3222 (int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))) {
3224 QL_DPRINT1(ha, "(%d, 0x%x) insuffient BDs"
3225 " in chain[%d] trying to free packets\n",
3226 nsegs, elem_left, fp->rss_id);
3228 fp->tx_nsegs_gt_elem_left++;
3230 (void)qlnx_tx_int(ha, fp, txq);
3232 if ((nsegs + QLNX_TX_ELEM_RESERVE) > (int)(elem_left =
3233 ecore_chain_get_elem_left(&txq->tx_pbl))) {
3236 "(%d, 0x%x) insuffient BDs in chain[%d]\n",
3237 nsegs, elem_left, fp->rss_id);
3239 fp->err_tx_nsegs_gt_elem_left++;
3240 fp->tx_ring_full = 1;
3241 if (ha->storm_stats_enable)
3242 ha->storm_stats_gather = 1;
/* Commit: sync the DMA map and record the mbuf in the sw ring. */
3247 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);
3249 txq->sw_tx_ring[idx].mp = m_head;
/* Build the first BD and set offload flags. */
3251 first_bd = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
3253 memset(first_bd, 0, sizeof(*first_bd));
3255 first_bd->data.bd_flags.bitfields =
3256 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
3258 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, segs->ds_len);
3262 if (m_head->m_pkthdr.csum_flags & CSUM_IP) {
3263 first_bd->data.bd_flags.bitfields |=
3264 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
3267 if (m_head->m_pkthdr.csum_flags &
3268 (CSUM_UDP | CSUM_TCP | CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) {
3269 first_bd->data.bd_flags.bitfields |=
3270 (1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT);
3273 if (m_head->m_flags & M_VLANTAG) {
3274 first_bd->data.vlan = m_head->m_pkthdr.ether_vtag;
3275 first_bd->data.bd_flags.bitfields |=
3276 (1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT);
/*
 * TSO: the protocol header ('offset' bytes) must be isolated in the
 * first BD(s); three layouts follow depending on how the header aligns
 * with the first DMA segment.
 */
3279 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3281 first_bd->data.bd_flags.bitfields |=
3282 (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
3283 first_bd->data.bd_flags.bitfields |=
3284 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
/* Case 1: header exactly fills the first segment. */
3288 if (offset == segs->ds_len) {
3289 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
3293 second_bd = (struct eth_tx_2nd_bd *)
3294 ecore_chain_produce(&txq->tx_pbl);
3295 memset(second_bd, 0, sizeof(*second_bd));
3298 if (seg_idx < nsegs) {
3299 BD_SET_UNMAP_ADDR_LEN(second_bd, \
3300 (segs->ds_addr), (segs->ds_len));
3305 third_bd = (struct eth_tx_3rd_bd *)
3306 ecore_chain_produce(&txq->tx_pbl);
3307 memset(third_bd, 0, sizeof(*third_bd));
3308 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3309 third_bd->data.bitfields |=
3310 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3313 if (seg_idx < nsegs) {
3314 BD_SET_UNMAP_ADDR_LEN(third_bd, \
3315 (segs->ds_addr), (segs->ds_len));
3320 for (; seg_idx < nsegs; seg_idx++) {
3321 tx_data_bd = (struct eth_tx_bd *)
3322 ecore_chain_produce(&txq->tx_pbl);
3323 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3324 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \
/* Case 2: header ends inside the first segment — split it. */
3331 } else if (offset < segs->ds_len) {
3332 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
3334 second_bd = (struct eth_tx_2nd_bd *)
3335 ecore_chain_produce(&txq->tx_pbl);
3336 memset(second_bd, 0, sizeof(*second_bd));
3337 BD_SET_UNMAP_ADDR_LEN(second_bd, \
3338 (segs->ds_addr + offset),\
3339 (segs->ds_len - offset));
3343 third_bd = (struct eth_tx_3rd_bd *)
3344 ecore_chain_produce(&txq->tx_pbl);
3345 memset(third_bd, 0, sizeof(*third_bd));
3347 BD_SET_UNMAP_ADDR_LEN(third_bd, \
3350 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3351 third_bd->data.bitfields |=
3352 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3356 for (seg_idx = 2; seg_idx < nsegs; seg_idx++) {
3357 tx_data_bd = (struct eth_tx_bd *)
3358 ecore_chain_produce(&txq->tx_pbl);
3359 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3360 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \
/* Case 3: header spans multiple segments — walk segments,
 * splitting the one where the header ends. */
3368 offset = offset - segs->ds_len;
3371 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) {
3376 tx_data_bd = (struct eth_tx_bd *)
3377 ecore_chain_produce(&txq->tx_pbl);
3378 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3380 if (second_bd == NULL) {
3381 second_bd = (struct eth_tx_2nd_bd *)
3383 } else if (third_bd == NULL) {
3384 third_bd = (struct eth_tx_3rd_bd *)
3388 if (offset && (offset < segs->ds_len)) {
3389 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3390 segs->ds_addr, offset);
3392 tx_data_bd = (struct eth_tx_bd *)
3393 ecore_chain_produce(&txq->tx_pbl);
3395 memset(tx_data_bd, 0,
3396 sizeof(*tx_data_bd));
3398 if (second_bd == NULL) {
3400 (struct eth_tx_2nd_bd *)tx_data_bd;
3401 } else if (third_bd == NULL) {
3403 (struct eth_tx_3rd_bd *)tx_data_bd;
3405 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3406 (segs->ds_addr + offset), \
3407 (segs->ds_len - offset));
3412 offset = offset - segs->ds_len;
3413 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3414 segs->ds_addr, segs->ds_len);
3420 if (third_bd == NULL) {
3421 third_bd = (struct eth_tx_3rd_bd *)
3422 ecore_chain_produce(&txq->tx_pbl);
3423 memset(third_bd, 0, sizeof(*third_bd));
3426 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3427 third_bd->data.bitfields |=
3428 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
/* Non-TSO path: one data BD per remaining DMA segment. */
3433 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) {
3434 tx_data_bd = (struct eth_tx_bd *)
3435 ecore_chain_produce(&txq->tx_pbl);
3436 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3437 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, segs->ds_addr,\
3442 first_bd->data.bitfields =
3443 (m_head->m_pkthdr.len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
3444 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
3445 first_bd->data.bitfields =
3446 htole16(first_bd->data.bitfields);
3447 fp->tx_non_tso_pkts++;
3451 first_bd->data.nbds = nbd;
/* Debug-only min/max segment-count tracking. */
3453 if (ha->dbg_trace_tso_pkt_len) {
3454 if (fp->tx_tso_max_nsegs < nsegs)
3455 fp->tx_tso_max_nsegs = nsegs;
3457 if ((nsegs < fp->tx_tso_min_nsegs) || (!fp->tx_tso_min_nsegs))
3458 fp->tx_tso_min_nsegs = nsegs;
/* Advance the sw producer and ring the doorbell with the new prod idx. */
3461 txq->sw_tx_ring[idx].nsegs = nsegs;
3462 txq->sw_tx_prod = (txq->sw_tx_prod + 1) & (TX_RING_SIZE - 1);
3464 txq->tx_db.data.bd_prod =
3465 htole16(ecore_chain_get_prod_idx(&txq->tx_pbl));
3467 qlnx_txq_doorbell_wr32(ha, txq->doorbell_addr, txq->tx_db.raw);
3469 QL_DPRINT8(ha, "exit\n");
/*
 * qlnx_stop() - bring the interface down: clear RUNNING/OACTIVE, then
 * bounce each fastpath's tx_mtx so every TX context observes the new
 * if_drv_flags, and kick each fastpath task so it can drain/exit.
 */
3474 qlnx_stop(qlnx_host_t *ha)
3476 struct ifnet *ifp = ha->ifp;
3482 ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
3485 * We simply lock and unlock each fp->tx_mtx to
3486 * propagate the if_drv_flags
3487 * state to each tx thread
3489 QL_DPRINT1(ha, "QLNX STATE = %d\n",ha->state);
3491 if (ha->state == QLNX_STATE_OPEN) {
3492 for (i = 0; i < ha->num_rss; i++) {
3493 struct qlnx_fastpath *fp = &ha->fp_array[i];
/* lock/unlock pair acts as a memory barrier toward TX paths */
3495 mtx_lock(&fp->tx_mtx);
3496 mtx_unlock(&fp->tx_mtx);
3498 if (fp->fp_taskqueue != NULL)
3499 taskqueue_enqueue(fp->fp_taskqueue,
/* qlnx_get_ifq_snd_maxlen() - maximum send-queue length for the ifnet
 * (one less than the TX ring size). */
3510 qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha)
3512 return(TX_RING_SIZE - 1);
/* qlnx_get_mac_addr() - return the permanent MAC address reported by
 * the first hardware function (hwfns[0]). */
3516 qlnx_get_mac_addr(qlnx_host_t *ha)
3518 struct ecore_hwfn *p_hwfn;
3520 p_hwfn = &ha->cdev.hwfns[0];
3521 return (p_hwfn->hw_info.hw_mac_addr);
/*
 * qlnx_get_optics() - map the link's media type and speed to an ifmedia
 * word for ifconfig reporting (fiber/unspecified vs. DAC twinax), from
 * 1G up to 100G. Unknown combinations fall through to IFM_UNKNOWN.
 * Speeds are in Mbit/s (e.g. 100 * 1000 == 100G).
 */
3525 qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link)
3527 uint32_t ifm_type = 0;
3529 switch (if_link->media_type) {
3531 case MEDIA_MODULE_FIBER:
3532 case MEDIA_UNSPECIFIED:
3533 if (if_link->speed == (100 * 1000))
3534 ifm_type = QLNX_IFM_100G_SR4;
3535 else if (if_link->speed == (40 * 1000))
3536 ifm_type = IFM_40G_SR4;
3537 else if (if_link->speed == (25 * 1000))
3538 ifm_type = QLNX_IFM_25G_SR;
3539 else if (if_link->speed == (10 * 1000))
3540 ifm_type = (IFM_10G_LR | IFM_10G_SR);
3541 else if (if_link->speed == (1 * 1000))
3542 ifm_type = (IFM_1000_SX | IFM_1000_LX);
3546 case MEDIA_DA_TWINAX:
3547 if (if_link->speed == (100 * 1000))
3548 ifm_type = QLNX_IFM_100G_CR4;
3549 else if (if_link->speed == (40 * 1000))
3550 ifm_type = IFM_40G_CR4;
3551 else if (if_link->speed == (25 * 1000))
3552 ifm_type = QLNX_IFM_25G_CR;
3553 else if (if_link->speed == (10 * 1000))
3554 ifm_type = IFM_10G_TWINAX;
3559 ifm_type = IFM_UNKNOWN;
3567 /*****************************************************************************
3568 * Interrupt Service Functions
3569 *****************************************************************************/
/*
 * qlnx_rx_jumbo_chain() - gather the continuation buffers of a jumbo
 * frame into an mbuf chain and append it to mp_head->m_next. 'len' is
 * the number of payload bytes still to collect; each iteration consumes
 * one RX BD and takes up to rx_buf_size bytes from it. The enclosing
 * loop header and the mpf/mpl chain-linking lines are elided in this
 * listing.
 */
3572 qlnx_rx_jumbo_chain(qlnx_host_t *ha, struct qlnx_fastpath *fp,
3573 	struct mbuf *mp_head, uint16_t len)
3575 struct mbuf *mp, *mpf, *mpl;
3576 struct sw_rx_data *sw_rx_data;
3577 struct qlnx_rx_queue *rxq;
3578 uint16_t len_in_buffer;
3581 mpf = mpl = mp = NULL;
3585 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3587 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3588 mp = sw_rx_data->data;
/* Missing software buffer: count it and skip this BD. */
3591 QL_DPRINT1(ha, "mp = NULL\n");
3592 fp->err_rx_mp_null++;
3594 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3601 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3602 BUS_DMASYNC_POSTREAD);
/* Cannot replenish the ring: recycle the buffer, drop this fragment. */
3604 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3606 QL_DPRINT1(ha, "New buffer allocation failed, dropping"
3607 " incoming packet and reusing its buffer\n");
3609 qlnx_reuse_rx_data(rxq);
3610 fp->err_rx_alloc_errors++;
3617 ecore_chain_consume(&rxq->rx_bd_ring);
/* Clamp this fragment to the RX buffer size. */
3619 if (len > rxq->rx_buf_size)
3620 len_in_buffer = rxq->rx_buf_size;
3622 len_in_buffer = len;
3624 len = len - len_in_buffer;
/* Continuation buffers carry data only — strip the pkthdr flag. */
3626 mp->m_flags &= ~M_PKTHDR;
3628 mp->m_len = len_in_buffer;
/* Attach the gathered chain to the head mbuf. */
3639 mp_head->m_next = mpf;
/*
 * qlnx_tpa_start() - begin a hardware LRO/TPA aggregation. Takes the
 * current RX buffer as the aggregation head, swaps the pre-allocated
 * tpa_info buffer into the RX ring in its place, consumes the BDs
 * advertised in ext_bd_len_list, and records csum/VLAN/RSS metadata on
 * the head mbuf. On any error the aggregation is marked
 * QLNX_AGG_STATE_ERROR so qlnx_tpa_end() can discard it.
 * NOTE(review): elided listing — braces/else/goto lines between the
 * numbered statements are not visible.
 */
3645 qlnx_tpa_start(qlnx_host_t *ha,
3646 	struct qlnx_fastpath *fp,
3647 	struct qlnx_rx_queue *rxq,
3648 	struct eth_fast_path_rx_tpa_start_cqe *cqe)
3651 struct ifnet *ifp = ha->ifp;
3653 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL;
3654 struct sw_rx_data *sw_rx_data;
3657 struct eth_rx_bd *rx_bd;
3660 #if __FreeBSD_version >= 1100000
3662 #endif /* #if __FreeBSD_version >= 1100000 */
3665 agg_index = cqe->tpa_agg_index;
3667 QL_DPRINT7(ha, "[rss_id = %d]: enter\n \
3669 \t bitfields = 0x%x\n \
3670 \t seg_len = 0x%x\n \
3671 \t pars_flags = 0x%x\n \
3672 \t vlan_tag = 0x%x\n \
3673 \t rss_hash = 0x%x\n \
3674 \t len_on_first_bd = 0x%x\n \
3675 \t placement_offset = 0x%x\n \
3676 \t tpa_agg_index = 0x%x\n \
3677 \t header_len = 0x%x\n \
3678 \t ext_bd_len_list[0] = 0x%x\n \
3679 \t ext_bd_len_list[1] = 0x%x\n \
3680 \t ext_bd_len_list[2] = 0x%x\n \
3681 \t ext_bd_len_list[3] = 0x%x\n \
3682 \t ext_bd_len_list[4] = 0x%x\n",
3683 fp->rss_id, cqe->type, cqe->bitfields, cqe->seg_len,
3684 cqe->pars_flags.flags, cqe->vlan_tag,
3685 cqe->rss_hash, cqe->len_on_first_bd, cqe->placement_offset,
3686 cqe->tpa_agg_index, cqe->header_len,
3687 cqe->ext_bd_len_list[0], cqe->ext_bd_len_list[1],
3688 cqe->ext_bd_len_list[2], cqe->ext_bd_len_list[3],
3689 cqe->ext_bd_len_list[4]);
/* Sanity-check the firmware-supplied aggregation index. */
3691 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
3692 fp->err_rx_tpa_invalid_agg_num++;
3696 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3697 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD);
3698 mp = sw_rx_data->data;
3700 QL_DPRINT7(ha, "[rss_id = %d]: mp = %p \n ", fp->rss_id, mp);
3703 QL_DPRINT7(ha, "[%d]: mp = NULL\n", fp->rss_id);
3704 fp->err_rx_mp_null++;
3705 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
/* Hardware flagged an error on this CQE: drop and reuse the buffer. */
3710 if ((le16toh(cqe->pars_flags.flags)) & CQE_FLAGS_ERR) {
3712 QL_DPRINT7(ha, "[%d]: CQE in CONS = %u has error,"
3713 " flags = %x, dropping incoming packet\n", fp->rss_id,
3714 rxq->sw_rx_cons, le16toh(cqe->pars_flags.flags));
3716 fp->err_rx_hw_errors++;
3718 qlnx_reuse_rx_data(rxq);
3720 QLNX_INC_IERRORS(ifp);
3725 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3727 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
3728 " dropping incoming packet and reusing its buffer\n",
3731 fp->err_rx_alloc_errors++;
3732 QLNX_INC_IQDROPS(ifp);
3735 * Load the tpa mbuf into the rx ring and save the
3739 map = sw_rx_data->map;
3740 addr = sw_rx_data->dma_addr;
3742 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
/* Swap: the spare tpa_info buffer goes into the ring, and the ring
 * buffer (our aggregation head) is parked in tpa_info. */
3744 sw_rx_data->data = rxq->tpa_info[agg_index].rx_buf.data;
3745 sw_rx_data->dma_addr = rxq->tpa_info[agg_index].rx_buf.dma_addr;
3746 sw_rx_data->map = rxq->tpa_info[agg_index].rx_buf.map;
3748 rxq->tpa_info[agg_index].rx_buf.data = mp;
3749 rxq->tpa_info[agg_index].rx_buf.dma_addr = addr;
3750 rxq->tpa_info[agg_index].rx_buf.map = map;
3752 rx_bd = (struct eth_rx_bd *)
3753 ecore_chain_produce(&rxq->rx_bd_ring);
3755 rx_bd->addr.hi = htole32(U64_HI(sw_rx_data->dma_addr));
3756 rx_bd->addr.lo = htole32(U64_LO(sw_rx_data->dma_addr));
3758 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3759 BUS_DMASYNC_PREREAD);
3761 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
3762 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3764 ecore_chain_consume(&rxq->rx_bd_ring);
3766 /* Now reuse any buffers posted in ext_bd_len_list */
3767 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
3769 if (cqe->ext_bd_len_list[i] == 0)
3772 qlnx_reuse_rx_data(rxq);
/* Error path for the alloc-failure case above. */
3775 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
/* Aggregation slot already in use: drop and reset the slot. */
3779 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
3781 QL_DPRINT7(ha, "[%d]: invalid aggregation state,"
3782 " dropping incoming packet and reusing its buffer\n",
3785 QLNX_INC_IQDROPS(ifp);
3787 /* if we already have mbuf head in aggregation free it */
3788 if (rxq->tpa_info[agg_index].mpf) {
3789 m_freem(rxq->tpa_info[agg_index].mpf);
3790 rxq->tpa_info[agg_index].mpl = NULL;
3792 rxq->tpa_info[agg_index].mpf = mp;
3793 rxq->tpa_info[agg_index].mpl = NULL;
3795 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3796 ecore_chain_consume(&rxq->rx_bd_ring);
3798 /* Now reuse any buffers posted in ext_bd_len_list */
3799 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
3801 if (cqe->ext_bd_len_list[i] == 0)
3804 qlnx_reuse_rx_data(rxq);
3806 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
3812 * first process the ext_bd_len_list
3813 * if this fails then we simply drop the packet
3815 ecore_chain_consume(&rxq->rx_bd_ring);
3816 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
/* Collect the continuation buffers listed by the firmware. */
3818 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
3820 QL_DPRINT7(ha, "[%d]: 4\n ", fp->rss_id);
3822 if (cqe->ext_bd_len_list[i] == 0)
3825 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3826 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3827 BUS_DMASYNC_POSTREAD);
3829 mpc = sw_rx_data->data;
3832 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
3833 fp->err_rx_mp_null++;
3837 rxq->tpa_info[agg_index].agg_state =
3838 QLNX_AGG_STATE_ERROR;
3839 ecore_chain_consume(&rxq->rx_bd_ring);
3841 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3845 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3846 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
3847 " dropping incoming packet and reusing its"
3848 " buffer\n", fp->rss_id);
3850 qlnx_reuse_rx_data(rxq);
3856 rxq->tpa_info[agg_index].agg_state =
3857 QLNX_AGG_STATE_ERROR;
3859 ecore_chain_consume(&rxq->rx_bd_ring);
3861 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
/* Link the continuation mbuf (data-only) into the chain. */
3866 mpc->m_flags &= ~M_PKTHDR;
3868 mpc->m_len = cqe->ext_bd_len_list[i];
3874 mpl->m_len = ha->rx_buf_size;
3879 ecore_chain_consume(&rxq->rx_bd_ring);
3881 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
/* If anything above failed, free/park the head and bail out. */
3884 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
3886 QL_DPRINT7(ha, "[%d]: invalid aggregation state, dropping"
3887 " incoming packet and reusing its buffer\n",
3890 QLNX_INC_IQDROPS(ifp);
3892 rxq->tpa_info[agg_index].mpf = mp;
3893 rxq->tpa_info[agg_index].mpl = NULL;
/* Success: record head/tail and the placement offset for tpa_end. */
3898 rxq->tpa_info[agg_index].placement_offset = cqe->placement_offset;
3901 mp->m_len = ha->rx_buf_size;
3903 rxq->tpa_info[agg_index].mpf = mp;
3904 rxq->tpa_info[agg_index].mpl = mpl;
3906 mp->m_len = cqe->len_on_first_bd + cqe->placement_offset;
3907 rxq->tpa_info[agg_index].mpf = mp;
3908 rxq->tpa_info[agg_index].mpl = mp;
3912 mp->m_flags |= M_PKTHDR;
3914 /* assign packet to this interface interface */
3915 mp->m_pkthdr.rcvif = ifp;
/* Checksum flags are set below once aggregation completes. */
3918 mp->m_pkthdr.csum_flags = 0;
3920 //mp->m_pkthdr.flowid = fp->rss_id;
3921 mp->m_pkthdr.flowid = cqe->rss_hash;
3923 #if __FreeBSD_version >= 1100000
/* Translate the firmware RSS hash type into the mbuf hash type. */
3925 hash_type = cqe->bitfields &
3926 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
3927 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);
3929 switch (hash_type) {
3931 case RSS_HASH_TYPE_IPV4:
3932 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
3935 case RSS_HASH_TYPE_TCP_IPV4:
3936 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
3939 case RSS_HASH_TYPE_IPV6:
3940 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
3943 case RSS_HASH_TYPE_TCP_IPV6:
3944 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
3948 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
/* pre-11 FreeBSD uses the M_FLOWID flag instead of hash types. */
3953 mp->m_flags |= M_FLOWID;
/* TPA frames are TCP: hardware validated both IP and L4 checksums. */
3956 mp->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID |
3957 CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3959 mp->m_pkthdr.csum_data = 0xFFFF;
3961 if (CQE_HAS_VLAN(cqe->pars_flags.flags)) {
3962 mp->m_pkthdr.ether_vtag = le16toh(cqe->vlan_tag);
3963 mp->m_flags |= M_VLANTAG;
3966 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_START;
3968 QL_DPRINT7(ha, "[%d]: 5\n\tagg_state = %d\n\t mpf = %p mpl = %p\n",
3969 fp->rss_id, rxq->tpa_info[agg_index].agg_state,
3970 rxq->tpa_info[agg_index].mpf, rxq->tpa_info[agg_index].mpl);
/*
 * qlnx_tpa_cont() - continue an in-progress TPA aggregation: for each
 * nonzero entry in the CQE's len_list, take the next RX buffer off the
 * ring, replenish the ring, and append the buffer to the aggregation's
 * mbuf chain (tpa_info[agg_index].mpl). On error the aggregation is
 * marked QLNX_AGG_STATE_ERROR and the buffers are recycled.
 * NOTE(review): elided listing — braces/continue/return lines between
 * the numbered statements are not visible.
 */
3976 qlnx_tpa_cont(qlnx_host_t *ha, struct qlnx_fastpath *fp,
3977 	struct qlnx_rx_queue *rxq,
3978 	struct eth_fast_path_rx_tpa_cont_cqe *cqe)
3980 struct sw_rx_data *sw_rx_data;
3982 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL;
3989 QL_DPRINT7(ha, "[%d]: enter\n \
3991 \t tpa_agg_index = 0x%x\n \
3992 \t len_list[0] = 0x%x\n \
3993 \t len_list[1] = 0x%x\n \
3994 \t len_list[2] = 0x%x\n \
3995 \t len_list[3] = 0x%x\n \
3996 \t len_list[4] = 0x%x\n \
3997 \t len_list[5] = 0x%x\n",
3998 fp->rss_id, cqe->type, cqe->tpa_agg_index,
3999 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
4000 cqe->len_list[3], cqe->len_list[4], cqe->len_list[5]);
4002 agg_index = cqe->tpa_agg_index;
/* Sanity-check the firmware-supplied aggregation index. */
4004 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
4005 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
4006 fp->err_rx_tpa_invalid_agg_num++;
4011 for (i = 0; i < ETH_TPA_CQE_CONT_LEN_LIST_SIZE; i++) {
4013 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);
4015 if (cqe->len_list[i] == 0)
/* Aggregation already failed: just recycle this ring buffer. */
4018 if (rxq->tpa_info[agg_index].agg_state !=
4019 QLNX_AGG_STATE_START) {
4020 qlnx_reuse_rx_data(rxq);
4024 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4025 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4026 BUS_DMASYNC_POSTREAD);
4028 mpc = sw_rx_data->data;
4032 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4034 fp->err_rx_mp_null++;
4038 rxq->tpa_info[agg_index].agg_state =
4039 QLNX_AGG_STATE_ERROR;
4040 ecore_chain_consume(&rxq->rx_bd_ring);
4042 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
/* Cannot replenish the ring: recycle and mark the agg failed. */
4046 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4048 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4049 " dropping incoming packet and reusing its"
4050 " buffer\n", fp->rss_id);
4052 qlnx_reuse_rx_data(rxq);
4058 rxq->tpa_info[agg_index].agg_state =
4059 QLNX_AGG_STATE_ERROR;
4061 ecore_chain_consume(&rxq->rx_bd_ring);
4063 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
/* Append the continuation buffer (data-only) to the local chain. */
4068 mpc->m_flags &= ~M_PKTHDR;
4070 mpc->m_len = cqe->len_list[i];
4076 mpl->m_len = ha->rx_buf_size;
4081 ecore_chain_consume(&rxq->rx_bd_ring);
4083 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4086 QL_DPRINT7(ha, "[%d]: 2\n" "\tmpf = %p mpl = %p\n",
4087 fp->rss_id, mpf, mpl);
/* Splice the new chain onto the aggregation's tail. */
4090 mp = rxq->tpa_info[agg_index].mpl;
4091 mp->m_len = ha->rx_buf_size;
4093 rxq->tpa_info[agg_index].mpl = mpl;
/*
 * qlnx_tpa_end() - finish a TPA aggregation: consume any final buffers
 * in the CQE's len_list, then — if the aggregation is healthy — strip
 * the placement offset, fix up the mbuf lengths to match
 * total_packet_len, update interface counters, and hand the packet to
 * if_input(). Returns the number of coalesced TCP segments so the
 * caller can account for them. On error the chain is freed and the
 * aggregation slot reset.
 * NOTE(review): elided listing — braces/continue/return lines between
 * the numbered statements are not visible.
 */
4100 qlnx_tpa_end(qlnx_host_t *ha, struct qlnx_fastpath *fp,
4101 	struct qlnx_rx_queue *rxq,
4102 	struct eth_fast_path_rx_tpa_end_cqe *cqe)
4104 struct sw_rx_data *sw_rx_data;
4106 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL;
4110 struct ifnet *ifp = ha->ifp;
4115 QL_DPRINT7(ha, "[%d]: enter\n \
4117 \t tpa_agg_index = 0x%x\n \
4118 \t total_packet_len = 0x%x\n \
4119 \t num_of_bds = 0x%x\n \
4120 \t end_reason = 0x%x\n \
4121 \t num_of_coalesced_segs = 0x%x\n \
4122 \t ts_delta = 0x%x\n \
4123 \t len_list[0] = 0x%x\n \
4124 \t len_list[1] = 0x%x\n \
4125 \t len_list[2] = 0x%x\n \
4126 \t len_list[3] = 0x%x\n",
4127 fp->rss_id, cqe->type, cqe->tpa_agg_index,
4128 cqe->total_packet_len, cqe->num_of_bds,
4129 cqe->end_reason, cqe->num_of_coalesced_segs, cqe->ts_delta,
4130 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
4133 agg_index = cqe->tpa_agg_index;
/* Sanity-check the firmware-supplied aggregation index. */
4135 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
4137 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
4139 fp->err_rx_tpa_invalid_agg_num++;
/* Same per-buffer gather loop as qlnx_tpa_cont(). */
4144 for (i = 0; i < ETH_TPA_CQE_END_LEN_LIST_SIZE; i++) {
4146 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);
4148 if (cqe->len_list[i] == 0)
4151 if (rxq->tpa_info[agg_index].agg_state !=
4152 QLNX_AGG_STATE_START) {
4154 QL_DPRINT7(ha, "[%d]: 2\n ", fp->rss_id);
4156 qlnx_reuse_rx_data(rxq);
4160 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4161 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4162 BUS_DMASYNC_POSTREAD);
4164 mpc = sw_rx_data->data;
4168 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4170 fp->err_rx_mp_null++;
4174 rxq->tpa_info[agg_index].agg_state =
4175 QLNX_AGG_STATE_ERROR;
4176 ecore_chain_consume(&rxq->rx_bd_ring);
4178 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4182 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4183 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4184 " dropping incoming packet and reusing its"
4185 " buffer\n", fp->rss_id);
4187 qlnx_reuse_rx_data(rxq);
4193 rxq->tpa_info[agg_index].agg_state =
4194 QLNX_AGG_STATE_ERROR;
4196 ecore_chain_consume(&rxq->rx_bd_ring);
4198 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4203 mpc->m_flags &= ~M_PKTHDR;
4205 mpc->m_len = cqe->len_list[i];
4211 mpl->m_len = ha->rx_buf_size;
4216 ecore_chain_consume(&rxq->rx_bd_ring);
4218 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4221 QL_DPRINT7(ha, "[%d]: 5\n ", fp->rss_id);
4225 QL_DPRINT7(ha, "[%d]: 6\n ", fp->rss_id);
/* Splice the final buffers onto the aggregation's tail. */
4227 mp = rxq->tpa_info[agg_index].mpl;
4228 mp->m_len = ha->rx_buf_size;
/* Aggregation went bad at some point: free the chain, reset the slot. */
4232 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_START) {
4234 QL_DPRINT7(ha, "[%d]: 7\n ", fp->rss_id);
4236 if (rxq->tpa_info[agg_index].mpf != NULL)
4237 m_freem(rxq->tpa_info[agg_index].mpf);
4238 rxq->tpa_info[agg_index].mpf = NULL;
4239 rxq->tpa_info[agg_index].mpl = NULL;
4240 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
/* Healthy aggregation: strip placement offset, set final lengths. */
4244 mp = rxq->tpa_info[agg_index].mpf;
4245 m_adj(mp, rxq->tpa_info[agg_index].placement_offset);
4246 mp->m_pkthdr.len = cqe->total_packet_len;
4248 if (mp->m_next == NULL)
4249 mp->m_len = mp->m_pkthdr.len;
4251 /* compute the total packet length */
4253 while (mpf != NULL) {
/* Pad the tail mbuf if the chain is shorter than the CQE's total. */
4258 if (cqe->total_packet_len > len) {
4259 mpl = rxq->tpa_info[agg_index].mpl;
4260 mpl->m_len += (cqe->total_packet_len - len);
4264 QLNX_INC_IPACKETS(ifp);
4265 QLNX_INC_IBYTES(ifp, (cqe->total_packet_len));
4267 QL_DPRINT7(ha, "[%d]: 8 csum_data = 0x%x csum_flags = 0x%lx\n \
4268 m_len = 0x%x m_pkthdr_len = 0x%x\n",
4269 fp->rss_id, mp->m_pkthdr.csum_data,
4270 mp->m_pkthdr.csum_flags, mp->m_len, mp->m_pkthdr.len);
4272 (*ifp->if_input)(ifp, mp);
4274 rxq->tpa_info[agg_index].mpf = NULL;
4275 rxq->tpa_info[agg_index].mpl = NULL;
4276 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
4278 return (cqe->num_of_coalesced_segs);
4282 qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
4285 uint16_t hw_comp_cons, sw_comp_cons;
4287 struct qlnx_rx_queue *rxq = fp->rxq;
4288 struct ifnet *ifp = ha->ifp;
4289 struct ecore_dev *cdev = &ha->cdev;
4290 struct ecore_hwfn *p_hwfn;
4292 #ifdef QLNX_SOFT_LRO
4293 struct lro_ctrl *lro;
4296 #endif /* #ifdef QLNX_SOFT_LRO */
4298 hw_comp_cons = le16toh(*rxq->hw_cons_ptr);
4299 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
4301 p_hwfn = &ha->cdev.hwfns[(fp->rss_id % cdev->num_hwfns)];
4303 /* Memory barrier to prevent the CPU from doing speculative reads of CQE
4304 * / BD in the while-loop before reading hw_comp_cons. If the CQE is
4305 * read before it is written by FW, then FW writes CQE and SB, and then
4306 * the CPU reads the hw_comp_cons, it will use an old CQE.
4309 /* Loop to complete all indicated BDs */
4310 while (sw_comp_cons != hw_comp_cons) {
4311 union eth_rx_cqe *cqe;
4312 struct eth_fast_path_rx_reg_cqe *fp_cqe;
4313 struct sw_rx_data *sw_rx_data;
4314 register struct mbuf *mp;
4315 enum eth_rx_cqe_type cqe_type;
4316 uint16_t len, pad, len_on_first_bd;
4318 #if __FreeBSD_version >= 1100000
4320 #endif /* #if __FreeBSD_version >= 1100000 */
4322 /* Get the CQE from the completion ring */
4323 cqe = (union eth_rx_cqe *)
4324 ecore_chain_consume(&rxq->rx_comp_ring);
4325 cqe_type = cqe->fast_path_regular.type;
4327 if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) {
4328 QL_DPRINT3(ha, "Got a slowath CQE\n");
4330 ecore_eth_cqe_completion(p_hwfn,
4331 (struct eth_slow_path_rx_cqe *)cqe);
4335 if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) {
4339 case ETH_RX_CQE_TYPE_TPA_START:
4340 qlnx_tpa_start(ha, fp, rxq,
4341 &cqe->fast_path_tpa_start);
4345 case ETH_RX_CQE_TYPE_TPA_CONT:
4346 qlnx_tpa_cont(ha, fp, rxq,
4347 &cqe->fast_path_tpa_cont);
4351 case ETH_RX_CQE_TYPE_TPA_END:
4352 rx_pkt += qlnx_tpa_end(ha, fp, rxq,
4353 &cqe->fast_path_tpa_end);
4364 /* Get the data from the SW ring */
4365 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4366 mp = sw_rx_data->data;
4369 QL_DPRINT1(ha, "mp = NULL\n");
4370 fp->err_rx_mp_null++;
4372 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4375 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4376 BUS_DMASYNC_POSTREAD);
4379 fp_cqe = &cqe->fast_path_regular;/* MK CR TPA check assembly */
4380 len = le16toh(fp_cqe->pkt_len);
4381 pad = fp_cqe->placement_offset;
4383 QL_DPRINT3(ha, "CQE type = %x, flags = %x, vlan = %x,"
4384 " len %u, parsing flags = %d pad = %d\n",
4385 cqe_type, fp_cqe->bitfields,
4386 le16toh(fp_cqe->vlan_tag),
4387 len, le16toh(fp_cqe->pars_flags.flags), pad);
4389 data = mtod(mp, uint8_t *);
4393 qlnx_dump_buf8(ha, __func__, data, len);
4395 /* For every Rx BD consumed, we allocate a new BD so the BD ring
4396 * is always with a fixed size. If allocation fails, we take the
4397 * consumed BD and return it to the ring in the PROD position.
4398 * The packet that was received on that BD will be dropped (and
4399 * not passed to the upper stack).
4401 /* If this is an error packet then drop it */
4402 if ((le16toh(cqe->fast_path_regular.pars_flags.flags)) &
4405 QL_DPRINT1(ha, "CQE in CONS = %u has error, flags = %x,"
4406 " dropping incoming packet\n", sw_comp_cons,
4407 le16toh(cqe->fast_path_regular.pars_flags.flags));
4408 fp->err_rx_hw_errors++;
4410 qlnx_reuse_rx_data(rxq);
4412 QLNX_INC_IERRORS(ifp);
4417 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4419 QL_DPRINT1(ha, "New buffer allocation failed, dropping"
4420 " incoming packet and reusing its buffer\n");
4421 qlnx_reuse_rx_data(rxq);
4423 fp->err_rx_alloc_errors++;
4425 QLNX_INC_IQDROPS(ifp);
4430 ecore_chain_consume(&rxq->rx_bd_ring);
4432 len_on_first_bd = fp_cqe->len_on_first_bd;
4434 mp->m_pkthdr.len = len;
4436 QL_DPRINT1(ha, "len = %d len_on_first_bd = %d\n",
4437 len, len_on_first_bd);
4438 if ((len > 60 ) && (len > len_on_first_bd)) {
4440 mp->m_len = len_on_first_bd;
4442 if (qlnx_rx_jumbo_chain(ha, fp, mp,
4443 (len - len_on_first_bd)) != 0) {
4447 QLNX_INC_IQDROPS(ifp);
4452 } else if (len_on_first_bd < len) {
4453 fp->err_rx_jumbo_chain_pkts++;
4458 mp->m_flags |= M_PKTHDR;
4460 /* assign packet to this interface interface */
4461 mp->m_pkthdr.rcvif = ifp;
4463 /* assume no hardware checksum has complated */
4464 mp->m_pkthdr.csum_flags = 0;
4466 mp->m_pkthdr.flowid = fp_cqe->rss_hash;
4468 #if __FreeBSD_version >= 1100000
4470 hash_type = fp_cqe->bitfields &
4471 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
4472 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);
4474 switch (hash_type) {
4476 case RSS_HASH_TYPE_IPV4:
4477 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
4480 case RSS_HASH_TYPE_TCP_IPV4:
4481 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
4484 case RSS_HASH_TYPE_IPV6:
4485 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
4488 case RSS_HASH_TYPE_TCP_IPV6:
4489 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
4493 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
4498 mp->m_flags |= M_FLOWID;
4501 if (CQE_L3_PACKET(fp_cqe->pars_flags.flags)) {
4502 mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
4505 if (!(CQE_IP_HDR_ERR(fp_cqe->pars_flags.flags))) {
4506 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4509 if (CQE_L4_HAS_CSUM(fp_cqe->pars_flags.flags)) {
4510 mp->m_pkthdr.csum_data = 0xFFFF;
4511 mp->m_pkthdr.csum_flags |=
4512 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4515 if (CQE_HAS_VLAN(fp_cqe->pars_flags.flags)) {
4516 mp->m_pkthdr.ether_vtag = le16toh(fp_cqe->vlan_tag);
4517 mp->m_flags |= M_VLANTAG;
4520 QLNX_INC_IPACKETS(ifp);
4521 QLNX_INC_IBYTES(ifp, len);
4523 #ifdef QLNX_SOFT_LRO
4527 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
4529 tcp_lro_queue_mbuf(lro, mp);
4533 if (tcp_lro_rx(lro, mp, 0))
4534 (*ifp->if_input)(ifp, mp);
4536 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
4539 (*ifp->if_input)(ifp, mp);
4543 (*ifp->if_input)(ifp, mp);
4545 #endif /* #ifdef QLNX_SOFT_LRO */
4549 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4551 next_cqe: /* don't consume bd rx buffer */
4552 ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
4553 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
4555 /* CR TPA - revisit how to handle budget in TPA perhaps
4556 increase on "end" */
4557 if (rx_pkt == budget)
4559 } /* repeat while sw_comp_cons != hw_comp_cons... */
4561 /* Update producers */
4562 qlnx_update_rx_prod(p_hwfn, rxq);
4569 * fast path interrupt
/*
 * qlnx_fp_isr() - per-queue fast-path interrupt handler.
 *
 * 'arg' is the qlnx_ivec_t for the vector; its rss_idx selects the
 * fastpath.  Interrupts are ACKed/disabled, Tx completions are reaped
 * opportunistically (trylock) for each traffic class, Rx is polled up to
 * ha->rx_pkt_threshold, queued LRO is flushed, and the status block is
 * updated before re-enabling the IGU interrupt.  With QLNX_RCV_IN_TASKQ
 * all of that is instead deferred to the fp taskqueue.
 *
 * NOTE(review): interior lines are missing from this extract (embedded
 * line numbers jump) — comments cover visible statements only.
 */
4573 qlnx_fp_isr(void *arg)
4575 qlnx_ivec_t *ivec = arg;
4577 struct qlnx_fastpath *fp = NULL;
/* Ignore interrupts that arrive while the device is not fully up. */
4582 if (ha->state != QLNX_STATE_OPEN) {
4586 idx = ivec->rss_idx;
/* Guard against a vector index outside the configured RSS range. */
4588 if ((idx = ivec->rss_idx) >= ha->num_rss) {
4589 QL_DPRINT1(ha, "illegal interrupt[%d]\n", idx);
4590 ha->err_illegal_intr++;
4593 fp = &ha->fp_array[idx];
4599 #ifdef QLNX_RCV_IN_TASKQ
/* Taskqueue mode: mask the IGU interrupt and defer all work to fp_task. */
4600 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
4601 if (fp->fp_taskqueue != NULL)
4602 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
4604 int rx_int = 0, total_rx_count = 0;
4606 struct qlnx_tx_queue *txq;
4609 lro_enable = ha->ifp->if_capenable & IFCAP_LRO;
4611 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
/* Reap Tx completions per traffic class when the PBL is low on elements. */
4614 for (tc = 0; tc < ha->num_tc; tc++) {
4618 if((int)(elem_left =
4619 ecore_chain_get_elem_left(&txq->tx_pbl)) <
4620 QLNX_TX_ELEM_THRESH) {
/* trylock: never block in the ISR; skip Tx reaping on contention. */
4622 if (mtx_trylock(&fp->tx_mtx)) {
4623 #ifdef QLNX_TRACE_PERF_DATA
4624 tx_compl = fp->tx_pkts_completed;
4627 qlnx_tx_int(ha, fp, fp->txq[tc]);
4628 #ifdef QLNX_TRACE_PERF_DATA
/* Histogram buckets of how many completions this interrupt reaped. */
4629 fp->tx_pkts_compl_intr +=
4630 (fp->tx_pkts_completed - tx_compl);
4631 if ((fp->tx_pkts_completed - tx_compl) <= 32)
4633 else if (((fp->tx_pkts_completed - tx_compl) > 32) &&
4634 ((fp->tx_pkts_completed - tx_compl) <= 64))
4636 else if(((fp->tx_pkts_completed - tx_compl) > 64) &&
4637 ((fp->tx_pkts_completed - tx_compl) <= 128))
4639 else if(((fp->tx_pkts_completed - tx_compl) > 128))
4642 mtx_unlock(&fp->tx_mtx);
/* Poll Rx with a budget so one queue cannot monopolize the CPU. */
4647 rx_int = qlnx_rx_int(ha, fp, ha->rx_pkt_threshold,
4651 fp->rx_pkts += rx_int;
4652 total_rx_count += rx_int;
4657 #ifdef QLNX_SOFT_LRO
4659 struct lro_ctrl *lro;
4661 lro = &fp->rxq->lro;
4663 if (lro_enable && total_rx_count) {
4665 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
4667 #ifdef QLNX_TRACE_LRO_CNT
/* Bucket the queued-mbuf count (1024/512/256/128/64) for tracing. */
4668 if (lro->lro_mbuf_count & ~1023)
4670 else if (lro->lro_mbuf_count & ~511)
4672 else if (lro->lro_mbuf_count & ~255)
4674 else if (lro->lro_mbuf_count & ~127)
4676 else if (lro->lro_mbuf_count & ~63)
4678 #endif /* #ifdef QLNX_TRACE_LRO_CNT */
4680 tcp_lro_flush_all(lro);
/* Legacy LRO API: drain the active list entry by entry. */
4683 struct lro_entry *queued;
4685 while ((!SLIST_EMPTY(&lro->lro_active))) {
4686 queued = SLIST_FIRST(&lro->lro_active);
4687 SLIST_REMOVE_HEAD(&lro->lro_active, \
4689 tcp_lro_flush(lro, queued);
4691 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
4694 #endif /* #ifdef QLNX_SOFT_LRO */
/* Publish the SB index and re-arm the IGU interrupt for this vector. */
4696 ecore_sb_update_sb_idx(fp->sb_info);
4698 ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
4700 #endif /* #ifdef QLNX_RCV_IN_TASKQ */
4708 * slow path interrupt processing function
4709 * can be invoked in polled mode or in interrupt mode via taskqueue.
/*
 * qlnx_sp_isr() - slow-path interrupt handler.
 * 'arg' resolves to an ecore_hwfn; the host softc is recovered from
 * p_hwfn->p_dev.  Counts the interrupt and runs the ecore slow-path DPC.
 */
4712 qlnx_sp_isr(void *arg)
4714 struct ecore_hwfn *p_hwfn;
4719 ha = (qlnx_host_t *)p_hwfn->p_dev;
4721 ha->sp_interrupts++;
4723 QL_DPRINT2(ha, "enter\n");
4725 ecore_int_sp_dpc(p_hwfn);
4727 QL_DPRINT2(ha, "exit\n");
4732 /*****************************************************************************
4733 * Support Functions for DMA'able Memory
4734 *****************************************************************************/
/*
 * qlnx_dmamap_callback() - bus_dmamap_load() completion callback.
 * Stores the single mapped segment address into *(bus_addr_t *)arg;
 * leaves it 0 on error so the caller can detect a failed load.
 */
4737 qlnx_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
4739 *((bus_addr_t *)arg) = 0;
4742 printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
4746 *((bus_addr_t *)arg) = segs[0].ds_addr;
/*
 * qlnx_alloc_dmabuf() - allocate a DMA-able buffer described by dma_buf
 * (size/alignment filled in by the caller).  Creates a child tag under
 * ha->parent_tag, allocates coherent memory, and loads the map to obtain
 * the bus address (via qlnx_dmamap_callback).  On any failure the tag and
 * memory already acquired are torn down before returning.
 */
4752 qlnx_alloc_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
4760 ret = bus_dma_tag_create(
4761 ha->parent_tag,/* parent */
4763 ((bus_size_t)(1ULL << 32)),/* boundary */
4764 BUS_SPACE_MAXADDR, /* lowaddr */
4765 BUS_SPACE_MAXADDR, /* highaddr */
4766 NULL, NULL, /* filter, filterarg */
4767 dma_buf->size, /* maxsize */
4769 dma_buf->size, /* maxsegsize */
4771 NULL, NULL, /* lockfunc, lockarg */
4775 QL_DPRINT1(ha, "could not create dma tag\n")
4776 goto qlnx_alloc_dmabuf_exit;
4778 ret = bus_dmamem_alloc(dma_buf->dma_tag,
4779 (void **)&dma_buf->dma_b,
4780 (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
/* Allocation failed: destroy the tag created above before bailing out. */
4783 bus_dma_tag_destroy(dma_buf->dma_tag);
4784 QL_DPRINT1(ha, "bus_dmamem_alloc failed\n");
4785 goto qlnx_alloc_dmabuf_exit;
4788 ret = bus_dmamap_load(dma_buf->dma_tag,
4792 qlnx_dmamap_callback,
4793 &b_addr, BUS_DMA_NOWAIT);
/* b_addr == 0 means the callback never saw a valid segment. */
4795 if (ret || !b_addr) {
4796 bus_dma_tag_destroy(dma_buf->dma_tag);
4797 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
4800 goto qlnx_alloc_dmabuf_exit;
4803 dma_buf->dma_addr = b_addr;
4805 qlnx_alloc_dmabuf_exit:
/*
 * qlnx_free_dmabuf() - release a buffer created by qlnx_alloc_dmabuf():
 * unload the map, free the coherent memory, destroy the child tag.
 */
4811 qlnx_free_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
4813 bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
4814 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
4815 bus_dma_tag_destroy(dma_buf->dma_tag);
/*
 * qlnx_dma_alloc_coherent() - OSAL hook used by ecore for coherent DMA
 * allocations.  Rounds 'size' up to a page, over-allocates by one page,
 * and stashes a copy of the qlnx_dma_t bookkeeping record at v_addr+size
 * so qlnx_dma_free_coherent() can later recover the tag/map from just
 * (v_addr, size).  Returns the virtual address; bus address via *phys.
 */
4820 qlnx_dma_alloc_coherent(void *ecore_dev, bus_addr_t *phys, uint32_t size)
4827 ha = (qlnx_host_t *)ecore_dev;
4830 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
4832 memset(&dma_buf, 0, sizeof (qlnx_dma_t));
/* Extra page holds the hidden qlnx_dma_t descriptor after the payload. */
4834 dma_buf.size = size + PAGE_SIZE;
4835 dma_buf.alignment = 8;
4837 if (qlnx_alloc_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf) != 0)
4839 bzero((uint8_t *)dma_buf.dma_b, dma_buf.size);
4841 *phys = dma_buf.dma_addr;
4843 dma_p = (qlnx_dma_t *)((uint8_t *)dma_buf.dma_b + size);
4845 memcpy(dma_p, &dma_buf, sizeof(qlnx_dma_t));
4847 QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
4848 (void *)dma_buf.dma_map, (void *)dma_buf.dma_tag,
4849 dma_buf.dma_b, (void *)dma_buf.dma_addr, size);
4851 return (dma_buf.dma_b);
/*
 * qlnx_dma_free_coherent() - OSAL hook to free memory obtained from
 * qlnx_dma_alloc_coherent().  Recovers the hidden qlnx_dma_t descriptor
 * stored at v_addr + round_up(size, PAGE_SIZE) and frees via
 * qlnx_free_dmabuf().
 */
4855 qlnx_dma_free_coherent(void *ecore_dev, void *v_addr, bus_addr_t phys,
4858 qlnx_dma_t dma_buf, *dma_p;
4862 ha = (qlnx_host_t *)ecore_dev;
/* Must round the size exactly as the allocator did to find the descriptor. */
4868 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
4870 dma_p = (qlnx_dma_t *)((uint8_t *)v_addr + size);
4872 QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
4873 (void *)dma_p->dma_map, (void *)dma_p->dma_tag,
4874 dma_p->dma_b, (void *)dma_p->dma_addr, size);
4878 qlnx_free_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf);
/*
 * qlnx_alloc_parent_dma_tag() - create the top-level DMA tag that all
 * per-buffer child tags derive from; sets ha->flags.parent_tag on success.
 */
4883 qlnx_alloc_parent_dma_tag(qlnx_host_t *ha)
4891 * Allocate parent DMA Tag
4893 ret = bus_dma_tag_create(
4894 bus_get_dma_tag(dev), /* parent */
4895 1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
4896 BUS_SPACE_MAXADDR, /* lowaddr */
4897 BUS_SPACE_MAXADDR, /* highaddr */
4898 NULL, NULL, /* filter, filterarg */
4899 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
4901 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
4903 NULL, NULL, /* lockfunc, lockarg */
4907 QL_DPRINT1(ha, "could not create parent dma tag\n");
4911 ha->flags.parent_tag = 1;
/*
 * qlnx_free_parent_dma_tag() - destroy the parent DMA tag if present
 * and clear the pointer to prevent a double destroy.
 */
4917 qlnx_free_parent_dma_tag(qlnx_host_t *ha)
4919 if (ha->parent_tag != NULL) {
4920 bus_dma_tag_destroy(ha->parent_tag);
4921 ha->parent_tag = NULL;
/*
 * qlnx_alloc_tx_dma_tag() - create the tag used to map Tx mbuf chains:
 * up to QLNX_MAX_SEGMENTS segments, TSO-sized frames.
 */
4927 qlnx_alloc_tx_dma_tag(qlnx_host_t *ha)
4929 if (bus_dma_tag_create(NULL, /* parent */
4930 1, 0, /* alignment, bounds */
4931 BUS_SPACE_MAXADDR, /* lowaddr */
4932 BUS_SPACE_MAXADDR, /* highaddr */
4933 NULL, NULL, /* filter, filterarg */
4934 QLNX_MAX_TSO_FRAME_SIZE, /* maxsize */
4935 QLNX_MAX_SEGMENTS, /* nsegments */
4936 QLNX_MAX_TX_MBUF_SIZE, /* maxsegsize */
4938 NULL, /* lockfunc */
4939 NULL, /* lockfuncarg */
4942 QL_DPRINT1(ha, "tx_tag alloc failed\n");
/* qlnx_free_tx_dma_tag() - destroy the Tx tag if it was created. */
4950 qlnx_free_tx_dma_tag(qlnx_host_t *ha)
4952 if (ha->tx_tag != NULL) {
4953 bus_dma_tag_destroy(ha->tx_tag);
/*
 * qlnx_alloc_rx_dma_tag() - create the tag used to map Rx buffers;
 * single segment up to MJUM9BYTES (9K jumbo cluster).
 */
4960 qlnx_alloc_rx_dma_tag(qlnx_host_t *ha)
4962 if (bus_dma_tag_create(NULL, /* parent */
4963 1, 0, /* alignment, bounds */
4964 BUS_SPACE_MAXADDR, /* lowaddr */
4965 BUS_SPACE_MAXADDR, /* highaddr */
4966 NULL, NULL, /* filter, filterarg */
4967 MJUM9BYTES, /* maxsize */
4969 MJUM9BYTES, /* maxsegsize */
4971 NULL, /* lockfunc */
4972 NULL, /* lockfuncarg */
4975 QL_DPRINT1(ha, " rx_tag alloc failed\n");
/* qlnx_free_rx_dma_tag() - destroy the Rx tag if it was created. */
4983 qlnx_free_rx_dma_tag(qlnx_host_t *ha)
4985 if (ha->rx_tag != NULL) {
4986 bus_dma_tag_destroy(ha->rx_tag);
4992 /*********************************
4993 * Exported functions
4994 *********************************/
/*
 * qlnx_pci_bus_get_bar_size() - ecore OSAL hook; maps the logical BAR id
 * to the SYS_RES_MEMORY rid (bar_id * 2: 64-bit BARs consume two rids)
 * and returns its resource size.
 */
4996 qlnx_pci_bus_get_bar_size(void *ecore_dev, uint8_t bar_id)
5000 bar_id = bar_id * 2;
5002 bar_size = bus_get_resource_count(((qlnx_host_t *)ecore_dev)->pci_dev,
/*
 * PCI configuration-space accessors (ecore OSAL hooks).  Each is a thin
 * wrapper around pci_read_config()/pci_write_config() on the host's
 * pci_dev, with the access width (1/2/4 bytes) fixed per function.
 */
/* Read one byte of PCI config space into *reg_value. */
5010 qlnx_pci_read_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t *reg_value)
5012 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
/* Read a 16-bit word of PCI config space into *reg_value. */
5018 qlnx_pci_read_config_word(void *ecore_dev, uint32_t pci_reg,
5019 uint16_t *reg_value)
5021 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
/* Read a 32-bit dword of PCI config space into *reg_value. */
5027 qlnx_pci_read_config_dword(void *ecore_dev, uint32_t pci_reg,
5028 uint32_t *reg_value)
5030 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
/* Write one byte to PCI config space. */
5036 qlnx_pci_write_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t reg_value)
5038 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5039 pci_reg, reg_value, 1);
/* Write a 16-bit word to PCI config space. */
5044 qlnx_pci_write_config_word(void *ecore_dev, uint32_t pci_reg,
5047 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5048 pci_reg, reg_value, 2);
/* Write a 32-bit dword to PCI config space. */
5053 qlnx_pci_write_config_dword(void *ecore_dev, uint32_t pci_reg,
5056 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5057 pci_reg, reg_value, 4);
/*
 * qlnx_pci_find_capability() - ecore OSAL hook; looks up the PCIe
 * (PCIY_EXPRESS) capability offset via pci_find_cap().
 * NOTE(review): '®' below appears to be mojibake of '&reg' from the
 * extraction, and the 'cap' parameter seems unused in the visible call —
 * verify against the upstream source.
 */
5063 qlnx_pci_find_capability(void *ecore_dev, int cap)
5070 if (pci_find_cap(ha->pci_dev, PCIY_EXPRESS, ®) == 0)
5073 QL_DPRINT1(ha, "failed\n");
/*
 * Register-window accessors (ecore OSAL hooks).  Each rebases the given
 * offset from the per-hwfn view (p_hwfn->regview / doorbells) onto the
 * device-wide BAR mapping before issuing the bus_space access.
 */
/* qlnx_reg_rd32() - 32-bit read from the hwfn's register window. */
5079 qlnx_reg_rd32(void *hwfn, uint32_t reg_addr)
5082 struct ecore_dev *cdev;
5083 struct ecore_hwfn *p_hwfn;
5087 cdev = p_hwfn->p_dev;
/* Rebase: hwfn-relative offset -> device-wide BAR offset. */
5089 reg_addr = (uint32_t)((uint8_t *)(p_hwfn->regview) -
5090 (uint8_t *)(cdev->regview)) + reg_addr;
5092 data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, reg_addr);
/* qlnx_reg_wr32() - 32-bit write to the hwfn's register window. */
5098 qlnx_reg_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
5100 struct ecore_dev *cdev;
5101 struct ecore_hwfn *p_hwfn;
5105 cdev = p_hwfn->p_dev;
5107 reg_addr = (uint32_t)((uint8_t *)(p_hwfn->regview) -
5108 (uint8_t *)(cdev->regview)) + reg_addr;
5110 bus_write_4(((qlnx_host_t *)cdev)->pci_reg, reg_addr, value);
/* qlnx_reg_wr16() - 16-bit write to the hwfn's register window. */
5116 qlnx_reg_wr16(void *hwfn, uint32_t reg_addr, uint16_t value)
5118 struct ecore_dev *cdev;
5119 struct ecore_hwfn *p_hwfn;
5123 cdev = p_hwfn->p_dev;
5125 reg_addr = (uint32_t)((uint8_t *)(p_hwfn->regview) -
5126 (uint8_t *)(cdev->regview)) + reg_addr;
5128 bus_write_2(((qlnx_host_t *)cdev)->pci_reg, reg_addr, value);
/* qlnx_dbell_wr32() - 32-bit write to the hwfn's doorbell window. */
5134 qlnx_dbell_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
5136 struct ecore_dev *cdev;
5137 struct ecore_hwfn *p_hwfn;
5141 cdev = p_hwfn->p_dev;
5143 reg_addr = (uint32_t)((uint8_t *)(p_hwfn->doorbells) -
5144 (uint8_t *)(cdev->doorbells)) + reg_addr;
5146 bus_write_4(((qlnx_host_t *)cdev)->pci_dbells, reg_addr, value);
/*
 * Direct register accessors: the caller passes an absolute pointer into
 * the mapped regview; the offset within the BAR is computed by pointer
 * subtraction against cdev->regview.
 */
/* qlnx_direct_reg_rd32() - 32-bit read at an absolute regview pointer. */
5152 qlnx_direct_reg_rd32(void *p_hwfn, uint32_t *reg_addr)
5156 struct ecore_dev *cdev;
5158 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5159 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5161 data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, offset);
/* qlnx_direct_reg_wr32() - 32-bit write at an absolute regview pointer. */
5167 qlnx_direct_reg_wr32(void *p_hwfn, void *reg_addr, uint32_t value)
5170 struct ecore_dev *cdev;
5172 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5173 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5175 bus_write_4(((qlnx_host_t *)cdev)->pci_reg, offset, value);
/* qlnx_direct_reg_wr64() - 64-bit write at an absolute regview pointer. */
5181 qlnx_direct_reg_wr64(void *p_hwfn, void *reg_addr, uint64_t value)
5184 struct ecore_dev *cdev;
5186 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5187 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5189 bus_write_8(((qlnx_host_t *)cdev)->pci_reg, offset, value);
/*
 * qlnx_zalloc() - OSAL allocation hook; non-sleeping malloc from the
 * driver's M_QLNXBUF pool.  NOTE(review): despite the name, no zeroing
 * is visible here (M_ZERO absent) — the zeroing may be on a line dropped
 * from this extract; confirm upstream.
 */
5194 qlnx_zalloc(uint32_t size)
5198 va = malloc((unsigned long)size, M_QLNXBUF, M_NOWAIT);
5200 return ((void *)va);
/*
 * qlnx_barrier() - OSAL write-ordering hook; issues a bus-space write
 * barrier over the register BAR.
 */
5204 qlnx_barrier(void *p_hwfn)
5208 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
5209 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_WRITE);
/*
 * qlnx_link_update() - ecore callback invoked on link events.  Refreshes
 * ha->if_link via qlnx_fill_link() and notifies the network stack only
 * when the up/down state actually changed.
 */
5213 qlnx_link_update(void *p_hwfn)
5216 int prev_link_state;
5218 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
5220 qlnx_fill_link(p_hwfn, &ha->if_link);
5222 prev_link_state = ha->link_up;
5223 ha->link_up = ha->if_link.link_up;
/* Only report transitions; suppresses redundant link-state churn. */
5225 if (prev_link_state != ha->link_up) {
5227 if_link_state_change(ha->ifp, LINK_STATE_UP);
5229 if_link_state_change(ha->ifp, LINK_STATE_DOWN);
/*
 * qlnx_fill_link() - translate ecore MCP link parameters/state into the
 * driver's qlnx_link_output structure: speed, autoneg, pause, advertised
 * and link-partner capability bitmaps (QLNX_LINK_CAP_*).
 */
5236 qlnx_fill_link(struct ecore_hwfn *hwfn, struct qlnx_link_output *if_link)
5238 struct ecore_mcp_link_params link_params;
5239 struct ecore_mcp_link_state link_state;
5241 memset(if_link, 0, sizeof(*if_link));
5242 memset(&link_params, 0, sizeof(struct ecore_mcp_link_params));
5243 memset(&link_state, 0, sizeof(struct ecore_mcp_link_state));
5245 /* Prepare source inputs */
5246 /* we only deal with physical functions */
5247 memcpy(&link_params, ecore_mcp_get_link_params(hwfn),
5248 sizeof(link_params));
5249 memcpy(&link_state, ecore_mcp_get_link_state(hwfn),
5250 sizeof(link_state));
5252 ecore_mcp_get_media_type(hwfn->p_dev, &if_link->media_type);
5254 /* Set the link parameters to pass to protocol driver */
5255 if (link_state.link_up) {
5256 if_link->link_up = true;
5257 if_link->speed = link_state.speed;
5260 if_link->supported_caps = QLNX_LINK_CAP_FIBRE;
5262 if (link_params.speed.autoneg)
5263 if_link->supported_caps |= QLNX_LINK_CAP_Autoneg;
/* Asym pause: pause autoneg, or both directions forced. */
5265 if (link_params.pause.autoneg ||
5266 (link_params.pause.forced_rx && link_params.pause.forced_tx))
5267 if_link->supported_caps |= QLNX_LINK_CAP_Asym_Pause;
5269 if (link_params.pause.autoneg || link_params.pause.forced_rx ||
5270 link_params.pause.forced_tx)
5271 if_link->supported_caps |= QLNX_LINK_CAP_Pause;
/* Map each advertised NVM speed bit to the corresponding cap flag. */
5273 if (link_params.speed.advertised_speeds &
5274 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
5275 if_link->supported_caps |= QLNX_LINK_CAP_1000baseT_Half |
5276 QLNX_LINK_CAP_1000baseT_Full;
5278 if (link_params.speed.advertised_speeds &
5279 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
5280 if_link->supported_caps |= QLNX_LINK_CAP_10000baseKR_Full;
5282 if (link_params.speed.advertised_speeds &
5283 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
5284 if_link->supported_caps |= QLNX_LINK_CAP_25000baseKR_Full;
5286 if (link_params.speed.advertised_speeds &
5287 NVM_CFG1_PORT_DRV_LINK_SPEED_40G)
5288 if_link->supported_caps |= QLNX_LINK_CAP_40000baseLR4_Full;
5290 if (link_params.speed.advertised_speeds &
5291 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
5292 if_link->supported_caps |= QLNX_LINK_CAP_50000baseKR2_Full;
5294 if (link_params.speed.advertised_speeds &
5295 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
5296 if_link->supported_caps |= QLNX_LINK_CAP_100000baseKR4_Full;
5298 if_link->advertised_caps = if_link->supported_caps;
5300 if_link->autoneg = link_params.speed.autoneg;
5301 if_link->duplex = QLNX_LINK_DUPLEX;
5303 /* Link partner capabilities */
5305 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_HD)
5306 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Half;
5308 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_FD)
5309 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Full;
5311 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_10G)
5312 if_link->link_partner_caps |= QLNX_LINK_CAP_10000baseKR_Full;
5314 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_25G)
5315 if_link->link_partner_caps |= QLNX_LINK_CAP_25000baseKR_Full;
5317 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_40G)
5318 if_link->link_partner_caps |= QLNX_LINK_CAP_40000baseLR4_Full;
5320 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_50G)
5321 if_link->link_partner_caps |= QLNX_LINK_CAP_50000baseKR2_Full;
5323 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_100G)
5324 if_link->link_partner_caps |= QLNX_LINK_CAP_100000baseKR4_Full;
5326 if (link_state.an_complete)
5327 if_link->link_partner_caps |= QLNX_LINK_CAP_Autoneg;
5329 if (link_state.partner_adv_pause)
5330 if_link->link_partner_caps |= QLNX_LINK_CAP_Pause;
5332 if ((link_state.partner_adv_pause ==
5333 ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE) ||
5334 (link_state.partner_adv_pause ==
5335 ECORE_LINK_PARTNER_BOTH_PAUSE))
5336 if_link->link_partner_caps |= QLNX_LINK_CAP_Asym_Pause;
/*
 * qlnx_nic_setup() - propagate the PF parameters to every hwfn, then
 * allocate and set up ecore resources for the device.
 */
5342 qlnx_nic_setup(struct ecore_dev *cdev, struct ecore_pf_params *func_params)
5346 for (i = 0; i < cdev->num_hwfns; i++) {
5347 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
5348 p_hwfn->pf_params = *func_params;
5351 rc = ecore_resc_alloc(cdev);
5353 goto qlnx_nic_setup_exit;
5355 ecore_resc_setup(cdev);
5357 qlnx_nic_setup_exit:
/*
 * qlnx_nic_start() - bring the hardware up via ecore_hw_init() using the
 * device's configured interrupt mode; frees ecore resources on failure.
 * NOTE(review): '¶ms' below appears to be mojibake of '&params' from
 * the extraction — verify against the upstream source.
 */
5363 qlnx_nic_start(struct ecore_dev *cdev)
5366 struct ecore_hw_init_params params;
5368 bzero(¶ms, sizeof (struct ecore_hw_init_params));
5370 params.p_tunn = NULL;
5371 params.b_hw_start = true;
5372 params.int_mode = cdev->int_mode;
5373 params.allow_npar_tx_switch = true;
5374 params.bin_fw_data = NULL;
5376 rc = ecore_hw_init(cdev, ¶ms);
5378 ecore_resc_free(cdev);
/*
 * qlnx_slowpath_start() - configure PF parameters (connection count =
 * num_rss * (num_tc + 1)), run nic_setup, select MSI-X interrupt mode
 * and coalescing, then start the NIC; mirrors the negotiated coalescing
 * values back into the softc.
 */
5386 qlnx_slowpath_start(qlnx_host_t *ha)
5388 struct ecore_dev *cdev;
5389 struct ecore_pf_params pf_params;
5392 memset(&pf_params, 0, sizeof(struct ecore_pf_params));
5393 pf_params.eth_pf_params.num_cons =
5394 (ha->num_rss) * (ha->num_tc + 1);
5398 rc = qlnx_nic_setup(cdev, &pf_params);
5400 goto qlnx_slowpath_start_exit;
5402 cdev->int_mode = ECORE_INT_MODE_MSIX;
5403 cdev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE;
5405 #ifdef QLNX_MAX_COALESCE
/* 255 usec = maximum coalescing interval the device accepts. */
5406 cdev->rx_coalesce_usecs = 255;
5407 cdev->tx_coalesce_usecs = 255;
5410 rc = qlnx_nic_start(cdev);
5412 ha->rx_coalesce_usecs = cdev->rx_coalesce_usecs;
5413 ha->tx_coalesce_usecs = cdev->tx_coalesce_usecs;
5415 qlnx_slowpath_start_exit:
/*
 * qlnx_slowpath_stop() - stop the hardware, tear down the slow-path
 * interrupt handlers and release their IRQ resources for each hwfn,
 * then free all ecore resources.
 */
5421 qlnx_slowpath_stop(qlnx_host_t *ha)
5423 struct ecore_dev *cdev;
5424 device_t dev = ha->pci_dev;
5429 ecore_hw_stop(cdev);
5431 for (i = 0; i < ha->cdev.num_hwfns; i++) {
/* Detach the interrupt handler first, then release the IRQ resource. */
5433 if (ha->sp_handle[i])
5434 (void)bus_teardown_intr(dev, ha->sp_irq[i],
5437 ha->sp_handle[i] = NULL;
5440 (void) bus_release_resource(dev, SYS_RES_IRQ,
5441 ha->sp_irq_rid[i], ha->sp_irq[i]);
5442 ha->sp_irq[i] = NULL;
5445 ecore_resc_free(cdev);
/*
 * qlnx_set_id() - record the device name on the ecore_dev and derive a
 * per-hwfn name ("<name>-<index>"); marks the driver type as FreeBSD.
 */
5451 qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
5452 char ver_str[VER_SIZE])
5456 memcpy(cdev->name, name, NAME_SIZE);
5458 for_each_hwfn(cdev, i) {
5459 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
5462 cdev->drv_type = DRV_ID_DRV_TYPE_FREEBSD;
/*
 * qlnx_get_protocol_stats() - ecore callback to fill MCP protocol stats.
 * Only ECORE_MCP_LAN_STATS is supported: vport stats are copied into the
 * lan_stats fields (fcs_err is not available and reported as -1).
 * Unknown types bump err_get_proto_invalid_type.
 * NOTE(review): 'ð_stats' below appears to be mojibake of '&eth_stats'
 * from the extraction — verify against the upstream source.
 */
5468 qlnx_get_protocol_stats(void *cdev, int proto_type, void *proto_stats)
5470 enum ecore_mcp_protocol_type type;
5471 union ecore_mcp_protocol_stats *stats;
5472 struct ecore_eth_stats eth_stats;
5476 stats = proto_stats;
5481 case ECORE_MCP_LAN_STATS:
5482 ecore_get_vport_stats((struct ecore_dev *)cdev, ð_stats);
5483 stats->lan_stats.ucast_rx_pkts = eth_stats.common.rx_ucast_pkts;
5484 stats->lan_stats.ucast_tx_pkts = eth_stats.common.tx_ucast_pkts;
5485 stats->lan_stats.fcs_err = -1;
5489 ha->err_get_proto_invalid_type++;
5491 QL_DPRINT1(ha, "invalid protocol type 0x%x\n", type);
/*
 * qlnx_get_mfw_version() - query the management FW version through hwfn 0.
 * Acquires a PTT window for the MCP access and releases it afterwards;
 * fails if no PTT is available.
 */
5498 qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver)
5500 struct ecore_hwfn *p_hwfn;
5501 struct ecore_ptt *p_ptt;
5503 p_hwfn = &ha->cdev.hwfns[0];
5504 p_ptt = ecore_ptt_acquire(p_hwfn);
5506 if (p_ptt == NULL) {
5507 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
5510 ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, mfw_ver, NULL);
5512 ecore_ptt_release(p_hwfn, p_ptt);
/*
 * qlnx_get_flash_size() - query the NVM flash size through hwfn 0,
 * using the same acquire/use/release PTT pattern as above.
 */
5518 qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size)
5520 struct ecore_hwfn *p_hwfn;
5521 struct ecore_ptt *p_ptt;
5523 p_hwfn = &ha->cdev.hwfns[0];
5524 p_ptt = ecore_ptt_acquire(p_hwfn);
5526 if (p_ptt == NULL) {
5527 QL_DPRINT1(ha,"ecore_ptt_acquire failed\n");
5530 ecore_mcp_get_flash_size(p_hwfn, p_ptt, flash_size);
5532 ecore_ptt_release(p_hwfn, p_ptt);
/*
 * qlnx_alloc_mem_arrays() - zero the statically sized per-RSS queue and
 * status-block arrays before (re)initialization.
 */
5538 qlnx_alloc_mem_arrays(qlnx_host_t *ha)
5540 struct ecore_dev *cdev;
5544 bzero(&ha->txq_array[0], (sizeof(struct qlnx_tx_queue) * QLNX_MAX_RSS));
5545 bzero(&ha->rxq_array[0], (sizeof(struct qlnx_rx_queue) * QLNX_MAX_RSS));
5546 bzero(&ha->sb_array[0], (sizeof(struct ecore_sb_info) * QLNX_MAX_RSS));
/*
 * qlnx_init_fp() - wire up each fastpath structure: status block, Rx
 * queue, one Tx queue per traffic class (indexed tc * num_rss + rss_id),
 * a human-readable name, and zeroed statistics counters.
 */
5552 qlnx_init_fp(qlnx_host_t *ha)
5554 int rss_id, txq_array_index, tc;
5556 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
5558 struct qlnx_fastpath *fp = &ha->fp_array[rss_id];
5560 fp->rss_id = rss_id;
5562 fp->sb_info = &ha->sb_array[rss_id];
5563 fp->rxq = &ha->rxq_array[rss_id];
5564 fp->rxq->rxq_id = rss_id;
/* Tx queues are laid out as [tc][rss_id] in the flat txq_array. */
5566 for (tc = 0; tc < ha->num_tc; tc++) {
5567 txq_array_index = tc * ha->num_rss + rss_id;
5568 fp->txq[tc] = &ha->txq_array[txq_array_index];
5569 fp->txq[tc]->index = txq_array_index;
5572 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", qlnx_name_str,
5575 fp->tx_ring_full = 0;
5577 /* reset all the statistics counters */
5579 fp->tx_pkts_processed = 0;
5580 fp->tx_pkts_freed = 0;
5581 fp->tx_pkts_transmitted = 0;
5582 fp->tx_pkts_completed = 0;
5584 #ifdef QLNX_TRACE_PERF_DATA
5585 fp->tx_pkts_trans_ctx = 0;
5586 fp->tx_pkts_compl_ctx = 0;
5587 fp->tx_pkts_trans_fp = 0;
5588 fp->tx_pkts_compl_fp = 0;
5589 fp->tx_pkts_compl_intr = 0;
5591 fp->tx_lso_wnd_min_len = 0;
5593 fp->tx_nsegs_gt_elem_left = 0;
5594 fp->tx_tso_max_nsegs = 0;
5595 fp->tx_tso_min_nsegs = 0;
5596 fp->err_tx_nsegs_gt_elem_left = 0;
5597 fp->err_tx_dmamap_create = 0;
5598 fp->err_tx_defrag_dmamap_load = 0;
5599 fp->err_tx_non_tso_max_seg = 0;
5600 fp->err_tx_dmamap_load = 0;
5601 fp->err_tx_defrag = 0;
5602 fp->err_tx_free_pkt_null = 0;
5603 fp->err_tx_cons_idx_conflict = 0;
5606 fp->err_m_getcl = 0;
5607 fp->err_m_getjcl = 0;
/*
 * qlnx_free_mem_sb() - free the coherent DMA memory backing a fastpath
 * status block, if allocated, and NULL the virtual pointer to make the
 * free idempotent.
 */
5613 qlnx_free_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info)
5615 struct ecore_dev *cdev;
5619 if (sb_info->sb_virt) {
5620 OSAL_DMA_FREE_COHERENT(cdev, ((void *)sb_info->sb_virt),
5621 (sb_info->sb_phys), (sizeof(*sb_info->sb_virt)));
5622 sb_info->sb_virt = NULL;
/*
 * qlnx_sb_init() - initialize a status block with ecore.  The global
 * sb_id is split across hwfns: owning hwfn = sb_id % num_hwfns, and the
 * id relative to that hwfn = sb_id / num_hwfns.
 */
5627 qlnx_sb_init(struct ecore_dev *cdev, struct ecore_sb_info *sb_info,
5628 void *sb_virt_addr, bus_addr_t sb_phy_addr, u16 sb_id)
5630 struct ecore_hwfn *p_hwfn;
5634 hwfn_index = sb_id % cdev->num_hwfns;
5635 p_hwfn = &cdev->hwfns[hwfn_index];
5636 rel_sb_id = sb_id / cdev->num_hwfns;
5638 QL_DPRINT2(((qlnx_host_t *)cdev),
5639 "hwfn_index = %d p_hwfn = %p sb_id = 0x%x rel_sb_id = 0x%x \
5640 sb_info = %p sb_virt_addr = %p sb_phy_addr = %p\n",
5641 hwfn_index, p_hwfn, sb_id, rel_sb_id, sb_info,
5642 sb_virt_addr, (void *)sb_phy_addr);
5644 rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
5645 sb_virt_addr, sb_phy_addr, rel_sb_id);
5650 /* This function allocates fast-path status block memory */
/*
 * qlnx_alloc_mem_sb() - allocate coherent DMA memory for one status
 * block and register it with ecore via qlnx_sb_init(); frees the memory
 * again if initialization fails.
 */
5652 qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info, u16 sb_id)
5654 struct status_block_e4 *sb_virt;
5658 struct ecore_dev *cdev;
5662 size = sizeof(*sb_virt);
5663 sb_virt = OSAL_DMA_ALLOC_COHERENT(cdev, (&sb_phys), size);
5666 QL_DPRINT1(ha, "Status block allocation failed\n");
5670 rc = qlnx_sb_init(cdev, sb_info, sb_virt, sb_phys, sb_id);
5672 OSAL_DMA_FREE_COHERENT(cdev, sb_virt, sb_phys, size);
/*
 * qlnx_free_rx_buffers() - walk the software Rx ring, unloading and
 * destroying each buffer's DMA map and freeing its mbuf; pointers are
 * NULLed so a repeat call is harmless.
 */
5679 qlnx_free_rx_buffers(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
5682 struct sw_rx_data *rx_buf;
5684 for (i = 0; i < rxq->num_rx_buffers; i++) {
5686 rx_buf = &rxq->sw_rx_ring[i];
5688 if (rx_buf->data != NULL) {
5689 if (rx_buf->map != NULL) {
5690 bus_dmamap_unload(ha->rx_tag, rx_buf->map);
5691 bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
5694 m_freem(rx_buf->data);
5695 rx_buf->data = NULL;
/*
 * qlnx_free_mem_rxq() - tear down everything attached to an Rx queue:
 * posted Rx buffers, per-aggregation TPA buffers (including any partially
 * assembled mbuf chains), the software ring bookkeeping, and the FW-facing
 * BD and completion chains; finally the soft-LRO control block.
 */
5702 qlnx_free_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
5704 struct ecore_dev *cdev;
5709 qlnx_free_rx_buffers(ha, rxq);
5711 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
5712 qlnx_free_tpa_mbuf(ha, &rxq->tpa_info[i]);
/* Also free a partially assembled aggregation chain, if one exists. */
5713 if (rxq->tpa_info[i].mpf != NULL)
5714 m_freem(rxq->tpa_info[i].mpf);
5717 bzero((void *)&rxq->sw_rx_ring[0],
5718 (sizeof (struct sw_rx_data) * RX_RING_SIZE));
5720 /* Free the real RQ ring used by FW */
5721 if (rxq->rx_bd_ring.p_virt_addr) {
5722 ecore_chain_free(cdev, &rxq->rx_bd_ring);
5723 rxq->rx_bd_ring.p_virt_addr = NULL;
5726 /* Free the real completion ring used by FW */
5727 if (rxq->rx_comp_ring.p_virt_addr &&
5728 rxq->rx_comp_ring.pbl_sp.p_virt_table) {
5729 ecore_chain_free(cdev, &rxq->rx_comp_ring);
5730 rxq->rx_comp_ring.p_virt_addr = NULL;
5731 rxq->rx_comp_ring.pbl_sp.p_virt_table = NULL;
5734 #ifdef QLNX_SOFT_LRO
5736 struct lro_ctrl *lro;
5741 #endif /* #ifdef QLNX_SOFT_LRO */
/*
 * qlnx_alloc_rx_buffer() - post one fresh receive buffer: allocate a
 * jumbo cluster mbuf of rxq->rx_buf_size, DMA-map it (must resolve to a
 * single segment), record it in the software ring at sw_rx_prod, and
 * publish its bus address into the next BD on the FW Rx BD chain.
 * Returns 0 on success, non-zero on allocation/mapping failure.
 */
5747 qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
5749 register struct mbuf *mp;
5750 uint16_t rx_buf_size;
5751 struct sw_rx_data *sw_rx_data;
5752 struct eth_rx_bd *rx_bd;
5753 dma_addr_t dma_addr;
5755 bus_dma_segment_t segs[1];
5758 struct ecore_dev *cdev;
5762 rx_buf_size = rxq->rx_buf_size;
5764 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);
5767 QL_DPRINT1(ha, "Failed to allocate Rx data\n");
5771 mp->m_len = mp->m_pkthdr.len = rx_buf_size;
5773 map = (bus_dmamap_t)0;
5775 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
5777 dma_addr = segs[0].ds_addr;
/* The Rx BD holds exactly one address: reject multi-segment mappings. */
5779 if (ret || !dma_addr || (nsegs != 1)) {
5781 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
5782 ret, (long long unsigned int)dma_addr, nsegs);
5786 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
5787 sw_rx_data->data = mp;
5788 sw_rx_data->dma_addr = dma_addr;
5789 sw_rx_data->map = map;
5791 /* Advance PROD and get BD pointer */
5792 rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
5793 rx_bd->addr.hi = htole32(U64_HI(dma_addr));
5794 rx_bd->addr.lo = htole32(U64_LO(dma_addr));
5795 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);
5797 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
5803 qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
5804 struct qlnx_agg_info *tpa)
5807 dma_addr_t dma_addr;
5809 bus_dma_segment_t segs[1];
5812 struct sw_rx_data *rx_buf;
5814 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);
5817 QL_DPRINT1(ha, "Failed to allocate Rx data\n");
5821 mp->m_len = mp->m_pkthdr.len = rx_buf_size;
5823 map = (bus_dmamap_t)0;
5825 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
5827 dma_addr = segs[0].ds_addr;
5829 if (ret || !dma_addr || (nsegs != 1)) {
5831 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
5832 ret, (long long unsigned int)dma_addr, nsegs);
5836 rx_buf = &tpa->rx_buf;
5838 memset(rx_buf, 0, sizeof (struct sw_rx_data));
5841 rx_buf->dma_addr = dma_addr;
5844 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);
5850 qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa)
5852 struct sw_rx_data *rx_buf;
5854 rx_buf = &tpa->rx_buf;
5856 if (rx_buf->data != NULL) {
5857 if (rx_buf->map != NULL) {
5858 bus_dmamap_unload(ha->rx_tag, rx_buf->map);
5859 bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
5862 m_freem(rx_buf->data);
5863 rx_buf->data = NULL;
5868 /* This function allocates all memory needed per Rx queue */
5870 qlnx_alloc_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
5872 int i, rc, num_allocated;
5874 struct ecore_dev *cdev;
5879 rxq->num_rx_buffers = RX_RING_SIZE;
5881 rxq->rx_buf_size = ha->rx_buf_size;
5883 /* Allocate the parallel driver ring for Rx buffers */
5884 bzero((void *)&rxq->sw_rx_ring[0],
5885 (sizeof (struct sw_rx_data) * RX_RING_SIZE));
5887 /* Allocate FW Rx ring */
5889 rc = ecore_chain_alloc(cdev,
5890 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
5891 ECORE_CHAIN_MODE_NEXT_PTR,
5892 ECORE_CHAIN_CNT_TYPE_U16,
5894 sizeof(struct eth_rx_bd),
5895 &rxq->rx_bd_ring, NULL);
5900 /* Allocate FW completion ring */
5901 rc = ecore_chain_alloc(cdev,
5902 ECORE_CHAIN_USE_TO_CONSUME,
5903 ECORE_CHAIN_MODE_PBL,
5904 ECORE_CHAIN_CNT_TYPE_U16,
5906 sizeof(union eth_rx_cqe),
5907 &rxq->rx_comp_ring, NULL);
5912 /* Allocate buffers for the Rx ring */
5914 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
5915 rc = qlnx_alloc_tpa_mbuf(ha, rxq->rx_buf_size,
5922 for (i = 0; i < rxq->num_rx_buffers; i++) {
5923 rc = qlnx_alloc_rx_buffer(ha, rxq);
5928 if (!num_allocated) {
5929 QL_DPRINT1(ha, "Rx buffers allocation failed\n");
5931 } else if (num_allocated < rxq->num_rx_buffers) {
5932 QL_DPRINT1(ha, "Allocated less buffers than"
5933 " desired (%d allocated)\n", num_allocated);
5936 #ifdef QLNX_SOFT_LRO
5939 struct lro_ctrl *lro;
5943 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
5944 if (tcp_lro_init_args(lro, ifp, 0, rxq->num_rx_buffers)) {
5945 QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n",
5950 if (tcp_lro_init(lro)) {
5951 QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n",
5955 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
5959 #endif /* #ifdef QLNX_SOFT_LRO */
5963 qlnx_free_mem_rxq(ha, rxq);
5969 qlnx_free_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
5970 struct qlnx_tx_queue *txq)
5972 struct ecore_dev *cdev;
5976 bzero((void *)&txq->sw_tx_ring[0],
5977 (sizeof (struct sw_tx_bd) * TX_RING_SIZE));
5979 /* Free the real RQ ring used by FW */
5980 if (txq->tx_pbl.p_virt_addr) {
5981 ecore_chain_free(cdev, &txq->tx_pbl);
5982 txq->tx_pbl.p_virt_addr = NULL;
5987 /* This function allocates all memory needed per Tx queue */
5989 qlnx_alloc_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
5990 struct qlnx_tx_queue *txq)
5992 int ret = ECORE_SUCCESS;
5993 union eth_tx_bd_types *p_virt;
5994 struct ecore_dev *cdev;
5998 bzero((void *)&txq->sw_tx_ring[0],
5999 (sizeof (struct sw_tx_bd) * TX_RING_SIZE));
6001 /* Allocate the real Tx ring to be used by FW */
6002 ret = ecore_chain_alloc(cdev,
6003 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
6004 ECORE_CHAIN_MODE_PBL,
6005 ECORE_CHAIN_CNT_TYPE_U16,
6008 &txq->tx_pbl, NULL);
6010 if (ret != ECORE_SUCCESS) {
6014 txq->num_tx_buffers = TX_RING_SIZE;
6019 qlnx_free_mem_txq(ha, fp, txq);
6024 qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6027 struct ifnet *ifp = ha->ifp;
6029 if (mtx_initialized(&fp->tx_mtx)) {
6031 if (fp->tx_br != NULL) {
6033 mtx_lock(&fp->tx_mtx);
6035 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
6036 fp->tx_pkts_freed++;
6040 mtx_unlock(&fp->tx_mtx);
6042 buf_ring_free(fp->tx_br, M_DEVBUF);
6045 mtx_destroy(&fp->tx_mtx);
6051 qlnx_free_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6055 qlnx_free_mem_sb(ha, fp->sb_info);
6057 qlnx_free_mem_rxq(ha, fp->rxq);
6059 for (tc = 0; tc < ha->num_tc; tc++)
6060 qlnx_free_mem_txq(ha, fp, fp->txq[tc]);
6066 qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6068 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
6069 "qlnx%d_fp%d_tx_mq_lock", ha->dev_unit, fp->rss_id);
6071 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
6073 fp->tx_br = buf_ring_alloc(TX_RING_SIZE, M_DEVBUF,
6074 M_NOWAIT, &fp->tx_mtx);
6075 if (fp->tx_br == NULL) {
6076 QL_DPRINT1(ha, "buf_ring_alloc failed for fp[%d, %d]\n",
6077 ha->dev_unit, fp->rss_id);
6084 qlnx_alloc_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6088 rc = qlnx_alloc_mem_sb(ha, fp->sb_info, fp->rss_id);
6092 if (ha->rx_jumbo_buf_eq_mtu) {
6093 if (ha->max_frame_size <= MCLBYTES)
6094 ha->rx_buf_size = MCLBYTES;
6095 else if (ha->max_frame_size <= MJUMPAGESIZE)
6096 ha->rx_buf_size = MJUMPAGESIZE;
6097 else if (ha->max_frame_size <= MJUM9BYTES)
6098 ha->rx_buf_size = MJUM9BYTES;
6099 else if (ha->max_frame_size <= MJUM16BYTES)
6100 ha->rx_buf_size = MJUM16BYTES;
6102 if (ha->max_frame_size <= MCLBYTES)
6103 ha->rx_buf_size = MCLBYTES;
6105 ha->rx_buf_size = MJUMPAGESIZE;
6108 rc = qlnx_alloc_mem_rxq(ha, fp->rxq);
6112 for (tc = 0; tc < ha->num_tc; tc++) {
6113 rc = qlnx_alloc_mem_txq(ha, fp, fp->txq[tc]);
6121 qlnx_free_mem_fp(ha, fp);
6126 qlnx_free_mem_load(qlnx_host_t *ha)
6129 struct ecore_dev *cdev;
6133 for (i = 0; i < ha->num_rss; i++) {
6134 struct qlnx_fastpath *fp = &ha->fp_array[i];
6136 qlnx_free_mem_fp(ha, fp);
6142 qlnx_alloc_mem_load(qlnx_host_t *ha)
6146 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
6147 struct qlnx_fastpath *fp = &ha->fp_array[rss_id];
6149 rc = qlnx_alloc_mem_fp(ha, fp);
6157 qlnx_start_vport(struct ecore_dev *cdev,
6161 u8 inner_vlan_removal_en_flg,
6166 struct ecore_sp_vport_start_params vport_start_params = { 0 };
6169 ha = (qlnx_host_t *)cdev;
6171 vport_start_params.remove_inner_vlan = inner_vlan_removal_en_flg;
6172 vport_start_params.tx_switching = 0;
6173 vport_start_params.handle_ptp_pkts = 0;
6174 vport_start_params.only_untagged = 0;
6175 vport_start_params.drop_ttl0 = drop_ttl0_flg;
6177 vport_start_params.tpa_mode =
6178 (hw_lro_enable ? ECORE_TPA_MODE_RSC : ECORE_TPA_MODE_NONE);
6179 vport_start_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;
6181 vport_start_params.vport_id = vport_id;
6182 vport_start_params.mtu = mtu;
6185 QL_DPRINT2(ha, "Setting mtu to %d and VPORT ID = %d\n", mtu, vport_id);
6187 for_each_hwfn(cdev, i) {
6188 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
6190 vport_start_params.concrete_fid = p_hwfn->hw_info.concrete_fid;
6191 vport_start_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
6193 rc = ecore_sp_vport_start(p_hwfn, &vport_start_params);
6196 QL_DPRINT1(ha, "Failed to start VPORT V-PORT %d"
6197 " with MTU %d\n" , vport_id, mtu);
6201 ecore_hw_start_fastpath(p_hwfn);
6203 QL_DPRINT2(ha, "Started V-PORT %d with MTU %d\n",
6211 qlnx_update_vport(struct ecore_dev *cdev,
6212 struct qlnx_update_vport_params *params)
6214 struct ecore_sp_vport_update_params sp_params;
6215 int rc, i, j, fp_index;
6216 struct ecore_hwfn *p_hwfn;
6217 struct ecore_rss_params *rss;
6218 qlnx_host_t *ha = (qlnx_host_t *)cdev;
6219 struct qlnx_fastpath *fp;
6221 memset(&sp_params, 0, sizeof(sp_params));
6222 /* Translate protocol params into sp params */
6223 sp_params.vport_id = params->vport_id;
6225 sp_params.update_vport_active_rx_flg =
6226 params->update_vport_active_rx_flg;
6227 sp_params.vport_active_rx_flg = params->vport_active_rx_flg;
6229 sp_params.update_vport_active_tx_flg =
6230 params->update_vport_active_tx_flg;
6231 sp_params.vport_active_tx_flg = params->vport_active_tx_flg;
6233 sp_params.update_inner_vlan_removal_flg =
6234 params->update_inner_vlan_removal_flg;
6235 sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg;
6237 sp_params.sge_tpa_params = params->sge_tpa_params;
6239 /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns.
6240 * We need to re-fix the rss values per engine for CMT.
6242 if (params->rss_params->update_rss_config)
6243 sp_params.rss_params = params->rss_params;
6245 sp_params.rss_params = NULL;
6247 for_each_hwfn(cdev, i) {
6249 p_hwfn = &cdev->hwfns[i];
6251 if ((cdev->num_hwfns > 1) &&
6252 params->rss_params->update_rss_config &&
6253 params->rss_params->rss_enable) {
6255 rss = params->rss_params;
6257 for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE; j++) {
6259 fp_index = ((cdev->num_hwfns * j) + i) %
6262 fp = &ha->fp_array[fp_index];
6263 rss->rss_ind_table[j] = fp->rxq->handle;
6266 for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE;) {
6267 QL_DPRINT3(ha, "%p %p %p %p %p %p %p %p \n",
6268 rss->rss_ind_table[j],
6269 rss->rss_ind_table[j+1],
6270 rss->rss_ind_table[j+2],
6271 rss->rss_ind_table[j+3],
6272 rss->rss_ind_table[j+4],
6273 rss->rss_ind_table[j+5],
6274 rss->rss_ind_table[j+6],
6275 rss->rss_ind_table[j+7]);
6280 sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
6282 QL_DPRINT1(ha, "Update sp vport ID=%d\n", params->vport_id);
6284 rc = ecore_sp_vport_update(p_hwfn, &sp_params,
6285 ECORE_SPQ_MODE_EBLOCK, NULL);
6287 QL_DPRINT1(ha, "Failed to update VPORT\n");
6291 QL_DPRINT2(ha, "Updated V-PORT %d: tx_active_flag %d, \
6292 rx_active_flag %d [tx_update %d], [rx_update %d]\n",
6293 params->vport_id, params->vport_active_tx_flg,
6294 params->vport_active_rx_flg,
6295 params->update_vport_active_tx_flg,
6296 params->update_vport_active_rx_flg);
6303 qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq)
6305 struct eth_rx_bd *rx_bd_cons =
6306 ecore_chain_consume(&rxq->rx_bd_ring);
6307 struct eth_rx_bd *rx_bd_prod =
6308 ecore_chain_produce(&rxq->rx_bd_ring);
6309 struct sw_rx_data *sw_rx_data_cons =
6310 &rxq->sw_rx_ring[rxq->sw_rx_cons];
6311 struct sw_rx_data *sw_rx_data_prod =
6312 &rxq->sw_rx_ring[rxq->sw_rx_prod];
6314 sw_rx_data_prod->data = sw_rx_data_cons->data;
6315 memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd));
6317 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
6318 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
6324 qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, struct qlnx_rx_queue *rxq)
6330 struct eth_rx_prod_data rx_prod_data;
6334 bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
6335 cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
6337 /* Update producers */
6338 rx_prods.rx_prod_data.bd_prod = htole16(bd_prod);
6339 rx_prods.rx_prod_data.cqe_prod = htole16(cqe_prod);
6341 /* Make sure that the BD and SGE data is updated before updating the
6342 * producers since FW might read the BD/SGE right after the producer
6347 internal_ram_wr(p_hwfn, rxq->hw_rxq_prod_addr,
6348 sizeof(rx_prods), &rx_prods.data32);
6350 /* mmiowb is needed to synchronize doorbell writes from more than one
6351 * processor. It guarantees that the write arrives to the device before
6352 * the napi lock is released and another qlnx_poll is called (possibly
6353 * on another CPU). Without this barrier, the next doorbell can bypass
6354 * this doorbell. This is applicable to IA64/Altix systems.
6361 static uint32_t qlnx_hash_key[] = {
6362 ((0x6d << 24)|(0x5a << 16)|(0x56 << 8)|0xda),
6363 ((0x25 << 24)|(0x5b << 16)|(0x0e << 8)|0xc2),
6364 ((0x41 << 24)|(0x67 << 16)|(0x25 << 8)|0x3d),
6365 ((0x43 << 24)|(0xa3 << 16)|(0x8f << 8)|0xb0),
6366 ((0xd0 << 24)|(0xca << 16)|(0x2b << 8)|0xcb),
6367 ((0xae << 24)|(0x7b << 16)|(0x30 << 8)|0xb4),
6368 ((0x77 << 24)|(0xcb << 16)|(0x2d << 8)|0xa3),
6369 ((0x80 << 24)|(0x30 << 16)|(0xf2 << 8)|0x0c),
6370 ((0x6a << 24)|(0x42 << 16)|(0xb7 << 8)|0x3b),
6371 ((0xbe << 24)|(0xac << 16)|(0x01 << 8)|0xfa)};
6374 qlnx_start_queues(qlnx_host_t *ha)
6376 int rc, tc, i, vport_id = 0,
6377 drop_ttl0_flg = 1, vlan_removal_en = 1,
6378 tx_switching = 0, hw_lro_enable = 0;
6379 struct ecore_dev *cdev = &ha->cdev;
6380 struct ecore_rss_params *rss_params = &ha->rss_params;
6381 struct qlnx_update_vport_params vport_update_params;
6383 struct ecore_hwfn *p_hwfn;
6384 struct ecore_sge_tpa_params tpa_params;
6385 struct ecore_queue_start_common_params qparams;
6386 struct qlnx_fastpath *fp;
6390 QL_DPRINT1(ha, "Num RSS = %d\n", ha->num_rss);
6393 QL_DPRINT1(ha, "Cannot update V-VPORT as active as there"
6394 " are no Rx queues\n");
6398 #ifndef QLNX_SOFT_LRO
6399 hw_lro_enable = ifp->if_capenable & IFCAP_LRO;
6400 #endif /* #ifndef QLNX_SOFT_LRO */
6402 rc = qlnx_start_vport(cdev, vport_id, ifp->if_mtu, drop_ttl0_flg,
6403 vlan_removal_en, tx_switching, hw_lro_enable);
6406 QL_DPRINT1(ha, "Start V-PORT failed %d\n", rc);
6410 QL_DPRINT2(ha, "Start vport ramrod passed, "
6411 "vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
6412 vport_id, (int)(ifp->if_mtu + 0xe), vlan_removal_en);
6415 struct ecore_rxq_start_ret_params rx_ret_params;
6416 struct ecore_txq_start_ret_params tx_ret_params;
6418 fp = &ha->fp_array[i];
6419 p_hwfn = &cdev->hwfns[(fp->rss_id % cdev->num_hwfns)];
6421 bzero(&qparams, sizeof(struct ecore_queue_start_common_params));
6422 bzero(&rx_ret_params,
6423 sizeof (struct ecore_rxq_start_ret_params));
6425 qparams.queue_id = i ;
6426 qparams.vport_id = vport_id;
6427 qparams.stats_id = vport_id;
6428 qparams.p_sb = fp->sb_info;
6429 qparams.sb_idx = RX_PI;
6432 rc = ecore_eth_rx_queue_start(p_hwfn,
6433 p_hwfn->hw_info.opaque_fid,
6435 fp->rxq->rx_buf_size, /* bd_max_bytes */
6436 /* bd_chain_phys_addr */
6437 fp->rxq->rx_bd_ring.p_phys_addr,
6439 ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring),
6441 ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring),
6445 QL_DPRINT1(ha, "Start RXQ #%d failed %d\n", i, rc);
6449 fp->rxq->hw_rxq_prod_addr = rx_ret_params.p_prod;
6450 fp->rxq->handle = rx_ret_params.p_handle;
6451 fp->rxq->hw_cons_ptr =
6452 &fp->sb_info->sb_virt->pi_array[RX_PI];
6454 qlnx_update_rx_prod(p_hwfn, fp->rxq);
6456 for (tc = 0; tc < ha->num_tc; tc++) {
6457 struct qlnx_tx_queue *txq = fp->txq[tc];
6460 sizeof(struct ecore_queue_start_common_params));
6461 bzero(&tx_ret_params,
6462 sizeof (struct ecore_txq_start_ret_params));
6464 qparams.queue_id = txq->index / cdev->num_hwfns ;
6465 qparams.vport_id = vport_id;
6466 qparams.stats_id = vport_id;
6467 qparams.p_sb = fp->sb_info;
6468 qparams.sb_idx = TX_PI(tc);
6470 rc = ecore_eth_tx_queue_start(p_hwfn,
6471 p_hwfn->hw_info.opaque_fid,
6473 /* bd_chain_phys_addr */
6474 ecore_chain_get_pbl_phys(&txq->tx_pbl),
6475 ecore_chain_get_page_cnt(&txq->tx_pbl),
6479 QL_DPRINT1(ha, "Start TXQ #%d failed %d\n",
6484 txq->doorbell_addr = tx_ret_params.p_doorbell;
6485 txq->handle = tx_ret_params.p_handle;
6488 &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
6489 SET_FIELD(txq->tx_db.data.params,
6490 ETH_DB_DATA_DEST, DB_DEST_XCM);
6491 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
6493 SET_FIELD(txq->tx_db.data.params,
6494 ETH_DB_DATA_AGG_VAL_SEL,
6495 DQ_XCM_ETH_TX_BD_PROD_CMD);
6497 txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
6501 /* Fill struct with RSS params */
6502 if (ha->num_rss > 1) {
6504 rss_params->update_rss_config = 1;
6505 rss_params->rss_enable = 1;
6506 rss_params->update_rss_capabilities = 1;
6507 rss_params->update_rss_ind_table = 1;
6508 rss_params->update_rss_key = 1;
6509 rss_params->rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 |
6510 ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP;
6511 rss_params->rss_table_size_log = 7; /* 2^7 = 128 */
6513 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
6514 fp = &ha->fp_array[(i % ha->num_rss)];
6515 rss_params->rss_ind_table[i] = fp->rxq->handle;
6518 for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
6519 rss_params->rss_key[i] = (__le32)qlnx_hash_key[i];
6522 memset(rss_params, 0, sizeof(*rss_params));
6526 /* Prepare and send the vport enable */
6527 memset(&vport_update_params, 0, sizeof(vport_update_params));
6528 vport_update_params.vport_id = vport_id;
6529 vport_update_params.update_vport_active_tx_flg = 1;
6530 vport_update_params.vport_active_tx_flg = 1;
6531 vport_update_params.update_vport_active_rx_flg = 1;
6532 vport_update_params.vport_active_rx_flg = 1;
6533 vport_update_params.rss_params = rss_params;
6534 vport_update_params.update_inner_vlan_removal_flg = 1;
6535 vport_update_params.inner_vlan_removal_flg = 1;
6537 if (hw_lro_enable) {
6538 memset(&tpa_params, 0, sizeof (struct ecore_sge_tpa_params));
6540 tpa_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;
6542 tpa_params.update_tpa_en_flg = 1;
6543 tpa_params.tpa_ipv4_en_flg = 1;
6544 tpa_params.tpa_ipv6_en_flg = 1;
6546 tpa_params.update_tpa_param_flg = 1;
6547 tpa_params.tpa_pkt_split_flg = 0;
6548 tpa_params.tpa_hdr_data_split_flg = 0;
6549 tpa_params.tpa_gro_consistent_flg = 0;
6550 tpa_params.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
6551 tpa_params.tpa_max_size = (uint16_t)(-1);
6552 tpa_params.tpa_min_size_to_start = ifp->if_mtu/2;
6553 tpa_params.tpa_min_size_to_cont = ifp->if_mtu/2;
6555 vport_update_params.sge_tpa_params = &tpa_params;
6558 rc = qlnx_update_vport(cdev, &vport_update_params);
6560 QL_DPRINT1(ha, "Update V-PORT failed %d\n", rc);
6568 qlnx_drain_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6569 struct qlnx_tx_queue *txq)
6571 uint16_t hw_bd_cons;
6572 uint16_t ecore_cons_idx;
6574 QL_DPRINT2(ha, "enter\n");
6576 hw_bd_cons = le16toh(*txq->hw_cons_ptr);
6578 while (hw_bd_cons !=
6579 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
6581 mtx_lock(&fp->tx_mtx);
6583 (void)qlnx_tx_int(ha, fp, txq);
6585 mtx_unlock(&fp->tx_mtx);
6587 qlnx_mdelay(__func__, 2);
6589 hw_bd_cons = le16toh(*txq->hw_cons_ptr);
6592 QL_DPRINT2(ha, "[%d, %d]: done\n", fp->rss_id, txq->index);
6598 qlnx_stop_queues(qlnx_host_t *ha)
6600 struct qlnx_update_vport_params vport_update_params;
6601 struct ecore_dev *cdev;
6602 struct qlnx_fastpath *fp;
6607 /* Disable the vport */
6609 memset(&vport_update_params, 0, sizeof(vport_update_params));
6611 vport_update_params.vport_id = 0;
6612 vport_update_params.update_vport_active_tx_flg = 1;
6613 vport_update_params.vport_active_tx_flg = 0;
6614 vport_update_params.update_vport_active_rx_flg = 1;
6615 vport_update_params.vport_active_rx_flg = 0;
6616 vport_update_params.rss_params = &ha->rss_params;
6617 vport_update_params.rss_params->update_rss_config = 0;
6618 vport_update_params.rss_params->rss_enable = 0;
6619 vport_update_params.update_inner_vlan_removal_flg = 0;
6620 vport_update_params.inner_vlan_removal_flg = 0;
6622 QL_DPRINT1(ha, "Update vport ID= %d\n", vport_update_params.vport_id);
6624 rc = qlnx_update_vport(cdev, &vport_update_params);
6626 QL_DPRINT1(ha, "Failed to update vport\n");
6630 /* Flush Tx queues. If needed, request drain from MCP */
6632 fp = &ha->fp_array[i];
6634 for (tc = 0; tc < ha->num_tc; tc++) {
6635 struct qlnx_tx_queue *txq = fp->txq[tc];
6637 rc = qlnx_drain_txq(ha, fp, txq);
6643 /* Stop all Queues in reverse order*/
6644 for (i = ha->num_rss - 1; i >= 0; i--) {
6646 struct ecore_hwfn *p_hwfn = &cdev->hwfns[(i % cdev->num_hwfns)];
6648 fp = &ha->fp_array[i];
6650 /* Stop the Tx Queue(s)*/
6651 for (tc = 0; tc < ha->num_tc; tc++) {
6654 tx_queue_id = tc * ha->num_rss + i;
6655 rc = ecore_eth_tx_queue_stop(p_hwfn,
6656 fp->txq[tc]->handle);
6659 QL_DPRINT1(ha, "Failed to stop TXQ #%d\n",
6665 /* Stop the Rx Queue*/
6666 rc = ecore_eth_rx_queue_stop(p_hwfn, fp->rxq->handle, false,
6669 QL_DPRINT1(ha, "Failed to stop RXQ #%d\n", i);
6674 /* Stop the vport */
6675 for_each_hwfn(cdev, i) {
6677 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
6679 rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 0);
6682 QL_DPRINT1(ha, "Failed to stop VPORT\n");
6691 qlnx_set_ucast_rx_mac(qlnx_host_t *ha,
6692 enum ecore_filter_opcode opcode,
6693 unsigned char mac[ETH_ALEN])
6695 struct ecore_filter_ucast ucast;
6696 struct ecore_dev *cdev;
6701 bzero(&ucast, sizeof(struct ecore_filter_ucast));
6703 ucast.opcode = opcode;
6704 ucast.type = ECORE_FILTER_MAC;
6705 ucast.is_rx_filter = 1;
6706 ucast.vport_to_add_to = 0;
6707 memcpy(&ucast.mac[0], mac, ETH_ALEN);
6709 rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);
6715 qlnx_remove_all_ucast_mac(qlnx_host_t *ha)
6717 struct ecore_filter_ucast ucast;
6718 struct ecore_dev *cdev;
6721 bzero(&ucast, sizeof(struct ecore_filter_ucast));
6723 ucast.opcode = ECORE_FILTER_REPLACE;
6724 ucast.type = ECORE_FILTER_MAC;
6725 ucast.is_rx_filter = 1;
6729 rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);
6735 qlnx_remove_all_mcast_mac(qlnx_host_t *ha)
6737 struct ecore_filter_mcast *mcast;
6738 struct ecore_dev *cdev;
6743 mcast = &ha->ecore_mcast;
6744 bzero(mcast, sizeof(struct ecore_filter_mcast));
6746 mcast->opcode = ECORE_FILTER_REMOVE;
6748 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
6750 if (ha->mcast[i].addr[0] || ha->mcast[i].addr[1] ||
6751 ha->mcast[i].addr[2] || ha->mcast[i].addr[3] ||
6752 ha->mcast[i].addr[4] || ha->mcast[i].addr[5]) {
6754 memcpy(&mcast->mac[i], &ha->mcast[i].addr[0], ETH_ALEN);
6755 mcast->num_mc_addrs++;
6758 mcast = &ha->ecore_mcast;
6760 rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);
6762 bzero(ha->mcast, (sizeof(qlnx_mcast_t) * QLNX_MAX_NUM_MULTICAST_ADDRS));
6769 qlnx_clean_filters(qlnx_host_t *ha)
6773 /* Remove all unicast macs */
6774 rc = qlnx_remove_all_ucast_mac(ha);
6778 /* Remove all multicast macs */
6779 rc = qlnx_remove_all_mcast_mac(ha);
6783 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_FLUSH, ha->primary_mac);
6789 qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter)
6791 struct ecore_filter_accept_flags accept;
6793 struct ecore_dev *cdev;
6797 bzero(&accept, sizeof(struct ecore_filter_accept_flags));
6799 accept.update_rx_mode_config = 1;
6800 accept.rx_accept_filter = filter;
6802 accept.update_tx_mode_config = 1;
6803 accept.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
6804 ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST;
6806 rc = ecore_filter_accept_cmd(cdev, 0, accept, false, false,
6807 ECORE_SPQ_MODE_CB, NULL);
6813 qlnx_set_rx_mode(qlnx_host_t *ha)
6818 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, ha->primary_mac);
6822 rc = qlnx_remove_all_mcast_mac(ha);
6826 filter = ECORE_ACCEPT_UCAST_MATCHED |
6827 ECORE_ACCEPT_MCAST_MATCHED |
6829 ha->filter = filter;
6831 rc = qlnx_set_rx_accept_filter(ha, filter);
6837 qlnx_set_link(qlnx_host_t *ha, bool link_up)
6840 struct ecore_dev *cdev;
6841 struct ecore_hwfn *hwfn;
6842 struct ecore_ptt *ptt;
6846 for_each_hwfn(cdev, i) {
6848 hwfn = &cdev->hwfns[i];
6850 ptt = ecore_ptt_acquire(hwfn);
6854 rc = ecore_mcp_set_link(hwfn, ptt, link_up);
6856 ecore_ptt_release(hwfn, ptt);
6864 #if __FreeBSD_version >= 1100000
6866 qlnx_get_counter(if_t ifp, ift_counter cnt)
6871 ha = (qlnx_host_t *)if_getsoftc(ifp);
6875 case IFCOUNTER_IPACKETS:
6876 count = ha->hw_stats.common.rx_ucast_pkts +
6877 ha->hw_stats.common.rx_mcast_pkts +
6878 ha->hw_stats.common.rx_bcast_pkts;
6881 case IFCOUNTER_IERRORS:
6882 count = ha->hw_stats.common.rx_crc_errors +
6883 ha->hw_stats.common.rx_align_errors +
6884 ha->hw_stats.common.rx_oversize_packets +
6885 ha->hw_stats.common.rx_undersize_packets;
6888 case IFCOUNTER_OPACKETS:
6889 count = ha->hw_stats.common.tx_ucast_pkts +
6890 ha->hw_stats.common.tx_mcast_pkts +
6891 ha->hw_stats.common.tx_bcast_pkts;
6894 case IFCOUNTER_OERRORS:
6895 count = ha->hw_stats.common.tx_err_drop_pkts;
6898 case IFCOUNTER_COLLISIONS:
6901 case IFCOUNTER_IBYTES:
6902 count = ha->hw_stats.common.rx_ucast_bytes +
6903 ha->hw_stats.common.rx_mcast_bytes +
6904 ha->hw_stats.common.rx_bcast_bytes;
6907 case IFCOUNTER_OBYTES:
6908 count = ha->hw_stats.common.tx_ucast_bytes +
6909 ha->hw_stats.common.tx_mcast_bytes +
6910 ha->hw_stats.common.tx_bcast_bytes;
6913 case IFCOUNTER_IMCASTS:
6914 count = ha->hw_stats.common.rx_mcast_bytes;
6917 case IFCOUNTER_OMCASTS:
6918 count = ha->hw_stats.common.tx_mcast_bytes;
6921 case IFCOUNTER_IQDROPS:
6922 case IFCOUNTER_OQDROPS:
6923 case IFCOUNTER_NOPROTO:
6926 return (if_get_counter_default(ifp, cnt));
6934 qlnx_timer(void *arg)
6938 ha = (qlnx_host_t *)arg;
6940 ecore_get_vport_stats(&ha->cdev, &ha->hw_stats);
6942 if (ha->storm_stats_gather)
6943 qlnx_sample_storm_stats(ha);
6945 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
6951 qlnx_load(qlnx_host_t *ha)
6955 struct ecore_dev *cdev;
6961 QL_DPRINT2(ha, "enter\n");
6963 rc = qlnx_alloc_mem_arrays(ha);
6965 goto qlnx_load_exit0;
6969 rc = qlnx_alloc_mem_load(ha);
6971 goto qlnx_load_exit1;
6973 QL_DPRINT2(ha, "Allocated %d RSS queues on %d TC/s\n",
6974 ha->num_rss, ha->num_tc);
6976 for (i = 0; i < ha->num_rss; i++) {
6978 if ((rc = bus_setup_intr(dev, ha->irq_vec[i].irq,
6979 (INTR_TYPE_NET | INTR_MPSAFE),
6980 NULL, qlnx_fp_isr, &ha->irq_vec[i],
6981 &ha->irq_vec[i].handle))) {
6983 QL_DPRINT1(ha, "could not setup interrupt\n");
6984 goto qlnx_load_exit2;
6987 QL_DPRINT2(ha, "rss_id = %d irq_rid %d \
6988 irq %p handle %p\n", i,
6989 ha->irq_vec[i].irq_rid,
6990 ha->irq_vec[i].irq, ha->irq_vec[i].handle);
6992 bus_bind_intr(dev, ha->irq_vec[i].irq, (i % mp_ncpus));
6995 rc = qlnx_start_queues(ha);
6997 goto qlnx_load_exit2;
6999 QL_DPRINT2(ha, "Start VPORT, RXQ and TXQ succeeded\n");
7001 /* Add primary mac and set Rx filters */
7002 rc = qlnx_set_rx_mode(ha);
7004 goto qlnx_load_exit2;
7006 /* Ask for link-up using current configuration */
7007 qlnx_set_link(ha, true);
7009 ha->state = QLNX_STATE_OPEN;
7011 bzero(&ha->hw_stats, sizeof(struct ecore_eth_stats));
7013 if (ha->flags.callout_init)
7014 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
7016 goto qlnx_load_exit0;
7019 qlnx_free_mem_load(ha);
7025 QL_DPRINT2(ha, "exit [%d]\n", rc);
7030 qlnx_drain_soft_lro(qlnx_host_t *ha)
7032 #ifdef QLNX_SOFT_LRO
7040 if (ifp->if_capenable & IFCAP_LRO) {
7042 for (i = 0; i < ha->num_rss; i++) {
7044 struct qlnx_fastpath *fp = &ha->fp_array[i];
7045 struct lro_ctrl *lro;
7047 lro = &fp->rxq->lro;
7049 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
7051 tcp_lro_flush_all(lro);
7054 struct lro_entry *queued;
7056 while ((!SLIST_EMPTY(&lro->lro_active))){
7057 queued = SLIST_FIRST(&lro->lro_active);
7058 SLIST_REMOVE_HEAD(&lro->lro_active, next);
7059 tcp_lro_flush(lro, queued);
7062 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
7067 #endif /* #ifdef QLNX_SOFT_LRO */
7073 qlnx_unload(qlnx_host_t *ha)
7075 struct ecore_dev *cdev;
7082 QL_DPRINT2(ha, "enter\n");
7083 QL_DPRINT1(ha, " QLNX STATE = %d\n",ha->state);
7085 if (ha->state == QLNX_STATE_OPEN) {
7087 qlnx_set_link(ha, false);
7088 qlnx_clean_filters(ha);
7089 qlnx_stop_queues(ha);
7090 ecore_hw_stop_fastpath(cdev);
7092 for (i = 0; i < ha->num_rss; i++) {
7093 if (ha->irq_vec[i].handle) {
7094 (void)bus_teardown_intr(dev,
7096 ha->irq_vec[i].handle);
7097 ha->irq_vec[i].handle = NULL;
7101 qlnx_drain_fp_taskqueues(ha);
7102 qlnx_drain_soft_lro(ha);
7103 qlnx_free_mem_load(ha);
7106 if (ha->flags.callout_init)
7107 callout_drain(&ha->qlnx_callout);
7109 qlnx_mdelay(__func__, 1000);
7111 ha->state = QLNX_STATE_CLOSED;
7113 QL_DPRINT2(ha, "exit\n");
7118 qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
7121 struct ecore_hwfn *p_hwfn;
7122 struct ecore_ptt *p_ptt;
7124 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
7126 p_hwfn = &ha->cdev.hwfns[hwfn_index];
7127 p_ptt = ecore_ptt_acquire(p_hwfn);
7130 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
7134 rval = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);
7136 if (rval == DBG_STATUS_OK)
7139 QL_DPRINT1(ha, "ecore_dbg_grc_get_dump_buf_size failed"
7143 ecore_ptt_release(p_hwfn, p_ptt);
7149 qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
7152 struct ecore_hwfn *p_hwfn;
7153 struct ecore_ptt *p_ptt;
7155 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
7157 p_hwfn = &ha->cdev.hwfns[hwfn_index];
7158 p_ptt = ecore_ptt_acquire(p_hwfn);
7161 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
7165 rval = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);
7167 if (rval == DBG_STATUS_OK)
7170 QL_DPRINT1(ha, "ecore_dbg_idle_chk_get_dump_buf_size failed"
7174 ecore_ptt_release(p_hwfn, p_ptt);
7181 qlnx_sample_storm_stats(qlnx_host_t *ha)
7184 struct ecore_dev *cdev;
7185 qlnx_storm_stats_t *s_stats;
7187 struct ecore_ptt *p_ptt;
7188 struct ecore_hwfn *hwfn;
7190 if (ha->storm_stats_index >= QLNX_STORM_STATS_SAMPLES_PER_HWFN) {
7191 ha->storm_stats_gather = 0;
7197 for_each_hwfn(cdev, i) {
7199 hwfn = &cdev->hwfns[i];
7201 p_ptt = ecore_ptt_acquire(hwfn);
7205 index = ha->storm_stats_index +
7206 (i * QLNX_STORM_STATS_SAMPLES_PER_HWFN);
7208 s_stats = &ha->storm_stats[index];
7211 reg = XSEM_REG_FAST_MEMORY +
7212 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7213 s_stats->xstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7215 reg = XSEM_REG_FAST_MEMORY +
7216 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7217 s_stats->xstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7219 reg = XSEM_REG_FAST_MEMORY +
7220 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7221 s_stats->xstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7223 reg = XSEM_REG_FAST_MEMORY +
7224 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7225 s_stats->xstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7228 reg = YSEM_REG_FAST_MEMORY +
7229 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7230 s_stats->ystorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7232 reg = YSEM_REG_FAST_MEMORY +
7233 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7234 s_stats->ystorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7236 reg = YSEM_REG_FAST_MEMORY +
7237 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7238 s_stats->ystorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7240 reg = YSEM_REG_FAST_MEMORY +
7241 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7242 s_stats->ystorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7245 reg = PSEM_REG_FAST_MEMORY +
7246 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7247 s_stats->pstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7249 reg = PSEM_REG_FAST_MEMORY +
7250 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7251 s_stats->pstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7253 reg = PSEM_REG_FAST_MEMORY +
7254 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7255 s_stats->pstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7257 reg = PSEM_REG_FAST_MEMORY +
7258 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7259 s_stats->pstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7262 reg = TSEM_REG_FAST_MEMORY +
7263 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7264 s_stats->tstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7266 reg = TSEM_REG_FAST_MEMORY +
7267 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7268 s_stats->tstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7270 reg = TSEM_REG_FAST_MEMORY +
7271 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7272 s_stats->tstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7274 reg = TSEM_REG_FAST_MEMORY +
7275 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7276 s_stats->tstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7279 reg = MSEM_REG_FAST_MEMORY +
7280 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7281 s_stats->mstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7283 reg = MSEM_REG_FAST_MEMORY +
7284 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7285 s_stats->mstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7287 reg = MSEM_REG_FAST_MEMORY +
7288 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7289 s_stats->mstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7291 reg = MSEM_REG_FAST_MEMORY +
7292 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7293 s_stats->mstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7296 reg = USEM_REG_FAST_MEMORY +
7297 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7298 s_stats->ustorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7300 reg = USEM_REG_FAST_MEMORY +
7301 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7302 s_stats->ustorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7304 reg = USEM_REG_FAST_MEMORY +
7305 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7306 s_stats->ustorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7308 reg = USEM_REG_FAST_MEMORY +
7309 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7310 s_stats->ustorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7312 ecore_ptt_release(hwfn, p_ptt);
7315 ha->storm_stats_index++;
7321 * Name: qlnx_dump_buf8
7322 * Function: dumps a buffer as bytes
7325 qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, uint32_t len)
7334 device_printf(dev, "%s: %s 0x%x dump start\n", __func__, msg, len);
7337 device_printf(dev,"0x%08x:"
7338 " %02x %02x %02x %02x %02x %02x %02x %02x"
7339 " %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
7340 buf[0], buf[1], buf[2], buf[3],
7341 buf[4], buf[5], buf[6], buf[7],
7342 buf[8], buf[9], buf[10], buf[11],
7343 buf[12], buf[13], buf[14], buf[15]);
7350 device_printf(dev,"0x%08x: %02x\n", i, buf[0]);
7353 device_printf(dev,"0x%08x: %02x %02x\n", i, buf[0], buf[1]);
7356 device_printf(dev,"0x%08x: %02x %02x %02x\n",
7357 i, buf[0], buf[1], buf[2]);
7360 device_printf(dev,"0x%08x: %02x %02x %02x %02x\n", i,
7361 buf[0], buf[1], buf[2], buf[3]);
7364 device_printf(dev,"0x%08x:"
7365 " %02x %02x %02x %02x %02x\n", i,
7366 buf[0], buf[1], buf[2], buf[3], buf[4]);
7369 device_printf(dev,"0x%08x:"
7370 " %02x %02x %02x %02x %02x %02x\n", i,
7371 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
7374 device_printf(dev,"0x%08x:"
7375 " %02x %02x %02x %02x %02x %02x %02x\n", i,
7376 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]);
7379 device_printf(dev,"0x%08x:"
7380 " %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
7381 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7385 device_printf(dev,"0x%08x:"
7386 " %02x %02x %02x %02x %02x %02x %02x %02x"
7388 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7392 device_printf(dev,"0x%08x:"
7393 " %02x %02x %02x %02x %02x %02x %02x %02x"
7395 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7396 buf[7], buf[8], buf[9]);
7399 device_printf(dev,"0x%08x:"
7400 " %02x %02x %02x %02x %02x %02x %02x %02x"
7401 " %02x %02x %02x\n", i,
7402 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7403 buf[7], buf[8], buf[9], buf[10]);
7406 device_printf(dev,"0x%08x:"
7407 " %02x %02x %02x %02x %02x %02x %02x %02x"
7408 " %02x %02x %02x %02x\n", i,
7409 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7410 buf[7], buf[8], buf[9], buf[10], buf[11]);
7413 device_printf(dev,"0x%08x:"
7414 " %02x %02x %02x %02x %02x %02x %02x %02x"
7415 " %02x %02x %02x %02x %02x\n", i,
7416 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7417 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12]);
7420 device_printf(dev,"0x%08x:"
7421 " %02x %02x %02x %02x %02x %02x %02x %02x"
7422 " %02x %02x %02x %02x %02x %02x\n", i,
7423 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7424 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12],
7428 device_printf(dev,"0x%08x:"
7429 " %02x %02x %02x %02x %02x %02x %02x %02x"
7430 " %02x %02x %02x %02x %02x %02x %02x\n", i,
7431 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7432 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12],
7439 device_printf(dev, "%s: %s dump end\n", __func__, msg);