/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Author : David C Somayajulu, Cavium, Inc., San Jose, CA 95131.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "ecore_gtt_reg_addr.h"
#include "ecore_chain.h"
#include "ecore_status.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_int.h"
#include "ecore_cxt.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"
#include "ecore_dev_api.h"
#include "ecore_l2_api.h"
#include "ecore_mcp.h"
#include "ecore_hw_defs.h"
#include "mcp_public.h"
#include "ecore_iro.h"
#include "ecore_dbg_fw_funcs.h"
#include "ecore_iov_api.h"
#include "ecore_vf_api.h"

#include "qlnx_ioctl.h"

#ifdef QLNX_ENABLE_IWARP
#include "qlnx_rdma.h"
#endif /* #ifdef QLNX_ENABLE_IWARP */
/*
 * ioctl related functions
 */
static void qlnx_add_sysctls(qlnx_host_t *ha);

static void qlnx_release(qlnx_host_t *ha);
static void qlnx_fp_isr(void *arg);
static void qlnx_init_ifnet(device_t dev, qlnx_host_t *ha);
static void qlnx_init(void *arg);
static void qlnx_init_locked(qlnx_host_t *ha);
static int qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi);
static int qlnx_set_promisc(qlnx_host_t *ha);
static int qlnx_set_allmulti(qlnx_host_t *ha);
static int qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int qlnx_media_change(struct ifnet *ifp);
static void qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
static void qlnx_stop(qlnx_host_t *ha);
static int qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp,
		struct mbuf **m_headp);
static int qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha);
static uint32_t qlnx_get_optics(qlnx_host_t *ha,
		struct qlnx_link_output *if_link);
static int qlnx_transmit(struct ifnet *ifp, struct mbuf *mp);
static int qlnx_transmit_locked(struct ifnet *ifp, struct qlnx_fastpath *fp,
		struct mbuf *mp);
static void qlnx_qflush(struct ifnet *ifp);

static int qlnx_alloc_parent_dma_tag(qlnx_host_t *ha);
static void qlnx_free_parent_dma_tag(qlnx_host_t *ha);
static int qlnx_alloc_tx_dma_tag(qlnx_host_t *ha);
static void qlnx_free_tx_dma_tag(qlnx_host_t *ha);
static int qlnx_alloc_rx_dma_tag(qlnx_host_t *ha);
static void qlnx_free_rx_dma_tag(qlnx_host_t *ha);

static int qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver);
static int qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size);

static int qlnx_nic_setup(struct ecore_dev *cdev,
		struct ecore_pf_params *func_params);
static int qlnx_nic_start(struct ecore_dev *cdev);
static int qlnx_slowpath_start(qlnx_host_t *ha);
static int qlnx_slowpath_stop(qlnx_host_t *ha);
static int qlnx_init_hw(qlnx_host_t *ha);
static void qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
		char ver_str[VER_SIZE]);
static void qlnx_unload(qlnx_host_t *ha);
static int qlnx_load(qlnx_host_t *ha);
static void qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
		uint32_t add_multi);
static void qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf,
		uint32_t len);
static int qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq);
static void qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq);
static void qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn,
		struct qlnx_rx_queue *rxq);
static int qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter);
static int qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords,
		int hwfn_index);
static int qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords,
		int hwfn_index);
static void qlnx_timer(void *arg);
static int qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
static void qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
static void qlnx_trigger_dump(qlnx_host_t *ha);
static uint16_t qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp,
		struct qlnx_tx_queue *txq);
static void qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
		struct qlnx_tx_queue *txq);
static int qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
		int lro_enable);
static void qlnx_fp_taskqueue(void *context, int pending);
static void qlnx_sample_storm_stats(qlnx_host_t *ha);
static int qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
		struct qlnx_agg_info *tpa);
static void qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa);

#if __FreeBSD_version >= 1100000
static uint64_t qlnx_get_counter(if_t ifp, ift_counter cnt);
#endif
/*
 * Hooks to the Operating Systems
 */
static int qlnx_pci_probe (device_t);
static int qlnx_pci_attach (device_t);
static int qlnx_pci_detach (device_t);

#ifdef CONFIG_ECORE_SRIOV

static int qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params);
static void qlnx_iov_uninit(device_t dev);
static int qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params);
static void qlnx_initialize_sriov(qlnx_host_t *ha);
static void qlnx_pf_taskqueue(void *context, int pending);
static int qlnx_create_pf_taskqueues(qlnx_host_t *ha);
static void qlnx_destroy_pf_taskqueues(qlnx_host_t *ha);
static void qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha);

#endif /* #ifdef CONFIG_ECORE_SRIOV */
static device_method_t qlnx_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, qlnx_pci_probe),
	DEVMETHOD(device_attach, qlnx_pci_attach),
	DEVMETHOD(device_detach, qlnx_pci_detach),

#ifdef CONFIG_ECORE_SRIOV
	DEVMETHOD(pci_iov_init, qlnx_iov_init),
	DEVMETHOD(pci_iov_uninit, qlnx_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, qlnx_iov_add_vf),
#endif /* #ifdef CONFIG_ECORE_SRIOV */
};

static driver_t qlnx_pci_driver = {
	"ql", qlnx_pci_methods, sizeof (qlnx_host_t),
};

static devclass_t qlnx_devclass;

MODULE_VERSION(if_qlnxe, 1);
DRIVER_MODULE(if_qlnxe, pci, qlnx_pci_driver, qlnx_devclass, 0, 0);

MODULE_DEPEND(if_qlnxe, pci, 1, 1, 1);
MODULE_DEPEND(if_qlnxe, ether, 1, 1, 1);
#ifdef QLNX_VF

static device_method_t qlnxv_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, qlnx_pci_probe),
	DEVMETHOD(device_attach, qlnx_pci_attach),
	DEVMETHOD(device_detach, qlnx_pci_detach),
};

static driver_t qlnxv_pci_driver = {
	"ql", qlnxv_pci_methods, sizeof (qlnx_host_t),
};

static devclass_t qlnxv_devclass;

MODULE_VERSION(if_qlnxev, 1);
DRIVER_MODULE(if_qlnxev, pci, qlnxv_pci_driver, qlnxv_devclass, 0, 0);

MODULE_DEPEND(if_qlnxev, pci, 1, 1, 1);
MODULE_DEPEND(if_qlnxev, ether, 1, 1, 1);

#endif /* #ifdef QLNX_VF */

MALLOC_DEFINE(M_QLNXBUF, "qlnxbuf", "Buffers for qlnx driver");

char qlnx_dev_str[128];
char qlnx_ver_str[VER_SIZE];
char qlnx_name_str[NAME_SIZE];
/*
 * Some PCI Configuration Space Related Defines
 */

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC		0x1077
#endif

/* 40G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1634
#define QLOGIC_PCI_DEVICE_ID_1634	0x1634
#endif

/* 100G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1644
#define QLOGIC_PCI_DEVICE_ID_1644	0x1644
#endif

/* 25G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1656
#define QLOGIC_PCI_DEVICE_ID_1656	0x1656
#endif

/* 50G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1654
#define QLOGIC_PCI_DEVICE_ID_1654	0x1654
#endif

/* 10G/25G/40G Adapter QLE41xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_8070
#define QLOGIC_PCI_DEVICE_ID_8070	0x8070
#endif

/* SRIOV Device (All Speeds) Adapter QLE41xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_8090
#define QLOGIC_PCI_DEVICE_ID_8090	0x8090
#endif

SYSCTL_NODE(_hw, OID_AUTO, qlnxe, CTLFLAG_RD, 0, "qlnxe driver parameters");

/* Number of Queues: 0 (Auto) or 1 to 32 (fixed queue number) */
static int qlnxe_queue_count = QLNX_DEFAULT_RSS;

#if __FreeBSD_version < 1100000
TUNABLE_INT("hw.qlnxe.queue_count", &qlnxe_queue_count);
#endif

SYSCTL_INT(_hw_qlnxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
	&qlnxe_queue_count, 0, "Multi-Queue queue count");

/*
 * Note on RDMA personality setting
 *
 * Read the personality configured in NVRAM.
 * If the personality is ETH_ONLY, ETH_IWARP or ETH_ROCE and
 * the configured personality in sysctl is QLNX_PERSONALITY_DEFAULT,
 * use the personality in NVRAM.
 * Otherwise use the personality configured in sysctl.
 */
#define QLNX_PERSONALITY_DEFAULT	0x0  /* use personality in NVRAM */
#define QLNX_PERSONALITY_ETH_ONLY	0x1  /* Override with ETH_ONLY */
#define QLNX_PERSONALITY_ETH_IWARP	0x2  /* Override with ETH_IWARP */
#define QLNX_PERSONALITY_ETH_ROCE	0x3  /* Override with ETH_ROCE */
#define QLNX_PERSONALITY_BITS_PER_FUNC	4
#define QLNX_PERSONALITY_MASK		0xF

/* RDMA configuration; 64-bit field allows setting for 16 physical functions */
static uint64_t qlnxe_rdma_configuration = 0x22222222;
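
/*
 * Example (illustrative): with the default value 0x22222222, PCI functions
 * 0..7 each read personality nibble 0x2 (QLNX_PERSONALITY_ETH_IWARP) from
 * bits [4*pci_func + 3 : 4*pci_func] of qlnxe_rdma_configuration, while
 * functions 8..15 read 0x0 (QLNX_PERSONALITY_DEFAULT, i.e. take the
 * personality from NVRAM). qlnx_get_personality() below performs exactly
 * this extraction.
 */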
#if __FreeBSD_version < 1100000

TUNABLE_QUAD("hw.qlnxe.rdma_configuration", &qlnxe_rdma_configuration);

SYSCTL_UQUAD(_hw_qlnxe, OID_AUTO, rdma_configuration, CTLFLAG_RDTUN,
	&qlnxe_rdma_configuration, 0, "RDMA Configuration");

#else

SYSCTL_U64(_hw_qlnxe, OID_AUTO, rdma_configuration, CTLFLAG_RDTUN,
	&qlnxe_rdma_configuration, 0, "RDMA Configuration");

#endif /* #if __FreeBSD_version < 1100000 */

qlnx_vf_device(qlnx_host_t *ha)

	device_id = ha->device_id;

	if (device_id == QLOGIC_PCI_DEVICE_ID_8090)

qlnx_valid_device(qlnx_host_t *ha)

	device_id = ha->device_id;

	if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_1644) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_1654) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_8070))

	if (device_id == QLOGIC_PCI_DEVICE_ID_8090)

#endif /* #ifndef QLNX_VF */

#ifdef QLNX_ENABLE_IWARP

qlnx_rdma_supported(struct qlnx_host *ha)

	device_id = pci_get_device(ha->pci_dev);

	if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_1654) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_8070))

#endif /* #ifdef QLNX_ENABLE_IWARP */
/*
 * Name: qlnx_pci_probe
 * Function: Validate the PCI device to be a QLA80XX device
 */
qlnx_pci_probe(device_t dev)

	snprintf(qlnx_ver_str, sizeof(qlnx_ver_str), "v%d.%d.%d",
		QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, QLNX_VERSION_BUILD);
	snprintf(qlnx_name_str, sizeof(qlnx_name_str), "qlnx");

	if (pci_get_vendor(dev) != PCI_VENDOR_QLOGIC) {
		return (ENXIO);
	}

	switch (pci_get_device(dev)) {
#ifndef QLNX_VF

	case QLOGIC_PCI_DEVICE_ID_1644:
		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
			"Qlogic 100GbE PCI CNA Adapter-Ethernet Function",
			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
			QLNX_VERSION_BUILD);
		device_set_desc_copy(dev, qlnx_dev_str);
		break;

	case QLOGIC_PCI_DEVICE_ID_1634:
		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
			"Qlogic 40GbE PCI CNA Adapter-Ethernet Function",
			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
			QLNX_VERSION_BUILD);
		device_set_desc_copy(dev, qlnx_dev_str);
		break;

	case QLOGIC_PCI_DEVICE_ID_1656:
		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
			"Qlogic 25GbE PCI CNA Adapter-Ethernet Function",
			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
			QLNX_VERSION_BUILD);
		device_set_desc_copy(dev, qlnx_dev_str);
		break;

	case QLOGIC_PCI_DEVICE_ID_1654:
		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
			"Qlogic 50GbE PCI CNA Adapter-Ethernet Function",
			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
			QLNX_VERSION_BUILD);
		device_set_desc_copy(dev, qlnx_dev_str);
		break;

	case QLOGIC_PCI_DEVICE_ID_8070:
		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
			"Qlogic 10GbE/25GbE/40GbE PCI CNA (AH)"
			" Adapter-Ethernet Function",
			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
			QLNX_VERSION_BUILD);
		device_set_desc_copy(dev, qlnx_dev_str);
		break;

#else
	case QLOGIC_PCI_DEVICE_ID_8090:
		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
			"Qlogic SRIOV PCI CNA (AH) "
			"Adapter-Ethernet Function",
			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
			QLNX_VERSION_BUILD);
		device_set_desc_copy(dev, qlnx_dev_str);
		break;

#endif /* #ifndef QLNX_VF */

	default:
		return (ENXIO);
	}

#ifdef QLNX_ENABLE_IWARP

#endif /* #ifdef QLNX_ENABLE_IWARP */

	return (BUS_PROBE_DEFAULT);
qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp,
	struct qlnx_tx_queue *txq)
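
	/*
	 * Both the hardware consumer index and the chain consumer index are
	 * 16-bit counters that wrap; when the hardware index has wrapped past
	 * the software index, the (1 << 16) correction below recovers the
	 * true number of pending completions.
	 */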
	uint16_t hw_bd_cons;
	uint16_t ecore_cons_idx;
	uint16_t diff;

	hw_bd_cons = le16toh(*txq->hw_cons_ptr);

	ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl);
	if (hw_bd_cons < ecore_cons_idx) {
		diff = (1 << 16) - (ecore_cons_idx - hw_bd_cons);
	} else {
		diff = hw_bd_cons - ecore_cons_idx;
	}

qlnx_sp_intr(void *arg)

	struct ecore_hwfn *p_hwfn;
	qlnx_host_t *ha;
	int i;

	p_hwfn = (struct ecore_hwfn *)arg;

	if (p_hwfn == NULL) {
		printf("%s: spurious slowpath intr\n", __func__);
		return;
	}

	ha = (qlnx_host_t *)p_hwfn->p_dev;

	QL_DPRINT2(ha, "enter\n");

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		if (&ha->cdev.hwfns[i] == p_hwfn) {
			taskqueue_enqueue(ha->sp_taskqueue[i], &ha->sp_task[i]);

	QL_DPRINT2(ha, "exit\n");
qlnx_sp_taskqueue(void *context, int pending)

	struct ecore_hwfn *p_hwfn;

	p_hwfn = (struct ecore_hwfn *)context;

	if (p_hwfn != NULL) {

qlnx_create_sp_taskqueues(qlnx_host_t *ha)

	int i;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];

		bzero(tq_name, sizeof (tq_name));
		snprintf(tq_name, sizeof (tq_name), "ql_sp_tq_%d", i);

		TASK_INIT(&ha->sp_task[i], 0, qlnx_sp_taskqueue, p_hwfn);

		ha->sp_taskqueue[i] = taskqueue_create(tq_name, M_NOWAIT,
			taskqueue_thread_enqueue, &ha->sp_taskqueue[i]);

		if (ha->sp_taskqueue[i] == NULL)

		taskqueue_start_threads(&ha->sp_taskqueue[i], 1, PI_NET, "%s",
			tq_name);

		QL_DPRINT1(ha, "%p\n", ha->sp_taskqueue[i]);

qlnx_destroy_sp_taskqueues(qlnx_host_t *ha)

	int i;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		if (ha->sp_taskqueue[i] != NULL) {
			taskqueue_drain(ha->sp_taskqueue[i], &ha->sp_task[i]);
			taskqueue_free(ha->sp_taskqueue[i]);

qlnx_fp_taskqueue(void *context, int pending)

	struct qlnx_fastpath *fp;
	qlnx_host_t *ha;
	struct ifnet *ifp;

	fp = (struct qlnx_fastpath *)context;

	ha = (qlnx_host_t *)fp->edev;
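
	/*
	 * Drain the tx buf_ring only if the tx mutex can be taken without
	 * blocking; if the mutex is busy, the current holder is already in
	 * the transmit path and will service the ring.
	 */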
	ifp = ha->ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		if (!drbr_empty(ifp, fp->tx_br)) {
			if (mtx_trylock(&fp->tx_mtx)) {
#ifdef QLNX_TRACE_PERF_DATA
				tx_pkts = fp->tx_pkts_transmitted;
				tx_compl = fp->tx_pkts_completed;
#endif

				qlnx_transmit_locked(ifp, fp, NULL);

#ifdef QLNX_TRACE_PERF_DATA
				fp->tx_pkts_trans_fp +=
					(fp->tx_pkts_transmitted - tx_pkts);
				fp->tx_pkts_compl_fp +=
					(fp->tx_pkts_completed - tx_compl);
#endif
				mtx_unlock(&fp->tx_mtx);

	QL_DPRINT2(ha, "exit\n");
qlnx_create_fp_taskqueues(qlnx_host_t *ha)

	int i;
	struct qlnx_fastpath *fp;

	for (i = 0; i < ha->num_rss; i++) {
		fp = &ha->fp_array[i];

		bzero(tq_name, sizeof (tq_name));
		snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i);

		TASK_INIT(&fp->fp_task, 0, qlnx_fp_taskqueue, fp);

		fp->fp_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
			taskqueue_thread_enqueue,
			&fp->fp_taskqueue);

		if (fp->fp_taskqueue == NULL)

		taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
			tq_name);

		QL_DPRINT1(ha, "%p\n", fp->fp_taskqueue);

qlnx_destroy_fp_taskqueues(qlnx_host_t *ha)

	int i;
	struct qlnx_fastpath *fp;

	for (i = 0; i < ha->num_rss; i++) {
		fp = &ha->fp_array[i];

		if (fp->fp_taskqueue != NULL) {
			taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
			taskqueue_free(fp->fp_taskqueue);
			fp->fp_taskqueue = NULL;

qlnx_drain_fp_taskqueues(qlnx_host_t *ha)

	int i;
	struct qlnx_fastpath *fp;

	for (i = 0; i < ha->num_rss; i++) {
		fp = &ha->fp_array[i];

		if (fp->fp_taskqueue != NULL) {
			taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);

qlnx_get_params(qlnx_host_t *ha)

	if ((qlnxe_queue_count < 0) || (qlnxe_queue_count > QLNX_MAX_RSS)) {
		device_printf(ha->pci_dev, "invalid queue_count value (%d)\n",
			qlnxe_queue_count);
		qlnxe_queue_count = 0;
qlnx_error_recovery_taskqueue(void *context, int pending)

	qlnx_host_t *ha;

	ha = (qlnx_host_t *)context;

	QL_DPRINT2(ha, "enter\n");

#ifdef QLNX_ENABLE_IWARP
	qlnx_rdma_dev_remove(ha);
#endif /* #ifdef QLNX_ENABLE_IWARP */

	qlnx_slowpath_stop(ha);
	qlnx_slowpath_start(ha);

#ifdef QLNX_ENABLE_IWARP
	qlnx_rdma_dev_add(ha);
#endif /* #ifdef QLNX_ENABLE_IWARP */

	callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);

	QL_DPRINT2(ha, "exit\n");

qlnx_create_error_recovery_taskqueue(qlnx_host_t *ha)

	bzero(tq_name, sizeof (tq_name));
	snprintf(tq_name, sizeof (tq_name), "ql_err_tq");

	TASK_INIT(&ha->err_task, 0, qlnx_error_recovery_taskqueue, ha);

	ha->err_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
		taskqueue_thread_enqueue, &ha->err_taskqueue);

	if (ha->err_taskqueue == NULL)

	taskqueue_start_threads(&ha->err_taskqueue, 1, PI_NET, "%s", tq_name);

	QL_DPRINT1(ha, "%p\n", ha->err_taskqueue);

qlnx_destroy_error_recovery_taskqueue(qlnx_host_t *ha)

	if (ha->err_taskqueue != NULL) {
		taskqueue_drain(ha->err_taskqueue, &ha->err_task);
		taskqueue_free(ha->err_taskqueue);
	}

	ha->err_taskqueue = NULL;
/*
 * Name: qlnx_pci_attach
 * Function: attaches the device to the operating system
 */
qlnx_pci_attach(device_t dev)

	qlnx_host_t *ha = NULL;
	uint32_t rsrc_len_reg = 0;
	uint32_t rsrc_len_dbells = 0;
	uint32_t rsrc_len_msix = 0;
	int i;
	uint32_t mfw_ver;
	uint32_t num_sp_msix = 0;
	uint32_t num_rdma_irqs = 0;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "cannot get softc\n");

	memset(ha, 0, sizeof (qlnx_host_t));

	ha->device_id = pci_get_device(dev);

	if (qlnx_valid_device(ha) != 0) {
		device_printf(dev, "device is not valid device\n");

	ha->pci_func = pci_get_function(dev);

	mtx_init(&ha->hw_lock, "qlnx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);

	ha->flags.lock_init = 1;

	pci_enable_busmaster(dev);

	ha->reg_rid = PCIR_BAR(0);
	ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
				RF_ACTIVE);

	if (ha->pci_reg == NULL) {
		device_printf(dev, "unable to map BAR0\n");
		goto qlnx_pci_attach_err;
	}

	rsrc_len_reg = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
					ha->reg_rid);

	ha->dbells_rid = PCIR_BAR(2);
	rsrc_len_dbells = (uint32_t) bus_get_resource_count(dev,
					SYS_RES_MEMORY, ha->dbells_rid);

	if (rsrc_len_dbells) {
		ha->pci_dbells = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
					&ha->dbells_rid, RF_ACTIVE);

		if (ha->pci_dbells == NULL) {
			device_printf(dev, "unable to map BAR1\n");
			goto qlnx_pci_attach_err;
		}

		ha->dbells_phys_addr = (uint64_t)
			bus_get_resource_start(dev, SYS_RES_MEMORY, ha->dbells_rid);

		ha->dbells_size = rsrc_len_dbells;
	} else {
		if (qlnx_vf_device(ha) != 0) {
			device_printf(dev, "BAR1 size is zero\n");
			goto qlnx_pci_attach_err;
		}
	}

	ha->msix_rid = PCIR_BAR(4);
	ha->msix_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
			&ha->msix_rid, RF_ACTIVE);

	if (ha->msix_bar == NULL) {
		device_printf(dev, "unable to map BAR2\n");
		goto qlnx_pci_attach_err;
	}

	rsrc_len_msix = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
				ha->msix_rid);

	ha->dbg_level = 0x0000;

	QL_DPRINT1(ha, "\n\t\t\t"
		"pci_dev = %p pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x"
		"\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x"
		"\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x"
		" msix_avail = 0x%x "
		"\n\t\t\t[ncpus = %d]\n",
		ha->pci_dev, ha->pci_reg, rsrc_len_reg,
		ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
		ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
		mp_ncpus);

	if (qlnx_alloc_parent_dma_tag(ha))
		goto qlnx_pci_attach_err;

	if (qlnx_alloc_tx_dma_tag(ha))
		goto qlnx_pci_attach_err;

	if (qlnx_alloc_rx_dma_tag(ha))
		goto qlnx_pci_attach_err;

	if (qlnx_init_hw(ha) != 0)
		goto qlnx_pci_attach_err;

	ha->flags.hw_init = 1;
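
	/*
	 * 100G adapters (device id 0x1644) default to the maximum RSS queue
	 * count unless a fixed queue count was configured via the tunable.
	 */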
	if ((pci_get_device(dev) == QLOGIC_PCI_DEVICE_ID_1644) &&
		(qlnxe_queue_count == QLNX_DEFAULT_RSS)) {
		qlnxe_queue_count = QLNX_MAX_RSS;
	}

	/*
	 * Allocate MSI-x vectors
	 */
	if (qlnx_vf_device(ha) != 0) {
		if (qlnxe_queue_count == 0)
			ha->num_rss = QLNX_DEFAULT_RSS;
		else
			ha->num_rss = qlnxe_queue_count;

		num_sp_msix = ha->cdev.num_hwfns;
	} else {
		uint8_t max_rxq;
		uint8_t max_txq;

		ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_rxq);
		ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_txq);

		if (max_rxq < max_txq)
			ha->num_rss = max_rxq;
		else
			ha->num_rss = max_txq;

		if (ha->num_rss > QLNX_MAX_VF_RSS)
			ha->num_rss = QLNX_MAX_VF_RSS;
	}

	if (ha->num_rss > mp_ncpus)
		ha->num_rss = mp_ncpus;

	ha->num_tc = QLNX_MAX_TC;

	ha->msix_count = pci_msix_count(dev);

#ifdef QLNX_ENABLE_IWARP

	num_rdma_irqs = qlnx_rdma_get_num_irqs(ha);

#endif /* #ifdef QLNX_ENABLE_IWARP */

	if (!ha->msix_count ||
		(ha->msix_count < (num_sp_msix + 1 + num_rdma_irqs))) {
		device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
			ha->msix_count);
		goto qlnx_pci_attach_err;
	}

	if (ha->msix_count > (ha->num_rss + num_sp_msix + num_rdma_irqs))
		ha->msix_count = ha->num_rss + num_sp_msix + num_rdma_irqs;
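
	/*
	 * Whatever vectors remain after the slowpath (one per hw function)
	 * and RDMA allocations are handed to the fastpath RSS queues.
	 */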
	ha->num_rss = ha->msix_count - (num_sp_msix + num_rdma_irqs);

	QL_DPRINT1(ha, "\n\t\t\t"
		"pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x"
		"\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x"
		"\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x"
		" msix_avail = 0x%x msix_alloc = 0x%x"
		"\n\t\t\t[ncpus = %d][num_rss = 0x%x] [num_tc = 0x%x]\n",
		ha->pci_reg, rsrc_len_reg,
		ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
		ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
		ha->msix_count, mp_ncpus, ha->num_rss, ha->num_tc);

	if (pci_alloc_msix(dev, &ha->msix_count)) {
		device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
			ha->msix_count);
		goto qlnx_pci_attach_err;
	}

	/*
	 * Initialize slow path interrupt and task queue
	 */

	if (qlnx_create_sp_taskqueues(ha) != 0)
		goto qlnx_pci_attach_err;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];

		ha->sp_irq_rid[i] = i + 1;
		ha->sp_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ,
				&ha->sp_irq_rid[i],
				(RF_ACTIVE | RF_SHAREABLE));
		if (ha->sp_irq[i] == NULL) {
			device_printf(dev,
				"could not allocate mbx interrupt\n");
			goto qlnx_pci_attach_err;
		}

		if (bus_setup_intr(dev, ha->sp_irq[i],
				(INTR_TYPE_NET | INTR_MPSAFE), NULL,
				qlnx_sp_intr, p_hwfn, &ha->sp_handle[i])) {
			device_printf(dev,
				"could not setup slow path interrupt\n");
			goto qlnx_pci_attach_err;
		}

		QL_DPRINT1(ha, "p_hwfn [%p] sp_irq_rid %d"
			" sp_irq %p sp_handle %p\n", p_hwfn,
			ha->sp_irq_rid[i], ha->sp_irq[i], ha->sp_handle[i]);
	}

	/*
	 * initialize fast path interrupt
	 */
	if (qlnx_create_fp_taskqueues(ha) != 0)
		goto qlnx_pci_attach_err;

	for (i = 0; i < ha->num_rss; i++) {
		ha->irq_vec[i].rss_idx = i;
		ha->irq_vec[i].ha = ha;
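
		/*
		 * MSI-X rids 1..num_sp_msix carry the slowpath interrupts;
		 * the fastpath vectors are laid out immediately after them.
		 */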
		ha->irq_vec[i].irq_rid = (1 + num_sp_msix) + i;

		ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
				&ha->irq_vec[i].irq_rid,
				(RF_ACTIVE | RF_SHAREABLE));

		if (ha->irq_vec[i].irq == NULL) {
			device_printf(dev,
				"could not allocate interrupt[%d] irq_rid = %d\n",
				i, ha->irq_vec[i].irq_rid);
			goto qlnx_pci_attach_err;
		}

		if (qlnx_alloc_tx_br(ha, &ha->fp_array[i])) {
			device_printf(dev, "could not allocate tx_br[%d]\n", i);
			goto qlnx_pci_attach_err;
		}
	}

	if (qlnx_vf_device(ha) != 0) {
		callout_init(&ha->qlnx_callout, 1);
		ha->flags.callout_init = 1;

		for (i = 0; i < ha->cdev.num_hwfns; i++) {
			if (qlnx_grc_dumpsize(ha, &ha->grcdump_size[i], i) != 0)
				goto qlnx_pci_attach_err;
			if (ha->grcdump_size[i] == 0)
				goto qlnx_pci_attach_err;
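
			/*
			 * ecore reports the GRC dump and idle-check sizes in
			 * dwords; the shifts below convert them to bytes.
			 */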
			ha->grcdump_size[i] = ha->grcdump_size[i] << 2;
			QL_DPRINT1(ha, "grcdump_size[%d] = 0x%08x\n",
				i, ha->grcdump_size[i]);

			ha->grcdump[i] = qlnx_zalloc(ha->grcdump_size[i]);
			if (ha->grcdump[i] == NULL) {
				device_printf(dev, "grcdump alloc[%d] failed\n", i);
				goto qlnx_pci_attach_err;
			}

			if (qlnx_idle_chk_size(ha, &ha->idle_chk_size[i], i) != 0)
				goto qlnx_pci_attach_err;
			if (ha->idle_chk_size[i] == 0)
				goto qlnx_pci_attach_err;

			ha->idle_chk_size[i] = ha->idle_chk_size[i] << 2;
			QL_DPRINT1(ha, "idle_chk_size[%d] = 0x%08x\n",
				i, ha->idle_chk_size[i]);

			ha->idle_chk[i] = qlnx_zalloc(ha->idle_chk_size[i]);

			if (ha->idle_chk[i] == NULL) {
				device_printf(dev, "idle_chk alloc failed\n");
				goto qlnx_pci_attach_err;
			}
		}

		if (qlnx_create_error_recovery_taskqueue(ha) != 0)
			goto qlnx_pci_attach_err;
	}

	if (qlnx_slowpath_start(ha) != 0)
		goto qlnx_pci_attach_err;

	ha->flags.slowpath_start = 1;

	if (qlnx_vf_device(ha) != 0) {
		if (qlnx_get_flash_size(ha, &ha->flash_size) != 0) {
			qlnx_mdelay(__func__, 1000);
			qlnx_trigger_dump(ha);

			goto qlnx_pci_attach_err0;
		}

		if (qlnx_get_mfw_version(ha, &mfw_ver) != 0) {
			qlnx_mdelay(__func__, 1000);
			qlnx_trigger_dump(ha);

			goto qlnx_pci_attach_err0;
		}
	} else {
		struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0];
		ecore_mcp_get_mfw_ver(p_hwfn, NULL, &mfw_ver, NULL);
	}
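
	/*
	 * The MFW version word packs major.minor.revision.engineering as one
	 * byte per field, most significant byte first.
	 */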
	snprintf(ha->mfw_ver, sizeof(ha->mfw_ver), "%d.%d.%d.%d",
		((mfw_ver >> 24) & 0xFF), ((mfw_ver >> 16) & 0xFF),
		((mfw_ver >> 8) & 0xFF), (mfw_ver & 0xFF));
	snprintf(ha->stormfw_ver, sizeof(ha->stormfw_ver), "%d.%d.%d.%d",
		FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
		FW_ENGINEERING_VERSION);

	QL_DPRINT1(ha, "STORM_FW version %s MFW version %s\n",
		ha->stormfw_ver, ha->mfw_ver);

	qlnx_init_ifnet(dev, ha);

	/*
	 * add sysctls
	 */
	qlnx_add_sysctls(ha);

qlnx_pci_attach_err0:
	/*
	 * create ioctl device interface
	 */
	if (qlnx_vf_device(ha) != 0) {
		if (qlnx_make_cdev(ha)) {
			device_printf(dev, "%s: qlnx_make_cdev failed\n",
				__func__);
			goto qlnx_pci_attach_err;
		}

#ifdef QLNX_ENABLE_IWARP
		qlnx_rdma_dev_add(ha);
#endif /* #ifdef QLNX_ENABLE_IWARP */
	}

#ifdef CONFIG_ECORE_SRIOV

	if (qlnx_vf_device(ha) != 0)
		qlnx_initialize_sriov(ha);

#endif /* #ifdef CONFIG_ECORE_SRIOV */
#endif /* #ifdef QLNX_VF */

	QL_DPRINT2(ha, "success\n");

	return (0);

qlnx_pci_attach_err:

	qlnx_release(ha);

	return (ENXIO);

/*
 * Name: qlnx_pci_detach
 * Function: Unhooks the device from the operating system
 */
qlnx_pci_detach(device_t dev)

	qlnx_host_t *ha = NULL;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "%s: cannot get softc\n", __func__);

	if (qlnx_vf_device(ha) != 0) {
#ifdef CONFIG_ECORE_SRIOV
		int ret;

		ret = pci_iov_detach(dev);
		if (ret) {
			device_printf(dev, "%s: SRIOV in use\n", __func__);
		}
#endif /* #ifdef CONFIG_ECORE_SRIOV */

#ifdef QLNX_ENABLE_IWARP
		if (qlnx_rdma_dev_remove(ha) != 0)
#endif /* #ifdef QLNX_ENABLE_IWARP */

#ifdef QLNX_ENABLE_IWARP

qlnx_get_personality(uint8_t pci_func)

	uint8_t personality;

	personality = (qlnxe_rdma_configuration >>
			(pci_func * QLNX_PERSONALITY_BITS_PER_FUNC)) &
			QLNX_PERSONALITY_MASK;
	return (personality);

qlnx_set_personality(qlnx_host_t *ha)

	struct ecore_hwfn *p_hwfn;
	uint8_t personality;

	p_hwfn = &ha->cdev.hwfns[0];

	personality = qlnx_get_personality(ha->pci_func);

	switch (personality) {
	case QLNX_PERSONALITY_DEFAULT:
		device_printf(ha->pci_dev, "%s: DEFAULT\n",
			__func__);
		ha->personality = ECORE_PCI_DEFAULT;
		break;

	case QLNX_PERSONALITY_ETH_ONLY:
		device_printf(ha->pci_dev, "%s: ETH_ONLY\n",
			__func__);
		ha->personality = ECORE_PCI_ETH;
		break;

	case QLNX_PERSONALITY_ETH_IWARP:
		device_printf(ha->pci_dev, "%s: ETH_IWARP\n",
			__func__);
		ha->personality = ECORE_PCI_ETH_IWARP;
		break;

	case QLNX_PERSONALITY_ETH_ROCE:
		device_printf(ha->pci_dev, "%s: ETH_ROCE\n",
			__func__);
		ha->personality = ECORE_PCI_ETH_ROCE;
		break;
	}

#endif /* #ifdef QLNX_ENABLE_IWARP */

qlnx_init_hw(qlnx_host_t *ha)

	struct ecore_hw_prepare_params params;

	ecore_init_struct(&ha->cdev);

	/* ha->dp_module = ECORE_MSG_PROBE |
	ha->dp_level = ECORE_LEVEL_VERBOSE;*/
	//ha->dp_module = ECORE_MSG_RDMA | ECORE_MSG_INTR | ECORE_MSG_LL2;
	ha->dp_level = ECORE_LEVEL_NOTICE;
	//ha->dp_level = ECORE_LEVEL_VERBOSE;

	ecore_init_dp(&ha->cdev, ha->dp_module, ha->dp_level, ha->pci_dev);

	ha->cdev.regview = ha->pci_reg;

	ha->personality = ECORE_PCI_DEFAULT;
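
	/*
	 * Note: qlnx_vf_device() returns 0 when this is the SR-IOV virtual
	 * function (device id 0x8090), so this branch configures the ecore
	 * device for VF operation.
	 */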
	if (qlnx_vf_device(ha) == 0) {
		ha->cdev.b_is_vf = true;

		if (ha->pci_dbells != NULL) {
			ha->cdev.doorbells = ha->pci_dbells;
			ha->cdev.db_phys_addr = ha->dbells_phys_addr;
			ha->cdev.db_size = ha->dbells_size;
		} else {
			ha->pci_dbells = ha->pci_reg;
		}
	} else {
		ha->cdev.doorbells = ha->pci_dbells;
		ha->cdev.db_phys_addr = ha->dbells_phys_addr;
		ha->cdev.db_size = ha->dbells_size;

#ifdef QLNX_ENABLE_IWARP

		if (qlnx_rdma_supported(ha) == 0)
			qlnx_set_personality(ha);

#endif /* #ifdef QLNX_ENABLE_IWARP */
	}

	QL_DPRINT2(ha, "%s: %s\n", __func__,
		(ha->personality == ECORE_PCI_ETH_IWARP ? "iwarp" : "ethernet"));

	bzero(&params, sizeof (struct ecore_hw_prepare_params));

	params.personality = ha->personality;

	params.drv_resc_alloc = false;
	params.chk_reg_fifo = false;
	params.initiate_pf_flr = true;

	ecore_hw_prepare(&ha->cdev, &params);

	qlnx_set_id(&ha->cdev, qlnx_name_str, qlnx_ver_str);

	QL_DPRINT1(ha, "ha = %p cdev = %p p_hwfn = %p\n",
		ha, &ha->cdev, &ha->cdev.hwfns[0]);
qlnx_release(qlnx_host_t *ha)

	device_t dev;
	int i;

	dev = ha->pci_dev;

	QL_DPRINT2(ha, "enter\n");

	for (i = 0; i < QLNX_MAX_HW_FUNCS; i++) {
		if (ha->idle_chk[i] != NULL) {
			free(ha->idle_chk[i], M_QLNXBUF);
			ha->idle_chk[i] = NULL;
		}

		if (ha->grcdump[i] != NULL) {
			free(ha->grcdump[i], M_QLNXBUF);
			ha->grcdump[i] = NULL;
		}
	}

	if (ha->flags.callout_init)
		callout_drain(&ha->qlnx_callout);

	if (ha->flags.slowpath_start) {
		qlnx_slowpath_stop(ha);
	}

	if (ha->flags.hw_init)
		ecore_hw_remove(&ha->cdev);

	if (ha->ifp != NULL)
		ether_ifdetach(ha->ifp);

	qlnx_free_tx_dma_tag(ha);

	qlnx_free_rx_dma_tag(ha);

	qlnx_free_parent_dma_tag(ha);

	if (qlnx_vf_device(ha) != 0) {
		qlnx_destroy_error_recovery_taskqueue(ha);
	}

	for (i = 0; i < ha->num_rss; i++) {
		struct qlnx_fastpath *fp = &ha->fp_array[i];

		if (ha->irq_vec[i].handle) {
			(void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
				ha->irq_vec[i].handle);
		}

		if (ha->irq_vec[i].irq) {
			(void)bus_release_resource(dev, SYS_RES_IRQ,
				ha->irq_vec[i].irq_rid,
				ha->irq_vec[i].irq);
		}

		qlnx_free_tx_br(ha, fp);
	}

	qlnx_destroy_fp_taskqueues(ha);

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		if (ha->sp_handle[i])
			(void)bus_teardown_intr(dev, ha->sp_irq[i],
				ha->sp_handle[i]);

		(void) bus_release_resource(dev, SYS_RES_IRQ,
			ha->sp_irq_rid[i], ha->sp_irq[i]);
	}

	qlnx_destroy_sp_taskqueues(ha);

	pci_release_msi(dev);

	if (ha->flags.lock_init) {
		mtx_destroy(&ha->hw_lock);
	}

	if (ha->pci_reg)
		(void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
			ha->pci_reg);

	if (ha->dbells_size && ha->pci_dbells)
		(void) bus_release_resource(dev, SYS_RES_MEMORY, ha->dbells_rid,
			ha->pci_dbells);

	if (ha->msix_bar)
		(void) bus_release_resource(dev, SYS_RES_MEMORY, ha->msix_rid,
			ha->msix_bar);

	QL_DPRINT2(ha, "exit\n");

qlnx_trigger_dump(qlnx_host_t *ha)

	int i;

	if (ha->ifp != NULL)
		ha->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);

	QL_DPRINT2(ha, "enter\n");

	if (qlnx_vf_device(ha) == 0)
		return;

	ha->error_recovery = 1;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		qlnx_grc_dump(ha, &ha->grcdump_dwords[i], i);
		qlnx_idle_chk(ha, &ha->idle_chk_dwords[i], i);
	}

	QL_DPRINT2(ha, "exit\n");

qlnx_trigger_dump_sysctl(SYSCTL_HANDLER_ARGS)

	int err, ret = 0;
	qlnx_host_t *ha;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	if (err || !req->newptr)
		return (err);

	ha = (qlnx_host_t *)arg1;
	qlnx_trigger_dump(ha);
qlnx_set_tx_coalesce(SYSCTL_HANDLER_ARGS)

	int err, i, ret = 0, usecs = 0;
	qlnx_host_t *ha;
	struct ecore_hwfn *p_hwfn;
	struct qlnx_fastpath *fp;

	err = sysctl_handle_int(oidp, &usecs, 0, req);
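
	/* The coalesce period is stored in a uint8_t, hence the 255 usec cap. */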
	if (err || !req->newptr || !usecs || (usecs > 255))
		return (err);

	ha = (qlnx_host_t *)arg1;

	if (qlnx_vf_device(ha) == 0)
		return (-1);

	for (i = 0; i < ha->num_rss; i++) {
		p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];

		fp = &ha->fp_array[i];

		if (fp->txq[0]->handle != NULL) {
			ret = ecore_set_queue_coalesce(p_hwfn, 0,
				(uint16_t)usecs, fp->txq[0]->handle);
		}
	}

	if (!ret)
		ha->tx_coalesce_usecs = (uint8_t)usecs;

	return (err);

qlnx_set_rx_coalesce(SYSCTL_HANDLER_ARGS)

	int err, i, ret = 0, usecs = 0;
	qlnx_host_t *ha;
	struct ecore_hwfn *p_hwfn;
	struct qlnx_fastpath *fp;

	err = sysctl_handle_int(oidp, &usecs, 0, req);
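
	/* Same 255 usec cap as the tx handler; the value lands in a uint8_t. */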
	if (err || !req->newptr || !usecs || (usecs > 255))
		return (err);

	ha = (qlnx_host_t *)arg1;

	if (qlnx_vf_device(ha) == 0)
		return (-1);

	for (i = 0; i < ha->num_rss; i++) {
		p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];

		fp = &ha->fp_array[i];

		if (fp->rxq->handle != NULL) {
			ret = ecore_set_queue_coalesce(p_hwfn, (uint16_t)usecs,
				0, fp->rxq->handle);
		}
	}

	if (!ret)
		ha->rx_coalesce_usecs = (uint8_t)usecs;

	return (err);

qlnx_add_sp_stats_sysctls(qlnx_host_t *ha)

	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;
	struct sysctl_oid *ctx_oid;

	ctx = device_get_sysctl_ctx(ha->pci_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

	ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "spstat",
		CTLFLAG_RD, NULL, "spstat");
	children = SYSCTL_CHILDREN(ctx_oid);

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "sp_interrupts",
		CTLFLAG_RD, &ha->sp_interrupts,
		"No. of slowpath interrupts");
qlnx_add_fp_stats_sysctls(qlnx_host_t *ha)

	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;
	struct sysctl_oid_list *node_children;
	struct sysctl_oid *ctx_oid;
	int i, j;
	uint8_t name_str[16];

	ctx = device_get_sysctl_ctx(ha->pci_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

	ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fpstat",
		CTLFLAG_RD, NULL, "fpstat");
	children = SYSCTL_CHILDREN(ctx_oid);

	for (i = 0; i < ha->num_rss; i++) {
		bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
		snprintf(name_str, sizeof(name_str), "%d", i);

		ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
			CTLFLAG_RD, NULL, name_str);
		node_children = SYSCTL_CHILDREN(ctx_oid);

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_processed",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_processed,
			"No. of packets processed for transmission");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_freed",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_freed,
			"No. of freed packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_transmitted",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_transmitted,
			"No. of transmitted packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_completed",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_completed,
			"No. of transmit completions");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_non_tso_pkts",
			CTLFLAG_RD, &ha->fp_array[i].tx_non_tso_pkts,
			"No. of non-LSO transmitted packets");

#ifdef QLNX_TRACE_PERF_DATA

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_trans_ctx",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_ctx,
			"No. of transmitted packets in transmit context");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_compl_ctx",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_ctx,
			"No. of transmit completions in transmit context");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_trans_fp",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_fp,
			"No. of transmitted packets in taskqueue");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_compl_fp",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_fp,
			"No. of transmit completions in taskqueue");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_compl_intr",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_intr,
			"No. of transmit completions in interrupt ctx");
#endif /* QLNX_TRACE_PERF_DATA */

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_tso_pkts",
			CTLFLAG_RD, &ha->fp_array[i].tx_tso_pkts,
			"No. of LSO transmitted packets");
		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_lso_wnd_min_len",
			CTLFLAG_RD, &ha->fp_array[i].tx_lso_wnd_min_len,
			"tx_lso_wnd_min_len");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_defrag",
			CTLFLAG_RD, &ha->fp_array[i].tx_defrag,
			"tx_defrag");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_nsegs_gt_elem_left",
			CTLFLAG_RD, &ha->fp_array[i].tx_nsegs_gt_elem_left,
			"tx_nsegs_gt_elem_left");

		SYSCTL_ADD_UINT(ctx, node_children,
			OID_AUTO, "tx_tso_max_nsegs",
			CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_nsegs,
			ha->fp_array[i].tx_tso_max_nsegs, "tx_tso_max_nsegs");

		SYSCTL_ADD_UINT(ctx, node_children,
			OID_AUTO, "tx_tso_min_nsegs",
			CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_nsegs,
			ha->fp_array[i].tx_tso_min_nsegs, "tx_tso_min_nsegs");

		SYSCTL_ADD_UINT(ctx, node_children,
			OID_AUTO, "tx_tso_max_pkt_len",
			CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_pkt_len,
			ha->fp_array[i].tx_tso_max_pkt_len,
			"tx_tso_max_pkt_len");

		SYSCTL_ADD_UINT(ctx, node_children,
			OID_AUTO, "tx_tso_min_pkt_len",
			CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_pkt_len,
			ha->fp_array[i].tx_tso_min_pkt_len,
			"tx_tso_min_pkt_len");

		for (j = 0; j < QLNX_FP_MAX_SEGS; j++) {
			bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
			snprintf(name_str, sizeof(name_str),
				"tx_pkts_nseg_%02d", (j+1));

			SYSCTL_ADD_QUAD(ctx, node_children,
				OID_AUTO, name_str, CTLFLAG_RD,
				&ha->fp_array[i].tx_pkts[j], name_str);
		}

#ifdef QLNX_TRACE_PERF_DATA
		for (j = 0; j < 18; j++) {
			bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
			snprintf(name_str, sizeof(name_str),
				"tx_pkts_hist_%02d", (j+1));

			SYSCTL_ADD_QUAD(ctx, node_children,
				OID_AUTO, name_str, CTLFLAG_RD,
				&ha->fp_array[i].tx_pkts_hist[j], name_str);
		}

		for (j = 0; j < 5; j++) {
			bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
			snprintf(name_str, sizeof(name_str),
				"tx_comInt_%02d", (j+1));

			SYSCTL_ADD_QUAD(ctx, node_children,
				OID_AUTO, name_str, CTLFLAG_RD,
				&ha->fp_array[i].tx_comInt[j], name_str);
		}

		for (j = 0; j < 18; j++) {
			bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
			snprintf(name_str, sizeof(name_str),
				"tx_pkts_q_%02d", (j+1));

			SYSCTL_ADD_QUAD(ctx, node_children,
				OID_AUTO, name_str, CTLFLAG_RD,
				&ha->fp_array[i].tx_pkts_q[j], name_str);
		}
#endif /* QLNX_TRACE_PERF_DATA */
		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_nsegs_gt_elem_left",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_nsegs_gt_elem_left,
			"err_tx_nsegs_gt_elem_left");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_dmamap_create",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_create,
			"err_tx_dmamap_create");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_defrag_dmamap_load",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag_dmamap_load,
			"err_tx_defrag_dmamap_load");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_non_tso_max_seg",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_non_tso_max_seg,
			"err_tx_non_tso_max_seg");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_dmamap_load",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_load,
			"err_tx_dmamap_load");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_defrag",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag,
			"err_tx_defrag");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_free_pkt_null",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_free_pkt_null,
			"err_tx_free_pkt_null");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_cons_idx_conflict",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_cons_idx_conflict,
			"err_tx_cons_idx_conflict");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "lro_cnt_64",
			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_64,
			"lro_cnt_64");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "lro_cnt_128",
			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_128,
			"lro_cnt_128");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "lro_cnt_256",
			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_256,
			"lro_cnt_256");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "lro_cnt_512",
			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_512,
			"lro_cnt_512");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "lro_cnt_1024",
			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_1024,
			"lro_cnt_1024");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "rx_pkts",
			CTLFLAG_RD, &ha->fp_array[i].rx_pkts,
			"No. of received packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tpa_start",
			CTLFLAG_RD, &ha->fp_array[i].tpa_start,
			"No. of tpa_start packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tpa_cont",
			CTLFLAG_RD, &ha->fp_array[i].tpa_cont,
			"No. of tpa_cont packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tpa_end",
			CTLFLAG_RD, &ha->fp_array[i].tpa_end,
			"No. of tpa_end packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_m_getcl",
			CTLFLAG_RD, &ha->fp_array[i].err_m_getcl,
			"err_m_getcl");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_m_getjcl",
			CTLFLAG_RD, &ha->fp_array[i].err_m_getjcl,
			"err_m_getjcl");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_rx_hw_errors",
			CTLFLAG_RD, &ha->fp_array[i].err_rx_hw_errors,
			"err_rx_hw_errors");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_rx_alloc_errors",
			CTLFLAG_RD, &ha->fp_array[i].err_rx_alloc_errors,
			"err_rx_alloc_errors");
	}
qlnx_add_hw_stats_sysctls(qlnx_host_t *ha)

	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;
	struct sysctl_oid *ctx_oid;

	ctx = device_get_sysctl_ctx(ha->pci_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

	ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "hwstat",
		CTLFLAG_RD, NULL, "hwstat");
	children = SYSCTL_CHILDREN(ctx_oid);

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "no_buff_discards",
		CTLFLAG_RD, &ha->hw_stats.common.no_buff_discards,
		"No. of packets discarded due to lack of buffer");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "packet_too_big_discard",
		CTLFLAG_RD, &ha->hw_stats.common.packet_too_big_discard,
		"No. of packets discarded because packet was too big");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "ttl0_discard",
		CTLFLAG_RD, &ha->hw_stats.common.ttl0_discard,
		"ttl0_discard");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_ucast_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_bytes,
		"rx_ucast_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_mcast_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_bytes,
		"rx_mcast_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_bcast_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_bytes,
		"rx_bcast_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_ucast_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_pkts,
		"rx_ucast_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_mcast_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_pkts,
		"rx_mcast_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_bcast_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_pkts,
		"rx_bcast_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "mftag_filter_discards",
		CTLFLAG_RD, &ha->hw_stats.common.mftag_filter_discards,
		"mftag_filter_discards");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "mac_filter_discards",
		CTLFLAG_RD, &ha->hw_stats.common.mac_filter_discards,
		"mac_filter_discards");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_ucast_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_bytes,
		"tx_ucast_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_mcast_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_bytes,
		"tx_mcast_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_bcast_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_bytes,
		"tx_bcast_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_ucast_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_pkts,
		"tx_ucast_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_mcast_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_pkts,
		"tx_mcast_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_bcast_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_pkts,
		"tx_bcast_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_err_drop_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.tx_err_drop_pkts,
		"tx_err_drop_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tpa_coalesced_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_pkts,
		"tpa_coalesced_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tpa_coalesced_events",
		CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_events,
		"tpa_coalesced_events");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tpa_aborts_num",
		CTLFLAG_RD, &ha->hw_stats.common.tpa_aborts_num,
		"tpa_aborts_num");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tpa_not_coalesced_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.tpa_not_coalesced_pkts,
		"tpa_not_coalesced_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tpa_coalesced_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_bytes,
		"tpa_coalesced_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_64_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_64_byte_packets,
		"rx_64_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_65_to_127_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_65_to_127_byte_packets,
		"rx_65_to_127_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_128_to_255_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_128_to_255_byte_packets,
		"rx_128_to_255_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_256_to_511_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_256_to_511_byte_packets,
		"rx_256_to_511_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_512_to_1023_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_512_to_1023_byte_packets,
		"rx_512_to_1023_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_1024_to_1518_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_1024_to_1518_byte_packets,
		"rx_1024_to_1518_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_1519_to_1522_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_1522_byte_packets,
		"rx_1519_to_1522_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_1523_to_2047_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_2047_byte_packets,
		"rx_1523_to_2047_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_2048_to_4095_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.rx_2048_to_4095_byte_packets,
		"rx_2048_to_4095_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_4096_to_9216_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.rx_4096_to_9216_byte_packets,
		"rx_4096_to_9216_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_9217_to_16383_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.rx_9217_to_16383_byte_packets,
		"rx_9217_to_16383_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_crc_errors",
		CTLFLAG_RD, &ha->hw_stats.common.rx_crc_errors,
		"rx_crc_errors");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_mac_crtl_frames",
		CTLFLAG_RD, &ha->hw_stats.common.rx_mac_crtl_frames,
		"rx_mac_crtl_frames");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_pause_frames",
		CTLFLAG_RD, &ha->hw_stats.common.rx_pause_frames,
		"rx_pause_frames");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_pfc_frames",
		CTLFLAG_RD, &ha->hw_stats.common.rx_pfc_frames,
		"rx_pfc_frames");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_align_errors",
		CTLFLAG_RD, &ha->hw_stats.common.rx_align_errors,
		"rx_align_errors");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_carrier_errors",
		CTLFLAG_RD, &ha->hw_stats.common.rx_carrier_errors,
		"rx_carrier_errors");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_oversize_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_oversize_packets,
		"rx_oversize_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_jabbers",
		CTLFLAG_RD, &ha->hw_stats.common.rx_jabbers,
		"rx_jabbers");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_undersize_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_undersize_packets,
		"rx_undersize_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_fragments",
		CTLFLAG_RD, &ha->hw_stats.common.rx_fragments,
		"rx_fragments");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_64_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_64_byte_packets,
		"tx_64_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_65_to_127_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_65_to_127_byte_packets,
		"tx_65_to_127_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_128_to_255_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_128_to_255_byte_packets,
		"tx_128_to_255_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_256_to_511_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_256_to_511_byte_packets,
		"tx_256_to_511_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_512_to_1023_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_512_to_1023_byte_packets,
		"tx_512_to_1023_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_1024_to_1518_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_1024_to_1518_byte_packets,
		"tx_1024_to_1518_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_1519_to_2047_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.tx_1519_to_2047_byte_packets,
		"tx_1519_to_2047_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_2048_to_4095_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.tx_2048_to_4095_byte_packets,
		"tx_2048_to_4095_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_4096_to_9216_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.tx_4096_to_9216_byte_packets,
		"tx_4096_to_9216_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_9217_to_16383_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.tx_9217_to_16383_byte_packets,
		"tx_9217_to_16383_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_pause_frames",
		CTLFLAG_RD, &ha->hw_stats.common.tx_pause_frames,
		"tx_pause_frames");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_pfc_frames",
		CTLFLAG_RD, &ha->hw_stats.common.tx_pfc_frames,
		"tx_pfc_frames");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_lpi_entry_count",
		CTLFLAG_RD, &ha->hw_stats.bb.tx_lpi_entry_count,
		"tx_lpi_entry_count");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_total_collisions",
		CTLFLAG_RD, &ha->hw_stats.bb.tx_total_collisions,
		"tx_total_collisions");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "brb_truncates",
		CTLFLAG_RD, &ha->hw_stats.common.brb_truncates,
		"brb_truncates");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "brb_discards",
		CTLFLAG_RD, &ha->hw_stats.common.brb_discards,
		"brb_discards");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_mac_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bytes,
		"rx_mac_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_mac_uc_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_mac_uc_packets,
		"rx_mac_uc_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_mac_mc_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_mac_mc_packets,
		"rx_mac_mc_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_mac_bc_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bc_packets,
		"rx_mac_bc_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_mac_frames_ok",
		CTLFLAG_RD, &ha->hw_stats.common.rx_mac_frames_ok,
		"rx_mac_frames_ok");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_mac_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bytes,
		"tx_mac_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_mac_uc_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_mac_uc_packets,
		"tx_mac_uc_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_mac_mc_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_mac_mc_packets,
		"tx_mac_mc_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_mac_bc_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bc_packets,
		"tx_mac_bc_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_mac_ctrl_frames",
		CTLFLAG_RD, &ha->hw_stats.common.tx_mac_ctrl_frames,
		"tx_mac_ctrl_frames");
2208 qlnx_add_sysctls(qlnx_host_t *ha)
2210 device_t dev = ha->pci_dev;
2211 struct sysctl_ctx_list *ctx;
2212 struct sysctl_oid_list *children;
2214 ctx = device_get_sysctl_ctx(dev);
2215 children = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2217 qlnx_add_fp_stats_sysctls(ha);
2218 qlnx_add_sp_stats_sysctls(ha);
2220 if (qlnx_vf_device(ha) != 0)
2221 qlnx_add_hw_stats_sysctls(ha);
2223 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Driver_Version",
2224 CTLFLAG_RD, qlnx_ver_str, 0,
2227 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "STORMFW_Version",
2228 CTLFLAG_RD, ha->stormfw_ver, 0,
2229 "STORM Firmware Version");
2231 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "MFW_Version",
2232 CTLFLAG_RD, ha->mfw_ver, 0,
2233 "Management Firmware Version");
2235 SYSCTL_ADD_UINT(ctx, children,
2236 OID_AUTO, "personality", CTLFLAG_RD,
2237 &ha->personality, ha->personality,
2238 "\tpersonality = 0 => Ethernet Only\n"
2239 "\tpersonality = 3 => Ethernet and RoCE\n"
2240 "\tpersonality = 4 => Ethernet and iWARP\n"
2241 "\tpersonality = 6 => Default in Shared Memory\n");
2244 SYSCTL_ADD_UINT(ctx, children,
2245 OID_AUTO, "debug", CTLFLAG_RW,
2246 &ha->dbg_level, ha->dbg_level, "Debug Level");
2248 ha->dp_level = 0x01;
2249 SYSCTL_ADD_UINT(ctx, children,
2250 OID_AUTO, "dp_level", CTLFLAG_RW,
2251 &ha->dp_level, ha->dp_level, "DP Level");
2253 ha->dbg_trace_lro_cnt = 0;
2254 SYSCTL_ADD_UINT(ctx, children,
2255 OID_AUTO, "dbg_trace_lro_cnt", CTLFLAG_RW,
2256 &ha->dbg_trace_lro_cnt, ha->dbg_trace_lro_cnt,
2257 "Trace LRO Counts");
2259 ha->dbg_trace_tso_pkt_len = 0;
2260 SYSCTL_ADD_UINT(ctx, children,
2261 OID_AUTO, "dbg_trace_tso_pkt_len", CTLFLAG_RW,
2262 &ha->dbg_trace_tso_pkt_len, ha->dbg_trace_tso_pkt_len,
2263 "Trace TSO packet lengths");
2266 SYSCTL_ADD_UINT(ctx, children,
2267 OID_AUTO, "dp_module", CTLFLAG_RW,
2268 &ha->dp_module, ha->dp_module, "DP Module");
2272 SYSCTL_ADD_UINT(ctx, children,
2273 OID_AUTO, "err_inject", CTLFLAG_RW,
2274 &ha->err_inject, ha->err_inject, "Error Inject");
2276 ha->storm_stats_enable = 0;
2278 SYSCTL_ADD_UINT(ctx, children,
2279 OID_AUTO, "storm_stats_enable", CTLFLAG_RW,
2280 &ha->storm_stats_enable, ha->storm_stats_enable,
2281 "Enable Storm Statistics Gathering");
2283 ha->storm_stats_index = 0;
2285 SYSCTL_ADD_UINT(ctx, children,
2286 OID_AUTO, "storm_stats_index", CTLFLAG_RD,
2287 &ha->storm_stats_index, ha->storm_stats_index,
2288 "Enable Storm Statistics Gathering Current Index");
2290 ha->grcdump_taken = 0;
2291 SYSCTL_ADD_UINT(ctx, children,
2292 OID_AUTO, "grcdump_taken", CTLFLAG_RD,
2293 &ha->grcdump_taken, ha->grcdump_taken,
2296 ha->idle_chk_taken = 0;
2297 SYSCTL_ADD_UINT(ctx, children,
2298 OID_AUTO, "idle_chk_taken", CTLFLAG_RD,
2299 &ha->idle_chk_taken, ha->idle_chk_taken,
2302 SYSCTL_ADD_UINT(ctx, children,
2303 OID_AUTO, "rx_coalesce_usecs", CTLFLAG_RD,
2304 &ha->rx_coalesce_usecs, ha->rx_coalesce_usecs,
2305 "rx_coalesce_usecs");
2307 SYSCTL_ADD_UINT(ctx, children,
2308 OID_AUTO, "tx_coalesce_usecs", CTLFLAG_RD,
2309 &ha->tx_coalesce_usecs, ha->tx_coalesce_usecs,
2310 "tx_coalesce_usecs");
2312 SYSCTL_ADD_PROC(ctx, children,
2313 OID_AUTO, "trigger_dump", (CTLTYPE_INT | CTLFLAG_RW),
2314 (void *)ha, 0,
2315 qlnx_trigger_dump_sysctl, "I", "trigger_dump");
2317 SYSCTL_ADD_PROC(ctx, children,
2318 OID_AUTO, "set_rx_coalesce_usecs",
2319 (CTLTYPE_INT | CTLFLAG_RW),
2320 (void *)ha, 0,
2321 qlnx_set_rx_coalesce, "I",
2322 "rx interrupt coalesce period microseconds");
2324 SYSCTL_ADD_PROC(ctx, children,
2325 OID_AUTO, "set_tx_coalesce_usecs",
2326 (CTLTYPE_INT | CTLFLAG_RW),
2327 (void *)ha, 0,
2328 qlnx_set_tx_coalesce, "I",
2329 "tx interrupt coalesce period microseconds");
2331 ha->rx_pkt_threshold = 128;
2332 SYSCTL_ADD_UINT(ctx, children,
2333 OID_AUTO, "rx_pkt_threshold", CTLFLAG_RW,
2334 &ha->rx_pkt_threshold, ha->rx_pkt_threshold,
2335 "No. of Rx Pkts to process at a time");
2337 ha->rx_jumbo_buf_eq_mtu = 0;
2338 SYSCTL_ADD_UINT(ctx, children,
2339 OID_AUTO, "rx_jumbo_buf_eq_mtu", CTLFLAG_RW,
2340 &ha->rx_jumbo_buf_eq_mtu, ha->rx_jumbo_buf_eq_mtu,
2341 "== 0 => Rx Jumbo buffers are capped to 4Kbytes\n"
2342 "otherwise Rx Jumbo buffers are set to >= MTU size\n");
2344 SYSCTL_ADD_QUAD(ctx, children,
2345 OID_AUTO, "err_illegal_intr", CTLFLAG_RD,
2346 &ha->err_illegal_intr, "err_illegal_intr");
2348 SYSCTL_ADD_QUAD(ctx, children,
2349 OID_AUTO, "err_fp_null", CTLFLAG_RD,
2350 &ha->err_fp_null, "err_fp_null");
2352 SYSCTL_ADD_QUAD(ctx, children,
2353 OID_AUTO, "err_get_proto_invalid_type", CTLFLAG_RD,
2354 &ha->err_get_proto_invalid_type, "err_get_proto_invalid_type");
2360 /*****************************************************************************
2361 * Operating System Network Interface Functions
2362 *****************************************************************************/
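/*
 * Name: qlnx_init_ifnet
 * Function: Allocates and initializes the network interface: derives the
 *	baudrate from the PCI device id, installs the driver entry points
 *	(if_init/if_ioctl/if_transmit/if_qflush), attaches the MAC address
 *	(synthesizing a locally assigned one from a random number when the
 *	flash holds all zeroes), advertises checksum/TSO/LRO/VLAN
 *	capabilities and populates the supported media list.
 */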
2365 qlnx_init_ifnet(device_t dev, qlnx_host_t *ha)
2370 ifp = ha->ifp = if_alloc(IFT_ETHER);
2373 panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));
2375 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2377 device_id = pci_get_device(ha->pci_dev);
2379 #if __FreeBSD_version >= 1000000
2381 if (device_id == QLOGIC_PCI_DEVICE_ID_1634)
2382 ifp->if_baudrate = IF_Gbps(40);
2383 else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
2384 (device_id == QLOGIC_PCI_DEVICE_ID_8070))
2385 ifp->if_baudrate = IF_Gbps(25);
2386 else if (device_id == QLOGIC_PCI_DEVICE_ID_1654)
2387 ifp->if_baudrate = IF_Gbps(50);
2388 else if (device_id == QLOGIC_PCI_DEVICE_ID_1644)
2389 ifp->if_baudrate = IF_Gbps(100);
2391 ifp->if_capabilities = IFCAP_LINKSTATE;
2393 ifp->if_mtu = ETHERMTU;
2394 ifp->if_baudrate = (1 * 1000 * 1000 * 1000);
2396 #endif /* #if __FreeBSD_version >= 1000000 */
2398 ifp->if_init = qlnx_init;
2400 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2401 ifp->if_ioctl = qlnx_ioctl;
2402 ifp->if_transmit = qlnx_transmit;
2403 ifp->if_qflush = qlnx_qflush;
2405 IFQ_SET_MAXLEN(&ifp->if_snd, qlnx_get_ifq_snd_maxlen(ha));
2406 ifp->if_snd.ifq_drv_maxlen = qlnx_get_ifq_snd_maxlen(ha);
2407 IFQ_SET_READY(&ifp->if_snd);
2409 #if __FreeBSD_version >= 1100036
2410 if_setgetcounterfn(ifp, qlnx_get_counter);
2413 ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2415 memcpy(ha->primary_mac, qlnx_get_mac_addr(ha), ETH_ALEN);
2417 if (!ha->primary_mac[0] && !ha->primary_mac[1] &&
2418 !ha->primary_mac[2] && !ha->primary_mac[3] &&
2419 !ha->primary_mac[4] && !ha->primary_mac[5]) {
2424 ha->primary_mac[0] = 0x00;
2425 ha->primary_mac[1] = 0x0e;
2426 ha->primary_mac[2] = 0x1e;
2427 ha->primary_mac[3] = rnd & 0xFF;
2428 ha->primary_mac[4] = (rnd >> 8) & 0xFF;
2429 ha->primary_mac[5] = (rnd >> 16) & 0xFF;
2432 ether_ifattach(ifp, ha->primary_mac);
2433 bcopy(IF_LLADDR(ha->ifp), ha->primary_mac, ETHER_ADDR_LEN);
2435 ifp->if_capabilities = IFCAP_HWCSUM;
2436 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2438 ifp->if_capabilities |= IFCAP_VLAN_MTU;
2439 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
2440 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2441 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
2442 ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
2443 ifp->if_capabilities |= IFCAP_TSO4;
2444 ifp->if_capabilities |= IFCAP_TSO6;
2445 ifp->if_capabilities |= IFCAP_LRO;
2447 ifp->if_hw_tsomax = QLNX_MAX_TSO_FRAME_SIZE -
2448 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
2449 ifp->if_hw_tsomaxsegcount = QLNX_MAX_SEGMENTS - 1 /* hdr */;
2450 ifp->if_hw_tsomaxsegsize = QLNX_MAX_TX_MBUF_SIZE;
2453 ifp->if_capenable = ifp->if_capabilities;
2455 ifp->if_hwassist = CSUM_IP;
2456 ifp->if_hwassist |= CSUM_TCP | CSUM_UDP;
2457 ifp->if_hwassist |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6;
2458 ifp->if_hwassist |= CSUM_TSO;
2460 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2462 ifmedia_init(&ha->media, IFM_IMASK, qlnx_media_change,\
2465 if (device_id == QLOGIC_PCI_DEVICE_ID_1634) {
2466 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_LR4), 0, NULL);
2467 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_SR4), 0, NULL);
2468 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_CR4), 0, NULL);
2469 } else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
2470 (device_id == QLOGIC_PCI_DEVICE_ID_8070)) {
2471 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_SR), 0, NULL);
2472 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_CR), 0, NULL);
2473 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) {
2474 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_KR2), 0, NULL);
2475 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_CR2), 0, NULL);
2476 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) {
2477 ifmedia_add(&ha->media,
2478 (IFM_ETHER | QLNX_IFM_100G_LR4), 0, NULL);
2479 ifmedia_add(&ha->media,
2480 (IFM_ETHER | QLNX_IFM_100G_SR4), 0, NULL);
2481 ifmedia_add(&ha->media,
2482 (IFM_ETHER | QLNX_IFM_100G_CR4), 0, NULL);
2485 ifmedia_add(&ha->media, (IFM_ETHER | IFM_FDX), 0, NULL);
2486 ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);
2489 ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));
2491 QL_DPRINT2(ha, "exit\n");
2497 qlnx_init_locked(qlnx_host_t *ha)
2499 struct ifnet *ifp = ha->ifp;
2501 QL_DPRINT1(ha, "Driver Initialization start \n");
2505 if (qlnx_load(ha) == 0) {
2507 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2508 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2510 #ifdef QLNX_ENABLE_IWARP
2511 if (qlnx_vf_device(ha) != 0) {
2512 qlnx_rdma_dev_open(ha);
2514 #endif /* #ifdef QLNX_ENABLE_IWARP */
2521 qlnx_init(void *arg)
2525 ha = (qlnx_host_t *)arg;
2527 QL_DPRINT2(ha, "enter\n");
2530 qlnx_init_locked(ha);
2533 QL_DPRINT2(ha, "exit\n");
2539 qlnx_config_mcast_mac_addr(qlnx_host_t *ha, uint8_t *mac_addr, uint32_t add_mac)
2541 struct ecore_filter_mcast *mcast;
2542 struct ecore_dev *cdev;
2547 mcast = &ha->ecore_mcast;
2548 bzero(mcast, sizeof(struct ecore_filter_mcast));
2551 mcast->opcode = ECORE_FILTER_ADD;
2553 mcast->opcode = ECORE_FILTER_REMOVE;
2555 mcast->num_mc_addrs = 1;
2556 memcpy(mcast->mac, mac_addr, ETH_ALEN);
2558 rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);
2564 qlnx_hw_add_mcast(qlnx_host_t *ha, uint8_t *mta)
2568 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2570 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0)
2571 return 0; /* it has already been added */
2574 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2576 if ((ha->mcast[i].addr[0] == 0) &&
2577 (ha->mcast[i].addr[1] == 0) &&
2578 (ha->mcast[i].addr[2] == 0) &&
2579 (ha->mcast[i].addr[3] == 0) &&
2580 (ha->mcast[i].addr[4] == 0) &&
2581 (ha->mcast[i].addr[5] == 0)) {
2583 if (qlnx_config_mcast_mac_addr(ha, mta, 1))
2586 bcopy(mta, ha->mcast[i].addr, ETH_ALEN);
2596 qlnx_hw_del_mcast(qlnx_host_t *ha, uint8_t *mta)
2600 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2601 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) {
2603 if (qlnx_config_mcast_mac_addr(ha, mta, 0))
2606 ha->mcast[i].addr[0] = 0;
2607 ha->mcast[i].addr[1] = 0;
2608 ha->mcast[i].addr[2] = 0;
2609 ha->mcast[i].addr[3] = 0;
2610 ha->mcast[i].addr[4] = 0;
2611 ha->mcast[i].addr[5] = 0;
2622 * Name: qlnx_hw_set_multi
2623 * Function: Sets the multicast addresses provided by the host O.S.
2624 * into the hardware (for the given interface)
2627 qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
2632 for (i = 0; i < mcnt; i++) {
2634 if (qlnx_hw_add_mcast(ha, mta))
2637 if (qlnx_hw_del_mcast(ha, mta))
2641 mta += ETHER_HDR_LEN;
2648 qlnx_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt)
2652 if (mcnt == QLNX_MAX_NUM_MULTICAST_ADDRS)
2655 bcopy(LLADDR(sdl), &mta[mcnt * ETHER_HDR_LEN], ETHER_HDR_LEN);
2661 qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi)
2663 uint8_t mta[QLNX_MAX_NUM_MULTICAST_ADDRS * ETHER_HDR_LEN];
2664 struct ifnet *ifp = ha->ifp;
2667 if (qlnx_vf_device(ha) == 0)
2670 mcnt = if_foreach_llmaddr(ifp, qlnx_copy_maddr, mta);
2673 qlnx_hw_set_multi(ha, mta, mcnt, add_multi);
2680 qlnx_set_promisc(qlnx_host_t *ha)
2685 if (qlnx_vf_device(ha) == 0)
2688 filter = ha->filter;
2689 filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
2690 filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
2692 rc = qlnx_set_rx_accept_filter(ha, filter);
2697 qlnx_set_allmulti(qlnx_host_t *ha)
2702 if (qlnx_vf_device(ha) == 0)
2705 filter = ha->filter;
2706 filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
2707 rc = qlnx_set_rx_accept_filter(ha, filter);
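/*
 * Name: qlnx_ioctl
 * Function: Interface ioctl handler: address assignment (SIOCSIFADDR),
 *	MTU changes bounded by QLNX_MAX_MTU (SIOCSIFMTU), interface flag
 *	changes including promiscuous/allmulti (SIOCSIFFLAGS), multicast
 *	list updates (SIOCADDMULTI/SIOCDELMULTI), media queries,
 *	capability toggles (SIOCSIFCAP) and SFP module reads over I2C
 *	(SIOCGI2C). Everything else falls through to ether_ioctl().
 */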
2714 qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2717 struct ifreq *ifr = (struct ifreq *)data;
2718 struct ifaddr *ifa = (struct ifaddr *)data;
2721 ha = (qlnx_host_t *)ifp->if_softc;
2725 QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx)\n", cmd);
2727 if (ifa->ifa_addr->sa_family == AF_INET) {
2728 ifp->if_flags |= IFF_UP;
2729 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2731 qlnx_init_locked(ha);
2734 QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
2735 cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr));
2737 arp_ifinit(ifp, ifa);
2739 ether_ioctl(ifp, cmd, data);
2744 QL_DPRINT4(ha, "SIOCSIFMTU (0x%lx)\n", cmd);
2746 if (ifr->ifr_mtu > QLNX_MAX_MTU) {
2750 ifp->if_mtu = ifr->ifr_mtu;
2751 ha->max_frame_size =
2752 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2753 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2754 qlnx_init_locked(ha);
2763 QL_DPRINT4(ha, "SIOCSIFFLAGS (0x%lx)\n", cmd);
2767 if (ifp->if_flags & IFF_UP) {
2768 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2769 if ((ifp->if_flags ^ ha->if_flags) &
2771 ret = qlnx_set_promisc(ha);
2772 } else if ((ifp->if_flags ^ ha->if_flags) &
2774 ret = qlnx_set_allmulti(ha);
2777 ha->max_frame_size = ifp->if_mtu +
2778 ETHER_HDR_LEN + ETHER_CRC_LEN;
2779 qlnx_init_locked(ha);
2782 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2784 ha->if_flags = ifp->if_flags;
2791 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCADDMULTI", cmd);
2793 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2794 if (qlnx_set_multi(ha, 1))
2800 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCDELMULTI", cmd);
2802 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2803 if (qlnx_set_multi(ha, 0))
2810 QL_DPRINT4(ha, "SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", cmd);
2812 ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
2817 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2819 QL_DPRINT4(ha, "SIOCSIFCAP (0x%lx)\n", cmd);
2821 if (mask & IFCAP_HWCSUM)
2822 ifp->if_capenable ^= IFCAP_HWCSUM;
2823 if (mask & IFCAP_TSO4)
2824 ifp->if_capenable ^= IFCAP_TSO4;
2825 if (mask & IFCAP_TSO6)
2826 ifp->if_capenable ^= IFCAP_TSO6;
2827 if (mask & IFCAP_VLAN_HWTAGGING)
2828 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2829 if (mask & IFCAP_VLAN_HWTSO)
2830 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
2831 if (mask & IFCAP_LRO)
2832 ifp->if_capenable ^= IFCAP_LRO;
2836 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2837 qlnx_init_locked(ha);
2841 VLAN_CAPABILITIES(ifp);
2844 #if (__FreeBSD_version >= 1100101)
2848 struct ifi2creq i2c;
2849 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0];
2850 struct ecore_ptt *p_ptt;
2852 ret = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
2857 if ((i2c.len > sizeof (i2c.data)) ||
2858 (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2)) {
2863 p_ptt = ecore_ptt_acquire(p_hwfn);
2866 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
2871 ret = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt,
2872 (ha->pci_func & 0x1), i2c.dev_addr, i2c.offset,
2873 i2c.len, &i2c.data[0]);
2875 ecore_ptt_release(p_hwfn, p_ptt);
2882 ret = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
2884 QL_DPRINT8(ha, "SIOCGI2C copyout ret = %d \
2885 len = %d addr = 0x%02x offset = 0x%04x \
2886 data[0..7]=0x%02x 0x%02x 0x%02x 0x%02x 0x%02x \
2887 0x%02x 0x%02x 0x%02x\n",
2888 ret, i2c.len, i2c.dev_addr, i2c.offset,
2889 i2c.data[0], i2c.data[1], i2c.data[2], i2c.data[3],
2890 i2c.data[4], i2c.data[5], i2c.data[6], i2c.data[7]);
2893 #endif /* #if (__FreeBSD_version >= 1100101) */
2896 QL_DPRINT4(ha, "default (0x%lx)\n", cmd);
2897 ret = ether_ioctl(ifp, cmd, data);
2905 qlnx_media_change(struct ifnet *ifp)
2908 struct ifmedia *ifm;
2911 ha = (qlnx_host_t *)ifp->if_softc;
2913 QL_DPRINT2(ha, "enter\n");
2917 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2920 QL_DPRINT2(ha, "exit\n");
2926 qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2930 ha = (qlnx_host_t *)ifp->if_softc;
2932 QL_DPRINT2(ha, "enter\n");
2934 ifmr->ifm_status = IFM_AVALID;
2935 ifmr->ifm_active = IFM_ETHER;
2938 ifmr->ifm_status |= IFM_ACTIVE;
2940 (IFM_FDX | qlnx_get_optics(ha, &ha->if_link));
2942 if (ha->if_link.link_partner_caps &
2943 (QLNX_LINK_CAP_Pause | QLNX_LINK_CAP_Asym_Pause))
2945 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE);
2948 QL_DPRINT2(ha, "exit (%s)\n", (ha->link_up ? "link_up" : "link_down"));
2955 qlnx_free_tx_pkt(qlnx_host_t *ha, struct qlnx_fastpath *fp,
2956 struct qlnx_tx_queue *txq)
2962 struct eth_tx_bd *tx_data_bd;
2963 struct eth_tx_1st_bd *first_bd;
2966 idx = txq->sw_tx_cons;
2967 mp = txq->sw_tx_ring[idx].mp;
2968 map = txq->sw_tx_ring[idx].map;
2970 if ((mp == NULL) || QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL)){
2972 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL);
2974 QL_DPRINT1(ha, "(mp == NULL) "
2976 " ecore_prod_idx = 0x%x"
2977 " ecore_cons_idx = 0x%x"
2978 " hw_bd_cons = 0x%x"
2979 " txq_db_last = 0x%x"
2980 " elem_left = 0x%x\n",
2982 ecore_chain_get_prod_idx(&txq->tx_pbl),
2983 ecore_chain_get_cons_idx(&txq->tx_pbl),
2984 le16toh(*txq->hw_cons_ptr),
2986 ecore_chain_get_elem_left(&txq->tx_pbl));
2988 fp->err_tx_free_pkt_null++;
2991 qlnx_trigger_dump(ha);
2996 QLNX_INC_OPACKETS((ha->ifp));
2997 QLNX_INC_OBYTES((ha->ifp), (mp->m_pkthdr.len));
2999 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_POSTWRITE);
3000 bus_dmamap_unload(ha->tx_tag, map);
3002 fp->tx_pkts_freed++;
3003 fp->tx_pkts_completed++;
3008 first_bd = (struct eth_tx_1st_bd *)ecore_chain_consume(&txq->tx_pbl);
3009 nbds = first_bd->data.nbds;
3011 // BD_SET_UNMAP_ADDR_LEN(first_bd, 0, 0);
3013 for (i = 1; i < nbds; i++) {
3014 tx_data_bd = ecore_chain_consume(&txq->tx_pbl);
3015 // BD_SET_UNMAP_ADDR_LEN(tx_data_bd, 0, 0);
3017 txq->sw_tx_ring[idx].flags = 0;
3018 txq->sw_tx_ring[idx].mp = NULL;
3019 txq->sw_tx_ring[idx].map = (bus_dmamap_t)0;
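/*
 * Name: qlnx_tx_int
 * Function: Processes transmit completions between the chain consumer
 *	index and the hardware consumer index, accounting for 16-bit
 *	wrap-around. For example, with ecore_cons_idx == 0xfff0 and
 *	hw_bd_cons == 0x0010 the distance is
 *	(1 << 16) - (0xfff0 - 0x0010) = 0x20 descriptors. A distance
 *	greater than TX_RING_SIZE indicates a consumer index conflict
 *	and triggers a grcdump.
 */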
3025 qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
3026 struct qlnx_tx_queue *txq)
3033 hw_bd_cons = le16toh(*txq->hw_cons_ptr);
3035 while (hw_bd_cons !=
3036 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
3038 if (hw_bd_cons < ecore_cons_idx) {
3039 diff = (1 << 16) - (ecore_cons_idx - hw_bd_cons);
3041 diff = hw_bd_cons - ecore_cons_idx;
3043 if ((diff > TX_RING_SIZE) ||
3044 QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF)){
3046 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF);
3048 QL_DPRINT1(ha, "(diff = 0x%x) "
3050 " ecore_prod_idx = 0x%x"
3051 " ecore_cons_idx = 0x%x"
3052 " hw_bd_cons = 0x%x"
3053 " txq_db_last = 0x%x"
3054 " elem_left = 0x%x\n",
3057 ecore_chain_get_prod_idx(&txq->tx_pbl),
3058 ecore_chain_get_cons_idx(&txq->tx_pbl),
3059 le16toh(*txq->hw_cons_ptr),
3061 ecore_chain_get_elem_left(&txq->tx_pbl));
3063 fp->err_tx_cons_idx_conflict++;
3066 qlnx_trigger_dump(ha);
3069 idx = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1);
3070 idx2 = (txq->sw_tx_cons + 2) & (TX_RING_SIZE - 1);
3071 prefetch(txq->sw_tx_ring[idx].mp);
3072 prefetch(txq->sw_tx_ring[idx2].mp);
3074 qlnx_free_tx_pkt(ha, fp, txq);
3076 txq->sw_tx_cons = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1);
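/*
 * Name: qlnx_transmit_locked
 * Function: Enqueues the mbuf on the fastpath buf_ring and, while the
 *	link is up, drains it: each packet is peeked, handed to
 *	qlnx_send(), then advanced on success or put back on failure.
 *	On exit, completions are reaped when enough packets are pending
 *	or the BD chain runs low. Called with the fastpath tx_mtx held.
 */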
3081 static int
3082 qlnx_transmit_locked(struct ifnet *ifp, struct qlnx_fastpath *fp, struct mbuf *mp)
3085 struct qlnx_tx_queue *txq;
3090 ha = (qlnx_host_t *)fp->edev;
3093 if ((!(ifp->if_drv_flags & IFF_DRV_RUNNING)) || (!ha->link_up)) {
3095 ret = drbr_enqueue(ifp, fp->tx_br, mp);
3100 ret = drbr_enqueue(ifp, fp->tx_br, mp);
3102 mp = drbr_peek(ifp, fp->tx_br);
3104 while (mp != NULL) {
3106 if (qlnx_send(ha, fp, &mp)) {
3109 drbr_putback(ifp, fp->tx_br, mp);
3111 fp->tx_pkts_processed++;
3112 drbr_advance(ifp, fp->tx_br);
3114 goto qlnx_transmit_locked_exit;
3117 drbr_advance(ifp, fp->tx_br);
3118 fp->tx_pkts_transmitted++;
3119 fp->tx_pkts_processed++;
3122 mp = drbr_peek(ifp, fp->tx_br);
3125 qlnx_transmit_locked_exit:
3126 if((qlnx_num_tx_compl(ha, fp, fp->txq[0]) > QLNX_TX_COMPL_THRESH) ||
3127 ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))
3128 < QLNX_TX_ELEM_MAX_THRESH))
3129 (void)qlnx_tx_int(ha, fp, fp->txq[0]);
3131 QL_DPRINT2(ha, "%s: exit ret = %d\n", __func__, ret);
3137 qlnx_transmit(struct ifnet *ifp, struct mbuf *mp)
3139 qlnx_host_t *ha = (qlnx_host_t *)ifp->if_softc;
3140 struct qlnx_fastpath *fp;
3141 int rss_id = 0, ret = 0;
3143 #ifdef QLNX_TRACEPERF_DATA
3144 uint64_t tx_pkts = 0, tx_compl = 0;
3147 QL_DPRINT2(ha, "enter\n");
3149 #if __FreeBSD_version >= 1100000
3150 if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE)
3152 if (mp->m_flags & M_FLOWID)
3154 rss_id = (mp->m_pkthdr.flowid % ECORE_RSS_IND_TABLE_SIZE) %
3157 fp = &ha->fp_array[rss_id];
3159 if (fp->tx_br == NULL) {
3161 goto qlnx_transmit_exit;
3164 if (mtx_trylock(&fp->tx_mtx)) {
3166 #ifdef QLNX_TRACEPERF_DATA
3167 tx_pkts = fp->tx_pkts_transmitted;
3168 tx_compl = fp->tx_pkts_completed;
3171 ret = qlnx_transmit_locked(ifp, fp, mp);
3173 #ifdef QLNX_TRACEPERF_DATA
3174 fp->tx_pkts_trans_ctx += (fp->tx_pkts_transmitted - tx_pkts);
3175 fp->tx_pkts_compl_ctx += (fp->tx_pkts_completed - tx_compl);
3177 mtx_unlock(&fp->tx_mtx);
3179 if (mp != NULL && (fp->fp_taskqueue != NULL)) {
3180 ret = drbr_enqueue(ifp, fp->tx_br, mp);
3181 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
3187 QL_DPRINT2(ha, "exit ret = %d\n", ret);
3192 qlnx_qflush(struct ifnet *ifp)
3195 struct qlnx_fastpath *fp;
3199 ha = (qlnx_host_t *)ifp->if_softc;
3201 QL_DPRINT2(ha, "enter\n");
3203 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
3205 fp = &ha->fp_array[rss_id];
3211 mtx_lock(&fp->tx_mtx);
3213 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
3214 fp->tx_pkts_freed++;
3217 mtx_unlock(&fp->tx_mtx);
3220 QL_DPRINT2(ha, "exit\n");
3226 qlnx_txq_doorbell_wr32(qlnx_host_t *ha, void *reg_addr, uint32_t value)
3228 struct ecore_dev *cdev;
3233 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)ha->pci_dbells);
3235 bus_write_4(ha->pci_dbells, offset, value);
3236 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_READ);
3237 bus_barrier(ha->pci_dbells, 0, 0, BUS_SPACE_BARRIER_READ);
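/*
 * Name: qlnx_tcp_offset
 * Function: Returns the byte offset of the TCP payload within a TSO
 *	frame: ehdrlen (Ethernet, optionally VLAN-tagged) + the IPv4 or
 *	IPv6 header + the TCP header length from th_off. When the L3
 *	header straddles the first mbuf it is copied into a local buffer
 *	before parsing; the TCP header is assumed to follow the fixed
 *	IPv6 header directly.
 */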
3243 qlnx_tcp_offset(qlnx_host_t *ha, struct mbuf *mp)
3245 struct ether_vlan_header *eh = NULL;
3246 struct ip *ip = NULL;
3247 struct ip6_hdr *ip6 = NULL;
3248 struct tcphdr *th = NULL;
3249 uint32_t ehdrlen = 0, ip_hlen = 0, offset = 0;
3252 uint8_t buf[sizeof(struct ip6_hdr)];
3256 eh = mtod(mp, struct ether_vlan_header *);
3258 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3259 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3260 etype = ntohs(eh->evl_proto);
3262 ehdrlen = ETHER_HDR_LEN;
3263 etype = ntohs(eh->evl_encap_proto);
3269 ip = (struct ip *)(mp->m_data + ehdrlen);
3271 ip_hlen = sizeof (struct ip);
3273 if (mp->m_len < (ehdrlen + ip_hlen)) {
3274 m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
3275 ip = (struct ip *)buf;
3278 th = (struct tcphdr *)(ip + 1);
3279 offset = ip_hlen + ehdrlen + (th->th_off << 2);
3282 case ETHERTYPE_IPV6:
3283 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3285 ip_hlen = sizeof(struct ip6_hdr);
3287 if (mp->m_len < (ehdrlen + ip_hlen)) {
3288 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
3290 ip6 = (struct ip6_hdr *)buf;
3292 th = (struct tcphdr *)(ip6 + 1);
3293 offset = ip_hlen + ehdrlen + (th->th_off << 2);
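/*
 * Name: qlnx_tso_check
 * Function: Validates a TSO segment list against the firmware LSO
 *	window rule: any ETH_TX_LSO_WINDOW_BDS_NUM consecutive BDs (less
 *	those consumed by the headers) must carry at least
 *	ETH_TX_LSO_WINDOW_MIN_LEN bytes. The window slides across the
 *	segment list; a deficient window returns nonzero so the caller
 *	can m_defrag() the chain into fewer, larger segments.
 */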
3304 qlnx_tso_check(struct qlnx_fastpath *fp, bus_dma_segment_t *segs, int nsegs,
3308 uint32_t sum, nbds_in_hdr = 1;
3310 bus_dma_segment_t *s_seg;
3312 /* If the header spans multiple segments, skip those segments */
3314 if (nsegs < ETH_TX_LSO_WINDOW_BDS_NUM)
3319 while ((i < nsegs) && (offset >= segs->ds_len)) {
3320 offset = offset - segs->ds_len;
3326 window = ETH_TX_LSO_WINDOW_BDS_NUM - nbds_in_hdr;
3330 while (nsegs >= window) {
3335 for (i = 0; i < window; i++){
3336 sum += s_seg->ds_len;
3340 if (sum < ETH_TX_LSO_WINDOW_MIN_LEN) {
3341 fp->tx_lso_wnd_min_len++;
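/*
 * Name: qlnx_send
 * Function: Maps the mbuf chain for DMA and builds the transmit BDs:
 *	the first BD carries the start flag, checksum/VLAN/LSO bits and
 *	the packet length; for TSO the second and third BDs carry the MSS
 *	and the number of header BDs; the remaining segments become plain
 *	data BDs. EFBIG or over-long segment lists are retried once after
 *	m_defrag(), and the new producer index is written to the doorbell.
 */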
3353 qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct mbuf **m_headp)
3355 bus_dma_segment_t *segs;
3356 bus_dmamap_t map = 0;
3359 struct mbuf *m_head = *m_headp;
3364 struct qlnx_tx_queue *txq;
3366 struct eth_tx_1st_bd *first_bd;
3367 struct eth_tx_2nd_bd *second_bd;
3368 struct eth_tx_3rd_bd *third_bd;
3369 struct eth_tx_bd *tx_data_bd;
3372 uint32_t nbds_in_hdr = 0;
3373 uint32_t offset = 0;
3375 #ifdef QLNX_TRACE_PERF_DATA
3379 QL_DPRINT8(ha, "enter[%d]\n", fp->rss_id);
3391 if ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl)) <
3392 QLNX_TX_ELEM_MIN_THRESH) {
3394 fp->tx_nsegs_gt_elem_left++;
3395 fp->err_tx_nsegs_gt_elem_left++;
3400 idx = txq->sw_tx_prod;
3402 map = txq->sw_tx_ring[idx].map;
3405 ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
3408 if (ha->dbg_trace_tso_pkt_len) {
3409 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3410 if (!fp->tx_tso_min_pkt_len) {
3411 fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len;
3412 fp->tx_tso_max_pkt_len = m_head->m_pkthdr.len;
3414 if (fp->tx_tso_min_pkt_len > m_head->m_pkthdr.len)
3415 fp->tx_tso_min_pkt_len =
3416 m_head->m_pkthdr.len;
3417 if (fp->tx_tso_max_pkt_len < m_head->m_pkthdr.len)
3418 fp->tx_tso_max_pkt_len =
3419 m_head->m_pkthdr.len;
3424 if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
3425 offset = qlnx_tcp_offset(ha, m_head);
3427 if ((ret == EFBIG) ||
3428 ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && (
3429 (!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) ||
3430 ((m_head->m_pkthdr.csum_flags & CSUM_TSO) &&
3431 qlnx_tso_check(fp, segs, nsegs, offset))))) {
3435 QL_DPRINT8(ha, "EFBIG [%d]\n", m_head->m_pkthdr.len);
3439 m = m_defrag(m_head, M_NOWAIT);
3441 fp->err_tx_defrag++;
3442 fp->tx_pkts_freed++;
3445 QL_DPRINT1(ha, "m_defrag() = NULL [%d]\n", ret);
3452 if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
3453 segs, &nsegs, BUS_DMA_NOWAIT))) {
3455 fp->err_tx_defrag_dmamap_load++;
3458 "bus_dmamap_load_mbuf_sg failed0 [%d, %d]\n",
3459 ret, m_head->m_pkthdr.len);
3461 fp->tx_pkts_freed++;
3468 if ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) &&
3469 !(m_head->m_pkthdr.csum_flags & CSUM_TSO)) {
3471 fp->err_tx_non_tso_max_seg++;
3474 "(%d) nsegs too many for non-TSO [%d, %d]\n",
3475 ret, nsegs, m_head->m_pkthdr.len);
3477 fp->tx_pkts_freed++;
3483 if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
3484 offset = qlnx_tcp_offset(ha, m_head);
3488 fp->err_tx_dmamap_load++;
3490 QL_DPRINT1(ha, "bus_dmamap_load_mbuf_sg failed1 [%d, %d]\n",
3491 ret, m_head->m_pkthdr.len);
3492 fp->tx_pkts_freed++;
3498 QL_ASSERT(ha, (nsegs != 0), ("qlnx_send: empty packet"));
3500 if (ha->dbg_trace_tso_pkt_len) {
3501 if (nsegs < QLNX_FP_MAX_SEGS)
3502 fp->tx_pkts[(nsegs - 1)]++;
3504 fp->tx_pkts[(QLNX_FP_MAX_SEGS - 1)]++;
3507 #ifdef QLNX_TRACE_PERF_DATA
3508 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3509 if(m_head->m_pkthdr.len <= 2048)
3510 fp->tx_pkts_hist[0]++;
3511 else if((m_head->m_pkthdr.len > 2048) &&
3512 (m_head->m_pkthdr.len <= 4096))
3513 fp->tx_pkts_hist[1]++;
3514 else if((m_head->m_pkthdr.len > 4096) &&
3515 (m_head->m_pkthdr.len <= 8192))
3516 fp->tx_pkts_hist[2]++;
3517 else if((m_head->m_pkthdr.len > 8192) &&
3518 (m_head->m_pkthdr.len <= 12288 ))
3519 fp->tx_pkts_hist[3]++;
3520 else if((m_head->m_pkthdr.len > 12288) &&
3521 (m_head->m_pkthdr.len <= 16384))
3522 fp->tx_pkts_hist[4]++;
3523 else if((m_head->m_pkthdr.len > 16384) &&
3524 (m_head->m_pkthdr.len <= 20480))
3525 fp->tx_pkts_hist[5]++;
3526 else if((m_head->m_pkthdr.len > 20480) &&
3527 (m_head->m_pkthdr.len <= 24576))
3528 fp->tx_pkts_hist[6]++;
3529 else if((m_head->m_pkthdr.len > 24576) &&
3530 (m_head->m_pkthdr.len <= 28672))
3531 fp->tx_pkts_hist[7]++;
3532 else if((m_head->m_pkthdr.len > 28672) &&
3533 (m_head->m_pkthdr.len <= 32768))
3534 fp->tx_pkts_hist[8]++;
3535 else if((m_head->m_pkthdr.len > 32768) &&
3536 (m_head->m_pkthdr.len <= 36864))
3537 fp->tx_pkts_hist[9]++;
3538 else if((m_head->m_pkthdr.len > 36864) &&
3539 (m_head->m_pkthdr.len <= 40960))
3540 fp->tx_pkts_hist[10]++;
3541 else if((m_head->m_pkthdr.len > 40960) &&
3542 (m_head->m_pkthdr.len <= 45056))
3543 fp->tx_pkts_hist[11]++;
3544 else if((m_head->m_pkthdr.len > 45056) &&
3545 (m_head->m_pkthdr.len <= 49152))
3546 fp->tx_pkts_hist[12]++;
3547 else if((m_head->m_pkthdr.len > 49152) &&
3548 (m_head->m_pkthdr.len <= 53248))
3549 fp->tx_pkts_hist[13]++;
3550 else if((m_head->m_pkthdr.len > 53248) &&
3551 (m_head->m_pkthdr.len <= 57344))
3552 fp->tx_pkts_hist[14]++;
3553 else if((m_head->m_pkthdr.len > 57344) &&
3554 (m_head->m_pkthdr.len <= 61440))
3555 fp->tx_pkts_hist[15]++;
3556 else if((m_head->m_pkthdr.len > 61440) &&
3557 (m_head->m_pkthdr.len <= 65536))
3558 fp->tx_pkts_hist[16]++;
3560 fp->tx_pkts_hist[17]++;
3563 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3565 elem_left = ecore_chain_get_elem_left(&txq->tx_pbl);
3566 bd_used = TX_RING_SIZE - elem_left;
3570 else if((bd_used > 100) && (bd_used <= 500))
3572 else if((bd_used > 500) && (bd_used <= 1000))
3574 else if((bd_used > 1000) && (bd_used <= 2000))
3576 else if((bd_used > 3000) && (bd_used <= 4000))
3578 else if((bd_used > 4000) && (bd_used <= 5000))
3580 else if((bd_used > 6000) && (bd_used <= 7000))
3582 else if((bd_used > 7000) && (bd_used <= 8000))
3584 else if((bd_used > 8000) && (bd_used <= 9000))
3586 else if((bd_used > 9000) && (bd_used <= 10000))
3588 else if((bd_used > 10000) && (bd_used <= 11000))
3589 fp->tx_pkts_q[10]++;
3590 else if((bd_used > 11000) && (bd_used <= 12000))
3591 fp->tx_pkts_q[11]++;
3592 else if((bd_used > 12000) && (bd_used <= 13000))
3593 fp->tx_pkts_q[12]++;
3594 else if((bd_used > 13000) && (bd_used <= 14000))
3595 fp->tx_pkts_q[13]++;
3596 else if((bd_used > 14000) && (bd_used <= 15000))
3597 fp->tx_pkts_q[14]++;
3598 else if((bd_used > 15000) && (bd_used <= 16000))
3599 fp->tx_pkts_q[15]++;
3601 fp->tx_pkts_q[16]++;
3604 #endif /* end of QLNX_TRACE_PERF_DATA */
3606 if ((nsegs + QLNX_TX_ELEM_RESERVE) >
3607 (int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))) {
3609 QL_DPRINT1(ha, "(%d, 0x%x) insuffient BDs"
3610 " in chain[%d] trying to free packets\n",
3611 nsegs, elem_left, fp->rss_id);
3613 fp->tx_nsegs_gt_elem_left++;
3615 (void)qlnx_tx_int(ha, fp, txq);
3617 if ((nsegs + QLNX_TX_ELEM_RESERVE) > (int)(elem_left =
3618 ecore_chain_get_elem_left(&txq->tx_pbl))) {
3621 "(%d, 0x%x) insuffient BDs in chain[%d]\n",
3622 nsegs, elem_left, fp->rss_id);
3624 fp->err_tx_nsegs_gt_elem_left++;
3625 fp->tx_ring_full = 1;
3626 if (ha->storm_stats_enable)
3627 ha->storm_stats_gather = 1;
3632 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);
3634 txq->sw_tx_ring[idx].mp = m_head;
3636 first_bd = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
3638 memset(first_bd, 0, sizeof(*first_bd));
3640 first_bd->data.bd_flags.bitfields =
3641 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
3643 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, segs->ds_len);
3647 if (m_head->m_pkthdr.csum_flags & CSUM_IP) {
3648 first_bd->data.bd_flags.bitfields |=
3649 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
3652 if (m_head->m_pkthdr.csum_flags &
3653 (CSUM_UDP | CSUM_TCP | CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) {
3654 first_bd->data.bd_flags.bitfields |=
3655 (1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT);
3658 if (m_head->m_flags & M_VLANTAG) {
3659 first_bd->data.vlan = m_head->m_pkthdr.ether_vtag;
3660 first_bd->data.bd_flags.bitfields |=
3661 (1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT);
3664 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3666 first_bd->data.bd_flags.bitfields |=
3667 (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
3668 first_bd->data.bd_flags.bitfields |=
3669 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
3673 if (offset == segs->ds_len) {
3674 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
3678 second_bd = (struct eth_tx_2nd_bd *)
3679 ecore_chain_produce(&txq->tx_pbl);
3680 memset(second_bd, 0, sizeof(*second_bd));
3683 if (seg_idx < nsegs) {
3684 BD_SET_UNMAP_ADDR_LEN(second_bd, \
3685 (segs->ds_addr), (segs->ds_len));
3690 third_bd = (struct eth_tx_3rd_bd *)
3691 ecore_chain_produce(&txq->tx_pbl);
3692 memset(third_bd, 0, sizeof(*third_bd));
3693 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3694 third_bd->data.bitfields |=
3695 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3698 if (seg_idx < nsegs) {
3699 BD_SET_UNMAP_ADDR_LEN(third_bd, \
3700 (segs->ds_addr), (segs->ds_len));
3705 for (; seg_idx < nsegs; seg_idx++) {
3706 tx_data_bd = (struct eth_tx_bd *)
3707 ecore_chain_produce(&txq->tx_pbl);
3708 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3709 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \
3716 } else if (offset < segs->ds_len) {
3717 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
3719 second_bd = (struct eth_tx_2nd_bd *)
3720 ecore_chain_produce(&txq->tx_pbl);
3721 memset(second_bd, 0, sizeof(*second_bd));
3722 BD_SET_UNMAP_ADDR_LEN(second_bd, \
3723 (segs->ds_addr + offset),\
3724 (segs->ds_len - offset));
3728 third_bd = (struct eth_tx_3rd_bd *)
3729 ecore_chain_produce(&txq->tx_pbl);
3730 memset(third_bd, 0, sizeof(*third_bd));
3732 BD_SET_UNMAP_ADDR_LEN(third_bd, \
3735 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3736 third_bd->data.bitfields |=
3737 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3741 for (seg_idx = 2; seg_idx < nsegs; seg_idx++) {
3742 tx_data_bd = (struct eth_tx_bd *)
3743 ecore_chain_produce(&txq->tx_pbl);
3744 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3745 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \
3753 offset = offset - segs->ds_len;
3756 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) {
3761 tx_data_bd = (struct eth_tx_bd *)
3762 ecore_chain_produce(&txq->tx_pbl);
3763 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3765 if (second_bd == NULL) {
3766 second_bd = (struct eth_tx_2nd_bd *)
3768 } else if (third_bd == NULL) {
3769 third_bd = (struct eth_tx_3rd_bd *)
3773 if (offset && (offset < segs->ds_len)) {
3774 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3775 segs->ds_addr, offset);
3777 tx_data_bd = (struct eth_tx_bd *)
3778 ecore_chain_produce(&txq->tx_pbl);
3780 memset(tx_data_bd, 0,
3781 sizeof(*tx_data_bd));
3783 if (second_bd == NULL) {
3785 (struct eth_tx_2nd_bd *)tx_data_bd;
3786 } else if (third_bd == NULL) {
3788 (struct eth_tx_3rd_bd *)tx_data_bd;
3790 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3791 (segs->ds_addr + offset), \
3792 (segs->ds_len - offset));
3797 offset = offset - segs->ds_len;
3798 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3799 segs->ds_addr, segs->ds_len);
3805 if (third_bd == NULL) {
3806 third_bd = (struct eth_tx_3rd_bd *)
3807 ecore_chain_produce(&txq->tx_pbl);
3808 memset(third_bd, 0, sizeof(*third_bd));
3811 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3812 third_bd->data.bitfields |=
3813 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3818 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) {
3819 tx_data_bd = (struct eth_tx_bd *)
3820 ecore_chain_produce(&txq->tx_pbl);
3821 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3822 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, segs->ds_addr,\
3827 first_bd->data.bitfields =
3828 (m_head->m_pkthdr.len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
3829 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
3830 first_bd->data.bitfields =
3831 htole16(first_bd->data.bitfields);
3832 fp->tx_non_tso_pkts++;
3836 first_bd->data.nbds = nbd;
3838 if (ha->dbg_trace_tso_pkt_len) {
3839 if (fp->tx_tso_max_nsegs < nsegs)
3840 fp->tx_tso_max_nsegs = nsegs;
3842 if ((nsegs < fp->tx_tso_min_nsegs) || (!fp->tx_tso_min_nsegs))
3843 fp->tx_tso_min_nsegs = nsegs;
3846 txq->sw_tx_ring[idx].nsegs = nsegs;
3847 txq->sw_tx_prod = (txq->sw_tx_prod + 1) & (TX_RING_SIZE - 1);
3849 txq->tx_db.data.bd_prod =
3850 htole16(ecore_chain_get_prod_idx(&txq->tx_pbl));
3852 qlnx_txq_doorbell_wr32(ha, txq->doorbell_addr, txq->tx_db.raw);
3854 QL_DPRINT8(ha, "exit[%d]\n", fp->rss_id);
3859 qlnx_stop(qlnx_host_t *ha)
3861 struct ifnet *ifp = ha->ifp;
3867 ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
3870 * We simply lock and unlock each fp->tx_mtx to
3871 * propagate the if_drv_flags
3872 * state to each tx thread
3874 QL_DPRINT1(ha, "QLNX STATE = %d\n",ha->state);
3876 if (ha->state == QLNX_STATE_OPEN) {
3877 for (i = 0; i < ha->num_rss; i++) {
3878 struct qlnx_fastpath *fp = &ha->fp_array[i];
3880 mtx_lock(&fp->tx_mtx);
3881 mtx_unlock(&fp->tx_mtx);
3883 if (fp->fp_taskqueue != NULL)
3884 taskqueue_enqueue(fp->fp_taskqueue,
3888 #ifdef QLNX_ENABLE_IWARP
3889 if (qlnx_vf_device(ha) != 0) {
3890 qlnx_rdma_dev_close(ha);
3892 #endif /* #ifdef QLNX_ENABLE_IWARP */
3900 qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha)
3902 return(TX_RING_SIZE - 1);
3906 qlnx_get_mac_addr(qlnx_host_t *ha)
3908 struct ecore_hwfn *p_hwfn;
3909 unsigned char mac[ETHER_ADDR_LEN];
3910 uint8_t p_is_forced;
3912 p_hwfn = &ha->cdev.hwfns[0];
3914 if (qlnx_vf_device(ha) != 0)
3915 return (p_hwfn->hw_info.hw_mac_addr);
3917 ecore_vf_read_bulletin(p_hwfn, &p_is_forced);
3918 if (ecore_vf_bulletin_get_forced_mac(p_hwfn, mac, &p_is_forced) ==
3920 device_printf(ha->pci_dev, "%s: p_is_forced = %d"
3921 " mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n", __func__,
3922 p_is_forced, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3923 memcpy(ha->primary_mac, mac, ETH_ALEN);
3926 return (ha->primary_mac);
3930 qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link)
3932 uint32_t ifm_type = 0;
3934 switch (if_link->media_type) {
3936 case MEDIA_MODULE_FIBER:
3937 case MEDIA_UNSPECIFIED:
3938 if (if_link->speed == (100 * 1000))
3939 ifm_type = QLNX_IFM_100G_SR4;
3940 else if (if_link->speed == (40 * 1000))
3941 ifm_type = IFM_40G_SR4;
3942 else if (if_link->speed == (25 * 1000))
3943 ifm_type = QLNX_IFM_25G_SR;
3944 else if (if_link->speed == (10 * 1000))
3945 ifm_type = (IFM_10G_LR | IFM_10G_SR);
3946 else if (if_link->speed == (1 * 1000))
3947 ifm_type = (IFM_1000_SX | IFM_1000_LX);
3951 case MEDIA_DA_TWINAX:
3952 if (if_link->speed == (100 * 1000))
3953 ifm_type = QLNX_IFM_100G_CR4;
3954 else if (if_link->speed == (40 * 1000))
3955 ifm_type = IFM_40G_CR4;
3956 else if (if_link->speed == (25 * 1000))
3957 ifm_type = QLNX_IFM_25G_CR;
3958 else if (if_link->speed == (10 * 1000))
3959 ifm_type = IFM_10G_TWINAX;
3964 ifm_type = IFM_UNKNOWN;
3972 /*****************************************************************************
3973 * Interrupt Service Functions
3974 *****************************************************************************/
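/*
 * Name: qlnx_rx_jumbo_chain
 * Function: Gathers the continuation buffers of a received frame larger
 *	than one receive buffer: each additional BD is synced, replaced
 *	with a freshly allocated buffer (or reused if allocation fails),
 *	trimmed to the bytes it actually holds and linked onto the head
 *	mbuf via m_next.
 */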
3977 qlnx_rx_jumbo_chain(qlnx_host_t *ha, struct qlnx_fastpath *fp,
3978 struct mbuf *mp_head, uint16_t len)
3980 struct mbuf *mp, *mpf, *mpl;
3981 struct sw_rx_data *sw_rx_data;
3982 struct qlnx_rx_queue *rxq;
3983 uint16_t len_in_buffer;
3986 mpf = mpl = mp = NULL;
3990 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3992 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3993 mp = sw_rx_data->data;
3996 QL_DPRINT1(ha, "mp = NULL\n");
3997 fp->err_rx_mp_null++;
3999 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4006 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4007 BUS_DMASYNC_POSTREAD);
4009 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4011 QL_DPRINT1(ha, "New buffer allocation failed, dropping"
4012 " incoming packet and reusing its buffer\n");
4014 qlnx_reuse_rx_data(rxq);
4015 fp->err_rx_alloc_errors++;
4022 ecore_chain_consume(&rxq->rx_bd_ring);
4024 if (len > rxq->rx_buf_size)
4025 len_in_buffer = rxq->rx_buf_size;
4027 len_in_buffer = len;
4029 len = len - len_in_buffer;
4031 mp->m_flags &= ~M_PKTHDR;
4033 mp->m_len = len_in_buffer;
4044 mp_head->m_next = mpf;
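/*
 * Name: qlnx_tpa_start
 * Function: Handles a TPA (hardware LRO aggregation) start CQE:
 *	validates the aggregation index, swaps the receive buffer with
 *	the per-aggregation buffer in tpa_info, chains any buffers listed
 *	in ext_bd_len_list, fills in the header fields (rcvif, RSS hash,
 *	checksum bits, VLAN tag) and moves the aggregation to
 *	QLNX_AGG_STATE_START. Failures park it in QLNX_AGG_STATE_ERROR
 *	so the subsequent cont/end CQEs drop the packet.
 */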
4050 qlnx_tpa_start(qlnx_host_t *ha,
4051 struct qlnx_fastpath *fp,
4052 struct qlnx_rx_queue *rxq,
4053 struct eth_fast_path_rx_tpa_start_cqe *cqe)
4056 struct ifnet *ifp = ha->ifp;
4058 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL;
4059 struct sw_rx_data *sw_rx_data;
4062 struct eth_rx_bd *rx_bd;
4065 #if __FreeBSD_version >= 1100000
4067 #endif /* #if __FreeBSD_version >= 1100000 */
4070 agg_index = cqe->tpa_agg_index;
4072 QL_DPRINT7(ha, "[rss_id = %d]: enter\n \
4074 \t bitfields = 0x%x\n \
4075 \t seg_len = 0x%x\n \
4076 \t pars_flags = 0x%x\n \
4077 \t vlan_tag = 0x%x\n \
4078 \t rss_hash = 0x%x\n \
4079 \t len_on_first_bd = 0x%x\n \
4080 \t placement_offset = 0x%x\n \
4081 \t tpa_agg_index = 0x%x\n \
4082 \t header_len = 0x%x\n \
4083 \t ext_bd_len_list[0] = 0x%x\n \
4084 \t ext_bd_len_list[1] = 0x%x\n \
4085 \t ext_bd_len_list[2] = 0x%x\n \
4086 \t ext_bd_len_list[3] = 0x%x\n \
4087 \t ext_bd_len_list[4] = 0x%x\n",
4088 fp->rss_id, cqe->type, cqe->bitfields, cqe->seg_len,
4089 cqe->pars_flags.flags, cqe->vlan_tag,
4090 cqe->rss_hash, cqe->len_on_first_bd, cqe->placement_offset,
4091 cqe->tpa_agg_index, cqe->header_len,
4092 cqe->ext_bd_len_list[0], cqe->ext_bd_len_list[1],
4093 cqe->ext_bd_len_list[2], cqe->ext_bd_len_list[3],
4094 cqe->ext_bd_len_list[4]);
4096 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
4097 fp->err_rx_tpa_invalid_agg_num++;
4101 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4102 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD);
4103 mp = sw_rx_data->data;
4105 QL_DPRINT7(ha, "[rss_id = %d]: mp = %p \n ", fp->rss_id, mp);
4108 QL_DPRINT7(ha, "[%d]: mp = NULL\n", fp->rss_id);
4109 fp->err_rx_mp_null++;
4110 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4115 if ((le16toh(cqe->pars_flags.flags)) & CQE_FLAGS_ERR) {
4117 QL_DPRINT7(ha, "[%d]: CQE in CONS = %u has error,"
4118 " flags = %x, dropping incoming packet\n", fp->rss_id,
4119 rxq->sw_rx_cons, le16toh(cqe->pars_flags.flags));
4121 fp->err_rx_hw_errors++;
4123 qlnx_reuse_rx_data(rxq);
4125 QLNX_INC_IERRORS(ifp);
4130 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4132 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4133 " dropping incoming packet and reusing its buffer\n",
4136 fp->err_rx_alloc_errors++;
4137 QLNX_INC_IQDROPS(ifp);
4140 * Load the tpa mbuf into the rx ring and save the
4144 map = sw_rx_data->map;
4145 addr = sw_rx_data->dma_addr;
4147 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
4149 sw_rx_data->data = rxq->tpa_info[agg_index].rx_buf.data;
4150 sw_rx_data->dma_addr = rxq->tpa_info[agg_index].rx_buf.dma_addr;
4151 sw_rx_data->map = rxq->tpa_info[agg_index].rx_buf.map;
4153 rxq->tpa_info[agg_index].rx_buf.data = mp;
4154 rxq->tpa_info[agg_index].rx_buf.dma_addr = addr;
4155 rxq->tpa_info[agg_index].rx_buf.map = map;
4157 rx_bd = (struct eth_rx_bd *)
4158 ecore_chain_produce(&rxq->rx_bd_ring);
4160 rx_bd->addr.hi = htole32(U64_HI(sw_rx_data->dma_addr));
4161 rx_bd->addr.lo = htole32(U64_LO(sw_rx_data->dma_addr));
4163 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4164 BUS_DMASYNC_PREREAD);
4166 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
4167 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4169 ecore_chain_consume(&rxq->rx_bd_ring);
4171 /* Now reuse any buffers posted in ext_bd_len_list */
4172 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
4174 if (cqe->ext_bd_len_list[i] == 0)
4177 qlnx_reuse_rx_data(rxq);
4180 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
4184 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
4186 QL_DPRINT7(ha, "[%d]: invalid aggregation state,"
4187 " dropping incoming packet and reusing its buffer\n",
4190 QLNX_INC_IQDROPS(ifp);
4192 /* if we already have mbuf head in aggregation free it */
4193 if (rxq->tpa_info[agg_index].mpf) {
4194 m_freem(rxq->tpa_info[agg_index].mpf);
4195 rxq->tpa_info[agg_index].mpl = NULL;
4197 rxq->tpa_info[agg_index].mpf = mp;
4198 rxq->tpa_info[agg_index].mpl = NULL;
4200 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4201 ecore_chain_consume(&rxq->rx_bd_ring);
4203 /* Now reuse any buffers posted in ext_bd_len_list */
4204 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
4206 if (cqe->ext_bd_len_list[i] == 0)
4209 qlnx_reuse_rx_data(rxq);
4211 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
4217 * first process the ext_bd_len_list
4218 * if this fails then we simply drop the packet
4220 ecore_chain_consume(&rxq->rx_bd_ring);
4221 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4223 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
4225 QL_DPRINT7(ha, "[%d]: 4\n ", fp->rss_id);
4227 if (cqe->ext_bd_len_list[i] == 0)
4230 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4231 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4232 BUS_DMASYNC_POSTREAD);
4234 mpc = sw_rx_data->data;
4237 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4238 fp->err_rx_mp_null++;
4242 rxq->tpa_info[agg_index].agg_state =
4243 QLNX_AGG_STATE_ERROR;
4244 ecore_chain_consume(&rxq->rx_bd_ring);
4246 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4250 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4251 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4252 " dropping incoming packet and reusing its"
4253 " buffer\n", fp->rss_id);
4255 qlnx_reuse_rx_data(rxq);
4261 rxq->tpa_info[agg_index].agg_state =
4262 QLNX_AGG_STATE_ERROR;
4264 ecore_chain_consume(&rxq->rx_bd_ring);
4266 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4271 mpc->m_flags &= ~M_PKTHDR;
4273 mpc->m_len = cqe->ext_bd_len_list[i];
4279 mpl->m_len = ha->rx_buf_size;
4284 ecore_chain_consume(&rxq->rx_bd_ring);
4286 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4289 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
4291 QL_DPRINT7(ha, "[%d]: invalid aggregation state, dropping"
4292 " incoming packet and reusing its buffer\n",
4295 QLNX_INC_IQDROPS(ifp);
4297 rxq->tpa_info[agg_index].mpf = mp;
4298 rxq->tpa_info[agg_index].mpl = NULL;
4303 rxq->tpa_info[agg_index].placement_offset = cqe->placement_offset;
4306 mp->m_len = ha->rx_buf_size;
4308 rxq->tpa_info[agg_index].mpf = mp;
4309 rxq->tpa_info[agg_index].mpl = mpl;
4311 mp->m_len = cqe->len_on_first_bd + cqe->placement_offset;
4312 rxq->tpa_info[agg_index].mpf = mp;
4313 rxq->tpa_info[agg_index].mpl = mp;
4317 mp->m_flags |= M_PKTHDR;
4319 /* assign packet to this interface */
4320 mp->m_pkthdr.rcvif = ifp;
4322 /* assume no hardware checksum has completed */
4323 mp->m_pkthdr.csum_flags = 0;
4325 //mp->m_pkthdr.flowid = fp->rss_id;
4326 mp->m_pkthdr.flowid = cqe->rss_hash;
4328 #if __FreeBSD_version >= 1100000
4330 hash_type = cqe->bitfields &
4331 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
4332 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);
4334 switch (hash_type) {
4336 case RSS_HASH_TYPE_IPV4:
4337 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
4340 case RSS_HASH_TYPE_TCP_IPV4:
4341 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
4344 case RSS_HASH_TYPE_IPV6:
4345 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
4348 case RSS_HASH_TYPE_TCP_IPV6:
4349 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
4353 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
4358 mp->m_flags |= M_FLOWID;
4361 mp->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID |
4362 CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4364 mp->m_pkthdr.csum_data = 0xFFFF;
4366 if (CQE_HAS_VLAN(cqe->pars_flags.flags)) {
4367 mp->m_pkthdr.ether_vtag = le16toh(cqe->vlan_tag);
4368 mp->m_flags |= M_VLANTAG;
4371 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_START;
4373 QL_DPRINT7(ha, "[%d]: 5\n\tagg_state = %d\n\t mpf = %p mpl = %p\n",
4374 fp->rss_id, rxq->tpa_info[agg_index].agg_state,
4375 rxq->tpa_info[agg_index].mpf, rxq->tpa_info[agg_index].mpl);
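/*
 * Name: qlnx_tpa_cont
 * Function: Handles a TPA continuation CQE: for each nonzero len_list
 *	entry the next receive buffer is pulled off the ring, replaced
 *	with a fresh allocation and appended to the aggregation's mbuf
 *	chain; allocation failures flip the aggregation into
 *	QLNX_AGG_STATE_ERROR so it is dropped at TPA end.
 */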
4381 qlnx_tpa_cont(qlnx_host_t *ha, struct qlnx_fastpath *fp,
4382 struct qlnx_rx_queue *rxq,
4383 struct eth_fast_path_rx_tpa_cont_cqe *cqe)
4385 struct sw_rx_data *sw_rx_data;
4387 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL;
4394 QL_DPRINT7(ha, "[%d]: enter\n \
4396 \t tpa_agg_index = 0x%x\n \
4397 \t len_list[0] = 0x%x\n \
4398 \t len_list[1] = 0x%x\n \
4399 \t len_list[2] = 0x%x\n \
4400 \t len_list[3] = 0x%x\n \
4401 \t len_list[4] = 0x%x\n \
4402 \t len_list[5] = 0x%x\n",
4403 fp->rss_id, cqe->type, cqe->tpa_agg_index,
4404 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
4405 cqe->len_list[3], cqe->len_list[4], cqe->len_list[5]);
4407 agg_index = cqe->tpa_agg_index;
4409 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
4410 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
4411 fp->err_rx_tpa_invalid_agg_num++;
4416 for (i = 0; i < ETH_TPA_CQE_CONT_LEN_LIST_SIZE; i++) {
4418 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);
4420 if (cqe->len_list[i] == 0)
4423 if (rxq->tpa_info[agg_index].agg_state !=
4424 QLNX_AGG_STATE_START) {
4425 qlnx_reuse_rx_data(rxq);
4429 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4430 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4431 BUS_DMASYNC_POSTREAD);
4433 mpc = sw_rx_data->data;
4437 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4439 fp->err_rx_mp_null++;
4443 rxq->tpa_info[agg_index].agg_state =
4444 QLNX_AGG_STATE_ERROR;
4445 ecore_chain_consume(&rxq->rx_bd_ring);
4447 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4451 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4453 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4454 " dropping incoming packet and reusing its"
4455 " buffer\n", fp->rss_id);
4457 qlnx_reuse_rx_data(rxq);
4463 rxq->tpa_info[agg_index].agg_state =
4464 QLNX_AGG_STATE_ERROR;
4466 ecore_chain_consume(&rxq->rx_bd_ring);
4468 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4473 mpc->m_flags &= ~M_PKTHDR;
4475 mpc->m_len = cqe->len_list[i];
4481 mpl->m_len = ha->rx_buf_size;
4486 ecore_chain_consume(&rxq->rx_bd_ring);
4488 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4491 QL_DPRINT7(ha, "[%d]: 2\n" "\tmpf = %p mpl = %p\n",
4492 fp->rss_id, mpf, mpl);
4495 mp = rxq->tpa_info[agg_index].mpl;
4496 mp->m_len = ha->rx_buf_size;
4498 rxq->tpa_info[agg_index].mpl = mpl;
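/*
 * Name: qlnx_tpa_end
 * Function: Handles a TPA end CQE: chains the final len_list buffers,
 *	trims the placement offset off the head mbuf, fixes up the total
 *	packet length (padding the last mbuf when the hardware count
 *	exceeds the chained bytes), hands the super-frame to if_input and
 *	resets the aggregation to QLNX_AGG_STATE_NONE. Returns the number
 *	of coalesced segments for the caller's budget accounting.
 */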
4505 qlnx_tpa_end(qlnx_host_t *ha, struct qlnx_fastpath *fp,
4506 struct qlnx_rx_queue *rxq,
4507 struct eth_fast_path_rx_tpa_end_cqe *cqe)
4509 struct sw_rx_data *sw_rx_data;
4511 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL;
4515 struct ifnet *ifp = ha->ifp;
4520 QL_DPRINT7(ha, "[%d]: enter\n \
4522 \t tpa_agg_index = 0x%x\n \
4523 \t total_packet_len = 0x%x\n \
4524 \t num_of_bds = 0x%x\n \
4525 \t end_reason = 0x%x\n \
4526 \t num_of_coalesced_segs = 0x%x\n \
4527 \t ts_delta = 0x%x\n \
4528 \t len_list[0] = 0x%x\n \
4529 \t len_list[1] = 0x%x\n \
4530 \t len_list[2] = 0x%x\n \
4531 \t len_list[3] = 0x%x\n",
4532 fp->rss_id, cqe->type, cqe->tpa_agg_index,
4533 cqe->total_packet_len, cqe->num_of_bds,
4534 cqe->end_reason, cqe->num_of_coalesced_segs, cqe->ts_delta,
4535 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
4538 agg_index = cqe->tpa_agg_index;
4540 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
4542 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
4544 fp->err_rx_tpa_invalid_agg_num++;
4549 for (i = 0; i < ETH_TPA_CQE_END_LEN_LIST_SIZE; i++) {
4551 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);
4553 if (cqe->len_list[i] == 0)
4556 if (rxq->tpa_info[agg_index].agg_state !=
4557 QLNX_AGG_STATE_START) {
4559 QL_DPRINT7(ha, "[%d]: 2\n ", fp->rss_id);
4561 qlnx_reuse_rx_data(rxq);
4565 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4566 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4567 BUS_DMASYNC_POSTREAD);
4569 mpc = sw_rx_data->data;
4573 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4575 fp->err_rx_mp_null++;
4579 rxq->tpa_info[agg_index].agg_state =
4580 QLNX_AGG_STATE_ERROR;
4581 ecore_chain_consume(&rxq->rx_bd_ring);
4583 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4587 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4588 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4589 " dropping incoming packet and reusing its"
4590 " buffer\n", fp->rss_id);
4592 qlnx_reuse_rx_data(rxq);
4598 rxq->tpa_info[agg_index].agg_state =
4599 QLNX_AGG_STATE_ERROR;
4601 ecore_chain_consume(&rxq->rx_bd_ring);
4603 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4608 mpc->m_flags &= ~M_PKTHDR;
4610 mpc->m_len = cqe->len_list[i];
4616 mpl->m_len = ha->rx_buf_size;
4621 ecore_chain_consume(&rxq->rx_bd_ring);
4623 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4626 QL_DPRINT7(ha, "[%d]: 5\n ", fp->rss_id);
4630 QL_DPRINT7(ha, "[%d]: 6\n ", fp->rss_id);
4632 mp = rxq->tpa_info[agg_index].mpl;
4633 mp->m_len = ha->rx_buf_size;
4637 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_START) {
4639 QL_DPRINT7(ha, "[%d]: 7\n ", fp->rss_id);
4641 if (rxq->tpa_info[agg_index].mpf != NULL)
4642 m_freem(rxq->tpa_info[agg_index].mpf);
4643 rxq->tpa_info[agg_index].mpf = NULL;
4644 rxq->tpa_info[agg_index].mpl = NULL;
4645 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
4649 mp = rxq->tpa_info[agg_index].mpf;
4650 m_adj(mp, rxq->tpa_info[agg_index].placement_offset);
4651 mp->m_pkthdr.len = cqe->total_packet_len;
4653 if (mp->m_next == NULL)
4654 mp->m_len = mp->m_pkthdr.len;
4656 /* compute the total packet length */
4658 while (mpf != NULL) {
4663 if (cqe->total_packet_len > len) {
4664 mpl = rxq->tpa_info[agg_index].mpl;
4665 mpl->m_len += (cqe->total_packet_len - len);
4669 QLNX_INC_IPACKETS(ifp);
4670 QLNX_INC_IBYTES(ifp, (cqe->total_packet_len));
4672 QL_DPRINT7(ha, "[%d]: 8 csum_data = 0x%x csum_flags = 0x%" PRIu64 "\n \
4673 m_len = 0x%x m_pkthdr_len = 0x%x\n",
4674 fp->rss_id, mp->m_pkthdr.csum_data,
4675 (uint64_t)mp->m_pkthdr.csum_flags, mp->m_len, mp->m_pkthdr.len);
4677 (*ifp->if_input)(ifp, mp);
4679 rxq->tpa_info[agg_index].mpf = NULL;
4680 rxq->tpa_info[agg_index].mpl = NULL;
4681 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
4683 return (cqe->num_of_coalesced_segs);
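/*
 * Name: qlnx_rx_int
 * Function: Receive completion loop. Consumes CQEs until the software
 *	consumer catches up with the hardware consumer or the budget is
 *	exhausted: slow path CQEs are handed back to ecore, TPA
 *	start/cont/end CQEs go to the qlnx_tpa_* handlers, and regular
 *	CQEs are replenished, error checked, chained for jumbo frames if
 *	needed and passed to LRO or if_input. The receive producers are
 *	published to the hardware on exit.
 */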
4687 qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
4690 uint16_t hw_comp_cons, sw_comp_cons;
4692 struct qlnx_rx_queue *rxq = fp->rxq;
4693 struct ifnet *ifp = ha->ifp;
4694 struct ecore_dev *cdev = &ha->cdev;
4695 struct ecore_hwfn *p_hwfn;
4697 #ifdef QLNX_SOFT_LRO
4698 struct lro_ctrl *lro;
4701 #endif /* #ifdef QLNX_SOFT_LRO */
4703 hw_comp_cons = le16toh(*rxq->hw_cons_ptr);
4704 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
4706 p_hwfn = &ha->cdev.hwfns[(fp->rss_id % cdev->num_hwfns)];
4708 /* Memory barrier to prevent the CPU from speculatively reading the
4709 * CQE/BD in the while-loop before hw_comp_cons has been read. If the
4710 * CQE were read first, the FW could write the CQE and SB in between,
4711 * and the CPU would pair the new hw_comp_cons with a stale CQE.
4714 /* Loop to complete all indicated BDs */
4715 while (sw_comp_cons != hw_comp_cons) {
4716 union eth_rx_cqe *cqe;
4717 struct eth_fast_path_rx_reg_cqe *fp_cqe;
4718 struct sw_rx_data *sw_rx_data;
4719 register struct mbuf *mp;
4720 enum eth_rx_cqe_type cqe_type;
4721 uint16_t len, pad, len_on_first_bd;
4723 #if __FreeBSD_version >= 1100000
4725 #endif /* #if __FreeBSD_version >= 1100000 */
4727 /* Get the CQE from the completion ring */
4728 cqe = (union eth_rx_cqe *)
4729 ecore_chain_consume(&rxq->rx_comp_ring);
4730 cqe_type = cqe->fast_path_regular.type;
4732 if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) {
4733 QL_DPRINT3(ha, "Got a slowath CQE\n");
4735 ecore_eth_cqe_completion(p_hwfn,
4736 (struct eth_slow_path_rx_cqe *)cqe);
4740 if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) {
4744 case ETH_RX_CQE_TYPE_TPA_START:
4745 qlnx_tpa_start(ha, fp, rxq,
4746 &cqe->fast_path_tpa_start);
4750 case ETH_RX_CQE_TYPE_TPA_CONT:
4751 qlnx_tpa_cont(ha, fp, rxq,
4752 &cqe->fast_path_tpa_cont);
4756 case ETH_RX_CQE_TYPE_TPA_END:
4757 rx_pkt += qlnx_tpa_end(ha, fp, rxq,
4758 &cqe->fast_path_tpa_end);
4769 /* Get the data from the SW ring */
4770 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4771 mp = sw_rx_data->data;
4774 QL_DPRINT1(ha, "mp = NULL\n");
4775 fp->err_rx_mp_null++;
4777 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4780 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4781 BUS_DMASYNC_POSTREAD);
4784 fp_cqe = &cqe->fast_path_regular;/* MK CR TPA check assembly */
4785 len = le16toh(fp_cqe->pkt_len);
4786 pad = fp_cqe->placement_offset;
4788 QL_DPRINT3(ha, "CQE type = %x, flags = %x, vlan = %x,"
4789 " len %u, parsing flags = %d pad = %d\n",
4790 cqe_type, fp_cqe->bitfields,
4791 le16toh(fp_cqe->vlan_tag),
4792 len, le16toh(fp_cqe->pars_flags.flags), pad);
4794 data = mtod(mp, uint8_t *);
4798 qlnx_dump_buf8(ha, __func__, data, len);
4800 /* For every Rx BD consumed, we allocate a new BD so the BD ring
4801 * is always with a fixed size. If allocation fails, we take the
4802 * consumed BD and return it to the ring in the PROD position.
4803 * The packet that was received on that BD will be dropped (and
4804 * not passed to the upper stack).
4806 /* If this is an error packet then drop it */
4807 if ((le16toh(cqe->fast_path_regular.pars_flags.flags)) &
4810 QL_DPRINT1(ha, "CQE in CONS = %u has error, flags = %x,"
4811 " dropping incoming packet\n", sw_comp_cons,
4812 le16toh(cqe->fast_path_regular.pars_flags.flags));
4813 fp->err_rx_hw_errors++;
4815 qlnx_reuse_rx_data(rxq);
4817 QLNX_INC_IERRORS(ifp);
4822 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4824 QL_DPRINT1(ha, "New buffer allocation failed, dropping"
4825 " incoming packet and reusing its buffer\n");
4826 qlnx_reuse_rx_data(rxq);
4828 fp->err_rx_alloc_errors++;
4830 QLNX_INC_IQDROPS(ifp);
4835 ecore_chain_consume(&rxq->rx_bd_ring);
4837 len_on_first_bd = fp_cqe->len_on_first_bd;
4839 mp->m_pkthdr.len = len;
4841 if ((len > 60 ) && (len > len_on_first_bd)) {
4843 mp->m_len = len_on_first_bd;
4845 if (qlnx_rx_jumbo_chain(ha, fp, mp,
4846 (len - len_on_first_bd)) != 0) {
4850 QLNX_INC_IQDROPS(ifp);
4855 } else if (len_on_first_bd < len) {
4856 fp->err_rx_jumbo_chain_pkts++;
4861 mp->m_flags |= M_PKTHDR;
4863 /* assign packet to this interface */
4864 mp->m_pkthdr.rcvif = ifp;
4866 /* assume no hardware checksum has completed */
4867 mp->m_pkthdr.csum_flags = 0;
4869 mp->m_pkthdr.flowid = fp_cqe->rss_hash;
4871 #if __FreeBSD_version >= 1100000
4873 hash_type = fp_cqe->bitfields &
4874 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
4875 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);
4877 switch (hash_type) {
4879 case RSS_HASH_TYPE_IPV4:
4880 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
4883 case RSS_HASH_TYPE_TCP_IPV4:
4884 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
4887 case RSS_HASH_TYPE_IPV6:
4888 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
4891 case RSS_HASH_TYPE_TCP_IPV6:
4892 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
4896 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
4901 mp->m_flags |= M_FLOWID;
4904 if (CQE_L3_PACKET(fp_cqe->pars_flags.flags)) {
4905 mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
4908 if (!(CQE_IP_HDR_ERR(fp_cqe->pars_flags.flags))) {
4909 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4912 if (CQE_L4_HAS_CSUM(fp_cqe->pars_flags.flags)) {
4913 mp->m_pkthdr.csum_data = 0xFFFF;
4914 mp->m_pkthdr.csum_flags |=
4915 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4918 if (CQE_HAS_VLAN(fp_cqe->pars_flags.flags)) {
4919 mp->m_pkthdr.ether_vtag = le16toh(fp_cqe->vlan_tag);
4920 mp->m_flags |= M_VLANTAG;
4923 QLNX_INC_IPACKETS(ifp);
4924 QLNX_INC_IBYTES(ifp, len);
4926 #ifdef QLNX_SOFT_LRO
4930 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
4932 tcp_lro_queue_mbuf(lro, mp);
4936 if (tcp_lro_rx(lro, mp, 0))
4937 (*ifp->if_input)(ifp, mp);
4939 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
4942 (*ifp->if_input)(ifp, mp);
4946 (*ifp->if_input)(ifp, mp);
4948 #endif /* #ifdef QLNX_SOFT_LRO */
4952 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4954 next_cqe: /* don't consume bd rx buffer */
4955 ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
4956 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
4958 /* CR TPA - revisit how to handle budget in TPA perhaps
4959 increase on "end" */
4960 if (rx_pkt == budget)
4962 } /* repeat while sw_comp_cons != hw_comp_cons... */
4964 /* Update producers */
4965 qlnx_update_rx_prod(p_hwfn, rxq);
4972 * fast path interrupt
4976 qlnx_fp_isr(void *arg)
4978 qlnx_ivec_t *ivec = arg;
4980 struct qlnx_fastpath *fp = NULL;
4985 if (ha->state != QLNX_STATE_OPEN) {
4989 idx = ivec->rss_idx;
4991 if ((idx = ivec->rss_idx) >= ha->num_rss) {
4992 QL_DPRINT1(ha, "illegal interrupt[%d]\n", idx);
4993 ha->err_illegal_intr++;
4996 fp = &ha->fp_array[idx];
5001 int rx_int = 0, total_rx_count = 0;
5003 struct qlnx_tx_queue *txq;
5006 lro_enable = ha->ifp->if_capenable & IFCAP_LRO;
5008 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
5011 for (tc = 0; tc < ha->num_tc; tc++) {
5015 if((int)(elem_left =
5016 ecore_chain_get_elem_left(&txq->tx_pbl)) <
5017 QLNX_TX_ELEM_THRESH) {
5019 if (mtx_trylock(&fp->tx_mtx)) {
5020 #ifdef QLNX_TRACE_PERF_DATA
5021 tx_compl = fp->tx_pkts_completed;
5024 qlnx_tx_int(ha, fp, fp->txq[tc]);
5025 #ifdef QLNX_TRACE_PERF_DATA
5026 fp->tx_pkts_compl_intr +=
5027 (fp->tx_pkts_completed - tx_compl);
5028 if ((fp->tx_pkts_completed - tx_compl) <= 32)
5030 else if (((fp->tx_pkts_completed - tx_compl) > 32) &&
5031 ((fp->tx_pkts_completed - tx_compl) <= 64))
5033 else if (((fp->tx_pkts_completed - tx_compl) > 64) &&
5034 ((fp->tx_pkts_completed - tx_compl) <= 128))
5036 else if ((fp->tx_pkts_completed - tx_compl) > 128)
5039 mtx_unlock(&fp->tx_mtx);
5044 rx_int = qlnx_rx_int(ha, fp, ha->rx_pkt_threshold,
5048 fp->rx_pkts += rx_int;
5049 total_rx_count += rx_int;
5054 #ifdef QLNX_SOFT_LRO
5056 struct lro_ctrl *lro;
5058 lro = &fp->rxq->lro;
5060 if (lro_enable && total_rx_count) {
5062 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
5064 #ifdef QLNX_TRACE_LRO_CNT
5065 if (lro->lro_mbuf_count & ~1023)
5067 else if (lro->lro_mbuf_count & ~511)
5069 else if (lro->lro_mbuf_count & ~255)
5071 else if (lro->lro_mbuf_count & ~127)
5073 else if (lro->lro_mbuf_count & ~63)
5075 #endif /* #ifdef QLNX_TRACE_LRO_CNT */
5077 tcp_lro_flush_all(lro);
5080 struct lro_entry *queued;
5082 while ((!SLIST_EMPTY(&lro->lro_active))) {
5083 queued = SLIST_FIRST(&lro->lro_active);
5084 SLIST_REMOVE_HEAD(&lro->lro_active, next);
5086 tcp_lro_flush(lro, queued);
5088 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
5091 #endif /* #ifdef QLNX_SOFT_LRO */
5093 ecore_sb_update_sb_idx(fp->sb_info);
5095 ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
5103 * slow path interrupt processing function
5104 * can be invoked in polled mode or in interrupt mode via taskqueue.
5107 qlnx_sp_isr(void *arg)
5109 struct ecore_hwfn *p_hwfn;
5114 ha = (qlnx_host_t *)p_hwfn->p_dev;
5116 ha->sp_interrupts++;
5118 QL_DPRINT2(ha, "enter\n");
5120 ecore_int_sp_dpc(p_hwfn);
5122 QL_DPRINT2(ha, "exit\n");
5127 /*****************************************************************************
5128 * Support Functions for DMA'able Memory
5129 *****************************************************************************/
5132 qlnx_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
5134 *((bus_addr_t *)arg) = 0;
5137 printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
5141 *((bus_addr_t *)arg) = segs[0].ds_addr;
5147 qlnx_alloc_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
5155 ret = bus_dma_tag_create(
5156 ha->parent_tag,/* parent */
5158 ((bus_size_t)(1ULL << 32)),/* boundary */
5159 BUS_SPACE_MAXADDR, /* lowaddr */
5160 BUS_SPACE_MAXADDR, /* highaddr */
5161 NULL, NULL, /* filter, filterarg */
5162 dma_buf->size, /* maxsize */
5164 dma_buf->size, /* maxsegsize */
5166 NULL, NULL, /* lockfunc, lockarg */
5170 QL_DPRINT1(ha, "could not create dma tag\n");
5171 goto qlnx_alloc_dmabuf_exit;
5173 ret = bus_dmamem_alloc(dma_buf->dma_tag,
5174 (void **)&dma_buf->dma_b,
5175 (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
5178 bus_dma_tag_destroy(dma_buf->dma_tag);
5179 QL_DPRINT1(ha, "bus_dmamem_alloc failed\n");
5180 goto qlnx_alloc_dmabuf_exit;
5183 ret = bus_dmamap_load(dma_buf->dma_tag,
5187 qlnx_dmamap_callback,
5188 &b_addr, BUS_DMA_NOWAIT);
5190 if (ret || !b_addr) {
5191 bus_dma_tag_destroy(dma_buf->dma_tag);
5192 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
5195 goto qlnx_alloc_dmabuf_exit;
5198 dma_buf->dma_addr = b_addr;
5200 qlnx_alloc_dmabuf_exit:
5206 qlnx_free_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
5208 bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
5209 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
5210 bus_dma_tag_destroy(dma_buf->dma_tag);
5215 qlnx_dma_alloc_coherent(void *ecore_dev, bus_addr_t *phys, uint32_t size)
5222 ha = (qlnx_host_t *)ecore_dev;
5225 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
5227 memset(&dma_buf, 0, sizeof (qlnx_dma_t));
5229 dma_buf.size = size + PAGE_SIZE;
5230 dma_buf.alignment = 8;
5232 if (qlnx_alloc_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf) != 0)
5234 bzero((uint8_t *)dma_buf.dma_b, dma_buf.size);
5236 *phys = dma_buf.dma_addr;
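/*
 * A copy of the qlnx_dma_t descriptor is stashed in the extra page just
 * past the page-rounded buffer; qlnx_dma_free_coherent() recovers the
 * tag/map from the virtual address alone when tearing the mapping down.
 */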
5238 dma_p = (qlnx_dma_t *)((uint8_t *)dma_buf.dma_b + size);
5240 memcpy(dma_p, &dma_buf, sizeof(qlnx_dma_t));
5242 QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
5243 (void *)dma_buf.dma_map, (void *)dma_buf.dma_tag,
5244 dma_buf.dma_b, (void *)dma_buf.dma_addr, size);
5246 return (dma_buf.dma_b);
5250 qlnx_dma_free_coherent(void *ecore_dev, void *v_addr, bus_addr_t phys,
5253 qlnx_dma_t dma_buf, *dma_p;
5257 ha = (qlnx_host_t *)ecore_dev;
5263 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
5265 dma_p = (qlnx_dma_t *)((uint8_t *)v_addr + size);
5267 QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
5268 (void *)dma_p->dma_map, (void *)dma_p->dma_tag,
5269 dma_p->dma_b, (void *)dma_p->dma_addr, size);
5273 if (!ha->qlnxr_debug)
5274 qlnx_free_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf);
5279 qlnx_alloc_parent_dma_tag(qlnx_host_t *ha)
5287 * Allocate parent DMA Tag
5289 ret = bus_dma_tag_create(
5290 bus_get_dma_tag(dev), /* parent */
5291 1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
5292 BUS_SPACE_MAXADDR, /* lowaddr */
5293 BUS_SPACE_MAXADDR, /* highaddr */
5294 NULL, NULL, /* filter, filterarg */
5295 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
5297 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
5299 NULL, NULL, /* lockfunc, lockarg */
5303 QL_DPRINT1(ha, "could not create parent dma tag\n");
5307 ha->flags.parent_tag = 1;
5313 qlnx_free_parent_dma_tag(qlnx_host_t *ha)
5315 if (ha->parent_tag != NULL) {
5316 bus_dma_tag_destroy(ha->parent_tag);
5317 ha->parent_tag = NULL;
5323 qlnx_alloc_tx_dma_tag(qlnx_host_t *ha)
5325 if (bus_dma_tag_create(NULL, /* parent */
5326 1, 0, /* alignment, bounds */
5327 BUS_SPACE_MAXADDR, /* lowaddr */
5328 BUS_SPACE_MAXADDR, /* highaddr */
5329 NULL, NULL, /* filter, filterarg */
5330 QLNX_MAX_TSO_FRAME_SIZE, /* maxsize */
5331 QLNX_MAX_SEGMENTS, /* nsegments */
5332 QLNX_MAX_TX_MBUF_SIZE, /* maxsegsize */
5334 NULL, /* lockfunc */
5335 NULL, /* lockfuncarg */
5338 QL_DPRINT1(ha, "tx_tag alloc failed\n");
5346 qlnx_free_tx_dma_tag(qlnx_host_t *ha)
5348 if (ha->tx_tag != NULL) {
5349 bus_dma_tag_destroy(ha->tx_tag);
5356 qlnx_alloc_rx_dma_tag(qlnx_host_t *ha)
5358 if (bus_dma_tag_create(NULL, /* parent */
5359 1, 0, /* alignment, bounds */
5360 BUS_SPACE_MAXADDR, /* lowaddr */
5361 BUS_SPACE_MAXADDR, /* highaddr */
5362 NULL, NULL, /* filter, filterarg */
5363 MJUM9BYTES, /* maxsize */
5365 MJUM9BYTES, /* maxsegsize */
5367 NULL, /* lockfunc */
5368 NULL, /* lockfuncarg */
5371 QL_DPRINT1(ha, " rx_tag alloc failed\n");
5379 qlnx_free_rx_dma_tag(qlnx_host_t *ha)
5381 if (ha->rx_tag != NULL) {
5382 bus_dma_tag_destroy(ha->rx_tag);
5388 /*********************************
5389 * Exported functions
5390 *********************************/
5392 qlnx_pci_bus_get_bar_size(void *ecore_dev, uint8_t bar_id)
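/* 64-bit BARs occupy two 32-bit BAR registers, hence the doubling of
 * the logical bar_id below (assumed rationale) */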
5396 bar_id = bar_id * 2;
5398 bar_size = bus_get_resource_count(((qlnx_host_t *)ecore_dev)->pci_dev,
5406 qlnx_pci_read_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t *reg_value)
5408 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5414 qlnx_pci_read_config_word(void *ecore_dev, uint32_t pci_reg,
5415 uint16_t *reg_value)
5417 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5423 qlnx_pci_read_config_dword(void *ecore_dev, uint32_t pci_reg,
5424 uint32_t *reg_value)
5426 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5432 qlnx_pci_write_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t reg_value)
5434 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5435 pci_reg, reg_value, 1);
5440 qlnx_pci_write_config_word(void *ecore_dev, uint32_t pci_reg,
5443 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5444 pci_reg, reg_value, 2);
5449 qlnx_pci_write_config_dword(void *ecore_dev, uint32_t pci_reg,
5452 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5453 pci_reg, reg_value, 4);
5458 qlnx_pci_find_capability(void *ecore_dev, int cap)
5465 if (pci_find_cap(ha->pci_dev, PCIY_EXPRESS, &reg) == 0)
5468 QL_DPRINT1(ha, "failed\n");
5474 qlnx_pci_find_ext_capability(void *ecore_dev, int ext_cap)
5481 if (pci_find_extcap(ha->pci_dev, ext_cap, &reg) == 0)
5484 QL_DPRINT1(ha, "failed\n");
5490 qlnx_reg_rd32(void *hwfn, uint32_t reg_addr)
5493 struct ecore_hwfn *p_hwfn;
5497 data32 = bus_read_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \
5498 (bus_size_t)(p_hwfn->reg_offset + reg_addr));
5504 qlnx_reg_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
5506 struct ecore_hwfn *p_hwfn = hwfn;
5508 bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \
5509 (bus_size_t)(p_hwfn->reg_offset + reg_addr), value);
5515 qlnx_reg_wr16(void *hwfn, uint32_t reg_addr, uint16_t value)
5517 struct ecore_hwfn *p_hwfn = hwfn;
5519 bus_write_2(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \
5520 (bus_size_t)(p_hwfn->reg_offset + reg_addr), value);
5525 qlnx_dbell_wr32_db(void *hwfn, void *reg_addr, uint32_t value)
5527 struct ecore_dev *cdev;
5528 struct ecore_hwfn *p_hwfn;
5533 cdev = p_hwfn->p_dev;
5535 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(p_hwfn->doorbells));
5536 bus_write_4(((qlnx_host_t *)cdev)->pci_dbells, offset, value);
5542 qlnx_dbell_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
5544 struct ecore_hwfn *p_hwfn = hwfn;
5546 bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_dbells, \
5547 (bus_size_t)(p_hwfn->db_offset + reg_addr), value);
5553 qlnx_direct_reg_rd32(void *p_hwfn, uint32_t *reg_addr)
5557 struct ecore_dev *cdev;
5559 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5560 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5562 data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, offset);
5568 qlnx_direct_reg_wr32(void *p_hwfn, void *reg_addr, uint32_t value)
5571 struct ecore_dev *cdev;
5573 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5574 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5576 bus_write_4(((qlnx_host_t *)cdev)->pci_reg, offset, value);
5582 qlnx_direct_reg_wr64(void *p_hwfn, void *reg_addr, uint64_t value)
5585 struct ecore_dev *cdev;
5587 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5588 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5590 bus_write_8(((qlnx_host_t *)cdev)->pci_reg, offset, value);
5595 qlnx_zalloc(uint32_t size)
5599 va = malloc((unsigned long)size, M_QLNXBUF, M_NOWAIT);
5601 return ((void *)va);
5605 qlnx_barrier(void *p_hwfn)
5609 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
5610 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_WRITE);
5614 qlnx_link_update(void *p_hwfn)
5617 int prev_link_state;
5619 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
5621 qlnx_fill_link(ha, p_hwfn, &ha->if_link);
5623 prev_link_state = ha->link_up;
5624 ha->link_up = ha->if_link.link_up;
5626 if (prev_link_state != ha->link_up) {
5628 if_link_state_change(ha->ifp, LINK_STATE_UP);
5630 if_link_state_change(ha->ifp, LINK_STATE_DOWN);
5634 #ifdef CONFIG_ECORE_SRIOV
5636 if (qlnx_vf_device(ha) != 0) {
5637 if (ha->sriov_initialized)
5638 qlnx_inform_vf_link_state(p_hwfn, ha);
5641 #endif /* #ifdef CONFIG_ECORE_SRIOV */
5642 #endif /* #ifdef QLNX_VF */
5648 __qlnx_osal_vf_fill_acquire_resc_req(struct ecore_hwfn *p_hwfn,
5649 struct ecore_vf_acquire_sw_info *p_sw_info)
5651 p_sw_info->driver_version = (QLNX_VERSION_MAJOR << 24) |
5652 (QLNX_VERSION_MINOR << 16) |
5654 p_sw_info->os_type = VFPF_ACQUIRE_OS_FREEBSD;
5660 qlnx_osal_vf_fill_acquire_resc_req(void *p_hwfn, void *p_resc_req,
5663 __qlnx_osal_vf_fill_acquire_resc_req(p_hwfn, p_sw_info);
5669 qlnx_fill_link(qlnx_host_t *ha, struct ecore_hwfn *hwfn,
5670 struct qlnx_link_output *if_link)
5672 struct ecore_mcp_link_params link_params;
5673 struct ecore_mcp_link_state link_state;
5675 struct ecore_ptt *p_ptt = NULL;
5678 memset(if_link, 0, sizeof(*if_link));
5679 memset(&link_params, 0, sizeof(struct ecore_mcp_link_params));
5680 memset(&link_state, 0, sizeof(struct ecore_mcp_link_state));
5682 ha = (qlnx_host_t *)hwfn->p_dev;
5684 /* Prepare source inputs */
5686 /* PFs query link info from the management FW via a PTT; VFs read the bulletin board */
5686 if (qlnx_vf_device(ha) != 0) {
5688 p_ptt = ecore_ptt_acquire(hwfn);
5690 if (p_ptt == NULL) {
5691 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
5695 ecore_mcp_get_media_type(hwfn, p_ptt, &if_link->media_type);
5696 ecore_ptt_release(hwfn, p_ptt);
5698 memcpy(&link_params, ecore_mcp_get_link_params(hwfn),
5699 sizeof(link_params));
5700 memcpy(&link_state, ecore_mcp_get_link_state(hwfn),
5701 sizeof(link_state));
5703 ecore_mcp_get_media_type(hwfn, NULL, &if_link->media_type);
5704 ecore_vf_read_bulletin(hwfn, &p_change);
5705 ecore_vf_get_link_params(hwfn, &link_params);
5706 ecore_vf_get_link_state(hwfn, &link_state);
5709 /* Set the link parameters to pass to protocol driver */
5710 if (link_state.link_up) {
5711 if_link->link_up = true;
5712 if_link->speed = link_state.speed;
5715 if_link->supported_caps = QLNX_LINK_CAP_FIBRE;
5717 if (link_params.speed.autoneg)
5718 if_link->supported_caps |= QLNX_LINK_CAP_Autoneg;
5720 if (link_params.pause.autoneg ||
5721 (link_params.pause.forced_rx && link_params.pause.forced_tx))
5722 if_link->supported_caps |= QLNX_LINK_CAP_Asym_Pause;
5724 if (link_params.pause.autoneg || link_params.pause.forced_rx ||
5725 link_params.pause.forced_tx)
5726 if_link->supported_caps |= QLNX_LINK_CAP_Pause;
5728 if (link_params.speed.advertised_speeds &
5729 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
5730 if_link->supported_caps |= QLNX_LINK_CAP_1000baseT_Half |
5731 QLNX_LINK_CAP_1000baseT_Full;
5733 if (link_params.speed.advertised_speeds &
5734 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
5735 if_link->supported_caps |= QLNX_LINK_CAP_10000baseKR_Full;
5737 if (link_params.speed.advertised_speeds &
5738 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
5739 if_link->supported_caps |= QLNX_LINK_CAP_25000baseKR_Full;
5741 if (link_params.speed.advertised_speeds &
5742 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
5743 if_link->supported_caps |= QLNX_LINK_CAP_40000baseLR4_Full;
5745 if (link_params.speed.advertised_speeds &
5746 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
5747 if_link->supported_caps |= QLNX_LINK_CAP_50000baseKR2_Full;
5749 if (link_params.speed.advertised_speeds &
5750 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
5751 if_link->supported_caps |= QLNX_LINK_CAP_100000baseKR4_Full;
5753 if_link->advertised_caps = if_link->supported_caps;
5755 if_link->autoneg = link_params.speed.autoneg;
5756 if_link->duplex = QLNX_LINK_DUPLEX;
5758 /* Link partner capabilities */
5760 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_HD)
5761 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Half;
5763 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_FD)
5764 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Full;
5766 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_10G)
5767 if_link->link_partner_caps |= QLNX_LINK_CAP_10000baseKR_Full;
5769 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_25G)
5770 if_link->link_partner_caps |= QLNX_LINK_CAP_25000baseKR_Full;
5772 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_40G)
5773 if_link->link_partner_caps |= QLNX_LINK_CAP_40000baseLR4_Full;
5775 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_50G)
5776 if_link->link_partner_caps |= QLNX_LINK_CAP_50000baseKR2_Full;
5778 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_100G)
5779 if_link->link_partner_caps |= QLNX_LINK_CAP_100000baseKR4_Full;
5781 if (link_state.an_complete)
5782 if_link->link_partner_caps |= QLNX_LINK_CAP_Autoneg;
5784 if (link_state.partner_adv_pause)
5785 if_link->link_partner_caps |= QLNX_LINK_CAP_Pause;
5787 if ((link_state.partner_adv_pause ==
5788 ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE) ||
5789 (link_state.partner_adv_pause ==
5790 ECORE_LINK_PARTNER_BOTH_PAUSE))
5791 if_link->link_partner_caps |= QLNX_LINK_CAP_Asym_Pause;
5797 qlnx_schedule_recovery(void *p_hwfn)
5801 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
5803 if (qlnx_vf_device(ha) != 0) {
5804 taskqueue_enqueue(ha->err_taskqueue, &ha->err_task);
5811 qlnx_nic_setup(struct ecore_dev *cdev, struct ecore_pf_params *func_params)
5815 for (i = 0; i < cdev->num_hwfns; i++) {
5816 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
5817 p_hwfn->pf_params = *func_params;
5819 #ifdef QLNX_ENABLE_IWARP
5820 if (qlnx_vf_device((qlnx_host_t *)cdev) != 0) {
5821 p_hwfn->using_ll2 = true;
5823 #endif /* #ifdef QLNX_ENABLE_IWARP */
5827 rc = ecore_resc_alloc(cdev);
5829 goto qlnx_nic_setup_exit;
5831 ecore_resc_setup(cdev);
5833 qlnx_nic_setup_exit:
5839 qlnx_nic_start(struct ecore_dev *cdev)
5842 struct ecore_hw_init_params params;
5844 bzero(&params, sizeof (struct ecore_hw_init_params));
5846 params.p_tunn = NULL;
5847 params.b_hw_start = true;
5848 params.int_mode = cdev->int_mode;
5849 params.allow_npar_tx_switch = true;
5850 params.bin_fw_data = NULL;
5852 rc = ecore_hw_init(cdev, &params);
5854 ecore_resc_free(cdev);
5862 qlnx_slowpath_start(qlnx_host_t *ha)
5864 struct ecore_dev *cdev;
5865 struct ecore_pf_params pf_params;
5868 memset(&pf_params, 0, sizeof(struct ecore_pf_params));
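/* each RSS queue needs one Rx connection plus one Tx connection
 * per traffic class */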
5869 pf_params.eth_pf_params.num_cons =
5870 (ha->num_rss) * (ha->num_tc + 1);
5872 #ifdef QLNX_ENABLE_IWARP
5873 if (qlnx_vf_device(ha) != 0) {
5874 if (ha->personality == ECORE_PCI_ETH_IWARP) {
5875 device_printf(ha->pci_dev, "setting parameters required by iWARP dev\n");
5876 pf_params.rdma_pf_params.num_qps = 1024;
5877 pf_params.rdma_pf_params.num_srqs = 1024;
5878 pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX;
5879 pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_IWARP;
5880 } else if (ha->personality == ECORE_PCI_ETH_ROCE) {
5881 device_printf(ha->pci_dev, "setting parameters required by RoCE dev\n");
5882 pf_params.rdma_pf_params.num_qps = 8192;
5883 pf_params.rdma_pf_params.num_srqs = 8192;
5885 pf_params.rdma_pf_params.min_dpis = 8;
5886 pf_params.rdma_pf_params.roce_edpm_mode = 0;
5887 pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX;
5888 pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_ROCE;
5891 #endif /* #ifdef QLNX_ENABLE_IWARP */
5895 rc = qlnx_nic_setup(cdev, &pf_params);
5897 goto qlnx_slowpath_start_exit;
5899 cdev->int_mode = ECORE_INT_MODE_MSIX;
5900 cdev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE;
5902 #ifdef QLNX_MAX_COALESCE
5903 cdev->rx_coalesce_usecs = 255;
5904 cdev->tx_coalesce_usecs = 255;
5907 rc = qlnx_nic_start(cdev);
5909 ha->rx_coalesce_usecs = cdev->rx_coalesce_usecs;
5910 ha->tx_coalesce_usecs = cdev->tx_coalesce_usecs;
5912 #ifdef QLNX_USER_LLDP
5913 (void)qlnx_set_lldp_tlvx(ha, NULL);
5914 #endif /* #ifdef QLNX_USER_LLDP */
5916 qlnx_slowpath_start_exit:
5922 qlnx_slowpath_stop(qlnx_host_t *ha)
5924 struct ecore_dev *cdev;
5925 device_t dev = ha->pci_dev;
5930 ecore_hw_stop(cdev);
5932 for (i = 0; i < ha->cdev.num_hwfns; i++) {
5934 if (ha->sp_handle[i])
5935 (void)bus_teardown_intr(dev, ha->sp_irq[i],
5938 ha->sp_handle[i] = NULL;
5941 (void) bus_release_resource(dev, SYS_RES_IRQ,
5942 ha->sp_irq_rid[i], ha->sp_irq[i]);
5943 ha->sp_irq[i] = NULL;
5946 ecore_resc_free(cdev);
5952 qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
5953 char ver_str[VER_SIZE])
5957 memcpy(cdev->name, name, NAME_SIZE);
5959 for_each_hwfn(cdev, i) {
5960 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
5963 cdev->drv_type = DRV_ID_DRV_TYPE_FREEBSD;
5969 qlnx_get_protocol_stats(void *cdev, int proto_type, void *proto_stats)
5971 enum ecore_mcp_protocol_type type;
5972 union ecore_mcp_protocol_stats *stats;
5973 struct ecore_eth_stats eth_stats;
5977 stats = proto_stats;
5982 case ECORE_MCP_LAN_STATS:
5983 ecore_get_vport_stats((struct ecore_dev *)cdev, &eth_stats);
5984 stats->lan_stats.ucast_rx_pkts = eth_stats.common.rx_ucast_pkts;
5985 stats->lan_stats.ucast_tx_pkts = eth_stats.common.tx_ucast_pkts;
5986 stats->lan_stats.fcs_err = -1;
5990 ha->err_get_proto_invalid_type++;
5992 QL_DPRINT1(ha, "invalid protocol type 0x%x\n", type);
5999 qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver)
6001 struct ecore_hwfn *p_hwfn;
6002 struct ecore_ptt *p_ptt;
6004 p_hwfn = &ha->cdev.hwfns[0];
6005 p_ptt = ecore_ptt_acquire(p_hwfn);
6007 if (p_ptt == NULL) {
6008 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
6011 ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, mfw_ver, NULL);
6013 ecore_ptt_release(p_hwfn, p_ptt);
6019 qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size)
6021 struct ecore_hwfn *p_hwfn;
6022 struct ecore_ptt *p_ptt;
6024 p_hwfn = &ha->cdev.hwfns[0];
6025 p_ptt = ecore_ptt_acquire(p_hwfn);
6027 if (p_ptt == NULL) {
6028 QL_DPRINT1(ha,"ecore_ptt_acquire failed\n");
6031 ecore_mcp_get_flash_size(p_hwfn, p_ptt, flash_size);
6033 ecore_ptt_release(p_hwfn, p_ptt);
6039 qlnx_alloc_mem_arrays(qlnx_host_t *ha)
6041 struct ecore_dev *cdev;
6045 bzero(&ha->txq_array[0], (sizeof(struct qlnx_tx_queue) * QLNX_MAX_RSS));
6046 bzero(&ha->rxq_array[0], (sizeof(struct qlnx_rx_queue) * QLNX_MAX_RSS));
6047 bzero(&ha->sb_array[0], (sizeof(struct ecore_sb_info) * QLNX_MAX_RSS));
6053 qlnx_init_fp(qlnx_host_t *ha)
6055 int rss_id, txq_array_index, tc;
6057 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
6059 struct qlnx_fastpath *fp = &ha->fp_array[rss_id];
6061 fp->rss_id = rss_id;
6063 fp->sb_info = &ha->sb_array[rss_id];
6064 fp->rxq = &ha->rxq_array[rss_id];
6065 fp->rxq->rxq_id = rss_id;
6067 for (tc = 0; tc < ha->num_tc; tc++) {
6068 txq_array_index = tc * ha->num_rss + rss_id;
6069 fp->txq[tc] = &ha->txq_array[txq_array_index];
6070 fp->txq[tc]->index = txq_array_index;
6073 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", qlnx_name_str,
6076 fp->tx_ring_full = 0;
6078 /* reset all the statistics counters */
6080 fp->tx_pkts_processed = 0;
6081 fp->tx_pkts_freed = 0;
6082 fp->tx_pkts_transmitted = 0;
6083 fp->tx_pkts_completed = 0;
6085 #ifdef QLNX_TRACE_PERF_DATA
6086 fp->tx_pkts_trans_ctx = 0;
6087 fp->tx_pkts_compl_ctx = 0;
6088 fp->tx_pkts_trans_fp = 0;
6089 fp->tx_pkts_compl_fp = 0;
6090 fp->tx_pkts_compl_intr = 0;
6092 fp->tx_lso_wnd_min_len = 0;
6094 fp->tx_nsegs_gt_elem_left = 0;
6095 fp->tx_tso_max_nsegs = 0;
6096 fp->tx_tso_min_nsegs = 0;
6097 fp->err_tx_nsegs_gt_elem_left = 0;
6098 fp->err_tx_dmamap_create = 0;
6099 fp->err_tx_defrag_dmamap_load = 0;
6100 fp->err_tx_non_tso_max_seg = 0;
6101 fp->err_tx_dmamap_load = 0;
6102 fp->err_tx_defrag = 0;
6103 fp->err_tx_free_pkt_null = 0;
6104 fp->err_tx_cons_idx_conflict = 0;
6107 fp->err_m_getcl = 0;
6108 fp->err_m_getjcl = 0;
6114 qlnx_free_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info)
6116 struct ecore_dev *cdev;
6120 if (sb_info->sb_virt) {
6121 OSAL_DMA_FREE_COHERENT(cdev, ((void *)sb_info->sb_virt),
6122 (sb_info->sb_phys), (sizeof(*sb_info->sb_virt)));
6123 sb_info->sb_virt = NULL;
6128 qlnx_sb_init(struct ecore_dev *cdev, struct ecore_sb_info *sb_info,
6129 void *sb_virt_addr, bus_addr_t sb_phy_addr, u16 sb_id)
6131 struct ecore_hwfn *p_hwfn;
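/* status blocks are interleaved across the hwfns; rel_sb_id is the
 * block's index within the hwfn that owns it */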
6135 hwfn_index = sb_id % cdev->num_hwfns;
6136 p_hwfn = &cdev->hwfns[hwfn_index];
6137 rel_sb_id = sb_id / cdev->num_hwfns;
6139 QL_DPRINT2(((qlnx_host_t *)cdev),
6140 "hwfn_index = %d p_hwfn = %p sb_id = 0x%x rel_sb_id = 0x%x \
6141 sb_info = %p sb_virt_addr = %p sb_phy_addr = %p\n",
6142 hwfn_index, p_hwfn, sb_id, rel_sb_id, sb_info,
6143 sb_virt_addr, (void *)sb_phy_addr);
6145 rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
6146 sb_virt_addr, sb_phy_addr, rel_sb_id);
6151 /* This function allocates fast-path status block memory */
6153 qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info, u16 sb_id)
6155 struct status_block_e4 *sb_virt;
6159 struct ecore_dev *cdev;
6163 size = sizeof(*sb_virt);
6164 sb_virt = OSAL_DMA_ALLOC_COHERENT(cdev, (&sb_phys), size);
6167 QL_DPRINT1(ha, "Status block allocation failed\n");
6171 rc = qlnx_sb_init(cdev, sb_info, sb_virt, sb_phys, sb_id);
6173 OSAL_DMA_FREE_COHERENT(cdev, sb_virt, sb_phys, size);
6180 qlnx_free_rx_buffers(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
6183 struct sw_rx_data *rx_buf;
6185 for (i = 0; i < rxq->num_rx_buffers; i++) {
6187 rx_buf = &rxq->sw_rx_ring[i];
6189 if (rx_buf->data != NULL) {
6190 if (rx_buf->map != NULL) {
6191 bus_dmamap_unload(ha->rx_tag, rx_buf->map);
6192 bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
6195 m_freem(rx_buf->data);
6196 rx_buf->data = NULL;
6203 qlnx_free_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
6205 struct ecore_dev *cdev;
6210 qlnx_free_rx_buffers(ha, rxq);
6212 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
6213 qlnx_free_tpa_mbuf(ha, &rxq->tpa_info[i]);
6214 if (rxq->tpa_info[i].mpf != NULL)
6215 m_freem(rxq->tpa_info[i].mpf);
6218 bzero((void *)&rxq->sw_rx_ring[0],
6219 (sizeof (struct sw_rx_data) * RX_RING_SIZE));
6221 /* Free the real RQ ring used by FW */
6222 if (rxq->rx_bd_ring.p_virt_addr) {
6223 ecore_chain_free(cdev, &rxq->rx_bd_ring);
6224 rxq->rx_bd_ring.p_virt_addr = NULL;
6227 /* Free the real completion ring used by FW */
6228 if (rxq->rx_comp_ring.p_virt_addr &&
6229 rxq->rx_comp_ring.pbl_sp.p_virt_table) {
6230 ecore_chain_free(cdev, &rxq->rx_comp_ring);
6231 rxq->rx_comp_ring.p_virt_addr = NULL;
6232 rxq->rx_comp_ring.pbl_sp.p_virt_table = NULL;
6235 #ifdef QLNX_SOFT_LRO
6237 struct lro_ctrl *lro;
6242 #endif /* #ifdef QLNX_SOFT_LRO */
6248 qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
6250 register struct mbuf *mp;
6251 uint16_t rx_buf_size;
6252 struct sw_rx_data *sw_rx_data;
6253 struct eth_rx_bd *rx_bd;
6254 dma_addr_t dma_addr;
6256 bus_dma_segment_t segs[1];
6259 struct ecore_dev *cdev;
6263 rx_buf_size = rxq->rx_buf_size;
6265 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);
6268 QL_DPRINT1(ha, "Failed to allocate Rx data\n");
6272 mp->m_len = mp->m_pkthdr.len = rx_buf_size;
6274 map = (bus_dmamap_t)0;
6276 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
6278 dma_addr = segs[0].ds_addr;
6280 if (ret || !dma_addr || (nsegs != 1)) {
6282 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
6283 ret, (long long unsigned int)dma_addr, nsegs);
6287 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
6288 sw_rx_data->data = mp;
6289 sw_rx_data->dma_addr = dma_addr;
6290 sw_rx_data->map = map;
6292 /* Advance PROD and get BD pointer */
6293 rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
6294 rx_bd->addr.hi = htole32(U64_HI(dma_addr));
6295 rx_bd->addr.lo = htole32(U64_LO(dma_addr));
6296 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);
6298 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
6304 qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
6305 struct qlnx_agg_info *tpa)
6308 dma_addr_t dma_addr;
6310 bus_dma_segment_t segs[1];
6313 struct sw_rx_data *rx_buf;
6315 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);
6318 QL_DPRINT1(ha, "Failed to allocate Rx data\n");
6322 mp->m_len = mp->m_pkthdr.len = rx_buf_size;
6324 map = (bus_dmamap_t)0;
6326 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
6328 dma_addr = segs[0].ds_addr;
6330 if (ret || !dma_addr || (nsegs != 1)) {
6332 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
6333 ret, (long long unsigned int)dma_addr, nsegs);
6337 rx_buf = &tpa->rx_buf;
6339 memset(rx_buf, 0, sizeof (struct sw_rx_data));
6342 rx_buf->dma_addr = dma_addr;
6345 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);
6351 qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa)
6353 struct sw_rx_data *rx_buf;
6355 rx_buf = &tpa->rx_buf;
6357 if (rx_buf->data != NULL) {
6358 if (rx_buf->map != NULL) {
6359 bus_dmamap_unload(ha->rx_tag, rx_buf->map);
6360 bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
6363 m_freem(rx_buf->data);
6364 rx_buf->data = NULL;
6369 /* This function allocates all memory needed per Rx queue */
6371 qlnx_alloc_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
6373 int i, rc, num_allocated;
6375 struct ecore_dev *cdev;
6380 rxq->num_rx_buffers = RX_RING_SIZE;
6382 rxq->rx_buf_size = ha->rx_buf_size;
6384 /* Allocate the parallel driver ring for Rx buffers */
6385 bzero((void *)&rxq->sw_rx_ring[0],
6386 (sizeof (struct sw_rx_data) * RX_RING_SIZE));
6388 /* Allocate FW Rx ring */
6390 rc = ecore_chain_alloc(cdev,
6391 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
6392 ECORE_CHAIN_MODE_NEXT_PTR,
6393 ECORE_CHAIN_CNT_TYPE_U16,
6395 sizeof(struct eth_rx_bd),
6396 &rxq->rx_bd_ring, NULL);
6401 /* Allocate FW completion ring */
6402 rc = ecore_chain_alloc(cdev,
6403 ECORE_CHAIN_USE_TO_CONSUME,
6404 ECORE_CHAIN_MODE_PBL,
6405 ECORE_CHAIN_CNT_TYPE_U16,
6407 sizeof(union eth_rx_cqe),
6408 &rxq->rx_comp_ring, NULL);
6413 /* Allocate buffers for the Rx ring */
6415 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
6416 rc = qlnx_alloc_tpa_mbuf(ha, rxq->rx_buf_size,
6423 for (i = 0; i < rxq->num_rx_buffers; i++) {
6424 rc = qlnx_alloc_rx_buffer(ha, rxq);
6429 if (!num_allocated) {
6430 QL_DPRINT1(ha, "Rx buffers allocation failed\n");
6432 } else if (num_allocated < rxq->num_rx_buffers) {
6433 QL_DPRINT1(ha, "Allocated less buffers than"
6434 " desired (%d allocated)\n", num_allocated);
6437 #ifdef QLNX_SOFT_LRO
6440 struct lro_ctrl *lro;
6444 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
6445 if (tcp_lro_init_args(lro, ifp, 0, rxq->num_rx_buffers)) {
6446 QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n",
6451 if (tcp_lro_init(lro)) {
6452 QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n",
6456 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
6460 #endif /* #ifdef QLNX_SOFT_LRO */
6464 qlnx_free_mem_rxq(ha, rxq);
6470 qlnx_free_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6471 struct qlnx_tx_queue *txq)
6473 struct ecore_dev *cdev;
6477 bzero((void *)&txq->sw_tx_ring[0],
6478 (sizeof (struct sw_tx_bd) * TX_RING_SIZE));
6480 /* Free the real RQ ring used by FW */
6481 if (txq->tx_pbl.p_virt_addr) {
6482 ecore_chain_free(cdev, &txq->tx_pbl);
6483 txq->tx_pbl.p_virt_addr = NULL;
6488 /* This function allocates all memory needed per Tx queue */
6490 qlnx_alloc_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6491 struct qlnx_tx_queue *txq)
6493 int ret = ECORE_SUCCESS;
6494 union eth_tx_bd_types *p_virt;
6495 struct ecore_dev *cdev;
6499 bzero((void *)&txq->sw_tx_ring[0],
6500 (sizeof (struct sw_tx_bd) * TX_RING_SIZE));
6502 /* Allocate the real Tx ring to be used by FW */
6503 ret = ecore_chain_alloc(cdev,
6504 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
6505 ECORE_CHAIN_MODE_PBL,
6506 ECORE_CHAIN_CNT_TYPE_U16,
6509 &txq->tx_pbl, NULL);
6511 if (ret != ECORE_SUCCESS) {
6515 txq->num_tx_buffers = TX_RING_SIZE;
6520 qlnx_free_mem_txq(ha, fp, txq);
6525 qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6528 struct ifnet *ifp = ha->ifp;
6530 if (mtx_initialized(&fp->tx_mtx)) {
6532 if (fp->tx_br != NULL) {
6534 mtx_lock(&fp->tx_mtx);
6536 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
6537 fp->tx_pkts_freed++;
6541 mtx_unlock(&fp->tx_mtx);
6543 buf_ring_free(fp->tx_br, M_DEVBUF);
6546 mtx_destroy(&fp->tx_mtx);
6552 qlnx_free_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6556 qlnx_free_mem_sb(ha, fp->sb_info);
6558 qlnx_free_mem_rxq(ha, fp->rxq);
6560 for (tc = 0; tc < ha->num_tc; tc++)
6561 qlnx_free_mem_txq(ha, fp, fp->txq[tc]);
6567 qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6569 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
6570 "qlnx%d_fp%d_tx_mq_lock", ha->dev_unit, fp->rss_id);
6572 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
6574 fp->tx_br = buf_ring_alloc(TX_RING_SIZE, M_DEVBUF,
6575 M_NOWAIT, &fp->tx_mtx);
6576 if (fp->tx_br == NULL) {
6577 QL_DPRINT1(ha, "buf_ring_alloc failed for fp[%d, %d]\n",
6578 ha->dev_unit, fp->rss_id);
6585 qlnx_alloc_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6589 rc = qlnx_alloc_mem_sb(ha, fp->sb_info, fp->rss_id);
6593 if (ha->rx_jumbo_buf_eq_mtu) {
6594 if (ha->max_frame_size <= MCLBYTES)
6595 ha->rx_buf_size = MCLBYTES;
6596 else if (ha->max_frame_size <= MJUMPAGESIZE)
6597 ha->rx_buf_size = MJUMPAGESIZE;
6598 else if (ha->max_frame_size <= MJUM9BYTES)
6599 ha->rx_buf_size = MJUM9BYTES;
6600 else if (ha->max_frame_size <= MJUM16BYTES)
6601 ha->rx_buf_size = MJUM16BYTES;
6603 if (ha->max_frame_size <= MCLBYTES)
6604 ha->rx_buf_size = MCLBYTES;
6606 ha->rx_buf_size = MJUMPAGESIZE;
6609 rc = qlnx_alloc_mem_rxq(ha, fp->rxq);
6613 for (tc = 0; tc < ha->num_tc; tc++) {
6614 rc = qlnx_alloc_mem_txq(ha, fp, fp->txq[tc]);
6622 qlnx_free_mem_fp(ha, fp);
6627 qlnx_free_mem_load(qlnx_host_t *ha)
6630 struct ecore_dev *cdev;
6634 for (i = 0; i < ha->num_rss; i++) {
6635 struct qlnx_fastpath *fp = &ha->fp_array[i];
6637 qlnx_free_mem_fp(ha, fp);
6643 qlnx_alloc_mem_load(qlnx_host_t *ha)
6647 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
6648 struct qlnx_fastpath *fp = &ha->fp_array[rss_id];
6650 rc = qlnx_alloc_mem_fp(ha, fp);
6658 qlnx_start_vport(struct ecore_dev *cdev,
6662 u8 inner_vlan_removal_en_flg,
6667 struct ecore_sp_vport_start_params vport_start_params = { 0 };
6670 ha = (qlnx_host_t *)cdev;
6672 vport_start_params.remove_inner_vlan = inner_vlan_removal_en_flg;
6673 vport_start_params.tx_switching = 0;
6674 vport_start_params.handle_ptp_pkts = 0;
6675 vport_start_params.only_untagged = 0;
6676 vport_start_params.drop_ttl0 = drop_ttl0_flg;
6678 vport_start_params.tpa_mode =
6679 (hw_lro_enable ? ECORE_TPA_MODE_RSC : ECORE_TPA_MODE_NONE);
6680 vport_start_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;
6682 vport_start_params.vport_id = vport_id;
6683 vport_start_params.mtu = mtu;
6686 QL_DPRINT2(ha, "Setting mtu to %d and VPORT ID = %d\n", mtu, vport_id);
6688 for_each_hwfn(cdev, i) {
6689 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
6691 vport_start_params.concrete_fid = p_hwfn->hw_info.concrete_fid;
6692 vport_start_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
6694 rc = ecore_sp_vport_start(p_hwfn, &vport_start_params);
6697 QL_DPRINT1(ha, "Failed to start VPORT V-PORT %d"
6698 " with MTU %d\n" , vport_id, mtu);
6702 ecore_hw_start_fastpath(p_hwfn);
6704 QL_DPRINT2(ha, "Started V-PORT %d with MTU %d\n",
6712 qlnx_update_vport(struct ecore_dev *cdev,
6713 struct qlnx_update_vport_params *params)
6715 struct ecore_sp_vport_update_params sp_params;
6716 int rc, i, j, fp_index;
6717 struct ecore_hwfn *p_hwfn;
6718 struct ecore_rss_params *rss;
6719 qlnx_host_t *ha = (qlnx_host_t *)cdev;
6720 struct qlnx_fastpath *fp;
6722 memset(&sp_params, 0, sizeof(sp_params));
6723 /* Translate protocol params into sp params */
6724 sp_params.vport_id = params->vport_id;
6726 sp_params.update_vport_active_rx_flg =
6727 params->update_vport_active_rx_flg;
6728 sp_params.vport_active_rx_flg = params->vport_active_rx_flg;
6730 sp_params.update_vport_active_tx_flg =
6731 params->update_vport_active_tx_flg;
6732 sp_params.vport_active_tx_flg = params->vport_active_tx_flg;
6734 sp_params.update_inner_vlan_removal_flg =
6735 params->update_inner_vlan_removal_flg;
6736 sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg;
6738 sp_params.sge_tpa_params = params->sge_tpa_params;
6740 /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns.
6741 * We need to re-fix the rss values per engine for CMT.
6743 if (params->rss_params->update_rss_config)
6744 sp_params.rss_params = params->rss_params;
6746 sp_params.rss_params = NULL;
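/*
 * e.g. with num_hwfns = 2 and num_rss = 4 the loop below hands engine 0
 * the indirection table 0,2,0,2,... and engine 1 the table 1,3,1,3,...,
 * so each engine only references Rx queues hosted on its own hwfn.
 */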
6748 for_each_hwfn(cdev, i) {
6750 p_hwfn = &cdev->hwfns[i];
6752 if ((cdev->num_hwfns > 1) &&
6753 params->rss_params->update_rss_config &&
6754 params->rss_params->rss_enable) {
6756 rss = params->rss_params;
6758 for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE; j++) {
6760 fp_index = ((cdev->num_hwfns * j) + i) %
6763 fp = &ha->fp_array[fp_index];
6764 rss->rss_ind_table[j] = fp->rxq->handle;
6767 for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE;) {
6768 QL_DPRINT3(ha, "%p %p %p %p %p %p %p %p \n",
6769 rss->rss_ind_table[j],
6770 rss->rss_ind_table[j+1],
6771 rss->rss_ind_table[j+2],
6772 rss->rss_ind_table[j+3],
6773 rss->rss_ind_table[j+4],
6774 rss->rss_ind_table[j+5],
6775 rss->rss_ind_table[j+6],
6776 rss->rss_ind_table[j+7]);
6781 sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
6783 QL_DPRINT1(ha, "Update sp vport ID=%d\n", params->vport_id);
6785 rc = ecore_sp_vport_update(p_hwfn, &sp_params,
6786 ECORE_SPQ_MODE_EBLOCK, NULL);
6788 QL_DPRINT1(ha, "Failed to update VPORT\n");
6792 QL_DPRINT2(ha, "Updated V-PORT %d: tx_active_flag %d, \
6793 rx_active_flag %d [tx_update %d], [rx_update %d]\n",
6794 params->vport_id, params->vport_active_tx_flg,
6795 params->vport_active_rx_flg,
6796 params->update_vport_active_tx_flg,
6797 params->update_vport_active_rx_flg);
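/*
 * Recycle the BD at the consumer index back onto the producer index,
 * reusing the same mbuf; the Rx path relies on this to keep the ring
 * populated when a replacement receive buffer cannot be allocated.
 */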
6804 qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq)
6806 struct eth_rx_bd *rx_bd_cons =
6807 ecore_chain_consume(&rxq->rx_bd_ring);
6808 struct eth_rx_bd *rx_bd_prod =
6809 ecore_chain_produce(&rxq->rx_bd_ring);
6810 struct sw_rx_data *sw_rx_data_cons =
6811 &rxq->sw_rx_ring[rxq->sw_rx_cons];
6812 struct sw_rx_data *sw_rx_data_prod =
6813 &rxq->sw_rx_ring[rxq->sw_rx_prod];
6815 sw_rx_data_prod->data = sw_rx_data_cons->data;
6816 memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd));
6818 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
6819 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
6825 qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, struct qlnx_rx_queue *rxq)
6831 struct eth_rx_prod_data rx_prod_data;
6835 bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
6836 cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
6838 /* Update producers */
6839 rx_prods.rx_prod_data.bd_prod = htole16(bd_prod);
6840 rx_prods.rx_prod_data.cqe_prod = htole16(cqe_prod);
6842 /* Make sure that the BD and SGE data is updated before updating the
6843 * producers since FW might read the BD/SGE right after the producer
6848 internal_ram_wr(p_hwfn, rxq->hw_rxq_prod_addr,
6849 sizeof(rx_prods), &rx_prods.data32);
6851 /* mmiowb is needed to synchronize doorbell writes from more than one
6852 * processor. It guarantees that the write arrives to the device before
6853 * the napi lock is released and another qlnx_poll is called (possibly
6854 * on another CPU). Without this barrier, the next doorbell can bypass
6855 * this doorbell. This is applicable to IA64/Altix systems.
6862 static uint32_t qlnx_hash_key[] = {
6863 ((0x6d << 24)|(0x5a << 16)|(0x56 << 8)|0xda),
6864 ((0x25 << 24)|(0x5b << 16)|(0x0e << 8)|0xc2),
6865 ((0x41 << 24)|(0x67 << 16)|(0x25 << 8)|0x3d),
6866 ((0x43 << 24)|(0xa3 << 16)|(0x8f << 8)|0xb0),
6867 ((0xd0 << 24)|(0xca << 16)|(0x2b << 8)|0xcb),
6868 ((0xae << 24)|(0x7b << 16)|(0x30 << 8)|0xb4),
6869 ((0x77 << 24)|(0xcb << 16)|(0x2d << 8)|0xa3),
6870 ((0x80 << 24)|(0x30 << 16)|(0xf2 << 8)|0x0c),
6871 ((0x6a << 24)|(0x42 << 16)|(0xb7 << 8)|0x3b),
6872 ((0xbe << 24)|(0xac << 16)|(0x01 << 8)|0xfa)};
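/* the above is the Microsoft-recommended default Toeplitz RSS key,
 * packed four bytes per 32-bit word */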
6875 qlnx_start_queues(qlnx_host_t *ha)
6877 int rc, tc, i, vport_id = 0,
6878 drop_ttl0_flg = 1, vlan_removal_en = 1,
6879 tx_switching = 0, hw_lro_enable = 0;
6880 struct ecore_dev *cdev = &ha->cdev;
6881 struct ecore_rss_params *rss_params = &ha->rss_params;
6882 struct qlnx_update_vport_params vport_update_params;
6884 struct ecore_hwfn *p_hwfn;
6885 struct ecore_sge_tpa_params tpa_params;
6886 struct ecore_queue_start_common_params qparams;
6887 struct qlnx_fastpath *fp;
6891 QL_DPRINT1(ha, "Num RSS = %d\n", ha->num_rss);
6894 QL_DPRINT1(ha, "Cannot update V-VPORT as active as there"
6895 " are no Rx queues\n");
6899 #ifndef QLNX_SOFT_LRO
6900 hw_lro_enable = ifp->if_capenable & IFCAP_LRO;
6901 #endif /* #ifndef QLNX_SOFT_LRO */
6903 rc = qlnx_start_vport(cdev, vport_id, ifp->if_mtu, drop_ttl0_flg,
6904 vlan_removal_en, tx_switching, hw_lro_enable);
6907 QL_DPRINT1(ha, "Start V-PORT failed %d\n", rc);
6911 QL_DPRINT2(ha, "Start vport ramrod passed, "
6912 "vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
6913 vport_id, (int)(ifp->if_mtu + 0xe), vlan_removal_en);
6916 struct ecore_rxq_start_ret_params rx_ret_params;
6917 struct ecore_txq_start_ret_params tx_ret_params;
6919 fp = &ha->fp_array[i];
6920 p_hwfn = &cdev->hwfns[(fp->rss_id % cdev->num_hwfns)];
6922 bzero(&qparams, sizeof(struct ecore_queue_start_common_params));
6923 bzero(&rx_ret_params,
6924 sizeof (struct ecore_rxq_start_ret_params));
6926 qparams.queue_id = i;
6927 qparams.vport_id = vport_id;
6928 qparams.stats_id = vport_id;
6929 qparams.p_sb = fp->sb_info;
6930 qparams.sb_idx = RX_PI;
6933 rc = ecore_eth_rx_queue_start(p_hwfn,
6934 p_hwfn->hw_info.opaque_fid,
6936 fp->rxq->rx_buf_size, /* bd_max_bytes */
6937 /* bd_chain_phys_addr */
6938 fp->rxq->rx_bd_ring.p_phys_addr,
6940 ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring),
6942 ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring),
6946 QL_DPRINT1(ha, "Start RXQ #%d failed %d\n", i, rc);
6950 fp->rxq->hw_rxq_prod_addr = rx_ret_params.p_prod;
6951 fp->rxq->handle = rx_ret_params.p_handle;
6952 fp->rxq->hw_cons_ptr =
6953 &fp->sb_info->sb_virt->pi_array[RX_PI];
6955 qlnx_update_rx_prod(p_hwfn, fp->rxq);
6957 for (tc = 0; tc < ha->num_tc; tc++) {
6958 struct qlnx_tx_queue *txq = fp->txq[tc];
6961 sizeof(struct ecore_queue_start_common_params));
6962 bzero(&tx_ret_params,
6963 sizeof (struct ecore_txq_start_ret_params));
6965 qparams.queue_id = txq->index / cdev->num_hwfns;
6966 qparams.vport_id = vport_id;
6967 qparams.stats_id = vport_id;
6968 qparams.p_sb = fp->sb_info;
6969 qparams.sb_idx = TX_PI(tc);
6971 rc = ecore_eth_tx_queue_start(p_hwfn,
6972 p_hwfn->hw_info.opaque_fid,
6974 /* bd_chain_phys_addr */
6975 ecore_chain_get_pbl_phys(&txq->tx_pbl),
6976 ecore_chain_get_page_cnt(&txq->tx_pbl),
6980 QL_DPRINT1(ha, "Start TXQ #%d failed %d\n",
6985 txq->doorbell_addr = tx_ret_params.p_doorbell;
6986 txq->handle = tx_ret_params.p_handle;
6989 &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
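/* pre-build the doorbell record: route it to the XCM block and have
 * each doorbell carry the Tx BD producer value */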
6990 SET_FIELD(txq->tx_db.data.params,
6991 ETH_DB_DATA_DEST, DB_DEST_XCM);
6992 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
6994 SET_FIELD(txq->tx_db.data.params,
6995 ETH_DB_DATA_AGG_VAL_SEL,
6996 DQ_XCM_ETH_TX_BD_PROD_CMD);
6998 txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
7002 /* Fill struct with RSS params */
7003 if (ha->num_rss > 1) {
7005 rss_params->update_rss_config = 1;
7006 rss_params->rss_enable = 1;
7007 rss_params->update_rss_capabilities = 1;
7008 rss_params->update_rss_ind_table = 1;
7009 rss_params->update_rss_key = 1;
7010 rss_params->rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 |
7011 ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP;
7012 rss_params->rss_table_size_log = 7; /* 2^7 = 128 */
7014 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
7015 fp = &ha->fp_array[(i % ha->num_rss)];
7016 rss_params->rss_ind_table[i] = fp->rxq->handle;
7019 for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
7020 rss_params->rss_key[i] = (__le32)qlnx_hash_key[i];
7023 memset(rss_params, 0, sizeof(*rss_params));
7027 /* Prepare and send the vport enable */
7028 memset(&vport_update_params, 0, sizeof(vport_update_params));
7029 vport_update_params.vport_id = vport_id;
7030 vport_update_params.update_vport_active_tx_flg = 1;
7031 vport_update_params.vport_active_tx_flg = 1;
7032 vport_update_params.update_vport_active_rx_flg = 1;
7033 vport_update_params.vport_active_rx_flg = 1;
7034 vport_update_params.rss_params = rss_params;
7035 vport_update_params.update_inner_vlan_removal_flg = 1;
7036 vport_update_params.inner_vlan_removal_flg = 1;
7038 if (hw_lro_enable) {
7039 memset(&tpa_params, 0, sizeof (struct ecore_sge_tpa_params));
7041 tpa_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;
7043 tpa_params.update_tpa_en_flg = 1;
7044 tpa_params.tpa_ipv4_en_flg = 1;
7045 tpa_params.tpa_ipv6_en_flg = 1;
7047 tpa_params.update_tpa_param_flg = 1;
7048 tpa_params.tpa_pkt_split_flg = 0;
7049 tpa_params.tpa_hdr_data_split_flg = 0;
7050 tpa_params.tpa_gro_consistent_flg = 0;
7051 tpa_params.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
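/* (uint16_t)(-1) == 0xFFFF, the largest value the 16-bit field
 * allows, effectively leaving the aggregation size uncapped */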
7052 tpa_params.tpa_max_size = (uint16_t)(-1);
7053 tpa_params.tpa_min_size_to_start = ifp->if_mtu/2;
7054 tpa_params.tpa_min_size_to_cont = ifp->if_mtu/2;
7056 vport_update_params.sge_tpa_params = &tpa_params;
7059 rc = qlnx_update_vport(cdev, &vport_update_params);
7061 QL_DPRINT1(ha, "Update V-PORT failed %d\n", rc);
7069 qlnx_drain_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
7070 struct qlnx_tx_queue *txq)
7072 uint16_t hw_bd_cons;
7073 uint16_t ecore_cons_idx;
7075 QL_DPRINT2(ha, "enter\n");
7077 hw_bd_cons = le16toh(*txq->hw_cons_ptr);
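/* reap Tx completions until the chain's consumer catches up with the
 * consumer index reported by firmware, pausing 2ms between passes */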
7079 while (hw_bd_cons !=
7080 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
7082 mtx_lock(&fp->tx_mtx);
7084 (void)qlnx_tx_int(ha, fp, txq);
7086 mtx_unlock(&fp->tx_mtx);
7088 qlnx_mdelay(__func__, 2);
7090 hw_bd_cons = le16toh(*txq->hw_cons_ptr);
7093 QL_DPRINT2(ha, "[%d, %d]: done\n", fp->rss_id, txq->index);
7099 qlnx_stop_queues(qlnx_host_t *ha)
7101 struct qlnx_update_vport_params vport_update_params;
7102 struct ecore_dev *cdev;
7103 struct qlnx_fastpath *fp;
7108 /* Disable the vport */
7110 memset(&vport_update_params, 0, sizeof(vport_update_params));
7112 vport_update_params.vport_id = 0;
7113 vport_update_params.update_vport_active_tx_flg = 1;
7114 vport_update_params.vport_active_tx_flg = 0;
7115 vport_update_params.update_vport_active_rx_flg = 1;
7116 vport_update_params.vport_active_rx_flg = 0;
7117 vport_update_params.rss_params = &ha->rss_params;
7118 vport_update_params.rss_params->update_rss_config = 0;
7119 vport_update_params.rss_params->rss_enable = 0;
7120 vport_update_params.update_inner_vlan_removal_flg = 0;
7121 vport_update_params.inner_vlan_removal_flg = 0;
7123 QL_DPRINT1(ha, "Update vport ID= %d\n", vport_update_params.vport_id);
7125 rc = qlnx_update_vport(cdev, &vport_update_params);
7127 QL_DPRINT1(ha, "Failed to update vport\n");
7131 /* Flush Tx queues. If needed, request drain from MCP */
7133 fp = &ha->fp_array[i];
7135 for (tc = 0; tc < ha->num_tc; tc++) {
7136 struct qlnx_tx_queue *txq = fp->txq[tc];
7138 rc = qlnx_drain_txq(ha, fp, txq);
7144 /* Stop all Queues in reverse order*/
7145 for (i = ha->num_rss - 1; i >= 0; i--) {
7147 struct ecore_hwfn *p_hwfn = &cdev->hwfns[(i % cdev->num_hwfns)];
7149 fp = &ha->fp_array[i];
7151 /* Stop the Tx Queue(s)*/
7152 for (tc = 0; tc < ha->num_tc; tc++) {
7155 tx_queue_id = tc * ha->num_rss + i;
7156 rc = ecore_eth_tx_queue_stop(p_hwfn,
7157 fp->txq[tc]->handle);
7160 QL_DPRINT1(ha, "Failed to stop TXQ #%d\n",
7166 /* Stop the Rx Queue*/
7167 rc = ecore_eth_rx_queue_stop(p_hwfn, fp->rxq->handle, false,
7170 QL_DPRINT1(ha, "Failed to stop RXQ #%d\n", i);
7175 /* Stop the vport */
7176 for_each_hwfn(cdev, i) {
7178 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
7180 rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 0);
7183 QL_DPRINT1(ha, "Failed to stop VPORT\n");
7192 qlnx_set_ucast_rx_mac(qlnx_host_t *ha,
7193 enum ecore_filter_opcode opcode,
7194 unsigned char mac[ETH_ALEN])
7196 struct ecore_filter_ucast ucast;
7197 struct ecore_dev *cdev;
7202 bzero(&ucast, sizeof(struct ecore_filter_ucast));
7204 ucast.opcode = opcode;
7205 ucast.type = ECORE_FILTER_MAC;
7206 ucast.is_rx_filter = 1;
7207 ucast.vport_to_add_to = 0;
7208 memcpy(&ucast.mac[0], mac, ETH_ALEN);
7210 rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);
7216 qlnx_remove_all_ucast_mac(qlnx_host_t *ha)
7218 struct ecore_filter_ucast ucast;
7219 struct ecore_dev *cdev;
7222 bzero(&ucast, sizeof(struct ecore_filter_ucast));
7224 ucast.opcode = ECORE_FILTER_REPLACE;
7225 ucast.type = ECORE_FILTER_MAC;
7226 ucast.is_rx_filter = 1;
7230 rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);
7236 qlnx_remove_all_mcast_mac(qlnx_host_t *ha)
7238 struct ecore_filter_mcast *mcast;
7239 struct ecore_dev *cdev;
7244 mcast = &ha->ecore_mcast;
7245 bzero(mcast, sizeof(struct ecore_filter_mcast));
7247 mcast->opcode = ECORE_FILTER_REMOVE;
7249 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
7251 if (ha->mcast[i].addr[0] || ha->mcast[i].addr[1] ||
7252 ha->mcast[i].addr[2] || ha->mcast[i].addr[3] ||
7253 ha->mcast[i].addr[4] || ha->mcast[i].addr[5]) {
7255 memcpy(&mcast->mac[i][0], &ha->mcast[i].addr[0], ETH_ALEN);
7256 mcast->num_mc_addrs++;
7259 mcast = &ha->ecore_mcast;
7261 rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);
7263 bzero(ha->mcast, (sizeof(qlnx_mcast_t) * QLNX_MAX_NUM_MULTICAST_ADDRS));
7270 qlnx_clean_filters(qlnx_host_t *ha)
7274 /* Remove all unicast macs */
7275 rc = qlnx_remove_all_ucast_mac(ha);
7279 /* Remove all multicast macs */
7280 rc = qlnx_remove_all_mcast_mac(ha);
7284 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_FLUSH, ha->primary_mac);
7290 qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter)
7292 struct ecore_filter_accept_flags accept;
7294 struct ecore_dev *cdev;
7298 bzero(&accept, sizeof(struct ecore_filter_accept_flags));
7300 accept.update_rx_mode_config = 1;
7301 accept.rx_accept_filter = filter;
7303 accept.update_tx_mode_config = 1;
7304 accept.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
7305 ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST;
7307 rc = ecore_filter_accept_cmd(cdev, 0, accept, false, false,
7308 ECORE_SPQ_MODE_CB, NULL);
7314 qlnx_set_rx_mode(qlnx_host_t *ha)
7319 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, ha->primary_mac);
7323 rc = qlnx_remove_all_mcast_mac(ha);
7327 filter = ECORE_ACCEPT_UCAST_MATCHED |
7328 ECORE_ACCEPT_MCAST_MATCHED |
7331 if (qlnx_vf_device(ha) == 0) {
7332 filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
7333 filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
7335 ha->filter = filter;
7337 rc = qlnx_set_rx_accept_filter(ha, filter);
7343 qlnx_set_link(qlnx_host_t *ha, bool link_up)
7346 struct ecore_dev *cdev;
7347 struct ecore_hwfn *hwfn;
7348 struct ecore_ptt *ptt;
7350 if (qlnx_vf_device(ha) == 0)
7355 for_each_hwfn(cdev, i) {
7357 hwfn = &cdev->hwfns[i];
7359 ptt = ecore_ptt_acquire(hwfn);
7363 rc = ecore_mcp_set_link(hwfn, ptt, link_up);
7365 ecore_ptt_release(hwfn, ptt);
7373 #if __FreeBSD_version >= 1100000
7375 qlnx_get_counter(if_t ifp, ift_counter cnt)
7380 ha = (qlnx_host_t *)if_getsoftc(ifp);
7384 case IFCOUNTER_IPACKETS:
7385 count = ha->hw_stats.common.rx_ucast_pkts +
7386 ha->hw_stats.common.rx_mcast_pkts +
7387 ha->hw_stats.common.rx_bcast_pkts;
7390 case IFCOUNTER_IERRORS:
7391 count = ha->hw_stats.common.rx_crc_errors +
7392 ha->hw_stats.common.rx_align_errors +
7393 ha->hw_stats.common.rx_oversize_packets +
7394 ha->hw_stats.common.rx_undersize_packets;
7397 case IFCOUNTER_OPACKETS:
7398 count = ha->hw_stats.common.tx_ucast_pkts +
7399 ha->hw_stats.common.tx_mcast_pkts +
7400 ha->hw_stats.common.tx_bcast_pkts;
7403 case IFCOUNTER_OERRORS:
7404 count = ha->hw_stats.common.tx_err_drop_pkts;
7407 case IFCOUNTER_COLLISIONS:
7410 case IFCOUNTER_IBYTES:
7411 count = ha->hw_stats.common.rx_ucast_bytes +
7412 ha->hw_stats.common.rx_mcast_bytes +
7413 ha->hw_stats.common.rx_bcast_bytes;
7416 case IFCOUNTER_OBYTES:
7417 count = ha->hw_stats.common.tx_ucast_bytes +
7418 ha->hw_stats.common.tx_mcast_bytes +
7419 ha->hw_stats.common.tx_bcast_bytes;
7422 case IFCOUNTER_IMCASTS:
7423 count = ha->hw_stats.common.rx_mcast_pkts;
7426 case IFCOUNTER_OMCASTS:
7427 count = ha->hw_stats.common.tx_mcast_pkts;
7430 case IFCOUNTER_IQDROPS:
7431 case IFCOUNTER_OQDROPS:
7432 case IFCOUNTER_NOPROTO:
7435 return (if_get_counter_default(ifp, cnt));
7443 qlnx_timer(void *arg)
7447 ha = (qlnx_host_t *)arg;
7449 if (ha->error_recovery) {
7450 ha->error_recovery = 0;
7451 taskqueue_enqueue(ha->err_taskqueue, &ha->err_task);
7455 ecore_get_vport_stats(&ha->cdev, &ha->hw_stats);
7457 if (ha->storm_stats_gather)
7458 qlnx_sample_storm_stats(ha);
7460 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
7466 qlnx_load(qlnx_host_t *ha)
7470 struct ecore_dev *cdev;
7476 QL_DPRINT2(ha, "enter\n");
7478 rc = qlnx_alloc_mem_arrays(ha);
7480 goto qlnx_load_exit0;
7484 rc = qlnx_alloc_mem_load(ha);
7486 goto qlnx_load_exit1;
7488 QL_DPRINT2(ha, "Allocated %d RSS queues on %d TC/s\n",
7489 ha->num_rss, ha->num_tc);
7491 for (i = 0; i < ha->num_rss; i++) {
7493 if ((rc = bus_setup_intr(dev, ha->irq_vec[i].irq,
7494 (INTR_TYPE_NET | INTR_MPSAFE),
7495 NULL, qlnx_fp_isr, &ha->irq_vec[i],
7496 &ha->irq_vec[i].handle))) {
7498 QL_DPRINT1(ha, "could not setup interrupt\n");
7499 goto qlnx_load_exit2;
7502 QL_DPRINT2(ha, "rss_id = %d irq_rid %d \
7503 irq %p handle %p\n", i,
7504 ha->irq_vec[i].irq_rid,
7505 ha->irq_vec[i].irq, ha->irq_vec[i].handle);
7507 bus_bind_intr(dev, ha->irq_vec[i].irq, (i % mp_ncpus));
7510 rc = qlnx_start_queues(ha);
7512 goto qlnx_load_exit2;
7514 QL_DPRINT2(ha, "Start VPORT, RXQ and TXQ succeeded\n");
7516 /* Add primary mac and set Rx filters */
7517 rc = qlnx_set_rx_mode(ha);
7519 goto qlnx_load_exit2;
7521 /* Ask for link-up using current configuration */
7522 qlnx_set_link(ha, true);
7524 if (qlnx_vf_device(ha) == 0)
7525 qlnx_link_update(&ha->cdev.hwfns[0]);
7527 ha->state = QLNX_STATE_OPEN;
7529 bzero(&ha->hw_stats, sizeof(struct ecore_eth_stats));
7531 if (ha->flags.callout_init)
7532 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
7534 goto qlnx_load_exit0;
7537 qlnx_free_mem_load(ha);
7543 QL_DPRINT2(ha, "exit [%d]\n", rc);
7548 qlnx_drain_soft_lro(qlnx_host_t *ha)
7550 #ifdef QLNX_SOFT_LRO
7558 if (ifp->if_capenable & IFCAP_LRO) {
7560 for (i = 0; i < ha->num_rss; i++) {
7562 struct qlnx_fastpath *fp = &ha->fp_array[i];
7563 struct lro_ctrl *lro;
7565 lro = &fp->rxq->lro;
7567 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
7569 tcp_lro_flush_all(lro);
7572 struct lro_entry *queued;
7574 while ((!SLIST_EMPTY(&lro->lro_active))){
7575 queued = SLIST_FIRST(&lro->lro_active);
7576 SLIST_REMOVE_HEAD(&lro->lro_active, next);
7577 tcp_lro_flush(lro, queued);
7580 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
7585 #endif /* #ifdef QLNX_SOFT_LRO */
7591 qlnx_unload(qlnx_host_t *ha)
7593 struct ecore_dev *cdev;
7600 QL_DPRINT2(ha, "enter\n");
7601 QL_DPRINT1(ha, " QLNX STATE = %d\n",ha->state);
7603 if (ha->state == QLNX_STATE_OPEN) {
7605 qlnx_set_link(ha, false);
7606 qlnx_clean_filters(ha);
7607 qlnx_stop_queues(ha);
7608 ecore_hw_stop_fastpath(cdev);
7610 for (i = 0; i < ha->num_rss; i++) {
7611 if (ha->irq_vec[i].handle) {
7612 (void)bus_teardown_intr(dev,
7614 ha->irq_vec[i].handle);
7615 ha->irq_vec[i].handle = NULL;
7619 qlnx_drain_fp_taskqueues(ha);
7620 qlnx_drain_soft_lro(ha);
7621 qlnx_free_mem_load(ha);
7624 if (ha->flags.callout_init)
7625 callout_drain(&ha->qlnx_callout);
7627 qlnx_mdelay(__func__, 1000);
7629 ha->state = QLNX_STATE_CLOSED;
7631 QL_DPRINT2(ha, "exit\n");
7636 qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
7639 struct ecore_hwfn *p_hwfn;
7640 struct ecore_ptt *p_ptt;
7642 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
7644 p_hwfn = &ha->cdev.hwfns[hwfn_index];
7645 p_ptt = ecore_ptt_acquire(p_hwfn);
7648 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
7652 rval = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);
7654 if (rval == DBG_STATUS_OK)
7657 QL_DPRINT1(ha, "ecore_dbg_grc_get_dump_buf_size failed"
7661 ecore_ptt_release(p_hwfn, p_ptt);
7667 qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
7670 struct ecore_hwfn *p_hwfn;
7671 struct ecore_ptt *p_ptt;
7673 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
7675 p_hwfn = &ha->cdev.hwfns[hwfn_index];
7676 p_ptt = ecore_ptt_acquire(p_hwfn);
7679 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
7683 rval = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);
7685 if (rval == DBG_STATUS_OK)
7688 QL_DPRINT1(ha, "ecore_dbg_idle_chk_get_dump_buf_size failed"
7692 ecore_ptt_release(p_hwfn, p_ptt);
7699 qlnx_sample_storm_stats(qlnx_host_t *ha)
7702 struct ecore_dev *cdev;
7703 qlnx_storm_stats_t *s_stats;
7705 struct ecore_ptt *p_ptt;
7706 struct ecore_hwfn *hwfn;
7708 if (ha->storm_stats_index >= QLNX_STORM_STATS_SAMPLES_PER_HWFN) {
7709 ha->storm_stats_gather = 0;
7715 for_each_hwfn(cdev, i) {
7717 hwfn = &cdev->hwfns[i];
7719 p_ptt = ecore_ptt_acquire(hwfn);
7723 index = ha->storm_stats_index +
7724 (i * QLNX_STORM_STATS_SAMPLES_PER_HWFN);
7726 s_stats = &ha->storm_stats[index];
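/* each storm processor (X/Y/P/T/M/U) exports active, stall, sleeping
 * and inactive cycle counters in its SEM fast-memory window; sample
 * all six below */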
7729 reg = XSEM_REG_FAST_MEMORY +
7730 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7731 s_stats->xstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7733 reg = XSEM_REG_FAST_MEMORY +
7734 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7735 s_stats->xstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7737 reg = XSEM_REG_FAST_MEMORY +
7738 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7739 s_stats->xstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7741 reg = XSEM_REG_FAST_MEMORY +
7742 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7743 s_stats->xstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7746 reg = YSEM_REG_FAST_MEMORY +
7747 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7748 s_stats->ystorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7750 reg = YSEM_REG_FAST_MEMORY +
7751 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7752 s_stats->ystorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7754 reg = YSEM_REG_FAST_MEMORY +
7755 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7756 s_stats->ystorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7758 reg = YSEM_REG_FAST_MEMORY +
7759 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7760 s_stats->ystorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7763 reg = PSEM_REG_FAST_MEMORY +
7764 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7765 s_stats->pstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7767 reg = PSEM_REG_FAST_MEMORY +
7768 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7769 s_stats->pstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7771 reg = PSEM_REG_FAST_MEMORY +
7772 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7773 s_stats->pstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7775 reg = PSEM_REG_FAST_MEMORY +
7776 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7777 s_stats->pstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7780 reg = TSEM_REG_FAST_MEMORY +
7781 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7782 s_stats->tstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7784 reg = TSEM_REG_FAST_MEMORY +
7785 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7786 s_stats->tstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7788 reg = TSEM_REG_FAST_MEMORY +
7789 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7790 s_stats->tstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7792 reg = TSEM_REG_FAST_MEMORY +
7793 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7794 s_stats->tstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7797 reg = MSEM_REG_FAST_MEMORY +
7798 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7799 s_stats->mstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7801 reg = MSEM_REG_FAST_MEMORY +
7802 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7803 s_stats->mstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7805 reg = MSEM_REG_FAST_MEMORY +
7806 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7807 s_stats->mstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7809 reg = MSEM_REG_FAST_MEMORY +
7810 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7811 s_stats->mstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7814 reg = USEM_REG_FAST_MEMORY +
7815 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7816 s_stats->ustorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7818 reg = USEM_REG_FAST_MEMORY +
7819 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7820 s_stats->ustorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7822 reg = USEM_REG_FAST_MEMORY +
7823 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7824 s_stats->ustorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7826 reg = USEM_REG_FAST_MEMORY +
7827 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7828 s_stats->ustorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7830 ecore_ptt_release(hwfn, p_ptt);
7833 ha->storm_stats_index++;
/*
 * Name: qlnx_dump_buf8
 * Function: dumps a buffer as bytes
 */
static void
qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, uint32_t len)
{
	device_t	dev;
	uint32_t	i = 0;
	uint8_t		*buf;

	dev = ha->pci_dev;
	buf = dbuf;

	device_printf(dev, "%s: %s 0x%x dump start\n", __func__, msg, len);

	while (len >= 16) {
		device_printf(dev,"0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x %02x"
			" %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3],
			buf[4], buf[5], buf[6], buf[7],
			buf[8], buf[9], buf[10], buf[11],
			buf[12], buf[13], buf[14], buf[15]);
		i += 16;
		len -= 16;
		buf += 16;
	}

	switch (len) {
	case 1:
		device_printf(dev,"0x%08x: %02x\n", i, buf[0]);
		break;
	case 2:
		device_printf(dev,"0x%08x: %02x %02x\n", i, buf[0], buf[1]);
		break;
	case 3:
		device_printf(dev,"0x%08x: %02x %02x %02x\n",
			i, buf[0], buf[1], buf[2]);
		break;
	case 4:
		device_printf(dev,"0x%08x: %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3]);
		break;
	case 5:
		device_printf(dev,"0x%08x:"
			" %02x %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4]);
		break;
	case 6:
		device_printf(dev,"0x%08x:"
			" %02x %02x %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
		break;
	case 7:
		device_printf(dev,"0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]);
		break;
	case 8:
		device_printf(dev,"0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
			buf[7]);
		break;
	case 9:
		device_printf(dev,"0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x %02x"
			" %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
			buf[7], buf[8]);
		break;
	case 10:
		device_printf(dev,"0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x %02x"
			" %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
			buf[7], buf[8], buf[9]);
		break;
	case 11:
		device_printf(dev,"0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x %02x"
			" %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
			buf[7], buf[8], buf[9], buf[10]);
		break;
	case 12:
		device_printf(dev,"0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x %02x"
			" %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
			buf[7], buf[8], buf[9], buf[10], buf[11]);
		break;
	case 13:
		device_printf(dev,"0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x %02x"
			" %02x %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
			buf[7], buf[8], buf[9], buf[10], buf[11], buf[12]);
		break;
	case 14:
		device_printf(dev,"0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x %02x"
			" %02x %02x %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
			buf[7], buf[8], buf[9], buf[10], buf[11], buf[12],
			buf[13]);
		break;
	case 15:
		device_printf(dev,"0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x %02x"
			" %02x %02x %02x %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
			buf[7], buf[8], buf[9], buf[10], buf[11], buf[12],
			buf[13], buf[14]);
		break;
	default:
		break;
	}

	device_printf(dev, "%s: %s dump end\n", __func__, msg);

	return;
}
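
/*
 * Example (hypothetical call site) - dump the first 32 bytes of a
 * received packet:
 *
 *	qlnx_dump_buf8(ha, "rxpkt", mtod(mp, void *), 32);
 */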
#ifdef CONFIG_ECORE_SRIOV

static void
__qlnx_osal_iov_vf_cleanup(struct ecore_hwfn *p_hwfn, uint8_t rel_vf_id)
{
	struct ecore_public_vf_info *vf_info;

	vf_info = ecore_iov_get_public_vf_info(p_hwfn, rel_vf_id, false);
	if (!vf_info)
		return;

	/* Clear the VF mac */
	memset(vf_info->forced_mac, 0, ETH_ALEN);
	vf_info->forced_vlan = 0;

	return;
}

void
qlnx_osal_iov_vf_cleanup(void *p_hwfn, uint8_t relative_vf_id)
{
	__qlnx_osal_iov_vf_cleanup(p_hwfn, relative_vf_id);
	return;
}
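
/*
 * Name: __qlnx_iov_chk_ucast
 * Function: vets a VF unicast filter request; for MAC (or MAC+VLAN)
 *	filters the PF-configured MAC is copied into the request.
 */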
static int
__qlnx_iov_chk_ucast(struct ecore_hwfn *p_hwfn, int vfid,
	struct ecore_filter_ucast *params)
{
	struct ecore_public_vf_info *vf;

	if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) {
		QL_DPRINT1(((qlnx_host_t *)p_hwfn->p_dev),
			"VF[%d] vport not initialized\n", vfid);
		return (ECORE_INVAL);
	}

	vf = ecore_iov_get_public_vf_info(p_hwfn, vfid, true);
	if (!vf)
		return (ECORE_INVAL);

	/* No real decision to make; Store the configured MAC */
	if (params->type == ECORE_FILTER_MAC ||
	    params->type == ECORE_FILTER_MAC_VLAN)
		memcpy(params->mac, vf->forced_mac, ETH_ALEN);

	return (ECORE_SUCCESS);
}

int
qlnx_iov_chk_ucast(void *p_hwfn, int vfid, void *params)
{
	return (__qlnx_iov_chk_ucast(p_hwfn, vfid, params));
}
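
/*
 * Name: __qlnx_iov_update_vport
 * Function: vets a VF vport-update request before it is processed;
 *	requires the VF vport to exist and screens the accept-flags TLV.
 */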
static int
__qlnx_iov_update_vport(struct ecore_hwfn *hwfn, uint8_t vfid,
	struct ecore_sp_vport_update_params *params, uint16_t *tlvs)
{
	uint8_t					mask;
	struct ecore_filter_accept_flags	*flags;

	if (!ecore_iov_vf_has_vport_instance(hwfn, vfid)) {
		QL_DPRINT1(((qlnx_host_t *)hwfn->p_dev),
			"VF[%d] vport not initialized\n", vfid);
		return (ECORE_INVAL);
	}

	/* Untrusted VFs can't even be trusted to know that fact.
	 * Simply indicate everything is configured fine, and trace
	 * configuration 'behind their back'.
	 */
	mask = ECORE_ACCEPT_UCAST_UNMATCHED | ECORE_ACCEPT_MCAST_UNMATCHED;
	flags = &params->accept_flags;

	if (!(*tlvs & BIT(ECORE_IOV_VP_UPDATE_ACCEPT_PARAM)))
		return (ECORE_SUCCESS);

	/* Strip the unmatched-traffic accept bits the VF requested; the
	 * PF retains control of promiscuous behavior.
	 */
	flags->rx_accept_filter &= ~mask;

	return (ECORE_SUCCESS);
}

int
qlnx_iov_update_vport(void *hwfn, uint8_t vfid, void *params, uint16_t *tlvs)
{
	return (__qlnx_iov_update_vport(hwfn, vfid, params, tlvs));
}
static int
qlnx_find_hwfn_index(struct ecore_hwfn *p_hwfn)
{
	int			i;
	struct ecore_dev	*cdev;

	cdev = p_hwfn->p_dev;

	for (i = 0; i < cdev->num_hwfns; i++) {
		if (&cdev->hwfns[i] == p_hwfn)
			break;
	}

	if (i >= cdev->num_hwfns)
		return (-1);

	return (i);
}
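
/*
 * Name: __qlnx_pf_vf_msg
 * Function: ecore callback signalling that a VF posted a mailbox
 *	message; marks the work and defers processing to the per-hwfn PF
 *	taskqueue, where a PTT can safely be acquired.
 */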
static int
__qlnx_pf_vf_msg(struct ecore_hwfn *p_hwfn, uint16_t rel_vf_id)
{
	qlnx_host_t	*ha = (qlnx_host_t *)p_hwfn->p_dev;
	int		i;

	QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p rel_vf_id = %d\n",
		ha, p_hwfn->p_dev, p_hwfn, rel_vf_id);

	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
		return (ECORE_INVAL);

	if (ha->sriov_task[i].pf_taskqueue != NULL) {
		atomic_testandset_32(&ha->sriov_task[i].flags,
			QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG);

		taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
			&ha->sriov_task[i].pf_task);
	}

	return (ECORE_SUCCESS);
}

int
qlnx_pf_vf_msg(void *p_hwfn, uint16_t relative_vf_id)
{
	return (__qlnx_pf_vf_msg(p_hwfn, relative_vf_id));
}
static void
__qlnx_vf_flr_update(struct ecore_hwfn *p_hwfn)
{
	qlnx_host_t	*ha = (qlnx_host_t *)p_hwfn->p_dev;
	int		i;

	if (!ha->sriov_initialized)
		return;

	QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p\n",
		ha, p_hwfn->p_dev, p_hwfn);

	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
		return;

	if (ha->sriov_task[i].pf_taskqueue != NULL) {
		atomic_testandset_32(&ha->sriov_task[i].flags,
			QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE);

		taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
			&ha->sriov_task[i].pf_task);
	}

	return;
}

void
qlnx_vf_flr_update(void *p_hwfn)
{
	__qlnx_vf_flr_update(p_hwfn);
	return;
}
void
qlnx_vf_bulleting_update(struct ecore_hwfn *p_hwfn)
{
	qlnx_host_t	*ha = (qlnx_host_t *)p_hwfn->p_dev;
	int		i;

	QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p\n",
		ha, p_hwfn->p_dev, p_hwfn);

	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
		return;

	QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p i = %d\n",
		ha, p_hwfn->p_dev, p_hwfn, i);

	if (ha->sriov_task[i].pf_taskqueue != NULL) {
		atomic_testandset_32(&ha->sriov_task[i].flags,
			QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE);

		taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
			&ha->sriov_task[i].pf_task);
	}

	return;
}
static void
qlnx_initialize_sriov(qlnx_host_t *ha)
{
	device_t	dev;
	nvlist_t	*pf_schema, *vf_schema;
	int		iov_error;

	dev = ha->pci_dev;

	pf_schema = pci_iov_schema_alloc_node();
	vf_schema = pci_iov_schema_alloc_node();

	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
		IOV_SCHEMA_HASDEFAULT, FALSE);
	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
		IOV_SCHEMA_HASDEFAULT, FALSE);
	pci_iov_schema_add_uint16(vf_schema, "num-queues",
		IOV_SCHEMA_HASDEFAULT, 1);

	iov_error = pci_iov_attach(dev, pf_schema, vf_schema);

	if (iov_error != 0) {
		ha->sriov_initialized = 0;
	} else {
		device_printf(dev, "SRIOV initialized\n");
		ha->sriov_initialized = 1;
	}

	return;
}
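
/*
 * Once the schema is registered, VFs are created with iovctl(8). A
 * minimal iovctl.conf (device name hypothetical) might look like:
 *
 *	PF {
 *		device : "ql0";
 *		num_vfs : 2;
 *	}
 *
 *	VF-0 {
 *		mac-addr : "02:00:00:00:00:01";
 *		allow-set-mac : true;
 *	}
 */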
static void
qlnx_sriov_disable(qlnx_host_t *ha)
{
	struct ecore_dev	*cdev;
	int			i, j;

	cdev = &ha->cdev;

	ecore_iov_set_vfs_to_disable(cdev, true);

	for_each_hwfn(cdev, i) {
		struct ecore_hwfn *hwfn = &cdev->hwfns[i];
		struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn);

		if (!ptt) {
			QL_DPRINT1(ha, "Failed to acquire ptt\n");
			return;
		}

		/* Clean WFQ db and configure equal weight for all vports */
		ecore_clean_wfq_db(hwfn, ptt);

		ecore_for_each_vf(hwfn, j) {
			int k = 0;

			if (!ecore_iov_is_valid_vfid(hwfn, j, true, false))
				continue;

			if (ecore_iov_is_vf_started(hwfn, j)) {
				/* Wait until VF is disabled before releasing */
				for (k = 0; k < 100; k++) {
					if (!ecore_iov_is_vf_stopped(hwfn, j)) {
						qlnx_mdelay(__func__, 10);
					} else
						break;
				}
			}

			if (k < 100)
				ecore_iov_release_hw_for_vf(&cdev->hwfns[i],
					ptt, j);
			else
				QL_DPRINT1(ha,
					"Timeout waiting for VF's FLR to end\n");
		}

		ecore_ptt_release(hwfn, ptt);
	}

	ecore_iov_set_vfs_to_disable(cdev, false);

	return;
}
static void
qlnx_sriov_enable_qid_config(struct ecore_hwfn *hwfn, u16 vfid,
	struct ecore_iov_vf_init_params *params)
{
	u16 base, i;

	/* Since we have an equal resource distribution per-VF, and we assume
	 * PF has acquired the ECORE_PF_L2_QUE first queues, we start setting
	 * sequentially from there.
	 */
	base = FEAT_NUM(hwfn, ECORE_PF_L2_QUE) + vfid * params->num_queues;
	params->rel_vf_id = vfid;

	for (i = 0; i < params->num_queues; i++) {
		params->req_rx_queue[i] = base + i;
		params->req_tx_queue[i] = base + i;
	}

	/* PF uses indices 0 for itself; Set vport/RSS afterwards */
	params->vport_id = vfid + 1;
	params->rss_eng_id = vfid + 1;

	return;
}
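
/*
 * For illustration (values hypothetical): with FEAT_NUM(hwfn,
 * ECORE_PF_L2_QUE) == 16 and params->num_queues == 4, VF0 is assigned
 * queues 16..19 with vport/RSS engine 1, VF1 queues 20..23 with
 * vport/RSS engine 2, and so on.
 */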
static int
qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *nvlist_params)
{
	qlnx_host_t			*ha;
	struct ecore_dev		*cdev;
	struct ecore_iov_vf_init_params	params;
	int				ret, i, j;
	uint32_t			max_vfs;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "%s: cannot get softc\n", __func__);
		return (-1);
	}

	if (qlnx_create_pf_taskqueues(ha) != 0)
		goto qlnx_iov_init_err0;

	cdev = &ha->cdev;

	max_vfs = RESC_NUM(&cdev->hwfns[0], ECORE_VPORT);

	QL_DPRINT2(ha," dev = %p enter num_vfs = %d max_vfs = %d\n",
		dev, num_vfs, max_vfs);

	if (num_vfs >= max_vfs) {
		QL_DPRINT1(ha, "Can start at most %d VFs\n",
			(RESC_NUM(&cdev->hwfns[0], ECORE_VPORT) - 1));
		goto qlnx_iov_init_err0;
	}

	ha->vf_attr = malloc(((sizeof(qlnx_vf_attr_t) * num_vfs)), M_QLNXBUF,
				M_NOWAIT);

	if (ha->vf_attr == NULL)
		goto qlnx_iov_init_err0;

	memset(&params, 0, sizeof(params));

	/* Initialize HW for VF access */
	for_each_hwfn(cdev, j) {
		struct ecore_hwfn *hwfn = &cdev->hwfns[j];
		struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn);

		/* Make sure not to use more than 16 queues per VF */
		params.num_queues = min_t(int,
			(FEAT_NUM(hwfn, ECORE_VF_L2_QUE) / num_vfs),
			16);

		if (!ptt) {
			QL_DPRINT1(ha, "Failed to acquire ptt\n");
			goto qlnx_iov_init_err1;
		}

		for (i = 0; i < num_vfs; i++) {
			if (!ecore_iov_is_valid_vfid(hwfn, i, false, true))
				continue;

			qlnx_sriov_enable_qid_config(hwfn, i, &params);

			ret = ecore_iov_init_hw_for_vf(hwfn, ptt, &params);

			if (ret) {
				QL_DPRINT1(ha, "Failed to enable VF[%d]\n", i);
				ecore_ptt_release(hwfn, ptt);
				goto qlnx_iov_init_err1;
			}
		}

		ecore_ptt_release(hwfn, ptt);
	}

	ha->num_vfs = num_vfs;
	qlnx_inform_vf_link_state(&cdev->hwfns[0], ha);

	QL_DPRINT2(ha," dev = %p exit num_vfs = %d\n", dev, num_vfs);

	return (0);

qlnx_iov_init_err1:
	qlnx_sriov_disable(ha);

qlnx_iov_init_err0:
	qlnx_destroy_pf_taskqueues(ha);
	ha->num_vfs = 0;

	return (-1);
}
static void
qlnx_iov_uninit(device_t dev)
{
	qlnx_host_t	*ha;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "%s: cannot get softc\n", __func__);
		return;
	}

	QL_DPRINT2(ha," dev = %p enter\n", dev);

	qlnx_sriov_disable(ha);
	qlnx_destroy_pf_taskqueues(ha);

	free(ha->vf_attr, M_QLNXBUF);
	ha->vf_attr = NULL;
	ha->num_vfs = 0;

	QL_DPRINT2(ha," dev = %p exit\n", dev);
	return;
}
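
/*
 * Name: qlnx_iov_add_vf
 * Function: per-VF hook invoked by the pci_iov framework; stores the
 *	administratively assigned "mac-addr" (if present) and publishes it
 *	to the VF via the bulletin board.
 */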
static int
qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
{
	qlnx_host_t		*ha;
	qlnx_vf_attr_t		*vf_attr;
	unsigned const char	*mac;
	size_t			size;
	struct ecore_hwfn	*p_hwfn;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "%s: cannot get softc\n", __func__);
		return (-1);
	}

	QL_DPRINT2(ha," dev = %p enter vfnum = %d\n", dev, vfnum);

	if (vfnum > (ha->num_vfs - 1)) {
		QL_DPRINT1(ha, " VF[%d] is greater than max allowed [%d]\n",
			vfnum, (ha->num_vfs - 1));
		return (EINVAL);
	}

	vf_attr = &ha->vf_attr[vfnum];

	if (nvlist_exists_binary(params, "mac-addr")) {
		mac = nvlist_get_binary(params, "mac-addr", &size);
		bcopy(mac, vf_attr->mac_addr, ETHER_ADDR_LEN);
		device_printf(dev,
			"%s: mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
			__func__, vf_attr->mac_addr[0],
			vf_attr->mac_addr[1], vf_attr->mac_addr[2],
			vf_attr->mac_addr[3], vf_attr->mac_addr[4],
			vf_attr->mac_addr[5]);
		p_hwfn = &ha->cdev.hwfns[0];
		ecore_iov_bulletin_set_mac(p_hwfn, vf_attr->mac_addr,
			vfnum);
	}

	QL_DPRINT2(ha," dev = %p exit vfnum = %d\n", dev, vfnum);
	return (0);
}
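
/*
 * Name: qlnx_handle_vf_msg
 * Function: services pending VF mailbox requests. The pending-events
 *	bitmap packs one bit per relative VF id: VF i is pending when bit
 *	(i % 64) of events[i / 64] is set; e.g. VF 70 maps to bit 6 of
 *	events[1].
 */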
static void
qlnx_handle_vf_msg(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
{
	uint64_t		events[ECORE_VF_ARRAY_LENGTH];
	struct ecore_ptt	*ptt;
	int			i;

	ptt = ecore_ptt_acquire(p_hwfn);
	if (!ptt) {
		QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
		__qlnx_pf_vf_msg(p_hwfn, 0);
		return;
	}

	ecore_iov_pf_get_pending_events(p_hwfn, events);

	QL_DPRINT2(ha, "Event mask of VF events:"
		" 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 "\n",
		events[0], events[1], events[2]);

	ecore_for_each_vf(p_hwfn, i) {
		/* Skip VFs with no pending messages */
		if (!(events[i / 64] & (1ULL << (i % 64))))
			continue;

		QL_DPRINT2(ha,
			"Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
			i, p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i);

		/* Copy VF's message to PF's request buffer for that VF */
		if (ecore_iov_copy_vf_msg(p_hwfn, ptt, i))
			continue;

		ecore_iov_process_mbx_req(p_hwfn, ptt, i);
	}

	ecore_ptt_release(p_hwfn, ptt);

	return;
}
static void
qlnx_handle_vf_flr_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
{
	struct ecore_ptt	*ptt;
	int			ret;

	ptt = ecore_ptt_acquire(p_hwfn);
	if (!ptt) {
		QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
		__qlnx_vf_flr_update(p_hwfn);
		return;
	}

	ret = ecore_iov_vf_flr_cleanup(p_hwfn, ptt);
	if (ret) {
		QL_DPRINT1(ha,
			"ecore_iov_vf_flr_cleanup failed; re-scheduling\n");
	}

	ecore_ptt_release(p_hwfn, ptt);

	return;
}
static void
qlnx_handle_bulletin_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
{
	struct ecore_ptt	*ptt;
	int			i;

	ptt = ecore_ptt_acquire(p_hwfn);
	if (!ptt) {
		QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
		qlnx_vf_bulleting_update(p_hwfn);
		return;
	}

	ecore_for_each_vf(p_hwfn, i) {
		QL_DPRINT1(ha, "ecore_iov_post_vf_bulletin[%p, %d]\n",
			p_hwfn, i);
		ecore_iov_post_vf_bulletin(p_hwfn, i, ptt);
	}

	ecore_ptt_release(p_hwfn, ptt);

	return;
}
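
/*
 * Name: qlnx_pf_taskqueue
 * Function: PF-side deferred handler; atomically consumes the flag bits
 *	set by the ecore callbacks and dispatches the VF mailbox, FLR and
 *	bulletin work in a context where a PTT can be acquired.
 */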
static void
qlnx_pf_taskqueue(void *context, int pending)
{
	struct ecore_hwfn	*p_hwfn;
	qlnx_host_t		*ha;
	int			i;

	p_hwfn = context;
	if (p_hwfn == NULL)
		return;

	ha = (qlnx_host_t *)(p_hwfn->p_dev);

	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
		return;

	if (atomic_testandclear_32(&ha->sriov_task[i].flags,
		QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG))
		qlnx_handle_vf_msg(ha, p_hwfn);

	if (atomic_testandclear_32(&ha->sriov_task[i].flags,
		QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE))
		qlnx_handle_vf_flr_update(ha, p_hwfn);

	if (atomic_testandclear_32(&ha->sriov_task[i].flags,
		QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE))
		qlnx_handle_bulletin_update(ha, p_hwfn);

	return;
}
static int
qlnx_create_pf_taskqueues(qlnx_host_t *ha)
{
	int	i;
	uint8_t	tq_name[32];

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];

		bzero(tq_name, sizeof(tq_name));
		snprintf(tq_name, sizeof(tq_name), "ql_pf_tq_%d", i);

		TASK_INIT(&ha->sriov_task[i].pf_task, 0, qlnx_pf_taskqueue,
			p_hwfn);

		ha->sriov_task[i].pf_taskqueue = taskqueue_create(tq_name,
			M_NOWAIT, taskqueue_thread_enqueue,
			&ha->sriov_task[i].pf_taskqueue);

		if (ha->sriov_task[i].pf_taskqueue == NULL)
			return (-1);

		taskqueue_start_threads(&ha->sriov_task[i].pf_taskqueue, 1,
			PI_NET, "%s", tq_name);

		QL_DPRINT1(ha, "%p\n", ha->sriov_task[i].pf_taskqueue);
	}

	return (0);
}
static void
qlnx_destroy_pf_taskqueues(qlnx_host_t *ha)
{
	int	i;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		if (ha->sriov_task[i].pf_taskqueue != NULL) {
			taskqueue_drain(ha->sriov_task[i].pf_taskqueue,
				&ha->sriov_task[i].pf_task);
			taskqueue_free(ha->sriov_task[i].pf_taskqueue);
			ha->sriov_task[i].pf_taskqueue = NULL;
		}
	}
	return;
}
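
/*
 * Name: qlnx_inform_vf_link_state
 * Function: copies the PF's current link parameters, state and
 *	capabilities into the bulletin board of every possible VF.
 */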
static void
qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha)
{
	struct ecore_mcp_link_capabilities caps;
	struct ecore_mcp_link_params params;
	struct ecore_mcp_link_state link;
	int i;

	if (!p_hwfn->pf_iov_info)
		return;

	memset(&params, 0, sizeof(struct ecore_mcp_link_params));
	memset(&link, 0, sizeof(struct ecore_mcp_link_state));
	memset(&caps, 0, sizeof(struct ecore_mcp_link_capabilities));

	memcpy(&caps, ecore_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
	memcpy(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link));
	memcpy(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params));

	QL_DPRINT2(ha, "called\n");

	/* Update bulletin of all future possible VFs with link configuration */
	for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) {
		/* Modify link according to the VF's configured link state */
		link.link_up = false;

		if (ha->link_up) {
			link.link_up = true;
			/* Set speed according to maximum supported by HW.
			 * that is 40G for regular devices and 100G for CMT
			 * mode devices.
			 */
			link.speed = (p_hwfn->p_dev->num_hwfns > 1) ?
					100000 : link.speed;
		}

		QL_DPRINT2(ha, "link [%d] = %d\n", i, link.link_up);

		ecore_iov_set_link(p_hwfn, i, &params, &link, &caps);
	}

	qlnx_vf_bulleting_update(p_hwfn);

	return;
}
#endif /* #ifndef QLNX_VF */
#endif /* #ifdef CONFIG_ECORE_SRIOV */