/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: qlnx_os.c
 * Author : David C Somayajulu, Cavium, Inc., San Jose, CA 95131.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "ecore_gtt_reg_addr.h"
#include "ecore_chain.h"
#include "ecore_status.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_int.h"
#include "ecore_cxt.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"
#include "ecore_dev_api.h"
#include "ecore_l2_api.h"
#include "ecore_mcp.h"
#include "ecore_hw_defs.h"
#include "mcp_public.h"
#include "ecore_iro.h"
#include "ecore_dev_api.h"
#include "ecore_dbg_fw_funcs.h"
#include "ecore_iov_api.h"
#include "ecore_vf_api.h"

#include "qlnx_ioctl.h"

#ifdef QLNX_ENABLE_IWARP
#include "qlnx_rdma.h"
#endif /* #ifdef QLNX_ENABLE_IWARP */
/*
 * ioctl related functions
 */
static void qlnx_add_sysctls(qlnx_host_t *ha);

static void qlnx_release(qlnx_host_t *ha);
static void qlnx_fp_isr(void *arg);
static void qlnx_init_ifnet(device_t dev, qlnx_host_t *ha);
static void qlnx_init(void *arg);
static void qlnx_init_locked(qlnx_host_t *ha);
static int qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi);
static int qlnx_set_promisc(qlnx_host_t *ha);
static int qlnx_set_allmulti(qlnx_host_t *ha);
static int qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int qlnx_media_change(struct ifnet *ifp);
static void qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
static void qlnx_stop(qlnx_host_t *ha);
static int qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp,
        struct mbuf **m_headp);
static int qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha);
static uint32_t qlnx_get_optics(qlnx_host_t *ha,
        struct qlnx_link_output *if_link);
static int qlnx_transmit(struct ifnet *ifp, struct mbuf *mp);
static int qlnx_transmit_locked(struct ifnet *ifp, struct qlnx_fastpath *fp,
        struct mbuf *mp);
static void qlnx_qflush(struct ifnet *ifp);

static int qlnx_alloc_parent_dma_tag(qlnx_host_t *ha);
static void qlnx_free_parent_dma_tag(qlnx_host_t *ha);
static int qlnx_alloc_tx_dma_tag(qlnx_host_t *ha);
static void qlnx_free_tx_dma_tag(qlnx_host_t *ha);
static int qlnx_alloc_rx_dma_tag(qlnx_host_t *ha);
static void qlnx_free_rx_dma_tag(qlnx_host_t *ha);

static int qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver);
static int qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size);

static int qlnx_nic_setup(struct ecore_dev *cdev,
        struct ecore_pf_params *func_params);
static int qlnx_nic_start(struct ecore_dev *cdev);
static int qlnx_slowpath_start(qlnx_host_t *ha);
static int qlnx_slowpath_stop(qlnx_host_t *ha);
static int qlnx_init_hw(qlnx_host_t *ha);
static void qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
        char ver_str[VER_SIZE]);
static void qlnx_unload(qlnx_host_t *ha);
static int qlnx_load(qlnx_host_t *ha);
static void qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
        uint32_t add_multi);
static void qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf,
        uint32_t len);
static int qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq);
static void qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq);
static void qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn,
        struct qlnx_rx_queue *rxq);
static int qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter);
static int qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords,
        int hwfn_index);
static int qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords,
        int hwfn_index);
static void qlnx_timer(void *arg);
static int qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
static void qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
static void qlnx_trigger_dump(qlnx_host_t *ha);
static uint16_t qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp,
        struct qlnx_tx_queue *txq);
static void qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
        struct qlnx_tx_queue *txq);
static int qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
        int lro_enable);
static void qlnx_fp_taskqueue(void *context, int pending);
static void qlnx_sample_storm_stats(qlnx_host_t *ha);
static int qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
        struct qlnx_agg_info *tpa);
static void qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa);

#if __FreeBSD_version >= 1100000
static uint64_t qlnx_get_counter(if_t ifp, ift_counter cnt);
#endif
/*
 * Hooks to the Operating Systems
 */
static int qlnx_pci_probe(device_t);
static int qlnx_pci_attach(device_t);
static int qlnx_pci_detach(device_t);

#ifdef CONFIG_ECORE_SRIOV

static int qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params);
static void qlnx_iov_uninit(device_t dev);
static int qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params);
static void qlnx_initialize_sriov(qlnx_host_t *ha);
static void qlnx_pf_taskqueue(void *context, int pending);
static int qlnx_create_pf_taskqueues(qlnx_host_t *ha);
static void qlnx_destroy_pf_taskqueues(qlnx_host_t *ha);
static void qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha);

#endif /* #ifdef CONFIG_ECORE_SRIOV */
static device_method_t qlnx_pci_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe, qlnx_pci_probe),
    DEVMETHOD(device_attach, qlnx_pci_attach),
    DEVMETHOD(device_detach, qlnx_pci_detach),

#ifdef CONFIG_ECORE_SRIOV
    DEVMETHOD(pci_iov_init, qlnx_iov_init),
    DEVMETHOD(pci_iov_uninit, qlnx_iov_uninit),
    DEVMETHOD(pci_iov_add_vf, qlnx_iov_add_vf),
#endif /* #ifdef CONFIG_ECORE_SRIOV */
    { 0, 0 }
};

static driver_t qlnx_pci_driver = {
    "ql", qlnx_pci_methods, sizeof (qlnx_host_t),
};

static devclass_t qlnx_devclass;

MODULE_VERSION(if_qlnxe, 1);
DRIVER_MODULE(if_qlnxe, pci, qlnx_pci_driver, qlnx_devclass, 0, 0);

MODULE_DEPEND(if_qlnxe, pci, 1, 1, 1);
MODULE_DEPEND(if_qlnxe, ether, 1, 1, 1);
#ifdef QLNX_VF

static device_method_t qlnxv_pci_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe, qlnx_pci_probe),
    DEVMETHOD(device_attach, qlnx_pci_attach),
    DEVMETHOD(device_detach, qlnx_pci_detach),
    { 0, 0 }
};

static driver_t qlnxv_pci_driver = {
    "ql", qlnxv_pci_methods, sizeof (qlnx_host_t),
};

static devclass_t qlnxv_devclass;

MODULE_VERSION(if_qlnxev, 1);
DRIVER_MODULE(if_qlnxev, pci, qlnxv_pci_driver, qlnxv_devclass, 0, 0);

MODULE_DEPEND(if_qlnxev, pci, 1, 1, 1);
MODULE_DEPEND(if_qlnxev, ether, 1, 1, 1);

#endif /* #ifdef QLNX_VF */
MALLOC_DEFINE(M_QLNXBUF, "qlnxbuf", "Buffers for qlnx driver");

char qlnx_dev_str[128];
char qlnx_ver_str[VER_SIZE];
char qlnx_name_str[NAME_SIZE];

/*
 * Some PCI Configuration Space Related Defines
 */

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC		0x1077
#endif

/* 40G Adapter QLE45xxx*/
#ifndef QLOGIC_PCI_DEVICE_ID_1634
#define QLOGIC_PCI_DEVICE_ID_1634	0x1634
#endif

/* 100G Adapter QLE45xxx*/
#ifndef QLOGIC_PCI_DEVICE_ID_1644
#define QLOGIC_PCI_DEVICE_ID_1644	0x1644
#endif

/* 25G Adapter QLE45xxx*/
#ifndef QLOGIC_PCI_DEVICE_ID_1656
#define QLOGIC_PCI_DEVICE_ID_1656	0x1656
#endif

/* 50G Adapter QLE45xxx*/
#ifndef QLOGIC_PCI_DEVICE_ID_1654
#define QLOGIC_PCI_DEVICE_ID_1654	0x1654
#endif

/* 10G/25G/40G Adapter QLE41xxx*/
#ifndef QLOGIC_PCI_DEVICE_ID_8070
#define QLOGIC_PCI_DEVICE_ID_8070	0x8070
#endif

/* SRIOV Device (All Speeds) Adapter QLE41xxx*/
#ifndef QLOGIC_PCI_DEVICE_ID_8090
#define QLOGIC_PCI_DEVICE_ID_8090	0x8090
#endif

SYSCTL_NODE(_hw, OID_AUTO, qlnxe, CTLFLAG_RD, 0, "qlnxe driver parameters");
/* Number of Queues: 0 (Auto) or 1 to 32 (fixed queue number) */
static int qlnxe_queue_count = QLNX_DEFAULT_RSS;

#if __FreeBSD_version < 1100000
TUNABLE_INT("hw.qlnxe.queue_count", &qlnxe_queue_count);
#endif

SYSCTL_INT(_hw_qlnxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
    &qlnxe_queue_count, 0, "Multi-Queue queue count");
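
/*
 * Usage sketch (the tunable name follows the SYSCTL_INT above): pin the
 * driver to 8 queues at boot via /boot/loader.conf:
 *
 *	hw.qlnxe.queue_count="8"
 *
 * A value of 0 lets the driver auto-size the queue count from the
 * available MSI-X vectors and CPUs (see qlnx_pci_attach()).
 */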
/*
 * Note on RDMA personality setting
 *
 * Read the personality configured in NVRAM.
 * If the personality is ETH_ONLY, ETH_IWARP or ETH_ROCE and
 * the configured personality in sysctl is QLNX_PERSONALITY_DEFAULT,
 * use the personality in NVRAM.
 *
 * Otherwise use the personality configured in sysctl.
 */

#define QLNX_PERSONALITY_DEFAULT	0x0  /* use personality in NVRAM */
#define QLNX_PERSONALITY_ETH_ONLY	0x1  /* Override with ETH_ONLY */
#define QLNX_PERSONALITY_ETH_IWARP	0x2  /* Override with ETH_IWARP */
#define QLNX_PERSONALITY_ETH_ROCE	0x3  /* Override with ETH_ROCE */
#define QLNX_PERSONALITY_BITS_PER_FUNC	4
#define QLNX_PERSONALIY_MASK		0xF
/* RDMA configuration; 64-bit field allows setting for 16 physical functions */
static uint64_t qlnxe_rdma_configuration = 0x22222222;

#if __FreeBSD_version < 1100000
TUNABLE_QUAD("hw.qlnxe.rdma_configuration", &qlnxe_rdma_configuration);

SYSCTL_UQUAD(_hw_qlnxe, OID_AUTO, rdma_configuration, CTLFLAG_RDTUN,
    &qlnxe_rdma_configuration, 0, "RDMA Configuration");
#else
SYSCTL_U64(_hw_qlnxe, OID_AUTO, rdma_configuration, CTLFLAG_RDTUN,
    &qlnxe_rdma_configuration, 0, "RDMA Configuration");
#endif /* #if __FreeBSD_version < 1100000 */
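
/*
 * Worked example: each PCI function owns one 4-bit nibble of
 * qlnxe_rdma_configuration (see qlnx_get_personality() below).
 * With the default value 0x22222222, pci_func 2 selects:
 *
 *	(0x22222222 >> (2 * QLNX_PERSONALITY_BITS_PER_FUNC)) &
 *	    QLNX_PERSONALIY_MASK == 0x2 (QLNX_PERSONALITY_ETH_IWARP)
 *
 * so, by default, every function is overridden to the iWARP personality.
 */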
static int
qlnx_vf_device(qlnx_host_t *ha)
{
    uint16_t device_id;

    device_id = ha->device_id;

    if (device_id == QLOGIC_PCI_DEVICE_ID_8090)
        return (0);

    return (-1);
}

static int
qlnx_valid_device(qlnx_host_t *ha)
{
    uint16_t device_id;

    device_id = ha->device_id;

#ifndef QLNX_VF
    if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
        (device_id == QLOGIC_PCI_DEVICE_ID_1644) ||
        (device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
        (device_id == QLOGIC_PCI_DEVICE_ID_1654) ||
        (device_id == QLOGIC_PCI_DEVICE_ID_8070))
        return (0);
#else
    if (device_id == QLOGIC_PCI_DEVICE_ID_8090)
        return (0);
#endif /* #ifndef QLNX_VF */

    return (-1);
}
#ifdef QLNX_ENABLE_IWARP
static int
qlnx_rdma_supported(struct qlnx_host *ha)
{
    uint16_t device_id;

    device_id = pci_get_device(ha->pci_dev);

    if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
        (device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
        (device_id == QLOGIC_PCI_DEVICE_ID_1654) ||
        (device_id == QLOGIC_PCI_DEVICE_ID_8070))
        return (0);

    return (-1);
}
#endif /* #ifdef QLNX_ENABLE_IWARP */
/*
 * Name: qlnx_pci_probe
 * Function: Validate the PCI device to be a QLA80XX device
 */
static int
qlnx_pci_probe(device_t dev)
{
    snprintf(qlnx_ver_str, sizeof(qlnx_ver_str), "v%d.%d.%d",
        QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, QLNX_VERSION_BUILD);
    snprintf(qlnx_name_str, sizeof(qlnx_name_str), "qlnx");

    if (pci_get_vendor(dev) != PCI_VENDOR_QLOGIC) {
        return (ENXIO);
    }

    switch (pci_get_device(dev)) {
#ifndef QLNX_VF

    case QLOGIC_PCI_DEVICE_ID_1644:
        snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
            "Qlogic 100GbE PCI CNA Adapter-Ethernet Function",
            QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
            QLNX_VERSION_BUILD);
        device_set_desc_copy(dev, qlnx_dev_str);
        break;

    case QLOGIC_PCI_DEVICE_ID_1634:
        snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
            "Qlogic 40GbE PCI CNA Adapter-Ethernet Function",
            QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
            QLNX_VERSION_BUILD);
        device_set_desc_copy(dev, qlnx_dev_str);
        break;

    case QLOGIC_PCI_DEVICE_ID_1656:
        snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
            "Qlogic 25GbE PCI CNA Adapter-Ethernet Function",
            QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
            QLNX_VERSION_BUILD);
        device_set_desc_copy(dev, qlnx_dev_str);
        break;

    case QLOGIC_PCI_DEVICE_ID_1654:
        snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
            "Qlogic 50GbE PCI CNA Adapter-Ethernet Function",
            QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
            QLNX_VERSION_BUILD);
        device_set_desc_copy(dev, qlnx_dev_str);
        break;

    case QLOGIC_PCI_DEVICE_ID_8070:
        snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
            "Qlogic 10GbE/25GbE/40GbE PCI CNA (AH)"
            " Adapter-Ethernet Function",
            QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
            QLNX_VERSION_BUILD);
        device_set_desc_copy(dev, qlnx_dev_str);
        break;

#else
    case QLOGIC_PCI_DEVICE_ID_8090:
        snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
            "Qlogic SRIOV PCI CNA (AH) "
            "Adapter-Ethernet Function",
            QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
            QLNX_VERSION_BUILD);
        device_set_desc_copy(dev, qlnx_dev_str);
        break;

#endif /* #ifndef QLNX_VF */

    default:
        return (ENXIO);
    }

#ifdef QLNX_ENABLE_IWARP
    qlnx_rdma_init();
#endif /* #ifdef QLNX_ENABLE_IWARP */

    return (BUS_PROBE_DEFAULT);
}
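
/*
 * The BD consumer indices compared below are 16-bit ring indices, so the
 * hardware copy can wrap past the driver copy. Worked example:
 * hw_bd_cons = 5, ecore_cons_idx = 65530 ->
 * diff = (1 << 16) - (65530 - 5) = 11 pending completions.
 */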
static uint16_t
qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp,
    struct qlnx_tx_queue *txq)
{
    uint16_t hw_bd_cons;
    uint16_t ecore_cons_idx;
    uint16_t diff;

    hw_bd_cons = le16toh(*txq->hw_cons_ptr);

    ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl);
    if (hw_bd_cons < ecore_cons_idx) {
        diff = (1 << 16) - (ecore_cons_idx - hw_bd_cons);
    } else {
        diff = hw_bd_cons - ecore_cons_idx;
    }

    return (diff);
}
static void
qlnx_sp_intr(void *arg)
{
    struct ecore_hwfn *p_hwfn;
    qlnx_host_t *ha;
    int i;

    p_hwfn = arg;

    if (p_hwfn == NULL) {
        printf("%s: spurious slowpath intr\n", __func__);
        return;
    }

    ha = (qlnx_host_t *)p_hwfn->p_dev;

    QL_DPRINT2(ha, "enter\n");

    for (i = 0; i < ha->cdev.num_hwfns; i++) {
        if (&ha->cdev.hwfns[i] == p_hwfn) {
            taskqueue_enqueue(ha->sp_taskqueue[i], &ha->sp_task[i]);
            break;
        }
    }

    QL_DPRINT2(ha, "exit\n");

    return;
}
static void
qlnx_sp_taskqueue(void *context, int pending)
{
    struct ecore_hwfn *p_hwfn;

    p_hwfn = context;

    if (p_hwfn != NULL) {
        qlnx_sp_isr(p_hwfn);
    }

    return;
}
static int
qlnx_create_sp_taskqueues(qlnx_host_t *ha)
{
    int i;
    uint8_t tq_name[32];

    for (i = 0; i < ha->cdev.num_hwfns; i++) {
        struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];

        bzero(tq_name, sizeof (tq_name));
        snprintf(tq_name, sizeof (tq_name), "ql_sp_tq_%d", i);

        TASK_INIT(&ha->sp_task[i], 0, qlnx_sp_taskqueue, p_hwfn);

        ha->sp_taskqueue[i] = taskqueue_create(tq_name, M_NOWAIT,
            taskqueue_thread_enqueue, &ha->sp_taskqueue[i]);

        if (ha->sp_taskqueue[i] == NULL)
            return (-1);

        taskqueue_start_threads(&ha->sp_taskqueue[i], 1, PI_NET, "%s",
            tq_name);

        QL_DPRINT1(ha, "%p\n", ha->sp_taskqueue[i]);
    }

    return (0);
}
static void
qlnx_destroy_sp_taskqueues(qlnx_host_t *ha)
{
    int i;

    for (i = 0; i < ha->cdev.num_hwfns; i++) {
        if (ha->sp_taskqueue[i] != NULL) {
            taskqueue_drain(ha->sp_taskqueue[i], &ha->sp_task[i]);
            taskqueue_free(ha->sp_taskqueue[i]);
        }
    }

    return;
}
static void
qlnx_fp_taskqueue(void *context, int pending)
{
    struct qlnx_fastpath *fp;
    qlnx_host_t *ha;
    struct ifnet *ifp;

#ifdef QLNX_TRACE_PERF_DATA
    uint64_t tx_pkts = 0, tx_compl = 0;
#endif

    fp = context;

    if (fp == NULL)
        return;

    ha = (qlnx_host_t *)fp->edev;

    ifp = ha->ifp;

    if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
        if (!drbr_empty(ifp, fp->tx_br)) {
            if (mtx_trylock(&fp->tx_mtx)) {
#ifdef QLNX_TRACE_PERF_DATA
                tx_pkts = fp->tx_pkts_transmitted;
                tx_compl = fp->tx_pkts_completed;
#endif

                qlnx_transmit_locked(ifp, fp, NULL);

#ifdef QLNX_TRACE_PERF_DATA
                fp->tx_pkts_trans_fp +=
                    (fp->tx_pkts_transmitted - tx_pkts);
                fp->tx_pkts_compl_fp +=
                    (fp->tx_pkts_completed - tx_compl);
#endif
                mtx_unlock(&fp->tx_mtx);
            }
        }
    }

    QL_DPRINT2(ha, "exit\n");

    return;
}
static int
qlnx_create_fp_taskqueues(qlnx_host_t *ha)
{
    int i;
    uint8_t tq_name[32];
    struct qlnx_fastpath *fp;

    for (i = 0; i < ha->num_rss; i++) {
        fp = &ha->fp_array[i];

        bzero(tq_name, sizeof (tq_name));
        snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i);

        TASK_INIT(&fp->fp_task, 0, qlnx_fp_taskqueue, fp);

        fp->fp_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
            taskqueue_thread_enqueue,
            &fp->fp_taskqueue);

        if (fp->fp_taskqueue == NULL)
            return (-1);

        taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
            tq_name);

        QL_DPRINT1(ha, "%p\n", fp->fp_taskqueue);
    }

    return (0);
}
static void
qlnx_destroy_fp_taskqueues(qlnx_host_t *ha)
{
    int i;
    struct qlnx_fastpath *fp;

    for (i = 0; i < ha->num_rss; i++) {
        fp = &ha->fp_array[i];

        if (fp->fp_taskqueue != NULL) {
            taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
            taskqueue_free(fp->fp_taskqueue);
            fp->fp_taskqueue = NULL;
        }
    }

    return;
}
static void
qlnx_drain_fp_taskqueues(qlnx_host_t *ha)
{
    int i;
    struct qlnx_fastpath *fp;

    for (i = 0; i < ha->num_rss; i++) {
        fp = &ha->fp_array[i];

        if (fp->fp_taskqueue != NULL) {
            taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
        }
    }

    return;
}
static void
qlnx_get_params(qlnx_host_t *ha)
{
    if ((qlnxe_queue_count < 0) || (qlnxe_queue_count > QLNX_MAX_RSS)) {
        device_printf(ha->pci_dev, "invalid queue_count value (%d)\n",
            qlnxe_queue_count);
        qlnxe_queue_count = 0;
    }
    return;
}
static void
qlnx_error_recovery_taskqueue(void *context, int pending)
{
    qlnx_host_t *ha;

    ha = context;

    QL_DPRINT2(ha, "enter\n");

#ifdef QLNX_ENABLE_IWARP
    qlnx_rdma_dev_remove(ha);
#endif /* #ifdef QLNX_ENABLE_IWARP */

    qlnx_slowpath_stop(ha);
    qlnx_slowpath_start(ha);

#ifdef QLNX_ENABLE_IWARP
    qlnx_rdma_dev_add(ha);
#endif /* #ifdef QLNX_ENABLE_IWARP */

    callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);

    QL_DPRINT2(ha, "exit\n");

    return;
}
static int
qlnx_create_error_recovery_taskqueue(qlnx_host_t *ha)
{
    uint8_t tq_name[32];

    bzero(tq_name, sizeof (tq_name));
    snprintf(tq_name, sizeof (tq_name), "ql_err_tq");

    TASK_INIT(&ha->err_task, 0, qlnx_error_recovery_taskqueue, ha);

    ha->err_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
        taskqueue_thread_enqueue, &ha->err_taskqueue);

    if (ha->err_taskqueue == NULL)
        return (-1);

    taskqueue_start_threads(&ha->err_taskqueue, 1, PI_NET, "%s", tq_name);

    QL_DPRINT1(ha, "%p\n", ha->err_taskqueue);

    return (0);
}
static void
qlnx_destroy_error_recovery_taskqueue(qlnx_host_t *ha)
{
    if (ha->err_taskqueue != NULL) {
        taskqueue_drain(ha->err_taskqueue, &ha->err_task);
        taskqueue_free(ha->err_taskqueue);
    }

    ha->err_taskqueue = NULL;

    return;
}
/*
 * Name: qlnx_pci_attach
 * Function: attaches the device to the operating system
 */
static int
qlnx_pci_attach(device_t dev)
{
    qlnx_host_t *ha = NULL;
    uint32_t rsrc_len_reg = 0;
    uint32_t rsrc_len_dbells = 0;
    uint32_t rsrc_len_msix = 0;
    int i;
    uint32_t mfw_ver;
    uint32_t num_sp_msix = 0;
    uint32_t num_rdma_irqs = 0;

    if ((ha = device_get_softc(dev)) == NULL) {
        device_printf(dev, "cannot get softc\n");
        return (ENOMEM);
    }

    memset(ha, 0, sizeof (qlnx_host_t));

    ha->device_id = pci_get_device(dev);

    if (qlnx_valid_device(ha) != 0) {
        device_printf(dev, "device is not valid device\n");
        return (ENXIO);
    }

    ha->pci_func = pci_get_function(dev);

    ha->pci_dev = dev;

    mtx_init(&ha->hw_lock, "qlnx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);

    ha->flags.lock_init = 1;

    pci_enable_busmaster(dev);

    ha->reg_rid = PCIR_BAR(0);
    ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
        RF_ACTIVE);

    if (ha->pci_reg == NULL) {
        device_printf(dev, "unable to map BAR0\n");
        goto qlnx_pci_attach_err;
    }

    rsrc_len_reg = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
        ha->reg_rid);

    ha->dbells_rid = PCIR_BAR(2);
    rsrc_len_dbells = (uint32_t) bus_get_resource_count(dev,
        SYS_RES_MEMORY, ha->dbells_rid);

    if (rsrc_len_dbells) {
        ha->pci_dbells = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
            &ha->dbells_rid, RF_ACTIVE);

        if (ha->pci_dbells == NULL) {
            device_printf(dev, "unable to map BAR1\n");
            goto qlnx_pci_attach_err;
        }
        ha->dbells_phys_addr = (uint64_t)
            bus_get_resource_start(dev, SYS_RES_MEMORY, ha->dbells_rid);

        ha->dbells_size = rsrc_len_dbells;
    } else {
        if (qlnx_vf_device(ha) != 0) {
            device_printf(dev, " BAR1 size is zero\n");
            goto qlnx_pci_attach_err;
        }
    }

    ha->msix_rid = PCIR_BAR(4);
    ha->msix_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
        &ha->msix_rid, RF_ACTIVE);

    if (ha->msix_bar == NULL) {
        device_printf(dev, "unable to map BAR2\n");
        goto qlnx_pci_attach_err;
    }

    rsrc_len_msix = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
        ha->msix_rid);

    ha->dbg_level = 0x0000;

    QL_DPRINT1(ha, "\n\t\t\t"
        "pci_dev = %p pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x"
        "\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x"
        "\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x"
        " msix_avail = 0x%x "
        "\n\t\t\t[ncpus = %d]\n",
        ha->pci_dev, ha->pci_reg, rsrc_len_reg,
        ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
        ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
        mp_ncpus);

    if (qlnx_alloc_parent_dma_tag(ha))
        goto qlnx_pci_attach_err;

    if (qlnx_alloc_tx_dma_tag(ha))
        goto qlnx_pci_attach_err;

    if (qlnx_alloc_rx_dma_tag(ha))
        goto qlnx_pci_attach_err;

    if (qlnx_init_hw(ha) != 0)
        goto qlnx_pci_attach_err;

    ha->flags.hw_init = 1;

    qlnx_get_params(ha);

    if ((pci_get_device(dev) == QLOGIC_PCI_DEVICE_ID_1644) &&
        (qlnxe_queue_count == QLNX_DEFAULT_RSS)) {
        qlnxe_queue_count = QLNX_MAX_RSS;
    }

    /*
     * Allocate MSI-x vectors
     */
    if (qlnx_vf_device(ha) != 0) {
        if (qlnxe_queue_count == 0)
            ha->num_rss = QLNX_DEFAULT_RSS;
        else
            ha->num_rss = qlnxe_queue_count;

        num_sp_msix = ha->cdev.num_hwfns;
    } else {
        uint8_t max_rxq;
        uint8_t max_txq;

        ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_rxq);
        ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_txq);

        if (max_rxq < max_txq)
            ha->num_rss = max_rxq;
        else
            ha->num_rss = max_txq;

        if (ha->num_rss > QLNX_MAX_VF_RSS)
            ha->num_rss = QLNX_MAX_VF_RSS;

        num_sp_msix = 0;
    }

    if (ha->num_rss > mp_ncpus)
        ha->num_rss = mp_ncpus;

    ha->num_tc = QLNX_MAX_TC;

    ha->msix_count = pci_msix_count(dev);

#ifdef QLNX_ENABLE_IWARP

    num_rdma_irqs = qlnx_rdma_get_num_irqs(ha);

#endif /* #ifdef QLNX_ENABLE_IWARP */

    if (!ha->msix_count ||
        (ha->msix_count < (num_sp_msix + 1 + num_rdma_irqs))) {
        device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
            ha->msix_count);
        goto qlnx_pci_attach_err;
    }

    if (ha->msix_count > (ha->num_rss + num_sp_msix + num_rdma_irqs))
        ha->msix_count = ha->num_rss + num_sp_msix + num_rdma_irqs;
    else
        ha->num_rss = ha->msix_count - (num_sp_msix + num_rdma_irqs);
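
    /*
     * Vector budget at this point (a summary of the logic above): one
     * MSI-X vector per hardware function for the slowpath (num_sp_msix),
     * one per fastpath RSS queue (num_rss), plus any vectors reserved
     * for RDMA (num_rdma_irqs). For example, a 2-hwfn PF with 4 RSS
     * queues and no RDMA needs 2 + 4 = 6 vectors.
     */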
    QL_DPRINT1(ha, "\n\t\t\t"
        "pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x"
        "\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x"
        "\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x"
        " msix_avail = 0x%x msix_alloc = 0x%x"
        "\n\t\t\t[ncpus = %d][num_rss = 0x%x] [num_tc = 0x%x]\n",
        ha->pci_reg, rsrc_len_reg,
        ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
        ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
        ha->msix_count, mp_ncpus, ha->num_rss, ha->num_tc);

    if (pci_alloc_msix(dev, &ha->msix_count)) {
        device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
            ha->msix_count);
        goto qlnx_pci_attach_err;
    }

    /*
     * Initialize slow path interrupt and task queue
     */

    if (qlnx_create_sp_taskqueues(ha) != 0)
        goto qlnx_pci_attach_err;

    for (i = 0; i < ha->cdev.num_hwfns; i++) {
        struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];

        ha->sp_irq_rid[i] = i + 1;
        ha->sp_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ,
            &ha->sp_irq_rid[i],
            (RF_ACTIVE | RF_SHAREABLE));
        if (ha->sp_irq[i] == NULL) {
            device_printf(dev,
                "could not allocate mbx interrupt\n");
            goto qlnx_pci_attach_err;
        }

        if (bus_setup_intr(dev, ha->sp_irq[i],
            (INTR_TYPE_NET | INTR_MPSAFE), NULL,
            qlnx_sp_intr, p_hwfn, &ha->sp_handle[i])) {
            device_printf(dev,
                "could not setup slow path interrupt\n");
            goto qlnx_pci_attach_err;
        }

        QL_DPRINT1(ha, "p_hwfn [%p] sp_irq_rid %d"
            " sp_irq %p sp_handle %p\n", p_hwfn,
            ha->sp_irq_rid[i], ha->sp_irq[i], ha->sp_handle[i]);
    }

    /*
     * initialize fast path interrupt
     */
    if (qlnx_create_fp_taskqueues(ha) != 0)
        goto qlnx_pci_attach_err;

    for (i = 0; i < ha->num_rss; i++) {
        ha->irq_vec[i].rss_idx = i;
        ha->irq_vec[i].ha = ha;
        ha->irq_vec[i].irq_rid = (1 + num_sp_msix) + i;

        ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
            &ha->irq_vec[i].irq_rid,
            (RF_ACTIVE | RF_SHAREABLE));

        if (ha->irq_vec[i].irq == NULL) {
            device_printf(dev,
                "could not allocate interrupt[%d] irq_rid = %d\n",
                i, ha->irq_vec[i].irq_rid);
            goto qlnx_pci_attach_err;
        }

        if (qlnx_alloc_tx_br(ha, &ha->fp_array[i])) {
            device_printf(dev, "could not allocate tx_br[%d]\n", i);
            goto qlnx_pci_attach_err;
        }
    }

    if (qlnx_vf_device(ha) != 0) {
        callout_init(&ha->qlnx_callout, 1);
        ha->flags.callout_init = 1;

        for (i = 0; i < ha->cdev.num_hwfns; i++) {
            if (qlnx_grc_dumpsize(ha, &ha->grcdump_size[i], i) != 0)
                goto qlnx_pci_attach_err;
            if (ha->grcdump_size[i] == 0)
                goto qlnx_pci_attach_err;
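
            /*
             * qlnx_grc_dumpsize()/qlnx_idle_chk_size() report a dword
             * count; shift left by 2 to size the buffers in bytes
             * (1 dword == 4 bytes).
             */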
            ha->grcdump_size[i] = ha->grcdump_size[i] << 2;
            QL_DPRINT1(ha, "grcdump_size[%d] = 0x%08x\n",
                i, ha->grcdump_size[i]);

            ha->grcdump[i] = qlnx_zalloc(ha->grcdump_size[i]);
            if (ha->grcdump[i] == NULL) {
                device_printf(dev, "grcdump alloc[%d] failed\n", i);
                goto qlnx_pci_attach_err;
            }

            if (qlnx_idle_chk_size(ha, &ha->idle_chk_size[i], i) != 0)
                goto qlnx_pci_attach_err;
            if (ha->idle_chk_size[i] == 0)
                goto qlnx_pci_attach_err;

            ha->idle_chk_size[i] = ha->idle_chk_size[i] << 2;
            QL_DPRINT1(ha, "idle_chk_size[%d] = 0x%08x\n",
                i, ha->idle_chk_size[i]);

            ha->idle_chk[i] = qlnx_zalloc(ha->idle_chk_size[i]);

            if (ha->idle_chk[i] == NULL) {
                device_printf(dev, "idle_chk alloc failed\n");
                goto qlnx_pci_attach_err;
            }
        }

        if (qlnx_create_error_recovery_taskqueue(ha) != 0)
            goto qlnx_pci_attach_err;
    }

    if (qlnx_slowpath_start(ha) != 0)
        goto qlnx_pci_attach_err;

    ha->flags.slowpath_start = 1;

    if (qlnx_vf_device(ha) != 0) {
        if (qlnx_get_flash_size(ha, &ha->flash_size) != 0) {
            qlnx_mdelay(__func__, 1000);
            qlnx_trigger_dump(ha);

            goto qlnx_pci_attach_err0;
        }

        if (qlnx_get_mfw_version(ha, &mfw_ver) != 0) {
            qlnx_mdelay(__func__, 1000);
            qlnx_trigger_dump(ha);

            goto qlnx_pci_attach_err0;
        }
    } else {
        struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0];
        ecore_mcp_get_mfw_ver(p_hwfn, NULL, &mfw_ver, NULL);
    }

    snprintf(ha->mfw_ver, sizeof(ha->mfw_ver), "%d.%d.%d.%d",
        ((mfw_ver >> 24) & 0xFF), ((mfw_ver >> 16) & 0xFF),
        ((mfw_ver >> 8) & 0xFF), (mfw_ver & 0xFF));
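
    /*
     * mfw_ver packs four version bytes, most significant first;
     * e.g. mfw_ver == 0x08210a02 formats as "8.33.10.2".
     */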
    snprintf(ha->stormfw_ver, sizeof(ha->stormfw_ver), "%d.%d.%d.%d",
        FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
        FW_ENGINEERING_VERSION);

    QL_DPRINT1(ha, "STORM_FW version %s MFW version %s\n",
        ha->stormfw_ver, ha->mfw_ver);

    qlnx_init_ifnet(dev, ha);

    qlnx_add_sysctls(ha);

qlnx_pci_attach_err0:
    /*
     * create ioctl device interface
     */
    if (qlnx_vf_device(ha) != 0) {
        if (qlnx_make_cdev(ha)) {
            device_printf(dev, "%s: ql_make_cdev failed\n", __func__);
            goto qlnx_pci_attach_err;
        }

#ifdef QLNX_ENABLE_IWARP
        qlnx_rdma_dev_add(ha);
#endif /* #ifdef QLNX_ENABLE_IWARP */
    }

#ifndef QLNX_VF
#ifdef CONFIG_ECORE_SRIOV

    if (qlnx_vf_device(ha) != 0)
        qlnx_initialize_sriov(ha);

#endif /* #ifdef CONFIG_ECORE_SRIOV */
#endif /* #ifdef QLNX_VF */

    QL_DPRINT2(ha, "success\n");

    return (0);

qlnx_pci_attach_err:

    qlnx_release(ha);

    return (ENXIO);
}
/*
 * Name: qlnx_pci_detach
 * Function: Unhooks the device from the operating system
 */
static int
qlnx_pci_detach(device_t dev)
{
    qlnx_host_t *ha = NULL;

    if ((ha = device_get_softc(dev)) == NULL) {
        device_printf(dev, "%s: cannot get softc\n", __func__);
        return (ENOMEM);
    }

    if (qlnx_vf_device(ha) != 0) {
#ifdef CONFIG_ECORE_SRIOV
        int ret;

        ret = pci_iov_detach(dev);
        if (ret) {
            device_printf(dev, "%s: SRIOV in use\n", __func__);
            return (ret);
        }
#endif /* #ifdef CONFIG_ECORE_SRIOV */

#ifdef QLNX_ENABLE_IWARP
        if (qlnx_rdma_dev_remove(ha) != 0)
            return (EBUSY);
#endif /* #ifdef QLNX_ENABLE_IWARP */
    }

    qlnx_release(ha);

    return (0);
}
#ifdef QLNX_ENABLE_IWARP

static uint8_t
qlnx_get_personality(uint8_t pci_func)
{
    uint8_t personality;

    personality = (qlnxe_rdma_configuration >>
        (pci_func * QLNX_PERSONALITY_BITS_PER_FUNC)) &
        QLNX_PERSONALIY_MASK;

    return (personality);
}
static void
qlnx_set_personality(qlnx_host_t *ha)
{
    struct ecore_hwfn *p_hwfn;
    uint8_t personality;

    p_hwfn = &ha->cdev.hwfns[0];

    personality = qlnx_get_personality(ha->pci_func);

    switch (personality) {
    case QLNX_PERSONALITY_DEFAULT:
        device_printf(ha->pci_dev, "%s: DEFAULT\n",
            __func__);
        ha->personality = ECORE_PCI_DEFAULT;
        break;

    case QLNX_PERSONALITY_ETH_ONLY:
        device_printf(ha->pci_dev, "%s: ETH_ONLY\n",
            __func__);
        ha->personality = ECORE_PCI_ETH;
        break;

    case QLNX_PERSONALITY_ETH_IWARP:
        device_printf(ha->pci_dev, "%s: ETH_IWARP\n",
            __func__);
        ha->personality = ECORE_PCI_ETH_IWARP;
        break;

    case QLNX_PERSONALITY_ETH_ROCE:
        device_printf(ha->pci_dev, "%s: ETH_ROCE\n",
            __func__);
        ha->personality = ECORE_PCI_ETH_ROCE;
        break;
    }

    return;
}

#endif /* #ifdef QLNX_ENABLE_IWARP */
static int
qlnx_init_hw(qlnx_host_t *ha)
{
    int rval = 0;
    struct ecore_hw_prepare_params params;

    ecore_init_struct(&ha->cdev);

    /* ha->dp_module = ECORE_MSG_PROBE |
    ha->dp_level = ECORE_LEVEL_VERBOSE;*/
    //ha->dp_module = ECORE_MSG_RDMA | ECORE_MSG_INTR | ECORE_MSG_LL2;
    ha->dp_level = ECORE_LEVEL_NOTICE;
    //ha->dp_level = ECORE_LEVEL_VERBOSE;

    ecore_init_dp(&ha->cdev, ha->dp_module, ha->dp_level, ha->pci_dev);

    ha->cdev.regview = ha->pci_reg;

    ha->personality = ECORE_PCI_DEFAULT;

    if (qlnx_vf_device(ha) == 0) {
        ha->cdev.b_is_vf = true;

        if (ha->pci_dbells != NULL) {
            ha->cdev.doorbells = ha->pci_dbells;
            ha->cdev.db_phys_addr = ha->dbells_phys_addr;
            ha->cdev.db_size = ha->dbells_size;
        } else {
            ha->pci_dbells = ha->pci_reg;
        }
    } else {
        ha->cdev.doorbells = ha->pci_dbells;
        ha->cdev.db_phys_addr = ha->dbells_phys_addr;
        ha->cdev.db_size = ha->dbells_size;

#ifdef QLNX_ENABLE_IWARP

        if (qlnx_rdma_supported(ha) == 0)
            qlnx_set_personality(ha);

#endif /* #ifdef QLNX_ENABLE_IWARP */
    }

    QL_DPRINT2(ha, "%s: %s\n", __func__,
        (ha->personality == ECORE_PCI_ETH_IWARP ? "iwarp": "ethernet"));

    bzero(&params, sizeof (struct ecore_hw_prepare_params));

    params.personality = ha->personality;

    params.drv_resc_alloc = false;
    params.chk_reg_fifo = false;
    params.initiate_pf_flr = true;

    rval = ecore_hw_prepare(&ha->cdev, &params);

    qlnx_set_id(&ha->cdev, qlnx_name_str, qlnx_ver_str);

    QL_DPRINT1(ha, "ha = %p cdev = %p p_hwfn = %p\n",
        ha, &ha->cdev, &ha->cdev.hwfns[0]);

    return (rval);
}
static void
qlnx_release(qlnx_host_t *ha)
{
    device_t dev;
    int i;

    dev = ha->pci_dev;

    QL_DPRINT2(ha, "enter\n");

    for (i = 0; i < QLNX_MAX_HW_FUNCS; i++) {
        if (ha->idle_chk[i] != NULL) {
            free(ha->idle_chk[i], M_QLNXBUF);
            ha->idle_chk[i] = NULL;
        }

        if (ha->grcdump[i] != NULL) {
            free(ha->grcdump[i], M_QLNXBUF);
            ha->grcdump[i] = NULL;
        }
    }

    if (ha->flags.callout_init)
        callout_drain(&ha->qlnx_callout);

    if (ha->flags.slowpath_start) {
        qlnx_slowpath_stop(ha);
    }

    if (ha->flags.hw_init)
        ecore_hw_remove(&ha->cdev);

    if (ha->ifp != NULL)
        ether_ifdetach(ha->ifp);

    qlnx_free_tx_dma_tag(ha);

    qlnx_free_rx_dma_tag(ha);

    qlnx_free_parent_dma_tag(ha);

    if (qlnx_vf_device(ha) != 0) {
        qlnx_destroy_error_recovery_taskqueue(ha);
    }

    for (i = 0; i < ha->num_rss; i++) {
        struct qlnx_fastpath *fp = &ha->fp_array[i];

        if (ha->irq_vec[i].handle) {
            (void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
                ha->irq_vec[i].handle);
        }

        if (ha->irq_vec[i].irq) {
            (void)bus_release_resource(dev, SYS_RES_IRQ,
                ha->irq_vec[i].irq_rid,
                ha->irq_vec[i].irq);
        }

        qlnx_free_tx_br(ha, fp);
    }

    qlnx_destroy_fp_taskqueues(ha);

    for (i = 0; i < ha->cdev.num_hwfns; i++) {
        if (ha->sp_handle[i])
            (void)bus_teardown_intr(dev, ha->sp_irq[i],
                ha->sp_handle[i]);

        if (ha->sp_irq[i])
            (void) bus_release_resource(dev, SYS_RES_IRQ,
                ha->sp_irq_rid[i], ha->sp_irq[i]);
    }

    qlnx_destroy_sp_taskqueues(ha);

    pci_release_msi(dev);

    if (ha->flags.lock_init) {
        mtx_destroy(&ha->hw_lock);
    }

    if (ha->pci_reg)
        (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
            ha->pci_reg);

    if (ha->dbells_size && ha->pci_dbells)
        (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->dbells_rid,
            ha->pci_dbells);

    if (ha->msix_bar)
        (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->msix_rid,
            ha->msix_bar);

    QL_DPRINT2(ha, "exit\n");

    return;
}
static void
qlnx_trigger_dump(qlnx_host_t *ha)
{
    int i;

    if (ha->ifp != NULL)
        ha->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);

    QL_DPRINT2(ha, "enter\n");

    if (qlnx_vf_device(ha) == 0)
        return;

    ha->error_recovery = 1;

    for (i = 0; i < ha->cdev.num_hwfns; i++) {
        qlnx_grc_dump(ha, &ha->grcdump_dwords[i], i);
        qlnx_idle_chk(ha, &ha->idle_chk_dwords[i], i);
    }

    QL_DPRINT2(ha, "exit\n");

    return;
}
static int
qlnx_trigger_dump_sysctl(SYSCTL_HANDLER_ARGS)
{
    int err, ret = 0;
    qlnx_host_t *ha;

    err = sysctl_handle_int(oidp, &ret, 0, req);

    if (err || !req->newptr)
        return (err);

    ha = (qlnx_host_t *)arg1;
    qlnx_trigger_dump(ha);

    return (err);
}
static int
qlnx_set_tx_coalesce(SYSCTL_HANDLER_ARGS)
{
    int err, i, ret = 0, usecs = 0;
    qlnx_host_t *ha;
    struct ecore_hwfn *p_hwfn;
    struct qlnx_fastpath *fp;

    err = sysctl_handle_int(oidp, &usecs, 0, req);

    if (err || !req->newptr || !usecs || (usecs > 255))
        return (err);

    ha = (qlnx_host_t *)arg1;

    if (qlnx_vf_device(ha) == 0)
        return (-1);

    for (i = 0; i < ha->num_rss; i++) {
        p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];

        fp = &ha->fp_array[i];

        if (fp->txq[0]->handle != NULL) {
            ret = ecore_set_queue_coalesce(p_hwfn, 0,
                (uint16_t)usecs, fp->txq[0]->handle);
        }
    }

    if (!ret)
        ha->tx_coalesce_usecs = (uint8_t)usecs;

    return (err);
}
static int
qlnx_set_rx_coalesce(SYSCTL_HANDLER_ARGS)
{
    int err, i, ret = 0, usecs = 0;
    qlnx_host_t *ha;
    struct ecore_hwfn *p_hwfn;
    struct qlnx_fastpath *fp;

    err = sysctl_handle_int(oidp, &usecs, 0, req);

    if (err || !req->newptr || !usecs || (usecs > 255))
        return (err);

    ha = (qlnx_host_t *)arg1;

    if (qlnx_vf_device(ha) == 0)
        return (-1);

    for (i = 0; i < ha->num_rss; i++) {
        p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];

        fp = &ha->fp_array[i];

        if (fp->rxq->handle != NULL) {
            ret = ecore_set_queue_coalesce(p_hwfn, (uint16_t)usecs,
                0, fp->rxq->handle);
        }
    }

    if (!ret)
        ha->rx_coalesce_usecs = (uint8_t)usecs;

    return (err);
}
static void
qlnx_add_sp_stats_sysctls(qlnx_host_t *ha)
{
    struct sysctl_ctx_list *ctx;
    struct sysctl_oid_list *children;
    struct sysctl_oid *ctx_oid;

    ctx = device_get_sysctl_ctx(ha->pci_dev);
    children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

    ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "spstat",
        CTLFLAG_RD, NULL, "spstat");
    children = SYSCTL_CHILDREN(ctx_oid);

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "sp_interrupts",
        CTLFLAG_RD, &ha->sp_interrupts,
        "No. of slowpath interrupts");

    return;
}
static void
qlnx_add_fp_stats_sysctls(qlnx_host_t *ha)
{
    struct sysctl_ctx_list *ctx;
    struct sysctl_oid_list *children;
    struct sysctl_oid_list *node_children;
    struct sysctl_oid *ctx_oid;
    int i, j;
    uint8_t name_str[16];

    ctx = device_get_sysctl_ctx(ha->pci_dev);
    children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

    ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fpstat",
        CTLFLAG_RD, NULL, "fpstat");
    children = SYSCTL_CHILDREN(ctx_oid);

    for (i = 0; i < ha->num_rss; i++) {
        bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
        snprintf(name_str, sizeof(name_str), "%d", i);

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
            CTLFLAG_RD, NULL, name_str);
        node_children = SYSCTL_CHILDREN(ctx_oid);

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tx_pkts_processed",
            CTLFLAG_RD, &ha->fp_array[i].tx_pkts_processed,
            "No. of packets processed for transmission");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tx_pkts_freed",
            CTLFLAG_RD, &ha->fp_array[i].tx_pkts_freed,
            "No. of freed packets");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tx_pkts_transmitted",
            CTLFLAG_RD, &ha->fp_array[i].tx_pkts_transmitted,
            "No. of transmitted packets");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tx_pkts_completed",
            CTLFLAG_RD, &ha->fp_array[i].tx_pkts_completed,
            "No. of transmit completions");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tx_non_tso_pkts",
            CTLFLAG_RD, &ha->fp_array[i].tx_non_tso_pkts,
            "No. of non LSO transmitted packets");

#ifdef QLNX_TRACE_PERF_DATA

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tx_pkts_trans_ctx",
            CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_ctx,
            "No. of transmitted packets in transmit context");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tx_pkts_compl_ctx",
            CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_ctx,
            "No. of transmit completions in transmit context");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tx_pkts_trans_fp",
            CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_fp,
            "No. of transmitted packets in taskqueue");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tx_pkts_compl_fp",
            CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_fp,
            "No. of transmit completions in taskqueue");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tx_pkts_compl_intr",
            CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_intr,
            "No. of transmit completions in interrupt ctx");
#endif

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tx_tso_pkts",
            CTLFLAG_RD, &ha->fp_array[i].tx_tso_pkts,
            "No. of LSO transmitted packets");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tx_lso_wnd_min_len",
            CTLFLAG_RD, &ha->fp_array[i].tx_lso_wnd_min_len,
            "tx_lso_wnd_min_len");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tx_defrag",
            CTLFLAG_RD, &ha->fp_array[i].tx_defrag,
            "tx_defrag");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tx_nsegs_gt_elem_left",
            CTLFLAG_RD, &ha->fp_array[i].tx_nsegs_gt_elem_left,
            "tx_nsegs_gt_elem_left");

        SYSCTL_ADD_UINT(ctx, node_children,
            OID_AUTO, "tx_tso_max_nsegs",
            CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_nsegs,
            ha->fp_array[i].tx_tso_max_nsegs, "tx_tso_max_nsegs");

        SYSCTL_ADD_UINT(ctx, node_children,
            OID_AUTO, "tx_tso_min_nsegs",
            CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_nsegs,
            ha->fp_array[i].tx_tso_min_nsegs, "tx_tso_min_nsegs");

        SYSCTL_ADD_UINT(ctx, node_children,
            OID_AUTO, "tx_tso_max_pkt_len",
            CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_pkt_len,
            ha->fp_array[i].tx_tso_max_pkt_len,
            "tx_tso_max_pkt_len");

        SYSCTL_ADD_UINT(ctx, node_children,
            OID_AUTO, "tx_tso_min_pkt_len",
            CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_pkt_len,
            ha->fp_array[i].tx_tso_min_pkt_len,
            "tx_tso_min_pkt_len");

        for (j = 0; j < QLNX_FP_MAX_SEGS; j++) {
            bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
            snprintf(name_str, sizeof(name_str),
                "tx_pkts_nseg_%02d", (j+1));

            SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, name_str, CTLFLAG_RD,
                &ha->fp_array[i].tx_pkts[j], name_str);
        }

#ifdef QLNX_TRACE_PERF_DATA
        for (j = 0; j < 18; j++) {
            bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
            snprintf(name_str, sizeof(name_str),
                "tx_pkts_hist_%02d", (j+1));

            SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, name_str, CTLFLAG_RD,
                &ha->fp_array[i].tx_pkts_hist[j], name_str);
        }
        for (j = 0; j < 5; j++) {
            bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
            snprintf(name_str, sizeof(name_str),
                "tx_comInt_%02d", (j+1));

            SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, name_str, CTLFLAG_RD,
                &ha->fp_array[i].tx_comInt[j], name_str);
        }
        for (j = 0; j < 18; j++) {
            bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
            snprintf(name_str, sizeof(name_str),
                "tx_pkts_q_%02d", (j+1));

            SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, name_str, CTLFLAG_RD,
                &ha->fp_array[i].tx_pkts_q[j], name_str);
        }
#endif

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_tx_nsegs_gt_elem_left",
            CTLFLAG_RD, &ha->fp_array[i].err_tx_nsegs_gt_elem_left,
            "err_tx_nsegs_gt_elem_left");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_tx_dmamap_create",
            CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_create,
            "err_tx_dmamap_create");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_tx_defrag_dmamap_load",
            CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag_dmamap_load,
            "err_tx_defrag_dmamap_load");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_tx_non_tso_max_seg",
            CTLFLAG_RD, &ha->fp_array[i].err_tx_non_tso_max_seg,
            "err_tx_non_tso_max_seg");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_tx_dmamap_load",
            CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_load,
            "err_tx_dmamap_load");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_tx_defrag",
            CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag,
            "err_tx_defrag");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_tx_free_pkt_null",
            CTLFLAG_RD, &ha->fp_array[i].err_tx_free_pkt_null,
            "err_tx_free_pkt_null");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_tx_cons_idx_conflict",
            CTLFLAG_RD, &ha->fp_array[i].err_tx_cons_idx_conflict,
            "err_tx_cons_idx_conflict");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "lro_cnt_64",
            CTLFLAG_RD, &ha->fp_array[i].lro_cnt_64,
            "lro_cnt_64");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "lro_cnt_128",
            CTLFLAG_RD, &ha->fp_array[i].lro_cnt_128,
            "lro_cnt_128");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "lro_cnt_256",
            CTLFLAG_RD, &ha->fp_array[i].lro_cnt_256,
            "lro_cnt_256");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "lro_cnt_512",
            CTLFLAG_RD, &ha->fp_array[i].lro_cnt_512,
            "lro_cnt_512");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "lro_cnt_1024",
            CTLFLAG_RD, &ha->fp_array[i].lro_cnt_1024,
            "lro_cnt_1024");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "rx_pkts",
            CTLFLAG_RD, &ha->fp_array[i].rx_pkts,
            "No. of received packets");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tpa_start",
            CTLFLAG_RD, &ha->fp_array[i].tpa_start,
            "No. of tpa_start packets");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tpa_cont",
            CTLFLAG_RD, &ha->fp_array[i].tpa_cont,
            "No. of tpa_cont packets");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tpa_end",
            CTLFLAG_RD, &ha->fp_array[i].tpa_end,
            "No. of tpa_end packets");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_m_getcl",
            CTLFLAG_RD, &ha->fp_array[i].err_m_getcl,
            "err_m_getcl");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_m_getjcl",
            CTLFLAG_RD, &ha->fp_array[i].err_m_getjcl,
            "err_m_getjcl");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_rx_hw_errors",
            CTLFLAG_RD, &ha->fp_array[i].err_rx_hw_errors,
            "err_rx_hw_errors");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_rx_alloc_errors",
            CTLFLAG_RD, &ha->fp_array[i].err_rx_alloc_errors,
            "err_rx_alloc_errors");
    }

    return;
}
static void
qlnx_add_hw_stats_sysctls(qlnx_host_t *ha)
{
    struct sysctl_ctx_list *ctx;
    struct sysctl_oid_list *children;
    struct sysctl_oid *ctx_oid;

    ctx = device_get_sysctl_ctx(ha->pci_dev);
    children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

    ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "hwstat",
        CTLFLAG_RD, NULL, "hwstat");
    children = SYSCTL_CHILDREN(ctx_oid);

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "no_buff_discards",
        CTLFLAG_RD, &ha->hw_stats.common.no_buff_discards,
        "No. of packets discarded due to lack of buffer");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "packet_too_big_discard",
        CTLFLAG_RD, &ha->hw_stats.common.packet_too_big_discard,
        "No. of packets discarded because packet was too big");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "ttl0_discard",
        CTLFLAG_RD, &ha->hw_stats.common.ttl0_discard,
        "ttl0_discard");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_ucast_bytes",
        CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_bytes,
        "rx_ucast_bytes");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_mcast_bytes",
        CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_bytes,
        "rx_mcast_bytes");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_bcast_bytes",
        CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_bytes,
        "rx_bcast_bytes");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_ucast_pkts",
        CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_pkts,
        "rx_ucast_pkts");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_mcast_pkts",
        CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_pkts,
        "rx_mcast_pkts");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_bcast_pkts",
        CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_pkts,
        "rx_bcast_pkts");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "mftag_filter_discards",
        CTLFLAG_RD, &ha->hw_stats.common.mftag_filter_discards,
        "mftag_filter_discards");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "mac_filter_discards",
        CTLFLAG_RD, &ha->hw_stats.common.mac_filter_discards,
        "mac_filter_discards");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_ucast_bytes",
        CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_bytes,
        "tx_ucast_bytes");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_mcast_bytes",
        CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_bytes,
        "tx_mcast_bytes");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_bcast_bytes",
        CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_bytes,
        "tx_bcast_bytes");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_ucast_pkts",
        CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_pkts,
        "tx_ucast_pkts");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_mcast_pkts",
        CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_pkts,
        "tx_mcast_pkts");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_bcast_pkts",
        CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_pkts,
        "tx_bcast_pkts");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_err_drop_pkts",
        CTLFLAG_RD, &ha->hw_stats.common.tx_err_drop_pkts,
        "tx_err_drop_pkts");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tpa_coalesced_pkts",
        CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_pkts,
        "tpa_coalesced_pkts");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tpa_coalesced_events",
        CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_events,
        "tpa_coalesced_events");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tpa_aborts_num",
        CTLFLAG_RD, &ha->hw_stats.common.tpa_aborts_num,
        "tpa_aborts_num");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tpa_not_coalesced_pkts",
        CTLFLAG_RD, &ha->hw_stats.common.tpa_not_coalesced_pkts,
        "tpa_not_coalesced_pkts");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tpa_coalesced_bytes",
        CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_bytes,
        "tpa_coalesced_bytes");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_64_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.common.rx_64_byte_packets,
        "rx_64_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_65_to_127_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.common.rx_65_to_127_byte_packets,
        "rx_65_to_127_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_128_to_255_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.common.rx_128_to_255_byte_packets,
        "rx_128_to_255_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_256_to_511_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.common.rx_256_to_511_byte_packets,
        "rx_256_to_511_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_512_to_1023_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.common.rx_512_to_1023_byte_packets,
        "rx_512_to_1023_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_1024_to_1518_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.common.rx_1024_to_1518_byte_packets,
        "rx_1024_to_1518_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_1519_to_1522_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_1522_byte_packets,
        "rx_1519_to_1522_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_1523_to_2047_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_2047_byte_packets,
        "rx_1523_to_2047_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_2048_to_4095_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.bb.rx_2048_to_4095_byte_packets,
        "rx_2048_to_4095_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_4096_to_9216_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.bb.rx_4096_to_9216_byte_packets,
        "rx_4096_to_9216_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_9217_to_16383_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.bb.rx_9217_to_16383_byte_packets,
        "rx_9217_to_16383_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_crc_errors",
        CTLFLAG_RD, &ha->hw_stats.common.rx_crc_errors,
        "rx_crc_errors");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_mac_crtl_frames",
        CTLFLAG_RD, &ha->hw_stats.common.rx_mac_crtl_frames,
        "rx_mac_crtl_frames");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_pause_frames",
        CTLFLAG_RD, &ha->hw_stats.common.rx_pause_frames,
        "rx_pause_frames");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_pfc_frames",
        CTLFLAG_RD, &ha->hw_stats.common.rx_pfc_frames,
        "rx_pfc_frames");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_align_errors",
        CTLFLAG_RD, &ha->hw_stats.common.rx_align_errors,
        "rx_align_errors");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_carrier_errors",
        CTLFLAG_RD, &ha->hw_stats.common.rx_carrier_errors,
        "rx_carrier_errors");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_oversize_packets",
        CTLFLAG_RD, &ha->hw_stats.common.rx_oversize_packets,
        "rx_oversize_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_jabbers",
        CTLFLAG_RD, &ha->hw_stats.common.rx_jabbers,
        "rx_jabbers");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_undersize_packets",
        CTLFLAG_RD, &ha->hw_stats.common.rx_undersize_packets,
        "rx_undersize_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_fragments",
        CTLFLAG_RD, &ha->hw_stats.common.rx_fragments,
        "rx_fragments");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_64_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.common.tx_64_byte_packets,
        "tx_64_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_65_to_127_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.common.tx_65_to_127_byte_packets,
        "tx_65_to_127_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_128_to_255_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.common.tx_128_to_255_byte_packets,
        "tx_128_to_255_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_256_to_511_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.common.tx_256_to_511_byte_packets,
        "tx_256_to_511_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_512_to_1023_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.common.tx_512_to_1023_byte_packets,
        "tx_512_to_1023_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_1024_to_1518_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.common.tx_1024_to_1518_byte_packets,
        "tx_1024_to_1518_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_1519_to_2047_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.bb.tx_1519_to_2047_byte_packets,
        "tx_1519_to_2047_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_2048_to_4095_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.bb.tx_2048_to_4095_byte_packets,
        "tx_2048_to_4095_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_4096_to_9216_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.bb.tx_4096_to_9216_byte_packets,
        "tx_4096_to_9216_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_9217_to_16383_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.bb.tx_9217_to_16383_byte_packets,
        "tx_9217_to_16383_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_pause_frames",
        CTLFLAG_RD, &ha->hw_stats.common.tx_pause_frames,
        "tx_pause_frames");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_pfc_frames",
        CTLFLAG_RD, &ha->hw_stats.common.tx_pfc_frames,
        "tx_pfc_frames");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_lpi_entry_count",
        CTLFLAG_RD, &ha->hw_stats.bb.tx_lpi_entry_count,
        "tx_lpi_entry_count");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_total_collisions",
        CTLFLAG_RD, &ha->hw_stats.bb.tx_total_collisions,
        "tx_total_collisions");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "brb_truncates",
        CTLFLAG_RD, &ha->hw_stats.common.brb_truncates,
        "brb_truncates");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "brb_discards",
        CTLFLAG_RD, &ha->hw_stats.common.brb_discards,
        "brb_discards");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_mac_bytes",
        CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bytes,
        "rx_mac_bytes");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_mac_uc_packets",
        CTLFLAG_RD, &ha->hw_stats.common.rx_mac_uc_packets,
        "rx_mac_uc_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_mac_mc_packets",
        CTLFLAG_RD, &ha->hw_stats.common.rx_mac_mc_packets,
        "rx_mac_mc_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_mac_bc_packets",
        CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bc_packets,
        "rx_mac_bc_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_mac_frames_ok",
        CTLFLAG_RD, &ha->hw_stats.common.rx_mac_frames_ok,
        "rx_mac_frames_ok");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_mac_bytes",
        CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bytes,
        "tx_mac_bytes");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_mac_uc_packets",
        CTLFLAG_RD, &ha->hw_stats.common.tx_mac_uc_packets,
        "tx_mac_uc_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_mac_mc_packets",
        CTLFLAG_RD, &ha->hw_stats.common.tx_mac_mc_packets,
        "tx_mac_mc_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_mac_bc_packets",
        CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bc_packets,
        "tx_mac_bc_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_mac_ctrl_frames",
        CTLFLAG_RD, &ha->hw_stats.common.tx_mac_ctrl_frames,
        "tx_mac_ctrl_frames");

    return;
}
static void
qlnx_add_sysctls(qlnx_host_t *ha)
{
    device_t dev = ha->pci_dev;
    struct sysctl_ctx_list *ctx;
    struct sysctl_oid_list *children;

    ctx = device_get_sysctl_ctx(dev);
    children = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

    qlnx_add_fp_stats_sysctls(ha);
    qlnx_add_sp_stats_sysctls(ha);

    if (qlnx_vf_device(ha) != 0)
        qlnx_add_hw_stats_sysctls(ha);

    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Driver_Version",
        CTLFLAG_RD, qlnx_ver_str, 0,
        "Driver Version");

    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "STORMFW_Version",
        CTLFLAG_RD, ha->stormfw_ver, 0,
        "STORM Firmware Version");

    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "MFW_Version",
        CTLFLAG_RD, ha->mfw_ver, 0,
        "Management Firmware Version");

    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "personality", CTLFLAG_RD,
        &ha->personality, ha->personality,
        "\tpersonality = 0 => Ethernet Only\n"
        "\tpersonality = 3 => Ethernet and RoCE\n"
        "\tpersonality = 4 => Ethernet and iWARP\n"
        "\tpersonality = 6 => Default in Shared Memory\n");

    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "debug", CTLFLAG_RW,
        &ha->dbg_level, ha->dbg_level, "Debug Level");

    ha->dp_level = 0x01;
    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "dp_level", CTLFLAG_RW,
        &ha->dp_level, ha->dp_level, "DP Level");

    ha->dbg_trace_lro_cnt = 0;
    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "dbg_trace_lro_cnt", CTLFLAG_RW,
        &ha->dbg_trace_lro_cnt, ha->dbg_trace_lro_cnt,
        "Trace LRO Counts");

    ha->dbg_trace_tso_pkt_len = 0;
    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "dbg_trace_tso_pkt_len", CTLFLAG_RW,
        &ha->dbg_trace_tso_pkt_len, ha->dbg_trace_tso_pkt_len,
        "Trace TSO packet lengths");

    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "dp_module", CTLFLAG_RW,
        &ha->dp_module, ha->dp_module, "DP Module");

    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "err_inject", CTLFLAG_RW,
        &ha->err_inject, ha->err_inject, "Error Inject");

    ha->storm_stats_enable = 0;

    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "storm_stats_enable", CTLFLAG_RW,
        &ha->storm_stats_enable, ha->storm_stats_enable,
        "Enable Storm Statistics Gathering");

    ha->storm_stats_index = 0;

    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "storm_stats_index", CTLFLAG_RD,
        &ha->storm_stats_index, ha->storm_stats_index,
        "Enable Storm Statistics Gathering Current Index");

    ha->grcdump_taken = 0;
    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "grcdump_taken", CTLFLAG_RD,
        &ha->grcdump_taken, ha->grcdump_taken,
        "grcdump_taken");

    ha->idle_chk_taken = 0;
    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "idle_chk_taken", CTLFLAG_RD,
        &ha->idle_chk_taken, ha->idle_chk_taken,
        "idle_chk_taken");

    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "rx_coalesce_usecs", CTLFLAG_RD,
        &ha->rx_coalesce_usecs, ha->rx_coalesce_usecs,
        "rx_coalesce_usecs");

    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "tx_coalesce_usecs", CTLFLAG_RD,
        &ha->tx_coalesce_usecs, ha->tx_coalesce_usecs,
        "tx_coalesce_usecs");

    SYSCTL_ADD_PROC(ctx, children,
        OID_AUTO, "trigger_dump", (CTLTYPE_INT | CTLFLAG_RW),
        (void *)ha, 0,
        qlnx_trigger_dump_sysctl, "I", "trigger_dump");
2317 SYSCTL_ADD_PROC(ctx, children,
2318 OID_AUTO, "set_rx_coalesce_usecs",
2319 (CTLTYPE_INT | CTLFLAG_RW),
2321 qlnx_set_rx_coalesce, "I",
2322 "rx interrupt coalesce period microseconds");
2324 SYSCTL_ADD_PROC(ctx, children,
2325 OID_AUTO, "set_tx_coalesce_usecs",
2326 (CTLTYPE_INT | CTLFLAG_RW),
2328 qlnx_set_tx_coalesce, "I",
2329 "tx interrupt coalesce period microseconds");
2331 ha->rx_pkt_threshold = 128;
2332 SYSCTL_ADD_UINT(ctx, children,
2333 OID_AUTO, "rx_pkt_threshold", CTLFLAG_RW,
2334 &ha->rx_pkt_threshold, ha->rx_pkt_threshold,
2335 "No. of Rx Pkts to process at a time");
2337 ha->rx_jumbo_buf_eq_mtu = 0;
2338 SYSCTL_ADD_UINT(ctx, children,
2339 OID_AUTO, "rx_jumbo_buf_eq_mtu", CTLFLAG_RW,
2340 &ha->rx_jumbo_buf_eq_mtu, ha->rx_jumbo_buf_eq_mtu,
2341 "== 0 => Rx Jumbo buffers are capped to 4Kbytes\n"
2342 "otherwise Rx Jumbo buffers are set to >= MTU size\n");
2344 SYSCTL_ADD_QUAD(ctx, children,
2345 OID_AUTO, "err_illegal_intr", CTLFLAG_RD,
2346 &ha->err_illegal_intr, "err_illegal_intr");
2348 SYSCTL_ADD_QUAD(ctx, children,
2349 OID_AUTO, "err_fp_null", CTLFLAG_RD,
2350 &ha->err_fp_null, "err_fp_null");
2352 SYSCTL_ADD_QUAD(ctx, children,
2353 OID_AUTO, "err_get_proto_invalid_type", CTLFLAG_RD,
2354 &ha->err_get_proto_invalid_type, "err_get_proto_invalid_type");
2360 /*****************************************************************************
2361 * Operating System Network Interface Functions
2362 *****************************************************************************/
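/*
 * qlnx_init_ifnet: allocate and initialize the ifnet. The link
 * baudrate and the ifmedia entries added below are chosen from the
 * PCI device id; checksum offload, TSO, LRO and VLAN capabilities
 * are then enabled.
 */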
2365 qlnx_init_ifnet(device_t dev, qlnx_host_t *ha)
2370 ifp = ha->ifp = if_alloc(IFT_ETHER);
2373 panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));
2375 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2377 device_id = pci_get_device(ha->pci_dev);
2379 #if __FreeBSD_version >= 1000000
2381 if (device_id == QLOGIC_PCI_DEVICE_ID_1634)
2382 ifp->if_baudrate = IF_Gbps(40);
2383 else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
2384 (device_id == QLOGIC_PCI_DEVICE_ID_8070))
2385 ifp->if_baudrate = IF_Gbps(25);
2386 else if (device_id == QLOGIC_PCI_DEVICE_ID_1654)
2387 ifp->if_baudrate = IF_Gbps(50);
2388 else if (device_id == QLOGIC_PCI_DEVICE_ID_1644)
2389 ifp->if_baudrate = IF_Gbps(100);
2391 ifp->if_capabilities = IFCAP_LINKSTATE;
2393 ifp->if_mtu = ETHERMTU;
2394 ifp->if_baudrate = (1 * 1000 * 1000 * 1000);
2396 #endif /* #if __FreeBSD_version >= 1000000 */
2398 ifp->if_init = qlnx_init;
2400 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2401 ifp->if_ioctl = qlnx_ioctl;
2402 ifp->if_transmit = qlnx_transmit;
2403 ifp->if_qflush = qlnx_qflush;
2405 IFQ_SET_MAXLEN(&ifp->if_snd, qlnx_get_ifq_snd_maxlen(ha));
2406 ifp->if_snd.ifq_drv_maxlen = qlnx_get_ifq_snd_maxlen(ha);
2407 IFQ_SET_READY(&ifp->if_snd);
2409 #if __FreeBSD_version >= 1100036
2410 if_setgetcounterfn(ifp, qlnx_get_counter);
2413 ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2415 memcpy(ha->primary_mac, qlnx_get_mac_addr(ha), ETH_ALEN);
2417 if (!ha->primary_mac[0] && !ha->primary_mac[1] &&
2418 !ha->primary_mac[2] && !ha->primary_mac[3] &&
2419 !ha->primary_mac[4] && !ha->primary_mac[5]) {
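/* No MAC address was provisioned; synthesize a fallback address
 * from what appears to be a QLogic OUI (00:0e:1e) with randomized
 * low bytes. */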
2424 ha->primary_mac[0] = 0x00;
2425 ha->primary_mac[1] = 0x0e;
2426 ha->primary_mac[2] = 0x1e;
2427 ha->primary_mac[3] = rnd & 0xFF;
2428 ha->primary_mac[4] = (rnd >> 8) & 0xFF;
2429 ha->primary_mac[5] = (rnd >> 16) & 0xFF;
2432 ether_ifattach(ifp, ha->primary_mac);
2433 bcopy(IF_LLADDR(ha->ifp), ha->primary_mac, ETHER_ADDR_LEN);
2435 ifp->if_capabilities = IFCAP_HWCSUM;
2436 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2438 ifp->if_capabilities |= IFCAP_VLAN_MTU;
2439 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
2440 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2441 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
2442 ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
2443 ifp->if_capabilities |= IFCAP_TSO4;
2444 ifp->if_capabilities |= IFCAP_TSO6;
2445 ifp->if_capabilities |= IFCAP_LRO;
2447 ifp->if_hw_tsomax = QLNX_MAX_TSO_FRAME_SIZE -
2448 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
2449 ifp->if_hw_tsomaxsegcount = QLNX_MAX_SEGMENTS - 1 /* hdr */;
2450 ifp->if_hw_tsomaxsegsize = QLNX_MAX_TX_MBUF_SIZE;
2453 ifp->if_capenable = ifp->if_capabilities;
2455 ifp->if_hwassist = CSUM_IP;
2456 ifp->if_hwassist |= CSUM_TCP | CSUM_UDP;
2457 ifp->if_hwassist |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6;
2458 ifp->if_hwassist |= CSUM_TSO;
2460 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2462 ifmedia_init(&ha->media, IFM_IMASK, qlnx_media_change,\
2465 if (device_id == QLOGIC_PCI_DEVICE_ID_1634) {
2466 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_LR4), 0, NULL);
2467 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_SR4), 0, NULL);
2468 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_CR4), 0, NULL);
2469 } else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
2470 (device_id == QLOGIC_PCI_DEVICE_ID_8070)) {
2471 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_SR), 0, NULL);
2472 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_CR), 0, NULL);
2473 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) {
2474 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_KR2), 0, NULL);
2475 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_CR2), 0, NULL);
2476 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) {
2477 ifmedia_add(&ha->media,
2478 (IFM_ETHER | QLNX_IFM_100G_LR4), 0, NULL);
2479 ifmedia_add(&ha->media,
2480 (IFM_ETHER | QLNX_IFM_100G_SR4), 0, NULL);
2481 ifmedia_add(&ha->media,
2482 (IFM_ETHER | QLNX_IFM_100G_CR4), 0, NULL);
2485 ifmedia_add(&ha->media, (IFM_ETHER | IFM_FDX), 0, NULL);
2486 ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);
2489 ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));
2491 QL_DPRINT2(ha, "exit\n");
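/*
 * qlnx_init_locked: bring the interface up. qlnx_load() is expected
 * to allocate the queues and start the device; on success the ifnet
 * is marked running and, for iWARP builds, the RDMA device is opened.
 */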
2497 qlnx_init_locked(qlnx_host_t *ha)
2499 struct ifnet *ifp = ha->ifp;
2501 QL_DPRINT1(ha, "Driver Initialization start \n");
2505 if (qlnx_load(ha) == 0) {
2507 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2508 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2510 #ifdef QLNX_ENABLE_IWARP
2511 if (qlnx_vf_device(ha) != 0) {
2512 qlnx_rdma_dev_open(ha);
2514 #endif /* #ifdef QLNX_ENABLE_IWARP */
2521 qlnx_init(void *arg)
2525 ha = (qlnx_host_t *)arg;
2527 QL_DPRINT2(ha, "enter\n");
2530 qlnx_init_locked(ha);
2533 QL_DPRINT2(ha, "exit\n");
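/*
 * Program a single multicast MAC filter. add_mac selects between
 * ECORE_FILTER_ADD and ECORE_FILTER_REMOVE; the command is issued on
 * the slowpath queue in callback mode (ECORE_SPQ_MODE_CB).
 */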
2539 qlnx_config_mcast_mac_addr(qlnx_host_t *ha, uint8_t *mac_addr, uint32_t add_mac)
2541 struct ecore_filter_mcast *mcast;
2542 struct ecore_dev *cdev;
2547 mcast = &ha->ecore_mcast;
2548 bzero(mcast, sizeof(struct ecore_filter_mcast));
2551 mcast->opcode = ECORE_FILTER_ADD;
2553 mcast->opcode = ECORE_FILTER_REMOVE;
2555 mcast->num_mc_addrs = 1;
2556 memcpy(mcast->mac, mac_addr, ETH_ALEN);
2558 rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);
2564 qlnx_hw_add_mcast(qlnx_host_t *ha, uint8_t *mta)
2568 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2570 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0)
2571 return 0; /* it's already been added */
2574 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2576 if ((ha->mcast[i].addr[0] == 0) &&
2577 (ha->mcast[i].addr[1] == 0) &&
2578 (ha->mcast[i].addr[2] == 0) &&
2579 (ha->mcast[i].addr[3] == 0) &&
2580 (ha->mcast[i].addr[4] == 0) &&
2581 (ha->mcast[i].addr[5] == 0)) {
2583 if (qlnx_config_mcast_mac_addr(ha, mta, 1))
2586 bcopy(mta, ha->mcast[i].addr, ETH_ALEN);
2596 qlnx_hw_del_mcast(qlnx_host_t *ha, uint8_t *mta)
2600 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2601 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) {
2603 if (qlnx_config_mcast_mac_addr(ha, mta, 0))
2606 ha->mcast[i].addr[0] = 0;
2607 ha->mcast[i].addr[1] = 0;
2608 ha->mcast[i].addr[2] = 0;
2609 ha->mcast[i].addr[3] = 0;
2610 ha->mcast[i].addr[4] = 0;
2611 ha->mcast[i].addr[5] = 0;
2622 * Name: qlnx_hw_set_multi
2623 * Function: Sets the multicast addresses provided by the host O.S. into
2624 * the hardware (for the given interface)
2627 qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
2632 for (i = 0; i < mcnt; i++) {
2634 if (qlnx_hw_add_mcast(ha, mta))
2637 if (qlnx_hw_del_mcast(ha, mta))
2641 mta += ETHER_HDR_LEN;
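/* Note: the stride above is ETHER_HDR_LEN (14), not ETHER_ADDR_LEN
 * (6); it matches how qlnx_set_multi() packs addresses into the mta
 * array. */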
2647 #define QLNX_MCAST_ADDRS_SIZE (QLNX_MAX_NUM_MULTICAST_ADDRS * ETHER_HDR_LEN)
2649 qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi)
2651 uint8_t mta[QLNX_MCAST_ADDRS_SIZE];
2652 struct ifmultiaddr *ifma;
2654 struct ifnet *ifp = ha->ifp;
2657 if (qlnx_vf_device(ha) == 0)
2660 if_maddr_rlock(ifp);
2662 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2664 if (ifma->ifma_addr->sa_family != AF_LINK)
2667 if (mcnt == QLNX_MAX_NUM_MULTICAST_ADDRS)
2670 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
2671 &mta[mcnt * ETHER_HDR_LEN], ETHER_HDR_LEN);
2676 if_maddr_runlock(ifp);
2679 qlnx_hw_set_multi(ha, mta, mcnt, add_multi);
2686 qlnx_set_promisc(qlnx_host_t *ha)
2691 if (qlnx_vf_device(ha) == 0)
2694 filter = ha->filter;
2695 filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
2696 filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
2698 rc = qlnx_set_rx_accept_filter(ha, filter);
2703 qlnx_set_allmulti(qlnx_host_t *ha)
2708 if (qlnx_vf_device(ha) == 0)
2711 filter = ha->filter;
2712 filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
2713 rc = qlnx_set_rx_accept_filter(ha, filter);
2720 qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2723 struct ifreq *ifr = (struct ifreq *)data;
2724 struct ifaddr *ifa = (struct ifaddr *)data;
2727 ha = (qlnx_host_t *)ifp->if_softc;
2731 QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx)\n", cmd);
2733 if (ifa->ifa_addr->sa_family == AF_INET) {
2734 ifp->if_flags |= IFF_UP;
2735 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2737 qlnx_init_locked(ha);
2740 QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
2741 cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr));
2743 arp_ifinit(ifp, ifa);
2745 ether_ioctl(ifp, cmd, data);
2750 QL_DPRINT4(ha, "SIOCSIFMTU (0x%lx)\n", cmd);
2752 if (ifr->ifr_mtu > QLNX_MAX_MTU) {
2756 ifp->if_mtu = ifr->ifr_mtu;
2757 ha->max_frame_size =
2758 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2759 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2760 qlnx_init_locked(ha);
2769 QL_DPRINT4(ha, "SIOCSIFFLAGS (0x%lx)\n", cmd);
2773 if (ifp->if_flags & IFF_UP) {
2774 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2775 if ((ifp->if_flags ^ ha->if_flags) &
2777 ret = qlnx_set_promisc(ha);
2778 } else if ((ifp->if_flags ^ ha->if_flags) &
2780 ret = qlnx_set_allmulti(ha);
2783 ha->max_frame_size = ifp->if_mtu +
2784 ETHER_HDR_LEN + ETHER_CRC_LEN;
2785 qlnx_init_locked(ha);
2788 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2790 ha->if_flags = ifp->if_flags;
2797 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCADDMULTI", cmd);
2799 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2800 if (qlnx_set_multi(ha, 1))
2806 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCDELMULTI", cmd);
2808 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2809 if (qlnx_set_multi(ha, 0))
2816 QL_DPRINT4(ha, "SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", cmd);
2818 ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
2823 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2825 QL_DPRINT4(ha, "SIOCSIFCAP (0x%lx)\n", cmd);
2827 if (mask & IFCAP_HWCSUM)
2828 ifp->if_capenable ^= IFCAP_HWCSUM;
2829 if (mask & IFCAP_TSO4)
2830 ifp->if_capenable ^= IFCAP_TSO4;
2831 if (mask & IFCAP_TSO6)
2832 ifp->if_capenable ^= IFCAP_TSO6;
2833 if (mask & IFCAP_VLAN_HWTAGGING)
2834 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2835 if (mask & IFCAP_VLAN_HWTSO)
2836 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
2837 if (mask & IFCAP_LRO)
2838 ifp->if_capenable ^= IFCAP_LRO;
2842 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2843 qlnx_init_locked(ha);
2847 VLAN_CAPABILITIES(ifp);
2850 #if (__FreeBSD_version >= 1100101)
2854 struct ifi2creq i2c;
2855 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0];
2856 struct ecore_ptt *p_ptt;
2858 ret = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
2863 if ((i2c.len > sizeof (i2c.data)) ||
2864 (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2)) {
2869 p_ptt = ecore_ptt_acquire(p_hwfn);
2872 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
2877 ret = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt,
2878 (ha->pci_func & 0x1), i2c.dev_addr, i2c.offset,
2879 i2c.len, &i2c.data[0]);
2881 ecore_ptt_release(p_hwfn, p_ptt);
2888 ret = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
2890 QL_DPRINT8(ha, "SIOCGI2C copyout ret = %d \
2891 len = %d addr = 0x%02x offset = 0x%04x \
2892 data[0..7]=0x%02x 0x%02x 0x%02x 0x%02x 0x%02x \
2893 0x%02x 0x%02x 0x%02x\n",
2894 ret, i2c.len, i2c.dev_addr, i2c.offset,
2895 i2c.data[0], i2c.data[1], i2c.data[2], i2c.data[3],
2896 i2c.data[4], i2c.data[5], i2c.data[6], i2c.data[7]);
2899 #endif /* #if (__FreeBSD_version >= 1100101) */
2902 QL_DPRINT4(ha, "default (0x%lx)\n", cmd);
2903 ret = ether_ioctl(ifp, cmd, data);
2911 qlnx_media_change(struct ifnet *ifp)
2914 struct ifmedia *ifm;
2917 ha = (qlnx_host_t *)ifp->if_softc;
2919 QL_DPRINT2(ha, "enter\n");
2923 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2926 QL_DPRINT2(ha, "exit\n");
2932 qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2936 ha = (qlnx_host_t *)ifp->if_softc;
2938 QL_DPRINT2(ha, "enter\n");
2940 ifmr->ifm_status = IFM_AVALID;
2941 ifmr->ifm_active = IFM_ETHER;
2944 ifmr->ifm_status |= IFM_ACTIVE;
2946 (IFM_FDX | qlnx_get_optics(ha, &ha->if_link));
2948 if (ha->if_link.link_partner_caps &
2949 (QLNX_LINK_CAP_Pause | QLNX_LINK_CAP_Asym_Pause))
2951 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE);
2954 QL_DPRINT2(ha, "exit (%s)\n", (ha->link_up ? "link_up" : "link_down"));
2961 qlnx_free_tx_pkt(qlnx_host_t *ha, struct qlnx_fastpath *fp,
2962 struct qlnx_tx_queue *txq)
2968 struct eth_tx_bd *tx_data_bd;
2969 struct eth_tx_1st_bd *first_bd;
2972 idx = txq->sw_tx_cons;
2973 mp = txq->sw_tx_ring[idx].mp;
2974 map = txq->sw_tx_ring[idx].map;
2976 if ((mp == NULL) || QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL)){
2978 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL);
2980 QL_DPRINT1(ha, "(mp == NULL) "
2982 " ecore_prod_idx = 0x%x"
2983 " ecore_cons_idx = 0x%x"
2984 " hw_bd_cons = 0x%x"
2985 " txq_db_last = 0x%x"
2986 " elem_left = 0x%x\n",
2988 ecore_chain_get_prod_idx(&txq->tx_pbl),
2989 ecore_chain_get_cons_idx(&txq->tx_pbl),
2990 le16toh(*txq->hw_cons_ptr),
2992 ecore_chain_get_elem_left(&txq->tx_pbl));
2994 fp->err_tx_free_pkt_null++;
2997 qlnx_trigger_dump(ha);
3002 QLNX_INC_OPACKETS((ha->ifp));
3003 QLNX_INC_OBYTES((ha->ifp), (mp->m_pkthdr.len));
3005 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_POSTWRITE);
3006 bus_dmamap_unload(ha->tx_tag, map);
3008 fp->tx_pkts_freed++;
3009 fp->tx_pkts_completed++;
3014 first_bd = (struct eth_tx_1st_bd *)ecore_chain_consume(&txq->tx_pbl);
3015 nbds = first_bd->data.nbds;
3017 // BD_SET_UNMAP_ADDR_LEN(first_bd, 0, 0);
3019 for (i = 1; i < nbds; i++) {
3020 tx_data_bd = ecore_chain_consume(&txq->tx_pbl);
3021 // BD_SET_UNMAP_ADDR_LEN(tx_data_bd, 0, 0);
3023 txq->sw_tx_ring[idx].flags = 0;
3024 txq->sw_tx_ring[idx].mp = NULL;
3025 txq->sw_tx_ring[idx].map = (bus_dmamap_t)0;
3031 qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
3032 struct qlnx_tx_queue *txq)
3039 hw_bd_cons = le16toh(*txq->hw_cons_ptr);
3041 while (hw_bd_cons !=
3042 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
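/* The chain indices are 16 bits wide and wrap, so compute the
 * producer/consumer distance modulo 2^16. */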
3044 if (hw_bd_cons < ecore_cons_idx) {
3045 diff = (1 << 16) - (ecore_cons_idx - hw_bd_cons);
3047 diff = hw_bd_cons - ecore_cons_idx;
3049 if ((diff > TX_RING_SIZE) ||
3050 QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF)){
3052 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF);
3054 QL_DPRINT1(ha, "(diff = 0x%x) "
3056 " ecore_prod_idx = 0x%x"
3057 " ecore_cons_idx = 0x%x"
3058 " hw_bd_cons = 0x%x"
3059 " txq_db_last = 0x%x"
3060 " elem_left = 0x%x\n",
3063 ecore_chain_get_prod_idx(&txq->tx_pbl),
3064 ecore_chain_get_cons_idx(&txq->tx_pbl),
3065 le16toh(*txq->hw_cons_ptr),
3067 ecore_chain_get_elem_left(&txq->tx_pbl));
3069 fp->err_tx_cons_idx_conflict++;
3072 qlnx_trigger_dump(ha);
3075 idx = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1);
3076 idx2 = (txq->sw_tx_cons + 2) & (TX_RING_SIZE - 1);
3077 prefetch(txq->sw_tx_ring[idx].mp);
3078 prefetch(txq->sw_tx_ring[idx2].mp);
3080 qlnx_free_tx_pkt(ha, fp, txq);
3082 txq->sw_tx_cons = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1);
3088 qlnx_transmit_locked(struct ifnet *ifp, struct qlnx_fastpath *fp, struct mbuf *mp)
3091 struct qlnx_tx_queue *txq;
3096 ha = (qlnx_host_t *)fp->edev;
3099 if ((!(ifp->if_drv_flags & IFF_DRV_RUNNING)) || (!ha->link_up)) {
3101 ret = drbr_enqueue(ifp, fp->tx_br, mp);
3106 ret = drbr_enqueue(ifp, fp->tx_br, mp);
3108 mp = drbr_peek(ifp, fp->tx_br);
3110 while (mp != NULL) {
3112 if (qlnx_send(ha, fp, &mp)) {
3115 drbr_putback(ifp, fp->tx_br, mp);
3117 fp->tx_pkts_processed++;
3118 drbr_advance(ifp, fp->tx_br);
3120 goto qlnx_transmit_locked_exit;
3123 drbr_advance(ifp, fp->tx_br);
3124 fp->tx_pkts_transmitted++;
3125 fp->tx_pkts_processed++;
3128 mp = drbr_peek(ifp, fp->tx_br);
3131 qlnx_transmit_locked_exit:
3132 if ((qlnx_num_tx_compl(ha, fp, fp->txq[0]) > QLNX_TX_COMPL_THRESH) ||
3133 ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))
3134 < QLNX_TX_ELEM_MAX_THRESH))
3135 (void)qlnx_tx_int(ha, fp, fp->txq[0]);
3137 QL_DPRINT2(ha, "%s: exit ret = %d\n", __func__, ret);
3143 qlnx_transmit(struct ifnet *ifp, struct mbuf *mp)
3145 qlnx_host_t *ha = (qlnx_host_t *)ifp->if_softc;
3146 struct qlnx_fastpath *fp;
3147 int rss_id = 0, ret = 0;
3149 #ifdef QLNX_TRACEPERF_DATA
3150 uint64_t tx_pkts = 0, tx_compl = 0;
3153 QL_DPRINT2(ha, "enter\n");
3155 #if __FreeBSD_version >= 1100000
3156 if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE)
3158 if (mp->m_flags & M_FLOWID)
3160 rss_id = (mp->m_pkthdr.flowid % ECORE_RSS_IND_TABLE_SIZE) %
3163 fp = &ha->fp_array[rss_id];
3165 if (fp->tx_br == NULL) {
3167 goto qlnx_transmit_exit;
3170 if (mtx_trylock(&fp->tx_mtx)) {
3172 #ifdef QLNX_TRACEPERF_DATA
3173 tx_pkts = fp->tx_pkts_transmitted;
3174 tx_compl = fp->tx_pkts_completed;
3177 ret = qlnx_transmit_locked(ifp, fp, mp);
3179 #ifdef QLNX_TRACEPERF_DATA
3180 fp->tx_pkts_trans_ctx += (fp->tx_pkts_transmitted - tx_pkts);
3181 fp->tx_pkts_compl_ctx += (fp->tx_pkts_completed - tx_compl);
3183 mtx_unlock(&fp->tx_mtx);
3185 if (mp != NULL && (fp->fp_taskqueue != NULL)) {
3186 ret = drbr_enqueue(ifp, fp->tx_br, mp);
3187 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
3193 QL_DPRINT2(ha, "exit ret = %d\n", ret);
3198 qlnx_qflush(struct ifnet *ifp)
3201 struct qlnx_fastpath *fp;
3205 ha = (qlnx_host_t *)ifp->if_softc;
3207 QL_DPRINT2(ha, "enter\n");
3209 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
3211 fp = &ha->fp_array[rss_id];
3217 mtx_lock(&fp->tx_mtx);
3219 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
3220 fp->tx_pkts_freed++;
3223 mtx_unlock(&fp->tx_mtx);
3226 QL_DPRINT2(ha, "exit\n");
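/*
 * qlnx_txq_doorbell_wr32: ring a TX doorbell. reg_addr is a virtual
 * address within the doorbell BAR, so it is converted to an offset
 * before bus_write_4(); the read barriers force the write to post.
 */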
3232 qlnx_txq_doorbell_wr32(qlnx_host_t *ha, void *reg_addr, uint32_t value)
3234 struct ecore_dev *cdev;
3239 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)ha->pci_dbells);
3241 bus_write_4(ha->pci_dbells, offset, value);
3242 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_READ);
3243 bus_barrier(ha->pci_dbells, 0, 0, BUS_SPACE_BARRIER_READ);
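/*
 * qlnx_tcp_offset: compute the byte offset of the end of the TCP
 * header (ethernet + IP/IPv6 + TCP) for a TSO frame. The headers may
 * not be contiguous in the first mbuf, hence the m_copydata() into a
 * local buffer.
 */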
3249 qlnx_tcp_offset(qlnx_host_t *ha, struct mbuf *mp)
3251 struct ether_vlan_header *eh = NULL;
3252 struct ip *ip = NULL;
3253 struct ip6_hdr *ip6 = NULL;
3254 struct tcphdr *th = NULL;
3255 uint32_t ehdrlen = 0, ip_hlen = 0, offset = 0;
3258 uint8_t buf[sizeof(struct ip6_hdr)];
3262 eh = mtod(mp, struct ether_vlan_header *);
3264 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3265 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3266 etype = ntohs(eh->evl_proto);
3268 ehdrlen = ETHER_HDR_LEN;
3269 etype = ntohs(eh->evl_encap_proto);
3275 ip = (struct ip *)(mp->m_data + ehdrlen);
3277 ip_hlen = sizeof (struct ip);
3279 if (mp->m_len < (ehdrlen + ip_hlen)) {
3280 m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
3281 ip = (struct ip *)buf;
3284 th = (struct tcphdr *)(ip + 1);
3285 offset = ip_hlen + ehdrlen + (th->th_off << 2);
3288 case ETHERTYPE_IPV6:
3289 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3291 ip_hlen = sizeof(struct ip6_hdr);
3293 if (mp->m_len < (ehdrlen + ip_hlen)) {
3294 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
3296 ip6 = (struct ip6_hdr *)buf;
3298 th = (struct tcphdr *)(ip6 + 1);
3299 offset = ip_hlen + ehdrlen + (th->th_off << 2);
3310 qlnx_tso_check(struct qlnx_fastpath *fp, bus_dma_segment_t *segs, int nsegs,
3314 uint32_t sum, nbds_in_hdr = 1;
3316 bus_dma_segment_t *s_seg;
3318 /* If the header spans multiple segments, skip those segments */
3320 if (nsegs < ETH_TX_LSO_WINDOW_BDS_NUM)
3325 while ((i < nsegs) && (offset >= segs->ds_len)) {
3326 offset = offset - segs->ds_len;
3332 window = ETH_TX_LSO_WINDOW_BDS_NUM - nbds_in_hdr;
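/* The firmware expects every window of ETH_TX_LSO_WINDOW_BDS_NUM
 * consecutive BDs to carry at least ETH_TX_LSO_WINDOW_MIN_LEN bytes;
 * slide the window across the segments and fail the check (so the
 * caller defragments the mbuf chain) if any window falls short. */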
3336 while (nsegs >= window) {
3341 for (i = 0; i < window; i++){
3342 sum += s_seg->ds_len;
3346 if (sum < ETH_TX_LSO_WINDOW_MIN_LEN) {
3347 fp->tx_lso_wnd_min_len++;
3359 qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct mbuf **m_headp)
3361 bus_dma_segment_t *segs;
3362 bus_dmamap_t map = 0;
3365 struct mbuf *m_head = *m_headp;
3370 struct qlnx_tx_queue *txq;
3372 struct eth_tx_1st_bd *first_bd;
3373 struct eth_tx_2nd_bd *second_bd;
3374 struct eth_tx_3rd_bd *third_bd;
3375 struct eth_tx_bd *tx_data_bd;
3378 uint32_t nbds_in_hdr = 0;
3379 uint32_t offset = 0;
3381 #ifdef QLNX_TRACE_PERF_DATA
3385 QL_DPRINT8(ha, "enter[%d]\n", fp->rss_id);
3397 if ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl)) <
3398 QLNX_TX_ELEM_MIN_THRESH) {
3400 fp->tx_nsegs_gt_elem_left++;
3401 fp->err_tx_nsegs_gt_elem_left++;
3406 idx = txq->sw_tx_prod;
3408 map = txq->sw_tx_ring[idx].map;
3411 ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
3414 if (ha->dbg_trace_tso_pkt_len) {
3415 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3416 if (!fp->tx_tso_min_pkt_len) {
3417 fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len;
3418 fp->tx_tso_max_pkt_len = m_head->m_pkthdr.len;
3420 if (fp->tx_tso_min_pkt_len > m_head->m_pkthdr.len)
3421 fp->tx_tso_min_pkt_len =
3422 m_head->m_pkthdr.len;
3423 if (fp->tx_tso_max_pkt_len < m_head->m_pkthdr.len)
3424 fp->tx_tso_max_pkt_len =
3425 m_head->m_pkthdr.len;
3430 if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
3431 offset = qlnx_tcp_offset(ha, m_head);
3433 if ((ret == EFBIG) ||
3434 ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && (
3435 (!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) ||
3436 ((m_head->m_pkthdr.csum_flags & CSUM_TSO) &&
3437 qlnx_tso_check(fp, segs, nsegs, offset))))) {
3441 QL_DPRINT8(ha, "EFBIG [%d]\n", m_head->m_pkthdr.len);
3445 m = m_defrag(m_head, M_NOWAIT);
3447 fp->err_tx_defrag++;
3448 fp->tx_pkts_freed++;
3451 QL_DPRINT1(ha, "m_defrag() = NULL [%d]\n", ret);
3458 if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
3459 segs, &nsegs, BUS_DMA_NOWAIT))) {
3461 fp->err_tx_defrag_dmamap_load++;
3464 "bus_dmamap_load_mbuf_sg failed0 [%d, %d]\n",
3465 ret, m_head->m_pkthdr.len);
3467 fp->tx_pkts_freed++;
3474 if ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) &&
3475 !(m_head->m_pkthdr.csum_flags & CSUM_TSO)) {
3477 fp->err_tx_non_tso_max_seg++;
3480 "(%d) nsegs too many for non-TSO [%d, %d]\n",
3481 ret, nsegs, m_head->m_pkthdr.len);
3483 fp->tx_pkts_freed++;
3489 if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
3490 offset = qlnx_tcp_offset(ha, m_head);
3494 fp->err_tx_dmamap_load++;
3496 QL_DPRINT1(ha, "bus_dmamap_load_mbuf_sg failed1 [%d, %d]\n",
3497 ret, m_head->m_pkthdr.len);
3498 fp->tx_pkts_freed++;
3504 QL_ASSERT(ha, (nsegs != 0), ("qlnx_send: empty packet"));
3506 if (ha->dbg_trace_tso_pkt_len) {
3507 if (nsegs < QLNX_FP_MAX_SEGS)
3508 fp->tx_pkts[(nsegs - 1)]++;
3510 fp->tx_pkts[(QLNX_FP_MAX_SEGS - 1)]++;
3513 #ifdef QLNX_TRACE_PERF_DATA
3514 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3515 if(m_head->m_pkthdr.len <= 2048)
3516 fp->tx_pkts_hist[0]++;
3517 else if((m_head->m_pkthdr.len > 2048) &&
3518 (m_head->m_pkthdr.len <= 4096))
3519 fp->tx_pkts_hist[1]++;
3520 else if((m_head->m_pkthdr.len > 4096) &&
3521 (m_head->m_pkthdr.len <= 8192))
3522 fp->tx_pkts_hist[2]++;
3523 else if((m_head->m_pkthdr.len > 8192) &&
3524 (m_head->m_pkthdr.len <= 12288 ))
3525 fp->tx_pkts_hist[3]++;
3526 else if((m_head->m_pkthdr.len > 12288) &&
3527 (m_head->m_pkthdr.len <= 16384))
3528 fp->tx_pkts_hist[4]++;
3529 else if((m_head->m_pkthdr.len > 16384) &&
3530 (m_head->m_pkthdr.len <= 20480))
3531 fp->tx_pkts_hist[5]++;
3532 else if((m_head->m_pkthdr.len > 20480) &&
3533 (m_head->m_pkthdr.len <= 24576))
3534 fp->tx_pkts_hist[6]++;
3535 else if((m_head->m_pkthdr.len > 24576) &&
3536 (m_head->m_pkthdr.len <= 28672))
3537 fp->tx_pkts_hist[7]++;
3538 else if((m_head->m_pkthdr.len > 28672) &&
3539 (m_head->m_pkthdr.len <= 32768))
3540 fp->tx_pkts_hist[8]++;
3541 else if((m_head->m_pkthdr.len > 32768) &&
3542 (m_head->m_pkthdr.len <= 36864))
3543 fp->tx_pkts_hist[9]++;
3544 else if((m_head->m_pkthdr.len > 36864) &&
3545 (m_head->m_pkthdr.len <= 40960))
3546 fp->tx_pkts_hist[10]++;
3547 else if((m_head->m_pkthdr.len > 40960) &&
3548 (m_head->m_pkthdr.len <= 45056))
3549 fp->tx_pkts_hist[11]++;
3550 else if((m_head->m_pkthdr.len > 45056) &&
3551 (m_head->m_pkthdr.len <= 49152))
3552 fp->tx_pkts_hist[12]++;
3553 else if((m_head->m_pkthdr.len > 49152) &&
3554 (m_head->m_pkthdr.len <= 53248))
3555 fp->tx_pkts_hist[13]++;
3556 else if((m_head->m_pkthdr.len > 53248) &&
3557 (m_head->m_pkthdr.len <= 57344))
3558 fp->tx_pkts_hist[14]++;
3559 else if((m_head->m_pkthdr.len > 57344) &&
3560 (m_head->m_pkthdr.len <= 61440))
3561 fp->tx_pkts_hist[15]++;
3562 else if((m_head->m_pkthdr.len > 61440) &&
3563 (m_head->m_pkthdr.len <= 65536))
3564 fp->tx_pkts_hist[16]++;
3566 fp->tx_pkts_hist[17]++;
3569 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3571 elem_left = ecore_chain_get_elem_left(&txq->tx_pbl);
3572 bd_used = TX_RING_SIZE - elem_left;
3576 else if((bd_used > 100) && (bd_used <= 500))
3578 else if((bd_used > 500) && (bd_used <= 1000))
3580 else if((bd_used > 1000) && (bd_used <= 2000))
3582 else if((bd_used > 3000) && (bd_used <= 4000))
3584 else if((bd_used > 4000) && (bd_used <= 5000))
3586 else if((bd_used > 6000) && (bd_used <= 7000))
3588 else if((bd_used > 7000) && (bd_used <= 8000))
3590 else if((bd_used > 8000) && (bd_used <= 9000))
3592 else if((bd_used > 9000) && (bd_used <= 10000))
3594 else if((bd_used > 10000) && (bd_used <= 11000))
3595 fp->tx_pkts_q[10]++;
3596 else if((bd_used > 11000) && (bd_used <= 12000))
3597 fp->tx_pkts_q[11]++;
3598 else if((bd_used > 12000) && (bd_used <= 13000))
3599 fp->tx_pkts_q[12]++;
3600 else if((bd_used > 13000) && (bd_used <= 14000))
3601 fp->tx_pkts_q[13]++;
3602 else if((bd_used > 14000) && (bd_used <= 15000))
3603 fp->tx_pkts_q[14]++;
3604 else if((bd_used > 15000) && (bd_used <= 16000))
3605 fp->tx_pkts_q[15]++;
3607 fp->tx_pkts_q[16]++;
3610 #endif /* end of QLNX_TRACE_PERF_DATA */
3612 if ((nsegs + QLNX_TX_ELEM_RESERVE) >
3613 (int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))) {
3615 QL_DPRINT1(ha, "(%d, 0x%x) insufficient BDs"
3616 " in chain[%d] trying to free packets\n",
3617 nsegs, elem_left, fp->rss_id);
3619 fp->tx_nsegs_gt_elem_left++;
3621 (void)qlnx_tx_int(ha, fp, txq);
3623 if ((nsegs + QLNX_TX_ELEM_RESERVE) > (int)(elem_left =
3624 ecore_chain_get_elem_left(&txq->tx_pbl))) {
3627 "(%d, 0x%x) insuffient BDs in chain[%d]\n",
3628 nsegs, elem_left, fp->rss_id);
3630 fp->err_tx_nsegs_gt_elem_left++;
3631 fp->tx_ring_full = 1;
3632 if (ha->storm_stats_enable)
3633 ha->storm_stats_gather = 1;
3638 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);
3640 txq->sw_tx_ring[idx].mp = m_head;
3642 first_bd = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
3644 memset(first_bd, 0, sizeof(*first_bd));
3646 first_bd->data.bd_flags.bitfields =
3647 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
3649 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, segs->ds_len);
3653 if (m_head->m_pkthdr.csum_flags & CSUM_IP) {
3654 first_bd->data.bd_flags.bitfields |=
3655 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
3658 if (m_head->m_pkthdr.csum_flags &
3659 (CSUM_UDP | CSUM_TCP | CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) {
3660 first_bd->data.bd_flags.bitfields |=
3661 (1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT);
3664 if (m_head->m_flags & M_VLANTAG) {
3665 first_bd->data.vlan = m_head->m_pkthdr.ether_vtag;
3666 first_bd->data.bd_flags.bitfields |=
3667 (1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT);
3670 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3672 first_bd->data.bd_flags.bitfields |=
3673 (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
3674 first_bd->data.bd_flags.bitfields |=
3675 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
3679 if (offset == segs->ds_len) {
3680 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
3684 second_bd = (struct eth_tx_2nd_bd *)
3685 ecore_chain_produce(&txq->tx_pbl);
3686 memset(second_bd, 0, sizeof(*second_bd));
3689 if (seg_idx < nsegs) {
3690 BD_SET_UNMAP_ADDR_LEN(second_bd, \
3691 (segs->ds_addr), (segs->ds_len));
3696 third_bd = (struct eth_tx_3rd_bd *)
3697 ecore_chain_produce(&txq->tx_pbl);
3698 memset(third_bd, 0, sizeof(*third_bd));
3699 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3700 third_bd->data.bitfields |=
3701 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3704 if (seg_idx < nsegs) {
3705 BD_SET_UNMAP_ADDR_LEN(third_bd, \
3706 (segs->ds_addr), (segs->ds_len));
3711 for (; seg_idx < nsegs; seg_idx++) {
3712 tx_data_bd = (struct eth_tx_bd *)
3713 ecore_chain_produce(&txq->tx_pbl);
3714 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3715 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \
3722 } else if (offset < segs->ds_len) {
3723 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
3725 second_bd = (struct eth_tx_2nd_bd *)
3726 ecore_chain_produce(&txq->tx_pbl);
3727 memset(second_bd, 0, sizeof(*second_bd));
3728 BD_SET_UNMAP_ADDR_LEN(second_bd, \
3729 (segs->ds_addr + offset),\
3730 (segs->ds_len - offset));
3734 third_bd = (struct eth_tx_3rd_bd *)
3735 ecore_chain_produce(&txq->tx_pbl);
3736 memset(third_bd, 0, sizeof(*third_bd));
3738 BD_SET_UNMAP_ADDR_LEN(third_bd, \
3741 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3742 third_bd->data.bitfields |=
3743 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3747 for (seg_idx = 2; seg_idx < nsegs; seg_idx++) {
3748 tx_data_bd = (struct eth_tx_bd *)
3749 ecore_chain_produce(&txq->tx_pbl);
3750 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3751 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \
3759 offset = offset - segs->ds_len;
3762 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) {
3767 tx_data_bd = (struct eth_tx_bd *)
3768 ecore_chain_produce(&txq->tx_pbl);
3769 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3771 if (second_bd == NULL) {
3772 second_bd = (struct eth_tx_2nd_bd *)
3774 } else if (third_bd == NULL) {
3775 third_bd = (struct eth_tx_3rd_bd *)
3779 if (offset && (offset < segs->ds_len)) {
3780 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3781 segs->ds_addr, offset);
3783 tx_data_bd = (struct eth_tx_bd *)
3784 ecore_chain_produce(&txq->tx_pbl);
3786 memset(tx_data_bd, 0,
3787 sizeof(*tx_data_bd));
3789 if (second_bd == NULL) {
3791 (struct eth_tx_2nd_bd *)tx_data_bd;
3792 } else if (third_bd == NULL) {
3794 (struct eth_tx_3rd_bd *)tx_data_bd;
3796 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3797 (segs->ds_addr + offset), \
3798 (segs->ds_len - offset));
3803 offset = offset - segs->ds_len;
3804 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3805 segs->ds_addr, segs->ds_len);
3811 if (third_bd == NULL) {
3812 third_bd = (struct eth_tx_3rd_bd *)
3813 ecore_chain_produce(&txq->tx_pbl);
3814 memset(third_bd, 0, sizeof(*third_bd));
3817 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3818 third_bd->data.bitfields |=
3819 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3824 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) {
3825 tx_data_bd = (struct eth_tx_bd *)
3826 ecore_chain_produce(&txq->tx_pbl);
3827 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3828 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, segs->ds_addr,\
3833 first_bd->data.bitfields =
3834 (m_head->m_pkthdr.len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
3835 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
3836 first_bd->data.bitfields =
3837 htole16(first_bd->data.bitfields);
3838 fp->tx_non_tso_pkts++;
3842 first_bd->data.nbds = nbd;
3844 if (ha->dbg_trace_tso_pkt_len) {
3845 if (fp->tx_tso_max_nsegs < nsegs)
3846 fp->tx_tso_max_nsegs = nsegs;
3848 if ((nsegs < fp->tx_tso_min_nsegs) || (!fp->tx_tso_min_nsegs))
3849 fp->tx_tso_min_nsegs = nsegs;
3852 txq->sw_tx_ring[idx].nsegs = nsegs;
3853 txq->sw_tx_prod = (txq->sw_tx_prod + 1) & (TX_RING_SIZE - 1);
3855 txq->tx_db.data.bd_prod =
3856 htole16(ecore_chain_get_prod_idx(&txq->tx_pbl));
3858 qlnx_txq_doorbell_wr32(ha, txq->doorbell_addr, txq->tx_db.raw);
3860 QL_DPRINT8(ha, "exit[%d]\n", fp->rss_id);
3865 qlnx_stop(qlnx_host_t *ha)
3867 struct ifnet *ifp = ha->ifp;
3873 ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
3876 * We simply lock and unlock each fp->tx_mtx to
3877 * propagate the if_drv_flags
3878 * state to each tx thread
3880 QL_DPRINT1(ha, "QLNX STATE = %d\n", ha->state);
3882 if (ha->state == QLNX_STATE_OPEN) {
3883 for (i = 0; i < ha->num_rss; i++) {
3884 struct qlnx_fastpath *fp = &ha->fp_array[i];
3886 mtx_lock(&fp->tx_mtx);
3887 mtx_unlock(&fp->tx_mtx);
3889 if (fp->fp_taskqueue != NULL)
3890 taskqueue_enqueue(fp->fp_taskqueue,
3894 #ifdef QLNX_ENABLE_IWARP
3895 if (qlnx_vf_device(ha) != 0) {
3896 qlnx_rdma_dev_close(ha);
3898 #endif /* #ifdef QLNX_ENABLE_IWARP */
3906 qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha)
3908 return(TX_RING_SIZE - 1);
3912 qlnx_get_mac_addr(qlnx_host_t *ha)
3914 struct ecore_hwfn *p_hwfn;
3915 unsigned char mac[ETHER_ADDR_LEN];
3916 uint8_t p_is_forced;
3918 p_hwfn = &ha->cdev.hwfns[0];
3920 if (qlnx_vf_device(ha) != 0)
3921 return (p_hwfn->hw_info.hw_mac_addr);
3923 ecore_vf_read_bulletin(p_hwfn, &p_is_forced);
3924 if (ecore_vf_bulletin_get_forced_mac(p_hwfn, mac, &p_is_forced) ==
3926 device_printf(ha->pci_dev, "%s: p_is_forced = %d"
3927 " mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n", __func__,
3928 p_is_forced, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3929 memcpy(ha->primary_mac, mac, ETH_ALEN);
3932 return (ha->primary_mac);
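/*
 * qlnx_get_optics: map the reported media type and link speed to an
 * ifmedia subtype for ifconfig; unknown combinations fall back to
 * IFM_UNKNOWN.
 */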
3936 qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link)
3938 uint32_t ifm_type = 0;
3940 switch (if_link->media_type) {
3942 case MEDIA_MODULE_FIBER:
3943 case MEDIA_UNSPECIFIED:
3944 if (if_link->speed == (100 * 1000))
3945 ifm_type = QLNX_IFM_100G_SR4;
3946 else if (if_link->speed == (40 * 1000))
3947 ifm_type = IFM_40G_SR4;
3948 else if (if_link->speed == (25 * 1000))
3949 ifm_type = QLNX_IFM_25G_SR;
3950 else if (if_link->speed == (10 * 1000))
3951 ifm_type = (IFM_10G_LR | IFM_10G_SR);
3952 else if (if_link->speed == (1 * 1000))
3953 ifm_type = (IFM_1000_SX | IFM_1000_LX);
3957 case MEDIA_DA_TWINAX:
3958 if (if_link->speed == (100 * 1000))
3959 ifm_type = QLNX_IFM_100G_CR4;
3960 else if (if_link->speed == (40 * 1000))
3961 ifm_type = IFM_40G_CR4;
3962 else if (if_link->speed == (25 * 1000))
3963 ifm_type = QLNX_IFM_25G_CR;
3964 else if (if_link->speed == (10 * 1000))
3965 ifm_type = IFM_10G_TWINAX;
3970 ifm_type = IFM_UNKNOWN;
3978 /*****************************************************************************
3979 * Interrupt Service Functions
3980 *****************************************************************************/
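/*
 * qlnx_rx_jumbo_chain: a frame larger than one RX buffer spans
 * multiple BDs; pull the continuation buffers off the RX ring, link
 * them as non-PKTHDR mbufs, and append the chain to mp_head.
 */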
3983 qlnx_rx_jumbo_chain(qlnx_host_t *ha, struct qlnx_fastpath *fp,
3984 struct mbuf *mp_head, uint16_t len)
3986 struct mbuf *mp, *mpf, *mpl;
3987 struct sw_rx_data *sw_rx_data;
3988 struct qlnx_rx_queue *rxq;
3989 uint16_t len_in_buffer;
3992 mpf = mpl = mp = NULL;
3996 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3998 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3999 mp = sw_rx_data->data;
4002 QL_DPRINT1(ha, "mp = NULL\n");
4003 fp->err_rx_mp_null++;
4005 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4012 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4013 BUS_DMASYNC_POSTREAD);
4015 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4017 QL_DPRINT1(ha, "New buffer allocation failed, dropping"
4018 " incoming packet and reusing its buffer\n");
4020 qlnx_reuse_rx_data(rxq);
4021 fp->err_rx_alloc_errors++;
4028 ecore_chain_consume(&rxq->rx_bd_ring);
4030 if (len > rxq->rx_buf_size)
4031 len_in_buffer = rxq->rx_buf_size;
4033 len_in_buffer = len;
4035 len = len - len_in_buffer;
4037 mp->m_flags &= ~M_PKTHDR;
4039 mp->m_len = len_in_buffer;
4050 mp_head->m_next = mpf;
4056 qlnx_tpa_start(qlnx_host_t *ha,
4057 struct qlnx_fastpath *fp,
4058 struct qlnx_rx_queue *rxq,
4059 struct eth_fast_path_rx_tpa_start_cqe *cqe)
4062 struct ifnet *ifp = ha->ifp;
4064 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL;
4065 struct sw_rx_data *sw_rx_data;
4068 struct eth_rx_bd *rx_bd;
4071 #if __FreeBSD_version >= 1100000
4073 #endif /* #if __FreeBSD_version >= 1100000 */
4076 agg_index = cqe->tpa_agg_index;
4078 QL_DPRINT7(ha, "[rss_id = %d]: enter\n \
4080 \t bitfields = 0x%x\n \
4081 \t seg_len = 0x%x\n \
4082 \t pars_flags = 0x%x\n \
4083 \t vlan_tag = 0x%x\n \
4084 \t rss_hash = 0x%x\n \
4085 \t len_on_first_bd = 0x%x\n \
4086 \t placement_offset = 0x%x\n \
4087 \t tpa_agg_index = 0x%x\n \
4088 \t header_len = 0x%x\n \
4089 \t ext_bd_len_list[0] = 0x%x\n \
4090 \t ext_bd_len_list[1] = 0x%x\n \
4091 \t ext_bd_len_list[2] = 0x%x\n \
4092 \t ext_bd_len_list[3] = 0x%x\n \
4093 \t ext_bd_len_list[4] = 0x%x\n",
4094 fp->rss_id, cqe->type, cqe->bitfields, cqe->seg_len,
4095 cqe->pars_flags.flags, cqe->vlan_tag,
4096 cqe->rss_hash, cqe->len_on_first_bd, cqe->placement_offset,
4097 cqe->tpa_agg_index, cqe->header_len,
4098 cqe->ext_bd_len_list[0], cqe->ext_bd_len_list[1],
4099 cqe->ext_bd_len_list[2], cqe->ext_bd_len_list[3],
4100 cqe->ext_bd_len_list[4]);
4102 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
4103 fp->err_rx_tpa_invalid_agg_num++;
4107 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4108 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD);
4109 mp = sw_rx_data->data;
4111 QL_DPRINT7(ha, "[rss_id = %d]: mp = %p \n ", fp->rss_id, mp);
4114 QL_DPRINT7(ha, "[%d]: mp = NULL\n", fp->rss_id);
4115 fp->err_rx_mp_null++;
4116 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4121 if ((le16toh(cqe->pars_flags.flags)) & CQE_FLAGS_ERR) {
4123 QL_DPRINT7(ha, "[%d]: CQE in CONS = %u has error,"
4124 " flags = %x, dropping incoming packet\n", fp->rss_id,
4125 rxq->sw_rx_cons, le16toh(cqe->pars_flags.flags));
4127 fp->err_rx_hw_errors++;
4129 qlnx_reuse_rx_data(rxq);
4131 QLNX_INC_IERRORS(ifp);
4136 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4138 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4139 " dropping incoming packet and reusing its buffer\n",
4142 fp->err_rx_alloc_errors++;
4143 QLNX_INC_IQDROPS(ifp);
4146 * Load the tpa mbuf into the rx ring and save the
4150 map = sw_rx_data->map;
4151 addr = sw_rx_data->dma_addr;
4153 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
4155 sw_rx_data->data = rxq->tpa_info[agg_index].rx_buf.data;
4156 sw_rx_data->dma_addr = rxq->tpa_info[agg_index].rx_buf.dma_addr;
4157 sw_rx_data->map = rxq->tpa_info[agg_index].rx_buf.map;
4159 rxq->tpa_info[agg_index].rx_buf.data = mp;
4160 rxq->tpa_info[agg_index].rx_buf.dma_addr = addr;
4161 rxq->tpa_info[agg_index].rx_buf.map = map;
4163 rx_bd = (struct eth_rx_bd *)
4164 ecore_chain_produce(&rxq->rx_bd_ring);
4166 rx_bd->addr.hi = htole32(U64_HI(sw_rx_data->dma_addr));
4167 rx_bd->addr.lo = htole32(U64_LO(sw_rx_data->dma_addr));
4169 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4170 BUS_DMASYNC_PREREAD);
4172 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
4173 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4175 ecore_chain_consume(&rxq->rx_bd_ring);
4177 /* Now reuse any buffers posted in ext_bd_len_list */
4178 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
4180 if (cqe->ext_bd_len_list[i] == 0)
4183 qlnx_reuse_rx_data(rxq);
4186 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
4190 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
4192 QL_DPRINT7(ha, "[%d]: invalid aggregation state,"
4193 " dropping incoming packet and reusing its buffer\n",
4196 QLNX_INC_IQDROPS(ifp);
4198 /* if we already have an mbuf head in aggregation, free it */
4199 if (rxq->tpa_info[agg_index].mpf) {
4200 m_freem(rxq->tpa_info[agg_index].mpf);
4201 rxq->tpa_info[agg_index].mpl = NULL;
4203 rxq->tpa_info[agg_index].mpf = mp;
4204 rxq->tpa_info[agg_index].mpl = NULL;
4206 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4207 ecore_chain_consume(&rxq->rx_bd_ring);
4209 /* Now reuse any buffers posted in ext_bd_len_list */
4210 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
4212 if (cqe->ext_bd_len_list[i] == 0)
4215 qlnx_reuse_rx_data(rxq);
4217 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
4223 * first process the ext_bd_len_list
4224 * if this fails then we simply drop the packet
4226 ecore_chain_consume(&rxq->rx_bd_ring);
4227 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4229 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
4231 QL_DPRINT7(ha, "[%d]: 4\n ", fp->rss_id);
4233 if (cqe->ext_bd_len_list[i] == 0)
4236 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4237 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4238 BUS_DMASYNC_POSTREAD);
4240 mpc = sw_rx_data->data;
4243 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4244 fp->err_rx_mp_null++;
4248 rxq->tpa_info[agg_index].agg_state =
4249 QLNX_AGG_STATE_ERROR;
4250 ecore_chain_consume(&rxq->rx_bd_ring);
4252 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4256 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4257 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4258 " dropping incoming packet and reusing its"
4259 " buffer\n", fp->rss_id);
4261 qlnx_reuse_rx_data(rxq);
4267 rxq->tpa_info[agg_index].agg_state =
4268 QLNX_AGG_STATE_ERROR;
4270 ecore_chain_consume(&rxq->rx_bd_ring);
4272 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4277 mpc->m_flags &= ~M_PKTHDR;
4279 mpc->m_len = cqe->ext_bd_len_list[i];
4285 mpl->m_len = ha->rx_buf_size;
4290 ecore_chain_consume(&rxq->rx_bd_ring);
4292 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4295 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
4297 QL_DPRINT7(ha, "[%d]: invalid aggregation state, dropping"
4298 " incoming packet and reusing its buffer\n",
4301 QLNX_INC_IQDROPS(ifp);
4303 rxq->tpa_info[agg_index].mpf = mp;
4304 rxq->tpa_info[agg_index].mpl = NULL;
4309 rxq->tpa_info[agg_index].placement_offset = cqe->placement_offset;
4312 mp->m_len = ha->rx_buf_size;
4314 rxq->tpa_info[agg_index].mpf = mp;
4315 rxq->tpa_info[agg_index].mpl = mpl;
4317 mp->m_len = cqe->len_on_first_bd + cqe->placement_offset;
4318 rxq->tpa_info[agg_index].mpf = mp;
4319 rxq->tpa_info[agg_index].mpl = mp;
4323 mp->m_flags |= M_PKTHDR;
4325 /* assign packet to this interface */
4326 mp->m_pkthdr.rcvif = ifp;
4328 /* assume no hardware checksum has completed */
4329 mp->m_pkthdr.csum_flags = 0;
4331 //mp->m_pkthdr.flowid = fp->rss_id;
4332 mp->m_pkthdr.flowid = cqe->rss_hash;
4334 #if __FreeBSD_version >= 1100000
4336 hash_type = cqe->bitfields &
4337 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
4338 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);
4340 switch (hash_type) {
4342 case RSS_HASH_TYPE_IPV4:
4343 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
4346 case RSS_HASH_TYPE_TCP_IPV4:
4347 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
4350 case RSS_HASH_TYPE_IPV6:
4351 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
4354 case RSS_HASH_TYPE_TCP_IPV6:
4355 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
4359 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
4364 mp->m_flags |= M_FLOWID;
4367 mp->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID |
4368 CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4370 mp->m_pkthdr.csum_data = 0xFFFF;
4372 if (CQE_HAS_VLAN(cqe->pars_flags.flags)) {
4373 mp->m_pkthdr.ether_vtag = le16toh(cqe->vlan_tag);
4374 mp->m_flags |= M_VLANTAG;
4377 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_START;
4379 QL_DPRINT7(ha, "[%d]: 5\n\tagg_state = %d\n\t mpf = %p mpl = %p\n",
4380 fp->rss_id, rxq->tpa_info[agg_index].agg_state,
4381 rxq->tpa_info[agg_index].mpf, rxq->tpa_info[agg_index].mpl);
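/*
 * qlnx_tpa_cont: middle CQE of a TPA (hardware LRO) aggregation.
 * Each non-zero len_list[] entry consumes one RX BD whose mbuf is
 * appended to the aggregation begun in qlnx_tpa_start().
 */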
4387 qlnx_tpa_cont(qlnx_host_t *ha, struct qlnx_fastpath *fp,
4388 struct qlnx_rx_queue *rxq,
4389 struct eth_fast_path_rx_tpa_cont_cqe *cqe)
4391 struct sw_rx_data *sw_rx_data;
4393 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL;
4400 QL_DPRINT7(ha, "[%d]: enter\n \
4402 \t tpa_agg_index = 0x%x\n \
4403 \t len_list[0] = 0x%x\n \
4404 \t len_list[1] = 0x%x\n \
4405 \t len_list[2] = 0x%x\n \
4406 \t len_list[3] = 0x%x\n \
4407 \t len_list[4] = 0x%x\n \
4408 \t len_list[5] = 0x%x\n",
4409 fp->rss_id, cqe->type, cqe->tpa_agg_index,
4410 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
4411 cqe->len_list[3], cqe->len_list[4], cqe->len_list[5]);
4413 agg_index = cqe->tpa_agg_index;
4415 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
4416 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
4417 fp->err_rx_tpa_invalid_agg_num++;
4422 for (i = 0; i < ETH_TPA_CQE_CONT_LEN_LIST_SIZE; i++) {
4424 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);
4426 if (cqe->len_list[i] == 0)
4429 if (rxq->tpa_info[agg_index].agg_state !=
4430 QLNX_AGG_STATE_START) {
4431 qlnx_reuse_rx_data(rxq);
4435 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4436 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4437 BUS_DMASYNC_POSTREAD);
4439 mpc = sw_rx_data->data;
4443 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4445 fp->err_rx_mp_null++;
4449 rxq->tpa_info[agg_index].agg_state =
4450 QLNX_AGG_STATE_ERROR;
4451 ecore_chain_consume(&rxq->rx_bd_ring);
4453 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4457 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4459 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4460 " dropping incoming packet and reusing its"
4461 " buffer\n", fp->rss_id);
4463 qlnx_reuse_rx_data(rxq);
4469 rxq->tpa_info[agg_index].agg_state =
4470 QLNX_AGG_STATE_ERROR;
4472 ecore_chain_consume(&rxq->rx_bd_ring);
4474 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4479 mpc->m_flags &= ~M_PKTHDR;
4481 mpc->m_len = cqe->len_list[i];
4487 mpl->m_len = ha->rx_buf_size;
4492 ecore_chain_consume(&rxq->rx_bd_ring);
4494 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4497 QL_DPRINT7(ha, "[%d]: 2\n" "\tmpf = %p mpl = %p\n",
4498 fp->rss_id, mpf, mpl);
4501 mp = rxq->tpa_info[agg_index].mpl;
4502 mp->m_len = ha->rx_buf_size;
4504 rxq->tpa_info[agg_index].mpl = mpl;
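/*
 * qlnx_tpa_end: final CQE of a TPA aggregation. Consume any trailing
 * BDs, fix up the head mbuf's packet-header length, and pass the
 * completed chain to the stack; returns the number of coalesced
 * segments.
 */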
4511 qlnx_tpa_end(qlnx_host_t *ha, struct qlnx_fastpath *fp,
4512 struct qlnx_rx_queue *rxq,
4513 struct eth_fast_path_rx_tpa_end_cqe *cqe)
4515 struct sw_rx_data *sw_rx_data;
4517 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL;
4521 struct ifnet *ifp = ha->ifp;
4526 QL_DPRINT7(ha, "[%d]: enter\n \
4528 \t tpa_agg_index = 0x%x\n \
4529 \t total_packet_len = 0x%x\n \
4530 \t num_of_bds = 0x%x\n \
4531 \t end_reason = 0x%x\n \
4532 \t num_of_coalesced_segs = 0x%x\n \
4533 \t ts_delta = 0x%x\n \
4534 \t len_list[0] = 0x%x\n \
4535 \t len_list[1] = 0x%x\n \
4536 \t len_list[2] = 0x%x\n \
4537 \t len_list[3] = 0x%x\n",
4538 fp->rss_id, cqe->type, cqe->tpa_agg_index,
4539 cqe->total_packet_len, cqe->num_of_bds,
4540 cqe->end_reason, cqe->num_of_coalesced_segs, cqe->ts_delta,
4541 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
4544 agg_index = cqe->tpa_agg_index;
4546 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
4548 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
4550 fp->err_rx_tpa_invalid_agg_num++;
4555 for (i = 0; i < ETH_TPA_CQE_END_LEN_LIST_SIZE; i++) {
4557 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);
4559 if (cqe->len_list[i] == 0)
4562 if (rxq->tpa_info[agg_index].agg_state !=
4563 QLNX_AGG_STATE_START) {
4565 QL_DPRINT7(ha, "[%d]: 2\n ", fp->rss_id);
4567 qlnx_reuse_rx_data(rxq);
4571 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4572 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4573 BUS_DMASYNC_POSTREAD);
4575 mpc = sw_rx_data->data;
4579 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4581 fp->err_rx_mp_null++;
4585 rxq->tpa_info[agg_index].agg_state =
4586 QLNX_AGG_STATE_ERROR;
4587 ecore_chain_consume(&rxq->rx_bd_ring);
4589 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4593 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4594 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4595 " dropping incoming packet and reusing its"
4596 " buffer\n", fp->rss_id);
4598 qlnx_reuse_rx_data(rxq);
4604 rxq->tpa_info[agg_index].agg_state =
4605 QLNX_AGG_STATE_ERROR;
4607 ecore_chain_consume(&rxq->rx_bd_ring);
4609 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4614 mpc->m_flags &= ~M_PKTHDR;
4616 mpc->m_len = cqe->len_list[i];
4622 mpl->m_len = ha->rx_buf_size;
4627 ecore_chain_consume(&rxq->rx_bd_ring);
4629 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4632 QL_DPRINT7(ha, "[%d]: 5\n ", fp->rss_id);
4636 QL_DPRINT7(ha, "[%d]: 6\n ", fp->rss_id);
4638 mp = rxq->tpa_info[agg_index].mpl;
4639 mp->m_len = ha->rx_buf_size;
4643 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_START) {
4645 QL_DPRINT7(ha, "[%d]: 7\n ", fp->rss_id);
4647 if (rxq->tpa_info[agg_index].mpf != NULL)
4648 m_freem(rxq->tpa_info[agg_index].mpf);
4649 rxq->tpa_info[agg_index].mpf = NULL;
4650 rxq->tpa_info[agg_index].mpl = NULL;
4651 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
4655 mp = rxq->tpa_info[agg_index].mpf;
4656 m_adj(mp, rxq->tpa_info[agg_index].placement_offset);
4657 mp->m_pkthdr.len = cqe->total_packet_len;
4659 if (mp->m_next == NULL)
4660 mp->m_len = mp->m_pkthdr.len;
4662 /* compute the total packet length */
4664 while (mpf != NULL) {
4669 if (cqe->total_packet_len > len) {
4670 mpl = rxq->tpa_info[agg_index].mpl;
4671 mpl->m_len += (cqe->total_packet_len - len);
4675 QLNX_INC_IPACKETS(ifp);
4676 QLNX_INC_IBYTES(ifp, (cqe->total_packet_len));
4678 QL_DPRINT7(ha, "[%d]: 8 csum_data = 0x%x csum_flags = 0x%" PRIx64 "\n \
4679 m_len = 0x%x m_pkthdr_len = 0x%x\n",
4680 fp->rss_id, mp->m_pkthdr.csum_data,
4681 (uint64_t)mp->m_pkthdr.csum_flags, mp->m_len, mp->m_pkthdr.len);
4683 (*ifp->if_input)(ifp, mp);
4685 rxq->tpa_info[agg_index].mpf = NULL;
4686 rxq->tpa_info[agg_index].mpl = NULL;
4687 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
4689 return (cqe->num_of_coalesced_segs);
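/*
 * qlnx_rx_int: main RX completion loop. Walks the completion ring
 * until the software consumer index catches up with the hardware
 * producer, dispatching slowpath, TPA and regular CQEs, and stops
 * once 'budget' packets have been received.
 */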
4693 qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
4696 uint16_t hw_comp_cons, sw_comp_cons;
4698 struct qlnx_rx_queue *rxq = fp->rxq;
4699 struct ifnet *ifp = ha->ifp;
4700 struct ecore_dev *cdev = &ha->cdev;
4701 struct ecore_hwfn *p_hwfn;
4703 #ifdef QLNX_SOFT_LRO
4704 struct lro_ctrl *lro;
4707 #endif /* #ifdef QLNX_SOFT_LRO */
4709 hw_comp_cons = le16toh(*rxq->hw_cons_ptr);
4710 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
4712 p_hwfn = &ha->cdev.hwfns[(fp->rss_id % cdev->num_hwfns)];
4714 /* Memory barrier to prevent the CPU from speculatively reading the CQE
4715 * / BD in the while-loop before reading hw_comp_cons. If the CQE were
4716 * read before FW writes it, then FW writes the CQE and SB, and the CPU
4717 * later reads hw_comp_cons, the CPU would use a stale CQE.
4720 /* Loop to complete all indicated BDs */
4721 while (sw_comp_cons != hw_comp_cons) {
4722 union eth_rx_cqe *cqe;
4723 struct eth_fast_path_rx_reg_cqe *fp_cqe;
4724 struct sw_rx_data *sw_rx_data;
4725 register struct mbuf *mp;
4726 enum eth_rx_cqe_type cqe_type;
4727 uint16_t len, pad, len_on_first_bd;
4729 #if __FreeBSD_version >= 1100000
4731 #endif /* #if __FreeBSD_version >= 1100000 */
4733 /* Get the CQE from the completion ring */
4734 cqe = (union eth_rx_cqe *)
4735 ecore_chain_consume(&rxq->rx_comp_ring);
4736 cqe_type = cqe->fast_path_regular.type;
4738 if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) {
4739 QL_DPRINT3(ha, "Got a slowpath CQE\n");
4741 ecore_eth_cqe_completion(p_hwfn,
4742 (struct eth_slow_path_rx_cqe *)cqe);
4746 if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) {
4750 case ETH_RX_CQE_TYPE_TPA_START:
4751 qlnx_tpa_start(ha, fp, rxq,
4752 &cqe->fast_path_tpa_start);
4756 case ETH_RX_CQE_TYPE_TPA_CONT:
4757 qlnx_tpa_cont(ha, fp, rxq,
4758 &cqe->fast_path_tpa_cont);
4762 case ETH_RX_CQE_TYPE_TPA_END:
4763 rx_pkt += qlnx_tpa_end(ha, fp, rxq,
4764 &cqe->fast_path_tpa_end);
4775 /* Get the data from the SW ring */
4776 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4777 mp = sw_rx_data->data;
4780 QL_DPRINT1(ha, "mp = NULL\n");
4781 fp->err_rx_mp_null++;
4783 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4786 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4787 BUS_DMASYNC_POSTREAD);
4790 fp_cqe = &cqe->fast_path_regular; /* MK CR TPA check assembly */
4791 len = le16toh(fp_cqe->pkt_len);
4792 pad = fp_cqe->placement_offset;
4794 QL_DPRINT3(ha, "CQE type = %x, flags = %x, vlan = %x,"
4795 " len %u, parsing flags = %d pad = %d\n",
4796 cqe_type, fp_cqe->bitfields,
4797 le16toh(fp_cqe->vlan_tag),
4798 len, le16toh(fp_cqe->pars_flags.flags), pad);
4800 data = mtod(mp, uint8_t *);
4804 qlnx_dump_buf8(ha, __func__, data, len);
4806 /* For every Rx BD consumed, we allocate a new BD so the BD ring
4807 * always has a fixed size. If allocation fails, we take the
4808 * consumed BD and return it to the ring in the PROD position.
4809 * The packet that was received on that BD will be dropped (and
4810 * not passed to the upper stack).
4812 /* If this is an error packet then drop it */
4813 if ((le16toh(cqe->fast_path_regular.pars_flags.flags)) &
4816 QL_DPRINT1(ha, "CQE in CONS = %u has error, flags = %x,"
4817 " dropping incoming packet\n", sw_comp_cons,
4818 le16toh(cqe->fast_path_regular.pars_flags.flags));
4819 fp->err_rx_hw_errors++;
4821 qlnx_reuse_rx_data(rxq);
4823 QLNX_INC_IERRORS(ifp);
4828 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4830 QL_DPRINT1(ha, "New buffer allocation failed, dropping"
4831 " incoming packet and reusing its buffer\n");
4832 qlnx_reuse_rx_data(rxq);
4834 fp->err_rx_alloc_errors++;
4836 QLNX_INC_IQDROPS(ifp);
4841 ecore_chain_consume(&rxq->rx_bd_ring);
4843 len_on_first_bd = fp_cqe->len_on_first_bd;
4845 mp->m_pkthdr.len = len;
4847 if ((len > 60) && (len > len_on_first_bd)) {
4849 mp->m_len = len_on_first_bd;
4851 if (qlnx_rx_jumbo_chain(ha, fp, mp,
4852 (len - len_on_first_bd)) != 0) {
4856 QLNX_INC_IQDROPS(ifp);
4861 } else if (len_on_first_bd < len) {
4862 fp->err_rx_jumbo_chain_pkts++;
4867 mp->m_flags |= M_PKTHDR;
4869 /* assign packet to this interface */
4870 mp->m_pkthdr.rcvif = ifp;
4872 /* assume no hardware checksum has completed */
4873 mp->m_pkthdr.csum_flags = 0;
4875 mp->m_pkthdr.flowid = fp_cqe->rss_hash;
4877 #if __FreeBSD_version >= 1100000
4879 hash_type = fp_cqe->bitfields &
4880 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
4881 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);
4883 switch (hash_type) {
4885 case RSS_HASH_TYPE_IPV4:
4886 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
4889 case RSS_HASH_TYPE_TCP_IPV4:
4890 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
4893 case RSS_HASH_TYPE_IPV6:
4894 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
4897 case RSS_HASH_TYPE_TCP_IPV6:
4898 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
4902 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
4907 mp->m_flags |= M_FLOWID;
4910 if (CQE_L3_PACKET(fp_cqe->pars_flags.flags)) {
4911 mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
4914 if (!(CQE_IP_HDR_ERR(fp_cqe->pars_flags.flags))) {
4915 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4918 if (CQE_L4_HAS_CSUM(fp_cqe->pars_flags.flags)) {
4919 mp->m_pkthdr.csum_data = 0xFFFF;
4920 mp->m_pkthdr.csum_flags |=
4921 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4924 if (CQE_HAS_VLAN(fp_cqe->pars_flags.flags)) {
4925 mp->m_pkthdr.ether_vtag = le16toh(fp_cqe->vlan_tag);
4926 mp->m_flags |= M_VLANTAG;
4929 QLNX_INC_IPACKETS(ifp);
4930 QLNX_INC_IBYTES(ifp, len);
4932 #ifdef QLNX_SOFT_LRO
4936 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
4938 tcp_lro_queue_mbuf(lro, mp);
4942 if (tcp_lro_rx(lro, mp, 0))
4943 (*ifp->if_input)(ifp, mp);
4945 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
4948 (*ifp->if_input)(ifp, mp);
4952 (*ifp->if_input)(ifp, mp);
4954 #endif /* #ifdef QLNX_SOFT_LRO */
4958 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4960 next_cqe: /* don't consume bd rx buffer */
4961 ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
4962 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
4964 /* CR TPA - revisit how to handle budget in TPA perhaps
4965 increase on "end" */
4966 if (rx_pkt == budget)
4968 } /* repeat while sw_comp_cons != hw_comp_cons... */
4970 /* Update producers */
4971 qlnx_update_rx_prod(p_hwfn, rxq);
4978 * fast path interrupt
4982 qlnx_fp_isr(void *arg)
4984 qlnx_ivec_t *ivec = arg;
4986 struct qlnx_fastpath *fp = NULL;
4991 if (ha->state != QLNX_STATE_OPEN) {
4995 idx = ivec->rss_idx;
4997 if (idx >= ha->num_rss) {
4998 QL_DPRINT1(ha, "illegal interrupt[%d]\n", idx);
4999 ha->err_illegal_intr++;
5002 fp = &ha->fp_array[idx];
5007 int rx_int = 0, total_rx_count = 0;
5009 struct qlnx_tx_queue *txq;
5012 lro_enable = ha->ifp->if_capenable & IFCAP_LRO;
5014 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
5017 for (tc = 0; tc < ha->num_tc; tc++) {
5021 if((int)(elem_left =
5022 ecore_chain_get_elem_left(&txq->tx_pbl)) <
5023 QLNX_TX_ELEM_THRESH) {
5025 if (mtx_trylock(&fp->tx_mtx)) {
5026 #ifdef QLNX_TRACE_PERF_DATA
5027 tx_compl = fp->tx_pkts_completed;
5030 qlnx_tx_int(ha, fp, fp->txq[tc]);
5031 #ifdef QLNX_TRACE_PERF_DATA
5032 fp->tx_pkts_compl_intr +=
5033 (fp->tx_pkts_completed - tx_compl);
5034 if ((fp->tx_pkts_completed - tx_compl) <= 32)
5036 else if (((fp->tx_pkts_completed - tx_compl) > 32) &&
5037 ((fp->tx_pkts_completed - tx_compl) <= 64))
5039 else if(((fp->tx_pkts_completed - tx_compl) > 64) &&
5040 ((fp->tx_pkts_completed - tx_compl) <= 128))
5042 else if(((fp->tx_pkts_completed - tx_compl) > 128))
5045 mtx_unlock(&fp->tx_mtx);
5050 rx_int = qlnx_rx_int(ha, fp, ha->rx_pkt_threshold,
5054 fp->rx_pkts += rx_int;
5055 total_rx_count += rx_int;
5060 #ifdef QLNX_SOFT_LRO
5062 struct lro_ctrl *lro;
5064 lro = &fp->rxq->lro;
5066 if (lro_enable && total_rx_count) {
5068 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
5070 #ifdef QLNX_TRACE_LRO_CNT
5071 if (lro->lro_mbuf_count & ~1023)
5073 else if (lro->lro_mbuf_count & ~511)
5075 else if (lro->lro_mbuf_count & ~255)
5077 else if (lro->lro_mbuf_count & ~127)
5079 else if (lro->lro_mbuf_count & ~63)
5081 #endif /* #ifdef QLNX_TRACE_LRO_CNT */
5083 tcp_lro_flush_all(lro);
5086 struct lro_entry *queued;
5088 while ((!SLIST_EMPTY(&lro->lro_active))) {
5089 queued = SLIST_FIRST(&lro->lro_active);
5090 SLIST_REMOVE_HEAD(&lro->lro_active, \
5092 tcp_lro_flush(lro, queued);
5094 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
5097 #endif /* #ifdef QLNX_SOFT_LRO */
5099 ecore_sb_update_sb_idx(fp->sb_info);
5101 ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
5109 * slow path interrupt processing function
5110 * can be invoked in polled mode or in interrupt mode via taskqueue.
5113 qlnx_sp_isr(void *arg)
5115 struct ecore_hwfn *p_hwfn;
5120 ha = (qlnx_host_t *)p_hwfn->p_dev;
5122 ha->sp_interrupts++;
5124 QL_DPRINT2(ha, "enter\n");
5126 ecore_int_sp_dpc(p_hwfn);
5128 QL_DPRINT2(ha, "exit\n");
5133 /*****************************************************************************
5134 * Support Functions for DMA'able Memory
5135 *****************************************************************************/
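/*
 * bus_dmamap_load() callback: record the bus address of the single
 * segment in *arg, or leave it 0 if the load reported an error.
 */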
5138 qlnx_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
5140 *((bus_addr_t *)arg) = 0;
5143 printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
5147 *((bus_addr_t *)arg) = segs[0].ds_addr;
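/*
 * Allocate one DMA-coherent buffer of dma_buf->size bytes: create a
 * per-buffer tag under the parent tag, allocate zeroed coherent memory,
 * and load the map through qlnx_dmamap_callback() to recover the bus
 * address into dma_buf->dma_addr. Resources already acquired are torn
 * down on any intermediate failure.
 */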
5153 qlnx_alloc_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
5161 ret = bus_dma_tag_create(
5162 ha->parent_tag,/* parent */
5164 ((bus_size_t)(1ULL << 32)),/* boundary */
5165 BUS_SPACE_MAXADDR, /* lowaddr */
5166 BUS_SPACE_MAXADDR, /* highaddr */
5167 NULL, NULL, /* filter, filterarg */
5168 dma_buf->size, /* maxsize */
5170 dma_buf->size, /* maxsegsize */
5172 NULL, NULL, /* lockfunc, lockarg */
5176 QL_DPRINT1(ha, "could not create dma tag\n");
5177 goto qlnx_alloc_dmabuf_exit;
5179 ret = bus_dmamem_alloc(dma_buf->dma_tag,
5180 (void **)&dma_buf->dma_b,
5181 (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
5184 bus_dma_tag_destroy(dma_buf->dma_tag);
5185 QL_DPRINT1(ha, "bus_dmamem_alloc failed\n");
5186 goto qlnx_alloc_dmabuf_exit;
5189 ret = bus_dmamap_load(dma_buf->dma_tag,
5193 qlnx_dmamap_callback,
5194 &b_addr, BUS_DMA_NOWAIT);
5196 if (ret || !b_addr) {
5197 bus_dma_tag_destroy(dma_buf->dma_tag);
5198 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
5201 goto qlnx_alloc_dmabuf_exit;
5204 dma_buf->dma_addr = b_addr;
5206 qlnx_alloc_dmabuf_exit:
5212 qlnx_free_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
5214 bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
5215 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
5216 bus_dma_tag_destroy(dma_buf->dma_tag);
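/*
 * ecore OSAL coherent-memory allocator. The request is rounded up to a
 * page multiple and over-allocated by one page; a copy of the qlnx_dma_t
 * bookkeeping (tag, map, addresses) is stashed immediately past the
 * requested size so qlnx_dma_free_coherent() can recover it from just
 * the virtual address and length.
 */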
5221 qlnx_dma_alloc_coherent(void *ecore_dev, bus_addr_t *phys, uint32_t size)
5228 ha = (qlnx_host_t *)ecore_dev;
5231 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
5233 memset(&dma_buf, 0, sizeof (qlnx_dma_t));
5235 dma_buf.size = size + PAGE_SIZE;
5236 dma_buf.alignment = 8;
5238 if (qlnx_alloc_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf) != 0)
5240 bzero((uint8_t *)dma_buf.dma_b, dma_buf.size);
5242 *phys = dma_buf.dma_addr;
5244 dma_p = (qlnx_dma_t *)((uint8_t *)dma_buf.dma_b + size);
5246 memcpy(dma_p, &dma_buf, sizeof(qlnx_dma_t));
5248 QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
5249 (void *)dma_buf.dma_map, (void *)dma_buf.dma_tag,
5250 dma_buf.dma_b, (void *)dma_buf.dma_addr, size);
5252 return (dma_buf.dma_b);
5256 qlnx_dma_free_coherent(void *ecore_dev, void *v_addr, bus_addr_t phys,
5259 qlnx_dma_t dma_buf, *dma_p;
5263 ha = (qlnx_host_t *)ecore_dev;
5269 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
5271 dma_p = (qlnx_dma_t *)((uint8_t *)v_addr + size);
5273 QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
5274 (void *)dma_p->dma_map, (void *)dma_p->dma_tag,
5275 dma_p->dma_b, (void *)dma_p->dma_addr, size);
5279 if (!ha->qlnxr_debug)
5280 qlnx_free_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf);
5285 qlnx_alloc_parent_dma_tag(qlnx_host_t *ha)
5293 * Allocate parent DMA Tag
5295 ret = bus_dma_tag_create(
5296 bus_get_dma_tag(dev), /* parent */
5297 1, ((bus_size_t)(1ULL << 32)), /* alignment, boundary */
5298 BUS_SPACE_MAXADDR, /* lowaddr */
5299 BUS_SPACE_MAXADDR, /* highaddr */
5300 NULL, NULL, /* filter, filterarg */
5301 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
5303 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
5305 NULL, NULL, /* lockfunc, lockarg */
5309 QL_DPRINT1(ha, "could not create parent dma tag\n");
5313 ha->flags.parent_tag = 1;
5319 qlnx_free_parent_dma_tag(qlnx_host_t *ha)
5321 if (ha->parent_tag != NULL) {
5322 bus_dma_tag_destroy(ha->parent_tag);
5323 ha->parent_tag = NULL;
5329 qlnx_alloc_tx_dma_tag(qlnx_host_t *ha)
5331 if (bus_dma_tag_create(NULL, /* parent */
5332 1, 0, /* alignment, bounds */
5333 BUS_SPACE_MAXADDR, /* lowaddr */
5334 BUS_SPACE_MAXADDR, /* highaddr */
5335 NULL, NULL, /* filter, filterarg */
5336 QLNX_MAX_TSO_FRAME_SIZE, /* maxsize */
5337 QLNX_MAX_SEGMENTS, /* nsegments */
5338 QLNX_MAX_TX_MBUF_SIZE, /* maxsegsize */
5340 NULL, /* lockfunc */
5341 NULL, /* lockfuncarg */
5344 QL_DPRINT1(ha, "tx_tag alloc failed\n");
5352 qlnx_free_tx_dma_tag(qlnx_host_t *ha)
5354 if (ha->tx_tag != NULL) {
5355 bus_dma_tag_destroy(ha->tx_tag);
5362 qlnx_alloc_rx_dma_tag(qlnx_host_t *ha)
5364 if (bus_dma_tag_create(NULL, /* parent */
5365 1, 0, /* alignment, bounds */
5366 BUS_SPACE_MAXADDR, /* lowaddr */
5367 BUS_SPACE_MAXADDR, /* highaddr */
5368 NULL, NULL, /* filter, filterarg */
5369 MJUM9BYTES, /* maxsize */
5371 MJUM9BYTES, /* maxsegsize */
5373 NULL, /* lockfunc */
5374 NULL, /* lockfuncarg */
5377 QL_DPRINT1(ha, " rx_tag alloc failed\n");
5385 qlnx_free_rx_dma_tag(qlnx_host_t *ha)
5387 if (ha->rx_tag != NULL) {
5388 bus_dma_tag_destroy(ha->rx_tag);
5394 /*********************************
5395 * Exported functions
5396 *********************************/
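/*
 * The functions below are thin OSAL shims: they adapt the FreeBSD
 * bus-space, PCI config-space and resource accessors to the callback
 * signatures the ecore core code expects. The opaque ecore_dev/hwfn
 * pointers map back to the qlnx_host_t softc.
 */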
5398 qlnx_pci_bus_get_bar_size(void *ecore_dev, uint8_t bar_id)
5402 bar_id = bar_id * 2;
5404 bar_size = bus_get_resource_count(((qlnx_host_t *)ecore_dev)->pci_dev,
5412 qlnx_pci_read_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t *reg_value)
5414 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5420 qlnx_pci_read_config_word(void *ecore_dev, uint32_t pci_reg,
5421 uint16_t *reg_value)
5423 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5429 qlnx_pci_read_config_dword(void *ecore_dev, uint32_t pci_reg,
5430 uint32_t *reg_value)
5432 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5438 qlnx_pci_write_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t reg_value)
5440 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5441 pci_reg, reg_value, 1);
5446 qlnx_pci_write_config_word(void *ecore_dev, uint32_t pci_reg,
5449 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5450 pci_reg, reg_value, 2);
5455 qlnx_pci_write_config_dword(void *ecore_dev, uint32_t pci_reg,
5458 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5459 pci_reg, reg_value, 4);
5464 qlnx_pci_find_capability(void *ecore_dev, int cap)
5471 if (pci_find_cap(ha->pci_dev, PCIY_EXPRESS, &reg) == 0)
5474 QL_DPRINT1(ha, "failed\n");
5480 qlnx_pci_find_ext_capability(void *ecore_dev, int ext_cap)
5487 if (pci_find_extcap(ha->pci_dev, ext_cap, &reg) == 0)
5490 QL_DPRINT1(ha, "failed\n");
5496 qlnx_reg_rd32(void *hwfn, uint32_t reg_addr)
5499 struct ecore_hwfn *p_hwfn;
5503 data32 = bus_read_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \
5504 (bus_size_t)(p_hwfn->reg_offset + reg_addr));
5510 qlnx_reg_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
5512 struct ecore_hwfn *p_hwfn = hwfn;
5514 bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \
5515 (bus_size_t)(p_hwfn->reg_offset + reg_addr), value);
5521 qlnx_reg_wr16(void *hwfn, uint32_t reg_addr, uint16_t value)
5523 struct ecore_hwfn *p_hwfn = hwfn;
5525 bus_write_2(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \
5526 (bus_size_t)(p_hwfn->reg_offset + reg_addr), value);
5531 qlnx_dbell_wr32_db(void *hwfn, void *reg_addr, uint32_t value)
5533 struct ecore_dev *cdev;
5534 struct ecore_hwfn *p_hwfn;
5539 cdev = p_hwfn->p_dev;
5541 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(p_hwfn->doorbells));
5542 bus_write_4(((qlnx_host_t *)cdev)->pci_dbells, offset, value);
5548 qlnx_dbell_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
5550 struct ecore_hwfn *p_hwfn = hwfn;
5552 bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_dbells, \
5553 (bus_size_t)(p_hwfn->db_offset + reg_addr), value);
5559 qlnx_direct_reg_rd32(void *p_hwfn, uint32_t *reg_addr)
5563 struct ecore_dev *cdev;
5565 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5566 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5568 data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, offset);
5574 qlnx_direct_reg_wr32(void *p_hwfn, void *reg_addr, uint32_t value)
5577 struct ecore_dev *cdev;
5579 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5580 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5582 bus_write_4(((qlnx_host_t *)cdev)->pci_reg, offset, value);
5588 qlnx_direct_reg_wr64(void *p_hwfn, void *reg_addr, uint64_t value)
5591 struct ecore_dev *cdev;
5593 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5594 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5596 bus_write_8(((qlnx_host_t *)cdev)->pci_reg, offset, value);
5601 qlnx_zalloc(uint32_t size)
5605 va = malloc((unsigned long)size, M_QLNXBUF, (M_NOWAIT | M_ZERO));
5607 return ((void *)va);
5611 qlnx_barrier(void *p_hwfn)
5615 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
5616 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_WRITE);
5620 qlnx_link_update(void *p_hwfn)
5623 int prev_link_state;
5625 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
5627 qlnx_fill_link(ha, p_hwfn, &ha->if_link);
5629 prev_link_state = ha->link_up;
5630 ha->link_up = ha->if_link.link_up;
5632 if (prev_link_state != ha->link_up) {
5634 if_link_state_change(ha->ifp, LINK_STATE_UP);
5636 if_link_state_change(ha->ifp, LINK_STATE_DOWN);
5640 #ifdef CONFIG_ECORE_SRIOV
5642 if (qlnx_vf_device(ha) != 0) {
5643 if (ha->sriov_initialized)
5644 qlnx_inform_vf_link_state(p_hwfn, ha);
5647 #endif /* #ifdef CONFIG_ECORE_SRIOV */
5648 #endif /* #ifdef QLNX_VF */
5654 __qlnx_osal_vf_fill_acquire_resc_req(struct ecore_hwfn *p_hwfn,
5655 struct ecore_vf_acquire_sw_info *p_sw_info)
5657 p_sw_info->driver_version = (QLNX_VERSION_MAJOR << 24) |
5658 (QLNX_VERSION_MINOR << 16) |
5660 p_sw_info->os_type = VFPF_ACQUIRE_OS_FREEBSD;
5666 qlnx_osal_vf_fill_acquire_resc_req(void *p_hwfn, void *p_resc_req,
5669 __qlnx_osal_vf_fill_acquire_resc_req(p_hwfn, p_sw_info);
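/*
 * Collect the current link state for a hw-function into a
 * qlnx_link_output: media type and the MCP link params/state on a PF
 * (read through a PTT window), or the bulletin-board copies on a VF,
 * then translate the speed and pause settings into QLNX_LINK_CAP_* bits.
 */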
5675 qlnx_fill_link(qlnx_host_t *ha, struct ecore_hwfn *hwfn,
5676 struct qlnx_link_output *if_link)
5678 struct ecore_mcp_link_params link_params;
5679 struct ecore_mcp_link_state link_state;
5681 struct ecore_ptt *p_ptt = NULL;
5684 memset(if_link, 0, sizeof(*if_link));
5685 memset(&link_params, 0, sizeof(struct ecore_mcp_link_params));
5686 memset(&link_state, 0, sizeof(struct ecore_mcp_link_state));
5688 ha = (qlnx_host_t *)hwfn->p_dev;
5690 /* Prepare source inputs */
5691 /* we only deal with physical functions */
5692 if (qlnx_vf_device(ha) != 0) {
5694 p_ptt = ecore_ptt_acquire(hwfn);
5696 if (p_ptt == NULL) {
5697 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
5701 ecore_mcp_get_media_type(hwfn, p_ptt, &if_link->media_type);
5702 ecore_ptt_release(hwfn, p_ptt);
5704 memcpy(&link_params, ecore_mcp_get_link_params(hwfn),
5705 sizeof(link_params));
5706 memcpy(&link_state, ecore_mcp_get_link_state(hwfn),
5707 sizeof(link_state));
5709 ecore_mcp_get_media_type(hwfn, NULL, &if_link->media_type);
5710 ecore_vf_read_bulletin(hwfn, &p_change);
5711 ecore_vf_get_link_params(hwfn, &link_params);
5712 ecore_vf_get_link_state(hwfn, &link_state);
5715 /* Set the link parameters to pass to protocol driver */
5716 if (link_state.link_up) {
5717 if_link->link_up = true;
5718 if_link->speed = link_state.speed;
5721 if_link->supported_caps = QLNX_LINK_CAP_FIBRE;
5723 if (link_params.speed.autoneg)
5724 if_link->supported_caps |= QLNX_LINK_CAP_Autoneg;
5726 if (link_params.pause.autoneg ||
5727 (link_params.pause.forced_rx && link_params.pause.forced_tx))
5728 if_link->supported_caps |= QLNX_LINK_CAP_Asym_Pause;
5730 if (link_params.pause.autoneg || link_params.pause.forced_rx ||
5731 link_params.pause.forced_tx)
5732 if_link->supported_caps |= QLNX_LINK_CAP_Pause;
5734 if (link_params.speed.advertised_speeds &
5735 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
5736 if_link->supported_caps |= QLNX_LINK_CAP_1000baseT_Half |
5737 QLNX_LINK_CAP_1000baseT_Full;
5739 if (link_params.speed.advertised_speeds &
5740 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
5741 if_link->supported_caps |= QLNX_LINK_CAP_10000baseKR_Full;
5743 if (link_params.speed.advertised_speeds &
5744 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
5745 if_link->supported_caps |= QLNX_LINK_CAP_25000baseKR_Full;
5747 if (link_params.speed.advertised_speeds &
5748 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
5749 if_link->supported_caps |= QLNX_LINK_CAP_40000baseLR4_Full;
5751 if (link_params.speed.advertised_speeds &
5752 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
5753 if_link->supported_caps |= QLNX_LINK_CAP_50000baseKR2_Full;
5755 if (link_params.speed.advertised_speeds &
5756 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
5757 if_link->supported_caps |= QLNX_LINK_CAP_100000baseKR4_Full;
5759 if_link->advertised_caps = if_link->supported_caps;
5761 if_link->autoneg = link_params.speed.autoneg;
5762 if_link->duplex = QLNX_LINK_DUPLEX;
5764 /* Link partner capabilities */
5766 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_HD)
5767 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Half;
5769 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_FD)
5770 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Full;
5772 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_10G)
5773 if_link->link_partner_caps |= QLNX_LINK_CAP_10000baseKR_Full;
5775 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_25G)
5776 if_link->link_partner_caps |= QLNX_LINK_CAP_25000baseKR_Full;
5778 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_40G)
5779 if_link->link_partner_caps |= QLNX_LINK_CAP_40000baseLR4_Full;
5781 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_50G)
5782 if_link->link_partner_caps |= QLNX_LINK_CAP_50000baseKR2_Full;
5784 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_100G)
5785 if_link->link_partner_caps |= QLNX_LINK_CAP_100000baseKR4_Full;
5787 if (link_state.an_complete)
5788 if_link->link_partner_caps |= QLNX_LINK_CAP_Autoneg;
5790 if (link_state.partner_adv_pause)
5791 if_link->link_partner_caps |= QLNX_LINK_CAP_Pause;
5793 if ((link_state.partner_adv_pause ==
5794 ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE) ||
5795 (link_state.partner_adv_pause ==
5796 ECORE_LINK_PARTNER_BOTH_PAUSE))
5797 if_link->link_partner_caps |= QLNX_LINK_CAP_Asym_Pause;
5803 qlnx_schedule_recovery(void *p_hwfn)
5807 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
5809 if (qlnx_vf_device(ha) != 0) {
5810 taskqueue_enqueue(ha->err_taskqueue, &ha->err_task);
5817 qlnx_nic_setup(struct ecore_dev *cdev, struct ecore_pf_params *func_params)
5821 for (i = 0; i < cdev->num_hwfns; i++) {
5822 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
5823 p_hwfn->pf_params = *func_params;
5825 #ifdef QLNX_ENABLE_IWARP
5826 if (qlnx_vf_device((qlnx_host_t *)cdev) != 0) {
5827 p_hwfn->using_ll2 = true;
5829 #endif /* #ifdef QLNX_ENABLE_IWARP */
5833 rc = ecore_resc_alloc(cdev);
5835 goto qlnx_nic_setup_exit;
5837 ecore_resc_setup(cdev);
5839 qlnx_nic_setup_exit:
5845 qlnx_nic_start(struct ecore_dev *cdev)
5848 struct ecore_hw_init_params params;
5850 bzero(¶ms, sizeof (struct ecore_hw_init_params));
5852 params.p_tunn = NULL;
5853 params.b_hw_start = true;
5854 params.int_mode = cdev->int_mode;
5855 params.allow_npar_tx_switch = true;
5856 params.bin_fw_data = NULL;
5858 rc = ecore_hw_init(cdev, &params);
5860 ecore_resc_free(cdev);
5868 qlnx_slowpath_start(qlnx_host_t *ha)
5870 struct ecore_dev *cdev;
5871 struct ecore_pf_params pf_params;
5874 memset(&pf_params, 0, sizeof(struct ecore_pf_params));
5875 pf_params.eth_pf_params.num_cons =
5876 (ha->num_rss) * (ha->num_tc + 1);
5878 #ifdef QLNX_ENABLE_IWARP
5879 if (qlnx_vf_device(ha) != 0) {
5880 if (ha->personality == ECORE_PCI_ETH_IWARP) {
5881 device_printf(ha->pci_dev, "setting parameters required by iWARP dev\n");
5882 pf_params.rdma_pf_params.num_qps = 1024;
5883 pf_params.rdma_pf_params.num_srqs = 1024;
5884 pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX;
5885 pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_IWARP;
5886 } else if (ha->personality == ECORE_PCI_ETH_ROCE) {
5887 device_printf(ha->pci_dev, "setting parameters required by RoCE dev\n");
5888 pf_params.rdma_pf_params.num_qps = 8192;
5889 pf_params.rdma_pf_params.num_srqs = 8192;
5891 pf_params.rdma_pf_params.min_dpis = 8;
5892 pf_params.rdma_pf_params.roce_edpm_mode = 0;
5893 pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX;
5894 pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_ROCE;
5897 #endif /* #ifdef QLNX_ENABLE_IWARP */
5901 rc = qlnx_nic_setup(cdev, &pf_params);
5903 goto qlnx_slowpath_start_exit;
5905 cdev->int_mode = ECORE_INT_MODE_MSIX;
5906 cdev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE;
5908 #ifdef QLNX_MAX_COALESCE
5909 cdev->rx_coalesce_usecs = 255;
5910 cdev->tx_coalesce_usecs = 255;
5913 rc = qlnx_nic_start(cdev);
5915 ha->rx_coalesce_usecs = cdev->rx_coalesce_usecs;
5916 ha->tx_coalesce_usecs = cdev->tx_coalesce_usecs;
5918 #ifdef QLNX_USER_LLDP
5919 (void)qlnx_set_lldp_tlvx(ha, NULL);
5920 #endif /* #ifdef QLNX_USER_LLDP */
5922 qlnx_slowpath_start_exit:
5928 qlnx_slowpath_stop(qlnx_host_t *ha)
5930 struct ecore_dev *cdev;
5931 device_t dev = ha->pci_dev;
5936 ecore_hw_stop(cdev);
5938 for (i = 0; i < ha->cdev.num_hwfns; i++) {
5940 if (ha->sp_handle[i])
5941 (void)bus_teardown_intr(dev, ha->sp_irq[i],
5944 ha->sp_handle[i] = NULL;
5947 (void) bus_release_resource(dev, SYS_RES_IRQ,
5948 ha->sp_irq_rid[i], ha->sp_irq[i]);
5949 ha->sp_irq[i] = NULL;
5952 ecore_resc_free(cdev);
5958 qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
5959 char ver_str[VER_SIZE])
5963 memcpy(cdev->name, name, NAME_SIZE);
5965 for_each_hwfn(cdev, i) {
5966 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
5969 cdev->drv_type = DRV_ID_DRV_TYPE_FREEBSD;
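/*
 * MFW protocol-statistics callback: only ECORE_MCP_LAN_STATS is served,
 * derived from the aggregated vport statistics.
 */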
5975 qlnx_get_protocol_stats(void *cdev, int proto_type, void *proto_stats)
5977 enum ecore_mcp_protocol_type type;
5978 union ecore_mcp_protocol_stats *stats;
5979 struct ecore_eth_stats eth_stats;
5983 stats = proto_stats;
5988 case ECORE_MCP_LAN_STATS:
5989 ecore_get_vport_stats((struct ecore_dev *)cdev, &eth_stats);
5990 stats->lan_stats.ucast_rx_pkts = eth_stats.common.rx_ucast_pkts;
5991 stats->lan_stats.ucast_tx_pkts = eth_stats.common.tx_ucast_pkts;
5992 stats->lan_stats.fcs_err = -1;
5996 ha->err_get_proto_invalid_type++;
5998 QL_DPRINT1(ha, "invalid protocol type 0x%x\n", type);
6005 qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver)
6007 struct ecore_hwfn *p_hwfn;
6008 struct ecore_ptt *p_ptt;
6010 p_hwfn = &ha->cdev.hwfns[0];
6011 p_ptt = ecore_ptt_acquire(p_hwfn);
6013 if (p_ptt == NULL) {
6014 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
6017 ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, mfw_ver, NULL);
6019 ecore_ptt_release(p_hwfn, p_ptt);
6025 qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size)
6027 struct ecore_hwfn *p_hwfn;
6028 struct ecore_ptt *p_ptt;
6030 p_hwfn = &ha->cdev.hwfns[0];
6031 p_ptt = ecore_ptt_acquire(p_hwfn);
6033 if (p_ptt == NULL) {
6034 QL_DPRINT1(ha,"ecore_ptt_acquire failed\n");
6037 ecore_mcp_get_flash_size(p_hwfn, p_ptt, flash_size);
6039 ecore_ptt_release(p_hwfn, p_ptt);
6045 qlnx_alloc_mem_arrays(qlnx_host_t *ha)
6047 struct ecore_dev *cdev;
6051 bzero(&ha->txq_array[0], (sizeof(struct qlnx_tx_queue) * QLNX_MAX_RSS));
6052 bzero(&ha->rxq_array[0], (sizeof(struct qlnx_rx_queue) * QLNX_MAX_RSS));
6053 bzero(&ha->sb_array[0], (sizeof(struct ecore_sb_info) * QLNX_MAX_RSS));
6059 qlnx_init_fp(qlnx_host_t *ha)
6061 int rss_id, txq_array_index, tc;
6063 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
6065 struct qlnx_fastpath *fp = &ha->fp_array[rss_id];
6067 fp->rss_id = rss_id;
6069 fp->sb_info = &ha->sb_array[rss_id];
6070 fp->rxq = &ha->rxq_array[rss_id];
6071 fp->rxq->rxq_id = rss_id;
6073 for (tc = 0; tc < ha->num_tc; tc++) {
6074 txq_array_index = tc * ha->num_rss + rss_id;
6075 fp->txq[tc] = &ha->txq_array[txq_array_index];
6076 fp->txq[tc]->index = txq_array_index;
6079 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", qlnx_name_str,
6082 fp->tx_ring_full = 0;
6084 /* reset all the statistics counters */
6086 fp->tx_pkts_processed = 0;
6087 fp->tx_pkts_freed = 0;
6088 fp->tx_pkts_transmitted = 0;
6089 fp->tx_pkts_completed = 0;
6091 #ifdef QLNX_TRACE_PERF_DATA
6092 fp->tx_pkts_trans_ctx = 0;
6093 fp->tx_pkts_compl_ctx = 0;
6094 fp->tx_pkts_trans_fp = 0;
6095 fp->tx_pkts_compl_fp = 0;
6096 fp->tx_pkts_compl_intr = 0;
6098 fp->tx_lso_wnd_min_len = 0;
6100 fp->tx_nsegs_gt_elem_left = 0;
6101 fp->tx_tso_max_nsegs = 0;
6102 fp->tx_tso_min_nsegs = 0;
6103 fp->err_tx_nsegs_gt_elem_left = 0;
6104 fp->err_tx_dmamap_create = 0;
6105 fp->err_tx_defrag_dmamap_load = 0;
6106 fp->err_tx_non_tso_max_seg = 0;
6107 fp->err_tx_dmamap_load = 0;
6108 fp->err_tx_defrag = 0;
6109 fp->err_tx_free_pkt_null = 0;
6110 fp->err_tx_cons_idx_conflict = 0;
6113 fp->err_m_getcl = 0;
6114 fp->err_m_getjcl = 0;
6120 qlnx_free_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info)
6122 struct ecore_dev *cdev;
6126 if (sb_info->sb_virt) {
6127 OSAL_DMA_FREE_COHERENT(cdev, ((void *)sb_info->sb_virt),
6128 (sb_info->sb_phys), (sizeof(*sb_info->sb_virt)));
6129 sb_info->sb_virt = NULL;
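/*
 * Map a global status block id onto a hw-function: on CMT (multi-hwfn)
 * devices the blocks are interleaved, so hwfn_index = sb_id % num_hwfns
 * and the id relative to that function is sb_id / num_hwfns.
 */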
6134 qlnx_sb_init(struct ecore_dev *cdev, struct ecore_sb_info *sb_info,
6135 void *sb_virt_addr, bus_addr_t sb_phy_addr, u16 sb_id)
6137 struct ecore_hwfn *p_hwfn;
6141 hwfn_index = sb_id % cdev->num_hwfns;
6142 p_hwfn = &cdev->hwfns[hwfn_index];
6143 rel_sb_id = sb_id / cdev->num_hwfns;
6145 QL_DPRINT2(((qlnx_host_t *)cdev),
6146 "hwfn_index = %d p_hwfn = %p sb_id = 0x%x rel_sb_id = 0x%x \
6147 sb_info = %p sb_virt_addr = %p sb_phy_addr = %p\n",
6148 hwfn_index, p_hwfn, sb_id, rel_sb_id, sb_info,
6149 sb_virt_addr, (void *)sb_phy_addr);
6151 rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
6152 sb_virt_addr, sb_phy_addr, rel_sb_id);
6157 /* This function allocates fast-path status block memory */
6159 qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info, u16 sb_id)
6161 struct status_block_e4 *sb_virt;
6165 struct ecore_dev *cdev;
6169 size = sizeof(*sb_virt);
6170 sb_virt = OSAL_DMA_ALLOC_COHERENT(cdev, (&sb_phys), size);
6173 QL_DPRINT1(ha, "Status block allocation failed\n");
6177 rc = qlnx_sb_init(cdev, sb_info, sb_virt, sb_phys, sb_id);
6179 OSAL_DMA_FREE_COHERENT(cdev, sb_virt, sb_phys, size);
6186 qlnx_free_rx_buffers(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
6189 struct sw_rx_data *rx_buf;
6191 for (i = 0; i < rxq->num_rx_buffers; i++) {
6193 rx_buf = &rxq->sw_rx_ring[i];
6195 if (rx_buf->data != NULL) {
6196 if (rx_buf->map != NULL) {
6197 bus_dmamap_unload(ha->rx_tag, rx_buf->map);
6198 bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
6201 m_freem(rx_buf->data);
6202 rx_buf->data = NULL;
6209 qlnx_free_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
6211 struct ecore_dev *cdev;
6216 qlnx_free_rx_buffers(ha, rxq);
6218 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
6219 qlnx_free_tpa_mbuf(ha, &rxq->tpa_info[i]);
6220 if (rxq->tpa_info[i].mpf != NULL)
6221 m_freem(rxq->tpa_info[i].mpf);
6224 bzero((void *)&rxq->sw_rx_ring[0],
6225 (sizeof (struct sw_rx_data) * RX_RING_SIZE));
6227 /* Free the real RQ ring used by FW */
6228 if (rxq->rx_bd_ring.p_virt_addr) {
6229 ecore_chain_free(cdev, &rxq->rx_bd_ring);
6230 rxq->rx_bd_ring.p_virt_addr = NULL;
6233 /* Free the real completion ring used by FW */
6234 if (rxq->rx_comp_ring.p_virt_addr &&
6235 rxq->rx_comp_ring.pbl_sp.p_virt_table) {
6236 ecore_chain_free(cdev, &rxq->rx_comp_ring);
6237 rxq->rx_comp_ring.p_virt_addr = NULL;
6238 rxq->rx_comp_ring.pbl_sp.p_virt_table = NULL;
6241 #ifdef QLNX_SOFT_LRO
6243 struct lro_ctrl *lro;
6248 #endif /* #ifdef QLNX_SOFT_LRO */
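/*
 * Post one receive buffer: grab a jumbo cluster sized to rx_buf_size,
 * DMA-load it, record it in the software ring at sw_rx_prod, publish a
 * BD carrying its bus address to the FW ring and advance the producer.
 */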
6254 qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
6256 register struct mbuf *mp;
6257 uint16_t rx_buf_size;
6258 struct sw_rx_data *sw_rx_data;
6259 struct eth_rx_bd *rx_bd;
6260 dma_addr_t dma_addr;
6262 bus_dma_segment_t segs[1];
6265 struct ecore_dev *cdev;
6269 rx_buf_size = rxq->rx_buf_size;
6271 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);
6274 QL_DPRINT1(ha, "Failed to allocate Rx data\n");
6278 mp->m_len = mp->m_pkthdr.len = rx_buf_size;
6280 map = (bus_dmamap_t)0;
6282 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
6284 dma_addr = segs[0].ds_addr;
6286 if (ret || !dma_addr || (nsegs != 1)) {
6288 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
6289 ret, (long long unsigned int)dma_addr, nsegs);
6293 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
6294 sw_rx_data->data = mp;
6295 sw_rx_data->dma_addr = dma_addr;
6296 sw_rx_data->map = map;
6298 /* Advance PROD and get BD pointer */
6299 rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
6300 rx_bd->addr.hi = htole32(U64_HI(dma_addr));
6301 rx_bd->addr.lo = htole32(U64_LO(dma_addr));
6302 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);
6304 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
6310 qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
6311 struct qlnx_agg_info *tpa)
6314 dma_addr_t dma_addr;
6316 bus_dma_segment_t segs[1];
6319 struct sw_rx_data *rx_buf;
6321 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);
6324 QL_DPRINT1(ha, "Failed to allocate Rx data\n");
6328 mp->m_len = mp->m_pkthdr.len = rx_buf_size;
6330 map = (bus_dmamap_t)0;
6332 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
6334 dma_addr = segs[0].ds_addr;
6336 if (ret || !dma_addr || (nsegs != 1)) {
6338 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
6339 ret, (long long unsigned int)dma_addr, nsegs);
6343 rx_buf = &tpa->rx_buf;
6345 memset(rx_buf, 0, sizeof (struct sw_rx_data));
6348 rx_buf->dma_addr = dma_addr;
6351 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);
6357 qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa)
6359 struct sw_rx_data *rx_buf;
6361 rx_buf = &tpa->rx_buf;
6363 if (rx_buf->data != NULL) {
6364 if (rx_buf->map != NULL) {
6365 bus_dmamap_unload(ha->rx_tag, rx_buf->map);
6366 bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
6369 m_freem(rx_buf->data);
6370 rx_buf->data = NULL;
6375 /* This function allocates all memory needed per Rx queue */
6377 qlnx_alloc_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
6379 int i, rc, num_allocated;
6381 struct ecore_dev *cdev;
6386 rxq->num_rx_buffers = RX_RING_SIZE;
6388 rxq->rx_buf_size = ha->rx_buf_size;
6390 /* Allocate the parallel driver ring for Rx buffers */
6391 bzero((void *)&rxq->sw_rx_ring[0],
6392 (sizeof (struct sw_rx_data) * RX_RING_SIZE));
6394 /* Allocate FW Rx ring */
6396 rc = ecore_chain_alloc(cdev,
6397 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
6398 ECORE_CHAIN_MODE_NEXT_PTR,
6399 ECORE_CHAIN_CNT_TYPE_U16,
6401 sizeof(struct eth_rx_bd),
6402 &rxq->rx_bd_ring, NULL);
6407 /* Allocate FW completion ring */
6408 rc = ecore_chain_alloc(cdev,
6409 ECORE_CHAIN_USE_TO_CONSUME,
6410 ECORE_CHAIN_MODE_PBL,
6411 ECORE_CHAIN_CNT_TYPE_U16,
6413 sizeof(union eth_rx_cqe),
6414 &rxq->rx_comp_ring, NULL);
6419 /* Allocate buffers for the Rx ring */
6421 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
6422 rc = qlnx_alloc_tpa_mbuf(ha, rxq->rx_buf_size,
6429 for (i = 0; i < rxq->num_rx_buffers; i++) {
6430 rc = qlnx_alloc_rx_buffer(ha, rxq);
6435 if (!num_allocated) {
6436 QL_DPRINT1(ha, "Rx buffers allocation failed\n");
6438 } else if (num_allocated < rxq->num_rx_buffers) {
6439 QL_DPRINT1(ha, "Allocated less buffers than"
6440 " desired (%d allocated)\n", num_allocated);
6443 #ifdef QLNX_SOFT_LRO
6446 struct lro_ctrl *lro;
6450 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
6451 if (tcp_lro_init_args(lro, ifp, 0, rxq->num_rx_buffers)) {
6452 QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n",
6457 if (tcp_lro_init(lro)) {
6458 QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n",
6462 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
6466 #endif /* #ifdef QLNX_SOFT_LRO */
6470 qlnx_free_mem_rxq(ha, rxq);
6476 qlnx_free_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6477 struct qlnx_tx_queue *txq)
6479 struct ecore_dev *cdev;
6483 bzero((void *)&txq->sw_tx_ring[0],
6484 (sizeof (struct sw_tx_bd) * TX_RING_SIZE));
6486 /* Free the real RQ ring used by FW */
6487 if (txq->tx_pbl.p_virt_addr) {
6488 ecore_chain_free(cdev, &txq->tx_pbl);
6489 txq->tx_pbl.p_virt_addr = NULL;
6494 /* This function allocates all memory needed per Tx queue */
6496 qlnx_alloc_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6497 struct qlnx_tx_queue *txq)
6499 int ret = ECORE_SUCCESS;
6500 union eth_tx_bd_types *p_virt;
6501 struct ecore_dev *cdev;
6505 bzero((void *)&txq->sw_tx_ring[0],
6506 (sizeof (struct sw_tx_bd) * TX_RING_SIZE));
6508 /* Allocate the real Tx ring to be used by FW */
6509 ret = ecore_chain_alloc(cdev,
6510 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
6511 ECORE_CHAIN_MODE_PBL,
6512 ECORE_CHAIN_CNT_TYPE_U16,
6515 &txq->tx_pbl, NULL);
6517 if (ret != ECORE_SUCCESS) {
6521 txq->num_tx_buffers = TX_RING_SIZE;
6526 qlnx_free_mem_txq(ha, fp, txq);
6531 qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6534 struct ifnet *ifp = ha->ifp;
6536 if (mtx_initialized(&fp->tx_mtx)) {
6538 if (fp->tx_br != NULL) {
6540 mtx_lock(&fp->tx_mtx);
6542 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
6543 fp->tx_pkts_freed++;
6547 mtx_unlock(&fp->tx_mtx);
6549 buf_ring_free(fp->tx_br, M_DEVBUF);
6552 mtx_destroy(&fp->tx_mtx);
6558 qlnx_free_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6562 qlnx_free_mem_sb(ha, fp->sb_info);
6564 qlnx_free_mem_rxq(ha, fp->rxq);
6566 for (tc = 0; tc < ha->num_tc; tc++)
6567 qlnx_free_mem_txq(ha, fp, fp->txq[tc]);
6573 qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6575 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
6576 "qlnx%d_fp%d_tx_mq_lock", ha->dev_unit, fp->rss_id);
6578 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
6580 fp->tx_br = buf_ring_alloc(TX_RING_SIZE, M_DEVBUF,
6581 M_NOWAIT, &fp->tx_mtx);
6582 if (fp->tx_br == NULL) {
6583 QL_DPRINT1(ha, "buf_ring_alloc failed for fp[%d, %d]\n",
6584 ha->dev_unit, fp->rss_id);
6591 qlnx_alloc_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6595 rc = qlnx_alloc_mem_sb(ha, fp->sb_info, fp->rss_id);
6599 if (ha->rx_jumbo_buf_eq_mtu) {
6600 if (ha->max_frame_size <= MCLBYTES)
6601 ha->rx_buf_size = MCLBYTES;
6602 else if (ha->max_frame_size <= MJUMPAGESIZE)
6603 ha->rx_buf_size = MJUMPAGESIZE;
6604 else if (ha->max_frame_size <= MJUM9BYTES)
6605 ha->rx_buf_size = MJUM9BYTES;
6606 else if (ha->max_frame_size <= MJUM16BYTES)
6607 ha->rx_buf_size = MJUM16BYTES;
6609 if (ha->max_frame_size <= MCLBYTES)
6610 ha->rx_buf_size = MCLBYTES;
6612 ha->rx_buf_size = MJUMPAGESIZE;
6615 rc = qlnx_alloc_mem_rxq(ha, fp->rxq);
6619 for (tc = 0; tc < ha->num_tc; tc++) {
6620 rc = qlnx_alloc_mem_txq(ha, fp, fp->txq[tc]);
6628 qlnx_free_mem_fp(ha, fp);
6633 qlnx_free_mem_load(qlnx_host_t *ha)
6636 struct ecore_dev *cdev;
6640 for (i = 0; i < ha->num_rss; i++) {
6641 struct qlnx_fastpath *fp = &ha->fp_array[i];
6643 qlnx_free_mem_fp(ha, fp);
6649 qlnx_alloc_mem_load(qlnx_host_t *ha)
6653 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
6654 struct qlnx_fastpath *fp = &ha->fp_array[rss_id];
6656 rc = qlnx_alloc_mem_fp(ha, fp);
6664 qlnx_start_vport(struct ecore_dev *cdev,
6668 u8 inner_vlan_removal_en_flg,
6673 struct ecore_sp_vport_start_params vport_start_params = { 0 };
6676 ha = (qlnx_host_t *)cdev;
6678 vport_start_params.remove_inner_vlan = inner_vlan_removal_en_flg;
6679 vport_start_params.tx_switching = 0;
6680 vport_start_params.handle_ptp_pkts = 0;
6681 vport_start_params.only_untagged = 0;
6682 vport_start_params.drop_ttl0 = drop_ttl0_flg;
6684 vport_start_params.tpa_mode =
6685 (hw_lro_enable ? ECORE_TPA_MODE_RSC : ECORE_TPA_MODE_NONE);
6686 vport_start_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;
6688 vport_start_params.vport_id = vport_id;
6689 vport_start_params.mtu = mtu;
6692 QL_DPRINT2(ha, "Setting mtu to %d and VPORT ID = %d\n", mtu, vport_id);
6694 for_each_hwfn(cdev, i) {
6695 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
6697 vport_start_params.concrete_fid = p_hwfn->hw_info.concrete_fid;
6698 vport_start_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
6700 rc = ecore_sp_vport_start(p_hwfn, &vport_start_params);
6703 QL_DPRINT1(ha, "Failed to start VPORT V-PORT %d"
6704 " with MTU %d\n" , vport_id, mtu);
6708 ecore_hw_start_fastpath(p_hwfn);
6710 QL_DPRINT2(ha, "Started V-PORT %d with MTU %d\n",
6718 qlnx_update_vport(struct ecore_dev *cdev,
6719 struct qlnx_update_vport_params *params)
6721 struct ecore_sp_vport_update_params sp_params;
6722 int rc, i, j, fp_index;
6723 struct ecore_hwfn *p_hwfn;
6724 struct ecore_rss_params *rss;
6725 qlnx_host_t *ha = (qlnx_host_t *)cdev;
6726 struct qlnx_fastpath *fp;
6728 memset(&sp_params, 0, sizeof(sp_params));
6729 /* Translate protocol params into sp params */
6730 sp_params.vport_id = params->vport_id;
6732 sp_params.update_vport_active_rx_flg =
6733 params->update_vport_active_rx_flg;
6734 sp_params.vport_active_rx_flg = params->vport_active_rx_flg;
6736 sp_params.update_vport_active_tx_flg =
6737 params->update_vport_active_tx_flg;
6738 sp_params.vport_active_tx_flg = params->vport_active_tx_flg;
6740 sp_params.update_inner_vlan_removal_flg =
6741 params->update_inner_vlan_removal_flg;
6742 sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg;
6744 sp_params.sge_tpa_params = params->sge_tpa_params;
6746 /* RSS is a bit tricky, since the upper layer isn't aware of hwfns.
6747 * For CMT we need to re-fix the RSS indirection values per engine.
6749 if (params->rss_params->update_rss_config)
6750 sp_params.rss_params = params->rss_params;
6752 sp_params.rss_params = NULL;
6754 for_each_hwfn(cdev, i) {
6756 p_hwfn = &cdev->hwfns[i];
6758 if ((cdev->num_hwfns > 1) &&
6759 params->rss_params->update_rss_config &&
6760 params->rss_params->rss_enable) {
6762 rss = params->rss_params;
6764 for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE; j++) {
6766 fp_index = ((cdev->num_hwfns * j) + i) %
6769 fp = &ha->fp_array[fp_index];
6770 rss->rss_ind_table[j] = fp->rxq->handle;
6773 for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE;) {
6774 QL_DPRINT3(ha, "%p %p %p %p %p %p %p %p \n",
6775 rss->rss_ind_table[j],
6776 rss->rss_ind_table[j+1],
6777 rss->rss_ind_table[j+2],
6778 rss->rss_ind_table[j+3],
6779 rss->rss_ind_table[j+4],
6780 rss->rss_ind_table[j+5],
6781 rss->rss_ind_table[j+6],
6782 rss->rss_ind_table[j+7]);
6787 sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
6789 QL_DPRINT1(ha, "Update sp vport ID=%d\n", params->vport_id);
6791 rc = ecore_sp_vport_update(p_hwfn, &sp_params,
6792 ECORE_SPQ_MODE_EBLOCK, NULL);
6794 QL_DPRINT1(ha, "Failed to update VPORT\n");
6798 QL_DPRINT2(ha, "Updated V-PORT %d: tx_active_flag %d, \
6799 rx_active_flag %d [tx_update %d], [rx_update %d]\n",
6800 params->vport_id, params->vport_active_tx_flg,
6801 params->vport_active_rx_flg,
6802 params->update_vport_active_tx_flg,
6803 params->update_vport_active_rx_flg);
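/*
 * Re-post the Rx buffer that was just consumed: copy its BD from the
 * consumer to the producer slot of the BD ring (and mirror the software
 * ring entry) so the hardware never runs out of buffers when a
 * replacement mbuf cannot be allocated.
 */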
6810 qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq)
6812 struct eth_rx_bd *rx_bd_cons =
6813 ecore_chain_consume(&rxq->rx_bd_ring);
6814 struct eth_rx_bd *rx_bd_prod =
6815 ecore_chain_produce(&rxq->rx_bd_ring);
6816 struct sw_rx_data *sw_rx_data_cons =
6817 &rxq->sw_rx_ring[rxq->sw_rx_cons];
6818 struct sw_rx_data *sw_rx_data_prod =
6819 &rxq->sw_rx_ring[rxq->sw_rx_prod];
6821 sw_rx_data_prod->data = sw_rx_data_cons->data;
6822 memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd));
6824 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
6825 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
6831 qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, struct qlnx_rx_queue *rxq)
6837 struct eth_rx_prod_data rx_prod_data;
6841 bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
6842 cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
6844 /* Update producers */
6845 rx_prods.rx_prod_data.bd_prod = htole16(bd_prod);
6846 rx_prods.rx_prod_data.cqe_prod = htole16(cqe_prod);
6848 /* Make sure that the BD and SGE data is updated before updating the
6849 * producers since FW might read the BD/SGE right after the producer
6854 internal_ram_wr(p_hwfn, rxq->hw_rxq_prod_addr,
6855 sizeof(rx_prods), &rx_prods.data32);
6857 /* mmiowb is needed to synchronize doorbell writes from more than one
6858 * processor. It guarantees that the write arrives to the device before
6859 * the napi lock is released and another qlnx_poll is called (possibly
6860 * on another CPU). Without this barrier, the next doorbell can bypass
6861 * this doorbell. This is applicable to IA64/Altix systems.
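/*
 * Default Toeplitz RSS hash key (the well-known 40-byte key from the
 * Microsoft RSS specification), packed as ten big-endian 32-bit words.
 */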
6868 static uint32_t qlnx_hash_key[] = {
6869 ((0x6d << 24)|(0x5a << 16)|(0x56 << 8)|0xda),
6870 ((0x25 << 24)|(0x5b << 16)|(0x0e << 8)|0xc2),
6871 ((0x41 << 24)|(0x67 << 16)|(0x25 << 8)|0x3d),
6872 ((0x43 << 24)|(0xa3 << 16)|(0x8f << 8)|0xb0),
6873 ((0xd0 << 24)|(0xca << 16)|(0x2b << 8)|0xcb),
6874 ((0xae << 24)|(0x7b << 16)|(0x30 << 8)|0xb4),
6875 ((0x77 << 24)|(0xcb << 16)|(0x2d << 8)|0xa3),
6876 ((0x80 << 24)|(0x30 << 16)|(0xf2 << 8)|0x0c),
6877 ((0x6a << 24)|(0x42 << 16)|(0xb7 << 8)|0x3b),
6878 ((0xbe << 24)|(0xac << 16)|(0x01 << 8)|0xfa)};
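/*
 * Bring up the data path: start the vport, then for every RSS queue
 * start the FW Rx and Tx queues, wire up the producer/consumer pointers
 * and the Tx doorbell, program the RSS indirection table and key (when
 * more than one RSS queue exists), and finally send the vport-update
 * ramrod that enables Rx/Tx, inner-VLAN removal and optionally HW
 * LRO/TPA.
 */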
6881 qlnx_start_queues(qlnx_host_t *ha)
6883 int rc, tc, i, vport_id = 0,
6884 drop_ttl0_flg = 1, vlan_removal_en = 1,
6885 tx_switching = 0, hw_lro_enable = 0;
6886 struct ecore_dev *cdev = &ha->cdev;
6887 struct ecore_rss_params *rss_params = &ha->rss_params;
6888 struct qlnx_update_vport_params vport_update_params;
6890 struct ecore_hwfn *p_hwfn;
6891 struct ecore_sge_tpa_params tpa_params;
6892 struct ecore_queue_start_common_params qparams;
6893 struct qlnx_fastpath *fp;
6897 QL_DPRINT1(ha, "Num RSS = %d\n", ha->num_rss);
6900 QL_DPRINT1(ha, "Cannot update V-VPORT as active as there"
6901 " are no Rx queues\n");
6905 #ifndef QLNX_SOFT_LRO
6906 hw_lro_enable = ifp->if_capenable & IFCAP_LRO;
6907 #endif /* #ifndef QLNX_SOFT_LRO */
6909 rc = qlnx_start_vport(cdev, vport_id, ifp->if_mtu, drop_ttl0_flg,
6910 vlan_removal_en, tx_switching, hw_lro_enable);
6913 QL_DPRINT1(ha, "Start V-PORT failed %d\n", rc);
6917 QL_DPRINT2(ha, "Start vport ramrod passed, "
6918 "vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
6919 vport_id, (int)(ifp->if_mtu + 0xe), vlan_removal_en);
6922 struct ecore_rxq_start_ret_params rx_ret_params;
6923 struct ecore_txq_start_ret_params tx_ret_params;
6925 fp = &ha->fp_array[i];
6926 p_hwfn = &cdev->hwfns[(fp->rss_id % cdev->num_hwfns)];
6928 bzero(&qparams, sizeof(struct ecore_queue_start_common_params));
6929 bzero(&rx_ret_params,
6930 sizeof (struct ecore_rxq_start_ret_params));
6932 qparams.queue_id = i;
6933 qparams.vport_id = vport_id;
6934 qparams.stats_id = vport_id;
6935 qparams.p_sb = fp->sb_info;
6936 qparams.sb_idx = RX_PI;
6939 rc = ecore_eth_rx_queue_start(p_hwfn,
6940 p_hwfn->hw_info.opaque_fid,
6942 fp->rxq->rx_buf_size, /* bd_max_bytes */
6943 /* bd_chain_phys_addr */
6944 fp->rxq->rx_bd_ring.p_phys_addr,
6946 ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring),
6948 ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring),
6952 QL_DPRINT1(ha, "Start RXQ #%d failed %d\n", i, rc);
6956 fp->rxq->hw_rxq_prod_addr = rx_ret_params.p_prod;
6957 fp->rxq->handle = rx_ret_params.p_handle;
6958 fp->rxq->hw_cons_ptr =
6959 &fp->sb_info->sb_virt->pi_array[RX_PI];
6961 qlnx_update_rx_prod(p_hwfn, fp->rxq);
6963 for (tc = 0; tc < ha->num_tc; tc++) {
6964 struct qlnx_tx_queue *txq = fp->txq[tc];
6967 sizeof(struct ecore_queue_start_common_params));
6968 bzero(&tx_ret_params,
6969 sizeof (struct ecore_txq_start_ret_params));
6971 qparams.queue_id = txq->index / cdev->num_hwfns;
6972 qparams.vport_id = vport_id;
6973 qparams.stats_id = vport_id;
6974 qparams.p_sb = fp->sb_info;
6975 qparams.sb_idx = TX_PI(tc);
6977 rc = ecore_eth_tx_queue_start(p_hwfn,
6978 p_hwfn->hw_info.opaque_fid,
6980 /* bd_chain_phys_addr */
6981 ecore_chain_get_pbl_phys(&txq->tx_pbl),
6982 ecore_chain_get_page_cnt(&txq->tx_pbl),
6986 QL_DPRINT1(ha, "Start TXQ #%d failed %d\n",
6991 txq->doorbell_addr = tx_ret_params.p_doorbell;
6992 txq->handle = tx_ret_params.p_handle;
6995 &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
6996 SET_FIELD(txq->tx_db.data.params,
6997 ETH_DB_DATA_DEST, DB_DEST_XCM);
6998 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
7000 SET_FIELD(txq->tx_db.data.params,
7001 ETH_DB_DATA_AGG_VAL_SEL,
7002 DQ_XCM_ETH_TX_BD_PROD_CMD);
7004 txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
7008 /* Fill struct with RSS params */
7009 if (ha->num_rss > 1) {
7011 rss_params->update_rss_config = 1;
7012 rss_params->rss_enable = 1;
7013 rss_params->update_rss_capabilities = 1;
7014 rss_params->update_rss_ind_table = 1;
7015 rss_params->update_rss_key = 1;
7016 rss_params->rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 |
7017 ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP;
7018 rss_params->rss_table_size_log = 7; /* 2^7 = 128 */
7020 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
7021 fp = &ha->fp_array[(i % ha->num_rss)];
7022 rss_params->rss_ind_table[i] = fp->rxq->handle;
7025 for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
7026 rss_params->rss_key[i] = (__le32)qlnx_hash_key[i];
7029 memset(rss_params, 0, sizeof(*rss_params));
7033 /* Prepare and send the vport enable */
7034 memset(&vport_update_params, 0, sizeof(vport_update_params));
7035 vport_update_params.vport_id = vport_id;
7036 vport_update_params.update_vport_active_tx_flg = 1;
7037 vport_update_params.vport_active_tx_flg = 1;
7038 vport_update_params.update_vport_active_rx_flg = 1;
7039 vport_update_params.vport_active_rx_flg = 1;
7040 vport_update_params.rss_params = rss_params;
7041 vport_update_params.update_inner_vlan_removal_flg = 1;
7042 vport_update_params.inner_vlan_removal_flg = 1;
7044 if (hw_lro_enable) {
7045 memset(&tpa_params, 0, sizeof (struct ecore_sge_tpa_params));
7047 tpa_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;
7049 tpa_params.update_tpa_en_flg = 1;
7050 tpa_params.tpa_ipv4_en_flg = 1;
7051 tpa_params.tpa_ipv6_en_flg = 1;
7053 tpa_params.update_tpa_param_flg = 1;
7054 tpa_params.tpa_pkt_split_flg = 0;
7055 tpa_params.tpa_hdr_data_split_flg = 0;
7056 tpa_params.tpa_gro_consistent_flg = 0;
7057 tpa_params.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
7058 tpa_params.tpa_max_size = (uint16_t)(-1);
7059 tpa_params.tpa_min_size_to_start = ifp->if_mtu/2;
7060 tpa_params.tpa_min_size_to_cont = ifp->if_mtu/2;
7062 vport_update_params.sge_tpa_params = &tpa_params;
7065 rc = qlnx_update_vport(cdev, &vport_update_params);
7067 QL_DPRINT1(ha, "Update V-PORT failed %d\n", rc);
7075 qlnx_drain_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
7076 struct qlnx_tx_queue *txq)
7078 uint16_t hw_bd_cons;
7079 uint16_t ecore_cons_idx;
7081 QL_DPRINT2(ha, "enter\n");
7083 hw_bd_cons = le16toh(*txq->hw_cons_ptr);
7085 while (hw_bd_cons !=
7086 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
7088 mtx_lock(&fp->tx_mtx);
7090 (void)qlnx_tx_int(ha, fp, txq);
7092 mtx_unlock(&fp->tx_mtx);
7094 qlnx_mdelay(__func__, 2);
7096 hw_bd_cons = le16toh(*txq->hw_cons_ptr);
7099 QL_DPRINT2(ha, "[%d, %d]: done\n", fp->rss_id, txq->index);
7105 qlnx_stop_queues(qlnx_host_t *ha)
7107 struct qlnx_update_vport_params vport_update_params;
7108 struct ecore_dev *cdev;
7109 struct qlnx_fastpath *fp;
7114 /* Disable the vport */
7116 memset(&vport_update_params, 0, sizeof(vport_update_params));
7118 vport_update_params.vport_id = 0;
7119 vport_update_params.update_vport_active_tx_flg = 1;
7120 vport_update_params.vport_active_tx_flg = 0;
7121 vport_update_params.update_vport_active_rx_flg = 1;
7122 vport_update_params.vport_active_rx_flg = 0;
7123 vport_update_params.rss_params = &ha->rss_params;
7124 vport_update_params.rss_params->update_rss_config = 0;
7125 vport_update_params.rss_params->rss_enable = 0;
7126 vport_update_params.update_inner_vlan_removal_flg = 0;
7127 vport_update_params.inner_vlan_removal_flg = 0;
7129 QL_DPRINT1(ha, "Update vport ID= %d\n", vport_update_params.vport_id);
7131 rc = qlnx_update_vport(cdev, &vport_update_params);
7133 QL_DPRINT1(ha, "Failed to update vport\n");
7137 /* Flush Tx queues. If needed, request drain from MCP */
7139 fp = &ha->fp_array[i];
7141 for (tc = 0; tc < ha->num_tc; tc++) {
7142 struct qlnx_tx_queue *txq = fp->txq[tc];
7144 rc = qlnx_drain_txq(ha, fp, txq);
7150 /* Stop all Queues in reverse order */
7151 for (i = ha->num_rss - 1; i >= 0; i--) {
7153 struct ecore_hwfn *p_hwfn = &cdev->hwfns[(i % cdev->num_hwfns)];
7155 fp = &ha->fp_array[i];
7157 /* Stop the Tx Queue(s)*/
7158 for (tc = 0; tc < ha->num_tc; tc++) {
7161 tx_queue_id = tc * ha->num_rss + i;
7162 rc = ecore_eth_tx_queue_stop(p_hwfn,
7163 fp->txq[tc]->handle);
7166 QL_DPRINT1(ha, "Failed to stop TXQ #%d\n",
7172 /* Stop the Rx Queue*/
7173 rc = ecore_eth_rx_queue_stop(p_hwfn, fp->rxq->handle, false,
7176 QL_DPRINT1(ha, "Failed to stop RXQ #%d\n", i);
7181 /* Stop the vport */
7182 for_each_hwfn(cdev, i) {
7184 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
7186 rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 0);
7189 QL_DPRINT1(ha, "Failed to stop VPORT\n");
7198 qlnx_set_ucast_rx_mac(qlnx_host_t *ha,
7199 enum ecore_filter_opcode opcode,
7200 unsigned char mac[ETH_ALEN])
7202 struct ecore_filter_ucast ucast;
7203 struct ecore_dev *cdev;
7208 bzero(&ucast, sizeof(struct ecore_filter_ucast));
7210 ucast.opcode = opcode;
7211 ucast.type = ECORE_FILTER_MAC;
7212 ucast.is_rx_filter = 1;
7213 ucast.vport_to_add_to = 0;
7214 memcpy(&ucast.mac[0], mac, ETH_ALEN);
7216 rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);
7222 qlnx_remove_all_ucast_mac(qlnx_host_t *ha)
7224 struct ecore_filter_ucast ucast;
7225 struct ecore_dev *cdev;
7228 bzero(&ucast, sizeof(struct ecore_filter_ucast));
7230 ucast.opcode = ECORE_FILTER_REPLACE;
7231 ucast.type = ECORE_FILTER_MAC;
7232 ucast.is_rx_filter = 1;
7236 rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);
7242 qlnx_remove_all_mcast_mac(qlnx_host_t *ha)
7244 struct ecore_filter_mcast *mcast;
7245 struct ecore_dev *cdev;
7250 mcast = &ha->ecore_mcast;
7251 bzero(mcast, sizeof(struct ecore_filter_mcast));
7253 mcast->opcode = ECORE_FILTER_REMOVE;
7255 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
7257 if (ha->mcast[i].addr[0] || ha->mcast[i].addr[1] ||
7258 ha->mcast[i].addr[2] || ha->mcast[i].addr[3] ||
7259 ha->mcast[i].addr[4] || ha->mcast[i].addr[5]) {
7261 memcpy(&mcast->mac[i][0], &ha->mcast[i].addr[0], ETH_ALEN);
7262 mcast->num_mc_addrs++;
7265 mcast = &ha->ecore_mcast;
7267 rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);
7269 bzero(ha->mcast, (sizeof(qlnx_mcast_t) * QLNX_MAX_NUM_MULTICAST_ADDRS));
7276 qlnx_clean_filters(qlnx_host_t *ha)
7280 /* Remove all unicast macs */
7281 rc = qlnx_remove_all_ucast_mac(ha);
7285 /* Remove all multicast macs */
7286 rc = qlnx_remove_all_mcast_mac(ha);
7290 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_FLUSH, ha->primary_mac);
7296 qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter)
7298 struct ecore_filter_accept_flags accept;
7300 struct ecore_dev *cdev;
7304 bzero(&accept, sizeof(struct ecore_filter_accept_flags));
7306 accept.update_rx_mode_config = 1;
7307 accept.rx_accept_filter = filter;
7309 accept.update_tx_mode_config = 1;
7310 accept.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
7311 ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST;
7313 rc = ecore_filter_accept_cmd(cdev, 0, accept, false, false,
7314 ECORE_SPQ_MODE_CB, NULL);
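/*
 * Program the default Rx mode: re-install the primary unicast MAC,
 * clear the multicast table, and build the accept filter (matched
 * unicast/multicast plus broadcast, with unmatched traffic also
 * accepted when running as a VF).
 */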
7320 qlnx_set_rx_mode(qlnx_host_t *ha)
7325 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, ha->primary_mac);
7329 rc = qlnx_remove_all_mcast_mac(ha);
7333 filter = ECORE_ACCEPT_UCAST_MATCHED |
7334 ECORE_ACCEPT_MCAST_MATCHED |
7337 if (qlnx_vf_device(ha) == 0) {
7338 filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
7339 filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
7341 ha->filter = filter;
7343 rc = qlnx_set_rx_accept_filter(ha, filter);
7349 qlnx_set_link(qlnx_host_t *ha, bool link_up)
7352 struct ecore_dev *cdev;
7353 struct ecore_hwfn *hwfn;
7354 struct ecore_ptt *ptt;
7356 if (qlnx_vf_device(ha) == 0)
7361 for_each_hwfn(cdev, i) {
7363 hwfn = &cdev->hwfns[i];
7365 ptt = ecore_ptt_acquire(hwfn);
7369 rc = ecore_mcp_set_link(hwfn, ptt, link_up);
7371 ecore_ptt_release(hwfn, ptt);
7379 #if __FreeBSD_version >= 1100000
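/*
 * if_get_counter() method: derive the ifnet counters from the hardware
 * statistics gathered into ha->hw_stats by qlnx_timer(); anything not
 * tracked in hardware falls through to if_get_counter_default().
 */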
7381 qlnx_get_counter(if_t ifp, ift_counter cnt)
7386 ha = (qlnx_host_t *)if_getsoftc(ifp);
7390 case IFCOUNTER_IPACKETS:
7391 count = ha->hw_stats.common.rx_ucast_pkts +
7392 ha->hw_stats.common.rx_mcast_pkts +
7393 ha->hw_stats.common.rx_bcast_pkts;
7396 case IFCOUNTER_IERRORS:
7397 count = ha->hw_stats.common.rx_crc_errors +
7398 ha->hw_stats.common.rx_align_errors +
7399 ha->hw_stats.common.rx_oversize_packets +
7400 ha->hw_stats.common.rx_undersize_packets;
7403 case IFCOUNTER_OPACKETS:
7404 count = ha->hw_stats.common.tx_ucast_pkts +
7405 ha->hw_stats.common.tx_mcast_pkts +
7406 ha->hw_stats.common.tx_bcast_pkts;
7409 case IFCOUNTER_OERRORS:
7410 count = ha->hw_stats.common.tx_err_drop_pkts;
7413 case IFCOUNTER_COLLISIONS:
7416 case IFCOUNTER_IBYTES:
7417 count = ha->hw_stats.common.rx_ucast_bytes +
7418 ha->hw_stats.common.rx_mcast_bytes +
7419 ha->hw_stats.common.rx_bcast_bytes;
7422 case IFCOUNTER_OBYTES:
7423 count = ha->hw_stats.common.tx_ucast_bytes +
7424 ha->hw_stats.common.tx_mcast_bytes +
7425 ha->hw_stats.common.tx_bcast_bytes;
7428 case IFCOUNTER_IMCASTS:
7429 count = ha->hw_stats.common.rx_mcast_pkts;
7432 case IFCOUNTER_OMCASTS:
7433 count = ha->hw_stats.common.tx_mcast_pkts;
7436 case IFCOUNTER_IQDROPS:
7437 case IFCOUNTER_OQDROPS:
7438 case IFCOUNTER_NOPROTO:
7441 return (if_get_counter_default(ifp, cnt));
7449 qlnx_timer(void *arg)
7453 ha = (qlnx_host_t *)arg;
7455 if (ha->error_recovery) {
7456 ha->error_recovery = 0;
7457 taskqueue_enqueue(ha->err_taskqueue, &ha->err_task);
7461 ecore_get_vport_stats(&ha->cdev, &ha->hw_stats);
7463 if (ha->storm_stats_gather)
7464 qlnx_sample_storm_stats(ha);
7466 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
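/*
 * Bring the port up: allocate the queue/status-block arrays and all
 * per-fastpath memory, hook and CPU-bind one interrupt per RSS queue,
 * start the vport and queues, program the Rx filters, and request
 * link-up with the current configuration.
 */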
7472 qlnx_load(qlnx_host_t *ha)
7476 struct ecore_dev *cdev;
7482 QL_DPRINT2(ha, "enter\n");
7484 rc = qlnx_alloc_mem_arrays(ha);
7486 goto qlnx_load_exit0;
7490 rc = qlnx_alloc_mem_load(ha);
7492 goto qlnx_load_exit1;
7494 QL_DPRINT2(ha, "Allocated %d RSS queues on %d TC/s\n",
7495 ha->num_rss, ha->num_tc);
7497 for (i = 0; i < ha->num_rss; i++) {
7499 if ((rc = bus_setup_intr(dev, ha->irq_vec[i].irq,
7500 (INTR_TYPE_NET | INTR_MPSAFE),
7501 NULL, qlnx_fp_isr, &ha->irq_vec[i],
7502 &ha->irq_vec[i].handle))) {
7504 QL_DPRINT1(ha, "could not setup interrupt\n");
7505 goto qlnx_load_exit2;
7508 QL_DPRINT2(ha, "rss_id = %d irq_rid %d \
7509 irq %p handle %p\n", i,
7510 ha->irq_vec[i].irq_rid,
7511 ha->irq_vec[i].irq, ha->irq_vec[i].handle);
7513 bus_bind_intr(dev, ha->irq_vec[i].irq, (i % mp_ncpus));
7516 rc = qlnx_start_queues(ha);
7518 goto qlnx_load_exit2;
7520 QL_DPRINT2(ha, "Start VPORT, RXQ and TXQ succeeded\n");
7522 /* Add primary mac and set Rx filters */
7523 rc = qlnx_set_rx_mode(ha);
7525 goto qlnx_load_exit2;
7527 /* Ask for link-up using current configuration */
7528 qlnx_set_link(ha, true);
7530 if (qlnx_vf_device(ha) == 0)
7531 qlnx_link_update(&ha->cdev.hwfns[0]);
7533 ha->state = QLNX_STATE_OPEN;
7535 bzero(&ha->hw_stats, sizeof(struct ecore_eth_stats));
7537 if (ha->flags.callout_init)
7538 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
7540 goto qlnx_load_exit0;
7543 qlnx_free_mem_load(ha);
7549 QL_DPRINT2(ha, "exit [%d]\n", rc);
7554 qlnx_drain_soft_lro(qlnx_host_t *ha)
7556 #ifdef QLNX_SOFT_LRO
7564 if (ifp->if_capenable & IFCAP_LRO) {
7566 for (i = 0; i < ha->num_rss; i++) {
7568 struct qlnx_fastpath *fp = &ha->fp_array[i];
7569 struct lro_ctrl *lro;
7571 lro = &fp->rxq->lro;
7573 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
7575 tcp_lro_flush_all(lro);
7578 struct lro_entry *queued;
7580 while ((!SLIST_EMPTY(&lro->lro_active))){
7581 queued = SLIST_FIRST(&lro->lro_active);
7582 SLIST_REMOVE_HEAD(&lro->lro_active, next);
7583 tcp_lro_flush(lro, queued);
7586 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
7591 #endif /* #ifdef QLNX_SOFT_LRO */
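/*
 * Tear the port down in the reverse order of qlnx_load(): drop the
 * link, flush the filters, stop the queues and fastpath, unhook the
 * fastpath interrupts, drain the taskqueues and soft LRO, and release
 * the per-fastpath memory.
 */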
7597 qlnx_unload(qlnx_host_t *ha)
7599 struct ecore_dev *cdev;
7606 QL_DPRINT2(ha, "enter\n");
7607 QL_DPRINT1(ha, " QLNX STATE = %d\n",ha->state);
7609 if (ha->state == QLNX_STATE_OPEN) {
7611 qlnx_set_link(ha, false);
7612 qlnx_clean_filters(ha);
7613 qlnx_stop_queues(ha);
7614 ecore_hw_stop_fastpath(cdev);
7616 for (i = 0; i < ha->num_rss; i++) {
7617 if (ha->irq_vec[i].handle) {
7618 (void)bus_teardown_intr(dev,
7620 ha->irq_vec[i].handle);
7621 ha->irq_vec[i].handle = NULL;
7625 qlnx_drain_fp_taskqueues(ha);
7626 qlnx_drain_soft_lro(ha);
7627 qlnx_free_mem_load(ha);
7630 if (ha->flags.callout_init)
7631 callout_drain(&ha->qlnx_callout);
7633 qlnx_mdelay(__func__, 1000);
7635 ha->state = QLNX_STATE_CLOSED;
7637 QL_DPRINT2(ha, "exit\n");
7642 qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
7645 struct ecore_hwfn *p_hwfn;
7646 struct ecore_ptt *p_ptt;
7648 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
7650 p_hwfn = &ha->cdev.hwfns[hwfn_index];
7651 p_ptt = ecore_ptt_acquire(p_hwfn);
7654 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
7658 rval = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);
7660 if (rval == DBG_STATUS_OK)
7663 QL_DPRINT1(ha, "ecore_dbg_grc_get_dump_buf_size failed"
7667 ecore_ptt_release(p_hwfn, p_ptt);
7673 qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
7676 struct ecore_hwfn *p_hwfn;
7677 struct ecore_ptt *p_ptt;
7679 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
7681 p_hwfn = &ha->cdev.hwfns[hwfn_index];
7682 p_ptt = ecore_ptt_acquire(p_hwfn);
7685 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
7689 rval = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);
7691 if (rval == DBG_STATUS_OK)
7694 QL_DPRINT1(ha, "ecore_dbg_idle_chk_get_dump_buf_size failed"
7698 ecore_ptt_release(p_hwfn, p_ptt);
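/*
 * Sample one snapshot of the per-storm-processor (X/Y/P/T/M/U) active,
 * stall, sleeping and inactive cycle counters from SEM fast memory into
 * ha->storm_stats[]; sampling stops once the per-hwfn sample buffer is
 * full.
 */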
static void
qlnx_sample_storm_stats(qlnx_host_t *ha)
{
	int			i, index;
	struct ecore_dev	*cdev;
	qlnx_storm_stats_t	*s_stats;
	uint32_t		reg;
	struct ecore_ptt	*p_ptt;
	struct ecore_hwfn	*hwfn;

	if (ha->storm_stats_index >= QLNX_STORM_STATS_SAMPLES_PER_HWFN) {
		ha->storm_stats_gather = 0;
		return;
	}

	cdev = &ha->cdev;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];

		p_ptt = ecore_ptt_acquire(hwfn);
		if (!p_ptt)
			return;

		index = ha->storm_stats_index +
				(i * QLNX_STORM_STATS_SAMPLES_PER_HWFN);

		s_stats = &ha->storm_stats[index];

		/* XSTORM */
		reg = XSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
		s_stats->xstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = XSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
		s_stats->xstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = XSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
		s_stats->xstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = XSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
		s_stats->xstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

		/* YSTORM */
		reg = YSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
		s_stats->ystorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = YSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
		s_stats->ystorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = YSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
		s_stats->ystorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = YSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
		s_stats->ystorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

		/* PSTORM */
		reg = PSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
		s_stats->pstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = PSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
		s_stats->pstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = PSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
		s_stats->pstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = PSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
		s_stats->pstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

		/* TSTORM */
		reg = TSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
		s_stats->tstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = TSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
		s_stats->tstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = TSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
		s_stats->tstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = TSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
		s_stats->tstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

		/* MSTORM */
		reg = MSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
		s_stats->mstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = MSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
		s_stats->mstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = MSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
		s_stats->mstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = MSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
		s_stats->mstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

		/* USTORM */
		reg = USEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
		s_stats->ustorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = USEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
		s_stats->ustorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = USEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
		s_stats->ustorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = USEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
		s_stats->ustorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

		ecore_ptt_release(hwfn, p_ptt);
	}

	ha->storm_stats_index++;

	return;
}
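/*
 * Illustrative reduction of a sample pair to a stall percentage (not part
 * of the driver), assuming the SEM_FAST cycle counters are free-running:
 * meaningful figures come from the delta between two successive samples of
 * the same hwfn. The helper name is hypothetical.
 */
#if 0
static uint32_t
qlnx_xstorm_stall_pct(qlnx_storm_stats_t *prev, qlnx_storm_stats_t *cur)
{
	uint32_t active, stall;

	active = cur->xstorm_active_cycles - prev->xstorm_active_cycles;
	stall = cur->xstorm_stall_cycles - prev->xstorm_stall_cycles;

	if ((active + stall) == 0)
		return (0);

	return ((stall * 100) / (active + stall));
}
#endif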
/*
 * Name: qlnx_dump_buf8
 * Function: dumps a buffer as bytes
 */
static void
qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, uint32_t len)
{
	device_t	dev;
	uint32_t	i = 0;
	uint32_t	j;
	int		n;
	char		line[64];
	uint8_t		*buf;

	dev = ha->pci_dev;
	buf = dbuf;

	device_printf(dev, "%s: %s 0x%x dump start\n", __func__, msg, len);

	/* Print all full 16-byte rows. */
	while (len >= 16) {
		device_printf(dev, "0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x %02x"
			" %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3],
			buf[4], buf[5], buf[6], buf[7],
			buf[8], buf[9], buf[10], buf[11],
			buf[12], buf[13], buf[14], buf[15]);

		i += 16;
		len -= 16;
		buf += 16;
	}

	/* Print the 1-15 byte remainder as one short row in the same format. */
	if (len) {
		n = 0;
		for (j = 0; j < len; j++)
			n += snprintf(line + n, sizeof(line) - n,
				" %02x", buf[j]);
		device_printf(dev, "0x%08x:%s\n", i, line);
	}

	device_printf(dev, "%s: %s dump end\n", __func__, msg);

	return;
}
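/*
 * Typical (illustrative) call site for the dump helper, e.g. to hex-dump
 * the start of a received frame while debugging:
 *
 *	qlnx_dump_buf8(ha, "rx frame", mtod(mp, void *), mp->m_len);
 *
 * Any length works: full 16-byte rows are printed by the loop above and
 * the remainder is printed on a final short row.
 */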
#ifdef CONFIG_ECORE_SRIOV

static void
__qlnx_osal_iov_vf_cleanup(struct ecore_hwfn *p_hwfn, uint8_t rel_vf_id)
{
	struct ecore_public_vf_info *vf_info;

	vf_info = ecore_iov_get_public_vf_info(p_hwfn, rel_vf_id, false);

	if (!vf_info)
		return;

	/* Clear the VF mac */
	memset(vf_info->forced_mac, 0, ETH_ALEN);

	vf_info->forced_vlan = 0;

	return;
}

void
qlnx_osal_iov_vf_cleanup(void *p_hwfn, uint8_t relative_vf_id)
{
	__qlnx_osal_iov_vf_cleanup(p_hwfn, relative_vf_id);

	return;
}
static int
__qlnx_iov_chk_ucast(struct ecore_hwfn *p_hwfn, int vfid,
	struct ecore_filter_ucast *params)
{
	struct ecore_public_vf_info *vf;

	if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) {
		QL_DPRINT1(((qlnx_host_t *)p_hwfn->p_dev),
			"VF[%d] vport not initialized\n", vfid);
		return ECORE_INVAL;
	}

	vf = ecore_iov_get_public_vf_info(p_hwfn, vfid, true);
	if (!vf)
		return ECORE_INVAL;

	/* No real decision to make; Store the configured MAC */
	if (params->type == ECORE_FILTER_MAC ||
	    params->type == ECORE_FILTER_MAC_VLAN)
		memcpy(params->mac, vf->forced_mac, ETH_ALEN);

	return 0;
}

int
qlnx_iov_chk_ucast(void *p_hwfn, int vfid, void *params)
{
	return (__qlnx_iov_chk_ucast(p_hwfn, vfid, params));
}
static int
__qlnx_iov_update_vport(struct ecore_hwfn *hwfn, uint8_t vfid,
	struct ecore_sp_vport_update_params *params, uint16_t *tlvs)
{
	uint8_t mask;
	struct ecore_filter_accept_flags *flags;

	if (!ecore_iov_vf_has_vport_instance(hwfn, vfid)) {
		QL_DPRINT1(((qlnx_host_t *)hwfn->p_dev),
			"VF[%d] vport not initialized\n", vfid);
		return ECORE_INVAL;
	}

	/* Untrusted VFs can't even be trusted to know that fact.
	 * Simply indicate everything is configured fine, and trace
	 * configuration 'behind their back'.
	 */
	mask = ECORE_ACCEPT_UCAST_UNMATCHED | ECORE_ACCEPT_MCAST_UNMATCHED;

	flags = &params->accept_flags;

	if (!(*tlvs & BIT(ECORE_IOV_VP_UPDATE_ACCEPT_PARAM)))
		return 0;

	/* Strip the unmatched-accept bits the VF is not allowed to request. */
	flags->rx_accept_filter &= ~mask;
	flags->tx_accept_filter &= ~mask;

	return 0;
}

int
qlnx_iov_update_vport(void *hwfn, uint8_t vfid, void *params, uint16_t *tlvs)
{
	return (__qlnx_iov_update_vport(hwfn, vfid, params, tlvs));
}
static int
qlnx_find_hwfn_index(struct ecore_hwfn *p_hwfn)
{
	int			i;
	struct ecore_dev	*cdev;

	cdev = p_hwfn->p_dev;

	for (i = 0; i < cdev->num_hwfns; i++) {
		if (&cdev->hwfns[i] == p_hwfn)
			break;
	}

	if (i >= cdev->num_hwfns)
		return (-1);

	return (i);
}
static int
__qlnx_pf_vf_msg(struct ecore_hwfn *p_hwfn, uint16_t rel_vf_id)
{
	qlnx_host_t	*ha = (qlnx_host_t *)p_hwfn->p_dev;
	int		i;

	QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p rel_vf_id = %d\n",
		ha, p_hwfn->p_dev, p_hwfn, rel_vf_id);

	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
		return (-1);

	if (ha->sriov_task[i].pf_taskqueue != NULL) {
		atomic_testandset_32(&ha->sriov_task[i].flags,
			QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG);

		taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
			&ha->sriov_task[i].pf_task);
	}

	return (ECORE_SUCCESS);
}

int
qlnx_pf_vf_msg(void *p_hwfn, uint16_t relative_vf_id)
{
	return (__qlnx_pf_vf_msg(p_hwfn, relative_vf_id));
}
static void
__qlnx_vf_flr_update(struct ecore_hwfn *p_hwfn)
{
	qlnx_host_t	*ha = (qlnx_host_t *)p_hwfn->p_dev;
	int		i;

	if (!ha->sriov_initialized)
		return;

	QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p \n",
		ha, p_hwfn->p_dev, p_hwfn);

	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
		return;

	if (ha->sriov_task[i].pf_taskqueue != NULL) {
		atomic_testandset_32(&ha->sriov_task[i].flags,
			QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE);

		taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
			&ha->sriov_task[i].pf_task);
	}

	return;
}

void
qlnx_vf_flr_update(void *p_hwfn)
{
	__qlnx_vf_flr_update(p_hwfn);

	return;
}
#ifndef QLNX_VF

static void
qlnx_vf_bulleting_update(struct ecore_hwfn *p_hwfn)
{
	qlnx_host_t	*ha = (qlnx_host_t *)p_hwfn->p_dev;
	int		i;

	QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p \n",
		ha, p_hwfn->p_dev, p_hwfn);

	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
		return;

	QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p i = %d\n",
		ha, p_hwfn->p_dev, p_hwfn, i);

	if (ha->sriov_task[i].pf_taskqueue != NULL) {
		atomic_testandset_32(&ha->sriov_task[i].flags,
			QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE);

		taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
			&ha->sriov_task[i].pf_task);
	}

	return;
}
static int
qlnx_initialize_sriov(qlnx_host_t *ha)
{
	device_t	dev;
	nvlist_t	*pf_schema, *vf_schema;
	int		iov_error;

	dev = ha->pci_dev;

	pf_schema = pci_iov_schema_alloc_node();
	vf_schema = pci_iov_schema_alloc_node();

	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
		IOV_SCHEMA_HASDEFAULT, FALSE);
	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
		IOV_SCHEMA_HASDEFAULT, FALSE);
	pci_iov_schema_add_uint16(vf_schema, "num-queues",
		IOV_SCHEMA_HASDEFAULT, 1);

	iov_error = pci_iov_attach(dev, pf_schema, vf_schema);

	if (iov_error != 0) {
		ha->sriov_initialized = 0;
	} else {
		device_printf(dev, "SRIOV initialized\n");
		ha->sriov_initialized = 1;
	}

	return (0);
}
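/*
 * The schema registered above is what iovctl(8) validates a config file
 * against. A hypothetical /etc/iov/qlnx.conf could look like the following
 * sketch (the device name and all values are illustrative only):
 *
 *	PF {
 *		device : "ql0";
 *		num_vfs : 2;
 *	}
 *
 *	VF-0 {
 *		mac-addr : "00:0e:1e:00:00:01";
 *		allow-set-mac : true;
 *		num-queues : 2;
 *	}
 */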
static void
qlnx_sriov_disable(qlnx_host_t *ha)
{
	struct ecore_dev	*cdev;
	int			i, j;

	cdev = &ha->cdev;

	ecore_iov_set_vfs_to_disable(cdev, true);

	for_each_hwfn(cdev, i) {
		struct ecore_hwfn *hwfn = &cdev->hwfns[i];
		struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn);

		if (!ptt) {
			QL_DPRINT1(ha, "Failed to acquire ptt\n");
			return;
		}

		/* Clean WFQ db and configure equal weight for all vports */
		ecore_clean_wfq_db(hwfn, ptt);

		ecore_for_each_vf(hwfn, j) {
			int k = 0;

			if (!ecore_iov_is_valid_vfid(hwfn, j, true, false))
				continue;

			if (ecore_iov_is_vf_started(hwfn, j)) {
				/* Wait until VF is disabled before releasing */
				for (k = 0; k < 100; k++) {
					if (!ecore_iov_is_vf_stopped(hwfn, j))
						qlnx_mdelay(__func__, 10);
					else
						break;
				}
			}

			if (k < 100)
				ecore_iov_release_hw_for_vf(&cdev->hwfns[i],
					ptt, j);
			else
				QL_DPRINT1(ha,
					"Timeout waiting for VF's FLR to end\n");
		}

		ecore_ptt_release(hwfn, ptt);
	}

	ecore_iov_set_vfs_to_disable(cdev, false);

	return;
}
static void
qlnx_sriov_enable_qid_config(struct ecore_hwfn *hwfn, u16 vfid,
	struct ecore_iov_vf_init_params *params)
{
	u16 base, i;

	/* Since we have an equal resource distribution per-VF, and we assume
	 * PF has acquired the ECORE_PF_L2_QUE first queues, we start setting
	 * sequentially from there.
	 */
	base = FEAT_NUM(hwfn, ECORE_PF_L2_QUE) + vfid * params->num_queues;

	params->rel_vf_id = vfid;

	for (i = 0; i < params->num_queues; i++) {
		params->req_rx_queue[i] = base + i;
		params->req_tx_queue[i] = base + i;
	}

	/* PF uses indices 0 for itself; Set vport/RSS afterwards */
	params->vport_id = vfid + 1;
	params->rss_eng_id = vfid + 1;

	return;
}
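/*
 * A worked example of the layout computed above. With
 * FEAT_NUM(hwfn, ECORE_PF_L2_QUE) == 16 and params->num_queues == 4
 * (both values illustrative):
 *
 *	VF 0: rx/tx queues 16..19, vport_id 1, rss_eng_id 1
 *	VF 1: rx/tx queues 20..23, vport_id 2, rss_eng_id 2
 *	VF 2: rx/tx queues 24..27, vport_id 3, rss_eng_id 3
 *
 * i.e. the PF keeps the first 16 L2 queues and vport/RSS engine 0 for
 * itself, and each VF's queue block is packed contiguously after it.
 */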
static int
qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *nvlist_params)
{
	qlnx_host_t				*ha;
	struct ecore_dev			*cdev;
	struct ecore_iov_vf_init_params		params;
	int					ret, j, i;
	uint32_t				max_vfs;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "%s: cannot get softc\n", __func__);
		return (-1);
	}

	if (qlnx_create_pf_taskqueues(ha) != 0)
		goto qlnx_iov_init_err0;

	cdev = &ha->cdev;

	max_vfs = RESC_NUM(&cdev->hwfns[0], ECORE_VPORT);

	QL_DPRINT2(ha," dev = %p enter num_vfs = %d max_vfs = %d\n",
		dev, num_vfs, max_vfs);

	if (num_vfs >= max_vfs) {
		QL_DPRINT1(ha, "Can start at most %d VFs\n",
			(RESC_NUM(&cdev->hwfns[0], ECORE_VPORT) - 1));
		goto qlnx_iov_init_err0;
	}

	ha->vf_attr = malloc(((sizeof (qlnx_vf_attr_t) * num_vfs)), M_QLNXBUF,
				M_NOWAIT);

	if (ha->vf_attr == NULL)
		goto qlnx_iov_init_err0;

	memset(&params, 0, sizeof(params));

	/* Initialize HW for VF access */
	for_each_hwfn(cdev, j) {
		struct ecore_hwfn *hwfn = &cdev->hwfns[j];
		struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn);

		/* Make sure not to use more than 16 queues per VF */
		params.num_queues = min_t(int,
			(FEAT_NUM(hwfn, ECORE_VF_L2_QUE) / num_vfs),
			16);

		if (!ptt) {
			QL_DPRINT1(ha, "Failed to acquire ptt\n");
			goto qlnx_iov_init_err1;
		}

		for (i = 0; i < num_vfs; i++) {
			if (!ecore_iov_is_valid_vfid(hwfn, i, false, true))
				continue;

			qlnx_sriov_enable_qid_config(hwfn, i, &params);

			ret = ecore_iov_init_hw_for_vf(hwfn, ptt, &params);

			if (ret) {
				QL_DPRINT1(ha, "Failed to enable VF[%d]\n", i);
				ecore_ptt_release(hwfn, ptt);
				goto qlnx_iov_init_err1;
			}
		}

		ecore_ptt_release(hwfn, ptt);
	}

	ha->num_vfs = num_vfs;
	qlnx_inform_vf_link_state(&cdev->hwfns[0], ha);

	QL_DPRINT2(ha," dev = %p exit num_vfs = %d\n", dev, num_vfs);

	return (0);

qlnx_iov_init_err1:
	qlnx_sriov_disable(ha);

qlnx_iov_init_err0:
	qlnx_destroy_pf_taskqueues(ha);
	ha->num_vfs = 0;

	return (-1);
}
static void
qlnx_iov_uninit(device_t dev)
{
	qlnx_host_t	*ha;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "%s: cannot get softc\n", __func__);
		return;
	}

	QL_DPRINT2(ha," dev = %p enter\n", dev);

	qlnx_sriov_disable(ha);
	qlnx_destroy_pf_taskqueues(ha);

	free(ha->vf_attr, M_QLNXBUF);
	ha->vf_attr = NULL;

	ha->num_vfs = 0;

	QL_DPRINT2(ha," dev = %p exit\n", dev);

	return;
}
static int
qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
{
	qlnx_host_t		*ha;
	qlnx_vf_attr_t		*vf_attr;
	unsigned const char	*mac;
	size_t			size;
	struct ecore_hwfn	*p_hwfn;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "%s: cannot get softc\n", __func__);
		return (-1);
	}

	QL_DPRINT2(ha," dev = %p enter vfnum = %d\n", dev, vfnum);

	if (vfnum > (ha->num_vfs - 1)) {
		QL_DPRINT1(ha, " VF[%d] is greater than max allowed [%d]\n",
			vfnum, (ha->num_vfs - 1));
		return (-1);
	}

	vf_attr = &ha->vf_attr[vfnum];

	if (nvlist_exists_binary(params, "mac-addr")) {
		mac = nvlist_get_binary(params, "mac-addr", &size);
		bcopy(mac, vf_attr->mac_addr, ETHER_ADDR_LEN);

		device_printf(dev,
			"%s: mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
			__func__, vf_attr->mac_addr[0],
			vf_attr->mac_addr[1], vf_attr->mac_addr[2],
			vf_attr->mac_addr[3], vf_attr->mac_addr[4],
			vf_attr->mac_addr[5]);

		p_hwfn = &ha->cdev.hwfns[0];
		ecore_iov_bulletin_set_mac(p_hwfn, vf_attr->mac_addr,
			vfnum);
	}

	QL_DPRINT2(ha," dev = %p exit vfnum = %d\n", dev, vfnum);

	return (0);
}
static void
qlnx_handle_vf_msg(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
{
	uint64_t		events[ECORE_VF_ARRAY_LENGTH];
	struct ecore_ptt	*ptt;
	int			i;

	ptt = ecore_ptt_acquire(p_hwfn);
	if (!ptt) {
		QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
		__qlnx_pf_vf_msg(p_hwfn, 0);
		return;
	}

	ecore_iov_pf_get_pending_events(p_hwfn, events);

	QL_DPRINT2(ha, "Event mask of VF events:"
		" 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 "\n",
		events[0], events[1], events[2]);

	ecore_for_each_vf(p_hwfn, i) {
		/* Skip VFs with no pending messages */
		if (!(events[i / 64] & (1ULL << (i % 64))))
			continue;

		QL_DPRINT2(ha,
			"Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
			i, p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i);

		/* Copy VF's message to PF's request buffer for that VF */
		if (ecore_iov_copy_vf_msg(p_hwfn, ptt, i))
			continue;

		ecore_iov_process_mbx_req(p_hwfn, ptt, i);
	}

	ecore_ptt_release(p_hwfn, ptt);

	return;
}
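/*
 * Note: events[] is treated as one flat bitmap of
 * ECORE_VF_ARRAY_LENGTH * 64 bits, one bit per relative VF id. For example,
 * VF 70 maps to events[70 / 64] == events[1], bit (70 % 64) == bit 6.
 */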
static void
qlnx_handle_vf_flr_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
{
	struct ecore_ptt	*ptt;
	int			ret;

	ptt = ecore_ptt_acquire(p_hwfn);
	if (!ptt) {
		QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
		__qlnx_vf_flr_update(p_hwfn);
		return;
	}

	ret = ecore_iov_vf_flr_cleanup(p_hwfn, ptt);
	if (ret) {
		QL_DPRINT1(ha,
			"ecore_iov_vf_flr_cleanup failed; re-scheduling\n");
	}

	ecore_ptt_release(p_hwfn, ptt);

	return;
}
static void
qlnx_handle_bulletin_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
{
	struct ecore_ptt	*ptt;
	int			i;

	ptt = ecore_ptt_acquire(p_hwfn);
	if (!ptt) {
		QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
		qlnx_vf_bulleting_update(p_hwfn);
		return;
	}

	ecore_for_each_vf(p_hwfn, i) {
		QL_DPRINT1(ha, "ecore_iov_post_vf_bulletin[%p, %d]\n",
			p_hwfn, i);
		ecore_iov_post_vf_bulletin(p_hwfn, i, ptt);
	}

	ecore_ptt_release(p_hwfn, ptt);

	return;
}
static void
qlnx_pf_taskqueue(void *context, int pending)
{
	struct ecore_hwfn	*p_hwfn;
	qlnx_host_t		*ha;
	int			i;

	p_hwfn = context;

	if (p_hwfn == NULL)
		return;

	ha = (qlnx_host_t *)(p_hwfn->p_dev);

	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
		return;

	if (atomic_testandclear_32(&ha->sriov_task[i].flags,
		QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG))
		qlnx_handle_vf_msg(ha, p_hwfn);

	if (atomic_testandclear_32(&ha->sriov_task[i].flags,
		QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE))
		qlnx_handle_vf_flr_update(ha, p_hwfn);

	if (atomic_testandclear_32(&ha->sriov_task[i].flags,
		QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE))
		qlnx_handle_bulletin_update(ha, p_hwfn);

	return;
}
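/*
 * Note on the handshake: the producers (__qlnx_pf_vf_msg(),
 * __qlnx_vf_flr_update(), qlnx_vf_bulleting_update()) mark work with
 * atomic_testandset_32() and enqueue pf_task; this handler consumes with
 * atomic_testandclear_32(). A flag raised while the task is already running
 * is not lost, since taskqueue_enqueue() schedules another pass that
 * observes the still-set bit.
 */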
static int
qlnx_create_pf_taskqueues(qlnx_host_t *ha)
{
	int	i;
	char	tq_name[32];

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];

		bzero(tq_name, sizeof (tq_name));
		snprintf(tq_name, sizeof (tq_name), "ql_pf_tq_%d", i);

		TASK_INIT(&ha->sriov_task[i].pf_task, 0, qlnx_pf_taskqueue,
			p_hwfn);

		ha->sriov_task[i].pf_taskqueue = taskqueue_create(tq_name,
			M_NOWAIT, taskqueue_thread_enqueue,
			&ha->sriov_task[i].pf_taskqueue);

		if (ha->sriov_task[i].pf_taskqueue == NULL)
			return (-1);

		taskqueue_start_threads(&ha->sriov_task[i].pf_taskqueue, 1,
			PI_NET, "%s", tq_name);

		QL_DPRINT1(ha, "%p\n", ha->sriov_task[i].pf_taskqueue);
	}

	return (0);
}
static void
qlnx_destroy_pf_taskqueues(qlnx_host_t *ha)
{
	int	i;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		if (ha->sriov_task[i].pf_taskqueue != NULL) {
			taskqueue_drain(ha->sriov_task[i].pf_taskqueue,
				&ha->sriov_task[i].pf_task);
			taskqueue_free(ha->sriov_task[i].pf_taskqueue);
			ha->sriov_task[i].pf_taskqueue = NULL;
		}
	}

	return;
}
static void
qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha)
{
	struct ecore_mcp_link_capabilities caps;
	struct ecore_mcp_link_params params;
	struct ecore_mcp_link_state link;
	int i;

	if (!p_hwfn->pf_iov_info)
		return;

	memset(&params, 0, sizeof(struct ecore_mcp_link_params));
	memset(&link, 0, sizeof(struct ecore_mcp_link_state));
	memset(&caps, 0, sizeof(struct ecore_mcp_link_capabilities));

	memcpy(&caps, ecore_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
	memcpy(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link));
	memcpy(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params));

	QL_DPRINT2(ha, "called\n");

	/* Update bulletin of all future possible VFs with link configuration */
	for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) {
		/* Modify link according to the VF's configured link state */
		link.link_up = false;

		if (ha->link_up) {
			link.link_up = true;
			/* Set speed according to the maximum supported by HW:
			 * 40G for regular devices, 100G for CMT-mode devices.
			 */
			link.speed = (p_hwfn->p_dev->num_hwfns > 1) ?
					100000 : link.speed;
		}

		QL_DPRINT2(ha, "link [%d] = %d\n", i, link.link_up);

		ecore_iov_set_link(p_hwfn, i, &params, &link, &caps);
	}

	qlnx_vf_bulleting_update(p_hwfn);

	return;
}

#endif /* #ifndef QLNX_VF */
#endif /* #ifdef CONFIG_ECORE_SRIOV */