/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Author : David C Somayajulu, Cavium, Inc., San Jose, CA 95131.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "qlnx_os.h"
#include "bcm_osal.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore.h"
#include "ecore_chain.h"
#include "ecore_status.h"
#include "ecore_hw.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_int.h"
#include "ecore_cxt.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"
#include "ecore_dev_api.h"
#include "ecore_l2_api.h"
#include "ecore_mcp.h"
#include "ecore_hw_defs.h"
#include "mcp_public.h"
#include "ecore_iro.h"
#include "nvm_cfg.h"
#include "ecore_dbg_fw_funcs.h"
#include "ecore_iov_api.h"
#include "ecore_vf_api.h"

#include "qlnx_ioctl.h"
#include "qlnx_def.h"
#include "qlnx_ver.h"

#ifdef QLNX_ENABLE_IWARP
#include "qlnx_rdma.h"
#endif /* #ifdef QLNX_ENABLE_IWARP */
/*
 * ioctl related functions
 */
static void qlnx_add_sysctls(qlnx_host_t *ha);

static void qlnx_release(qlnx_host_t *ha);
static void qlnx_fp_isr(void *arg);
static void qlnx_init_ifnet(device_t dev, qlnx_host_t *ha);
static void qlnx_init(void *arg);
static void qlnx_init_locked(qlnx_host_t *ha);
static int qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi);
static int qlnx_set_promisc(qlnx_host_t *ha);
static int qlnx_set_allmulti(qlnx_host_t *ha);
static int qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int qlnx_media_change(struct ifnet *ifp);
static void qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
static void qlnx_stop(qlnx_host_t *ha);
static int qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp,
		struct mbuf **m_headp);
static int qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha);
static uint32_t qlnx_get_optics(qlnx_host_t *ha,
		struct qlnx_link_output *if_link);
static int qlnx_transmit(struct ifnet *ifp, struct mbuf *mp);
static int qlnx_transmit_locked(struct ifnet *ifp, struct qlnx_fastpath *fp,
		struct mbuf *mp);
static void qlnx_qflush(struct ifnet *ifp);

static int qlnx_alloc_parent_dma_tag(qlnx_host_t *ha);
static void qlnx_free_parent_dma_tag(qlnx_host_t *ha);
static int qlnx_alloc_tx_dma_tag(qlnx_host_t *ha);
static void qlnx_free_tx_dma_tag(qlnx_host_t *ha);
static int qlnx_alloc_rx_dma_tag(qlnx_host_t *ha);
static void qlnx_free_rx_dma_tag(qlnx_host_t *ha);

static int qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver);
static int qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size);

static int qlnx_nic_setup(struct ecore_dev *cdev,
		struct ecore_pf_params *func_params);
static int qlnx_nic_start(struct ecore_dev *cdev);
static int qlnx_slowpath_start(qlnx_host_t *ha);
static int qlnx_slowpath_stop(qlnx_host_t *ha);
static int qlnx_init_hw(qlnx_host_t *ha);
static void qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
		char ver_str[VER_SIZE]);
static void qlnx_unload(qlnx_host_t *ha);
static int qlnx_load(qlnx_host_t *ha);
static void qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
		uint32_t add_multi);
static void qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf,
		uint32_t len);
static int qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq);
static void qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq);
static void qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn,
		struct qlnx_rx_queue *rxq);
static int qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter);
static int qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords,
		int hwfn_index);
static int qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords,
		int hwfn_index);
static void qlnx_timer(void *arg);
static int qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
static void qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
static void qlnx_trigger_dump(qlnx_host_t *ha);
static uint16_t qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp,
		struct qlnx_tx_queue *txq);
static void qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
		struct qlnx_tx_queue *txq);
static int qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
		int lro_enable);
static void qlnx_fp_taskqueue(void *context, int pending);
static void qlnx_sample_storm_stats(qlnx_host_t *ha);
static int qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
		struct qlnx_agg_info *tpa);
static void qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa);

#if __FreeBSD_version >= 1100000
static uint64_t qlnx_get_counter(if_t ifp, ift_counter cnt);
#endif
/*
 * Hooks to the Operating Systems
 */
static int qlnx_pci_probe(device_t);
static int qlnx_pci_attach(device_t);
static int qlnx_pci_detach(device_t);
#ifdef CONFIG_ECORE_SRIOV

static int qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params);
static void qlnx_iov_uninit(device_t dev);
static int qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params);
static void qlnx_initialize_sriov(qlnx_host_t *ha);
static void qlnx_pf_taskqueue(void *context, int pending);
static int qlnx_create_pf_taskqueues(qlnx_host_t *ha);
static void qlnx_destroy_pf_taskqueues(qlnx_host_t *ha);
static void qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha);

#endif /* #ifdef CONFIG_ECORE_SRIOV */
static device_method_t qlnx_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, qlnx_pci_probe),
	DEVMETHOD(device_attach, qlnx_pci_attach),
	DEVMETHOD(device_detach, qlnx_pci_detach),

#ifdef CONFIG_ECORE_SRIOV
	DEVMETHOD(pci_iov_init, qlnx_iov_init),
	DEVMETHOD(pci_iov_uninit, qlnx_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, qlnx_iov_add_vf),
#endif /* #ifdef CONFIG_ECORE_SRIOV */
	{ 0, 0 }
};

static driver_t qlnx_pci_driver = {
	"ql", qlnx_pci_methods, sizeof (qlnx_host_t),
};

static devclass_t qlnx_devclass;

MODULE_VERSION(if_qlnxe, 1);
DRIVER_MODULE(if_qlnxe, pci, qlnx_pci_driver, qlnx_devclass, 0, 0);

MODULE_DEPEND(if_qlnxe, pci, 1, 1, 1);
MODULE_DEPEND(if_qlnxe, ether, 1, 1, 1);
#ifdef QLNX_VF

static device_method_t qlnxv_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, qlnx_pci_probe),
	DEVMETHOD(device_attach, qlnx_pci_attach),
	DEVMETHOD(device_detach, qlnx_pci_detach),
	{ 0, 0 }
};

static driver_t qlnxv_pci_driver = {
	"ql", qlnxv_pci_methods, sizeof (qlnx_host_t),
};

static devclass_t qlnxv_devclass;

MODULE_VERSION(if_qlnxev, 1);
DRIVER_MODULE(if_qlnxev, pci, qlnxv_pci_driver, qlnxv_devclass, 0, 0);

MODULE_DEPEND(if_qlnxev, pci, 1, 1, 1);
MODULE_DEPEND(if_qlnxev, ether, 1, 1, 1);

#endif /* #ifdef QLNX_VF */
MALLOC_DEFINE(M_QLNXBUF, "qlnxbuf", "Buffers for qlnx driver");

char qlnx_dev_str[128];
char qlnx_ver_str[VER_SIZE];
char qlnx_name_str[NAME_SIZE];
/*
 * Some PCI Configuration Space Related Defines
 */

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC		0x1077
#endif

/* 40G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1634
#define QLOGIC_PCI_DEVICE_ID_1634	0x1634
#endif

/* 100G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1644
#define QLOGIC_PCI_DEVICE_ID_1644	0x1644
#endif

/* 25G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1656
#define QLOGIC_PCI_DEVICE_ID_1656	0x1656
#endif

/* 50G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1654
#define QLOGIC_PCI_DEVICE_ID_1654	0x1654
#endif

/* 10G/25G/40G Adapter QLE41xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_8070
#define QLOGIC_PCI_DEVICE_ID_8070	0x8070
#endif

/* SRIOV Device (All Speeds) Adapter QLE41xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_8090
#define QLOGIC_PCI_DEVICE_ID_8090	0x8090
#endif
SYSCTL_NODE(_hw, OID_AUTO, qlnxe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "qlnxe driver parameters");

/* Number of Queues: 0 (Auto) or 1 to 32 (fixed queue number) */
static int qlnxe_queue_count = QLNX_DEFAULT_RSS;

#if __FreeBSD_version < 1100000
TUNABLE_INT("hw.qlnxe.queue_count", &qlnxe_queue_count);
#endif

SYSCTL_INT(_hw_qlnxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
    &qlnxe_queue_count, 0, "Multi-Queue queue count");
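
/*
 * Example: since queue_count is CTLFLAG_RDTUN it can only be changed via a
 * boot-time tunable, e.g. in /boot/loader.conf (the value 4 below is just an
 * illustration, not a recommendation):
 *
 *	hw.qlnxe.queue_count=4
 */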
/*
 * Note on RDMA personality setting
 *
 * Read the personality configured in NVRAM.
 * If the personality is ETH_ONLY, ETH_IWARP or ETH_ROCE and
 * the personality configured via sysctl is QLNX_PERSONALITY_DEFAULT,
 * use the personality in NVRAM.
 *
 * Otherwise use the personality configured via sysctl.
 */

#define QLNX_PERSONALITY_DEFAULT	0x0  /* use personality in NVRAM */
#define QLNX_PERSONALITY_ETH_ONLY	0x1  /* Override with ETH_ONLY */
#define QLNX_PERSONALITY_ETH_IWARP	0x2  /* Override with ETH_IWARP */
#define QLNX_PERSONALITY_ETH_ROCE	0x3  /* Override with ETH_ROCE */
#define QLNX_PERSONALITY_BITS_PER_FUNC	4
#define QLNX_PERSONALITY_MASK		0xF
/* RDMA configuration; the 64-bit field allows settings for 16 physical functions */
static uint64_t qlnxe_rdma_configuration = 0x22222222;

#if __FreeBSD_version < 1100000
TUNABLE_QUAD("hw.qlnxe.rdma_configuration", &qlnxe_rdma_configuration);

SYSCTL_UQUAD(_hw_qlnxe, OID_AUTO, rdma_configuration, CTLFLAG_RDTUN,
    &qlnxe_rdma_configuration, 0, "RDMA Configuration");
#else
SYSCTL_U64(_hw_qlnxe, OID_AUTO, rdma_configuration, CTLFLAG_RDTUN,
    &qlnxe_rdma_configuration, 0, "RDMA Configuration");
#endif /* #if __FreeBSD_version < 1100000 */
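
/*
 * PCI function N is configured by bits [4N+3:4N] of rdma_configuration (see
 * qlnx_get_personality() below).  For example, the default value 0x22222222
 * requests QLNX_PERSONALITY_ETH_IWARP (0x2) for functions 0-7; a hypothetical
 * value of 0x31 in the low byte would request ETH_ONLY (0x1) on function 0
 * and ETH_ROCE (0x3) on function 1.
 */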
static int
qlnx_vf_device(qlnx_host_t *ha)
{
	uint16_t device_id;

	device_id = ha->device_id;

	if (device_id == QLOGIC_PCI_DEVICE_ID_8090)
		return (0);

	return (-1);
}
static int
qlnx_valid_device(qlnx_host_t *ha)
{
	uint16_t device_id;

	device_id = ha->device_id;

#ifndef QLNX_VF
	if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_1644) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_1654) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_8070))
		return (0);
#else
	if (device_id == QLOGIC_PCI_DEVICE_ID_8090)
		return (0);
#endif /* #ifndef QLNX_VF */

	return (-1);
}
#ifdef QLNX_ENABLE_IWARP
static int
qlnx_rdma_supported(struct qlnx_host *ha)
{
	uint16_t device_id;

	device_id = pci_get_device(ha->pci_dev);

	if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_1654) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_8070))
		return (0);

	return (-1);
}
#endif /* #ifdef QLNX_ENABLE_IWARP */
/*
 * Name: qlnx_pci_probe
 * Function: Validate that the PCI device is a supported QLogic adapter
 */
static int
qlnx_pci_probe(device_t dev)
{
	snprintf(qlnx_ver_str, sizeof(qlnx_ver_str), "v%d.%d.%d",
		QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, QLNX_VERSION_BUILD);
	snprintf(qlnx_name_str, sizeof(qlnx_name_str), "qlnx");

	if (pci_get_vendor(dev) != PCI_VENDOR_QLOGIC) {
		return (ENXIO);
	}

	switch (pci_get_device(dev)) {
#ifndef QLNX_VF

	case QLOGIC_PCI_DEVICE_ID_1644:
		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
			"Qlogic 100GbE PCI CNA Adapter-Ethernet Function",
			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
			QLNX_VERSION_BUILD);
		device_set_desc_copy(dev, qlnx_dev_str);

		break;

	case QLOGIC_PCI_DEVICE_ID_1634:
		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
			"Qlogic 40GbE PCI CNA Adapter-Ethernet Function",
			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
			QLNX_VERSION_BUILD);
		device_set_desc_copy(dev, qlnx_dev_str);

		break;

	case QLOGIC_PCI_DEVICE_ID_1656:
		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
			"Qlogic 25GbE PCI CNA Adapter-Ethernet Function",
			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
			QLNX_VERSION_BUILD);
		device_set_desc_copy(dev, qlnx_dev_str);

		break;

	case QLOGIC_PCI_DEVICE_ID_1654:
		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
			"Qlogic 50GbE PCI CNA Adapter-Ethernet Function",
			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
			QLNX_VERSION_BUILD);
		device_set_desc_copy(dev, qlnx_dev_str);

		break;

	case QLOGIC_PCI_DEVICE_ID_8070:
		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
			"Qlogic 10GbE/25GbE/40GbE PCI CNA (AH)"
			" Adapter-Ethernet Function",
			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
			QLNX_VERSION_BUILD);
		device_set_desc_copy(dev, qlnx_dev_str);

		break;

#else
	case QLOGIC_PCI_DEVICE_ID_8090:
		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
			"Qlogic SRIOV PCI CNA (AH) "
			"Adapter-Ethernet Function",
			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
			QLNX_VERSION_BUILD);
		device_set_desc_copy(dev, qlnx_dev_str);

		break;

#endif /* #ifndef QLNX_VF */

	default:
		return (ENXIO);
	}

#ifdef QLNX_ENABLE_IWARP
	qlnx_rdma_init();
#endif /* #ifdef QLNX_ENABLE_IWARP */

	return (BUS_PROBE_DEFAULT);
}
static uint16_t
qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp,
	struct qlnx_tx_queue *txq)
{
	uint16_t hw_bd_cons;
	uint16_t ecore_cons_idx;
	uint16_t diff;

	hw_bd_cons = le16toh(*txq->hw_cons_ptr);

	ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl);
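	/*
	 * Both indices are free-running 16-bit counters; when the hardware
	 * consumer has wrapped past zero and the chain consumer has not,
	 * bias the difference by 2^16 to recover the true distance.
	 */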
	if (hw_bd_cons < ecore_cons_idx) {
		diff = (1 << 16) - (ecore_cons_idx - hw_bd_cons);
	} else {
		diff = hw_bd_cons - ecore_cons_idx;
	}

	return (diff);
}
static void
qlnx_sp_intr(void *arg)
{
	struct ecore_hwfn	*p_hwfn;
	qlnx_host_t		*ha;
	int			i;

	p_hwfn = arg;

	if (p_hwfn == NULL) {
		printf("%s: spurious slowpath intr\n", __func__);
		return;
	}

	ha = (qlnx_host_t *)p_hwfn->p_dev;

	QL_DPRINT2(ha, "enter\n");
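	/*
	 * The interrupt is only acknowledged here; the actual slowpath event
	 * processing is deferred to the taskqueue of the hw-function that
	 * raised it.
	 */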
	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		if (&ha->cdev.hwfns[i] == p_hwfn) {
			taskqueue_enqueue(ha->sp_taskqueue[i], &ha->sp_task[i]);
			break;
		}
	}

	QL_DPRINT2(ha, "exit\n");

	return;
}
static void
qlnx_sp_taskqueue(void *context, int pending)
{
	struct ecore_hwfn	*p_hwfn;

	p_hwfn = context;

	if (p_hwfn != NULL) {
		qlnx_sp_isr(p_hwfn);
	}

	return;
}
static int
qlnx_create_sp_taskqueues(qlnx_host_t *ha)
{
	int	i;
	uint8_t	tq_name[32];

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];

		bzero(tq_name, sizeof(tq_name));
		snprintf(tq_name, sizeof(tq_name), "ql_sp_tq_%d", i);

		TASK_INIT(&ha->sp_task[i], 0, qlnx_sp_taskqueue, p_hwfn);

		ha->sp_taskqueue[i] = taskqueue_create(tq_name, M_NOWAIT,
			taskqueue_thread_enqueue, &ha->sp_taskqueue[i]);

		if (ha->sp_taskqueue[i] == NULL)
			return (-1);

		taskqueue_start_threads(&ha->sp_taskqueue[i], 1, PI_NET, "%s",
			tq_name);

		QL_DPRINT1(ha, "%p\n", ha->sp_taskqueue[i]);
	}

	return (0);
}
static void
qlnx_destroy_sp_taskqueues(qlnx_host_t *ha)
{
	int	i;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		if (ha->sp_taskqueue[i] != NULL) {
			taskqueue_drain(ha->sp_taskqueue[i], &ha->sp_task[i]);
			taskqueue_free(ha->sp_taskqueue[i]);
		}
	}

	return;
}
static void
qlnx_fp_taskqueue(void *context, int pending)
{
	struct qlnx_fastpath	*fp;
	qlnx_host_t		*ha;
	struct ifnet		*ifp;

#ifdef QLNX_TRACE_PERF_DATA
	uint64_t		tx_pkts = 0, tx_compl = 0;
#endif

	fp = context;

	if (fp == NULL)
		return;

	ha = (qlnx_host_t *)fp->edev;

	ifp = ha->ifp;
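	/*
	 * Drain any frames that the transmit path left in the buf_ring
	 * because it could not take the per-queue tx lock at the time.
	 */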
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		if (!drbr_empty(ifp, fp->tx_br)) {
			if (mtx_trylock(&fp->tx_mtx)) {
#ifdef QLNX_TRACE_PERF_DATA
				tx_pkts = fp->tx_pkts_transmitted;
				tx_compl = fp->tx_pkts_completed;
#endif

				qlnx_transmit_locked(ifp, fp, NULL);

#ifdef QLNX_TRACE_PERF_DATA
				fp->tx_pkts_trans_fp +=
					(fp->tx_pkts_transmitted - tx_pkts);
				fp->tx_pkts_compl_fp +=
					(fp->tx_pkts_completed - tx_compl);
#endif
				mtx_unlock(&fp->tx_mtx);
			}
		}
	}

	QL_DPRINT2(ha, "exit\n");

	return;
}
static int
qlnx_create_fp_taskqueues(qlnx_host_t *ha)
{
	int	i;
	uint8_t	tq_name[32];
	struct qlnx_fastpath *fp;

	for (i = 0; i < ha->num_rss; i++) {
		fp = &ha->fp_array[i];

		bzero(tq_name, sizeof(tq_name));
		snprintf(tq_name, sizeof(tq_name), "ql_fp_tq_%d", i);

		TASK_INIT(&fp->fp_task, 0, qlnx_fp_taskqueue, fp);

		fp->fp_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
			taskqueue_thread_enqueue,
			&fp->fp_taskqueue);

		if (fp->fp_taskqueue == NULL)
			return (-1);

		taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
			tq_name);

		QL_DPRINT1(ha, "%p\n", fp->fp_taskqueue);
	}

	return (0);
}
static void
qlnx_destroy_fp_taskqueues(qlnx_host_t *ha)
{
	int	i;
	struct qlnx_fastpath *fp;

	for (i = 0; i < ha->num_rss; i++) {
		fp = &ha->fp_array[i];

		if (fp->fp_taskqueue != NULL) {
			taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
			taskqueue_free(fp->fp_taskqueue);
			fp->fp_taskqueue = NULL;
		}
	}

	return;
}
static void
qlnx_drain_fp_taskqueues(qlnx_host_t *ha)
{
	int	i;
	struct qlnx_fastpath *fp;

	for (i = 0; i < ha->num_rss; i++) {
		fp = &ha->fp_array[i];

		if (fp->fp_taskqueue != NULL) {
			QLNX_UNLOCK(ha);
			taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
			QLNX_LOCK(ha);
		}
	}

	return;
}
static void
qlnx_get_params(qlnx_host_t *ha)
{
	if ((qlnxe_queue_count < 0) || (qlnxe_queue_count > QLNX_MAX_RSS)) {
		device_printf(ha->pci_dev, "invalid queue_count value (%d)\n",
			qlnxe_queue_count);
		qlnxe_queue_count = 0;
	}

	return;
}
static void
qlnx_error_recovery_taskqueue(void *context, int pending)
{
	qlnx_host_t *ha;

	ha = context;

	QL_DPRINT2(ha, "enter\n");

	QLNX_LOCK(ha);
	qlnx_stop(ha);
	QLNX_UNLOCK(ha);

#ifdef QLNX_ENABLE_IWARP
	qlnx_rdma_dev_remove(ha);
#endif /* #ifdef QLNX_ENABLE_IWARP */

	qlnx_slowpath_stop(ha);
	qlnx_slowpath_start(ha);

#ifdef QLNX_ENABLE_IWARP
	qlnx_rdma_dev_add(ha);
#endif /* #ifdef QLNX_ENABLE_IWARP */

	qlnx_init(ha);

	callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);

	QL_DPRINT2(ha, "exit\n");

	return;
}
static int
qlnx_create_error_recovery_taskqueue(qlnx_host_t *ha)
{
	uint8_t	tq_name[32];

	bzero(tq_name, sizeof(tq_name));
	snprintf(tq_name, sizeof(tq_name), "ql_err_tq");

	TASK_INIT(&ha->err_task, 0, qlnx_error_recovery_taskqueue, ha);

	ha->err_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
		taskqueue_thread_enqueue, &ha->err_taskqueue);

	if (ha->err_taskqueue == NULL)
		return (-1);

	taskqueue_start_threads(&ha->err_taskqueue, 1, PI_NET, "%s", tq_name);

	QL_DPRINT1(ha, "%p\n", ha->err_taskqueue);

	return (0);
}
static void
qlnx_destroy_error_recovery_taskqueue(qlnx_host_t *ha)
{
	if (ha->err_taskqueue != NULL) {
		taskqueue_drain(ha->err_taskqueue, &ha->err_task);
		taskqueue_free(ha->err_taskqueue);
	}

	ha->err_taskqueue = NULL;

	return;
}
/*
 * Name: qlnx_pci_attach
 * Function: attaches the device to the operating system
 */
static int
qlnx_pci_attach(device_t dev)
{
	qlnx_host_t	*ha = NULL;
	uint32_t	rsrc_len_reg = 0;
	uint32_t	rsrc_len_dbells = 0;
	uint32_t	rsrc_len_msix = 0;
	int		i;
	uint32_t	mfw_ver;
	uint32_t	num_sp_msix = 0;
	uint32_t	num_rdma_irqs = 0;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "cannot get softc\n");
		return (ENOMEM);
	}

	memset(ha, 0, sizeof(qlnx_host_t));
	ha->device_id = pci_get_device(dev);

	if (qlnx_valid_device(ha) != 0) {
		device_printf(dev, "%s: device is not supported\n", __func__);
		return (ENXIO);
	}

	ha->pci_func = pci_get_function(dev);

	ha->pci_dev = dev;

	mtx_init(&ha->hw_lock, "qlnx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);

	ha->flags.lock_init = 1;

	pci_enable_busmaster(dev);
	ha->reg_rid = PCIR_BAR(0);
	ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
				RF_ACTIVE);

	if (ha->pci_reg == NULL) {
		device_printf(dev, "unable to map BAR0\n");
		goto qlnx_pci_attach_err;
	}

	rsrc_len_reg = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
					ha->reg_rid);

	ha->dbells_rid = PCIR_BAR(2);
	rsrc_len_dbells = (uint32_t) bus_get_resource_count(dev,
					SYS_RES_MEMORY, ha->dbells_rid);
	if (rsrc_len_dbells) {
		ha->pci_dbells = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
					&ha->dbells_rid, RF_ACTIVE);

		if (ha->pci_dbells == NULL) {
			device_printf(dev, "unable to map BAR1\n");
			goto qlnx_pci_attach_err;
		}
		ha->dbells_phys_addr = (uint64_t)
			bus_get_resource_start(dev, SYS_RES_MEMORY,
				ha->dbells_rid);

		ha->dbells_size = rsrc_len_dbells;
	} else {
		if (qlnx_vf_device(ha) != 0) {
			device_printf(dev, "BAR1 size is zero\n");
			goto qlnx_pci_attach_err;
		}
	}
	ha->msix_rid = PCIR_BAR(4);
	ha->msix_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
			&ha->msix_rid, RF_ACTIVE);

	if (ha->msix_bar == NULL) {
		device_printf(dev, "unable to map BAR2\n");
		goto qlnx_pci_attach_err;
	}

	rsrc_len_msix = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
					ha->msix_rid);
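
	/*
	 * PCIR_BAR(0)/(2)/(4) address the adapter's three 64-bit BARs, which
	 * the diagnostics above refer to as BAR0 (registers), BAR1
	 * (doorbells) and BAR2 (MSI-X) respectively.
	 */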
	ha->dbg_level = 0x0000;

	QL_DPRINT1(ha, "\n\t\t\t"
		"pci_dev = %p pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x"
		"\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x"
		"\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x"
		" msix_avail = 0x%x "
		"\n\t\t\t[ncpus = %d]\n",
		ha->pci_dev, ha->pci_reg, rsrc_len_reg,
		ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
		ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
		mp_ncpus);
	if (qlnx_alloc_parent_dma_tag(ha))
		goto qlnx_pci_attach_err;

	if (qlnx_alloc_tx_dma_tag(ha))
		goto qlnx_pci_attach_err;

	if (qlnx_alloc_rx_dma_tag(ha))
		goto qlnx_pci_attach_err;

	if (qlnx_init_hw(ha) != 0)
		goto qlnx_pci_attach_err;

	ha->flags.hw_init = 1;
	qlnx_get_params(ha);

	if ((pci_get_device(dev) == QLOGIC_PCI_DEVICE_ID_1644) &&
		(qlnxe_queue_count == QLNX_DEFAULT_RSS)) {
		qlnxe_queue_count = QLNX_MAX_RSS;
	}

	/*
	 * Allocate MSI-x vectors
	 */
	if (qlnx_vf_device(ha) != 0) {
		if (qlnxe_queue_count == 0)
			ha->num_rss = QLNX_DEFAULT_RSS;
		else
			ha->num_rss = qlnxe_queue_count;

		num_sp_msix = ha->cdev.num_hwfns;
	} else {
		uint8_t max_rxq;
		uint8_t max_txq;

		ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_rxq);
		ecore_vf_get_num_txqs(&ha->cdev.hwfns[0], &max_txq);

		if (max_rxq < max_txq)
			ha->num_rss = max_rxq;
		else
			ha->num_rss = max_txq;

		if (ha->num_rss > QLNX_MAX_VF_RSS)
			ha->num_rss = QLNX_MAX_VF_RSS;

		num_sp_msix = 0;
	}

	if (ha->num_rss > mp_ncpus)
		ha->num_rss = mp_ncpus;
	ha->num_tc = QLNX_MAX_TC;

	ha->msix_count = pci_msix_count(dev);

#ifdef QLNX_ENABLE_IWARP

	num_rdma_irqs = qlnx_rdma_get_num_irqs(ha);

#endif /* #ifdef QLNX_ENABLE_IWARP */
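
	/*
	 * MSI-X budget: one vector per hw-function for the slowpath (PF
	 * only), the optional RDMA vectors, and the remainder for the
	 * fastpath RSS queues.
	 */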
	if (!ha->msix_count ||
		(ha->msix_count < (num_sp_msix + 1 + num_rdma_irqs))) {
		device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
			ha->msix_count);
		goto qlnx_pci_attach_err;
	}

	if (ha->msix_count > (ha->num_rss + num_sp_msix + num_rdma_irqs))
		ha->msix_count = ha->num_rss + num_sp_msix + num_rdma_irqs;
	else
		ha->num_rss = ha->msix_count - (num_sp_msix + num_rdma_irqs);
	QL_DPRINT1(ha, "\n\t\t\t"
		"pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x"
		"\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x"
		"\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x"
		" msix_avail = 0x%x msix_alloc = 0x%x"
		"\n\t\t\t[ncpus = %d][num_rss = 0x%x] [num_tc = 0x%x]\n",
		ha->pci_reg, rsrc_len_reg,
		ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
		ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
		ha->msix_count, mp_ncpus, ha->num_rss, ha->num_tc);

	if (pci_alloc_msix(dev, &ha->msix_count)) {
		device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
			ha->msix_count);

		ha->msix_count = 0;
		goto qlnx_pci_attach_err;
	}
	/*
	 * Initialize slow path interrupt and task queue
	 */

	if (qlnx_create_sp_taskqueues(ha) != 0)
		goto qlnx_pci_attach_err;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];
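		/*
		 * MSI-X rids are 1-based: rids 1..num_hwfns carry the
		 * slowpath interrupts and the fastpath vectors follow them
		 * (see the irq_rid assignment in the fastpath loop below).
		 */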
		ha->sp_irq_rid[i] = i + 1;
		ha->sp_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ,
				&ha->sp_irq_rid[i],
				(RF_ACTIVE | RF_SHAREABLE));
		if (ha->sp_irq[i] == NULL) {
			device_printf(dev,
				"could not allocate mbx interrupt\n");
			goto qlnx_pci_attach_err;
		}

		if (bus_setup_intr(dev, ha->sp_irq[i],
			(INTR_TYPE_NET | INTR_MPSAFE), NULL,
			qlnx_sp_intr, p_hwfn, &ha->sp_handle[i])) {
			device_printf(dev,
				"could not setup slow path interrupt\n");
			goto qlnx_pci_attach_err;
		}

		QL_DPRINT1(ha, "p_hwfn [%p] sp_irq_rid %d"
			" sp_irq %p sp_handle %p\n", p_hwfn,
			ha->sp_irq_rid[i], ha->sp_irq[i], ha->sp_handle[i]);
	}
	/*
	 * initialize fast path interrupt
	 */
	if (qlnx_create_fp_taskqueues(ha) != 0)
		goto qlnx_pci_attach_err;

	for (i = 0; i < ha->num_rss; i++) {
		ha->irq_vec[i].rss_idx = i;
		ha->irq_vec[i].ha = ha;
		ha->irq_vec[i].irq_rid = (1 + num_sp_msix) + i;

		ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
				&ha->irq_vec[i].irq_rid,
				(RF_ACTIVE | RF_SHAREABLE));

		if (ha->irq_vec[i].irq == NULL) {
			device_printf(dev,
				"could not allocate interrupt[%d] irq_rid = %d\n",
				i, ha->irq_vec[i].irq_rid);
			goto qlnx_pci_attach_err;
		}

		if (qlnx_alloc_tx_br(ha, &ha->fp_array[i])) {
			device_printf(dev, "could not allocate tx_br[%d]\n", i);
			goto qlnx_pci_attach_err;
		}
	}
	if (qlnx_vf_device(ha) != 0) {
		callout_init(&ha->qlnx_callout, 1);
		ha->flags.callout_init = 1;

		for (i = 0; i < ha->cdev.num_hwfns; i++) {
			if (qlnx_grc_dumpsize(ha, &ha->grcdump_size[i], i) != 0)
				goto qlnx_pci_attach_err;
			if (ha->grcdump_size[i] == 0)
				goto qlnx_pci_attach_err;
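			/* dump sizes are reported in dwords; convert to bytes */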
			ha->grcdump_size[i] = ha->grcdump_size[i] << 2;
			QL_DPRINT1(ha, "grcdump_size[%d] = 0x%08x\n",
				i, ha->grcdump_size[i]);

			ha->grcdump[i] = qlnx_zalloc(ha->grcdump_size[i]);
			if (ha->grcdump[i] == NULL) {
				device_printf(dev, "grcdump alloc[%d] failed\n", i);
				goto qlnx_pci_attach_err;
			}

			if (qlnx_idle_chk_size(ha, &ha->idle_chk_size[i], i) != 0)
				goto qlnx_pci_attach_err;
			if (ha->idle_chk_size[i] == 0)
				goto qlnx_pci_attach_err;

			ha->idle_chk_size[i] = ha->idle_chk_size[i] << 2;
			QL_DPRINT1(ha, "idle_chk_size[%d] = 0x%08x\n",
				i, ha->idle_chk_size[i]);

			ha->idle_chk[i] = qlnx_zalloc(ha->idle_chk_size[i]);

			if (ha->idle_chk[i] == NULL) {
				device_printf(dev, "idle_chk alloc failed\n");
				goto qlnx_pci_attach_err;
			}
		}

		if (qlnx_create_error_recovery_taskqueue(ha) != 0)
			goto qlnx_pci_attach_err;
	}
	if (qlnx_slowpath_start(ha) != 0)
		goto qlnx_pci_attach_err;
	else
		ha->flags.slowpath_start = 1;

	if (qlnx_vf_device(ha) != 0) {
		if (qlnx_get_flash_size(ha, &ha->flash_size) != 0) {
			qlnx_mdelay(__func__, 1000);
			qlnx_trigger_dump(ha);

			goto qlnx_pci_attach_err0;
		}

		if (qlnx_get_mfw_version(ha, &mfw_ver) != 0) {
			qlnx_mdelay(__func__, 1000);
			qlnx_trigger_dump(ha);

			goto qlnx_pci_attach_err0;
		}
	} else {
		struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0];

		ecore_mcp_get_mfw_ver(p_hwfn, NULL, &mfw_ver, NULL);
	}
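	/* the 32-bit MFW version packs major.minor.rev.eng, one byte each */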
	snprintf(ha->mfw_ver, sizeof(ha->mfw_ver), "%d.%d.%d.%d",
		((mfw_ver >> 24) & 0xFF), ((mfw_ver >> 16) & 0xFF),
		((mfw_ver >> 8) & 0xFF), (mfw_ver & 0xFF));
	snprintf(ha->stormfw_ver, sizeof(ha->stormfw_ver), "%d.%d.%d.%d",
		FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
		FW_ENGINEERING_VERSION);

	QL_DPRINT1(ha, "STORM_FW version %s MFW version %s\n",
		ha->stormfw_ver, ha->mfw_ver);
	qlnx_init_ifnet(dev, ha);

	/*
	 * add sysctls
	 */
	qlnx_add_sysctls(ha);

qlnx_pci_attach_err0:
	/*
	 * create ioctl device interface
	 */
	if (qlnx_vf_device(ha) != 0) {
		if (qlnx_make_cdev(ha)) {
			device_printf(dev, "%s: ql_make_cdev failed\n", __func__);
			goto qlnx_pci_attach_err;
		}

#ifdef QLNX_ENABLE_IWARP
		qlnx_rdma_dev_add(ha);
#endif /* #ifdef QLNX_ENABLE_IWARP */
	}

#ifndef QLNX_VF
#ifdef CONFIG_ECORE_SRIOV

	if (qlnx_vf_device(ha) != 0)
		qlnx_initialize_sriov(ha);

#endif /* #ifdef CONFIG_ECORE_SRIOV */
#endif /* #ifndef QLNX_VF */

	QL_DPRINT2(ha, "success\n");

	return (0);

qlnx_pci_attach_err:

	qlnx_release(ha);

	return (ENXIO);
}
/*
 * Name: qlnx_pci_detach
 * Function: Unhooks the device from the operating system
 */
static int
qlnx_pci_detach(device_t dev)
{
	qlnx_host_t	*ha = NULL;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "%s: cannot get softc\n", __func__);
		return (ENOMEM);
	}

	if (qlnx_vf_device(ha) != 0) {
#ifdef CONFIG_ECORE_SRIOV
		int ret;

		ret = pci_iov_detach(dev);
		if (ret) {
			device_printf(dev, "%s: SRIOV in use\n", __func__);
			return (ret);
		}
#endif /* #ifdef CONFIG_ECORE_SRIOV */

#ifdef QLNX_ENABLE_IWARP
		if (qlnx_rdma_dev_remove(ha) != 0)
			return (EBUSY);
#endif /* #ifdef QLNX_ENABLE_IWARP */
	}

	QLNX_LOCK(ha);
	qlnx_stop(ha);
	QLNX_UNLOCK(ha);

	qlnx_release(ha);

	return (0);
}
#ifdef QLNX_ENABLE_IWARP

static uint8_t
qlnx_get_personality(uint8_t pci_func)
{
	uint8_t personality;

	personality = (qlnxe_rdma_configuration >>
			(pci_func * QLNX_PERSONALITY_BITS_PER_FUNC)) &
			QLNX_PERSONALITY_MASK;
	return (personality);
}
static void
qlnx_set_personality(qlnx_host_t *ha)
{
	struct ecore_hwfn *p_hwfn;
	uint8_t personality;

	p_hwfn = &ha->cdev.hwfns[0];

	personality = qlnx_get_personality(ha->pci_func);

	switch (personality) {
	case QLNX_PERSONALITY_DEFAULT:
		device_printf(ha->pci_dev, "%s: DEFAULT\n",
			__func__);
		ha->personality = ECORE_PCI_DEFAULT;
		break;

	case QLNX_PERSONALITY_ETH_ONLY:
		device_printf(ha->pci_dev, "%s: ETH_ONLY\n",
			__func__);
		ha->personality = ECORE_PCI_ETH;
		break;

	case QLNX_PERSONALITY_ETH_IWARP:
		device_printf(ha->pci_dev, "%s: ETH_IWARP\n",
			__func__);
		ha->personality = ECORE_PCI_ETH_IWARP;
		break;

	case QLNX_PERSONALITY_ETH_ROCE:
		device_printf(ha->pci_dev, "%s: ETH_ROCE\n",
			__func__);
		ha->personality = ECORE_PCI_ETH_ROCE;
		break;
	}

	return;
}

#endif /* #ifdef QLNX_ENABLE_IWARP */
static int
qlnx_init_hw(qlnx_host_t *ha)
{
	int				rval = 0;
	struct ecore_hw_prepare_params	params;

	ecore_init_struct(&ha->cdev);

	/*
	 * Debug settings; normally left at ECORE_LEVEL_NOTICE:
	 *
	 * ha->dp_module = ECORE_MSG_PROBE | ECORE_MSG_INTR | ECORE_MSG_LL2;
	 * ha->dp_level = ECORE_LEVEL_VERBOSE;
	 */
	ha->dp_level = ECORE_LEVEL_NOTICE;

	ecore_init_dp(&ha->cdev, ha->dp_module, ha->dp_level, ha->pci_dev);

	ha->cdev.regview = ha->pci_reg;

	ha->personality = ECORE_PCI_DEFAULT;

	if (qlnx_vf_device(ha) == 0) {
		ha->cdev.b_is_vf = true;

		if (ha->pci_dbells != NULL) {
			ha->cdev.doorbells = ha->pci_dbells;
			ha->cdev.db_phys_addr = ha->dbells_phys_addr;
			ha->cdev.db_size = ha->dbells_size;
		} else {
			ha->pci_dbells = ha->pci_reg;
		}
	} else {
		ha->cdev.doorbells = ha->pci_dbells;
		ha->cdev.db_phys_addr = ha->dbells_phys_addr;
		ha->cdev.db_size = ha->dbells_size;

#ifdef QLNX_ENABLE_IWARP

		if (qlnx_rdma_supported(ha) == 0)
			qlnx_set_personality(ha);

#endif /* #ifdef QLNX_ENABLE_IWARP */
	}

	QL_DPRINT2(ha, "%s: %s\n", __func__,
		(ha->personality == ECORE_PCI_ETH_IWARP ? "iwarp" : "ethernet"));

	bzero(&params, sizeof(struct ecore_hw_prepare_params));

	params.personality = ha->personality;

	params.drv_resc_alloc = false;
	params.chk_reg_fifo = false;
	params.initiate_pf_flr = true;

	ecore_hw_prepare(&ha->cdev, &params);

	qlnx_set_id(&ha->cdev, qlnx_name_str, qlnx_ver_str);

	QL_DPRINT1(ha, "ha = %p cdev = %p p_hwfn = %p\n",
		ha, &ha->cdev, &ha->cdev.hwfns[0]);

	return (rval);
}
static void
qlnx_release(qlnx_host_t *ha)
{
	device_t	dev;
	int		i;

	dev = ha->pci_dev;

	QL_DPRINT2(ha, "enter\n");

	for (i = 0; i < QLNX_MAX_HW_FUNCS; i++) {
		if (ha->idle_chk[i] != NULL) {
			free(ha->idle_chk[i], M_QLNXBUF);
			ha->idle_chk[i] = NULL;
		}

		if (ha->grcdump[i] != NULL) {
			free(ha->grcdump[i], M_QLNXBUF);
			ha->grcdump[i] = NULL;
		}
	}

	if (ha->flags.callout_init)
		callout_drain(&ha->qlnx_callout);

	if (ha->flags.slowpath_start) {
		qlnx_slowpath_stop(ha);
	}

	if (ha->flags.hw_init)
		ecore_hw_remove(&ha->cdev);

	qlnx_del_cdev(ha);

	if (ha->ifp != NULL)
		ether_ifdetach(ha->ifp);

	qlnx_free_tx_dma_tag(ha);

	qlnx_free_rx_dma_tag(ha);

	qlnx_free_parent_dma_tag(ha);

	if (qlnx_vf_device(ha) != 0) {
		qlnx_destroy_error_recovery_taskqueue(ha);
	}

	for (i = 0; i < ha->num_rss; i++) {
		struct qlnx_fastpath *fp = &ha->fp_array[i];

		if (ha->irq_vec[i].handle) {
			(void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
				ha->irq_vec[i].handle);
		}

		if (ha->irq_vec[i].irq) {
			(void)bus_release_resource(dev, SYS_RES_IRQ,
				ha->irq_vec[i].irq_rid,
				ha->irq_vec[i].irq);
		}

		qlnx_free_tx_br(ha, fp);
	}

	qlnx_destroy_fp_taskqueues(ha);

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		if (ha->sp_handle[i])
			(void)bus_teardown_intr(dev, ha->sp_irq[i],
				ha->sp_handle[i]);

		if (ha->sp_irq[i])
			(void) bus_release_resource(dev, SYS_RES_IRQ,
				ha->sp_irq_rid[i], ha->sp_irq[i]);
	}

	qlnx_destroy_sp_taskqueues(ha);

	if (ha->msix_count)
		pci_release_msi(dev);

	if (ha->flags.lock_init) {
		mtx_destroy(&ha->hw_lock);
	}

	if (ha->pci_reg)
		(void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
			ha->pci_reg);

	if (ha->dbells_size && ha->pci_dbells)
		(void) bus_release_resource(dev, SYS_RES_MEMORY, ha->dbells_rid,
			ha->pci_dbells);

	if (ha->msix_bar)
		(void) bus_release_resource(dev, SYS_RES_MEMORY, ha->msix_rid,
			ha->msix_bar);

	QL_DPRINT2(ha, "exit\n");

	return;
}
static void
qlnx_trigger_dump(qlnx_host_t *ha)
{
	int	i;

	if (ha->ifp != NULL)
		ha->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);

	QL_DPRINT2(ha, "enter\n");

	if (qlnx_vf_device(ha) == 0)
		return;

	ha->error_recovery = 1;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		qlnx_grc_dump(ha, &ha->grcdump_dwords[i], i);
		qlnx_idle_chk(ha, &ha->idle_chk_dwords[i], i);
	}

	QL_DPRINT2(ha, "exit\n");

	return;
}
static int
qlnx_trigger_dump_sysctl(SYSCTL_HANDLER_ARGS)
{
	int		err, ret = 0;
	qlnx_host_t	*ha;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	if (err || !req->newptr)
		return (err);

	ha = (qlnx_host_t *)arg1;
	qlnx_trigger_dump(ha);

	return (err);
}
static int
qlnx_set_tx_coalesce(SYSCTL_HANDLER_ARGS)
{
	int			err, i, ret = 0, usecs = 0;
	qlnx_host_t		*ha;
	struct ecore_hwfn	*p_hwfn;
	struct qlnx_fastpath	*fp;

	err = sysctl_handle_int(oidp, &usecs, 0, req);
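	/* the coalesce period is stored in a uint8_t: accept only 1-255 usecs */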
	if (err || !req->newptr || !usecs || (usecs > 255))
		return (err);

	ha = (qlnx_host_t *)arg1;

	if (qlnx_vf_device(ha) == 0)
		return (-1);

	for (i = 0; i < ha->num_rss; i++) {
		p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];

		fp = &ha->fp_array[i];

		if (fp->txq[0]->handle != NULL) {
			ret = ecore_set_queue_coalesce(p_hwfn, 0,
				(uint16_t)usecs, fp->txq[0]->handle);
		}
	}

	if (!ret)
		ha->tx_coalesce_usecs = (uint8_t)usecs;

	return (err);
}
static int
qlnx_set_rx_coalesce(SYSCTL_HANDLER_ARGS)
{
	int			err, i, ret = 0, usecs = 0;
	qlnx_host_t		*ha;
	struct ecore_hwfn	*p_hwfn;
	struct qlnx_fastpath	*fp;

	err = sysctl_handle_int(oidp, &usecs, 0, req);

	if (err || !req->newptr || !usecs || (usecs > 255))
		return (err);

	ha = (qlnx_host_t *)arg1;

	if (qlnx_vf_device(ha) == 0)
		return (-1);

	for (i = 0; i < ha->num_rss; i++) {
		p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];

		fp = &ha->fp_array[i];

		if (fp->rxq->handle != NULL) {
			ret = ecore_set_queue_coalesce(p_hwfn, (uint16_t)usecs,
				0, fp->rxq->handle);
		}
	}

	if (!ret)
		ha->rx_coalesce_usecs = (uint8_t)usecs;

	return (err);
}
static void
qlnx_add_sp_stats_sysctls(qlnx_host_t *ha)
{
	struct sysctl_ctx_list	*ctx;
	struct sysctl_oid_list	*children;
	struct sysctl_oid	*ctx_oid;

	ctx = device_get_sysctl_ctx(ha->pci_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

	ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "spstat",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "spstat");
	children = SYSCTL_CHILDREN(ctx_oid);

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "sp_interrupts",
		CTLFLAG_RD, &ha->sp_interrupts,
		"No. of slowpath interrupts");

	return;
}
static void
qlnx_add_fp_stats_sysctls(qlnx_host_t *ha)
{
	struct sysctl_ctx_list	*ctx;
	struct sysctl_oid_list	*children;
	struct sysctl_oid_list	*node_children;
	struct sysctl_oid	*ctx_oid;
	int			i, j;
	uint8_t			name_str[16];

	ctx = device_get_sysctl_ctx(ha->pci_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

	ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fpstat",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "fpstat");
	children = SYSCTL_CHILDREN(ctx_oid);

	for (i = 0; i < ha->num_rss; i++) {
		bzero(name_str, sizeof(name_str));
		snprintf(name_str, sizeof(name_str), "%d", i);

		ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str);
		node_children = SYSCTL_CHILDREN(ctx_oid);

		/* Tx Related */

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_processed",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_processed,
			"No. of packets processed for transmission");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_freed",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_freed,
			"No. of freed packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_transmitted",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_transmitted,
			"No. of transmitted packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_completed",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_completed,
			"No. of transmit completions");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_non_tso_pkts",
			CTLFLAG_RD, &ha->fp_array[i].tx_non_tso_pkts,
			"No. of non-LSO transmitted packets");

#ifdef QLNX_TRACE_PERF_DATA

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_trans_ctx",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_ctx,
			"No. of transmitted packets in transmit context");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_compl_ctx",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_ctx,
			"No. of transmit completions in transmit context");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_trans_fp",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_fp,
			"No. of transmitted packets in taskqueue");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_compl_fp",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_fp,
			"No. of transmit completions in taskqueue");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_compl_intr",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_intr,
			"No. of transmit completions in interrupt ctx");
#endif

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_tso_pkts",
			CTLFLAG_RD, &ha->fp_array[i].tx_tso_pkts,
			"No. of LSO transmitted packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_lso_wnd_min_len",
			CTLFLAG_RD, &ha->fp_array[i].tx_lso_wnd_min_len,
			"tx_lso_wnd_min_len");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_defrag",
			CTLFLAG_RD, &ha->fp_array[i].tx_defrag,
			"tx_defrag");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_nsegs_gt_elem_left",
			CTLFLAG_RD, &ha->fp_array[i].tx_nsegs_gt_elem_left,
			"tx_nsegs_gt_elem_left");

		SYSCTL_ADD_UINT(ctx, node_children,
			OID_AUTO, "tx_tso_max_nsegs",
			CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_nsegs,
			ha->fp_array[i].tx_tso_max_nsegs, "tx_tso_max_nsegs");

		SYSCTL_ADD_UINT(ctx, node_children,
			OID_AUTO, "tx_tso_min_nsegs",
			CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_nsegs,
			ha->fp_array[i].tx_tso_min_nsegs, "tx_tso_min_nsegs");

		SYSCTL_ADD_UINT(ctx, node_children,
			OID_AUTO, "tx_tso_max_pkt_len",
			CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_pkt_len,
			ha->fp_array[i].tx_tso_max_pkt_len,
			"tx_tso_max_pkt_len");

		SYSCTL_ADD_UINT(ctx, node_children,
			OID_AUTO, "tx_tso_min_pkt_len",
			CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_pkt_len,
			ha->fp_array[i].tx_tso_min_pkt_len,
			"tx_tso_min_pkt_len");

		for (j = 0; j < QLNX_FP_MAX_SEGS; j++) {
			bzero(name_str, sizeof(name_str));
			snprintf(name_str, sizeof(name_str),
				"tx_pkts_nseg_%02d", (j+1));

			SYSCTL_ADD_QUAD(ctx, node_children,
				OID_AUTO, name_str, CTLFLAG_RD,
				&ha->fp_array[i].tx_pkts[j], name_str);
		}

#ifdef QLNX_TRACE_PERF_DATA
		for (j = 0; j < 18; j++) {
			bzero(name_str, sizeof(name_str));
			snprintf(name_str, sizeof(name_str),
				"tx_pkts_hist_%02d", (j+1));

			SYSCTL_ADD_QUAD(ctx, node_children,
				OID_AUTO, name_str, CTLFLAG_RD,
				&ha->fp_array[i].tx_pkts_hist[j], name_str);
		}
		for (j = 0; j < 5; j++) {
			bzero(name_str, sizeof(name_str));
			snprintf(name_str, sizeof(name_str),
				"tx_comInt_%02d", (j+1));

			SYSCTL_ADD_QUAD(ctx, node_children,
				OID_AUTO, name_str, CTLFLAG_RD,
				&ha->fp_array[i].tx_comInt[j], name_str);
		}
		for (j = 0; j < 18; j++) {
			bzero(name_str, sizeof(name_str));
			snprintf(name_str, sizeof(name_str),
				"tx_pkts_q_%02d", (j+1));

			SYSCTL_ADD_QUAD(ctx, node_children,
				OID_AUTO, name_str, CTLFLAG_RD,
				&ha->fp_array[i].tx_pkts_q[j], name_str);
		}
#endif

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_nsegs_gt_elem_left",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_nsegs_gt_elem_left,
			"err_tx_nsegs_gt_elem_left");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_dmamap_create",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_create,
			"err_tx_dmamap_create");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_defrag_dmamap_load",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag_dmamap_load,
			"err_tx_defrag_dmamap_load");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_non_tso_max_seg",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_non_tso_max_seg,
			"err_tx_non_tso_max_seg");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_dmamap_load",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_load,
			"err_tx_dmamap_load");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_defrag",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag,
			"err_tx_defrag");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_free_pkt_null",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_free_pkt_null,
			"err_tx_free_pkt_null");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_cons_idx_conflict",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_cons_idx_conflict,
			"err_tx_cons_idx_conflict");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "lro_cnt_64",
			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_64,
			"lro_cnt_64");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "lro_cnt_128",
			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_128,
			"lro_cnt_128");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "lro_cnt_256",
			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_256,
			"lro_cnt_256");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "lro_cnt_512",
			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_512,
			"lro_cnt_512");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "lro_cnt_1024",
			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_1024,
			"lro_cnt_1024");

		/* Rx Related */

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "rx_pkts",
			CTLFLAG_RD, &ha->fp_array[i].rx_pkts,
			"No. of received packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tpa_start",
			CTLFLAG_RD, &ha->fp_array[i].tpa_start,
			"No. of tpa_start packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tpa_cont",
			CTLFLAG_RD, &ha->fp_array[i].tpa_cont,
			"No. of tpa_cont packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tpa_end",
			CTLFLAG_RD, &ha->fp_array[i].tpa_end,
			"No. of tpa_end packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_m_getcl",
			CTLFLAG_RD, &ha->fp_array[i].err_m_getcl,
			"err_m_getcl");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_m_getjcl",
			CTLFLAG_RD, &ha->fp_array[i].err_m_getjcl,
			"err_m_getjcl");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_rx_hw_errors",
			CTLFLAG_RD, &ha->fp_array[i].err_rx_hw_errors,
			"err_rx_hw_errors");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_rx_alloc_errors",
			CTLFLAG_RD, &ha->fp_array[i].err_rx_alloc_errors,
			"err_rx_alloc_errors");
	}

	return;
}
static void
qlnx_add_hw_stats_sysctls(qlnx_host_t *ha)
{
	struct sysctl_ctx_list	*ctx;
	struct sysctl_oid_list	*children;
	struct sysctl_oid	*ctx_oid;

	ctx = device_get_sysctl_ctx(ha->pci_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

	ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "hwstat",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "hwstat");
	children = SYSCTL_CHILDREN(ctx_oid);

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "no_buff_discards",
		CTLFLAG_RD, &ha->hw_stats.common.no_buff_discards,
		"No. of packets discarded due to lack of buffer");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "packet_too_big_discard",
		CTLFLAG_RD, &ha->hw_stats.common.packet_too_big_discard,
		"No. of packets discarded because packet was too big");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "ttl0_discard",
		CTLFLAG_RD, &ha->hw_stats.common.ttl0_discard,
		"ttl0_discard");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_ucast_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_bytes,
		"rx_ucast_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_mcast_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_bytes,
		"rx_mcast_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_bcast_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_bytes,
		"rx_bcast_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_ucast_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_pkts,
		"rx_ucast_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_mcast_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_pkts,
		"rx_mcast_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_bcast_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_pkts,
		"rx_bcast_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "mftag_filter_discards",
		CTLFLAG_RD, &ha->hw_stats.common.mftag_filter_discards,
		"mftag_filter_discards");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "mac_filter_discards",
		CTLFLAG_RD, &ha->hw_stats.common.mac_filter_discards,
		"mac_filter_discards");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_ucast_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_bytes,
		"tx_ucast_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_mcast_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_bytes,
		"tx_mcast_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_bcast_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_bytes,
		"tx_bcast_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_ucast_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_pkts,
		"tx_ucast_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_mcast_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_pkts,
		"tx_mcast_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_bcast_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_pkts,
		"tx_bcast_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_err_drop_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.tx_err_drop_pkts,
		"tx_err_drop_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tpa_coalesced_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_pkts,
		"tpa_coalesced_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tpa_coalesced_events",
		CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_events,
		"tpa_coalesced_events");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tpa_aborts_num",
		CTLFLAG_RD, &ha->hw_stats.common.tpa_aborts_num,
		"tpa_aborts_num");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tpa_not_coalesced_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.tpa_not_coalesced_pkts,
		"tpa_not_coalesced_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tpa_coalesced_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_bytes,
		"tpa_coalesced_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_64_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_64_byte_packets,
		"rx_64_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_65_to_127_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_65_to_127_byte_packets,
		"rx_65_to_127_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_128_to_255_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_128_to_255_byte_packets,
		"rx_128_to_255_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_256_to_511_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_256_to_511_byte_packets,
		"rx_256_to_511_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_512_to_1023_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_512_to_1023_byte_packets,
		"rx_512_to_1023_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_1024_to_1518_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_1024_to_1518_byte_packets,
		"rx_1024_to_1518_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_1519_to_1522_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_1522_byte_packets,
		"rx_1519_to_1522_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_1523_to_2047_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_2047_byte_packets,
		"rx_1523_to_2047_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_2048_to_4095_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.rx_2048_to_4095_byte_packets,
		"rx_2048_to_4095_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_4096_to_9216_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.rx_4096_to_9216_byte_packets,
		"rx_4096_to_9216_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_9217_to_16383_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.rx_9217_to_16383_byte_packets,
		"rx_9217_to_16383_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_crc_errors",
		CTLFLAG_RD, &ha->hw_stats.common.rx_crc_errors,
		"rx_crc_errors");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_mac_crtl_frames",
		CTLFLAG_RD, &ha->hw_stats.common.rx_mac_crtl_frames,
		"rx_mac_crtl_frames");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_pause_frames",
		CTLFLAG_RD, &ha->hw_stats.common.rx_pause_frames,
		"rx_pause_frames");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_pfc_frames",
		CTLFLAG_RD, &ha->hw_stats.common.rx_pfc_frames,
		"rx_pfc_frames");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_align_errors",
		CTLFLAG_RD, &ha->hw_stats.common.rx_align_errors,
		"rx_align_errors");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_carrier_errors",
		CTLFLAG_RD, &ha->hw_stats.common.rx_carrier_errors,
		"rx_carrier_errors");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_oversize_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_oversize_packets,
		"rx_oversize_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_jabbers",
		CTLFLAG_RD, &ha->hw_stats.common.rx_jabbers,
		"rx_jabbers");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_undersize_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_undersize_packets,
		"rx_undersize_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_fragments",
		CTLFLAG_RD, &ha->hw_stats.common.rx_fragments,
		"rx_fragments");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_64_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_64_byte_packets,
		"tx_64_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_65_to_127_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_65_to_127_byte_packets,
		"tx_65_to_127_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_128_to_255_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_128_to_255_byte_packets,
		"tx_128_to_255_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_256_to_511_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_256_to_511_byte_packets,
		"tx_256_to_511_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_512_to_1023_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_512_to_1023_byte_packets,
		"tx_512_to_1023_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_1024_to_1518_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_1024_to_1518_byte_packets,
		"tx_1024_to_1518_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_1519_to_2047_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.tx_1519_to_2047_byte_packets,
		"tx_1519_to_2047_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_2048_to_4095_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.tx_2048_to_4095_byte_packets,
		"tx_2048_to_4095_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_4096_to_9216_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.tx_4096_to_9216_byte_packets,
		"tx_4096_to_9216_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_9217_to_16383_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.tx_9217_to_16383_byte_packets,
		"tx_9217_to_16383_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_pause_frames",
		CTLFLAG_RD, &ha->hw_stats.common.tx_pause_frames,
		"tx_pause_frames");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_pfc_frames",
		CTLFLAG_RD, &ha->hw_stats.common.tx_pfc_frames,
		"tx_pfc_frames");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_lpi_entry_count",
		CTLFLAG_RD, &ha->hw_stats.bb.tx_lpi_entry_count,
		"tx_lpi_entry_count");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_total_collisions",
		CTLFLAG_RD, &ha->hw_stats.bb.tx_total_collisions,
		"tx_total_collisions");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "brb_truncates",
		CTLFLAG_RD, &ha->hw_stats.common.brb_truncates,
		"brb_truncates");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "brb_discards",
		CTLFLAG_RD, &ha->hw_stats.common.brb_discards,
		"brb_discards");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_mac_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bytes,
		"rx_mac_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_mac_uc_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_mac_uc_packets,
		"rx_mac_uc_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_mac_mc_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_mac_mc_packets,
		"rx_mac_mc_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_mac_bc_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bc_packets,
		"rx_mac_bc_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_mac_frames_ok",
		CTLFLAG_RD, &ha->hw_stats.common.rx_mac_frames_ok,
		"rx_mac_frames_ok");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_mac_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bytes,
		"tx_mac_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_mac_uc_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_mac_uc_packets,
		"tx_mac_uc_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_mac_mc_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_mac_mc_packets,
		"tx_mac_mc_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_mac_bc_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bc_packets,
		"tx_mac_bc_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_mac_ctrl_frames",
		CTLFLAG_RD, &ha->hw_stats.common.tx_mac_ctrl_frames,
		"tx_mac_ctrl_frames");

	return;
}
2175 qlnx_add_sysctls(qlnx_host_t *ha)
2177 device_t dev = ha->pci_dev;
2178 struct sysctl_ctx_list *ctx;
2179 struct sysctl_oid_list *children;
2181 ctx = device_get_sysctl_ctx(dev);
2182 children = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2184 qlnx_add_fp_stats_sysctls(ha);
2185 qlnx_add_sp_stats_sysctls(ha);
2187 if (qlnx_vf_device(ha) != 0)
2188 qlnx_add_hw_stats_sysctls(ha);
2190 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Driver_Version",
2191 CTLFLAG_RD, qlnx_ver_str, 0,
2194 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "STORMFW_Version",
2195 CTLFLAG_RD, ha->stormfw_ver, 0,
2196 "STORM Firmware Version");
2198 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "MFW_Version",
2199 CTLFLAG_RD, ha->mfw_ver, 0,
2200 "Management Firmware Version");
2202 SYSCTL_ADD_UINT(ctx, children,
2203 OID_AUTO, "personality", CTLFLAG_RD,
2204 &ha->personality, ha->personality,
2205 "\tpersonality = 0 => Ethernet Only\n"
2206 "\tpersonality = 3 => Ethernet and RoCE\n"
2207 "\tpersonality = 4 => Ethernet and iWARP\n"
2208 "\tpersonality = 6 => Default in Shared Memory\n");
2211 SYSCTL_ADD_UINT(ctx, children,
2212 OID_AUTO, "debug", CTLFLAG_RW,
2213 &ha->dbg_level, ha->dbg_level, "Debug Level");
2215 ha->dp_level = 0x01;
2216 SYSCTL_ADD_UINT(ctx, children,
2217 OID_AUTO, "dp_level", CTLFLAG_RW,
2218 &ha->dp_level, ha->dp_level, "DP Level");
2220 ha->dbg_trace_lro_cnt = 0;
2221 SYSCTL_ADD_UINT(ctx, children,
2222 OID_AUTO, "dbg_trace_lro_cnt", CTLFLAG_RW,
2223 &ha->dbg_trace_lro_cnt, ha->dbg_trace_lro_cnt,
2224 "Trace LRO Counts");
2226 ha->dbg_trace_tso_pkt_len = 0;
2227 SYSCTL_ADD_UINT(ctx, children,
2228 OID_AUTO, "dbg_trace_tso_pkt_len", CTLFLAG_RW,
2229 &ha->dbg_trace_tso_pkt_len, ha->dbg_trace_tso_pkt_len,
2230 "Trace TSO packet lengths");
2233 SYSCTL_ADD_UINT(ctx, children,
2234 OID_AUTO, "dp_module", CTLFLAG_RW,
2235 &ha->dp_module, ha->dp_module, "DP Module");
2239 SYSCTL_ADD_UINT(ctx, children,
2240 OID_AUTO, "err_inject", CTLFLAG_RW,
2241 &ha->err_inject, ha->err_inject, "Error Inject");
2243 ha->storm_stats_enable = 0;
2245 SYSCTL_ADD_UINT(ctx, children,
2246 OID_AUTO, "storm_stats_enable", CTLFLAG_RW,
2247 &ha->storm_stats_enable, ha->storm_stats_enable,
2248 "Enable Storm Statistics Gathering");
2250 ha->storm_stats_index = 0;
2252 SYSCTL_ADD_UINT(ctx, children,
2253 OID_AUTO, "storm_stats_index", CTLFLAG_RD,
2254 &ha->storm_stats_index, ha->storm_stats_index,
2255 "Enable Storm Statistics Gathering Current Index");
2257 ha->grcdump_taken = 0;
2258 SYSCTL_ADD_UINT(ctx, children,
2259 OID_AUTO, "grcdump_taken", CTLFLAG_RD,
2260 &ha->grcdump_taken, ha->grcdump_taken,
2263 ha->idle_chk_taken = 0;
2264 SYSCTL_ADD_UINT(ctx, children,
2265 OID_AUTO, "idle_chk_taken", CTLFLAG_RD,
2266 &ha->idle_chk_taken, ha->idle_chk_taken,
2269 SYSCTL_ADD_UINT(ctx, children,
2270 OID_AUTO, "rx_coalesce_usecs", CTLFLAG_RD,
2271 &ha->rx_coalesce_usecs, ha->rx_coalesce_usecs,
2272 "rx_coalesce_usecs");
2274 SYSCTL_ADD_UINT(ctx, children,
2275 OID_AUTO, "tx_coalesce_usecs", CTLFLAG_RD,
2276 &ha->tx_coalesce_usecs, ha->tx_coalesce_usecs,
2277 "tx_coalesce_usecs");
2279 SYSCTL_ADD_PROC(ctx, children,
2280 OID_AUTO, "trigger_dump",
2281 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2282 (void *)ha, 0, qlnx_trigger_dump_sysctl, "I", "trigger_dump");
2284 SYSCTL_ADD_PROC(ctx, children,
2285 OID_AUTO, "set_rx_coalesce_usecs",
2286 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2287 (void *)ha, 0, qlnx_set_rx_coalesce, "I",
2288 "rx interrupt coalesce period microseconds");
2290 SYSCTL_ADD_PROC(ctx, children,
2291 OID_AUTO, "set_tx_coalesce_usecs",
2292 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2293 (void *)ha, 0, qlnx_set_tx_coalesce, "I",
2294 "tx interrupt coalesce period microseconds");
2296 ha->rx_pkt_threshold = 128;
2297 SYSCTL_ADD_UINT(ctx, children,
2298 OID_AUTO, "rx_pkt_threshold", CTLFLAG_RW,
2299 &ha->rx_pkt_threshold, ha->rx_pkt_threshold,
2300 "No. of Rx Pkts to process at a time");
2302 ha->rx_jumbo_buf_eq_mtu = 0;
2303 SYSCTL_ADD_UINT(ctx, children,
2304 OID_AUTO, "rx_jumbo_buf_eq_mtu", CTLFLAG_RW,
2305 &ha->rx_jumbo_buf_eq_mtu, ha->rx_jumbo_buf_eq_mtu,
2306 "== 0 => Rx Jumbo buffers are capped to 4Kbytes\n"
2307 "otherwise Rx Jumbo buffers are set to >= MTU size\n");
2309 SYSCTL_ADD_QUAD(ctx, children,
2310 OID_AUTO, "err_illegal_intr", CTLFLAG_RD,
2311 &ha->err_illegal_intr, "err_illegal_intr");
2313 SYSCTL_ADD_QUAD(ctx, children,
2314 OID_AUTO, "err_fp_null", CTLFLAG_RD,
2315 &ha->err_fp_null, "err_fp_null");
2317 SYSCTL_ADD_QUAD(ctx, children,
2318 OID_AUTO, "err_get_proto_invalid_type", CTLFLAG_RD,
2319 &ha->err_get_proto_invalid_type, "err_get_proto_invalid_type");
2323 /*****************************************************************************
2324 * Operating System Network Interface Functions
2325 *****************************************************************************/
2328 qlnx_init_ifnet(device_t dev, qlnx_host_t *ha)
2333 ifp = ha->ifp = if_alloc(IFT_ETHER);
2336 panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));
2338 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2340 device_id = pci_get_device(ha->pci_dev);
2342 #if __FreeBSD_version >= 1000000
2344 if (device_id == QLOGIC_PCI_DEVICE_ID_1634)
2345 ifp->if_baudrate = IF_Gbps(40);
2346 else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
2347 (device_id == QLOGIC_PCI_DEVICE_ID_8070))
2348 ifp->if_baudrate = IF_Gbps(25);
2349 else if (device_id == QLOGIC_PCI_DEVICE_ID_1654)
2350 ifp->if_baudrate = IF_Gbps(50);
2351 else if (device_id == QLOGIC_PCI_DEVICE_ID_1644)
2352 ifp->if_baudrate = IF_Gbps(100);
2354 ifp->if_capabilities = IFCAP_LINKSTATE;
2356 ifp->if_mtu = ETHERMTU;
2357 ifp->if_baudrate = (1 * 1000 * 1000 * 1000);
2359 #endif /* #if __FreeBSD_version >= 1000000 */
2361 ifp->if_init = qlnx_init;
2363 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2364 ifp->if_ioctl = qlnx_ioctl;
2365 ifp->if_transmit = qlnx_transmit;
2366 ifp->if_qflush = qlnx_qflush;
2368 IFQ_SET_MAXLEN(&ifp->if_snd, qlnx_get_ifq_snd_maxlen(ha));
2369 ifp->if_snd.ifq_drv_maxlen = qlnx_get_ifq_snd_maxlen(ha);
2370 IFQ_SET_READY(&ifp->if_snd);
2372 #if __FreeBSD_version >= 1100036
2373 if_setgetcounterfn(ifp, qlnx_get_counter);
2376 ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2378 memcpy(ha->primary_mac, qlnx_get_mac_addr(ha), ETH_ALEN);
2380 if (!ha->primary_mac[0] && !ha->primary_mac[1] &&
2381 !ha->primary_mac[2] && !ha->primary_mac[3] &&
2382 !ha->primary_mac[4] && !ha->primary_mac[5]) {
2387 ha->primary_mac[0] = 0x00;
2388 ha->primary_mac[1] = 0x0e;
2389 ha->primary_mac[2] = 0x1e;
2390 ha->primary_mac[3] = rnd & 0xFF;
2391 ha->primary_mac[4] = (rnd >> 8) & 0xFF;
2392 ha->primary_mac[5] = (rnd >> 16) & 0xFF;
2395 ether_ifattach(ifp, ha->primary_mac);
2396 bcopy(IF_LLADDR(ha->ifp), ha->primary_mac, ETHER_ADDR_LEN);
2398 ifp->if_capabilities = IFCAP_HWCSUM;
2399 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2401 ifp->if_capabilities |= IFCAP_VLAN_MTU;
2402 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
2403 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2404 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
2405 ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
2406 ifp->if_capabilities |= IFCAP_TSO4;
2407 ifp->if_capabilities |= IFCAP_TSO6;
2408 ifp->if_capabilities |= IFCAP_LRO;
2410 ifp->if_hw_tsomax = QLNX_MAX_TSO_FRAME_SIZE -
2411 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
2412 ifp->if_hw_tsomaxsegcount = QLNX_MAX_SEGMENTS - 1 /* hdr */;
2413 ifp->if_hw_tsomaxsegsize = QLNX_MAX_TX_MBUF_SIZE;
2415 ifp->if_capenable = ifp->if_capabilities;
2417 ifp->if_hwassist = CSUM_IP;
2418 ifp->if_hwassist |= CSUM_TCP | CSUM_UDP;
2419 ifp->if_hwassist |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6;
2420 ifp->if_hwassist |= CSUM_TSO;
2422 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2424 ifmedia_init(&ha->media, IFM_IMASK, qlnx_media_change,\
2427 if (device_id == QLOGIC_PCI_DEVICE_ID_1634) {
2428 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_LR4), 0, NULL);
2429 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_SR4), 0, NULL);
2430 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_CR4), 0, NULL);
2431 } else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
2432 (device_id == QLOGIC_PCI_DEVICE_ID_8070)) {
2433 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_SR), 0, NULL);
2434 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_CR), 0, NULL);
2435 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) {
2436 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_KR2), 0, NULL);
2437 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_CR2), 0, NULL);
2438 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) {
2439 ifmedia_add(&ha->media,
2440 (IFM_ETHER | QLNX_IFM_100G_LR4), 0, NULL);
2441 ifmedia_add(&ha->media,
2442 (IFM_ETHER | QLNX_IFM_100G_SR4), 0, NULL);
2443 ifmedia_add(&ha->media,
2444 (IFM_ETHER | QLNX_IFM_100G_CR4), 0, NULL);
2447 ifmedia_add(&ha->media, (IFM_ETHER | IFM_FDX), 0, NULL);
2448 ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);
2450 ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));
2452 QL_DPRINT2(ha, "exit\n");
2458 qlnx_init_locked(qlnx_host_t *ha)
2460 struct ifnet *ifp = ha->ifp;
2462 QL_DPRINT1(ha, "Driver Initialization start \n");
2466 if (qlnx_load(ha) == 0) {
2467 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2468 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2470 #ifdef QLNX_ENABLE_IWARP
2471 if (qlnx_vf_device(ha) != 0) {
2472 qlnx_rdma_dev_open(ha);
2474 #endif /* #ifdef QLNX_ENABLE_IWARP */
2481 qlnx_init(void *arg)
2485 ha = (qlnx_host_t *)arg;
2487 QL_DPRINT2(ha, "enter\n");
2490 qlnx_init_locked(ha);
2493 QL_DPRINT2(ha, "exit\n");
2499 qlnx_config_mcast_mac_addr(qlnx_host_t *ha, uint8_t *mac_addr, uint32_t add_mac)
2501 struct ecore_filter_mcast *mcast;
2502 struct ecore_dev *cdev;
2507 mcast = &ha->ecore_mcast;
2508 bzero(mcast, sizeof(struct ecore_filter_mcast));
2511 mcast->opcode = ECORE_FILTER_ADD;
2513 mcast->opcode = ECORE_FILTER_REMOVE;
2515 mcast->num_mc_addrs = 1;
2516 memcpy(mcast->mac, mac_addr, ETH_ALEN);
2518 rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);
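/*
 * qlnx_hw_add_mcast: program one multicast address into the hardware,
 * first scanning ha->mcast[] for a duplicate and then for a free
 * (all-zero) slot; the slot is claimed only after
 * qlnx_config_mcast_mac_addr() succeeds.
 */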
2524 qlnx_hw_add_mcast(qlnx_host_t *ha, uint8_t *mta)
2528 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2529 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0)
2530 return 0; /* it's already been added */
2533 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2534 if ((ha->mcast[i].addr[0] == 0) &&
2535 (ha->mcast[i].addr[1] == 0) &&
2536 (ha->mcast[i].addr[2] == 0) &&
2537 (ha->mcast[i].addr[3] == 0) &&
2538 (ha->mcast[i].addr[4] == 0) &&
2539 (ha->mcast[i].addr[5] == 0)) {
2540 if (qlnx_config_mcast_mac_addr(ha, mta, 1))
2543 bcopy(mta, ha->mcast[i].addr, ETH_ALEN);
2553 qlnx_hw_del_mcast(qlnx_host_t *ha, uint8_t *mta)
2557 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2558 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) {
2559 if (qlnx_config_mcast_mac_addr(ha, mta, 0))
2562 ha->mcast[i].addr[0] = 0;
2563 ha->mcast[i].addr[1] = 0;
2564 ha->mcast[i].addr[2] = 0;
2565 ha->mcast[i].addr[3] = 0;
2566 ha->mcast[i].addr[4] = 0;
2567 ha->mcast[i].addr[5] = 0;
2578 * Name: qlnx_hw_set_multi
2579 * Function: Sets the multicast addresses provided by the host OS into the
2580 * hardware (for the given interface)
2583 qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
2588 for (i = 0; i < mcnt; i++) {
2590 if (qlnx_hw_add_mcast(ha, mta))
2593 if (qlnx_hw_del_mcast(ha, mta))
2597 mta += ETHER_HDR_LEN;
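/*
 * Note: the flat mta table is laid out in ETHER_HDR_LEN (14) byte
 * strides rather than ETHER_ADDR_LEN (6); only the first six bytes of
 * each slot hold a MAC address, matching how qlnx_copy_maddr() below
 * fills the table.
 */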
2603 qlnx_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt)
2607 if (mcnt == QLNX_MAX_NUM_MULTICAST_ADDRS)
2610 bcopy(LLADDR(sdl), &mta[mcnt * ETHER_HDR_LEN], ETHER_HDR_LEN);
2616 qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi)
2618 uint8_t mta[QLNX_MAX_NUM_MULTICAST_ADDRS * ETHER_HDR_LEN];
2619 struct ifnet *ifp = ha->ifp;
2622 if (qlnx_vf_device(ha) == 0)
2625 mcnt = if_foreach_llmaddr(ifp, qlnx_copy_maddr, mta);
2628 qlnx_hw_set_multi(ha, mta, mcnt, add_multi);
2635 qlnx_set_promisc(qlnx_host_t *ha)
2640 if (qlnx_vf_device(ha) == 0)
2643 filter = ha->filter;
2644 filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
2645 filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
2647 rc = qlnx_set_rx_accept_filter(ha, filter);
2652 qlnx_set_allmulti(qlnx_host_t *ha)
2657 if (qlnx_vf_device(ha) == 0)
2660 filter = ha->filter;
2661 filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
2662 rc = qlnx_set_rx_accept_filter(ha, filter);
2668 qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2671 struct ifreq *ifr = (struct ifreq *)data;
2672 struct ifaddr *ifa = (struct ifaddr *)data;
2675 ha = (qlnx_host_t *)ifp->if_softc;
2679 QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx)\n", cmd);
2681 if (ifa->ifa_addr->sa_family == AF_INET) {
2682 ifp->if_flags |= IFF_UP;
2683 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2685 qlnx_init_locked(ha);
2688 QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
2689 cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr));
2691 arp_ifinit(ifp, ifa);
2693 ether_ioctl(ifp, cmd, data);
2698 QL_DPRINT4(ha, "SIOCSIFMTU (0x%lx)\n", cmd);
2700 if (ifr->ifr_mtu > QLNX_MAX_MTU) {
2704 ifp->if_mtu = ifr->ifr_mtu;
2705 ha->max_frame_size =
2706 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2707 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2708 qlnx_init_locked(ha);
2717 QL_DPRINT4(ha, "SIOCSIFFLAGS (0x%lx)\n", cmd);
2721 if (ifp->if_flags & IFF_UP) {
2722 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2723 if ((ifp->if_flags ^ ha->if_flags) &
2725 ret = qlnx_set_promisc(ha);
2726 } else if ((ifp->if_flags ^ ha->if_flags) &
2728 ret = qlnx_set_allmulti(ha);
2731 ha->max_frame_size = ifp->if_mtu +
2732 ETHER_HDR_LEN + ETHER_CRC_LEN;
2733 qlnx_init_locked(ha);
2736 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2738 ha->if_flags = ifp->if_flags;
2745 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCADDMULTI", cmd);
2747 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2748 if (qlnx_set_multi(ha, 1))
2754 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCDELMULTI", cmd);
2756 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2757 if (qlnx_set_multi(ha, 0))
2764 QL_DPRINT4(ha, "SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", cmd);
2766 ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
2771 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2773 QL_DPRINT4(ha, "SIOCSIFCAP (0x%lx)\n", cmd);
2775 if (mask & IFCAP_HWCSUM)
2776 ifp->if_capenable ^= IFCAP_HWCSUM;
2777 if (mask & IFCAP_TSO4)
2778 ifp->if_capenable ^= IFCAP_TSO4;
2779 if (mask & IFCAP_TSO6)
2780 ifp->if_capenable ^= IFCAP_TSO6;
2781 if (mask & IFCAP_VLAN_HWTAGGING)
2782 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2783 if (mask & IFCAP_VLAN_HWTSO)
2784 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
2785 if (mask & IFCAP_LRO)
2786 ifp->if_capenable ^= IFCAP_LRO;
2790 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2791 qlnx_init_locked(ha);
2795 VLAN_CAPABILITIES(ifp);
2798 #if (__FreeBSD_version >= 1100101)
2802 struct ifi2creq i2c;
2803 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0];
2804 struct ecore_ptt *p_ptt;
2806 ret = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
2811 if ((i2c.len > sizeof (i2c.data)) ||
2812 (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2)) {
2817 p_ptt = ecore_ptt_acquire(p_hwfn);
2820 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
2825 ret = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt,
2826 (ha->pci_func & 0x1), i2c.dev_addr, i2c.offset,
2827 i2c.len, &i2c.data[0]);
2829 ecore_ptt_release(p_hwfn, p_ptt);
2836 ret = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
2838 QL_DPRINT8(ha, "SIOCGI2C copyout ret = %d \
2839 len = %d addr = 0x%02x offset = 0x%04x \
2840 data[0..7]=0x%02x 0x%02x 0x%02x 0x%02x 0x%02x \
2841 0x%02x 0x%02x 0x%02x\n",
2842 ret, i2c.len, i2c.dev_addr, i2c.offset,
2843 i2c.data[0], i2c.data[1], i2c.data[2], i2c.data[3],
2844 i2c.data[4], i2c.data[5], i2c.data[6], i2c.data[7]);
2847 #endif /* #if (__FreeBSD_version >= 1100101) */
2850 QL_DPRINT4(ha, "default (0x%lx)\n", cmd);
2851 ret = ether_ioctl(ifp, cmd, data);
2859 qlnx_media_change(struct ifnet *ifp)
2862 struct ifmedia *ifm;
2865 ha = (qlnx_host_t *)ifp->if_softc;
2867 QL_DPRINT2(ha, "enter\n");
2871 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2874 QL_DPRINT2(ha, "exit\n");
2880 qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2884 ha = (qlnx_host_t *)ifp->if_softc;
2886 QL_DPRINT2(ha, "enter\n");
2888 ifmr->ifm_status = IFM_AVALID;
2889 ifmr->ifm_active = IFM_ETHER;
2892 ifmr->ifm_status |= IFM_ACTIVE;
2894 (IFM_FDX | qlnx_get_optics(ha, &ha->if_link));
2896 if (ha->if_link.link_partner_caps &
2897 (QLNX_LINK_CAP_Pause | QLNX_LINK_CAP_Asym_Pause))
2899 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE);
2902 QL_DPRINT2(ha, "exit (%s)\n", (ha->link_up ? "link_up" : "link_down"));
2908 qlnx_free_tx_pkt(qlnx_host_t *ha, struct qlnx_fastpath *fp,
2909 struct qlnx_tx_queue *txq)
2915 struct eth_tx_bd *tx_data_bd;
2916 struct eth_tx_1st_bd *first_bd;
2919 idx = txq->sw_tx_cons;
2920 mp = txq->sw_tx_ring[idx].mp;
2921 map = txq->sw_tx_ring[idx].map;
2923 if ((mp == NULL) || QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL)){
2924 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL);
2926 QL_DPRINT1(ha, "(mp == NULL) "
2928 " ecore_prod_idx = 0x%x"
2929 " ecore_cons_idx = 0x%x"
2930 " hw_bd_cons = 0x%x"
2931 " txq_db_last = 0x%x"
2932 " elem_left = 0x%x\n",
2934 ecore_chain_get_prod_idx(&txq->tx_pbl),
2935 ecore_chain_get_cons_idx(&txq->tx_pbl),
2936 le16toh(*txq->hw_cons_ptr),
2938 ecore_chain_get_elem_left(&txq->tx_pbl));
2940 fp->err_tx_free_pkt_null++;
2943 qlnx_trigger_dump(ha);
2947 QLNX_INC_OPACKETS((ha->ifp));
2948 QLNX_INC_OBYTES((ha->ifp), (mp->m_pkthdr.len));
2950 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_POSTWRITE);
2951 bus_dmamap_unload(ha->tx_tag, map);
2953 fp->tx_pkts_freed++;
2954 fp->tx_pkts_completed++;
2959 first_bd = (struct eth_tx_1st_bd *)ecore_chain_consume(&txq->tx_pbl);
2960 nbds = first_bd->data.nbds;
2962 // BD_SET_UNMAP_ADDR_LEN(first_bd, 0, 0);
2964 for (i = 1; i < nbds; i++) {
2965 tx_data_bd = ecore_chain_consume(&txq->tx_pbl);
2966 // BD_SET_UNMAP_ADDR_LEN(tx_data_bd, 0, 0);
2968 txq->sw_tx_ring[idx].flags = 0;
2969 txq->sw_tx_ring[idx].mp = NULL;
2970 txq->sw_tx_ring[idx].map = (bus_dmamap_t)0;
2976 qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
2977 struct qlnx_tx_queue *txq)
2984 hw_bd_cons = le16toh(*txq->hw_cons_ptr);
2986 while (hw_bd_cons !=
2987 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
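/*
 * hw_bd_cons and ecore_cons_idx are free-running 16-bit indices, so
 * compute the distance modulo 2^16 when the hardware consumer has
 * wrapped past the chain's consumer index.
 */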
2988 if (hw_bd_cons < ecore_cons_idx) {
2989 diff = (1 << 16) - (ecore_cons_idx - hw_bd_cons);
2991 diff = hw_bd_cons - ecore_cons_idx;
2993 if ((diff > TX_RING_SIZE) ||
2994 QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF)){
2995 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF);
2997 QL_DPRINT1(ha, "(diff = 0x%x) "
2999 " ecore_prod_idx = 0x%x"
3000 " ecore_cons_idx = 0x%x"
3001 " hw_bd_cons = 0x%x"
3002 " txq_db_last = 0x%x"
3003 " elem_left = 0x%x\n",
3006 ecore_chain_get_prod_idx(&txq->tx_pbl),
3007 ecore_chain_get_cons_idx(&txq->tx_pbl),
3008 le16toh(*txq->hw_cons_ptr),
3010 ecore_chain_get_elem_left(&txq->tx_pbl));
3012 fp->err_tx_cons_idx_conflict++;
3015 qlnx_trigger_dump(ha);
3018 idx = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1);
3019 idx2 = (txq->sw_tx_cons + 2) & (TX_RING_SIZE - 1);
3020 prefetch(txq->sw_tx_ring[idx].mp);
3021 prefetch(txq->sw_tx_ring[idx2].mp);
3023 qlnx_free_tx_pkt(ha, fp, txq);
3025 txq->sw_tx_cons = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1);
3031 qlnx_transmit_locked(struct ifnet *ifp, struct qlnx_fastpath *fp, struct mbuf *mp)
3034 struct qlnx_tx_queue *txq;
3039 ha = (qlnx_host_t *)fp->edev;
3041 if ((!(ifp->if_drv_flags & IFF_DRV_RUNNING)) || (!ha->link_up)) {
3043 ret = drbr_enqueue(ifp, fp->tx_br, mp);
3048 ret = drbr_enqueue(ifp, fp->tx_br, mp);
3050 mp = drbr_peek(ifp, fp->tx_br);
3052 while (mp != NULL) {
3053 if (qlnx_send(ha, fp, &mp)) {
3055 drbr_putback(ifp, fp->tx_br, mp);
3057 fp->tx_pkts_processed++;
3058 drbr_advance(ifp, fp->tx_br);
3060 goto qlnx_transmit_locked_exit;
3063 drbr_advance(ifp, fp->tx_br);
3064 fp->tx_pkts_transmitted++;
3065 fp->tx_pkts_processed++;
3068 mp = drbr_peek(ifp, fp->tx_br);
3071 qlnx_transmit_locked_exit:
3072 if ((qlnx_num_tx_compl(ha, fp, fp->txq[0]) > QLNX_TX_COMPL_THRESH) ||
3073 ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))
3074 < QLNX_TX_ELEM_MAX_THRESH))
3075 (void)qlnx_tx_int(ha, fp, fp->txq[0]);
3077 QL_DPRINT2(ha, "%s: exit ret = %d\n", __func__, ret);
3082 qlnx_transmit(struct ifnet *ifp, struct mbuf *mp)
3084 qlnx_host_t *ha = (qlnx_host_t *)ifp->if_softc;
3085 struct qlnx_fastpath *fp;
3086 int rss_id = 0, ret = 0;
3088 #ifdef QLNX_TRACEPERF_DATA
3089 uint64_t tx_pkts = 0, tx_compl = 0;
3092 QL_DPRINT2(ha, "enter\n");
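	/*
	 * Steer the packet to a fastpath ring: when the stack supplied a
	 * flowid, hash it down (modulo the RSS indirection table size and
	 * the ring count) so one flow stays on one Tx queue; otherwise
	 * ring 0 is used.
	 */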
3094 #if __FreeBSD_version >= 1100000
3095 if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE)
3097 if (mp->m_flags & M_FLOWID)
3099 rss_id = (mp->m_pkthdr.flowid % ECORE_RSS_IND_TABLE_SIZE) %
3102 fp = &ha->fp_array[rss_id];
3104 if (fp->tx_br == NULL) {
3106 goto qlnx_transmit_exit;
3109 if (mtx_trylock(&fp->tx_mtx)) {
3110 #ifdef QLNX_TRACEPERF_DATA
3111 tx_pkts = fp->tx_pkts_transmitted;
3112 tx_compl = fp->tx_pkts_completed;
3115 ret = qlnx_transmit_locked(ifp, fp, mp);
3117 #ifdef QLNX_TRACEPERF_DATA
3118 fp->tx_pkts_trans_ctx += (fp->tx_pkts_transmitted - tx_pkts);
3119 fp->tx_pkts_compl_ctx += (fp->tx_pkts_completed - tx_compl);
3121 mtx_unlock(&fp->tx_mtx);
3123 if (mp != NULL && (fp->fp_taskqueue != NULL)) {
3124 ret = drbr_enqueue(ifp, fp->tx_br, mp);
3125 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
3131 QL_DPRINT2(ha, "exit ret = %d\n", ret);
3136 qlnx_qflush(struct ifnet *ifp)
3139 struct qlnx_fastpath *fp;
3143 ha = (qlnx_host_t *)ifp->if_softc;
3145 QL_DPRINT2(ha, "enter\n");
3147 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
3148 fp = &ha->fp_array[rss_id];
3154 mtx_lock(&fp->tx_mtx);
3156 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
3157 fp->tx_pkts_freed++;
3160 mtx_unlock(&fp->tx_mtx);
3163 QL_DPRINT2(ha, "exit\n");
3169 qlnx_txq_doorbell_wr32(qlnx_host_t *ha, void *reg_addr, uint32_t value)
3171 struct ecore_dev *cdev;
3176 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)ha->pci_dbells);
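	/*
	 * Post the 32-bit doorbell; the barriers on both the register and
	 * doorbell BARs are intended to force the posted write out to the
	 * device before any subsequent doorbell update.
	 */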
3178 bus_write_4(ha->pci_dbells, offset, value);
3179 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_READ);
3180 bus_barrier(ha->pci_dbells, 0, 0, BUS_SPACE_BARRIER_READ);
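/*
 * qlnx_tcp_offset: return the byte offset of the TCP payload
 * (L2 + L3 + L4 header lengths) for a TSO mbuf, copying the IP/IPv6
 * header into a local buffer when it is not contiguous in the first
 * mbuf; note the fixed sizeof(struct ip) assumption ignores IPv4 options.
 */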
3186 qlnx_tcp_offset(qlnx_host_t *ha, struct mbuf *mp)
3188 struct ether_vlan_header *eh = NULL;
3189 struct ip *ip = NULL;
3190 struct ip6_hdr *ip6 = NULL;
3191 struct tcphdr *th = NULL;
3192 uint32_t ehdrlen = 0, ip_hlen = 0, offset = 0;
3195 uint8_t buf[sizeof(struct ip6_hdr)];
3199 eh = mtod(mp, struct ether_vlan_header *);
3201 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3202 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3203 etype = ntohs(eh->evl_proto);
3205 ehdrlen = ETHER_HDR_LEN;
3206 etype = ntohs(eh->evl_encap_proto);
3211 ip = (struct ip *)(mp->m_data + ehdrlen);
3213 ip_hlen = sizeof (struct ip);
3215 if (mp->m_len < (ehdrlen + ip_hlen)) {
3216 m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
3217 ip = (struct ip *)buf;
3220 th = (struct tcphdr *)(ip + 1);
3221 offset = ip_hlen + ehdrlen + (th->th_off << 2);
3224 case ETHERTYPE_IPV6:
3225 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3227 ip_hlen = sizeof(struct ip6_hdr);
3229 if (mp->m_len < (ehdrlen + ip_hlen)) {
3230 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
3232 ip6 = (struct ip6_hdr *)buf;
3234 th = (struct tcphdr *)(ip6 + 1);
3235 offset = ip_hlen + ehdrlen + (th->th_off << 2);
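/*
 * qlnx_tso_check: enforce the hardware LSO restriction that any
 * ETH_TX_LSO_WINDOW_BDS_NUM consecutive BDs must carry at least
 * ETH_TX_LSO_WINDOW_MIN_LEN bytes; a nonzero return tells the caller
 * to defragment the mbuf chain first.
 */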
3246 qlnx_tso_check(struct qlnx_fastpath *fp, bus_dma_segment_t *segs, int nsegs,
3250 uint32_t sum, nbds_in_hdr = 1;
3252 bus_dma_segment_t *s_seg;
3254 /* If the header spans multiple segments, skip those segments */
3256 if (nsegs < ETH_TX_LSO_WINDOW_BDS_NUM)
3261 while ((i < nsegs) && (offset >= segs->ds_len)) {
3262 offset = offset - segs->ds_len;
3268 window = ETH_TX_LSO_WINDOW_BDS_NUM - nbds_in_hdr;
3272 while (nsegs >= window) {
3276 for (i = 0; i < window; i++){
3277 sum += s_seg->ds_len;
3281 if (sum < ETH_TX_LSO_WINDOW_MIN_LEN) {
3282 fp->tx_lso_wnd_min_len++;
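/*
 * qlnx_send: DMA-map the mbuf chain (defragmenting once on EFBIG, on
 * too many segments, or when the LSO window check fails), build the
 * first/second/third BDs plus any data BDs (splitting the segment in
 * which a TSO header ends), and finally ring the Tx doorbell.
 */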
3294 qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct mbuf **m_headp)
3296 bus_dma_segment_t *segs;
3297 bus_dmamap_t map = 0;
3300 struct mbuf *m_head = *m_headp;
3305 struct qlnx_tx_queue *txq;
3307 struct eth_tx_1st_bd *first_bd;
3308 struct eth_tx_2nd_bd *second_bd;
3309 struct eth_tx_3rd_bd *third_bd;
3310 struct eth_tx_bd *tx_data_bd;
3313 uint32_t nbds_in_hdr = 0;
3314 uint32_t offset = 0;
3316 #ifdef QLNX_TRACE_PERF_DATA
3320 QL_DPRINT8(ha, "enter[%d]\n", fp->rss_id);
3332 if ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl)) <
3333 QLNX_TX_ELEM_MIN_THRESH) {
3334 fp->tx_nsegs_gt_elem_left++;
3335 fp->err_tx_nsegs_gt_elem_left++;
3340 idx = txq->sw_tx_prod;
3342 map = txq->sw_tx_ring[idx].map;
3345 ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
3348 if (ha->dbg_trace_tso_pkt_len) {
3349 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3350 if (!fp->tx_tso_min_pkt_len) {
3351 fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len;
3352 fp->tx_tso_max_pkt_len = m_head->m_pkthdr.len;
3354 if (fp->tx_tso_min_pkt_len > m_head->m_pkthdr.len)
3355 fp->tx_tso_min_pkt_len =
3356 m_head->m_pkthdr.len;
3357 if (fp->tx_tso_max_pkt_len < m_head->m_pkthdr.len)
3358 fp->tx_tso_max_pkt_len =
3359 m_head->m_pkthdr.len;
3364 if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
3365 offset = qlnx_tcp_offset(ha, m_head);
3367 if ((ret == EFBIG) ||
3368 ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && (
3369 (!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) ||
3370 ((m_head->m_pkthdr.csum_flags & CSUM_TSO) &&
3371 qlnx_tso_check(fp, segs, nsegs, offset))))) {
3374 QL_DPRINT8(ha, "EFBIG [%d]\n", m_head->m_pkthdr.len);
3378 m = m_defrag(m_head, M_NOWAIT);
3380 fp->err_tx_defrag++;
3381 fp->tx_pkts_freed++;
3384 QL_DPRINT1(ha, "m_defrag() = NULL [%d]\n", ret);
3391 if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
3392 segs, &nsegs, BUS_DMA_NOWAIT))) {
3393 fp->err_tx_defrag_dmamap_load++;
3396 "bus_dmamap_load_mbuf_sg failed0 [%d, %d]\n",
3397 ret, m_head->m_pkthdr.len);
3399 fp->tx_pkts_freed++;
3406 if ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) &&
3407 !(m_head->m_pkthdr.csum_flags & CSUM_TSO)) {
3408 fp->err_tx_non_tso_max_seg++;
3411 "(%d) nsegs too many for non-TSO [%d, %d]\n",
3412 ret, nsegs, m_head->m_pkthdr.len);
3414 fp->tx_pkts_freed++;
3420 if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
3421 offset = qlnx_tcp_offset(ha, m_head);
3424 fp->err_tx_dmamap_load++;
3426 QL_DPRINT1(ha, "bus_dmamap_load_mbuf_sg failed1 [%d, %d]\n",
3427 ret, m_head->m_pkthdr.len);
3428 fp->tx_pkts_freed++;
3434 QL_ASSERT(ha, (nsegs != 0), ("qlnx_send: empty packet"));
3436 if (ha->dbg_trace_tso_pkt_len) {
3437 if (nsegs < QLNX_FP_MAX_SEGS)
3438 fp->tx_pkts[(nsegs - 1)]++;
3440 fp->tx_pkts[(QLNX_FP_MAX_SEGS - 1)]++;
3443 #ifdef QLNX_TRACE_PERF_DATA
3444 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3445 if(m_head->m_pkthdr.len <= 2048)
3446 fp->tx_pkts_hist[0]++;
3447 else if((m_head->m_pkthdr.len > 2048) &&
3448 (m_head->m_pkthdr.len <= 4096))
3449 fp->tx_pkts_hist[1]++;
3450 else if((m_head->m_pkthdr.len > 4096) &&
3451 (m_head->m_pkthdr.len <= 8192))
3452 fp->tx_pkts_hist[2]++;
3453 else if((m_head->m_pkthdr.len > 8192) &&
3454 (m_head->m_pkthdr.len <= 12288))
3455 fp->tx_pkts_hist[3]++;
3456 else if((m_head->m_pkthdr.len > 12288) &&
3457 (m_head->m_pkthdr.len <= 16384))
3458 fp->tx_pkts_hist[4]++;
3459 else if((m_head->m_pkthdr.len > 16384) &&
3460 (m_head->m_pkthdr.len <= 20480))
3461 fp->tx_pkts_hist[5]++;
3462 else if((m_head->m_pkthdr.len > 20480) &&
3463 (m_head->m_pkthdr.len <= 24576))
3464 fp->tx_pkts_hist[6]++;
3465 else if((m_head->m_pkthdr.len > 24576) &&
3466 (m_head->m_pkthdr.len <= 28672))
3467 fp->tx_pkts_hist[7]++;
3468 else if((m_head->m_pkthdr.len > 28672) &&
3469 (m_head->m_pkthdr.len <= 32768))
3470 fp->tx_pkts_hist[8]++;
3471 else if((m_head->m_pkthdr.len > 32768) &&
3472 (m_head->m_pkthdr.len <= 36864))
3473 fp->tx_pkts_hist[9]++;
3474 else if((m_head->m_pkthdr.len > 36864) &&
3475 (m_head->m_pkthdr.len <= 40960))
3476 fp->tx_pkts_hist[10]++;
3477 else if((m_head->m_pkthdr.len > 40960) &&
3478 (m_head->m_pkthdr.len <= 45056))
3479 fp->tx_pkts_hist[11]++;
3480 else if((m_head->m_pkthdr.len > 45056) &&
3481 (m_head->m_pkthdr.len <= 49152))
3482 fp->tx_pkts_hist[12]++;
3483 else if((m_head->m_pkthdr.len > 49152) &&
3484 (m_head->m_pkthdr.len <= 53248))
3485 fp->tx_pkts_hist[13]++;
3486 else if((m_head->m_pkthdr.len > 53248) &&
3487 (m_head->m_pkthdr.len <= 57344))
3488 fp->tx_pkts_hist[14]++;
3489 else if((m_head->m_pkthdr.len > 57344) &&
3490 (m_head->m_pkthdr.len <= 61440))
3491 fp->tx_pkts_hist[15]++;
3492 else if((m_head->m_pkthdr.len > 61440) &&
3493 (m_head->m_pkthdr.len <= 65536))
3494 fp->tx_pkts_hist[16]++;
3496 fp->tx_pkts_hist[17]++;
3499 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3500 elem_left = ecore_chain_get_elem_left(&txq->tx_pbl);
3501 bd_used = TX_RING_SIZE - elem_left;
3505 else if((bd_used > 100) && (bd_used <= 500))
3507 else if((bd_used > 500) && (bd_used <= 1000))
3509 else if((bd_used > 1000) && (bd_used <= 2000))
3511 else if((bd_used > 2000) && (bd_used <= 4000))
3513 else if((bd_used > 4000) && (bd_used <= 5000))
3515 else if((bd_used > 5000) && (bd_used <= 7000))
3517 else if((bd_used > 7000) && (bd_used <= 8000))
3519 else if((bd_used > 8000) && (bd_used <= 9000))
3521 else if((bd_used > 9000) && (bd_used <= 10000))
3523 else if((bd_used > 10000) && (bd_used <= 11000))
3524 fp->tx_pkts_q[10]++;
3525 else if((bd_used > 11000) && (bd_used <= 12000))
3526 fp->tx_pkts_q[11]++;
3527 else if((bd_used > 12000) && (bd_used <= 13000))
3528 fp->tx_pkts_q[12]++;
3529 else if((bd_used > 13000) && (bd_used <= 14000))
3530 fp->tx_pkts_q[13]++;
3531 else if((bd_used > 14000) && (bd_used <= 15000))
3532 fp->tx_pkts_q[14]++;
3533 else if((bd_used > 15000) && (bd_used <= 16000))
3534 fp->tx_pkts_q[15]++;
3536 fp->tx_pkts_q[16]++;
3539 #endif /* end of QLNX_TRACE_PERF_DATA */
3541 if ((nsegs + QLNX_TX_ELEM_RESERVE) >
3542 (int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))) {
3543 QL_DPRINT1(ha, "(%d, 0x%x) insuffient BDs"
3544 " in chain[%d] trying to free packets\n",
3545 nsegs, elem_left, fp->rss_id);
3547 fp->tx_nsegs_gt_elem_left++;
3549 (void)qlnx_tx_int(ha, fp, txq);
3551 if ((nsegs + QLNX_TX_ELEM_RESERVE) > (int)(elem_left =
3552 ecore_chain_get_elem_left(&txq->tx_pbl))) {
3554 "(%d, 0x%x) insuffient BDs in chain[%d]\n",
3555 nsegs, elem_left, fp->rss_id);
3557 fp->err_tx_nsegs_gt_elem_left++;
3558 fp->tx_ring_full = 1;
3559 if (ha->storm_stats_enable)
3560 ha->storm_stats_gather = 1;
3565 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);
3567 txq->sw_tx_ring[idx].mp = m_head;
3569 first_bd = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
3571 memset(first_bd, 0, sizeof(*first_bd));
3573 first_bd->data.bd_flags.bitfields =
3574 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
3576 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, segs->ds_len);
3580 if (m_head->m_pkthdr.csum_flags & CSUM_IP) {
3581 first_bd->data.bd_flags.bitfields |=
3582 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
3585 if (m_head->m_pkthdr.csum_flags &
3586 (CSUM_UDP | CSUM_TCP | CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) {
3587 first_bd->data.bd_flags.bitfields |=
3588 (1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT);
3591 if (m_head->m_flags & M_VLANTAG) {
3592 first_bd->data.vlan = m_head->m_pkthdr.ether_vtag;
3593 first_bd->data.bd_flags.bitfields |=
3594 (1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT);
3597 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3598 first_bd->data.bd_flags.bitfields |=
3599 (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
3600 first_bd->data.bd_flags.bitfields |=
3601 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
3605 if (offset == segs->ds_len) {
3606 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
3610 second_bd = (struct eth_tx_2nd_bd *)
3611 ecore_chain_produce(&txq->tx_pbl);
3612 memset(second_bd, 0, sizeof(*second_bd));
3615 if (seg_idx < nsegs) {
3616 BD_SET_UNMAP_ADDR_LEN(second_bd, \
3617 (segs->ds_addr), (segs->ds_len));
3622 third_bd = (struct eth_tx_3rd_bd *)
3623 ecore_chain_produce(&txq->tx_pbl);
3624 memset(third_bd, 0, sizeof(*third_bd));
3625 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3626 third_bd->data.bitfields |=
3627 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3630 if (seg_idx < nsegs) {
3631 BD_SET_UNMAP_ADDR_LEN(third_bd, \
3632 (segs->ds_addr), (segs->ds_len));
3637 for (; seg_idx < nsegs; seg_idx++) {
3638 tx_data_bd = (struct eth_tx_bd *)
3639 ecore_chain_produce(&txq->tx_pbl);
3640 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3641 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \
3648 } else if (offset < segs->ds_len) {
3649 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
3651 second_bd = (struct eth_tx_2nd_bd *)
3652 ecore_chain_produce(&txq->tx_pbl);
3653 memset(second_bd, 0, sizeof(*second_bd));
3654 BD_SET_UNMAP_ADDR_LEN(second_bd, \
3655 (segs->ds_addr + offset),\
3656 (segs->ds_len - offset));
3660 third_bd = (struct eth_tx_3rd_bd *)
3661 ecore_chain_produce(&txq->tx_pbl);
3662 memset(third_bd, 0, sizeof(*third_bd));
3664 BD_SET_UNMAP_ADDR_LEN(third_bd, \
3667 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3668 third_bd->data.bitfields |=
3669 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3673 for (seg_idx = 2; seg_idx < nsegs; seg_idx++) {
3674 tx_data_bd = (struct eth_tx_bd *)
3675 ecore_chain_produce(&txq->tx_pbl);
3676 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3677 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \
3685 offset = offset - segs->ds_len;
3688 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) {
3692 tx_data_bd = (struct eth_tx_bd *)
3693 ecore_chain_produce(&txq->tx_pbl);
3694 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3696 if (second_bd == NULL) {
3697 second_bd = (struct eth_tx_2nd_bd *)
3699 } else if (third_bd == NULL) {
3700 third_bd = (struct eth_tx_3rd_bd *)
3704 if (offset && (offset < segs->ds_len)) {
3705 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3706 segs->ds_addr, offset);
3708 tx_data_bd = (struct eth_tx_bd *)
3709 ecore_chain_produce(&txq->tx_pbl);
3711 memset(tx_data_bd, 0,
3712 sizeof(*tx_data_bd));
3714 if (second_bd == NULL) {
3716 (struct eth_tx_2nd_bd *)tx_data_bd;
3717 } else if (third_bd == NULL) {
3719 (struct eth_tx_3rd_bd *)tx_data_bd;
3721 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3722 (segs->ds_addr + offset), \
3723 (segs->ds_len - offset));
3728 offset = offset - segs->ds_len;
3729 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3730 segs->ds_addr, segs->ds_len);
3736 if (third_bd == NULL) {
3737 third_bd = (struct eth_tx_3rd_bd *)
3738 ecore_chain_produce(&txq->tx_pbl);
3739 memset(third_bd, 0, sizeof(*third_bd));
3742 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3743 third_bd->data.bitfields |=
3744 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3749 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) {
3750 tx_data_bd = (struct eth_tx_bd *)
3751 ecore_chain_produce(&txq->tx_pbl);
3752 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3753 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, segs->ds_addr,\
3758 first_bd->data.bitfields =
3759 (m_head->m_pkthdr.len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
3760 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
3761 first_bd->data.bitfields =
3762 htole16(first_bd->data.bitfields);
3763 fp->tx_non_tso_pkts++;
3766 first_bd->data.nbds = nbd;
3768 if (ha->dbg_trace_tso_pkt_len) {
3769 if (fp->tx_tso_max_nsegs < nsegs)
3770 fp->tx_tso_max_nsegs = nsegs;
3772 if ((nsegs < fp->tx_tso_min_nsegs) || (!fp->tx_tso_min_nsegs))
3773 fp->tx_tso_min_nsegs = nsegs;
3776 txq->sw_tx_ring[idx].nsegs = nsegs;
3777 txq->sw_tx_prod = (txq->sw_tx_prod + 1) & (TX_RING_SIZE - 1);
3779 txq->tx_db.data.bd_prod =
3780 htole16(ecore_chain_get_prod_idx(&txq->tx_pbl));
3782 qlnx_txq_doorbell_wr32(ha, txq->doorbell_addr, txq->tx_db.raw);
3784 QL_DPRINT8(ha, "exit[%d]\n", fp->rss_id);
3789 qlnx_stop(qlnx_host_t *ha)
3791 struct ifnet *ifp = ha->ifp;
3797 ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
3800 * We simply lock and unlock each fp->tx_mtx to
3801 * propagate the if_drv_flags
3802 * state to each tx thread
3804 QL_DPRINT1(ha, "QLNX STATE = %d\n",ha->state);
3806 if (ha->state == QLNX_STATE_OPEN) {
3807 for (i = 0; i < ha->num_rss; i++) {
3808 struct qlnx_fastpath *fp = &ha->fp_array[i];
3810 mtx_lock(&fp->tx_mtx);
3811 mtx_unlock(&fp->tx_mtx);
3813 if (fp->fp_taskqueue != NULL)
3814 taskqueue_enqueue(fp->fp_taskqueue,
3818 #ifdef QLNX_ENABLE_IWARP
3819 if (qlnx_vf_device(ha) != 0) {
3820 qlnx_rdma_dev_close(ha);
3822 #endif /* #ifdef QLNX_ENABLE_IWARP */
3830 qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha)
3832 return(TX_RING_SIZE - 1);
3836 qlnx_get_mac_addr(qlnx_host_t *ha)
3838 struct ecore_hwfn *p_hwfn;
3839 unsigned char mac[ETHER_ADDR_LEN];
3840 uint8_t p_is_forced;
3842 p_hwfn = &ha->cdev.hwfns[0];
3844 if (qlnx_vf_device(ha) != 0)
3845 return (p_hwfn->hw_info.hw_mac_addr);
3847 ecore_vf_read_bulletin(p_hwfn, &p_is_forced);
3848 if (ecore_vf_bulletin_get_forced_mac(p_hwfn, mac, &p_is_forced) ==
3850 device_printf(ha->pci_dev, "%s: p_is_forced = %d"
3851 " mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n", __func__,
3852 p_is_forced, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3853 memcpy(ha->primary_mac, mac, ETH_ALEN);
3856 return (ha->primary_mac);
3860 qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link)
3862 uint32_t ifm_type = 0;
3864 switch (if_link->media_type) {
3865 case MEDIA_MODULE_FIBER:
3866 case MEDIA_UNSPECIFIED:
3867 if (if_link->speed == (100 * 1000))
3868 ifm_type = QLNX_IFM_100G_SR4;
3869 else if (if_link->speed == (40 * 1000))
3870 ifm_type = IFM_40G_SR4;
3871 else if (if_link->speed == (25 * 1000))
3872 ifm_type = QLNX_IFM_25G_SR;
3873 else if (if_link->speed == (10 * 1000))
3874 ifm_type = (IFM_10G_LR | IFM_10G_SR);
3875 else if (if_link->speed == (1 * 1000))
3876 ifm_type = (IFM_1000_SX | IFM_1000_LX);
3880 case MEDIA_DA_TWINAX:
3881 if (if_link->speed == (100 * 1000))
3882 ifm_type = QLNX_IFM_100G_CR4;
3883 else if (if_link->speed == (40 * 1000))
3884 ifm_type = IFM_40G_CR4;
3885 else if (if_link->speed == (25 * 1000))
3886 ifm_type = QLNX_IFM_25G_CR;
3887 else if (if_link->speed == (10 * 1000))
3888 ifm_type = IFM_10G_TWINAX;
3893 ifm_type = IFM_UNKNOWN;
3899 /*****************************************************************************
3900 * Interrupt Service Functions
3901 *****************************************************************************/
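/*
 * qlnx_rx_jumbo_chain: a frame larger than one Rx buffer spans several
 * BDs; walk the software ring, replenish each consumed buffer, and
 * link the additional mbufs onto mp_head until len bytes are claimed.
 */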
3904 qlnx_rx_jumbo_chain(qlnx_host_t *ha, struct qlnx_fastpath *fp,
3905 struct mbuf *mp_head, uint16_t len)
3907 struct mbuf *mp, *mpf, *mpl;
3908 struct sw_rx_data *sw_rx_data;
3909 struct qlnx_rx_queue *rxq;
3910 uint16_t len_in_buffer;
3913 mpf = mpl = mp = NULL;
3916 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3918 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3919 mp = sw_rx_data->data;
3922 QL_DPRINT1(ha, "mp = NULL\n");
3923 fp->err_rx_mp_null++;
3925 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3932 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3933 BUS_DMASYNC_POSTREAD);
3935 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3936 QL_DPRINT1(ha, "New buffer allocation failed, dropping"
3937 " incoming packet and reusing its buffer\n");
3939 qlnx_reuse_rx_data(rxq);
3940 fp->err_rx_alloc_errors++;
3947 ecore_chain_consume(&rxq->rx_bd_ring);
3949 if (len > rxq->rx_buf_size)
3950 len_in_buffer = rxq->rx_buf_size;
3952 len_in_buffer = len;
3954 len = len - len_in_buffer;
3956 mp->m_flags &= ~M_PKTHDR;
3958 mp->m_len = len_in_buffer;
3969 mp_head->m_next = mpf;
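/*
 * Hardware TPA (LRO) flow: qlnx_tpa_start() pins the first buffer(s)
 * of an aggregation under tpa_info[agg_index], qlnx_tpa_cont() appends
 * continuation buffers, and qlnx_tpa_end() trims the chain to
 * total_packet_len and hands the completed super-frame to the stack.
 */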
3975 qlnx_tpa_start(qlnx_host_t *ha,
3976 struct qlnx_fastpath *fp,
3977 struct qlnx_rx_queue *rxq,
3978 struct eth_fast_path_rx_tpa_start_cqe *cqe)
3981 struct ifnet *ifp = ha->ifp;
3983 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL;
3984 struct sw_rx_data *sw_rx_data;
3987 struct eth_rx_bd *rx_bd;
3990 #if __FreeBSD_version >= 1100000
3992 #endif /* #if __FreeBSD_version >= 1100000 */
3995 agg_index = cqe->tpa_agg_index;
3997 QL_DPRINT7(ha, "[rss_id = %d]: enter\n \
3999 \t bitfields = 0x%x\n \
4000 \t seg_len = 0x%x\n \
4001 \t pars_flags = 0x%x\n \
4002 \t vlan_tag = 0x%x\n \
4003 \t rss_hash = 0x%x\n \
4004 \t len_on_first_bd = 0x%x\n \
4005 \t placement_offset = 0x%x\n \
4006 \t tpa_agg_index = 0x%x\n \
4007 \t header_len = 0x%x\n \
4008 \t ext_bd_len_list[0] = 0x%x\n \
4009 \t ext_bd_len_list[1] = 0x%x\n \
4010 \t ext_bd_len_list[2] = 0x%x\n \
4011 \t ext_bd_len_list[3] = 0x%x\n \
4012 \t ext_bd_len_list[4] = 0x%x\n",
4013 fp->rss_id, cqe->type, cqe->bitfields, cqe->seg_len,
4014 cqe->pars_flags.flags, cqe->vlan_tag,
4015 cqe->rss_hash, cqe->len_on_first_bd, cqe->placement_offset,
4016 cqe->tpa_agg_index, cqe->header_len,
4017 cqe->ext_bd_len_list[0], cqe->ext_bd_len_list[1],
4018 cqe->ext_bd_len_list[2], cqe->ext_bd_len_list[3],
4019 cqe->ext_bd_len_list[4]);
4021 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
4022 fp->err_rx_tpa_invalid_agg_num++;
4026 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4027 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD);
4028 mp = sw_rx_data->data;
4030 QL_DPRINT7(ha, "[rss_id = %d]: mp = %p \n ", fp->rss_id, mp);
4033 QL_DPRINT7(ha, "[%d]: mp = NULL\n", fp->rss_id);
4034 fp->err_rx_mp_null++;
4035 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4040 if ((le16toh(cqe->pars_flags.flags)) & CQE_FLAGS_ERR) {
4041 QL_DPRINT7(ha, "[%d]: CQE in CONS = %u has error,"
4042 " flags = %x, dropping incoming packet\n", fp->rss_id,
4043 rxq->sw_rx_cons, le16toh(cqe->pars_flags.flags));
4045 fp->err_rx_hw_errors++;
4047 qlnx_reuse_rx_data(rxq);
4049 QLNX_INC_IERRORS(ifp);
4054 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4055 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4056 " dropping incoming packet and reusing its buffer\n",
4059 fp->err_rx_alloc_errors++;
4060 QLNX_INC_IQDROPS(ifp);
4063 * Load the tpa mbuf into the rx ring and save the
4067 map = sw_rx_data->map;
4068 addr = sw_rx_data->dma_addr;
4070 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
4072 sw_rx_data->data = rxq->tpa_info[agg_index].rx_buf.data;
4073 sw_rx_data->dma_addr = rxq->tpa_info[agg_index].rx_buf.dma_addr;
4074 sw_rx_data->map = rxq->tpa_info[agg_index].rx_buf.map;
4076 rxq->tpa_info[agg_index].rx_buf.data = mp;
4077 rxq->tpa_info[agg_index].rx_buf.dma_addr = addr;
4078 rxq->tpa_info[agg_index].rx_buf.map = map;
4080 rx_bd = (struct eth_rx_bd *)
4081 ecore_chain_produce(&rxq->rx_bd_ring);
4083 rx_bd->addr.hi = htole32(U64_HI(sw_rx_data->dma_addr));
4084 rx_bd->addr.lo = htole32(U64_LO(sw_rx_data->dma_addr));
4086 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4087 BUS_DMASYNC_PREREAD);
4089 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
4090 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4092 ecore_chain_consume(&rxq->rx_bd_ring);
4094 /* Now reuse any buffers posted in ext_bd_len_list */
4095 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
4096 if (cqe->ext_bd_len_list[i] == 0)
4099 qlnx_reuse_rx_data(rxq);
4102 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
4106 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
4107 QL_DPRINT7(ha, "[%d]: invalid aggregation state,"
4108 " dropping incoming packet and reusing its buffer\n",
4111 QLNX_INC_IQDROPS(ifp);
4113 /* if we already have mbuf head in aggregation free it */
4114 if (rxq->tpa_info[agg_index].mpf) {
4115 m_freem(rxq->tpa_info[agg_index].mpf);
4116 rxq->tpa_info[agg_index].mpl = NULL;
4118 rxq->tpa_info[agg_index].mpf = mp;
4119 rxq->tpa_info[agg_index].mpl = NULL;
4121 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4122 ecore_chain_consume(&rxq->rx_bd_ring);
4124 /* Now reuse any buffers posted in ext_bd_len_list */
4125 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
4126 if (cqe->ext_bd_len_list[i] == 0)
4129 qlnx_reuse_rx_data(rxq);
4131 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
4137 * first process the ext_bd_len_list
4138 * if this fails then we simply drop the packet
4140 ecore_chain_consume(&rxq->rx_bd_ring);
4141 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4143 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
4144 QL_DPRINT7(ha, "[%d]: 4\n ", fp->rss_id);
4146 if (cqe->ext_bd_len_list[i] == 0)
4149 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4150 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4151 BUS_DMASYNC_POSTREAD);
4153 mpc = sw_rx_data->data;
4156 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4157 fp->err_rx_mp_null++;
4161 rxq->tpa_info[agg_index].agg_state =
4162 QLNX_AGG_STATE_ERROR;
4163 ecore_chain_consume(&rxq->rx_bd_ring);
4165 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4169 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4170 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4171 " dropping incoming packet and reusing its"
4172 " buffer\n", fp->rss_id);
4174 qlnx_reuse_rx_data(rxq);
4180 rxq->tpa_info[agg_index].agg_state =
4181 QLNX_AGG_STATE_ERROR;
4183 ecore_chain_consume(&rxq->rx_bd_ring);
4185 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4190 mpc->m_flags &= ~M_PKTHDR;
4192 mpc->m_len = cqe->ext_bd_len_list[i];
4197 mpl->m_len = ha->rx_buf_size;
4202 ecore_chain_consume(&rxq->rx_bd_ring);
4204 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4207 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
4208 QL_DPRINT7(ha, "[%d]: invalid aggregation state, dropping"
4209 " incoming packet and reusing its buffer\n",
4212 QLNX_INC_IQDROPS(ifp);
4214 rxq->tpa_info[agg_index].mpf = mp;
4215 rxq->tpa_info[agg_index].mpl = NULL;
4220 rxq->tpa_info[agg_index].placement_offset = cqe->placement_offset;
4223 mp->m_len = ha->rx_buf_size;
4225 rxq->tpa_info[agg_index].mpf = mp;
4226 rxq->tpa_info[agg_index].mpl = mpl;
4228 mp->m_len = cqe->len_on_first_bd + cqe->placement_offset;
4229 rxq->tpa_info[agg_index].mpf = mp;
4230 rxq->tpa_info[agg_index].mpl = mp;
4234 mp->m_flags |= M_PKTHDR;
4236 /* assign packet to this interface */
4237 mp->m_pkthdr.rcvif = ifp;
4239 /* assume no hardware checksum has been completed */
4240 mp->m_pkthdr.csum_flags = 0;
4242 //mp->m_pkthdr.flowid = fp->rss_id;
4243 mp->m_pkthdr.flowid = cqe->rss_hash;
4245 #if __FreeBSD_version >= 1100000
4247 hash_type = cqe->bitfields &
4248 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
4249 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);
4251 switch (hash_type) {
4252 case RSS_HASH_TYPE_IPV4:
4253 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
4256 case RSS_HASH_TYPE_TCP_IPV4:
4257 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
4260 case RSS_HASH_TYPE_IPV6:
4261 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
4264 case RSS_HASH_TYPE_TCP_IPV6:
4265 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
4269 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
4274 mp->m_flags |= M_FLOWID;
4277 mp->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID |
4278 CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4280 mp->m_pkthdr.csum_data = 0xFFFF;
4282 if (CQE_HAS_VLAN(cqe->pars_flags.flags)) {
4283 mp->m_pkthdr.ether_vtag = le16toh(cqe->vlan_tag);
4284 mp->m_flags |= M_VLANTAG;
4287 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_START;
4289 QL_DPRINT7(ha, "[%d]: 5\n\tagg_state = %d\n\t mpf = %p mpl = %p\n",
4290 fp->rss_id, rxq->tpa_info[agg_index].agg_state,
4291 rxq->tpa_info[agg_index].mpf, rxq->tpa_info[agg_index].mpl);
4297 qlnx_tpa_cont(qlnx_host_t *ha, struct qlnx_fastpath *fp,
4298 struct qlnx_rx_queue *rxq,
4299 struct eth_fast_path_rx_tpa_cont_cqe *cqe)
4301 struct sw_rx_data *sw_rx_data;
4303 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL;
4310 QL_DPRINT7(ha, "[%d]: enter\n \
4312 \t tpa_agg_index = 0x%x\n \
4313 \t len_list[0] = 0x%x\n \
4314 \t len_list[1] = 0x%x\n \
4315 \t len_list[2] = 0x%x\n \
4316 \t len_list[3] = 0x%x\n \
4317 \t len_list[4] = 0x%x\n \
4318 \t len_list[5] = 0x%x\n",
4319 fp->rss_id, cqe->type, cqe->tpa_agg_index,
4320 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
4321 cqe->len_list[3], cqe->len_list[4], cqe->len_list[5]);
4323 agg_index = cqe->tpa_agg_index;
4325 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
4326 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
4327 fp->err_rx_tpa_invalid_agg_num++;
4331 for (i = 0; i < ETH_TPA_CQE_CONT_LEN_LIST_SIZE; i++) {
4332 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);
4334 if (cqe->len_list[i] == 0)
4337 if (rxq->tpa_info[agg_index].agg_state !=
4338 QLNX_AGG_STATE_START) {
4339 qlnx_reuse_rx_data(rxq);
4343 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4344 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4345 BUS_DMASYNC_POSTREAD);
4347 mpc = sw_rx_data->data;
4350 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4352 fp->err_rx_mp_null++;
4356 rxq->tpa_info[agg_index].agg_state =
4357 QLNX_AGG_STATE_ERROR;
4358 ecore_chain_consume(&rxq->rx_bd_ring);
4360 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4364 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4365 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4366 " dropping incoming packet and reusing its"
4367 " buffer\n", fp->rss_id);
4369 qlnx_reuse_rx_data(rxq);
4375 rxq->tpa_info[agg_index].agg_state =
4376 QLNX_AGG_STATE_ERROR;
4378 ecore_chain_consume(&rxq->rx_bd_ring);
4380 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4385 mpc->m_flags &= ~M_PKTHDR;
4387 mpc->m_len = cqe->len_list[i];
4392 mpl->m_len = ha->rx_buf_size;
4397 ecore_chain_consume(&rxq->rx_bd_ring);
4399 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4402 QL_DPRINT7(ha, "[%d]: 2\n" "\tmpf = %p mpl = %p\n",
4403 fp->rss_id, mpf, mpl);
4406 mp = rxq->tpa_info[agg_index].mpl;
4407 mp->m_len = ha->rx_buf_size;
4409 rxq->tpa_info[agg_index].mpl = mpl;
4416 qlnx_tpa_end(qlnx_host_t *ha, struct qlnx_fastpath *fp,
4417 struct qlnx_rx_queue *rxq,
4418 struct eth_fast_path_rx_tpa_end_cqe *cqe)
4420 struct sw_rx_data *sw_rx_data;
4422 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL;
4426 struct ifnet *ifp = ha->ifp;
4431 QL_DPRINT7(ha, "[%d]: enter\n \
4433 \t tpa_agg_index = 0x%x\n \
4434 \t total_packet_len = 0x%x\n \
4435 \t num_of_bds = 0x%x\n \
4436 \t end_reason = 0x%x\n \
4437 \t num_of_coalesced_segs = 0x%x\n \
4438 \t ts_delta = 0x%x\n \
4439 \t len_list[0] = 0x%x\n \
4440 \t len_list[1] = 0x%x\n \
4441 \t len_list[2] = 0x%x\n \
4442 \t len_list[3] = 0x%x\n",
4443 fp->rss_id, cqe->type, cqe->tpa_agg_index,
4444 cqe->total_packet_len, cqe->num_of_bds,
4445 cqe->end_reason, cqe->num_of_coalesced_segs, cqe->ts_delta,
4446 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
4449 agg_index = cqe->tpa_agg_index;
4451 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
4452 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
4454 fp->err_rx_tpa_invalid_agg_num++;
4458 for (i = 0; i < ETH_TPA_CQE_END_LEN_LIST_SIZE; i++) {
4459 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);
4461 if (cqe->len_list[i] == 0)
4464 if (rxq->tpa_info[agg_index].agg_state !=
4465 QLNX_AGG_STATE_START) {
4466 QL_DPRINT7(ha, "[%d]: 2\n ", fp->rss_id);
4468 qlnx_reuse_rx_data(rxq);
4472 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4473 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4474 BUS_DMASYNC_POSTREAD);
4476 mpc = sw_rx_data->data;
4479 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4481 fp->err_rx_mp_null++;
4485 rxq->tpa_info[agg_index].agg_state =
4486 QLNX_AGG_STATE_ERROR;
4487 ecore_chain_consume(&rxq->rx_bd_ring);
4489 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4493 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4494 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4495 " dropping incoming packet and reusing its"
4496 " buffer\n", fp->rss_id);
4498 qlnx_reuse_rx_data(rxq);
4504 rxq->tpa_info[agg_index].agg_state =
4505 QLNX_AGG_STATE_ERROR;
4507 ecore_chain_consume(&rxq->rx_bd_ring);
4509 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4514 mpc->m_flags &= ~M_PKTHDR;
4516 mpc->m_len = cqe->len_list[i];
4521 mpl->m_len = ha->rx_buf_size;
4526 ecore_chain_consume(&rxq->rx_bd_ring);
4528 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4531 QL_DPRINT7(ha, "[%d]: 5\n ", fp->rss_id);
4534 QL_DPRINT7(ha, "[%d]: 6\n ", fp->rss_id);
4536 mp = rxq->tpa_info[agg_index].mpl;
4537 mp->m_len = ha->rx_buf_size;
4541 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_START) {
4542 QL_DPRINT7(ha, "[%d]: 7\n ", fp->rss_id);
4544 if (rxq->tpa_info[agg_index].mpf != NULL)
4545 m_freem(rxq->tpa_info[agg_index].mpf);
4546 rxq->tpa_info[agg_index].mpf = NULL;
4547 rxq->tpa_info[agg_index].mpl = NULL;
4548 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
4552 mp = rxq->tpa_info[agg_index].mpf;
4553 m_adj(mp, rxq->tpa_info[agg_index].placement_offset);
4554 mp->m_pkthdr.len = cqe->total_packet_len;
4556 if (mp->m_next == NULL)
4557 mp->m_len = mp->m_pkthdr.len;
4559 /* compute the total packet length */
4561 while (mpf != NULL) {
4566 if (cqe->total_packet_len > len) {
4567 mpl = rxq->tpa_info[agg_index].mpl;
4568 mpl->m_len += (cqe->total_packet_len - len);
4572 QLNX_INC_IPACKETS(ifp);
4573 QLNX_INC_IBYTES(ifp, (cqe->total_packet_len));
4575 QL_DPRINT7(ha, "[%d]: 8 csum_data = 0x%x csum_flags = 0x%" PRIu64 "\n \
4576 m_len = 0x%x m_pkthdr_len = 0x%x\n",
4577 fp->rss_id, mp->m_pkthdr.csum_data,
4578 (uint64_t)mp->m_pkthdr.csum_flags, mp->m_len, mp->m_pkthdr.len);
4580 (*ifp->if_input)(ifp, mp);
4582 rxq->tpa_info[agg_index].mpf = NULL;
4583 rxq->tpa_info[agg_index].mpl = NULL;
4584 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
4586 return (cqe->num_of_coalesced_segs);
4590 qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
4593 uint16_t hw_comp_cons, sw_comp_cons;
4595 struct qlnx_rx_queue *rxq = fp->rxq;
4596 struct ifnet *ifp = ha->ifp;
4597 struct ecore_dev *cdev = &ha->cdev;
4598 struct ecore_hwfn *p_hwfn;
4600 #ifdef QLNX_SOFT_LRO
4601 struct lro_ctrl *lro;
4604 #endif /* #ifdef QLNX_SOFT_LRO */
4606 hw_comp_cons = le16toh(*rxq->hw_cons_ptr);
4607 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
4609 p_hwfn = &ha->cdev.hwfns[(fp->rss_id % cdev->num_hwfns)];
4611 /* Memory barrier to prevent the CPU from doing speculative reads of the
4612 * CQE/BD in the while-loop below before hw_comp_cons has been read. If
4613 * the CQE were read before the firmware finished writing it (FW writes
4614 * the CQE first and the status block afterwards), a stale CQE would be used.
4617 /* Loop to complete all indicated BDs */
4618 while (sw_comp_cons != hw_comp_cons) {
4619 union eth_rx_cqe *cqe;
4620 struct eth_fast_path_rx_reg_cqe *fp_cqe;
4621 struct sw_rx_data *sw_rx_data;
4622 register struct mbuf *mp;
4623 enum eth_rx_cqe_type cqe_type;
4624 uint16_t len, pad, len_on_first_bd;
4626 #if __FreeBSD_version >= 1100000
4628 #endif /* #if __FreeBSD_version >= 1100000 */
4630 /* Get the CQE from the completion ring */
4631 cqe = (union eth_rx_cqe *)
4632 ecore_chain_consume(&rxq->rx_comp_ring);
4633 cqe_type = cqe->fast_path_regular.type;
4635 if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) {
4636 QL_DPRINT3(ha, "Got a slowath CQE\n");
4638 ecore_eth_cqe_completion(p_hwfn,
4639 (struct eth_slow_path_rx_cqe *)cqe);
4643 if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) {
4645 case ETH_RX_CQE_TYPE_TPA_START:
4646 qlnx_tpa_start(ha, fp, rxq,
4647 &cqe->fast_path_tpa_start);
4651 case ETH_RX_CQE_TYPE_TPA_CONT:
4652 qlnx_tpa_cont(ha, fp, rxq,
4653 &cqe->fast_path_tpa_cont);
4657 case ETH_RX_CQE_TYPE_TPA_END:
4658 rx_pkt += qlnx_tpa_end(ha, fp, rxq,
4659 &cqe->fast_path_tpa_end);
4670 /* Get the data from the SW ring */
4671 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4672 mp = sw_rx_data->data;
4675 QL_DPRINT1(ha, "mp = NULL\n");
4676 fp->err_rx_mp_null++;
4678 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4681 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4682 BUS_DMASYNC_POSTREAD);
4685 fp_cqe = &cqe->fast_path_regular;/* MK CR TPA check assembly */
4686 len = le16toh(fp_cqe->pkt_len);
4687 pad = fp_cqe->placement_offset;
4689 QL_DPRINT3(ha, "CQE type = %x, flags = %x, vlan = %x,"
4690 " len %u, parsing flags = %d pad = %d\n",
4691 cqe_type, fp_cqe->bitfields,
4692 le16toh(fp_cqe->vlan_tag),
4693 len, le16toh(fp_cqe->pars_flags.flags), pad);
4695 data = mtod(mp, uint8_t *);
4699 qlnx_dump_buf8(ha, __func__, data, len);
4701 /* For every Rx BD consumed, we allocate a new BD so the BD ring
4702 * always stays at a fixed size. If allocation fails, we take the
4703 * consumed BD and return it to the ring in the PROD position.
4704 * The packet that was received on that BD will be dropped (and
4705 * not passed to the upper stack).
4707 /* If this is an error packet then drop it */
4708 if ((le16toh(cqe->fast_path_regular.pars_flags.flags)) &
4710 QL_DPRINT1(ha, "CQE in CONS = %u has error, flags = %x,"
4711 " dropping incoming packet\n", sw_comp_cons,
4712 le16toh(cqe->fast_path_regular.pars_flags.flags));
4713 fp->err_rx_hw_errors++;
4715 qlnx_reuse_rx_data(rxq);
4717 QLNX_INC_IERRORS(ifp);
4722 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4723 QL_DPRINT1(ha, "New buffer allocation failed, dropping"
4724 " incoming packet and reusing its buffer\n");
4725 qlnx_reuse_rx_data(rxq);
4727 fp->err_rx_alloc_errors++;
4729 QLNX_INC_IQDROPS(ifp);
4734 ecore_chain_consume(&rxq->rx_bd_ring);
4736 len_on_first_bd = fp_cqe->len_on_first_bd;
4738 mp->m_pkthdr.len = len;
4740 if ((len > 60) && (len > len_on_first_bd)) {
4741 mp->m_len = len_on_first_bd;
4743 if (qlnx_rx_jumbo_chain(ha, fp, mp,
4744 (len - len_on_first_bd)) != 0) {
4747 QLNX_INC_IQDROPS(ifp);
4752 } else if (len_on_first_bd < len) {
4753 fp->err_rx_jumbo_chain_pkts++;
4758 mp->m_flags |= M_PKTHDR;
4760 /* assign packet to this interface */
4761 mp->m_pkthdr.rcvif = ifp;
4763 /* assume no hardware checksum has been completed */
4764 mp->m_pkthdr.csum_flags = 0;
4766 mp->m_pkthdr.flowid = fp_cqe->rss_hash;
4768 #if __FreeBSD_version >= 1100000
4770 hash_type = fp_cqe->bitfields &
4771 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
4772 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);
4774 switch (hash_type) {
4775 case RSS_HASH_TYPE_IPV4:
4776 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
4779 case RSS_HASH_TYPE_TCP_IPV4:
4780 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
4783 case RSS_HASH_TYPE_IPV6:
4784 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
4787 case RSS_HASH_TYPE_TCP_IPV6:
4788 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
4792 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
4797 mp->m_flags |= M_FLOWID;
4800 if (CQE_L3_PACKET(fp_cqe->pars_flags.flags)) {
4801 mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
4804 if (!(CQE_IP_HDR_ERR(fp_cqe->pars_flags.flags))) {
4805 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4808 if (CQE_L4_HAS_CSUM(fp_cqe->pars_flags.flags)) {
4809 mp->m_pkthdr.csum_data = 0xFFFF;
4810 mp->m_pkthdr.csum_flags |=
4811 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4814 if (CQE_HAS_VLAN(fp_cqe->pars_flags.flags)) {
4815 mp->m_pkthdr.ether_vtag = le16toh(fp_cqe->vlan_tag);
4816 mp->m_flags |= M_VLANTAG;
4819 QLNX_INC_IPACKETS(ifp);
4820 QLNX_INC_IBYTES(ifp, len);
4822 #ifdef QLNX_SOFT_LRO
4825 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
4827 tcp_lro_queue_mbuf(lro, mp);
4831 if (tcp_lro_rx(lro, mp, 0))
4832 (*ifp->if_input)(ifp, mp);
4834 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
4837 (*ifp->if_input)(ifp, mp);
4841 (*ifp->if_input)(ifp, mp);
4843 #endif /* #ifdef QLNX_SOFT_LRO */
4847 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4849 next_cqe: /* don't consume bd rx buffer */
4850 ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
4851 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
4853 /* CR TPA - revisit how to handle budget in TPA perhaps
4854 increase on "end" */
4855 if (rx_pkt == budget)
4857 } /* repeat while sw_comp_cons != hw_comp_cons... */
4859 /* Update producers */
4860 qlnx_update_rx_prod(p_hwfn, rxq);
4866 * fast path interrupt
4870 qlnx_fp_isr(void *arg)
4872 qlnx_ivec_t *ivec = arg;
4874 struct qlnx_fastpath *fp = NULL;
4879 if (ha->state != QLNX_STATE_OPEN) {
4883 idx = ivec->rss_idx;
4885 if (idx >= ha->num_rss) {
4886 QL_DPRINT1(ha, "illegal interrupt[%d]\n", idx);
4887 ha->err_illegal_intr++;
4890 fp = &ha->fp_array[idx];
4895 int rx_int = 0, total_rx_count = 0;
4897 struct qlnx_tx_queue *txq;
4900 lro_enable = ha->ifp->if_capenable & IFCAP_LRO;
4902 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
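		/*
		 * The status block is acked with interrupts disabled here
		 * and re-enabled (IGU_INT_ENABLE) only after the Tx
		 * completion and budgeted Rx passes below have run.
		 */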
4905 for (tc = 0; tc < ha->num_tc; tc++) {
4908 if((int)(elem_left =
4909 ecore_chain_get_elem_left(&txq->tx_pbl)) <
4910 QLNX_TX_ELEM_THRESH) {
4911 if (mtx_trylock(&fp->tx_mtx)) {
4912 #ifdef QLNX_TRACE_PERF_DATA
4913 tx_compl = fp->tx_pkts_completed;
4916 qlnx_tx_int(ha, fp, fp->txq[tc]);
4917 #ifdef QLNX_TRACE_PERF_DATA
4918 fp->tx_pkts_compl_intr +=
4919 (fp->tx_pkts_completed - tx_compl);
4920 if ((fp->tx_pkts_completed - tx_compl) <= 32)
4922 else if (((fp->tx_pkts_completed - tx_compl) > 32) &&
4923 ((fp->tx_pkts_completed - tx_compl) <= 64))
4925 else if (((fp->tx_pkts_completed - tx_compl) > 64) &&
4926 ((fp->tx_pkts_completed - tx_compl) <= 128))
4928 else if ((fp->tx_pkts_completed - tx_compl) > 128)
4931 mtx_unlock(&fp->tx_mtx);
4936 rx_int = qlnx_rx_int(ha, fp, ha->rx_pkt_threshold,
4940 fp->rx_pkts += rx_int;
4941 total_rx_count += rx_int;
4946 #ifdef QLNX_SOFT_LRO
4948 struct lro_ctrl *lro;
4950 lro = &fp->rxq->lro;
4952 if (lro_enable && total_rx_count) {
4953 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
4955 #ifdef QLNX_TRACE_LRO_CNT
4956 if (lro->lro_mbuf_count & ~1023)
4958 else if (lro->lro_mbuf_count & ~511)
4960 else if (lro->lro_mbuf_count & ~255)
4962 else if (lro->lro_mbuf_count & ~127)
4964 else if (lro->lro_mbuf_count & ~63)
4966 #endif /* #ifdef QLNX_TRACE_LRO_CNT */
4968 tcp_lro_flush_all(lro);
4971 struct lro_entry *queued;
4973 while (!SLIST_EMPTY(&lro->lro_active)) {
4974 queued = SLIST_FIRST(&lro->lro_active);
4975 SLIST_REMOVE_HEAD(&lro->lro_active, next);
4977 tcp_lro_flush(lro, queued);
4979 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
4982 #endif /* #ifdef QLNX_SOFT_LRO */
4984 ecore_sb_update_sb_idx(fp->sb_info);
4986 ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
4993 * slow path interrupt processing function
4994 * can be invoked in polled mode or in interrupt mode via taskqueue.
4997 qlnx_sp_isr(void *arg)
4999 struct ecore_hwfn *p_hwfn;
5004 ha = (qlnx_host_t *)p_hwfn->p_dev;
5006 ha->sp_interrupts++;
5008 QL_DPRINT2(ha, "enter\n");
5010 ecore_int_sp_dpc(p_hwfn);
5012 QL_DPRINT2(ha, "exit\n");
5017 /*****************************************************************************
5018 * Support Functions for DMA'able Memory
5019 *****************************************************************************/
5022 qlnx_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
5024 *((bus_addr_t *)arg) = 0;
5027 printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
5031 *((bus_addr_t *)arg) = segs[0].ds_addr;
5037 qlnx_alloc_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
5045 ret = bus_dma_tag_create(
5046 ha->parent_tag,/* parent */
5048 ((bus_size_t)(1ULL << 32)), /* boundary */
5049 BUS_SPACE_MAXADDR, /* lowaddr */
5050 BUS_SPACE_MAXADDR, /* highaddr */
5051 NULL, NULL, /* filter, filterarg */
5052 dma_buf->size, /* maxsize */
5054 dma_buf->size, /* maxsegsize */
5056 NULL, NULL, /* lockfunc, lockarg */
5060 QL_DPRINT1(ha, "could not create dma tag\n");
5061 goto qlnx_alloc_dmabuf_exit;
5063 ret = bus_dmamem_alloc(dma_buf->dma_tag,
5064 (void **)&dma_buf->dma_b,
5065 (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
5068 bus_dma_tag_destroy(dma_buf->dma_tag);
5069 QL_DPRINT1(ha, "bus_dmamem_alloc failed\n");
5070 goto qlnx_alloc_dmabuf_exit;
5073 ret = bus_dmamap_load(dma_buf->dma_tag,
5077 qlnx_dmamap_callback,
5078 &b_addr, BUS_DMA_NOWAIT);
5080 if (ret || !b_addr) {
5081 bus_dma_tag_destroy(dma_buf->dma_tag);
5082 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
5085 goto qlnx_alloc_dmabuf_exit;
5088 dma_buf->dma_addr = b_addr;
5090 qlnx_alloc_dmabuf_exit:
5096 qlnx_free_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
5098 bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
5099 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
5100 bus_dma_tag_destroy(dma_buf->dma_tag);
5105 qlnx_dma_alloc_coherent(void *ecore_dev, bus_addr_t *phys, uint32_t size)
5112 ha = (qlnx_host_t *)ecore_dev;
5115 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
5117 memset(&dma_buf, 0, sizeof (qlnx_dma_t));
5119 dma_buf.size = size + PAGE_SIZE;
5120 dma_buf.alignment = 8;
5122 if (qlnx_alloc_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf) != 0)
5124 bzero((uint8_t *)dma_buf.dma_b, dma_buf.size);
5126 *phys = dma_buf.dma_addr;
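/*
 * Stash a copy of the DMA descriptor just past the page-rounded
 * buffer so qlnx_dma_free_coherent() can recover the tag and map
 * from the virtual address and size alone.
 */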
5128 dma_p = (qlnx_dma_t *)((uint8_t *)dma_buf.dma_b + size);
5130 memcpy(dma_p, &dma_buf, sizeof(qlnx_dma_t));
5132 QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
5133 (void *)dma_buf.dma_map, (void *)dma_buf.dma_tag,
5134 dma_buf.dma_b, (void *)dma_buf.dma_addr, size);
5136 return (dma_buf.dma_b);
5140 qlnx_dma_free_coherent(void *ecore_dev, void *v_addr, bus_addr_t phys,
5143 qlnx_dma_t dma_buf, *dma_p;
5147 ha = (qlnx_host_t *)ecore_dev;
5153 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
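/* Recover the DMA descriptor stashed by qlnx_dma_alloc_coherent(). */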
5155 dma_p = (qlnx_dma_t *)((uint8_t *)v_addr + size);
5157 QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
5158 (void *)dma_p->dma_map, (void *)dma_p->dma_tag,
5159 dma_p->dma_b, (void *)dma_p->dma_addr, size);
5163 if (!ha->qlnxr_debug)
5164 qlnx_free_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf);
5169 qlnx_alloc_parent_dma_tag(qlnx_host_t *ha)
5177 * Allocate parent DMA Tag
5179 ret = bus_dma_tag_create(
5180 bus_get_dma_tag(dev), /* parent */
5181 1, ((bus_size_t)(1ULL << 32)), /* alignment, boundary */
5182 BUS_SPACE_MAXADDR, /* lowaddr */
5183 BUS_SPACE_MAXADDR, /* highaddr */
5184 NULL, NULL, /* filter, filterarg */
5185 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
5187 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
5189 NULL, NULL, /* lockfunc, lockarg */
5193 QL_DPRINT1(ha, "could not create parent dma tag\n");
5197 ha->flags.parent_tag = 1;
5203 qlnx_free_parent_dma_tag(qlnx_host_t *ha)
5205 if (ha->parent_tag != NULL) {
5206 bus_dma_tag_destroy(ha->parent_tag);
5207 ha->parent_tag = NULL;
5213 qlnx_alloc_tx_dma_tag(qlnx_host_t *ha)
5215 if (bus_dma_tag_create(NULL, /* parent */
5216 1, 0, /* alignment, bounds */
5217 BUS_SPACE_MAXADDR, /* lowaddr */
5218 BUS_SPACE_MAXADDR, /* highaddr */
5219 NULL, NULL, /* filter, filterarg */
5220 QLNX_MAX_TSO_FRAME_SIZE, /* maxsize */
5221 QLNX_MAX_SEGMENTS, /* nsegments */
5222 QLNX_MAX_TX_MBUF_SIZE, /* maxsegsize */
5224 NULL, /* lockfunc */
5225 NULL, /* lockfuncarg */
5227 QL_DPRINT1(ha, "tx_tag alloc failed\n");
5235 qlnx_free_tx_dma_tag(qlnx_host_t *ha)
5237 if (ha->tx_tag != NULL) {
5238 bus_dma_tag_destroy(ha->tx_tag);
5245 qlnx_alloc_rx_dma_tag(qlnx_host_t *ha)
5247 if (bus_dma_tag_create(NULL, /* parent */
5248 1, 0, /* alignment, bounds */
5249 BUS_SPACE_MAXADDR, /* lowaddr */
5250 BUS_SPACE_MAXADDR, /* highaddr */
5251 NULL, NULL, /* filter, filterarg */
5252 MJUM9BYTES, /* maxsize */
5254 MJUM9BYTES, /* maxsegsize */
5256 NULL, /* lockfunc */
5257 NULL, /* lockfuncarg */
5259 QL_DPRINT1(ha, "rx_tag alloc failed\n");
5267 qlnx_free_rx_dma_tag(qlnx_host_t *ha)
5269 if (ha->rx_tag != NULL) {
5270 bus_dma_tag_destroy(ha->rx_tag);
5276 /*********************************
5277 * Exported functions
5278 *********************************/
5280 qlnx_pci_bus_get_bar_size(void *ecore_dev, uint8_t bar_id)
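/*
 * ecore bar_ids refer to 64-bit BARs, each of which occupies two
 * 32-bit PCI BAR registers; hence the multiply.
 */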
5284 bar_id = bar_id * 2;
5286 bar_size = bus_get_resource_count(((qlnx_host_t *)ecore_dev)->pci_dev,
5294 qlnx_pci_read_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t *reg_value)
5296 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5302 qlnx_pci_read_config_word(void *ecore_dev, uint32_t pci_reg,
5303 uint16_t *reg_value)
5305 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5311 qlnx_pci_read_config_dword(void *ecore_dev, uint32_t pci_reg,
5312 uint32_t *reg_value)
5314 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5320 qlnx_pci_write_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t reg_value)
5322 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5323 pci_reg, reg_value, 1);
5328 qlnx_pci_write_config_word(void *ecore_dev, uint32_t pci_reg,
5331 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5332 pci_reg, reg_value, 2);
5337 qlnx_pci_write_config_dword(void *ecore_dev, uint32_t pci_reg,
5340 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5341 pci_reg, reg_value, 4);
5346 qlnx_pci_find_capability(void *ecore_dev, int cap)
5353 if (pci_find_cap(ha->pci_dev, PCIY_EXPRESS, &reg) == 0)
5356 QL_DPRINT1(ha, "failed\n");
5362 qlnx_pci_find_ext_capability(void *ecore_dev, int ext_cap)
5369 if (pci_find_extcap(ha->pci_dev, ext_cap, &reg) == 0)
5372 QL_DPRINT1(ha, "failed\n");
5378 qlnx_reg_rd32(void *hwfn, uint32_t reg_addr)
5381 struct ecore_hwfn *p_hwfn;
5385 data32 = bus_read_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \
5386 (bus_size_t)(p_hwfn->reg_offset + reg_addr));
5392 qlnx_reg_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
5394 struct ecore_hwfn *p_hwfn = hwfn;
5396 bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \
5397 (bus_size_t)(p_hwfn->reg_offset + reg_addr), value);
5403 qlnx_reg_wr16(void *hwfn, uint32_t reg_addr, uint16_t value)
5405 struct ecore_hwfn *p_hwfn = hwfn;
5407 bus_write_2(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \
5408 (bus_size_t)(p_hwfn->reg_offset + reg_addr), value);
5413 qlnx_dbell_wr32_db(void *hwfn, void *reg_addr, uint32_t value)
5415 struct ecore_dev *cdev;
5416 struct ecore_hwfn *p_hwfn;
5421 cdev = p_hwfn->p_dev;
5423 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(p_hwfn->doorbells));
5424 bus_write_4(((qlnx_host_t *)cdev)->pci_dbells, offset, value);
5430 qlnx_dbell_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
5432 struct ecore_hwfn *p_hwfn = hwfn;
5434 bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_dbells, \
5435 (bus_size_t)(p_hwfn->db_offset + reg_addr), value);
5441 qlnx_direct_reg_rd32(void *p_hwfn, uint32_t *reg_addr)
5445 struct ecore_dev *cdev;
5447 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5448 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5450 data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, offset);
5456 qlnx_direct_reg_wr32(void *p_hwfn, void *reg_addr, uint32_t value)
5459 struct ecore_dev *cdev;
5461 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5462 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5464 bus_write_4(((qlnx_host_t *)cdev)->pci_reg, offset, value);
5470 qlnx_direct_reg_wr64(void *p_hwfn, void *reg_addr, uint64_t value)
5473 struct ecore_dev *cdev;
5475 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5476 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5478 bus_write_8(((qlnx_host_t *)cdev)->pci_reg, offset, value);
5483 qlnx_zalloc(uint32_t size)
5487 va = malloc((unsigned long)size, M_QLNXBUF, (M_NOWAIT | M_ZERO));
5489 return ((void *)va);
5493 qlnx_barrier(void *p_hwfn)
5497 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
5498 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_WRITE);
5502 qlnx_link_update(void *p_hwfn)
5505 int prev_link_state;
5507 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
5509 qlnx_fill_link(ha, p_hwfn, &ha->if_link);
5511 prev_link_state = ha->link_up;
5512 ha->link_up = ha->if_link.link_up;
5514 if (prev_link_state != ha->link_up) {
5516 if_link_state_change(ha->ifp, LINK_STATE_UP);
5518 if_link_state_change(ha->ifp, LINK_STATE_DOWN);
5522 #ifdef CONFIG_ECORE_SRIOV
5524 if (qlnx_vf_device(ha) != 0) {
5525 if (ha->sriov_initialized)
5526 qlnx_inform_vf_link_state(p_hwfn, ha);
5529 #endif /* #ifdef CONFIG_ECORE_SRIOV */
5530 #endif /* #ifdef QLNX_VF */
5536 __qlnx_osal_vf_fill_acquire_resc_req(struct ecore_hwfn *p_hwfn,
5537 struct ecore_vf_acquire_sw_info *p_sw_info)
5539 p_sw_info->driver_version = (QLNX_VERSION_MAJOR << 24) |
5540 (QLNX_VERSION_MINOR << 16) |
5542 p_sw_info->os_type = VFPF_ACQUIRE_OS_FREEBSD;
5548 qlnx_osal_vf_fill_acquire_resc_req(void *p_hwfn, void *p_resc_req,
5551 __qlnx_osal_vf_fill_acquire_resc_req(p_hwfn, p_sw_info);
5557 qlnx_fill_link(qlnx_host_t *ha, struct ecore_hwfn *hwfn,
5558 struct qlnx_link_output *if_link)
5560 struct ecore_mcp_link_params link_params;
5561 struct ecore_mcp_link_state link_state;
5563 struct ecore_ptt *p_ptt = NULL;
5565 memset(if_link, 0, sizeof(*if_link));
5566 memset(&link_params, 0, sizeof(struct ecore_mcp_link_params));
5567 memset(&link_state, 0, sizeof(struct ecore_mcp_link_state));
5569 ha = (qlnx_host_t *)hwfn->p_dev;
5571 /* Prepare source inputs */
5572 /* we only deal with physical functions */
5573 if (qlnx_vf_device(ha) != 0) {
5574 p_ptt = ecore_ptt_acquire(hwfn);
5576 if (p_ptt == NULL) {
5577 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
5581 ecore_mcp_get_media_type(hwfn, p_ptt, &if_link->media_type);
5582 ecore_ptt_release(hwfn, p_ptt);
5584 memcpy(&link_params, ecore_mcp_get_link_params(hwfn),
5585 sizeof(link_params));
5586 memcpy(&link_state, ecore_mcp_get_link_state(hwfn),
5587 sizeof(link_state));
5589 ecore_mcp_get_media_type(hwfn, NULL, &if_link->media_type);
5590 ecore_vf_read_bulletin(hwfn, &p_change);
5591 ecore_vf_get_link_params(hwfn, &link_params);
5592 ecore_vf_get_link_state(hwfn, &link_state);
5595 /* Set the link parameters to pass to protocol driver */
5596 if (link_state.link_up) {
5597 if_link->link_up = true;
5598 if_link->speed = link_state.speed;
5601 if_link->supported_caps = QLNX_LINK_CAP_FIBRE;
5603 if (link_params.speed.autoneg)
5604 if_link->supported_caps |= QLNX_LINK_CAP_Autoneg;
5606 if (link_params.pause.autoneg ||
5607 (link_params.pause.forced_rx && link_params.pause.forced_tx))
5608 if_link->supported_caps |= QLNX_LINK_CAP_Asym_Pause;
5610 if (link_params.pause.autoneg || link_params.pause.forced_rx ||
5611 link_params.pause.forced_tx)
5612 if_link->supported_caps |= QLNX_LINK_CAP_Pause;
5614 if (link_params.speed.advertised_speeds &
5615 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
5616 if_link->supported_caps |= QLNX_LINK_CAP_1000baseT_Half |
5617 QLNX_LINK_CAP_1000baseT_Full;
5619 if (link_params.speed.advertised_speeds &
5620 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
5621 if_link->supported_caps |= QLNX_LINK_CAP_10000baseKR_Full;
5623 if (link_params.speed.advertised_speeds &
5624 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
5625 if_link->supported_caps |= QLNX_LINK_CAP_25000baseKR_Full;
5627 if (link_params.speed.advertised_speeds &
5628 NVM_CFG1_PORT_DRV_LINK_SPEED_40G)
5629 if_link->supported_caps |= QLNX_LINK_CAP_40000baseLR4_Full;
5631 if (link_params.speed.advertised_speeds &
5632 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
5633 if_link->supported_caps |= QLNX_LINK_CAP_50000baseKR2_Full;
5635 if (link_params.speed.advertised_speeds &
5636 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
5637 if_link->supported_caps |= QLNX_LINK_CAP_100000baseKR4_Full;
5639 if_link->advertised_caps = if_link->supported_caps;
5641 if_link->autoneg = link_params.speed.autoneg;
5642 if_link->duplex = QLNX_LINK_DUPLEX;
5644 /* Link partner capabilities */
5646 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_HD)
5647 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Half;
5649 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_FD)
5650 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Full;
5652 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_10G)
5653 if_link->link_partner_caps |= QLNX_LINK_CAP_10000baseKR_Full;
5655 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_25G)
5656 if_link->link_partner_caps |= QLNX_LINK_CAP_25000baseKR_Full;
5658 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_40G)
5659 if_link->link_partner_caps |= QLNX_LINK_CAP_40000baseLR4_Full;
5661 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_50G)
5662 if_link->link_partner_caps |= QLNX_LINK_CAP_50000baseKR2_Full;
5664 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_100G)
5665 if_link->link_partner_caps |= QLNX_LINK_CAP_100000baseKR4_Full;
5667 if (link_state.an_complete)
5668 if_link->link_partner_caps |= QLNX_LINK_CAP_Autoneg;
5670 if (link_state.partner_adv_pause)
5671 if_link->link_partner_caps |= QLNX_LINK_CAP_Pause;
5673 if ((link_state.partner_adv_pause ==
5674 ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE) ||
5675 (link_state.partner_adv_pause ==
5676 ECORE_LINK_PARTNER_BOTH_PAUSE))
5677 if_link->link_partner_caps |= QLNX_LINK_CAP_Asym_Pause;
5683 qlnx_schedule_recovery(void *p_hwfn)
5687 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
5689 if (qlnx_vf_device(ha) != 0) {
5690 taskqueue_enqueue(ha->err_taskqueue, &ha->err_task);
5697 qlnx_nic_setup(struct ecore_dev *cdev, struct ecore_pf_params *func_params)
5701 for (i = 0; i < cdev->num_hwfns; i++) {
5702 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
5703 p_hwfn->pf_params = *func_params;
5705 #ifdef QLNX_ENABLE_IWARP
5706 if (qlnx_vf_device((qlnx_host_t *)cdev) != 0) {
5707 p_hwfn->using_ll2 = true;
5709 #endif /* #ifdef QLNX_ENABLE_IWARP */
5712 rc = ecore_resc_alloc(cdev);
5714 goto qlnx_nic_setup_exit;
5716 ecore_resc_setup(cdev);
5718 qlnx_nic_setup_exit:
5724 qlnx_nic_start(struct ecore_dev *cdev)
5727 struct ecore_hw_init_params params;
5729 bzero(&params, sizeof (struct ecore_hw_init_params));
5731 params.p_tunn = NULL;
5732 params.b_hw_start = true;
5733 params.int_mode = cdev->int_mode;
5734 params.allow_npar_tx_switch = true;
5735 params.bin_fw_data = NULL;
5737 rc = ecore_hw_init(cdev, &params);
5739 ecore_resc_free(cdev);
5747 qlnx_slowpath_start(qlnx_host_t *ha)
5749 struct ecore_dev *cdev;
5750 struct ecore_pf_params pf_params;
5753 memset(&pf_params, 0, sizeof(struct ecore_pf_params));
5754 pf_params.eth_pf_params.num_cons =
5755 (ha->num_rss) * (ha->num_tc + 1);
5757 #ifdef QLNX_ENABLE_IWARP
5758 if (qlnx_vf_device(ha) != 0) {
5759 if (ha->personality == ECORE_PCI_ETH_IWARP) {
5760 device_printf(ha->pci_dev, "setting parameters required by iWARP dev\n");
5761 pf_params.rdma_pf_params.num_qps = 1024;
5762 pf_params.rdma_pf_params.num_srqs = 1024;
5763 pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX;
5764 pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_IWARP;
5765 } else if (ha->personality == ECORE_PCI_ETH_ROCE) {
5766 device_printf(ha->pci_dev, "setting parameters required by RoCE dev\n");
5767 pf_params.rdma_pf_params.num_qps = 8192;
5768 pf_params.rdma_pf_params.num_srqs = 8192;
5770 pf_params.rdma_pf_params.min_dpis = 8;
5771 pf_params.rdma_pf_params.roce_edpm_mode = 0;
5772 pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX;
5773 pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_ROCE;
5776 #endif /* #ifdef QLNX_ENABLE_IWARP */
5780 rc = qlnx_nic_setup(cdev, &pf_params);
5782 goto qlnx_slowpath_start_exit;
5784 cdev->int_mode = ECORE_INT_MODE_MSIX;
5785 cdev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE;
5787 #ifdef QLNX_MAX_COALESCE
5788 cdev->rx_coalesce_usecs = 255;
5789 cdev->tx_coalesce_usecs = 255;
5792 rc = qlnx_nic_start(cdev);
5794 ha->rx_coalesce_usecs = cdev->rx_coalesce_usecs;
5795 ha->tx_coalesce_usecs = cdev->tx_coalesce_usecs;
5797 #ifdef QLNX_USER_LLDP
5798 (void)qlnx_set_lldp_tlvx(ha, NULL);
5799 #endif /* #ifdef QLNX_USER_LLDP */
5801 qlnx_slowpath_start_exit:
5807 qlnx_slowpath_stop(qlnx_host_t *ha)
5809 struct ecore_dev *cdev;
5810 device_t dev = ha->pci_dev;
5815 ecore_hw_stop(cdev);
5817 for (i = 0; i < ha->cdev.num_hwfns; i++) {
5818 if (ha->sp_handle[i])
5819 (void)bus_teardown_intr(dev, ha->sp_irq[i],
5822 ha->sp_handle[i] = NULL;
5825 (void) bus_release_resource(dev, SYS_RES_IRQ,
5826 ha->sp_irq_rid[i], ha->sp_irq[i]);
5827 ha->sp_irq[i] = NULL;
5830 ecore_resc_free(cdev);
5836 qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
5837 char ver_str[VER_SIZE])
5841 memcpy(cdev->name, name, NAME_SIZE);
5843 for_each_hwfn(cdev, i) {
5844 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
5847 cdev->drv_type = DRV_ID_DRV_TYPE_FREEBSD;
5853 qlnx_get_protocol_stats(void *cdev, int proto_type, void *proto_stats)
5855 enum ecore_mcp_protocol_type type;
5856 union ecore_mcp_protocol_stats *stats;
5857 struct ecore_eth_stats eth_stats;
5861 stats = proto_stats;
5865 case ECORE_MCP_LAN_STATS:
5866 ecore_get_vport_stats((struct ecore_dev *)cdev, &eth_stats);
5867 stats->lan_stats.ucast_rx_pkts = eth_stats.common.rx_ucast_pkts;
5868 stats->lan_stats.ucast_tx_pkts = eth_stats.common.tx_ucast_pkts;
5869 stats->lan_stats.fcs_err = -1;
5873 ha->err_get_proto_invalid_type++;
5875 QL_DPRINT1(ha, "invalid protocol type 0x%x\n", type);
5882 qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver)
5884 struct ecore_hwfn *p_hwfn;
5885 struct ecore_ptt *p_ptt;
5887 p_hwfn = &ha->cdev.hwfns[0];
5888 p_ptt = ecore_ptt_acquire(p_hwfn);
5890 if (p_ptt == NULL) {
5891 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
5894 ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, mfw_ver, NULL);
5896 ecore_ptt_release(p_hwfn, p_ptt);
5902 qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size)
5904 struct ecore_hwfn *p_hwfn;
5905 struct ecore_ptt *p_ptt;
5907 p_hwfn = &ha->cdev.hwfns[0];
5908 p_ptt = ecore_ptt_acquire(p_hwfn);
5910 if (p_ptt == NULL) {
5911 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
5914 ecore_mcp_get_flash_size(p_hwfn, p_ptt, flash_size);
5916 ecore_ptt_release(p_hwfn, p_ptt);
5922 qlnx_alloc_mem_arrays(qlnx_host_t *ha)
5924 struct ecore_dev *cdev;
5928 bzero(&ha->txq_array[0], (sizeof(struct qlnx_tx_queue) * QLNX_MAX_RSS));
5929 bzero(&ha->rxq_array[0], (sizeof(struct qlnx_rx_queue) * QLNX_MAX_RSS));
5930 bzero(&ha->sb_array[0], (sizeof(struct ecore_sb_info) * QLNX_MAX_RSS));
5936 qlnx_init_fp(qlnx_host_t *ha)
5938 int rss_id, txq_array_index, tc;
5940 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
5941 struct qlnx_fastpath *fp = &ha->fp_array[rss_id];
5943 fp->rss_id = rss_id;
5945 fp->sb_info = &ha->sb_array[rss_id];
5946 fp->rxq = &ha->rxq_array[rss_id];
5947 fp->rxq->rxq_id = rss_id;
5949 for (tc = 0; tc < ha->num_tc; tc++) {
5950 txq_array_index = tc * ha->num_rss + rss_id;
5951 fp->txq[tc] = &ha->txq_array[txq_array_index];
5952 fp->txq[tc]->index = txq_array_index;
5955 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", qlnx_name_str,
5958 fp->tx_ring_full = 0;
5960 /* reset all the statistics counters */
5962 fp->tx_pkts_processed = 0;
5963 fp->tx_pkts_freed = 0;
5964 fp->tx_pkts_transmitted = 0;
5965 fp->tx_pkts_completed = 0;
5967 #ifdef QLNX_TRACE_PERF_DATA
5968 fp->tx_pkts_trans_ctx = 0;
5969 fp->tx_pkts_compl_ctx = 0;
5970 fp->tx_pkts_trans_fp = 0;
5971 fp->tx_pkts_compl_fp = 0;
5972 fp->tx_pkts_compl_intr = 0;
5974 fp->tx_lso_wnd_min_len = 0;
5976 fp->tx_nsegs_gt_elem_left = 0;
5977 fp->tx_tso_max_nsegs = 0;
5978 fp->tx_tso_min_nsegs = 0;
5979 fp->err_tx_nsegs_gt_elem_left = 0;
5980 fp->err_tx_dmamap_create = 0;
5981 fp->err_tx_defrag_dmamap_load = 0;
5982 fp->err_tx_non_tso_max_seg = 0;
5983 fp->err_tx_dmamap_load = 0;
5984 fp->err_tx_defrag = 0;
5985 fp->err_tx_free_pkt_null = 0;
5986 fp->err_tx_cons_idx_conflict = 0;
5989 fp->err_m_getcl = 0;
5990 fp->err_m_getjcl = 0;
5996 qlnx_free_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info)
5998 struct ecore_dev *cdev;
6002 if (sb_info->sb_virt) {
6003 OSAL_DMA_FREE_COHERENT(cdev, ((void *)sb_info->sb_virt),
6004 (sb_info->sb_phys), (sizeof(*sb_info->sb_virt)));
6005 sb_info->sb_virt = NULL;
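/*
 * Status blocks are striped across the hwfns: sb_id % num_hwfns
 * selects the engine and sb_id / num_hwfns is the id relative to
 * that engine.
 */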
6010 qlnx_sb_init(struct ecore_dev *cdev, struct ecore_sb_info *sb_info,
6011 void *sb_virt_addr, bus_addr_t sb_phy_addr, u16 sb_id)
6013 struct ecore_hwfn *p_hwfn;
6017 hwfn_index = sb_id % cdev->num_hwfns;
6018 p_hwfn = &cdev->hwfns[hwfn_index];
6019 rel_sb_id = sb_id / cdev->num_hwfns;
6021 QL_DPRINT2(((qlnx_host_t *)cdev),
6022 "hwfn_index = %d p_hwfn = %p sb_id = 0x%x rel_sb_id = 0x%x \
6023 sb_info = %p sb_virt_addr = %p sb_phy_addr = %p\n",
6024 hwfn_index, p_hwfn, sb_id, rel_sb_id, sb_info,
6025 sb_virt_addr, (void *)sb_phy_addr);
6027 rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
6028 sb_virt_addr, sb_phy_addr, rel_sb_id);
6033 /* This function allocates fast-path status block memory */
6035 qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info, u16 sb_id)
6037 struct status_block_e4 *sb_virt;
6041 struct ecore_dev *cdev;
6045 size = sizeof(*sb_virt);
6046 sb_virt = OSAL_DMA_ALLOC_COHERENT(cdev, (&sb_phys), size);
6049 QL_DPRINT1(ha, "Status block allocation failed\n");
6053 rc = qlnx_sb_init(cdev, sb_info, sb_virt, sb_phys, sb_id);
6055 OSAL_DMA_FREE_COHERENT(cdev, sb_virt, sb_phys, size);
6062 qlnx_free_rx_buffers(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
6065 struct sw_rx_data *rx_buf;
6067 for (i = 0; i < rxq->num_rx_buffers; i++) {
6068 rx_buf = &rxq->sw_rx_ring[i];
6070 if (rx_buf->data != NULL) {
6071 if (rx_buf->map != NULL) {
6072 bus_dmamap_unload(ha->rx_tag, rx_buf->map);
6073 bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
6076 m_freem(rx_buf->data);
6077 rx_buf->data = NULL;
6084 qlnx_free_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
6086 struct ecore_dev *cdev;
6091 qlnx_free_rx_buffers(ha, rxq);
6093 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
6094 qlnx_free_tpa_mbuf(ha, &rxq->tpa_info[i]);
6095 if (rxq->tpa_info[i].mpf != NULL)
6096 m_freem(rxq->tpa_info[i].mpf);
6099 bzero((void *)&rxq->sw_rx_ring[0],
6100 (sizeof (struct sw_rx_data) * RX_RING_SIZE));
6102 /* Free the real RQ ring used by FW */
6103 if (rxq->rx_bd_ring.p_virt_addr) {
6104 ecore_chain_free(cdev, &rxq->rx_bd_ring);
6105 rxq->rx_bd_ring.p_virt_addr = NULL;
6108 /* Free the real completion ring used by FW */
6109 if (rxq->rx_comp_ring.p_virt_addr &&
6110 rxq->rx_comp_ring.pbl_sp.p_virt_table) {
6111 ecore_chain_free(cdev, &rxq->rx_comp_ring);
6112 rxq->rx_comp_ring.p_virt_addr = NULL;
6113 rxq->rx_comp_ring.pbl_sp.p_virt_table = NULL;
6116 #ifdef QLNX_SOFT_LRO
6118 struct lro_ctrl *lro;
6123 #endif /* #ifdef QLNX_SOFT_LRO */
6129 qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
6131 register struct mbuf *mp;
6132 uint16_t rx_buf_size;
6133 struct sw_rx_data *sw_rx_data;
6134 struct eth_rx_bd *rx_bd;
6135 dma_addr_t dma_addr;
6137 bus_dma_segment_t segs[1];
6140 struct ecore_dev *cdev;
6144 rx_buf_size = rxq->rx_buf_size;
6146 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);
6149 QL_DPRINT1(ha, "Failed to allocate Rx data\n");
6153 mp->m_len = mp->m_pkthdr.len = rx_buf_size;
6155 map = (bus_dmamap_t)0;
6157 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
6159 dma_addr = segs[0].ds_addr;
6161 if (ret || !dma_addr || (nsegs != 1)) {
6163 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
6164 ret, (unsigned long long)dma_addr, nsegs);
6168 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
6169 sw_rx_data->data = mp;
6170 sw_rx_data->dma_addr = dma_addr;
6171 sw_rx_data->map = map;
6173 /* Advance PROD and get BD pointer */
6174 rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
6175 rx_bd->addr.hi = htole32(U64_HI(dma_addr));
6176 rx_bd->addr.lo = htole32(U64_LO(dma_addr));
6177 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);
6179 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
6185 qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
6186 struct qlnx_agg_info *tpa)
6189 dma_addr_t dma_addr;
6191 bus_dma_segment_t segs[1];
6194 struct sw_rx_data *rx_buf;
6196 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);
6199 QL_DPRINT1(ha, "Failed to allocate Rx data\n");
6203 mp->m_len = mp->m_pkthdr.len = rx_buf_size;
6205 map = (bus_dmamap_t)0;
6207 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
6209 dma_addr = segs[0].ds_addr;
6211 if (ret || !dma_addr || (nsegs != 1)) {
6213 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
6214 ret, (unsigned long long)dma_addr, nsegs);
6218 rx_buf = &tpa->rx_buf;
6220 memset(rx_buf, 0, sizeof (struct sw_rx_data));
6223 rx_buf->dma_addr = dma_addr;
6226 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);
6232 qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa)
6234 struct sw_rx_data *rx_buf;
6236 rx_buf = &tpa->rx_buf;
6238 if (rx_buf->data != NULL) {
6239 if (rx_buf->map != NULL) {
6240 bus_dmamap_unload(ha->rx_tag, rx_buf->map);
6241 bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
6244 m_freem(rx_buf->data);
6245 rx_buf->data = NULL;
6250 /* This function allocates all memory needed per Rx queue */
6252 qlnx_alloc_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
6254 int i, rc, num_allocated;
6256 struct ecore_dev *cdev;
6261 rxq->num_rx_buffers = RX_RING_SIZE;
6263 rxq->rx_buf_size = ha->rx_buf_size;
6265 /* Allocate the parallel driver ring for Rx buffers */
6266 bzero((void *)&rxq->sw_rx_ring[0],
6267 (sizeof (struct sw_rx_data) * RX_RING_SIZE));
6269 /* Allocate FW Rx ring */
6271 rc = ecore_chain_alloc(cdev,
6272 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
6273 ECORE_CHAIN_MODE_NEXT_PTR,
6274 ECORE_CHAIN_CNT_TYPE_U16,
6276 sizeof(struct eth_rx_bd),
6277 &rxq->rx_bd_ring, NULL);
6282 /* Allocate FW completion ring */
6283 rc = ecore_chain_alloc(cdev,
6284 ECORE_CHAIN_USE_TO_CONSUME,
6285 ECORE_CHAIN_MODE_PBL,
6286 ECORE_CHAIN_CNT_TYPE_U16,
6288 sizeof(union eth_rx_cqe),
6289 &rxq->rx_comp_ring, NULL);
6294 /* Allocate buffers for the Rx ring */
6296 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
6297 rc = qlnx_alloc_tpa_mbuf(ha, rxq->rx_buf_size,
6303 for (i = 0; i < rxq->num_rx_buffers; i++) {
6304 rc = qlnx_alloc_rx_buffer(ha, rxq);
6309 if (!num_allocated) {
6310 QL_DPRINT1(ha, "Rx buffers allocation failed\n");
6312 } else if (num_allocated < rxq->num_rx_buffers) {
6313 QL_DPRINT1(ha, "Allocated less buffers than"
6314 " desired (%d allocated)\n", num_allocated);
6317 #ifdef QLNX_SOFT_LRO
6320 struct lro_ctrl *lro;
6324 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
6325 if (tcp_lro_init_args(lro, ifp, 0, rxq->num_rx_buffers)) {
6326 QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n",
6331 if (tcp_lro_init(lro)) {
6332 QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n",
6336 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
6340 #endif /* #ifdef QLNX_SOFT_LRO */
6344 qlnx_free_mem_rxq(ha, rxq);
6349 qlnx_free_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6350 struct qlnx_tx_queue *txq)
6352 struct ecore_dev *cdev;
6356 bzero((void *)&txq->sw_tx_ring[0],
6357 (sizeof (struct sw_tx_bd) * TX_RING_SIZE));
6359 /* Free the real Tx ring used by FW */
6360 if (txq->tx_pbl.p_virt_addr) {
6361 ecore_chain_free(cdev, &txq->tx_pbl);
6362 txq->tx_pbl.p_virt_addr = NULL;
6367 /* This function allocates all memory needed per Tx queue */
6369 qlnx_alloc_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6370 struct qlnx_tx_queue *txq)
6372 int ret = ECORE_SUCCESS;
6373 union eth_tx_bd_types *p_virt;
6374 struct ecore_dev *cdev;
6378 bzero((void *)&txq->sw_tx_ring[0],
6379 (sizeof (struct sw_tx_bd) * TX_RING_SIZE));
6381 /* Allocate the real Tx ring to be used by FW */
6382 ret = ecore_chain_alloc(cdev,
6383 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
6384 ECORE_CHAIN_MODE_PBL,
6385 ECORE_CHAIN_CNT_TYPE_U16,
6388 &txq->tx_pbl, NULL);
6390 if (ret != ECORE_SUCCESS) {
6394 txq->num_tx_buffers = TX_RING_SIZE;
6399 qlnx_free_mem_txq(ha, fp, txq);
6404 qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6407 struct ifnet *ifp = ha->ifp;
6409 if (mtx_initialized(&fp->tx_mtx)) {
6410 if (fp->tx_br != NULL) {
6411 mtx_lock(&fp->tx_mtx);
6413 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
6414 fp->tx_pkts_freed++;
6418 mtx_unlock(&fp->tx_mtx);
6420 buf_ring_free(fp->tx_br, M_DEVBUF);
6423 mtx_destroy(&fp->tx_mtx);
6429 qlnx_free_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6433 qlnx_free_mem_sb(ha, fp->sb_info);
6435 qlnx_free_mem_rxq(ha, fp->rxq);
6437 for (tc = 0; tc < ha->num_tc; tc++)
6438 qlnx_free_mem_txq(ha, fp, fp->txq[tc]);
6444 qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6446 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
6447 "qlnx%d_fp%d_tx_mq_lock", ha->dev_unit, fp->rss_id);
6449 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
6451 fp->tx_br = buf_ring_alloc(TX_RING_SIZE, M_DEVBUF,
6452 M_NOWAIT, &fp->tx_mtx);
6453 if (fp->tx_br == NULL) {
6454 QL_DPRINT1(ha, "buf_ring_alloc failed for fp[%d, %d]\n",
6455 ha->dev_unit, fp->rss_id);
6462 qlnx_alloc_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6466 rc = qlnx_alloc_mem_sb(ha, fp->sb_info, fp->rss_id);
6470 if (ha->rx_jumbo_buf_eq_mtu) {
6471 if (ha->max_frame_size <= MCLBYTES)
6472 ha->rx_buf_size = MCLBYTES;
6473 else if (ha->max_frame_size <= MJUMPAGESIZE)
6474 ha->rx_buf_size = MJUMPAGESIZE;
6475 else if (ha->max_frame_size <= MJUM9BYTES)
6476 ha->rx_buf_size = MJUM9BYTES;
6477 else if (ha->max_frame_size <= MJUM16BYTES)
6478 ha->rx_buf_size = MJUM16BYTES;
6480 if (ha->max_frame_size <= MCLBYTES)
6481 ha->rx_buf_size = MCLBYTES;
6483 ha->rx_buf_size = MJUMPAGESIZE;
6486 rc = qlnx_alloc_mem_rxq(ha, fp->rxq);
6490 for (tc = 0; tc < ha->num_tc; tc++) {
6491 rc = qlnx_alloc_mem_txq(ha, fp, fp->txq[tc]);
6499 qlnx_free_mem_fp(ha, fp);
6504 qlnx_free_mem_load(qlnx_host_t *ha)
6507 struct ecore_dev *cdev;
6511 for (i = 0; i < ha->num_rss; i++) {
6512 struct qlnx_fastpath *fp = &ha->fp_array[i];
6514 qlnx_free_mem_fp(ha, fp);
6520 qlnx_alloc_mem_load(qlnx_host_t *ha)
6524 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
6525 struct qlnx_fastpath *fp = &ha->fp_array[rss_id];
6527 rc = qlnx_alloc_mem_fp(ha, fp);
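/*
 * Start the vport on every hwfn; for CMT (100G) devices the same
 * vport is instantiated on each engine.
 */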
6535 qlnx_start_vport(struct ecore_dev *cdev,
6539 u8 inner_vlan_removal_en_flg,
6544 struct ecore_sp_vport_start_params vport_start_params = { 0 };
6547 ha = (qlnx_host_t *)cdev;
6549 vport_start_params.remove_inner_vlan = inner_vlan_removal_en_flg;
6550 vport_start_params.tx_switching = 0;
6551 vport_start_params.handle_ptp_pkts = 0;
6552 vport_start_params.only_untagged = 0;
6553 vport_start_params.drop_ttl0 = drop_ttl0_flg;
6555 vport_start_params.tpa_mode =
6556 (hw_lro_enable ? ECORE_TPA_MODE_RSC : ECORE_TPA_MODE_NONE);
6557 vport_start_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;
6559 vport_start_params.vport_id = vport_id;
6560 vport_start_params.mtu = mtu;
6562 QL_DPRINT2(ha, "Setting mtu to %d and VPORT ID = %d\n", mtu, vport_id);
6564 for_each_hwfn(cdev, i) {
6565 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
6567 vport_start_params.concrete_fid = p_hwfn->hw_info.concrete_fid;
6568 vport_start_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
6570 rc = ecore_sp_vport_start(p_hwfn, &vport_start_params);
6573 QL_DPRINT1(ha, "Failed to start VPORT V-PORT %d"
6574 " with MTU %d\n" , vport_id, mtu);
6578 ecore_hw_start_fastpath(p_hwfn);
6580 QL_DPRINT2(ha, "Started V-PORT %d with MTU %d\n",
6587 qlnx_update_vport(struct ecore_dev *cdev,
6588 struct qlnx_update_vport_params *params)
6590 struct ecore_sp_vport_update_params sp_params;
6591 int rc, i, j, fp_index;
6592 struct ecore_hwfn *p_hwfn;
6593 struct ecore_rss_params *rss;
6594 qlnx_host_t *ha = (qlnx_host_t *)cdev;
6595 struct qlnx_fastpath *fp;
6597 memset(&sp_params, 0, sizeof(sp_params));
6598 /* Translate protocol params into sp params */
6599 sp_params.vport_id = params->vport_id;
6601 sp_params.update_vport_active_rx_flg =
6602 params->update_vport_active_rx_flg;
6603 sp_params.vport_active_rx_flg = params->vport_active_rx_flg;
6605 sp_params.update_vport_active_tx_flg =
6606 params->update_vport_active_tx_flg;
6607 sp_params.vport_active_tx_flg = params->vport_active_tx_flg;
6609 sp_params.update_inner_vlan_removal_flg =
6610 params->update_inner_vlan_removal_flg;
6611 sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg;
6613 sp_params.sge_tpa_params = params->sge_tpa_params;
6615 /* RSS is a bit tricky, since the upper layer isn't familiar with hwfns;
6616 * we need to re-fix the RSS values per engine for CMT.
6618 if (params->rss_params->update_rss_config)
6619 sp_params.rss_params = params->rss_params;
6621 sp_params.rss_params = NULL;
6623 for_each_hwfn(cdev, i) {
6624 p_hwfn = &cdev->hwfns[i];
6626 if ((cdev->num_hwfns > 1) &&
6627 params->rss_params->update_rss_config &&
6628 params->rss_params->rss_enable) {
6629 rss = params->rss_params;
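/*
 * Spread the indirection table across the engines: entry j on
 * hwfn i points at fastpath ((num_hwfns * j) + i) % num_rss.
 */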
6631 for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE; j++) {
6632 fp_index = ((cdev->num_hwfns * j) + i) %
6635 fp = &ha->fp_array[fp_index];
6636 rss->rss_ind_table[j] = fp->rxq->handle;
6639 for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE;) {
6640 QL_DPRINT3(ha, "%p %p %p %p %p %p %p %p\n",
6641 rss->rss_ind_table[j],
6642 rss->rss_ind_table[j+1],
6643 rss->rss_ind_table[j+2],
6644 rss->rss_ind_table[j+3],
6645 rss->rss_ind_table[j+4],
6646 rss->rss_ind_table[j+5],
6647 rss->rss_ind_table[j+6],
6648 rss->rss_ind_table[j+7]);
6653 sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
6655 QL_DPRINT1(ha, "Update sp vport ID=%d\n", params->vport_id);
6657 rc = ecore_sp_vport_update(p_hwfn, &sp_params,
6658 ECORE_SPQ_MODE_EBLOCK, NULL);
6660 QL_DPRINT1(ha, "Failed to update VPORT\n");
6664 QL_DPRINT2(ha, "Updated V-PORT %d: tx_active_flag %d, \
6665 rx_active_flag %d [tx_update %d], [rx_update %d]\n",
6666 params->vport_id, params->vport_active_tx_flg,
6667 params->vport_active_rx_flg,
6668 params->update_vport_active_tx_flg,
6669 params->update_vport_active_rx_flg);
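/*
 * Recycle the current Rx buffer: re-post the consumer BD at the
 * producer index so the same mbuf is reused instead of being
 * passed up the stack.
 */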
6676 qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq)
6678 struct eth_rx_bd *rx_bd_cons =
6679 ecore_chain_consume(&rxq->rx_bd_ring);
6680 struct eth_rx_bd *rx_bd_prod =
6681 ecore_chain_produce(&rxq->rx_bd_ring);
6682 struct sw_rx_data *sw_rx_data_cons =
6683 &rxq->sw_rx_ring[rxq->sw_rx_cons];
6684 struct sw_rx_data *sw_rx_data_prod =
6685 &rxq->sw_rx_ring[rxq->sw_rx_prod];
6687 sw_rx_data_prod->data = sw_rx_data_cons->data;
6688 memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd));
6690 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
6691 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
6697 qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, struct qlnx_rx_queue *rxq)
6703 struct eth_rx_prod_data rx_prod_data;
6707 bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
6708 cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
6710 /* Update producers */
6711 rx_prods.rx_prod_data.bd_prod = htole16(bd_prod);
6712 rx_prods.rx_prod_data.cqe_prod = htole16(cqe_prod);
6714 /* Make sure that the BD and SGE data is updated before updating the
6715 * producers since FW might read the BD/SGE right after the producer
6720 internal_ram_wr(p_hwfn, rxq->hw_rxq_prod_addr,
6721 sizeof(rx_prods), &rx_prods.data32);
6723 /* mmiowb is needed to synchronize doorbell writes from more than one
6724 * processor. It guarantees that the write arrives to the device before
6725 * the napi lock is released and another qlnx_poll is called (possibly
6726 * on another CPU). Without this barrier, the next doorbell can bypass
6727 * this doorbell. This is applicable to IA64/Altix systems.
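/*
 * Default Toeplitz RSS hash key (the well-known Microsoft sample
 * key), packed as 32-bit words.
 */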
6734 static uint32_t qlnx_hash_key[] = {
6735 ((0x6d << 24)|(0x5a << 16)|(0x56 << 8)|0xda),
6736 ((0x25 << 24)|(0x5b << 16)|(0x0e << 8)|0xc2),
6737 ((0x41 << 24)|(0x67 << 16)|(0x25 << 8)|0x3d),
6738 ((0x43 << 24)|(0xa3 << 16)|(0x8f << 8)|0xb0),
6739 ((0xd0 << 24)|(0xca << 16)|(0x2b << 8)|0xcb),
6740 ((0xae << 24)|(0x7b << 16)|(0x30 << 8)|0xb4),
6741 ((0x77 << 24)|(0xcb << 16)|(0x2d << 8)|0xa3),
6742 ((0x80 << 24)|(0x30 << 16)|(0xf2 << 8)|0x0c),
6743 ((0x6a << 24)|(0x42 << 16)|(0xb7 << 8)|0x3b),
6744 ((0xbe << 24)|(0xac << 16)|(0x01 << 8)|0xfa)};
6747 qlnx_start_queues(qlnx_host_t *ha)
6749 int rc, tc, i, vport_id = 0,
6750 drop_ttl0_flg = 1, vlan_removal_en = 1,
6751 tx_switching = 0, hw_lro_enable = 0;
6752 struct ecore_dev *cdev = &ha->cdev;
6753 struct ecore_rss_params *rss_params = &ha->rss_params;
6754 struct qlnx_update_vport_params vport_update_params;
6756 struct ecore_hwfn *p_hwfn;
6757 struct ecore_sge_tpa_params tpa_params;
6758 struct ecore_queue_start_common_params qparams;
6759 struct qlnx_fastpath *fp;
6763 QL_DPRINT1(ha, "Num RSS = %d\n", ha->num_rss);
6766 QL_DPRINT1(ha, "Cannot update V-VPORT as active as there"
6767 " are no Rx queues\n");
6771 #ifndef QLNX_SOFT_LRO
6772 hw_lro_enable = ifp->if_capenable & IFCAP_LRO;
6773 #endif /* #ifndef QLNX_SOFT_LRO */
6775 rc = qlnx_start_vport(cdev, vport_id, ifp->if_mtu, drop_ttl0_flg,
6776 vlan_removal_en, tx_switching, hw_lro_enable);
6779 QL_DPRINT1(ha, "Start V-PORT failed %d\n", rc);
6783 QL_DPRINT2(ha, "Start vport ramrod passed, "
6784 "vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
6785 vport_id, (int)(ifp->if_mtu + 0xe), vlan_removal_en);
6788 struct ecore_rxq_start_ret_params rx_ret_params;
6789 struct ecore_txq_start_ret_params tx_ret_params;
6791 fp = &ha->fp_array[i];
6792 p_hwfn = &cdev->hwfns[(fp->rss_id % cdev->num_hwfns)];
6794 bzero(&qparams, sizeof(struct ecore_queue_start_common_params));
6795 bzero(&rx_ret_params,
6796 sizeof (struct ecore_rxq_start_ret_params));
6798 qparams.queue_id = i;
6799 qparams.vport_id = vport_id;
6800 qparams.stats_id = vport_id;
6801 qparams.p_sb = fp->sb_info;
6802 qparams.sb_idx = RX_PI;
6805 rc = ecore_eth_rx_queue_start(p_hwfn,
6806 p_hwfn->hw_info.opaque_fid,
6808 fp->rxq->rx_buf_size, /* bd_max_bytes */
6809 /* bd_chain_phys_addr */
6810 fp->rxq->rx_bd_ring.p_phys_addr,
6812 ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring),
6814 ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring),
6818 QL_DPRINT1(ha, "Start RXQ #%d failed %d\n", i, rc);
6822 fp->rxq->hw_rxq_prod_addr = rx_ret_params.p_prod;
6823 fp->rxq->handle = rx_ret_params.p_handle;
6824 fp->rxq->hw_cons_ptr =
6825 &fp->sb_info->sb_virt->pi_array[RX_PI];
6827 qlnx_update_rx_prod(p_hwfn, fp->rxq);
6829 for (tc = 0; tc < ha->num_tc; tc++) {
6830 struct qlnx_tx_queue *txq = fp->txq[tc];
6833 sizeof(struct ecore_queue_start_common_params));
6834 bzero(&tx_ret_params,
6835 sizeof (struct ecore_txq_start_ret_params));
6837 qparams.queue_id = txq->index / cdev->num_hwfns;
6838 qparams.vport_id = vport_id;
6839 qparams.stats_id = vport_id;
6840 qparams.p_sb = fp->sb_info;
6841 qparams.sb_idx = TX_PI(tc);
6843 rc = ecore_eth_tx_queue_start(p_hwfn,
6844 p_hwfn->hw_info.opaque_fid,
6846 /* bd_chain_phys_addr */
6847 ecore_chain_get_pbl_phys(&txq->tx_pbl),
6848 ecore_chain_get_page_cnt(&txq->tx_pbl),
6852 QL_DPRINT1(ha, "Start TXQ #%d failed %d\n",
6857 txq->doorbell_addr = tx_ret_params.p_doorbell;
6858 txq->handle = tx_ret_params.p_handle;
6861 &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
6862 SET_FIELD(txq->tx_db.data.params,
6863 ETH_DB_DATA_DEST, DB_DEST_XCM);
6864 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
6866 SET_FIELD(txq->tx_db.data.params,
6867 ETH_DB_DATA_AGG_VAL_SEL,
6868 DQ_XCM_ETH_TX_BD_PROD_CMD);
6870 txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
6874 /* Fill struct with RSS params */
6875 if (ha->num_rss > 1) {
6876 rss_params->update_rss_config = 1;
6877 rss_params->rss_enable = 1;
6878 rss_params->update_rss_capabilities = 1;
6879 rss_params->update_rss_ind_table = 1;
6880 rss_params->update_rss_key = 1;
6881 rss_params->rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 |
6882 ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP;
6883 rss_params->rss_table_size_log = 7; /* 2^7 = 128 */
6885 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
6886 fp = &ha->fp_array[(i % ha->num_rss)];
6887 rss_params->rss_ind_table[i] = fp->rxq->handle;
6890 for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
6891 rss_params->rss_key[i] = (__le32)qlnx_hash_key[i];
6894 memset(rss_params, 0, sizeof(*rss_params));
6897 /* Prepare and send the vport enable */
6898 memset(&vport_update_params, 0, sizeof(vport_update_params));
6899 vport_update_params.vport_id = vport_id;
6900 vport_update_params.update_vport_active_tx_flg = 1;
6901 vport_update_params.vport_active_tx_flg = 1;
6902 vport_update_params.update_vport_active_rx_flg = 1;
6903 vport_update_params.vport_active_rx_flg = 1;
6904 vport_update_params.rss_params = rss_params;
6905 vport_update_params.update_inner_vlan_removal_flg = 1;
6906 vport_update_params.inner_vlan_removal_flg = 1;
6908 if (hw_lro_enable) {
6909 memset(&tpa_params, 0, sizeof (struct ecore_sge_tpa_params));
6911 tpa_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;
6913 tpa_params.update_tpa_en_flg = 1;
6914 tpa_params.tpa_ipv4_en_flg = 1;
6915 tpa_params.tpa_ipv6_en_flg = 1;
6917 tpa_params.update_tpa_param_flg = 1;
6918 tpa_params.tpa_pkt_split_flg = 0;
6919 tpa_params.tpa_hdr_data_split_flg = 0;
6920 tpa_params.tpa_gro_consistent_flg = 0;
6921 tpa_params.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
6922 tpa_params.tpa_max_size = (uint16_t)(-1);
6923 tpa_params.tpa_min_size_to_start = ifp->if_mtu/2;
6924 tpa_params.tpa_min_size_to_cont = ifp->if_mtu/2;
6926 vport_update_params.sge_tpa_params = &tpa_params;
6929 rc = qlnx_update_vport(cdev, &vport_update_params);
6931 QL_DPRINT1(ha, "Update V-PORT failed %d\n", rc);
6939 qlnx_drain_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6940 struct qlnx_tx_queue *txq)
6942 uint16_t hw_bd_cons;
6943 uint16_t ecore_cons_idx;
6945 QL_DPRINT2(ha, "enter\n");
6947 hw_bd_cons = le16toh(*txq->hw_cons_ptr);
6949 while (hw_bd_cons !=
6950 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
6951 mtx_lock(&fp->tx_mtx);
6953 (void)qlnx_tx_int(ha, fp, txq);
6955 mtx_unlock(&fp->tx_mtx);
6957 qlnx_mdelay(__func__, 2);
6959 hw_bd_cons = le16toh(*txq->hw_cons_ptr);
6962 QL_DPRINT2(ha, "[%d, %d]: done\n", fp->rss_id, txq->index);
6968 qlnx_stop_queues(qlnx_host_t *ha)
6970 struct qlnx_update_vport_params vport_update_params;
6971 struct ecore_dev *cdev;
6972 struct qlnx_fastpath *fp;
6977 /* Disable the vport */
6979 memset(&vport_update_params, 0, sizeof(vport_update_params));
6981 vport_update_params.vport_id = 0;
6982 vport_update_params.update_vport_active_tx_flg = 1;
6983 vport_update_params.vport_active_tx_flg = 0;
6984 vport_update_params.update_vport_active_rx_flg = 1;
6985 vport_update_params.vport_active_rx_flg = 0;
6986 vport_update_params.rss_params = &ha->rss_params;
6987 vport_update_params.rss_params->update_rss_config = 0;
6988 vport_update_params.rss_params->rss_enable = 0;
6989 vport_update_params.update_inner_vlan_removal_flg = 0;
6990 vport_update_params.inner_vlan_removal_flg = 0;
6992 QL_DPRINT1(ha, "Update vport ID= %d\n", vport_update_params.vport_id);
6994 rc = qlnx_update_vport(cdev, &vport_update_params);
6996 QL_DPRINT1(ha, "Failed to update vport\n");
7000 /* Flush Tx queues. If needed, request drain from MCP */
7002 fp = &ha->fp_array[i];
7004 for (tc = 0; tc < ha->num_tc; tc++) {
7005 struct qlnx_tx_queue *txq = fp->txq[tc];
7007 rc = qlnx_drain_txq(ha, fp, txq);
7013 /* Stop all queues in reverse order */
7014 for (i = ha->num_rss - 1; i >= 0; i--) {
7015 struct ecore_hwfn *p_hwfn = &cdev->hwfns[(i % cdev->num_hwfns)];
7017 fp = &ha->fp_array[i];
7019 /* Stop the Tx queue(s) */
7020 for (tc = 0; tc < ha->num_tc; tc++) {
7023 tx_queue_id = tc * ha->num_rss + i;
7024 rc = ecore_eth_tx_queue_stop(p_hwfn,
7025 fp->txq[tc]->handle);
7028 QL_DPRINT1(ha, "Failed to stop TXQ #%d\n",
7034 /* Stop the Rx queue */
7035 rc = ecore_eth_rx_queue_stop(p_hwfn, fp->rxq->handle, false,
7038 QL_DPRINT1(ha, "Failed to stop RXQ #%d\n", i);
7043 /* Stop the vport */
7044 for_each_hwfn(cdev, i) {
7045 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
7047 rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 0);
7050 QL_DPRINT1(ha, "Failed to stop VPORT\n");
7059 qlnx_set_ucast_rx_mac(qlnx_host_t *ha,
7060 enum ecore_filter_opcode opcode,
7061 unsigned char mac[ETH_ALEN])
7063 struct ecore_filter_ucast ucast;
7064 struct ecore_dev *cdev;
7069 bzero(&ucast, sizeof(struct ecore_filter_ucast));
7071 ucast.opcode = opcode;
7072 ucast.type = ECORE_FILTER_MAC;
7073 ucast.is_rx_filter = 1;
7074 ucast.vport_to_add_to = 0;
7075 memcpy(&ucast.mac[0], mac, ETH_ALEN);
7077 rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);
7083 qlnx_remove_all_ucast_mac(qlnx_host_t *ha)
7085 struct ecore_filter_ucast ucast;
7086 struct ecore_dev *cdev;
7089 bzero(&ucast, sizeof(struct ecore_filter_ucast));
7091 ucast.opcode = ECORE_FILTER_REPLACE;
7092 ucast.type = ECORE_FILTER_MAC;
7093 ucast.is_rx_filter = 1;
7097 rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);
7103 qlnx_remove_all_mcast_mac(qlnx_host_t *ha)
7105 struct ecore_filter_mcast *mcast;
7106 struct ecore_dev *cdev;
7111 mcast = &ha->ecore_mcast;
7112 bzero(mcast, sizeof(struct ecore_filter_mcast));
7114 mcast->opcode = ECORE_FILTER_REMOVE;
7116 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
7117 if (ha->mcast[i].addr[0] || ha->mcast[i].addr[1] ||
7118 ha->mcast[i].addr[2] || ha->mcast[i].addr[3] ||
7119 ha->mcast[i].addr[4] || ha->mcast[i].addr[5]) {
7120 memcpy(&mcast->mac[i][0], &ha->mcast[i].addr[0], ETH_ALEN);
7121 mcast->num_mc_addrs++;
7126 rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);
7128 bzero(ha->mcast, (sizeof(qlnx_mcast_t) * QLNX_MAX_NUM_MULTICAST_ADDRS));
7135 qlnx_clean_filters(qlnx_host_t *ha)
7139 /* Remove all unicast macs */
7140 rc = qlnx_remove_all_ucast_mac(ha);
7144 /* Remove all multicast macs */
7145 rc = qlnx_remove_all_mcast_mac(ha);
7149 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_FLUSH, ha->primary_mac);
7155 qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter)
7157 struct ecore_filter_accept_flags accept;
7159 struct ecore_dev *cdev;
7163 bzero(&accept, sizeof(struct ecore_filter_accept_flags));
7165 accept.update_rx_mode_config = 1;
7166 accept.rx_accept_filter = filter;
7168 accept.update_tx_mode_config = 1;
7169 accept.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
7170 ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST;
7172 rc = ecore_filter_accept_cmd(cdev, 0, accept, false, false,
7173 ECORE_SPQ_MODE_CB, NULL);
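/*
 * Program the default Rx mode: the primary unicast MAC plus
 * matched multicast and broadcast.
 */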
7179 qlnx_set_rx_mode(qlnx_host_t *ha)
7184 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, ha->primary_mac);
7188 rc = qlnx_remove_all_mcast_mac(ha);
7192 filter = ECORE_ACCEPT_UCAST_MATCHED |
7193 ECORE_ACCEPT_MCAST_MATCHED |
7196 if (qlnx_vf_device(ha) == 0) {
7197 filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
7198 filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
7200 ha->filter = filter;
7202 rc = qlnx_set_rx_accept_filter(ha, filter);
7208 qlnx_set_link(qlnx_host_t *ha, bool link_up)
7211 struct ecore_dev *cdev;
7212 struct ecore_hwfn *hwfn;
7213 struct ecore_ptt *ptt;
7215 if (qlnx_vf_device(ha) == 0)
7220 for_each_hwfn(cdev, i) {
7221 hwfn = &cdev->hwfns[i];
7223 ptt = ecore_ptt_acquire(hwfn);
7227 rc = ecore_mcp_set_link(hwfn, ptt, link_up);
7229 ecore_ptt_release(hwfn, ptt);
7237 #if __FreeBSD_version >= 1100000
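/*
 * if_get_counter() backend: report interface statistics from the
 * cached hardware stats, which qlnx_timer() refreshes periodically.
 */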
7239 qlnx_get_counter(if_t ifp, ift_counter cnt)
7244 ha = (qlnx_host_t *)if_getsoftc(ifp);
7247 case IFCOUNTER_IPACKETS:
7248 count = ha->hw_stats.common.rx_ucast_pkts +
7249 ha->hw_stats.common.rx_mcast_pkts +
7250 ha->hw_stats.common.rx_bcast_pkts;
7253 case IFCOUNTER_IERRORS:
7254 count = ha->hw_stats.common.rx_crc_errors +
7255 ha->hw_stats.common.rx_align_errors +
7256 ha->hw_stats.common.rx_oversize_packets +
7257 ha->hw_stats.common.rx_undersize_packets;
7260 case IFCOUNTER_OPACKETS:
7261 count = ha->hw_stats.common.tx_ucast_pkts +
7262 ha->hw_stats.common.tx_mcast_pkts +
7263 ha->hw_stats.common.tx_bcast_pkts;
7266 case IFCOUNTER_OERRORS:
7267 count = ha->hw_stats.common.tx_err_drop_pkts;
7270 case IFCOUNTER_COLLISIONS:
7273 case IFCOUNTER_IBYTES:
7274 count = ha->hw_stats.common.rx_ucast_bytes +
7275 ha->hw_stats.common.rx_mcast_bytes +
7276 ha->hw_stats.common.rx_bcast_bytes;
7279 case IFCOUNTER_OBYTES:
7280 count = ha->hw_stats.common.tx_ucast_bytes +
7281 ha->hw_stats.common.tx_mcast_bytes +
7282 ha->hw_stats.common.tx_bcast_bytes;
7285 case IFCOUNTER_IMCASTS:
7286 count = ha->hw_stats.common.rx_mcast_pkts;
7289 case IFCOUNTER_OMCASTS:
7290 count = ha->hw_stats.common.tx_mcast_pkts;
7293 case IFCOUNTER_IQDROPS:
7294 case IFCOUNTER_OQDROPS:
7295 case IFCOUNTER_NOPROTO:
7298 return (if_get_counter_default(ifp, cnt));
7305 qlnx_timer(void *arg)
7309 ha = (qlnx_host_t *)arg;
7311 if (ha->error_recovery) {
7312 ha->error_recovery = 0;
7313 taskqueue_enqueue(ha->err_taskqueue, &ha->err_task);
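/* Refresh the cached vport statistics; this callout fires once per second. */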
7317 ecore_get_vport_stats(&ha->cdev, &ha->hw_stats);
7319 if (ha->storm_stats_gather)
7320 qlnx_sample_storm_stats(ha);
7322 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
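/*
 * Bring the port up: allocate the per-queue memory, hook up and
 * CPU-bind the fastpath interrupts, start the vport and queues,
 * program the Rx filters and request link-up.
 */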
7328 qlnx_load(qlnx_host_t *ha)
7332 struct ecore_dev *cdev;
7338 QL_DPRINT2(ha, "enter\n");
7340 rc = qlnx_alloc_mem_arrays(ha);
7342 goto qlnx_load_exit0;
7346 rc = qlnx_alloc_mem_load(ha);
7348 goto qlnx_load_exit1;
7350 QL_DPRINT2(ha, "Allocated %d RSS queues on %d TC/s\n",
7351 ha->num_rss, ha->num_tc);
7353 for (i = 0; i < ha->num_rss; i++) {
7354 if ((rc = bus_setup_intr(dev, ha->irq_vec[i].irq,
7355 (INTR_TYPE_NET | INTR_MPSAFE),
7356 NULL, qlnx_fp_isr, &ha->irq_vec[i],
7357 &ha->irq_vec[i].handle))) {
7358 QL_DPRINT1(ha, "could not setup interrupt\n");
7359 goto qlnx_load_exit2;
7362 QL_DPRINT2(ha, "rss_id = %d irq_rid %d "
7363 "irq %p handle %p\n", i,
7364 ha->irq_vec[i].irq_rid,
7365 ha->irq_vec[i].irq, ha->irq_vec[i].handle);
7367 bus_bind_intr(dev, ha->irq_vec[i].irq, (i % mp_ncpus));
7370 rc = qlnx_start_queues(ha);
7372 goto qlnx_load_exit2;
7374 QL_DPRINT2(ha, "Start VPORT, RXQ and TXQ succeeded\n");
7376 /* Add primary mac and set Rx filters */
7377 rc = qlnx_set_rx_mode(ha);
7379 goto qlnx_load_exit2;
7381 /* Ask for link-up using current configuration */
7382 qlnx_set_link(ha, true);
7384 if (qlnx_vf_device(ha) == 0)
7385 qlnx_link_update(&ha->cdev.hwfns[0]);
7387 ha->state = QLNX_STATE_OPEN;
7389 bzero(&ha->hw_stats, sizeof(struct ecore_eth_stats));
7391 if (ha->flags.callout_init)
7392 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
7394 goto qlnx_load_exit0;
7397 qlnx_free_mem_load(ha);
7403 QL_DPRINT2(ha, "exit [%d]\n", rc);
7408 qlnx_drain_soft_lro(qlnx_host_t *ha)
7410 #ifdef QLNX_SOFT_LRO
7417 if (ifp->if_capenable & IFCAP_LRO) {
7418 for (i = 0; i < ha->num_rss; i++) {
7419 struct qlnx_fastpath *fp = &ha->fp_array[i];
7420 struct lro_ctrl *lro;
7422 lro = &fp->rxq->lro;
7424 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
7426 tcp_lro_flush_all(lro);
7429 struct lro_entry *queued;
7431 while (!SLIST_EMPTY(&lro->lro_active)) {
7432 queued = SLIST_FIRST(&lro->lro_active);
7433 SLIST_REMOVE_HEAD(&lro->lro_active, next);
7434 tcp_lro_flush(lro, queued);
7437 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
7441 #endif /* #ifdef QLNX_SOFT_LRO */
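/*
 * Teardown path mirroring qlnx_load(): drop the link, flush the
 * filters, stop the queues and fastpath, tear down the interrupts
 * and free the per-queue memory.
 */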
7447 qlnx_unload(qlnx_host_t *ha)
7449 struct ecore_dev *cdev;
7456 QL_DPRINT2(ha, "enter\n");
7457 QL_DPRINT1(ha, "QLNX STATE = %d\n", ha->state);
7459 if (ha->state == QLNX_STATE_OPEN) {
7460 qlnx_set_link(ha, false);
7461 qlnx_clean_filters(ha);
7462 qlnx_stop_queues(ha);
7463 ecore_hw_stop_fastpath(cdev);
7465 for (i = 0; i < ha->num_rss; i++) {
7466 if (ha->irq_vec[i].handle) {
7467 (void)bus_teardown_intr(dev,
7469 ha->irq_vec[i].handle);
7470 ha->irq_vec[i].handle = NULL;
7474 qlnx_drain_fp_taskqueues(ha);
7475 qlnx_drain_soft_lro(ha);
7476 qlnx_free_mem_load(ha);
7479 if (ha->flags.callout_init)
7480 callout_drain(&ha->qlnx_callout);
7482 qlnx_mdelay(__func__, 1000);
7484 ha->state = QLNX_STATE_CLOSED;
7486 QL_DPRINT2(ha, "exit\n");
7491 qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
7494 struct ecore_hwfn *p_hwfn;
7495 struct ecore_ptt *p_ptt;
7497 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
7499 p_hwfn = &ha->cdev.hwfns[hwfn_index];
7500 p_ptt = ecore_ptt_acquire(p_hwfn);
7503 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
7507 rval = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);
7509 if (rval == DBG_STATUS_OK)
7512 QL_DPRINT1(ha, "ecore_dbg_grc_get_dump_buf_size failed"
7516 ecore_ptt_release(p_hwfn, p_ptt);
7522 qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
7525 struct ecore_hwfn *p_hwfn;
7526 struct ecore_ptt *p_ptt;
7528 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
7530 p_hwfn = &ha->cdev.hwfns[hwfn_index];
7531 p_ptt = ecore_ptt_acquire(p_hwfn);
7534 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
7538 rval = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);
7540 if (rval == DBG_STATUS_OK)
7543 QL_DPRINT1(ha, "ecore_dbg_idle_chk_get_dump_buf_size failed"
7547 ecore_ptt_release(p_hwfn, p_ptt);
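/*
 * Sample the per-storm (X/Y/P/T/M/U) active/stall/sleeping/inactive
 * cycle counters from SEM fast memory; gathering stops after
 * QLNX_STORM_STATS_SAMPLES_PER_HWFN samples per hwfn.
 */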
7553 qlnx_sample_storm_stats(qlnx_host_t *ha)
7556 struct ecore_dev *cdev;
7557 qlnx_storm_stats_t *s_stats;
7559 struct ecore_ptt *p_ptt;
7560 struct ecore_hwfn *hwfn;
7562 if (ha->storm_stats_index >= QLNX_STORM_STATS_SAMPLES_PER_HWFN) {
7563 ha->storm_stats_gather = 0;
7569 for_each_hwfn(cdev, i) {
7570 hwfn = &cdev->hwfns[i];
7572 p_ptt = ecore_ptt_acquire(hwfn);
7576 index = ha->storm_stats_index +
7577 (i * QLNX_STORM_STATS_SAMPLES_PER_HWFN);
7579 s_stats = &ha->storm_stats[index];
7582 reg = XSEM_REG_FAST_MEMORY +
7583 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
7584 s_stats->xstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7586 reg = XSEM_REG_FAST_MEMORY +
7587 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
7588 s_stats->xstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7590 reg = XSEM_REG_FAST_MEMORY +
7591 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
7592 s_stats->xstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7594 reg = XSEM_REG_FAST_MEMORY +
7595 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
7596 s_stats->xstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
		reg = YSEM_REG_FAST_MEMORY +
			SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
		s_stats->ystorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = YSEM_REG_FAST_MEMORY +
			SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
		s_stats->ystorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = YSEM_REG_FAST_MEMORY +
			SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
		s_stats->ystorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = YSEM_REG_FAST_MEMORY +
			SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
		s_stats->ystorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

		/* PSTORM */
		reg = PSEM_REG_FAST_MEMORY +
			SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
		s_stats->pstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = PSEM_REG_FAST_MEMORY +
			SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
		s_stats->pstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = PSEM_REG_FAST_MEMORY +
			SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
		s_stats->pstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = PSEM_REG_FAST_MEMORY +
			SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
		s_stats->pstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

		/* TSTORM */
		reg = TSEM_REG_FAST_MEMORY +
			SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
		s_stats->tstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = TSEM_REG_FAST_MEMORY +
			SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
		s_stats->tstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = TSEM_REG_FAST_MEMORY +
			SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
		s_stats->tstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = TSEM_REG_FAST_MEMORY +
			SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
		s_stats->tstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

		/* MSTORM */
		reg = MSEM_REG_FAST_MEMORY +
			SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
		s_stats->mstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = MSEM_REG_FAST_MEMORY +
			SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
		s_stats->mstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = MSEM_REG_FAST_MEMORY +
			SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
		s_stats->mstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = MSEM_REG_FAST_MEMORY +
			SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
		s_stats->mstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

		/* USTORM */
		reg = USEM_REG_FAST_MEMORY +
			SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
		s_stats->ustorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = USEM_REG_FAST_MEMORY +
			SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
		s_stats->ustorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = USEM_REG_FAST_MEMORY +
			SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
		s_stats->ustorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = USEM_REG_FAST_MEMORY +
			SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
		s_stats->ustorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
		ecore_ptt_release(hwfn, p_ptt);
	}

	ha->storm_stats_index++;

	return;
}
/*
 * Name: qlnx_dump_buf8
 * Function: dumps a buffer as bytes
 */
static void
qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, uint32_t len)
{
	device_t	dev;
	uint32_t	i = 0;
	uint32_t	j;
	int		n;
	char		line[64];
	uint8_t		*buf;

	dev = ha->pci_dev;
	buf = dbuf;

	device_printf(dev, "%s: %s 0x%x dump start\n", __func__, msg, len);

	/* Print the buffer sixteen bytes per line. */
	while (len >= 16) {
		device_printf(dev, "0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x %02x"
			" %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3],
			buf[4], buf[5], buf[6], buf[7],
			buf[8], buf[9], buf[10], buf[11],
			buf[12], buf[13], buf[14], buf[15]);
		i += 16;
		len -= 16;
		buf += 16;
	}

	/*
	 * Print any remaining bytes (1..15) on a final line; build the
	 * line first so it is emitted by a single device_printf().
	 */
	if (len) {
		n = 0;
		for (j = 0; j < len; j++)
			n += snprintf(&line[n], sizeof(line) - n,
				" %02x", buf[j]);
		device_printf(dev, "0x%08x:%s\n", i, line);
	}

	device_printf(dev, "%s: %s dump end\n", __func__, msg);

	return;
}
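/*
 * SR-IOV support: PF-side helpers and taskqueues that service VF mailbox
 * messages, function-level resets (FLR) and bulletin updates.
 */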
#ifdef CONFIG_ECORE_SRIOV

static void
__qlnx_osal_iov_vf_cleanup(struct ecore_hwfn *p_hwfn, uint8_t rel_vf_id)
{
	struct ecore_public_vf_info *vf_info;

	vf_info = ecore_iov_get_public_vf_info(p_hwfn, rel_vf_id, false);

	if (!vf_info)
		return;

	/* Clear the VF mac */
	memset(vf_info->forced_mac, 0, ETH_ALEN);

	vf_info->forced_vlan = 0;

	return;
}

void
qlnx_osal_iov_vf_cleanup(void *p_hwfn, uint8_t relative_vf_id)
{
	__qlnx_osal_iov_vf_cleanup(p_hwfn, relative_vf_id);

	return;
}
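/*
 * Name: __qlnx_iov_chk_ucast
 * Function: validates a VF unicast filter request and substitutes the
 *	VF's forced MAC into MAC/MAC_VLAN filter requests
 */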
static int
__qlnx_iov_chk_ucast(struct ecore_hwfn *p_hwfn, int vfid,
	struct ecore_filter_ucast *params)
{
	struct ecore_public_vf_info *vf;

	if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) {
		QL_DPRINT1(((qlnx_host_t *)p_hwfn->p_dev),
			"VF[%d] vport not initialized\n", vfid);
		return ECORE_INVAL;
	}

	vf = ecore_iov_get_public_vf_info(p_hwfn, vfid, true);
	if (vf == NULL)
		return ECORE_INVAL;

	/* No real decision to make; Store the configured MAC */
	if (params->type == ECORE_FILTER_MAC ||
	    params->type == ECORE_FILTER_MAC_VLAN)
		memcpy(params->mac, vf->forced_mac, ETH_ALEN);

	return 0;
}

int
qlnx_iov_chk_ucast(void *p_hwfn, int vfid, void *params)
{
	return (__qlnx_iov_chk_ucast(p_hwfn, vfid, params));
}
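/*
 * Name: __qlnx_iov_update_vport
 * Function: validates a VF vport-update request before it is processed
 */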
static int
__qlnx_iov_update_vport(struct ecore_hwfn *hwfn, uint8_t vfid,
	struct ecore_sp_vport_update_params *params, uint16_t *tlvs)
{
	uint8_t mask;
	struct ecore_filter_accept_flags *flags;

	if (!ecore_iov_vf_has_vport_instance(hwfn, vfid)) {
		QL_DPRINT1(((qlnx_host_t *)hwfn->p_dev),
			"VF[%d] vport not initialized\n", vfid);
		return ECORE_INVAL;
	}

	/* Untrusted VFs can't even be trusted to know that fact.
	 * Simply indicate everything is configured fine, and trace
	 * configuration 'behind their back'.
	 */
	mask = ECORE_ACCEPT_UCAST_UNMATCHED | ECORE_ACCEPT_MCAST_UNMATCHED;
	flags = &params->accept_flags;

	if (!(*tlvs & BIT(ECORE_IOV_VP_UPDATE_ACCEPT_PARAM)))
		return 0;

	return 0;
}

int
qlnx_iov_update_vport(void *hwfn, uint8_t vfid, void *params, uint16_t *tlvs)
{
	return (__qlnx_iov_update_vport(hwfn, vfid, params, tlvs));
}
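/*
 * Name: qlnx_find_hwfn_index
 * Function: returns the index of p_hwfn within the device's hwfn array,
 *	or -1 if it is not found
 */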
static int
qlnx_find_hwfn_index(struct ecore_hwfn *p_hwfn)
{
	int			i;
	struct ecore_dev	*cdev;

	cdev = p_hwfn->p_dev;

	for (i = 0; i < cdev->num_hwfns; i++) {
		if (&cdev->hwfns[i] == p_hwfn)
			break;
	}

	if (i >= cdev->num_hwfns)
		return (-1);

	return (i);
}
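/*
 * Name: __qlnx_pf_vf_msg
 * Function: flags a pending VF->PF mailbox message and schedules the PF
 *	taskqueue of the hw function to process it
 */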
static int
__qlnx_pf_vf_msg(struct ecore_hwfn *p_hwfn, uint16_t rel_vf_id)
{
	qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
	int i;

	QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p rel_vf_id = %d\n",
		ha, p_hwfn->p_dev, p_hwfn, rel_vf_id);

	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
		return (-1);

	if (ha->sriov_task[i].pf_taskqueue != NULL) {
		atomic_testandset_32(&ha->sriov_task[i].flags,
			QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG);

		taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
			&ha->sriov_task[i].pf_task);
	}

	return (ECORE_SUCCESS);
}

int
qlnx_pf_vf_msg(void *p_hwfn, uint16_t relative_vf_id)
{
	return (__qlnx_pf_vf_msg(p_hwfn, relative_vf_id));
}
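/*
 * Name: __qlnx_vf_flr_update
 * Function: flags a pending VF FLR and schedules the PF taskqueue of the
 *	hw function to perform the cleanup
 */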
static void
__qlnx_vf_flr_update(struct ecore_hwfn *p_hwfn)
{
	qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
	int i;

	if (!ha->sriov_initialized)
		return;

	QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p \n",
		ha, p_hwfn->p_dev, p_hwfn);

	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
		return;

	if (ha->sriov_task[i].pf_taskqueue != NULL) {
		atomic_testandset_32(&ha->sriov_task[i].flags,
			QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE);

		taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
			&ha->sriov_task[i].pf_task);
	}

	return;
}

void
qlnx_vf_flr_update(void *p_hwfn)
{
	__qlnx_vf_flr_update(p_hwfn);

	return;
}

#ifndef QLNX_VF
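/*
 * Name: qlnx_vf_bulleting_update
 * Function: flags a pending bulletin update and schedules the PF taskqueue
 *	of the hw function to post it
 */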
static void
qlnx_vf_bulleting_update(struct ecore_hwfn *p_hwfn)
{
	qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
	int i;

	QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p \n",
		ha, p_hwfn->p_dev, p_hwfn);

	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
		return;

	QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p i = %d\n",
		ha, p_hwfn->p_dev, p_hwfn, i);

	if (ha->sriov_task[i].pf_taskqueue != NULL) {
		atomic_testandset_32(&ha->sriov_task[i].flags,
			QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE);

		taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
			&ha->sriov_task[i].pf_task);
	}

	return;
}
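/*
 * Name: qlnx_initialize_sriov
 * Function: registers the PF/VF configuration schemas and attaches the
 *	device to the pci_iov framework
 */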
static void
qlnx_initialize_sriov(qlnx_host_t *ha)
{
	device_t	dev;
	nvlist_t	*pf_schema, *vf_schema;
	int		iov_error;

	dev = ha->pci_dev;

	pf_schema = pci_iov_schema_alloc_node();
	vf_schema = pci_iov_schema_alloc_node();

	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
		IOV_SCHEMA_HASDEFAULT, FALSE);
	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
		IOV_SCHEMA_HASDEFAULT, FALSE);
	pci_iov_schema_add_uint16(vf_schema, "num-queues",
		IOV_SCHEMA_HASDEFAULT, 1);

	iov_error = pci_iov_attach(dev, pf_schema, vf_schema);

	if (iov_error != 0) {
		ha->sriov_initialized = 0;
	} else {
		device_printf(dev, "SRIOV initialized\n");
		ha->sriov_initialized = 1;
	}

	return;
}
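/*
 * Name: qlnx_sriov_disable
 * Function: disables all VFs; waits for each started VF to stop and then
 *	releases its hardware resources
 */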
static void
qlnx_sriov_disable(qlnx_host_t *ha)
{
	struct ecore_dev	*cdev;
	int			i, j;

	cdev = &ha->cdev;

	ecore_iov_set_vfs_to_disable(cdev, true);

	for_each_hwfn(cdev, i) {
		struct ecore_hwfn *hwfn = &cdev->hwfns[i];
		struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn);

		if (!ptt) {
			QL_DPRINT1(ha, "Failed to acquire ptt\n");
			return;
		}

		/* Clean WFQ db and configure equal weight for all vports */
		ecore_clean_wfq_db(hwfn, ptt);

		ecore_for_each_vf(hwfn, j) {
			int k = 0;

			if (!ecore_iov_is_valid_vfid(hwfn, j, true, false))
				continue;

			if (ecore_iov_is_vf_started(hwfn, j)) {
				/* Wait until VF is disabled before releasing */
				for (k = 0; k < 100; k++) {
					if (!ecore_iov_is_vf_stopped(hwfn, j))
						qlnx_mdelay(__func__, 10);
					else
						break;
				}
			}

			if (k < 100)
				ecore_iov_release_hw_for_vf(&cdev->hwfns[i],
					ptt, j);
			else
				QL_DPRINT1(ha,
					"Timeout waiting for VF's FLR to end\n");
		}

		ecore_ptt_release(hwfn, ptt);
	}

	ecore_iov_set_vfs_to_disable(cdev, false);

	return;
}
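/*
 * Name: qlnx_sriov_enable_qid_config
 * Function: computes the queue, vport and RSS-engine ids assigned to a VF
 *	given an equal per-VF queue distribution
 */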
static void
qlnx_sriov_enable_qid_config(struct ecore_hwfn *hwfn, u16 vfid,
	struct ecore_iov_vf_init_params *params)
{
	u16 base, i;

	/* Since we have an equal resource distribution per-VF, and we assume
	 * PF has acquired the ECORE_PF_L2_QUE first queues, we start setting
	 * sequentially from there.
	 */
	base = FEAT_NUM(hwfn, ECORE_PF_L2_QUE) + vfid * params->num_queues;

	params->rel_vf_id = vfid;

	for (i = 0; i < params->num_queues; i++) {
		params->req_rx_queue[i] = base + i;
		params->req_tx_queue[i] = base + i;
	}

	/* PF uses indices 0 for itself; Set vport/RSS afterwards */
	params->vport_id = vfid + 1;
	params->rss_eng_id = vfid + 1;

	return;
}
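/*
 * Name: qlnx_iov_init
 * Function: pci_iov init callback; initializes the hardware for num_vfs
 *	VFs and distributes queues among them
 */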
static int
qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *nvlist_params)
{
	qlnx_host_t				*ha;
	struct ecore_dev			*cdev;
	struct ecore_iov_vf_init_params		params;
	int					ret, j, i;
	uint32_t				max_vfs;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "%s: cannot get softc\n", __func__);
		return (-1);
	}

	if (qlnx_create_pf_taskqueues(ha) != 0)
		goto qlnx_iov_init_err0;

	cdev = &ha->cdev;
	max_vfs = RESC_NUM(&cdev->hwfns[0], ECORE_VPORT);

	QL_DPRINT2(ha, " dev = %p enter num_vfs = %d max_vfs = %d\n",
		dev, num_vfs, max_vfs);

	if (num_vfs >= max_vfs) {
		QL_DPRINT1(ha, "Can start at most %d VFs\n",
			(RESC_NUM(&cdev->hwfns[0], ECORE_VPORT) - 1));
		goto qlnx_iov_init_err0;
	}

	ha->vf_attr = malloc(((sizeof (qlnx_vf_attr_t) * num_vfs)), M_QLNXBUF,
				M_NOWAIT);
	if (ha->vf_attr == NULL)
		goto qlnx_iov_init_err0;

	memset(&params, 0, sizeof(params));

	/* Initialize HW for VF access */
	for_each_hwfn(cdev, j) {
		struct ecore_hwfn *hwfn = &cdev->hwfns[j];
		struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn);

		/* Make sure not to use more than 16 queues per VF */
		params.num_queues = min_t(int,
			(FEAT_NUM(hwfn, ECORE_VF_L2_QUE) / num_vfs), 16);

		if (!ptt) {
			QL_DPRINT1(ha, "Failed to acquire ptt\n");
			goto qlnx_iov_init_err1;
		}

		for (i = 0; i < num_vfs; i++) {
			if (!ecore_iov_is_valid_vfid(hwfn, i, false, true))
				continue;

			qlnx_sriov_enable_qid_config(hwfn, i, &params);

			ret = ecore_iov_init_hw_for_vf(hwfn, ptt, &params);
			if (ret) {
				QL_DPRINT1(ha, "Failed to enable VF[%d]\n", i);
				ecore_ptt_release(hwfn, ptt);
				goto qlnx_iov_init_err1;
			}
		}

		ecore_ptt_release(hwfn, ptt);
	}

	ha->num_vfs = num_vfs;
	qlnx_inform_vf_link_state(&cdev->hwfns[0], ha);

	QL_DPRINT2(ha, " dev = %p exit num_vfs = %d\n", dev, num_vfs);

	return (0);

qlnx_iov_init_err1:
	qlnx_sriov_disable(ha);

qlnx_iov_init_err0:
	qlnx_destroy_pf_taskqueues(ha);
	ha->num_vfs = 0;

	return (-1);
}
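/*
 * Name: qlnx_iov_uninit
 * Function: pci_iov uninit callback; disables SR-IOV and frees the per-VF
 *	attributes
 */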
static void
qlnx_iov_uninit(device_t dev)
{
	qlnx_host_t	*ha;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "%s: cannot get softc\n", __func__);
		return;
	}

	QL_DPRINT2(ha, " dev = %p enter\n", dev);

	qlnx_sriov_disable(ha);
	qlnx_destroy_pf_taskqueues(ha);

	free(ha->vf_attr, M_QLNXBUF);
	ha->vf_attr = NULL;

	ha->num_vfs = 0;

	QL_DPRINT2(ha, " dev = %p exit\n", dev);

	return;
}
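/*
 * Name: qlnx_iov_add_vf
 * Function: pci_iov add_vf callback; applies the per-VF parameters
 *	(currently the "mac-addr" property) when a VF is created
 */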
static int
qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
{
	qlnx_host_t		*ha;
	qlnx_vf_attr_t		*vf_attr;
	unsigned const char	*mac;
	size_t			size;
	struct ecore_hwfn	*p_hwfn;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "%s: cannot get softc\n", __func__);
		return (-1);
	}

	QL_DPRINT2(ha, " dev = %p enter vfnum = %d\n", dev, vfnum);

	if (vfnum > (ha->num_vfs - 1)) {
		QL_DPRINT1(ha, " VF[%d] is greater than max allowed [%d]\n",
			vfnum, (ha->num_vfs - 1));
	}

	vf_attr = &ha->vf_attr[vfnum];

	if (nvlist_exists_binary(params, "mac-addr")) {
		mac = nvlist_get_binary(params, "mac-addr", &size);
		bcopy(mac, vf_attr->mac_addr, ETHER_ADDR_LEN);

		device_printf(dev,
			"%s: mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
			__func__, vf_attr->mac_addr[0],
			vf_attr->mac_addr[1], vf_attr->mac_addr[2],
			vf_attr->mac_addr[3], vf_attr->mac_addr[4],
			vf_attr->mac_addr[5]);

		p_hwfn = &ha->cdev.hwfns[0];
		ecore_iov_bulletin_set_mac(p_hwfn, vf_attr->mac_addr,
			vfnum);
	}

	QL_DPRINT2(ha, " dev = %p exit vfnum = %d\n", dev, vfnum);

	return (0);
}
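/*
 * Name: qlnx_handle_vf_msg
 * Function: processes pending VF mailbox requests; reschedules itself if
 *	a PTT cannot be acquired
 */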
static void
qlnx_handle_vf_msg(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
{
	uint64_t events[ECORE_VF_ARRAY_LENGTH];
	struct ecore_ptt *ptt;
	int i;

	ptt = ecore_ptt_acquire(p_hwfn);
	if (!ptt) {
		QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
		__qlnx_pf_vf_msg(p_hwfn, 0);
		return;
	}

	ecore_iov_pf_get_pending_events(p_hwfn, events);

	QL_DPRINT2(ha, "Event mask of VF events:"
		" 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 "\n",
		events[0], events[1], events[2]);

	ecore_for_each_vf(p_hwfn, i) {
		/* Skip VFs with no pending messages */
		if (!(events[i / 64] & (1ULL << (i % 64))))
			continue;

		QL_DPRINT2(ha,
			"Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
			i, p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i);

		/* Copy VF's message to PF's request buffer for that VF */
		if (ecore_iov_copy_vf_msg(p_hwfn, ptt, i))
			continue;

		ecore_iov_process_mbx_req(p_hwfn, ptt, i);
	}

	ecore_ptt_release(p_hwfn, ptt);

	return;
}
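/*
 * Name: qlnx_handle_vf_flr_update
 * Function: performs VF FLR cleanup; reschedules itself if a PTT cannot
 *	be acquired
 */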
static void
qlnx_handle_vf_flr_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
{
	struct ecore_ptt *ptt;
	int ret;

	ptt = ecore_ptt_acquire(p_hwfn);
	if (!ptt) {
		QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
		__qlnx_vf_flr_update(p_hwfn);
		return;
	}

	ret = ecore_iov_vf_flr_cleanup(p_hwfn, ptt);

	if (ret) {
		QL_DPRINT1(ha, "ecore_iov_vf_flr_cleanup failed; re-scheduling\n");
	}

	ecore_ptt_release(p_hwfn, ptt);

	return;
}
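/*
 * Name: qlnx_handle_bulletin_update
 * Function: posts the bulletin board to every VF; reschedules itself if
 *	a PTT cannot be acquired
 */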
static void
qlnx_handle_bulletin_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
{
	struct ecore_ptt *ptt;
	int i;

	ptt = ecore_ptt_acquire(p_hwfn);
	if (!ptt) {
		QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
		qlnx_vf_bulleting_update(p_hwfn);
		return;
	}

	ecore_for_each_vf(p_hwfn, i) {
		QL_DPRINT1(ha, "ecore_iov_post_vf_bulletin[%p, %d]\n",
			p_hwfn, i);
		ecore_iov_post_vf_bulletin(p_hwfn, i, ptt);
	}

	ecore_ptt_release(p_hwfn, ptt);

	return;
}
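/*
 * Name: qlnx_pf_taskqueue
 * Function: PF taskqueue handler; dispatches whatever VF message, FLR or
 *	bulletin work the notification callbacks have flagged
 */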
static void
qlnx_pf_taskqueue(void *context, int pending)
{
	struct ecore_hwfn	*p_hwfn;
	qlnx_host_t		*ha;
	int			i;

	p_hwfn = context;

	if (p_hwfn == NULL)
		return;

	ha = (qlnx_host_t *)(p_hwfn->p_dev);

	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
		return;

	if (atomic_testandclear_32(&ha->sriov_task[i].flags,
		QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG))
		qlnx_handle_vf_msg(ha, p_hwfn);

	if (atomic_testandclear_32(&ha->sriov_task[i].flags,
		QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE))
		qlnx_handle_vf_flr_update(ha, p_hwfn);

	if (atomic_testandclear_32(&ha->sriov_task[i].flags,
		QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE))
		qlnx_handle_bulletin_update(ha, p_hwfn);

	return;
}
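/*
 * Name: qlnx_create_pf_taskqueues
 * Function: creates one PF taskqueue (with its own thread) per hw function
 */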
static int
qlnx_create_pf_taskqueues(qlnx_host_t *ha)
{
	int	i;
	char	tq_name[32];

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];

		bzero(tq_name, sizeof (tq_name));
		snprintf(tq_name, sizeof (tq_name), "ql_pf_tq_%d", i);

		TASK_INIT(&ha->sriov_task[i].pf_task, 0, qlnx_pf_taskqueue,
			p_hwfn);

		ha->sriov_task[i].pf_taskqueue = taskqueue_create(tq_name,
			M_NOWAIT, taskqueue_thread_enqueue,
			&ha->sriov_task[i].pf_taskqueue);

		if (ha->sriov_task[i].pf_taskqueue == NULL)
			return (-1);

		taskqueue_start_threads(&ha->sriov_task[i].pf_taskqueue, 1,
			PI_NET, "%s", tq_name);

		QL_DPRINT1(ha, "%p\n", ha->sriov_task[i].pf_taskqueue);
	}

	return (0);
}
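/*
 * Name: qlnx_destroy_pf_taskqueues
 * Function: drains and frees the per-hw-function PF taskqueues
 */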
static void
qlnx_destroy_pf_taskqueues(qlnx_host_t *ha)
{
	int	i;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		if (ha->sriov_task[i].pf_taskqueue != NULL) {
			taskqueue_drain(ha->sriov_task[i].pf_taskqueue,
				&ha->sriov_task[i].pf_task);
			taskqueue_free(ha->sriov_task[i].pf_taskqueue);
			ha->sriov_task[i].pf_taskqueue = NULL;
		}
	}

	return;
}
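/*
 * Name: qlnx_inform_vf_link_state
 * Function: copies the PF link state/params/capabilities into the bulletin
 *	of every possible VF and triggers a bulletin update
 */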
static void
qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha)
{
	struct ecore_mcp_link_capabilities caps;
	struct ecore_mcp_link_params params;
	struct ecore_mcp_link_state link;
	int i;

	if (!p_hwfn->pf_iov_info)
		return;

	memset(&params, 0, sizeof(struct ecore_mcp_link_params));
	memset(&link, 0, sizeof(struct ecore_mcp_link_state));
	memset(&caps, 0, sizeof(struct ecore_mcp_link_capabilities));

	memcpy(&caps, ecore_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
	memcpy(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link));
	memcpy(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params));

	QL_DPRINT2(ha, "called\n");

	/* Update bulletin of all future possible VFs with link configuration */
	for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) {
		/* Modify link according to the VF's configured link state */
		link.link_up = false;

		if (ha->link_up) {
			link.link_up = true;
			/* Set speed according to maximum supported by HW.
			 * that is 40G for regular devices and 100G for CMT
			 * mode devices.
			 */
			link.speed = (p_hwfn->p_dev->num_hwfns > 1) ?
					100000 : link.speed;
		}

		QL_DPRINT2(ha, "link [%d] = %d\n", i, link.link_up);

		ecore_iov_set_link(p_hwfn, i, &params, &link, &caps);
	}

	qlnx_vf_bulleting_update(p_hwfn);

	return;
}
#endif /* #ifndef QLNX_VF */
#endif /* #ifdef CONFIG_ECORE_SRIOV */