/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Author : David C Somayajulu, Cavium, Inc., San Jose, CA 95131.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "qlnx_os.h"
#include "bcm_osal.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore.h"
#include "ecore_chain.h"
#include "ecore_status.h"
#include "ecore_hw.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_int.h"
#include "ecore_cxt.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"
#include "ecore_dev_api.h"
#include "ecore_l2_api.h"
#include "ecore_mcp.h"
#include "ecore_hw_defs.h"
#include "mcp_public.h"
#include "ecore_iro.h"
#include "nvm_cfg.h"
#include "ecore_dbg_fw_funcs.h"
#include "ecore_iov_api.h"
#include "ecore_vf_api.h"

#include "qlnx_ioctl.h"
#include "qlnx_def.h"
#include "qlnx_ver.h"

#ifdef QLNX_ENABLE_IWARP
#include "qlnx_rdma.h"
#endif /* #ifdef QLNX_ENABLE_IWARP */
/*
 * ioctl related functions
 */
static void qlnx_add_sysctls(qlnx_host_t *ha);
static void qlnx_release(qlnx_host_t *ha);
static void qlnx_fp_isr(void *arg);
static void qlnx_init_ifnet(device_t dev, qlnx_host_t *ha);
static void qlnx_init(void *arg);
static void qlnx_init_locked(qlnx_host_t *ha);
static int qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi);
static int qlnx_set_promisc(qlnx_host_t *ha);
static int qlnx_set_allmulti(qlnx_host_t *ha);
static int qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int qlnx_media_change(struct ifnet *ifp);
static void qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
static void qlnx_stop(qlnx_host_t *ha);
static int qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp,
		struct mbuf **m_headp);
static int qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha);
static uint32_t qlnx_get_optics(qlnx_host_t *ha,
		struct qlnx_link_output *if_link);
static int qlnx_transmit(struct ifnet *ifp, struct mbuf *mp);
static int qlnx_transmit_locked(struct ifnet *ifp, struct qlnx_fastpath *fp,
		struct mbuf *mp);
static void qlnx_qflush(struct ifnet *ifp);

static int qlnx_alloc_parent_dma_tag(qlnx_host_t *ha);
static void qlnx_free_parent_dma_tag(qlnx_host_t *ha);
static int qlnx_alloc_tx_dma_tag(qlnx_host_t *ha);
static void qlnx_free_tx_dma_tag(qlnx_host_t *ha);
static int qlnx_alloc_rx_dma_tag(qlnx_host_t *ha);
static void qlnx_free_rx_dma_tag(qlnx_host_t *ha);

static int qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver);
static int qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size);

static int qlnx_nic_setup(struct ecore_dev *cdev,
		struct ecore_pf_params *func_params);
static int qlnx_nic_start(struct ecore_dev *cdev);
static int qlnx_slowpath_start(qlnx_host_t *ha);
static int qlnx_slowpath_stop(qlnx_host_t *ha);
static int qlnx_init_hw(qlnx_host_t *ha);
static void qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
		char ver_str[VER_SIZE]);
static void qlnx_unload(qlnx_host_t *ha);
static int qlnx_load(qlnx_host_t *ha);
static void qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
		uint32_t add_multi);
static void qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf,
		uint32_t len);
static int qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq);
static void qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq);
static void qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn,
		struct qlnx_rx_queue *rxq);
static int qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter);
static int qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords,
		int hwfn_index);
static int qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords,
		int hwfn_index);
static void qlnx_timer(void *arg);
static int qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
static void qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
static void qlnx_trigger_dump(qlnx_host_t *ha);
static uint16_t qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp,
		struct qlnx_tx_queue *txq);
static void qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
		struct qlnx_tx_queue *txq);
static int qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
		int lro_enable);
static void qlnx_fp_taskqueue(void *context, int pending);
static void qlnx_sample_storm_stats(qlnx_host_t *ha);
static int qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
		struct qlnx_agg_info *tpa);
static void qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa);

#if __FreeBSD_version >= 1100000
static uint64_t qlnx_get_counter(if_t ifp, ift_counter cnt);
#endif /* #if __FreeBSD_version >= 1100000 */
/*
 * Hooks to the Operating Systems
 */
static int qlnx_pci_probe (device_t);
static int qlnx_pci_attach (device_t);
static int qlnx_pci_detach (device_t);
#ifdef CONFIG_ECORE_SRIOV

static int qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params);
static void qlnx_iov_uninit(device_t dev);
static int qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params);
static void qlnx_initialize_sriov(qlnx_host_t *ha);
static void qlnx_pf_taskqueue(void *context, int pending);
static int qlnx_create_pf_taskqueues(qlnx_host_t *ha);
static void qlnx_destroy_pf_taskqueues(qlnx_host_t *ha);
static void qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha);

#endif /* #ifdef CONFIG_ECORE_SRIOV */
static device_method_t qlnx_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, qlnx_pci_probe),
	DEVMETHOD(device_attach, qlnx_pci_attach),
	DEVMETHOD(device_detach, qlnx_pci_detach),

#ifdef CONFIG_ECORE_SRIOV
	DEVMETHOD(pci_iov_init, qlnx_iov_init),
	DEVMETHOD(pci_iov_uninit, qlnx_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, qlnx_iov_add_vf),
#endif /* #ifdef CONFIG_ECORE_SRIOV */
	DEVMETHOD_END
};

static driver_t qlnx_pci_driver = {
	"ql", qlnx_pci_methods, sizeof (qlnx_host_t),
};

static devclass_t qlnx_devclass;

MODULE_VERSION(if_qlnxe, 1);
DRIVER_MODULE(if_qlnxe, pci, qlnx_pci_driver, qlnx_devclass, 0, 0);

MODULE_DEPEND(if_qlnxe, pci, 1, 1, 1);
MODULE_DEPEND(if_qlnxe, ether, 1, 1, 1);
#ifdef QLNX_VF

static device_method_t qlnxv_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, qlnx_pci_probe),
	DEVMETHOD(device_attach, qlnx_pci_attach),
	DEVMETHOD(device_detach, qlnx_pci_detach),
	DEVMETHOD_END
};

static driver_t qlnxv_pci_driver = {
	"ql", qlnxv_pci_methods, sizeof (qlnx_host_t),
};

static devclass_t qlnxv_devclass;
MODULE_VERSION(if_qlnxev, 1);
DRIVER_MODULE(if_qlnxev, pci, qlnxv_pci_driver, qlnxv_devclass, 0, 0);

MODULE_DEPEND(if_qlnxev, pci, 1, 1, 1);
MODULE_DEPEND(if_qlnxev, ether, 1, 1, 1);

#endif /* #ifdef QLNX_VF */
MALLOC_DEFINE(M_QLNXBUF, "qlnxbuf", "Buffers for qlnx driver");

char qlnx_dev_str[128];
char qlnx_ver_str[VER_SIZE];
char qlnx_name_str[NAME_SIZE];
/*
 * Some PCI Configuration Space Related Defines
 */

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC		0x1077
#endif

/* 40G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1634
#define QLOGIC_PCI_DEVICE_ID_1634	0x1634
#endif

/* 100G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1644
#define QLOGIC_PCI_DEVICE_ID_1644	0x1644
#endif

/* 25G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1656
#define QLOGIC_PCI_DEVICE_ID_1656	0x1656
#endif

/* 50G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1654
#define QLOGIC_PCI_DEVICE_ID_1654	0x1654
#endif

/* 10G/25G/40G Adapter QLE41xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_8070
#define QLOGIC_PCI_DEVICE_ID_8070	0x8070
#endif

/* SRIOV Device (All Speeds) Adapter QLE41xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_8090
#define QLOGIC_PCI_DEVICE_ID_8090	0x8090
#endif

SYSCTL_NODE(_hw, OID_AUTO, qlnxe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "qlnxe driver parameters");
/* Number of Queues: 0 (Auto) or 1 to 32 (fixed queue number) */
static int qlnxe_queue_count = QLNX_DEFAULT_RSS;

#if __FreeBSD_version < 1100000
TUNABLE_INT("hw.qlnxe.queue_count", &qlnxe_queue_count);
#endif

SYSCTL_INT(_hw_qlnxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
    &qlnxe_queue_count, 0, "Multi-Queue queue count");
/*
 * Note on RDMA personality setting
 *
 * Read the personality configured in NVRAM.
 * If the personality is ETH_ONLY, ETH_IWARP or ETH_ROCE and
 * the configured personality in sysctl is QLNX_PERSONALITY_DEFAULT,
 * use the personality in NVRAM.
 *
 * Otherwise use the personality configured in sysctl.
 */

#define QLNX_PERSONALITY_DEFAULT	0x0  /* use personality in NVRAM */
#define QLNX_PERSONALITY_ETH_ONLY	0x1  /* Override with ETH_ONLY */
#define QLNX_PERSONALITY_ETH_IWARP	0x2  /* Override with ETH_IWARP */
#define QLNX_PERSONALITY_ETH_ROCE	0x3  /* Override with ETH_ROCE */
#define QLNX_PERSONALITY_BITS_PER_FUNC	4
#define QLNX_PERSONALIY_MASK		0xF
/* RDMA configuration; 64-bit field allows setting for 16 physical functions */
static uint64_t qlnxe_rdma_configuration = 0x22222222;
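/*
 * Each physical function consumes QLNX_PERSONALITY_BITS_PER_FUNC (4) bits
 * of this field, extracted with QLNX_PERSONALIY_MASK (0xF).  The default
 * 0x22222222 therefore requests QLNX_PERSONALITY_ETH_IWARP (0x2) for the
 * first eight PCI functions; the remaining nibbles are 0x0 (use NVRAM).
 */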
#if __FreeBSD_version < 1100000

TUNABLE_QUAD("hw.qlnxe.rdma_configuration", &qlnxe_rdma_configuration);

SYSCTL_UQUAD(_hw_qlnxe, OID_AUTO, rdma_configuration, CTLFLAG_RDTUN,
    &qlnxe_rdma_configuration, 0, "RDMA Configuration");
#else
SYSCTL_U64(_hw_qlnxe, OID_AUTO, rdma_configuration, CTLFLAG_RDTUN,
    &qlnxe_rdma_configuration, 0, "RDMA Configuration");

#endif /* #if __FreeBSD_version < 1100000 */
static int
qlnx_vf_device(qlnx_host_t *ha)
{
	uint16_t device_id = ha->device_id;

	if (device_id == QLOGIC_PCI_DEVICE_ID_8090)
		return (0);
	return (-1);
}

static int
qlnx_valid_device(qlnx_host_t *ha)
{
	uint16_t device_id = ha->device_id;

#ifndef QLNX_VF
	if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_1644) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_1654) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_8070))
		return (0);
#else
	if (device_id == QLOGIC_PCI_DEVICE_ID_8090)
		return (0);
#endif /* #ifndef QLNX_VF */
	return (-1);
}
#ifdef QLNX_ENABLE_IWARP
static int
qlnx_rdma_supported(struct qlnx_host *ha)
{
	uint16_t device_id = pci_get_device(ha->pci_dev);

	if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_1654) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_8070))
		return (0);

	return (-1);
}
#endif /* #ifdef QLNX_ENABLE_IWARP */
/*
 * Name: qlnx_pci_probe
 * Function: Validate the PCI device to be a QLA80XX device
 */
static int
qlnx_pci_probe(device_t dev)
{
	snprintf(qlnx_ver_str, sizeof(qlnx_ver_str), "v%d.%d.%d",
		QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, QLNX_VERSION_BUILD);
	snprintf(qlnx_name_str, sizeof(qlnx_name_str), "qlnx");

	if (pci_get_vendor(dev) != PCI_VENDOR_QLOGIC) {
		return (ENXIO);
	}

	switch (pci_get_device(dev)) {
#ifndef QLNX_VF

	case QLOGIC_PCI_DEVICE_ID_1644:
		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
			"Qlogic 100GbE PCI CNA Adapter-Ethernet Function",
			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
			QLNX_VERSION_BUILD);
		device_set_desc_copy(dev, qlnx_dev_str);
		break;

	case QLOGIC_PCI_DEVICE_ID_1634:
		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
			"Qlogic 40GbE PCI CNA Adapter-Ethernet Function",
			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
			QLNX_VERSION_BUILD);
		device_set_desc_copy(dev, qlnx_dev_str);
		break;

	case QLOGIC_PCI_DEVICE_ID_1656:
		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
			"Qlogic 25GbE PCI CNA Adapter-Ethernet Function",
			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
			QLNX_VERSION_BUILD);
		device_set_desc_copy(dev, qlnx_dev_str);
		break;

	case QLOGIC_PCI_DEVICE_ID_1654:
		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
			"Qlogic 50GbE PCI CNA Adapter-Ethernet Function",
			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
			QLNX_VERSION_BUILD);
		device_set_desc_copy(dev, qlnx_dev_str);
		break;

	case QLOGIC_PCI_DEVICE_ID_8070:
		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
			"Qlogic 10GbE/25GbE/40GbE PCI CNA (AH)"
			" Adapter-Ethernet Function",
			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
			QLNX_VERSION_BUILD);
		device_set_desc_copy(dev, qlnx_dev_str);
		break;

#else
	case QLOGIC_PCI_DEVICE_ID_8090:
		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
			"Qlogic SRIOV PCI CNA (AH) "
			"Adapter-Ethernet Function",
			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
			QLNX_VERSION_BUILD);
		device_set_desc_copy(dev, qlnx_dev_str);
		break;
#endif /* #ifndef QLNX_VF */

	default:
		return (ENXIO);
	}

#ifdef QLNX_ENABLE_IWARP
	qlnx_rdma_init();
#endif /* #ifdef QLNX_ENABLE_IWARP */

	return (BUS_PROBE_DEFAULT);
}
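/*
 * qlnx_num_tx_compl() returns the number of pending transmit completions:
 * the distance from the driver's chain consumer index to the hardware
 * consumer index.  Both indices are 16-bit counters, so when the hardware
 * index has wrapped (hw < sw) the distance is computed modulo 2^16.
 */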
static uint16_t
qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp,
	struct qlnx_tx_queue *txq)
{
	uint16_t hw_bd_cons, ecore_cons_idx, diff;

	hw_bd_cons = le16toh(*txq->hw_cons_ptr);
	ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl);

	if (hw_bd_cons < ecore_cons_idx)
		diff = (1 << 16) - (ecore_cons_idx - hw_bd_cons);
	else
		diff = hw_bd_cons - ecore_cons_idx;

	return (diff);
}
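/*
 * Slow path interrupt handler.  No slow path work is done at interrupt
 * level; the handler only locates the hw function that raised the
 * interrupt and enqueues its slow path task.
 */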
static void
qlnx_sp_intr(void *arg)
{
	struct ecore_hwfn *p_hwfn;
	qlnx_host_t *ha;
	int i;

	p_hwfn = arg;
	if (p_hwfn == NULL) {
		printf("%s: spurious slowpath intr\n", __func__);
		return;
	}

	ha = (qlnx_host_t *)p_hwfn->p_dev;
	QL_DPRINT2(ha, "enter\n");

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		if (&ha->cdev.hwfns[i] == p_hwfn) {
			taskqueue_enqueue(ha->sp_taskqueue[i], &ha->sp_task[i]);
			break;
		}
	}
	QL_DPRINT2(ha, "exit\n");
}
static void
qlnx_sp_taskqueue(void *context, int pending)
{
	struct ecore_hwfn *p_hwfn = context;

	if (p_hwfn != NULL)
		qlnx_sp_isr(p_hwfn);
}
static int
qlnx_create_sp_taskqueues(qlnx_host_t *ha)
{
	int i;
	uint8_t tq_name[32];

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];

		bzero(tq_name, sizeof (tq_name));
		snprintf(tq_name, sizeof (tq_name), "ql_sp_tq_%d", i);

		TASK_INIT(&ha->sp_task[i], 0, qlnx_sp_taskqueue, p_hwfn);

		ha->sp_taskqueue[i] = taskqueue_create(tq_name, M_NOWAIT,
			taskqueue_thread_enqueue, &ha->sp_taskqueue[i]);

		if (ha->sp_taskqueue[i] == NULL)
			return (-1);

		taskqueue_start_threads(&ha->sp_taskqueue[i], 1, PI_NET, "%s",
			tq_name);

		QL_DPRINT1(ha, "%p\n", ha->sp_taskqueue[i]);
	}

	return (0);
}
static void
qlnx_destroy_sp_taskqueues(qlnx_host_t *ha)
{
	int i;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		if (ha->sp_taskqueue[i] != NULL) {
			taskqueue_drain(ha->sp_taskqueue[i], &ha->sp_task[i]);
			taskqueue_free(ha->sp_taskqueue[i]);
		}
	}
}
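/*
 * Fast path taskqueue handler: drains the per-queue buf_ring while the
 * interface is running.  The tx mutex is only trylock'd; if another
 * context already holds it, that context performs the drain instead.
 */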
static void
qlnx_fp_taskqueue(void *context, int pending)
{
	struct qlnx_fastpath *fp;
	qlnx_host_t *ha;
	struct ifnet *ifp;

#ifdef QLNX_TRACE_PERF_DATA
	uint64_t tx_pkts = 0, tx_compl = 0;
#endif

	fp = context;
	if (fp == NULL)
		return;

	ha = (qlnx_host_t *)fp->edev;
	ifp = ha->ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		if (!drbr_empty(ifp, fp->tx_br)) {
			if (mtx_trylock(&fp->tx_mtx)) {
#ifdef QLNX_TRACE_PERF_DATA
				tx_pkts = fp->tx_pkts_transmitted;
				tx_compl = fp->tx_pkts_completed;
#endif

				qlnx_transmit_locked(ifp, fp, NULL);

#ifdef QLNX_TRACE_PERF_DATA
				fp->tx_pkts_trans_fp +=
					(fp->tx_pkts_transmitted - tx_pkts);
				fp->tx_pkts_compl_fp +=
					(fp->tx_pkts_completed - tx_compl);
#endif
				mtx_unlock(&fp->tx_mtx);
			}
		}
	}

	QL_DPRINT2(ha, "exit\n");
	return;
}
static int
qlnx_create_fp_taskqueues(qlnx_host_t *ha)
{
	int i;
	uint8_t tq_name[32];
	struct qlnx_fastpath *fp;

	for (i = 0; i < ha->num_rss; i++) {
		fp = &ha->fp_array[i];

		bzero(tq_name, sizeof (tq_name));
		snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i);

		TASK_INIT(&fp->fp_task, 0, qlnx_fp_taskqueue, fp);

		fp->fp_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
			taskqueue_thread_enqueue, &fp->fp_taskqueue);

		if (fp->fp_taskqueue == NULL)
			return (-1);

		taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
			tq_name);

		QL_DPRINT1(ha, "%p\n", fp->fp_taskqueue);
	}

	return (0);
}
static void
qlnx_destroy_fp_taskqueues(qlnx_host_t *ha)
{
	int i;
	struct qlnx_fastpath *fp;

	for (i = 0; i < ha->num_rss; i++) {
		fp = &ha->fp_array[i];

		if (fp->fp_taskqueue != NULL) {
			taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
			taskqueue_free(fp->fp_taskqueue);
			fp->fp_taskqueue = NULL;
		}
	}
}
static void
qlnx_drain_fp_taskqueues(qlnx_host_t *ha)
{
	int i;
	struct qlnx_fastpath *fp;

	for (i = 0; i < ha->num_rss; i++) {
		fp = &ha->fp_array[i];

		if (fp->fp_taskqueue != NULL)
			taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
	}
}
static void
qlnx_get_params(qlnx_host_t *ha)
{
	if ((qlnxe_queue_count < 0) || (qlnxe_queue_count > QLNX_MAX_RSS)) {
		device_printf(ha->pci_dev, "invalid queue_count value (%d)\n",
			qlnxe_queue_count);
		qlnxe_queue_count = 0;
	}
}
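/*
 * Error recovery task: runs after qlnx_trigger_dump() has flagged the
 * adapter.  It stops the interface, restarts the slow path (detaching
 * and re-attaching the RDMA device around the restart when iWARP is
 * enabled), then re-arms the watchdog timer.
 */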
static void
qlnx_error_recovery_taskqueue(void *context, int pending)
{
	qlnx_host_t *ha;

	ha = context;

	QL_DPRINT2(ha, "enter\n");

	QLNX_LOCK(ha);
	qlnx_stop(ha);
	QLNX_UNLOCK(ha);

#ifdef QLNX_ENABLE_IWARP
	qlnx_rdma_dev_remove(ha);
#endif /* #ifdef QLNX_ENABLE_IWARP */

	qlnx_slowpath_stop(ha);
	qlnx_slowpath_start(ha);

#ifdef QLNX_ENABLE_IWARP
	qlnx_rdma_dev_add(ha);
#endif /* #ifdef QLNX_ENABLE_IWARP */

	qlnx_init(ha);

	callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);

	QL_DPRINT2(ha, "exit\n");
}
static int
qlnx_create_error_recovery_taskqueue(qlnx_host_t *ha)
{
	uint8_t tq_name[32];

	bzero(tq_name, sizeof (tq_name));
	snprintf(tq_name, sizeof (tq_name), "ql_err_tq");

	TASK_INIT(&ha->err_task, 0, qlnx_error_recovery_taskqueue, ha);

	ha->err_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
		taskqueue_thread_enqueue, &ha->err_taskqueue);

	if (ha->err_taskqueue == NULL)
		return (-1);

	taskqueue_start_threads(&ha->err_taskqueue, 1, PI_NET, "%s", tq_name);

	QL_DPRINT1(ha, "%p\n", ha->err_taskqueue);

	return (0);
}
static void
qlnx_destroy_error_recovery_taskqueue(qlnx_host_t *ha)
{
	if (ha->err_taskqueue != NULL) {
		taskqueue_drain(ha->err_taskqueue, &ha->err_task);
		taskqueue_free(ha->err_taskqueue);
	}

	ha->err_taskqueue = NULL;
}
/*
 * Name: qlnx_pci_attach
 * Function: attaches the device to the operating system
 */
static int
qlnx_pci_attach(device_t dev)
{
	qlnx_host_t *ha = NULL;
	uint32_t rsrc_len_reg = 0;
	uint32_t rsrc_len_dbells = 0;
	uint32_t rsrc_len_msix = 0;
	int i;
	uint32_t mfw_ver;
	uint32_t num_sp_msix = 0;
	uint32_t num_rdma_irqs = 0;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "cannot get softc\n");
		return (ENOMEM);
	}

	memset(ha, 0, sizeof (qlnx_host_t));

	ha->device_id = pci_get_device(dev);

	if (qlnx_valid_device(ha) != 0) {
		device_printf(dev, "device is not valid device\n");
		return (ENXIO);
	}
	ha->pci_func = pci_get_function(dev);

	ha->pci_dev = dev;

	mtx_init(&ha->hw_lock, "qlnx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);

	ha->flags.lock_init = 1;

	pci_enable_busmaster(dev);

	/*
	 * map the register, doorbell and MSI-X BARs
	 */
	ha->reg_rid = PCIR_BAR(0);
	ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
			RF_ACTIVE);

	if (ha->pci_reg == NULL) {
		device_printf(dev, "unable to map BAR0\n");
		goto qlnx_pci_attach_err;
	}

	rsrc_len_reg = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
			ha->reg_rid);

	ha->dbells_rid = PCIR_BAR(2);
	rsrc_len_dbells = (uint32_t) bus_get_resource_count(dev,
			SYS_RES_MEMORY, ha->dbells_rid);

	if (rsrc_len_dbells) {
		ha->pci_dbells = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
				&ha->dbells_rid, RF_ACTIVE);

		if (ha->pci_dbells == NULL) {
			device_printf(dev, "unable to map BAR1\n");
			goto qlnx_pci_attach_err;
		}
		ha->dbells_phys_addr = (uint64_t)
			bus_get_resource_start(dev, SYS_RES_MEMORY, ha->dbells_rid);

		ha->dbells_size = rsrc_len_dbells;
	} else {
		if (qlnx_vf_device(ha) != 0) {
			device_printf(dev, " BAR1 size is zero\n");
			goto qlnx_pci_attach_err;
		}
	}

	ha->msix_rid = PCIR_BAR(4);
	ha->msix_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
			&ha->msix_rid, RF_ACTIVE);

	if (ha->msix_bar == NULL) {
		device_printf(dev, "unable to map BAR2\n");
		goto qlnx_pci_attach_err;
	}

	rsrc_len_msix = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
			ha->msix_rid);

	ha->dbg_level = 0x0000;

	QL_DPRINT1(ha, "\n\t\t\t"
		"pci_dev = %p pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x"
		"\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x"
		"\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x"
		" msix_avail = 0x%x "
		"\n\t\t\t[ncpus = %d]\n",
		ha->pci_dev, ha->pci_reg, rsrc_len_reg,
		ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
		ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
		mp_ncpus);

	/*
	 * allocate dma tags
	 */
	if (qlnx_alloc_parent_dma_tag(ha))
		goto qlnx_pci_attach_err;

	if (qlnx_alloc_tx_dma_tag(ha))
		goto qlnx_pci_attach_err;

	if (qlnx_alloc_rx_dma_tag(ha))
		goto qlnx_pci_attach_err;

	if (qlnx_init_hw(ha) != 0)
		goto qlnx_pci_attach_err;

	ha->flags.hw_init = 1;
	qlnx_get_params(ha);

	if ((pci_get_device(dev) == QLOGIC_PCI_DEVICE_ID_1644) &&
		(qlnxe_queue_count == QLNX_DEFAULT_RSS)) {
		qlnxe_queue_count = QLNX_MAX_RSS;
	}

	/*
	 * Allocate MSI-x vectors
	 */
	if (qlnx_vf_device(ha) != 0) {
		if (qlnxe_queue_count == 0)
			ha->num_rss = QLNX_DEFAULT_RSS;
		else
			ha->num_rss = qlnxe_queue_count;

		num_sp_msix = ha->cdev.num_hwfns;
	} else {
		uint8_t max_rxq;
		uint8_t max_txq;

		/* cap the VF RSS count at the smaller of its rx/tx queue limits */
		ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_rxq);
		ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_txq);

		if (max_rxq < max_txq)
			ha->num_rss = max_rxq;
		else
			ha->num_rss = max_txq;

		if (ha->num_rss > QLNX_MAX_VF_RSS)
			ha->num_rss = QLNX_MAX_VF_RSS;
	}

	if (ha->num_rss > mp_ncpus)
		ha->num_rss = mp_ncpus;

	ha->num_tc = QLNX_MAX_TC;

	ha->msix_count = pci_msix_count(dev);

#ifdef QLNX_ENABLE_IWARP

	num_rdma_irqs = qlnx_rdma_get_num_irqs(ha);

#endif /* #ifdef QLNX_ENABLE_IWARP */
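	/*
	 * MSI-X budget: one vector per hw function for the slow path, one
	 * per RSS queue for the fast path, plus any vectors the RDMA
	 * interface needs.  If fewer vectors are available, num_rss is
	 * scaled down to whatever remains after the fixed allocations.
	 */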
	if (!ha->msix_count ||
		(ha->msix_count < (num_sp_msix + 1 + num_rdma_irqs))) {
		device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
			ha->msix_count);
		goto qlnx_pci_attach_err;
	}

	if (ha->msix_count > (ha->num_rss + num_sp_msix + num_rdma_irqs))
		ha->msix_count = ha->num_rss + num_sp_msix + num_rdma_irqs;
	else
		ha->num_rss = ha->msix_count - (num_sp_msix + num_rdma_irqs);

	QL_DPRINT1(ha, "\n\t\t\t"
		"pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x"
		"\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x"
		"\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x"
		" msix_avail = 0x%x msix_alloc = 0x%x"
		"\n\t\t\t[ncpus = %d][num_rss = 0x%x] [num_tc = 0x%x]\n",
		ha->pci_reg, rsrc_len_reg,
		ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
		ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
		ha->msix_count, mp_ncpus, ha->num_rss, ha->num_tc);

	if (pci_alloc_msix(dev, &ha->msix_count)) {
		device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
			ha->msix_count);
		ha->msix_count = 0;
		goto qlnx_pci_attach_err;
	}

	/*
	 * Initialize slow path interrupt and task queue
	 */
	if (qlnx_create_sp_taskqueues(ha) != 0)
		goto qlnx_pci_attach_err;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];

		ha->sp_irq_rid[i] = i + 1;
		ha->sp_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ,
				&ha->sp_irq_rid[i],
				(RF_ACTIVE | RF_SHAREABLE));
		if (ha->sp_irq[i] == NULL) {
			device_printf(dev,
				"could not allocate mbx interrupt\n");
			goto qlnx_pci_attach_err;
		}

		if (bus_setup_intr(dev, ha->sp_irq[i],
				(INTR_TYPE_NET | INTR_MPSAFE), NULL,
				qlnx_sp_intr, p_hwfn, &ha->sp_handle[i])) {
			device_printf(dev,
				"could not setup slow path interrupt\n");
			goto qlnx_pci_attach_err;
		}

		QL_DPRINT1(ha, "p_hwfn [%p] sp_irq_rid %d"
			" sp_irq %p sp_handle %p\n", p_hwfn,
			ha->sp_irq_rid[i], ha->sp_irq[i], ha->sp_handle[i]);
	}

	/*
	 * initialize fast path interrupt
	 */
	if (qlnx_create_fp_taskqueues(ha) != 0)
		goto qlnx_pci_attach_err;

	for (i = 0; i < ha->num_rss; i++) {
		ha->irq_vec[i].rss_idx = i;
		ha->irq_vec[i].ha = ha;
		ha->irq_vec[i].irq_rid = (1 + num_sp_msix) + i;

		ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
				&ha->irq_vec[i].irq_rid,
				(RF_ACTIVE | RF_SHAREABLE));

		if (ha->irq_vec[i].irq == NULL) {
			device_printf(dev,
				"could not allocate interrupt[%d] irq_rid = %d\n",
				i, ha->irq_vec[i].irq_rid);
			goto qlnx_pci_attach_err;
		}

		if (qlnx_alloc_tx_br(ha, &ha->fp_array[i])) {
			device_printf(dev, "could not allocate tx_br[%d]\n", i);
			goto qlnx_pci_attach_err;
		}
	}

	if (qlnx_vf_device(ha) != 0) {
		callout_init(&ha->qlnx_callout, 1);
		ha->flags.callout_init = 1;

		for (i = 0; i < ha->cdev.num_hwfns; i++) {
			if (qlnx_grc_dumpsize(ha, &ha->grcdump_size[i], i) != 0)
				goto qlnx_pci_attach_err;
			if (ha->grcdump_size[i] == 0)
				goto qlnx_pci_attach_err;

			ha->grcdump_size[i] = ha->grcdump_size[i] << 2;
			QL_DPRINT1(ha, "grcdump_size[%d] = 0x%08x\n",
				i, ha->grcdump_size[i]);

			ha->grcdump[i] = qlnx_zalloc(ha->grcdump_size[i]);
			if (ha->grcdump[i] == NULL) {
				device_printf(dev, "grcdump alloc[%d] failed\n", i);
				goto qlnx_pci_attach_err;
			}

			if (qlnx_idle_chk_size(ha, &ha->idle_chk_size[i], i) != 0)
				goto qlnx_pci_attach_err;
			if (ha->idle_chk_size[i] == 0)
				goto qlnx_pci_attach_err;

			ha->idle_chk_size[i] = ha->idle_chk_size[i] << 2;
			QL_DPRINT1(ha, "idle_chk_size[%d] = 0x%08x\n",
				i, ha->idle_chk_size[i]);

			ha->idle_chk[i] = qlnx_zalloc(ha->idle_chk_size[i]);

			if (ha->idle_chk[i] == NULL) {
				device_printf(dev, "idle_chk alloc failed\n");
				goto qlnx_pci_attach_err;
			}
		}

		if (qlnx_create_error_recovery_taskqueue(ha) != 0)
			goto qlnx_pci_attach_err;
	}

	if (qlnx_slowpath_start(ha) != 0)
		goto qlnx_pci_attach_err;

	ha->flags.slowpath_start = 1;

	if (qlnx_vf_device(ha) != 0) {
		if (qlnx_get_flash_size(ha, &ha->flash_size) != 0) {
			qlnx_mdelay(__func__, 1000);
			qlnx_trigger_dump(ha);

			goto qlnx_pci_attach_err0;
		}

		if (qlnx_get_mfw_version(ha, &mfw_ver) != 0) {
			qlnx_mdelay(__func__, 1000);
			qlnx_trigger_dump(ha);

			goto qlnx_pci_attach_err0;
		}
	} else {
		struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0];
		ecore_mcp_get_mfw_ver(p_hwfn, NULL, &mfw_ver, NULL);
	}
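	/*
	 * The 32-bit mfw_ver word packs the management FW version one byte
	 * per field: major in bits 31-24, then minor, revision, engineering.
	 */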
	snprintf(ha->mfw_ver, sizeof(ha->mfw_ver), "%d.%d.%d.%d",
		((mfw_ver >> 24) & 0xFF), ((mfw_ver >> 16) & 0xFF),
		((mfw_ver >> 8) & 0xFF), (mfw_ver & 0xFF));
	snprintf(ha->stormfw_ver, sizeof(ha->stormfw_ver), "%d.%d.%d.%d",
		FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
		FW_ENGINEERING_VERSION);

	QL_DPRINT1(ha, "STORM_FW version %s MFW version %s\n",
		ha->stormfw_ver, ha->mfw_ver);

	qlnx_init_ifnet(dev, ha);

	/*
	 * add sysctls
	 */
	qlnx_add_sysctls(ha);

qlnx_pci_attach_err0:
	/*
	 * create ioctl device interface
	 */
	if (qlnx_vf_device(ha) != 0) {
		if (qlnx_make_cdev(ha)) {
			device_printf(dev, "%s: ql_make_cdev failed\n", __func__);
			goto qlnx_pci_attach_err;
		}

#ifdef QLNX_ENABLE_IWARP
		qlnx_rdma_dev_add(ha);
#endif /* #ifdef QLNX_ENABLE_IWARP */
	}

#ifndef QLNX_VF
#ifdef CONFIG_ECORE_SRIOV

	if (qlnx_vf_device(ha) != 0)
		qlnx_initialize_sriov(ha);

#endif /* #ifdef CONFIG_ECORE_SRIOV */
#endif /* #ifndef QLNX_VF */

	QL_DPRINT2(ha, "success\n");

	return (0);

qlnx_pci_attach_err:

	qlnx_release(ha);

	return (ENXIO);
}
/*
 * Name: qlnx_pci_detach
 * Function: Unhooks the device from the operating system
 */
static int
qlnx_pci_detach(device_t dev)
{
	qlnx_host_t *ha = NULL;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "%s: cannot get softc\n", __func__);
		return (ENOMEM);
	}

	if (qlnx_vf_device(ha) != 0) {
#ifdef CONFIG_ECORE_SRIOV
		int ret;

		ret = pci_iov_detach(dev);
		if (ret) {
			device_printf(dev, "%s: SRIOV in use\n", __func__);
			return (ret);
		}
#endif /* #ifdef CONFIG_ECORE_SRIOV */

#ifdef QLNX_ENABLE_IWARP
		if (qlnx_rdma_dev_remove(ha) != 0)
			return (EBUSY);
#endif /* #ifdef QLNX_ENABLE_IWARP */
	}

	QLNX_LOCK(ha);
	qlnx_stop(ha);
	QLNX_UNLOCK(ha);

	qlnx_release(ha);

	return (0);
}
#ifdef QLNX_ENABLE_IWARP

static uint8_t
qlnx_get_personality(uint8_t pci_func)
{
	uint8_t personality;

	personality = (qlnxe_rdma_configuration >>
			(pci_func * QLNX_PERSONALITY_BITS_PER_FUNC)) &
			QLNX_PERSONALIY_MASK;
	return (personality);
}
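/*
 * Example: with the default qlnxe_rdma_configuration (0x22222222),
 * pci_func 2 yields (0x22222222 >> 8) & 0xF == 0x2, which is
 * QLNX_PERSONALITY_ETH_IWARP.
 */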
static void
qlnx_set_personality(qlnx_host_t *ha)
{
	struct ecore_hwfn *p_hwfn;
	uint8_t personality;

	p_hwfn = &ha->cdev.hwfns[0];

	personality = qlnx_get_personality(ha->pci_func);

	switch (personality) {
	case QLNX_PERSONALITY_DEFAULT:
		device_printf(ha->pci_dev, "%s: DEFAULT\n",
			__func__);
		ha->personality = ECORE_PCI_DEFAULT;
		break;

	case QLNX_PERSONALITY_ETH_ONLY:
		device_printf(ha->pci_dev, "%s: ETH_ONLY\n",
			__func__);
		ha->personality = ECORE_PCI_ETH;
		break;

	case QLNX_PERSONALITY_ETH_IWARP:
		device_printf(ha->pci_dev, "%s: ETH_IWARP\n",
			__func__);
		ha->personality = ECORE_PCI_ETH_IWARP;
		break;

	case QLNX_PERSONALITY_ETH_ROCE:
		device_printf(ha->pci_dev, "%s: ETH_ROCE\n",
			__func__);
		ha->personality = ECORE_PCI_ETH_ROCE;
		break;
	}
}

#endif /* #ifdef QLNX_ENABLE_IWARP */
static int
qlnx_init_hw(qlnx_host_t *ha)
{
	struct ecore_hw_prepare_params params;

	ecore_init_struct(&ha->cdev);

	/*
	 * ha->dp_module = ECORE_MSG_PROBE | ... ;
	 * ha->dp_level = ECORE_LEVEL_VERBOSE;
	 */
	//ha->dp_module = ECORE_MSG_RDMA | ECORE_MSG_INTR | ECORE_MSG_LL2;
	ha->dp_level = ECORE_LEVEL_NOTICE;
	//ha->dp_level = ECORE_LEVEL_VERBOSE;

	ecore_init_dp(&ha->cdev, ha->dp_module, ha->dp_level, ha->pci_dev);

	ha->cdev.regview = ha->pci_reg;

	ha->personality = ECORE_PCI_DEFAULT;
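	/*
	 * VF path below: ecore must be told this is a VF, and when the
	 * doorbell BAR was not mapped the register BAR doubles as the
	 * doorbell window.  The PF path always uses the mapped doorbell
	 * BAR and may override the personality from the RDMA sysctl.
	 */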
	if (qlnx_vf_device(ha) == 0) {
		ha->cdev.b_is_vf = true;

		if (ha->pci_dbells != NULL) {
			ha->cdev.doorbells = ha->pci_dbells;
			ha->cdev.db_phys_addr = ha->dbells_phys_addr;
			ha->cdev.db_size = ha->dbells_size;
		} else {
			ha->pci_dbells = ha->pci_reg;
		}
	} else {
		ha->cdev.doorbells = ha->pci_dbells;
		ha->cdev.db_phys_addr = ha->dbells_phys_addr;
		ha->cdev.db_size = ha->dbells_size;

#ifdef QLNX_ENABLE_IWARP

		if (qlnx_rdma_supported(ha) == 0)
			qlnx_set_personality(ha);

#endif /* #ifdef QLNX_ENABLE_IWARP */
	}
	QL_DPRINT2(ha, "%s: %s\n", __func__,
		(ha->personality == ECORE_PCI_ETH_IWARP ? "iwarp" : "ethernet"));

	bzero(&params, sizeof (struct ecore_hw_prepare_params));

	params.personality = ha->personality;

	params.drv_resc_alloc = false;
	params.chk_reg_fifo = false;
	params.initiate_pf_flr = true;

	ecore_hw_prepare(&ha->cdev, &params);

	qlnx_set_id(&ha->cdev, qlnx_name_str, qlnx_ver_str);

	QL_DPRINT1(ha, "ha = %p cdev = %p p_hwfn = %p\n",
		ha, &ha->cdev, &ha->cdev.hwfns[0]);

	return (0);
}
static void
qlnx_release(qlnx_host_t *ha)
{
	device_t dev;
	int i;

	dev = ha->pci_dev;

	QL_DPRINT2(ha, "enter\n");

	for (i = 0; i < QLNX_MAX_HW_FUNCS; i++) {
		if (ha->idle_chk[i] != NULL) {
			free(ha->idle_chk[i], M_QLNXBUF);
			ha->idle_chk[i] = NULL;
		}

		if (ha->grcdump[i] != NULL) {
			free(ha->grcdump[i], M_QLNXBUF);
			ha->grcdump[i] = NULL;
		}
	}

	if (ha->flags.callout_init)
		callout_drain(&ha->qlnx_callout);

	if (ha->flags.slowpath_start) {
		qlnx_slowpath_stop(ha);
	}

	if (ha->flags.hw_init)
		ecore_hw_remove(&ha->cdev);

	qlnx_del_cdev(ha);

	if (ha->ifp != NULL)
		ether_ifdetach(ha->ifp);

	qlnx_free_tx_dma_tag(ha);

	qlnx_free_rx_dma_tag(ha);

	qlnx_free_parent_dma_tag(ha);

	if (qlnx_vf_device(ha) != 0) {
		qlnx_destroy_error_recovery_taskqueue(ha);
	}

	for (i = 0; i < ha->num_rss; i++) {
		struct qlnx_fastpath *fp = &ha->fp_array[i];

		if (ha->irq_vec[i].handle) {
			(void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
				ha->irq_vec[i].handle);
		}

		if (ha->irq_vec[i].irq) {
			(void)bus_release_resource(dev, SYS_RES_IRQ,
				ha->irq_vec[i].irq_rid,
				ha->irq_vec[i].irq);
		}

		qlnx_free_tx_br(ha, fp);
	}
	qlnx_destroy_fp_taskqueues(ha);

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		if (ha->sp_handle[i])
			(void)bus_teardown_intr(dev, ha->sp_irq[i],
				ha->sp_handle[i]);

		if (ha->sp_irq[i])
			(void) bus_release_resource(dev, SYS_RES_IRQ,
				ha->sp_irq_rid[i], ha->sp_irq[i]);
	}

	qlnx_destroy_sp_taskqueues(ha);

	if (ha->msix_count)
		pci_release_msi(dev);

	if (ha->flags.lock_init) {
		mtx_destroy(&ha->hw_lock);
	}

	if (ha->pci_reg)
		(void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
				ha->pci_reg);

	if (ha->dbells_size && ha->pci_dbells)
		(void) bus_release_resource(dev, SYS_RES_MEMORY, ha->dbells_rid,
				ha->pci_dbells);

	if (ha->msix_bar)
		(void) bus_release_resource(dev, SYS_RES_MEMORY, ha->msix_rid,
				ha->msix_bar);

	QL_DPRINT2(ha, "exit\n");
}
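/*
 * qlnx_trigger_dump(): marks the interface as not running, flags error
 * recovery, and captures a GRC dump plus idle-check buffer for every
 * hw function.
 */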
static void
qlnx_trigger_dump(qlnx_host_t *ha)
{
	int i;

	if (ha->ifp != NULL)
		ha->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);

	QL_DPRINT2(ha, "enter\n");

	if (qlnx_vf_device(ha) == 0)
		return;

	ha->error_recovery = 1;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		qlnx_grc_dump(ha, &ha->grcdump_dwords[i], i);
		qlnx_idle_chk(ha, &ha->idle_chk_dwords[i], i);
	}

	QL_DPRINT2(ha, "exit\n");
}
static int
qlnx_trigger_dump_sysctl(SYSCTL_HANDLER_ARGS)
{
	int err, ret = 0;
	qlnx_host_t *ha;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	if (err || !req->newptr)
		return (err);

	ha = (qlnx_host_t *)arg1;
	qlnx_trigger_dump(ha);

	return (err);
}
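/*
 * The two handlers below accept an interrupt coalescing period between
 * 1 and 255 microseconds and apply it to every tx (resp. rx) queue via
 * ecore_set_queue_coalesce(); on success the value is cached in the
 * softc so the read-only *_coalesce_usecs sysctls report it.
 */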
static int
qlnx_set_tx_coalesce(SYSCTL_HANDLER_ARGS)
{
	int err, i, ret = 0, usecs = 0;
	qlnx_host_t *ha;
	struct ecore_hwfn *p_hwfn;
	struct qlnx_fastpath *fp;

	err = sysctl_handle_int(oidp, &usecs, 0, req);

	if (err || !req->newptr || !usecs || (usecs > 255))
		return (err);

	ha = (qlnx_host_t *)arg1;

	if (qlnx_vf_device(ha) == 0)
		return (-1);

	for (i = 0; i < ha->num_rss; i++) {
		p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];

		fp = &ha->fp_array[i];

		if (fp->txq[0]->handle != NULL) {
			ret = ecore_set_queue_coalesce(p_hwfn, 0,
				(uint16_t)usecs, fp->txq[0]->handle);
		}
	}

	if (!ret)
		ha->tx_coalesce_usecs = (uint8_t)usecs;

	return (err);
}
static int
qlnx_set_rx_coalesce(SYSCTL_HANDLER_ARGS)
{
	int err, i, ret = 0, usecs = 0;
	qlnx_host_t *ha;
	struct ecore_hwfn *p_hwfn;
	struct qlnx_fastpath *fp;

	err = sysctl_handle_int(oidp, &usecs, 0, req);

	if (err || !req->newptr || !usecs || (usecs > 255))
		return (err);

	ha = (qlnx_host_t *)arg1;

	if (qlnx_vf_device(ha) == 0)
		return (-1);

	for (i = 0; i < ha->num_rss; i++) {
		p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];

		fp = &ha->fp_array[i];

		if (fp->rxq->handle != NULL) {
			ret = ecore_set_queue_coalesce(p_hwfn, (uint16_t)usecs,
				0, fp->rxq->handle);
		}
	}

	if (!ret)
		ha->rx_coalesce_usecs = (uint8_t)usecs;

	return (err);
}
static void
qlnx_add_sp_stats_sysctls(qlnx_host_t *ha)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;
	struct sysctl_oid *ctx_oid;

	ctx = device_get_sysctl_ctx(ha->pci_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

	ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "spstat",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "spstat");
	children = SYSCTL_CHILDREN(ctx_oid);

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "sp_interrupts",
		CTLFLAG_RD, &ha->sp_interrupts,
		"No. of slowpath interrupts");

	return;
}
static void
qlnx_add_fp_stats_sysctls(qlnx_host_t *ha)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;
	struct sysctl_oid_list *node_children;
	struct sysctl_oid *ctx_oid;
	int i, j;
	uint8_t name_str[16];

	ctx = device_get_sysctl_ctx(ha->pci_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

	ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fpstat",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "fpstat");
	children = SYSCTL_CHILDREN(ctx_oid);

	for (i = 0; i < ha->num_rss; i++) {
		bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
		snprintf(name_str, sizeof(name_str), "%d", i);

		ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str);
		node_children = SYSCTL_CHILDREN(ctx_oid);

		/* Tx Related */

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_processed",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_processed,
			"No. of packets processed for transmission");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_freed",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_freed,
			"No. of freed packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_transmitted",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_transmitted,
			"No. of transmitted packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_completed",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_completed,
			"No. of transmit completions");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_non_tso_pkts",
			CTLFLAG_RD, &ha->fp_array[i].tx_non_tso_pkts,
			"No. of non-LSO transmitted packets");

#ifdef QLNX_TRACE_PERF_DATA

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_trans_ctx",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_ctx,
			"No. of transmitted packets in transmit context");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_compl_ctx",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_ctx,
			"No. of transmit completions in transmit context");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_trans_fp",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_fp,
			"No. of transmitted packets in taskqueue");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_compl_fp",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_fp,
			"No. of transmit completions in taskqueue");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_pkts_compl_intr",
			CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_intr,
			"No. of transmit completions in interrupt ctx");
#endif

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_tso_pkts",
			CTLFLAG_RD, &ha->fp_array[i].tx_tso_pkts,
			"No. of LSO transmitted packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_lso_wnd_min_len",
			CTLFLAG_RD, &ha->fp_array[i].tx_lso_wnd_min_len,
			"tx_lso_wnd_min_len");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_defrag",
			CTLFLAG_RD, &ha->fp_array[i].tx_defrag,
			"tx_defrag");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tx_nsegs_gt_elem_left",
			CTLFLAG_RD, &ha->fp_array[i].tx_nsegs_gt_elem_left,
			"tx_nsegs_gt_elem_left");

		SYSCTL_ADD_UINT(ctx, node_children,
			OID_AUTO, "tx_tso_max_nsegs",
			CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_nsegs,
			ha->fp_array[i].tx_tso_max_nsegs, "tx_tso_max_nsegs");

		SYSCTL_ADD_UINT(ctx, node_children,
			OID_AUTO, "tx_tso_min_nsegs",
			CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_nsegs,
			ha->fp_array[i].tx_tso_min_nsegs, "tx_tso_min_nsegs");

		SYSCTL_ADD_UINT(ctx, node_children,
			OID_AUTO, "tx_tso_max_pkt_len",
			CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_pkt_len,
			ha->fp_array[i].tx_tso_max_pkt_len,
			"tx_tso_max_pkt_len");

		SYSCTL_ADD_UINT(ctx, node_children,
			OID_AUTO, "tx_tso_min_pkt_len",
			CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_pkt_len,
			ha->fp_array[i].tx_tso_min_pkt_len,
			"tx_tso_min_pkt_len");

		for (j = 0; j < QLNX_FP_MAX_SEGS; j++) {
			bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
			snprintf(name_str, sizeof(name_str),
				"tx_pkts_nseg_%02d", (j+1));

			SYSCTL_ADD_QUAD(ctx, node_children,
				OID_AUTO, name_str, CTLFLAG_RD,
				&ha->fp_array[i].tx_pkts[j], name_str);
		}

#ifdef QLNX_TRACE_PERF_DATA
		for (j = 0; j < 18; j++) {
			bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
			snprintf(name_str, sizeof(name_str),
				"tx_pkts_hist_%02d", (j+1));

			SYSCTL_ADD_QUAD(ctx, node_children,
				OID_AUTO, name_str, CTLFLAG_RD,
				&ha->fp_array[i].tx_pkts_hist[j], name_str);
		}
		for (j = 0; j < 5; j++) {
			bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
			snprintf(name_str, sizeof(name_str),
				"tx_comInt_%02d", (j+1));

			SYSCTL_ADD_QUAD(ctx, node_children,
				OID_AUTO, name_str, CTLFLAG_RD,
				&ha->fp_array[i].tx_comInt[j], name_str);
		}
		for (j = 0; j < 18; j++) {
			bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
			snprintf(name_str, sizeof(name_str),
				"tx_pkts_q_%02d", (j+1));

			SYSCTL_ADD_QUAD(ctx, node_children,
				OID_AUTO, name_str, CTLFLAG_RD,
				&ha->fp_array[i].tx_pkts_q[j], name_str);
		}
#endif /* #ifdef QLNX_TRACE_PERF_DATA */

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_nsegs_gt_elem_left",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_nsegs_gt_elem_left,
			"err_tx_nsegs_gt_elem_left");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_dmamap_create",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_create,
			"err_tx_dmamap_create");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_defrag_dmamap_load",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag_dmamap_load,
			"err_tx_defrag_dmamap_load");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_non_tso_max_seg",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_non_tso_max_seg,
			"err_tx_non_tso_max_seg");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_dmamap_load",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_load,
			"err_tx_dmamap_load");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_defrag",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag,
			"err_tx_defrag");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_free_pkt_null",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_free_pkt_null,
			"err_tx_free_pkt_null");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_tx_cons_idx_conflict",
			CTLFLAG_RD, &ha->fp_array[i].err_tx_cons_idx_conflict,
			"err_tx_cons_idx_conflict");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "lro_cnt_64",
			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_64,
			"lro_cnt_64");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "lro_cnt_128",
			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_128,
			"lro_cnt_128");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "lro_cnt_256",
			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_256,
			"lro_cnt_256");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "lro_cnt_512",
			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_512,
			"lro_cnt_512");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "lro_cnt_1024",
			CTLFLAG_RD, &ha->fp_array[i].lro_cnt_1024,
			"lro_cnt_1024");

		/* Rx Related */

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "rx_pkts",
			CTLFLAG_RD, &ha->fp_array[i].rx_pkts,
			"No. of received packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tpa_start",
			CTLFLAG_RD, &ha->fp_array[i].tpa_start,
			"No. of tpa_start packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tpa_cont",
			CTLFLAG_RD, &ha->fp_array[i].tpa_cont,
			"No. of tpa_cont packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "tpa_end",
			CTLFLAG_RD, &ha->fp_array[i].tpa_end,
			"No. of tpa_end packets");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_m_getcl",
			CTLFLAG_RD, &ha->fp_array[i].err_m_getcl,
			"err_m_getcl");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_m_getjcl",
			CTLFLAG_RD, &ha->fp_array[i].err_m_getjcl,
			"err_m_getjcl");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_rx_hw_errors",
			CTLFLAG_RD, &ha->fp_array[i].err_rx_hw_errors,
			"err_rx_hw_errors");

		SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "err_rx_alloc_errors",
			CTLFLAG_RD, &ha->fp_array[i].err_rx_alloc_errors,
			"err_rx_alloc_errors");
	}

	return;
}
static void
qlnx_add_hw_stats_sysctls(qlnx_host_t *ha)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;
	struct sysctl_oid *ctx_oid;

	ctx = device_get_sysctl_ctx(ha->pci_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

	ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "hwstat",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "hwstat");
	children = SYSCTL_CHILDREN(ctx_oid);

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "no_buff_discards",
		CTLFLAG_RD, &ha->hw_stats.common.no_buff_discards,
		"No. of packets discarded due to lack of buffer");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "packet_too_big_discard",
		CTLFLAG_RD, &ha->hw_stats.common.packet_too_big_discard,
		"No. of packets discarded because packet was too big");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "ttl0_discard",
		CTLFLAG_RD, &ha->hw_stats.common.ttl0_discard,
		"ttl0_discard");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_ucast_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_bytes,
		"rx_ucast_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_mcast_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_bytes,
		"rx_mcast_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_bcast_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_bytes,
		"rx_bcast_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_ucast_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_pkts,
		"rx_ucast_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_mcast_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_pkts,
		"rx_mcast_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_bcast_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_pkts,
		"rx_bcast_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "mftag_filter_discards",
		CTLFLAG_RD, &ha->hw_stats.common.mftag_filter_discards,
		"mftag_filter_discards");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "mac_filter_discards",
		CTLFLAG_RD, &ha->hw_stats.common.mac_filter_discards,
		"mac_filter_discards");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_ucast_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_bytes,
		"tx_ucast_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_mcast_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_bytes,
		"tx_mcast_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_bcast_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_bytes,
		"tx_bcast_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_ucast_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_pkts,
		"tx_ucast_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_mcast_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_pkts,
		"tx_mcast_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_bcast_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_pkts,
		"tx_bcast_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_err_drop_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.tx_err_drop_pkts,
		"tx_err_drop_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tpa_coalesced_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_pkts,
		"tpa_coalesced_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tpa_coalesced_events",
		CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_events,
		"tpa_coalesced_events");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tpa_aborts_num",
		CTLFLAG_RD, &ha->hw_stats.common.tpa_aborts_num,
		"tpa_aborts_num");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tpa_not_coalesced_pkts",
		CTLFLAG_RD, &ha->hw_stats.common.tpa_not_coalesced_pkts,
		"tpa_not_coalesced_pkts");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tpa_coalesced_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_bytes,
		"tpa_coalesced_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_64_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_64_byte_packets,
		"rx_64_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_65_to_127_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_65_to_127_byte_packets,
		"rx_65_to_127_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_128_to_255_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_128_to_255_byte_packets,
		"rx_128_to_255_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_256_to_511_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_256_to_511_byte_packets,
		"rx_256_to_511_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_512_to_1023_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_512_to_1023_byte_packets,
		"rx_512_to_1023_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_1024_to_1518_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_1024_to_1518_byte_packets,
		"rx_1024_to_1518_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_1519_to_1522_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_1522_byte_packets,
		"rx_1519_to_1522_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_1523_to_2047_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_2047_byte_packets,
		"rx_1523_to_2047_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_2048_to_4095_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.rx_2048_to_4095_byte_packets,
		"rx_2048_to_4095_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_4096_to_9216_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.rx_4096_to_9216_byte_packets,
		"rx_4096_to_9216_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_9217_to_16383_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.rx_9217_to_16383_byte_packets,
		"rx_9217_to_16383_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_crc_errors",
		CTLFLAG_RD, &ha->hw_stats.common.rx_crc_errors,
		"rx_crc_errors");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_mac_crtl_frames",
		CTLFLAG_RD, &ha->hw_stats.common.rx_mac_crtl_frames,
		"rx_mac_crtl_frames");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_pause_frames",
		CTLFLAG_RD, &ha->hw_stats.common.rx_pause_frames,
		"rx_pause_frames");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_pfc_frames",
		CTLFLAG_RD, &ha->hw_stats.common.rx_pfc_frames,
		"rx_pfc_frames");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_align_errors",
		CTLFLAG_RD, &ha->hw_stats.common.rx_align_errors,
		"rx_align_errors");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_carrier_errors",
		CTLFLAG_RD, &ha->hw_stats.common.rx_carrier_errors,
		"rx_carrier_errors");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_oversize_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_oversize_packets,
		"rx_oversize_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_jabbers",
		CTLFLAG_RD, &ha->hw_stats.common.rx_jabbers,
		"rx_jabbers");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_undersize_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_undersize_packets,
		"rx_undersize_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_fragments",
		CTLFLAG_RD, &ha->hw_stats.common.rx_fragments,
		"rx_fragments");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_64_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_64_byte_packets,
		"tx_64_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_65_to_127_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_65_to_127_byte_packets,
		"tx_65_to_127_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_128_to_255_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_128_to_255_byte_packets,
		"tx_128_to_255_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_256_to_511_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_256_to_511_byte_packets,
		"tx_256_to_511_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_512_to_1023_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_512_to_1023_byte_packets,
		"tx_512_to_1023_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_1024_to_1518_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_1024_to_1518_byte_packets,
		"tx_1024_to_1518_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_1519_to_2047_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.tx_1519_to_2047_byte_packets,
		"tx_1519_to_2047_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_2048_to_4095_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.tx_2048_to_4095_byte_packets,
		"tx_2048_to_4095_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_4096_to_9216_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.tx_4096_to_9216_byte_packets,
		"tx_4096_to_9216_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_9217_to_16383_byte_packets",
		CTLFLAG_RD, &ha->hw_stats.bb.tx_9217_to_16383_byte_packets,
		"tx_9217_to_16383_byte_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_pause_frames",
		CTLFLAG_RD, &ha->hw_stats.common.tx_pause_frames,
		"tx_pause_frames");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_pfc_frames",
		CTLFLAG_RD, &ha->hw_stats.common.tx_pfc_frames,
		"tx_pfc_frames");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_lpi_entry_count",
		CTLFLAG_RD, &ha->hw_stats.bb.tx_lpi_entry_count,
		"tx_lpi_entry_count");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_total_collisions",
		CTLFLAG_RD, &ha->hw_stats.bb.tx_total_collisions,
		"tx_total_collisions");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "brb_truncates",
		CTLFLAG_RD, &ha->hw_stats.common.brb_truncates,
		"brb_truncates");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "brb_discards",
		CTLFLAG_RD, &ha->hw_stats.common.brb_discards,
		"brb_discards");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_mac_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bytes,
		"rx_mac_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_mac_uc_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_mac_uc_packets,
		"rx_mac_uc_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_mac_mc_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_mac_mc_packets,
		"rx_mac_mc_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_mac_bc_packets",
		CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bc_packets,
		"rx_mac_bc_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "rx_mac_frames_ok",
		CTLFLAG_RD, &ha->hw_stats.common.rx_mac_frames_ok,
		"rx_mac_frames_ok");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_mac_bytes",
		CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bytes,
		"tx_mac_bytes");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_mac_uc_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_mac_uc_packets,
		"tx_mac_uc_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_mac_mc_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_mac_mc_packets,
		"tx_mac_mc_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_mac_bc_packets",
		CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bc_packets,
		"tx_mac_bc_packets");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "tx_mac_ctrl_frames",
		CTLFLAG_RD, &ha->hw_stats.common.tx_mac_ctrl_frames,
		"tx_mac_ctrl_frames");

	return;
}
static void
qlnx_add_sysctls(qlnx_host_t *ha)
{
	device_t dev = ha->pci_dev;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;

	ctx = device_get_sysctl_ctx(dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

	qlnx_add_fp_stats_sysctls(ha);
	qlnx_add_sp_stats_sysctls(ha);

	if (qlnx_vf_device(ha) != 0)
		qlnx_add_hw_stats_sysctls(ha);

	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Driver_Version",
		CTLFLAG_RD, qlnx_ver_str, 0,
		"Driver Version");

	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "STORMFW_Version",
		CTLFLAG_RD, ha->stormfw_ver, 0,
		"STORM Firmware Version");

	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "MFW_Version",
		CTLFLAG_RD, ha->mfw_ver, 0,
		"Management Firmware Version");

	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "personality", CTLFLAG_RD,
		&ha->personality, ha->personality,
		"\tpersonality = 0 => Ethernet Only\n"
		"\tpersonality = 3 => Ethernet and RoCE\n"
		"\tpersonality = 4 => Ethernet and iWARP\n"
		"\tpersonality = 6 => Default in Shared Memory\n");

	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "debug", CTLFLAG_RW,
		&ha->dbg_level, ha->dbg_level, "Debug Level");

	ha->dp_level = 0x01;
	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "dp_level", CTLFLAG_RW,
		&ha->dp_level, ha->dp_level, "DP Level");

	ha->dbg_trace_lro_cnt = 0;
	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "dbg_trace_lro_cnt", CTLFLAG_RW,
		&ha->dbg_trace_lro_cnt, ha->dbg_trace_lro_cnt,
		"Trace LRO Counts");

	ha->dbg_trace_tso_pkt_len = 0;
	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "dbg_trace_tso_pkt_len", CTLFLAG_RW,
		&ha->dbg_trace_tso_pkt_len, ha->dbg_trace_tso_pkt_len,
		"Trace TSO packet lengths");

	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "dp_module", CTLFLAG_RW,
		&ha->dp_module, ha->dp_module, "DP Module");

	ha->err_inject = 0;
	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "err_inject", CTLFLAG_RW,
		&ha->err_inject, ha->err_inject, "Error Inject");

	ha->storm_stats_enable = 0;

	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "storm_stats_enable", CTLFLAG_RW,
		&ha->storm_stats_enable, ha->storm_stats_enable,
		"Enable Storm Statistics Gathering");

	ha->storm_stats_index = 0;

	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "storm_stats_index", CTLFLAG_RD,
		&ha->storm_stats_index, ha->storm_stats_index,
		"Enable Storm Statistics Gathering Current Index");

	ha->grcdump_taken = 0;
	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "grcdump_taken", CTLFLAG_RD,
		&ha->grcdump_taken, ha->grcdump_taken,
		"grcdump_taken");

	ha->idle_chk_taken = 0;
	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "idle_chk_taken", CTLFLAG_RD,
		&ha->idle_chk_taken, ha->idle_chk_taken,
		"idle_chk_taken");

	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "rx_coalesce_usecs", CTLFLAG_RD,
		&ha->rx_coalesce_usecs, ha->rx_coalesce_usecs,
		"rx_coalesce_usecs");

	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "tx_coalesce_usecs", CTLFLAG_RD,
		&ha->tx_coalesce_usecs, ha->tx_coalesce_usecs,
		"tx_coalesce_usecs");

	SYSCTL_ADD_PROC(ctx, children,
		OID_AUTO, "trigger_dump",
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
		(void *)ha, 0, qlnx_trigger_dump_sysctl, "I", "trigger_dump");

	SYSCTL_ADD_PROC(ctx, children,
		OID_AUTO, "set_rx_coalesce_usecs",
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
		(void *)ha, 0, qlnx_set_rx_coalesce, "I",
		"rx interrupt coalesce period microseconds");

	SYSCTL_ADD_PROC(ctx, children,
		OID_AUTO, "set_tx_coalesce_usecs",
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
		(void *)ha, 0, qlnx_set_tx_coalesce, "I",
		"tx interrupt coalesce period microseconds");

	ha->rx_pkt_threshold = 128;
	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "rx_pkt_threshold", CTLFLAG_RW,
		&ha->rx_pkt_threshold, ha->rx_pkt_threshold,
		"No. of Rx Pkts to process at a time");

	ha->rx_jumbo_buf_eq_mtu = 0;
	SYSCTL_ADD_UINT(ctx, children,
		OID_AUTO, "rx_jumbo_buf_eq_mtu", CTLFLAG_RW,
		&ha->rx_jumbo_buf_eq_mtu, ha->rx_jumbo_buf_eq_mtu,
		"== 0 => Rx Jumbo buffers are capped to 4Kbytes\n"
		"otherwise Rx Jumbo buffers are set to >= MTU size\n");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "err_illegal_intr", CTLFLAG_RD,
		&ha->err_illegal_intr, "err_illegal_intr");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "err_fp_null", CTLFLAG_RD,
		&ha->err_fp_null, "err_fp_null");

	SYSCTL_ADD_QUAD(ctx, children,
		OID_AUTO, "err_get_proto_invalid_type", CTLFLAG_RD,
		&ha->err_get_proto_invalid_type, "err_get_proto_invalid_type");

	return;
}
2359 /*****************************************************************************
2360 * Operating System Network Interface Functions
2361 *****************************************************************************/
2364 qlnx_init_ifnet(device_t dev, qlnx_host_t *ha)
2369 ifp = ha->ifp = if_alloc(IFT_ETHER);
2372 panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));
2374 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2376 device_id = pci_get_device(ha->pci_dev);
2378 #if __FreeBSD_version >= 1000000
2380 if (device_id == QLOGIC_PCI_DEVICE_ID_1634)
2381 ifp->if_baudrate = IF_Gbps(40);
2382 else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
2383 (device_id == QLOGIC_PCI_DEVICE_ID_8070))
2384 ifp->if_baudrate = IF_Gbps(25);
2385 else if (device_id == QLOGIC_PCI_DEVICE_ID_1654)
2386 ifp->if_baudrate = IF_Gbps(50);
2387 else if (device_id == QLOGIC_PCI_DEVICE_ID_1644)
2388 ifp->if_baudrate = IF_Gbps(100);
2390 ifp->if_capabilities = IFCAP_LINKSTATE;
2392 ifp->if_mtu = ETHERMTU;
ifp->if_baudrate = (1 * 1000 * 1000 * 1000);
2395 #endif /* #if __FreeBSD_version >= 1000000 */
2397 ifp->if_init = qlnx_init;
2399 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2400 ifp->if_ioctl = qlnx_ioctl;
2401 ifp->if_transmit = qlnx_transmit;
2402 ifp->if_qflush = qlnx_qflush;
2404 IFQ_SET_MAXLEN(&ifp->if_snd, qlnx_get_ifq_snd_maxlen(ha));
2405 ifp->if_snd.ifq_drv_maxlen = qlnx_get_ifq_snd_maxlen(ha);
2406 IFQ_SET_READY(&ifp->if_snd);
2408 #if __FreeBSD_version >= 1100036
2409 if_setgetcounterfn(ifp, qlnx_get_counter);
2412 ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2414 memcpy(ha->primary_mac, qlnx_get_mac_addr(ha), ETH_ALEN);
2416 if (!ha->primary_mac[0] && !ha->primary_mac[1] &&
2417 !ha->primary_mac[2] && !ha->primary_mac[3] &&
2418 !ha->primary_mac[4] && !ha->primary_mac[5]) {
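		/*
		 * The MAC read from NVRAM is all zeroes; fall back to a
		 * locally generated address below. (The elided code above
		 * this block is assumed to seed "rnd" from a kernel RNG
		 * such as arc4random().)
		 */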
2423 ha->primary_mac[0] = 0x00;
2424 ha->primary_mac[1] = 0x0e;
2425 ha->primary_mac[2] = 0x1e;
2426 ha->primary_mac[3] = rnd & 0xFF;
2427 ha->primary_mac[4] = (rnd >> 8) & 0xFF;
2428 ha->primary_mac[5] = (rnd >> 16) & 0xFF;
2431 ether_ifattach(ifp, ha->primary_mac);
2432 bcopy(IF_LLADDR(ha->ifp), ha->primary_mac, ETHER_ADDR_LEN);
2434 ifp->if_capabilities = IFCAP_HWCSUM;
2435 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2437 ifp->if_capabilities |= IFCAP_VLAN_MTU;
2438 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
2439 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2440 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
2441 ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
2442 ifp->if_capabilities |= IFCAP_TSO4;
2443 ifp->if_capabilities |= IFCAP_TSO6;
2444 ifp->if_capabilities |= IFCAP_LRO;
2446 ifp->if_hw_tsomax = QLNX_MAX_TSO_FRAME_SIZE -
2447 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
2448 ifp->if_hw_tsomaxsegcount = QLNX_MAX_SEGMENTS - 1 /* hdr */;
2449 ifp->if_hw_tsomaxsegsize = QLNX_MAX_TX_MBUF_SIZE;
2452 ifp->if_capenable = ifp->if_capabilities;
2454 ifp->if_hwassist = CSUM_IP;
2455 ifp->if_hwassist |= CSUM_TCP | CSUM_UDP;
2456 ifp->if_hwassist |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6;
2457 ifp->if_hwassist |= CSUM_TSO;
2459 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
ifmedia_init(&ha->media, IFM_IMASK, qlnx_media_change,
	qlnx_media_status);
2464 if (device_id == QLOGIC_PCI_DEVICE_ID_1634) {
2465 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_LR4), 0, NULL);
2466 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_SR4), 0, NULL);
2467 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_CR4), 0, NULL);
2468 } else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
2469 (device_id == QLOGIC_PCI_DEVICE_ID_8070)) {
2470 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_SR), 0, NULL);
2471 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_CR), 0, NULL);
2472 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) {
2473 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_KR2), 0, NULL);
2474 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_CR2), 0, NULL);
2475 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) {
2476 ifmedia_add(&ha->media,
2477 (IFM_ETHER | QLNX_IFM_100G_LR4), 0, NULL);
2478 ifmedia_add(&ha->media,
2479 (IFM_ETHER | QLNX_IFM_100G_SR4), 0, NULL);
2480 ifmedia_add(&ha->media,
2481 (IFM_ETHER | QLNX_IFM_100G_CR4), 0, NULL);
2484 ifmedia_add(&ha->media, (IFM_ETHER | IFM_FDX), 0, NULL);
2485 ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);
2488 ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));
2490 QL_DPRINT2(ha, "exit\n");
2496 qlnx_init_locked(qlnx_host_t *ha)
2498 struct ifnet *ifp = ha->ifp;
2500 QL_DPRINT1(ha, "Driver Initialization start \n");
2504 if (qlnx_load(ha) == 0) {
2506 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2507 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2509 #ifdef QLNX_ENABLE_IWARP
2510 if (qlnx_vf_device(ha) != 0) {
2511 qlnx_rdma_dev_open(ha);
2513 #endif /* #ifdef QLNX_ENABLE_IWARP */
2520 qlnx_init(void *arg)
2524 ha = (qlnx_host_t *)arg;
2526 QL_DPRINT2(ha, "enter\n");
2529 qlnx_init_locked(ha);
2532 QL_DPRINT2(ha, "exit\n");
2538 qlnx_config_mcast_mac_addr(qlnx_host_t *ha, uint8_t *mac_addr, uint32_t add_mac)
2540 struct ecore_filter_mcast *mcast;
2541 struct ecore_dev *cdev;
2546 mcast = &ha->ecore_mcast;
2547 bzero(mcast, sizeof(struct ecore_filter_mcast));
2550 mcast->opcode = ECORE_FILTER_ADD;
2552 mcast->opcode = ECORE_FILTER_REMOVE;
2554 mcast->num_mc_addrs = 1;
2555 memcpy(mcast->mac, mac_addr, ETH_ALEN);
2557 rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);
2563 qlnx_hw_add_mcast(qlnx_host_t *ha, uint8_t *mta)
2567 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2569 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0)
2570 return 0; /* its been already added */
2573 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2575 if ((ha->mcast[i].addr[0] == 0) &&
2576 (ha->mcast[i].addr[1] == 0) &&
2577 (ha->mcast[i].addr[2] == 0) &&
2578 (ha->mcast[i].addr[3] == 0) &&
2579 (ha->mcast[i].addr[4] == 0) &&
2580 (ha->mcast[i].addr[5] == 0)) {
2582 if (qlnx_config_mcast_mac_addr(ha, mta, 1))
2585 bcopy(mta, ha->mcast[i].addr, ETH_ALEN);
2595 qlnx_hw_del_mcast(qlnx_host_t *ha, uint8_t *mta)
2599 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2600 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) {
2602 if (qlnx_config_mcast_mac_addr(ha, mta, 0))
2605 ha->mcast[i].addr[0] = 0;
2606 ha->mcast[i].addr[1] = 0;
2607 ha->mcast[i].addr[2] = 0;
2608 ha->mcast[i].addr[3] = 0;
2609 ha->mcast[i].addr[4] = 0;
2610 ha->mcast[i].addr[5] = 0;
* Name: qlnx_hw_set_multi
* Function: Sets the multicast addresses provided by the host O.S. into
*	the hardware (for the given interface)
2626 qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
2631 for (i = 0; i < mcnt; i++) {
2633 if (qlnx_hw_add_mcast(ha, mta))
2636 if (qlnx_hw_del_mcast(ha, mta))
2640 mta += ETHER_HDR_LEN;
2647 qlnx_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt)
if (mcnt == QLNX_MAX_NUM_MULTICAST_ADDRS)
	return (0);

bcopy(LLADDR(sdl), &mta[mcnt * ETHER_HDR_LEN], ETHER_HDR_LEN);

return (1);
2660 qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi)
2662 uint8_t mta[QLNX_MAX_NUM_MULTICAST_ADDRS * ETHER_HDR_LEN];
2663 struct ifnet *ifp = ha->ifp;
2666 if (qlnx_vf_device(ha) == 0)
2669 mcnt = if_foreach_llmaddr(ifp, qlnx_copy_maddr, mta);
2672 qlnx_hw_set_multi(ha, mta, mcnt, add_multi);
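
/*
 * Promiscuous and allmulti modes are layered on the ecore Rx accept
 * filter: promiscuous adds both ECORE_ACCEPT_UCAST_UNMATCHED and
 * ECORE_ACCEPT_MCAST_UNMATCHED to the current filter, while allmulti
 * adds only ECORE_ACCEPT_MCAST_UNMATCHED. Both return early on a VF.
 */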
2679 qlnx_set_promisc(qlnx_host_t *ha)
2684 if (qlnx_vf_device(ha) == 0)
2687 filter = ha->filter;
2688 filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
2689 filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
2691 rc = qlnx_set_rx_accept_filter(ha, filter);
2696 qlnx_set_allmulti(qlnx_host_t *ha)
2701 if (qlnx_vf_device(ha) == 0)
2704 filter = ha->filter;
2705 filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
2706 rc = qlnx_set_rx_accept_filter(ha, filter);
2713 qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2716 struct ifreq *ifr = (struct ifreq *)data;
2717 struct ifaddr *ifa = (struct ifaddr *)data;
2720 ha = (qlnx_host_t *)ifp->if_softc;
2724 QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx)\n", cmd);
2726 if (ifa->ifa_addr->sa_family == AF_INET) {
2727 ifp->if_flags |= IFF_UP;
2728 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2730 qlnx_init_locked(ha);
2733 QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
2734 cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr));
2736 arp_ifinit(ifp, ifa);
2738 ether_ioctl(ifp, cmd, data);
2743 QL_DPRINT4(ha, "SIOCSIFMTU (0x%lx)\n", cmd);
2745 if (ifr->ifr_mtu > QLNX_MAX_MTU) {
2749 ifp->if_mtu = ifr->ifr_mtu;
2750 ha->max_frame_size =
2751 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2752 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2753 qlnx_init_locked(ha);
2762 QL_DPRINT4(ha, "SIOCSIFFLAGS (0x%lx)\n", cmd);
2766 if (ifp->if_flags & IFF_UP) {
2767 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2768 if ((ifp->if_flags ^ ha->if_flags) &
2770 ret = qlnx_set_promisc(ha);
2771 } else if ((ifp->if_flags ^ ha->if_flags) &
2773 ret = qlnx_set_allmulti(ha);
2776 ha->max_frame_size = ifp->if_mtu +
2777 ETHER_HDR_LEN + ETHER_CRC_LEN;
2778 qlnx_init_locked(ha);
2781 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2783 ha->if_flags = ifp->if_flags;
2790 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCADDMULTI", cmd);
2792 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2793 if (qlnx_set_multi(ha, 1))
2799 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCDELMULTI", cmd);
2801 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2802 if (qlnx_set_multi(ha, 0))
2809 QL_DPRINT4(ha, "SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", cmd);
2811 ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
2816 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2818 QL_DPRINT4(ha, "SIOCSIFCAP (0x%lx)\n", cmd);
2820 if (mask & IFCAP_HWCSUM)
2821 ifp->if_capenable ^= IFCAP_HWCSUM;
2822 if (mask & IFCAP_TSO4)
2823 ifp->if_capenable ^= IFCAP_TSO4;
2824 if (mask & IFCAP_TSO6)
2825 ifp->if_capenable ^= IFCAP_TSO6;
2826 if (mask & IFCAP_VLAN_HWTAGGING)
2827 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2828 if (mask & IFCAP_VLAN_HWTSO)
2829 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
2830 if (mask & IFCAP_LRO)
2831 ifp->if_capenable ^= IFCAP_LRO;
2835 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2836 qlnx_init_locked(ha);
2840 VLAN_CAPABILITIES(ifp);
2843 #if (__FreeBSD_version >= 1100101)
2847 struct ifi2creq i2c;
2848 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0];
2849 struct ecore_ptt *p_ptt;
2851 ret = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
2856 if ((i2c.len > sizeof (i2c.data)) ||
2857 (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2)) {
2862 p_ptt = ecore_ptt_acquire(p_hwfn);
2865 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
2870 ret = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt,
2871 (ha->pci_func & 0x1), i2c.dev_addr, i2c.offset,
2872 i2c.len, &i2c.data[0]);
2874 ecore_ptt_release(p_hwfn, p_ptt);
2881 ret = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
2883 QL_DPRINT8(ha, "SIOCGI2C copyout ret = %d \
2884 len = %d addr = 0x%02x offset = 0x%04x \
2885 data[0..7]=0x%02x 0x%02x 0x%02x 0x%02x 0x%02x \
2886 0x%02x 0x%02x 0x%02x\n",
2887 ret, i2c.len, i2c.dev_addr, i2c.offset,
2888 i2c.data[0], i2c.data[1], i2c.data[2], i2c.data[3],
2889 i2c.data[4], i2c.data[5], i2c.data[6], i2c.data[7]);
2892 #endif /* #if (__FreeBSD_version >= 1100101) */
2895 QL_DPRINT4(ha, "default (0x%lx)\n", cmd);
2896 ret = ether_ioctl(ifp, cmd, data);
2904 qlnx_media_change(struct ifnet *ifp)
2907 struct ifmedia *ifm;
2910 ha = (qlnx_host_t *)ifp->if_softc;
2912 QL_DPRINT2(ha, "enter\n");
2916 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2919 QL_DPRINT2(ha, "exit\n");
2925 qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2929 ha = (qlnx_host_t *)ifp->if_softc;
2931 QL_DPRINT2(ha, "enter\n");
2933 ifmr->ifm_status = IFM_AVALID;
2934 ifmr->ifm_active = IFM_ETHER;
2937 ifmr->ifm_status |= IFM_ACTIVE;
2939 (IFM_FDX | qlnx_get_optics(ha, &ha->if_link));
2941 if (ha->if_link.link_partner_caps &
2942 (QLNX_LINK_CAP_Pause | QLNX_LINK_CAP_Asym_Pause))
2944 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE);
2947 QL_DPRINT2(ha, "exit (%s)\n", (ha->link_up ? "link_up" : "link_down"));
2954 qlnx_free_tx_pkt(qlnx_host_t *ha, struct qlnx_fastpath *fp,
2955 struct qlnx_tx_queue *txq)
2961 struct eth_tx_bd *tx_data_bd;
2962 struct eth_tx_1st_bd *first_bd;
2965 idx = txq->sw_tx_cons;
2966 mp = txq->sw_tx_ring[idx].mp;
2967 map = txq->sw_tx_ring[idx].map;
2969 if ((mp == NULL) || QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL)){
2971 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL);
2973 QL_DPRINT1(ha, "(mp == NULL) "
2975 " ecore_prod_idx = 0x%x"
2976 " ecore_cons_idx = 0x%x"
2977 " hw_bd_cons = 0x%x"
2978 " txq_db_last = 0x%x"
2979 " elem_left = 0x%x\n",
2981 ecore_chain_get_prod_idx(&txq->tx_pbl),
2982 ecore_chain_get_cons_idx(&txq->tx_pbl),
2983 le16toh(*txq->hw_cons_ptr),
2985 ecore_chain_get_elem_left(&txq->tx_pbl));
2987 fp->err_tx_free_pkt_null++;
2990 qlnx_trigger_dump(ha);
2995 QLNX_INC_OPACKETS((ha->ifp));
2996 QLNX_INC_OBYTES((ha->ifp), (mp->m_pkthdr.len));
2998 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_POSTWRITE);
2999 bus_dmamap_unload(ha->tx_tag, map);
3001 fp->tx_pkts_freed++;
3002 fp->tx_pkts_completed++;
3007 first_bd = (struct eth_tx_1st_bd *)ecore_chain_consume(&txq->tx_pbl);
3008 nbds = first_bd->data.nbds;
3010 // BD_SET_UNMAP_ADDR_LEN(first_bd, 0, 0);
3012 for (i = 1; i < nbds; i++) {
3013 tx_data_bd = ecore_chain_consume(&txq->tx_pbl);
3014 // BD_SET_UNMAP_ADDR_LEN(tx_data_bd, 0, 0);
3016 txq->sw_tx_ring[idx].flags = 0;
3017 txq->sw_tx_ring[idx].mp = NULL;
3018 txq->sw_tx_ring[idx].map = (bus_dmamap_t)0;
3024 qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
3025 struct qlnx_tx_queue *txq)
3032 hw_bd_cons = le16toh(*txq->hw_cons_ptr);
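	/*
	 * The hardware completion index is a free-running 16-bit counter,
	 * so when it has wrapped past the chain's consumer index the
	 * distance must be computed modulo 2^16 before it can be sanity
	 * checked against TX_RING_SIZE below.
	 */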
3034 while (hw_bd_cons !=
3035 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
3037 if (hw_bd_cons < ecore_cons_idx) {
3038 diff = (1 << 16) - (ecore_cons_idx - hw_bd_cons);
3040 diff = hw_bd_cons - ecore_cons_idx;
3042 if ((diff > TX_RING_SIZE) ||
3043 QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF)){
3045 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF);
3047 QL_DPRINT1(ha, "(diff = 0x%x) "
3049 " ecore_prod_idx = 0x%x"
3050 " ecore_cons_idx = 0x%x"
3051 " hw_bd_cons = 0x%x"
3052 " txq_db_last = 0x%x"
3053 " elem_left = 0x%x\n",
3056 ecore_chain_get_prod_idx(&txq->tx_pbl),
3057 ecore_chain_get_cons_idx(&txq->tx_pbl),
3058 le16toh(*txq->hw_cons_ptr),
3060 ecore_chain_get_elem_left(&txq->tx_pbl));
3062 fp->err_tx_cons_idx_conflict++;
3065 qlnx_trigger_dump(ha);
3068 idx = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1);
3069 idx2 = (txq->sw_tx_cons + 2) & (TX_RING_SIZE - 1);
3070 prefetch(txq->sw_tx_ring[idx].mp);
3071 prefetch(txq->sw_tx_ring[idx2].mp);
3073 qlnx_free_tx_pkt(ha, fp, txq);
3075 txq->sw_tx_cons = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1);
3081 qlnx_transmit_locked(struct ifnet *ifp,struct qlnx_fastpath *fp, struct mbuf *mp)
3084 struct qlnx_tx_queue *txq;
3089 ha = (qlnx_host_t *)fp->edev;
3092 if ((!(ifp->if_drv_flags & IFF_DRV_RUNNING)) || (!ha->link_up)) {
3094 ret = drbr_enqueue(ifp, fp->tx_br, mp);
3099 ret = drbr_enqueue(ifp, fp->tx_br, mp);
3101 mp = drbr_peek(ifp, fp->tx_br);
3103 while (mp != NULL) {
3105 if (qlnx_send(ha, fp, &mp)) {
3108 drbr_putback(ifp, fp->tx_br, mp);
3110 fp->tx_pkts_processed++;
3111 drbr_advance(ifp, fp->tx_br);
3113 goto qlnx_transmit_locked_exit;
3116 drbr_advance(ifp, fp->tx_br);
3117 fp->tx_pkts_transmitted++;
3118 fp->tx_pkts_processed++;
3121 mp = drbr_peek(ifp, fp->tx_br);
3124 qlnx_transmit_locked_exit:
3125 if((qlnx_num_tx_compl(ha,fp, fp->txq[0]) > QLNX_TX_COMPL_THRESH) ||
3126 ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))
3127 < QLNX_TX_ELEM_MAX_THRESH))
3128 (void)qlnx_tx_int(ha, fp, fp->txq[0]);
3130 QL_DPRINT2(ha, "%s: exit ret = %d\n", __func__, ret);
3136 qlnx_transmit(struct ifnet *ifp, struct mbuf *mp)
3138 qlnx_host_t *ha = (qlnx_host_t *)ifp->if_softc;
3139 struct qlnx_fastpath *fp;
3140 int rss_id = 0, ret = 0;
3142 #ifdef QLNX_TRACEPERF_DATA
3143 uint64_t tx_pkts = 0, tx_compl = 0;
3146 QL_DPRINT2(ha, "enter\n");
#if __FreeBSD_version >= 1100000
	if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE)
#else
	if (mp->m_flags & M_FLOWID)
#endif
		rss_id = (mp->m_pkthdr.flowid % ECORE_RSS_IND_TABLE_SIZE) %
		    ha->num_rss;
3156 fp = &ha->fp_array[rss_id];
3158 if (fp->tx_br == NULL) {
3160 goto qlnx_transmit_exit;
3163 if (mtx_trylock(&fp->tx_mtx)) {
3165 #ifdef QLNX_TRACEPERF_DATA
3166 tx_pkts = fp->tx_pkts_transmitted;
3167 tx_compl = fp->tx_pkts_completed;
3170 ret = qlnx_transmit_locked(ifp, fp, mp);
3172 #ifdef QLNX_TRACEPERF_DATA
3173 fp->tx_pkts_trans_ctx += (fp->tx_pkts_transmitted - tx_pkts);
3174 fp->tx_pkts_compl_ctx += (fp->tx_pkts_completed - tx_compl);
3176 mtx_unlock(&fp->tx_mtx);
3178 if (mp != NULL && (fp->fp_taskqueue != NULL)) {
3179 ret = drbr_enqueue(ifp, fp->tx_br, mp);
3180 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
3186 QL_DPRINT2(ha, "exit ret = %d\n", ret);
3191 qlnx_qflush(struct ifnet *ifp)
3194 struct qlnx_fastpath *fp;
3198 ha = (qlnx_host_t *)ifp->if_softc;
3200 QL_DPRINT2(ha, "enter\n");
3202 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
3204 fp = &ha->fp_array[rss_id];
3210 mtx_lock(&fp->tx_mtx);
3212 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
3213 fp->tx_pkts_freed++;
3216 mtx_unlock(&fp->tx_mtx);
3219 QL_DPRINT2(ha, "exit\n");
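
/*
 * Write a 32-bit Tx doorbell through the doorbell BAR. The read barriers
 * that follow the write appear intended to flush the posted write so the
 * adapter sees the new producer value before the CPU moves on.
 */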
3225 qlnx_txq_doorbell_wr32(qlnx_host_t *ha, void *reg_addr, uint32_t value)
3227 struct ecore_dev *cdev;
3232 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)ha->pci_dbells);
3234 bus_write_4(ha->pci_dbells, offset, value);
3235 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_READ);
3236 bus_barrier(ha->pci_dbells, 0, 0, BUS_SPACE_BARRIER_READ);
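
/*
 * qlnx_tcp_offset
 * Returns the byte offset from the start of the frame to the end of the
 * TCP header (Ethernet/VLAN header plus IPv4 or IPv6 header plus TCP
 * header including options). The LSO path uses this to determine how
 * much of the leading data is protocol header.
 */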
3242 qlnx_tcp_offset(qlnx_host_t *ha, struct mbuf *mp)
3244 struct ether_vlan_header *eh = NULL;
3245 struct ip *ip = NULL;
3246 struct ip6_hdr *ip6 = NULL;
3247 struct tcphdr *th = NULL;
3248 uint32_t ehdrlen = 0, ip_hlen = 0, offset = 0;
3251 uint8_t buf[sizeof(struct ip6_hdr)];
3255 eh = mtod(mp, struct ether_vlan_header *);
3257 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3258 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3259 etype = ntohs(eh->evl_proto);
3261 ehdrlen = ETHER_HDR_LEN;
3262 etype = ntohs(eh->evl_encap_proto);
3268 ip = (struct ip *)(mp->m_data + ehdrlen);
3270 ip_hlen = sizeof (struct ip);
3272 if (mp->m_len < (ehdrlen + ip_hlen)) {
3273 m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
3274 ip = (struct ip *)buf;
3277 th = (struct tcphdr *)(ip + 1);
3278 offset = ip_hlen + ehdrlen + (th->th_off << 2);
3281 case ETHERTYPE_IPV6:
3282 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3284 ip_hlen = sizeof(struct ip6_hdr);
3286 if (mp->m_len < (ehdrlen + ip_hlen)) {
3287 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
3289 ip6 = (struct ip6_hdr *)buf;
3291 th = (struct tcphdr *)(ip6 + 1);
3292 offset = ip_hlen + ehdrlen + (th->th_off << 2);
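
/*
 * qlnx_tso_check
 * Enforces the firmware LSO window rule: every run of
 * ETH_TX_LSO_WINDOW_BDS_NUM BDs (less those consumed by the headers)
 * must carry at least ETH_TX_LSO_WINDOW_MIN_LEN bytes of payload. A
 * non-zero return tells the caller to defragment the mbuf chain first.
 */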
3303 qlnx_tso_check(struct qlnx_fastpath *fp, bus_dma_segment_t *segs, int nsegs,
3307 uint32_t sum, nbds_in_hdr = 1;
3309 bus_dma_segment_t *s_seg;
/* If the header spans multiple segments, skip those segments */
3313 if (nsegs < ETH_TX_LSO_WINDOW_BDS_NUM)
3318 while ((i < nsegs) && (offset >= segs->ds_len)) {
3319 offset = offset - segs->ds_len;
3325 window = ETH_TX_LSO_WINDOW_BDS_NUM - nbds_in_hdr;
3329 while (nsegs >= window) {
3334 for (i = 0; i < window; i++){
3335 sum += s_seg->ds_len;
3339 if (sum < ETH_TX_LSO_WINDOW_MIN_LEN) {
3340 fp->tx_lso_wnd_min_len++;
3352 qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct mbuf **m_headp)
3354 bus_dma_segment_t *segs;
3355 bus_dmamap_t map = 0;
3358 struct mbuf *m_head = *m_headp;
3363 struct qlnx_tx_queue *txq;
3365 struct eth_tx_1st_bd *first_bd;
3366 struct eth_tx_2nd_bd *second_bd;
3367 struct eth_tx_3rd_bd *third_bd;
3368 struct eth_tx_bd *tx_data_bd;
3371 uint32_t nbds_in_hdr = 0;
3372 uint32_t offset = 0;
3374 #ifdef QLNX_TRACE_PERF_DATA
3378 QL_DPRINT8(ha, "enter[%d]\n", fp->rss_id);
3390 if ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl)) <
3391 QLNX_TX_ELEM_MIN_THRESH) {
3393 fp->tx_nsegs_gt_elem_left++;
3394 fp->err_tx_nsegs_gt_elem_left++;
3399 idx = txq->sw_tx_prod;
3401 map = txq->sw_tx_ring[idx].map;
3404 ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
3407 if (ha->dbg_trace_tso_pkt_len) {
3408 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
if (!fp->tx_tso_min_pkt_len) {
	fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len;
	fp->tx_tso_max_pkt_len = m_head->m_pkthdr.len;
3413 if (fp->tx_tso_min_pkt_len > m_head->m_pkthdr.len)
3414 fp->tx_tso_min_pkt_len =
3415 m_head->m_pkthdr.len;
3416 if (fp->tx_tso_max_pkt_len < m_head->m_pkthdr.len)
3417 fp->tx_tso_max_pkt_len =
3418 m_head->m_pkthdr.len;
3423 if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
3424 offset = qlnx_tcp_offset(ha, m_head);
3426 if ((ret == EFBIG) ||
3427 ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && (
3428 (!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) ||
3429 ((m_head->m_pkthdr.csum_flags & CSUM_TSO) &&
3430 qlnx_tso_check(fp, segs, nsegs, offset))))) {
3434 QL_DPRINT8(ha, "EFBIG [%d]\n", m_head->m_pkthdr.len);
3438 m = m_defrag(m_head, M_NOWAIT);
3440 fp->err_tx_defrag++;
3441 fp->tx_pkts_freed++;
3444 QL_DPRINT1(ha, "m_defrag() = NULL [%d]\n", ret);
3451 if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
3452 segs, &nsegs, BUS_DMA_NOWAIT))) {
3454 fp->err_tx_defrag_dmamap_load++;
3457 "bus_dmamap_load_mbuf_sg failed0 [%d, %d]\n",
3458 ret, m_head->m_pkthdr.len);
3460 fp->tx_pkts_freed++;
3467 if ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) &&
3468 !(m_head->m_pkthdr.csum_flags & CSUM_TSO)) {
3470 fp->err_tx_non_tso_max_seg++;
3473 "(%d) nsegs too many for non-TSO [%d, %d]\n",
3474 ret, nsegs, m_head->m_pkthdr.len);
3476 fp->tx_pkts_freed++;
3482 if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
3483 offset = qlnx_tcp_offset(ha, m_head);
3487 fp->err_tx_dmamap_load++;
3489 QL_DPRINT1(ha, "bus_dmamap_load_mbuf_sg failed1 [%d, %d]\n",
3490 ret, m_head->m_pkthdr.len);
3491 fp->tx_pkts_freed++;
3497 QL_ASSERT(ha, (nsegs != 0), ("qlnx_send: empty packet"));
3499 if (ha->dbg_trace_tso_pkt_len) {
3500 if (nsegs < QLNX_FP_MAX_SEGS)
3501 fp->tx_pkts[(nsegs - 1)]++;
3503 fp->tx_pkts[(QLNX_FP_MAX_SEGS - 1)]++;
3506 #ifdef QLNX_TRACE_PERF_DATA
3507 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3508 if(m_head->m_pkthdr.len <= 2048)
3509 fp->tx_pkts_hist[0]++;
3510 else if((m_head->m_pkthdr.len > 2048) &&
3511 (m_head->m_pkthdr.len <= 4096))
3512 fp->tx_pkts_hist[1]++;
3513 else if((m_head->m_pkthdr.len > 4096) &&
3514 (m_head->m_pkthdr.len <= 8192))
3515 fp->tx_pkts_hist[2]++;
3516 else if((m_head->m_pkthdr.len > 8192) &&
	(m_head->m_pkthdr.len <= 12288))
	fp->tx_pkts_hist[3]++;
else if((m_head->m_pkthdr.len > 12288) &&
	(m_head->m_pkthdr.len <= 16384))
3521 fp->tx_pkts_hist[4]++;
3522 else if((m_head->m_pkthdr.len > 16384) &&
3523 (m_head->m_pkthdr.len <= 20480))
3524 fp->tx_pkts_hist[5]++;
3525 else if((m_head->m_pkthdr.len > 20480) &&
3526 (m_head->m_pkthdr.len <= 24576))
3527 fp->tx_pkts_hist[6]++;
3528 else if((m_head->m_pkthdr.len > 24576) &&
3529 (m_head->m_pkthdr.len <= 28672))
3530 fp->tx_pkts_hist[7]++;
else if((m_head->m_pkthdr.len > 28672) &&
3532 (m_head->m_pkthdr.len <= 32768))
3533 fp->tx_pkts_hist[8]++;
3534 else if((m_head->m_pkthdr.len > 32768) &&
3535 (m_head->m_pkthdr.len <= 36864))
3536 fp->tx_pkts_hist[9]++;
3537 else if((m_head->m_pkthdr.len > 36864) &&
3538 (m_head->m_pkthdr.len <= 40960))
3539 fp->tx_pkts_hist[10]++;
3540 else if((m_head->m_pkthdr.len > 40960) &&
3541 (m_head->m_pkthdr.len <= 45056))
3542 fp->tx_pkts_hist[11]++;
3543 else if((m_head->m_pkthdr.len > 45056) &&
3544 (m_head->m_pkthdr.len <= 49152))
3545 fp->tx_pkts_hist[12]++;
else if((m_head->m_pkthdr.len > 49152) &&
	(m_head->m_pkthdr.len <= 53248))
	fp->tx_pkts_hist[13]++;
3549 else if((m_head->m_pkthdr.len > 53248) &&
3550 (m_head->m_pkthdr.len <= 57344))
3551 fp->tx_pkts_hist[14]++;
else if((m_head->m_pkthdr.len > 53248) &&
	(m_head->m_pkthdr.len <= 57344))
	/* unreachable: shadowed by the identical range test above */
	fp->tx_pkts_hist[15]++;
3555 else if((m_head->m_pkthdr.len > 57344) &&
3556 (m_head->m_pkthdr.len <= 61440))
3557 fp->tx_pkts_hist[16]++;
	else
		fp->tx_pkts_hist[17]++;
}
3562 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3564 elem_left = ecore_chain_get_elem_left(&txq->tx_pbl);
3565 bd_used = TX_RING_SIZE - elem_left;
	if (bd_used <= 100)
		fp->tx_pkts_q[0]++;
	else if((bd_used > 100) && (bd_used <= 500))
		fp->tx_pkts_q[1]++;
	else if((bd_used > 500) && (bd_used <= 1000))
		fp->tx_pkts_q[2]++;
	else if((bd_used > 1000) && (bd_used <= 2000))
		fp->tx_pkts_q[3]++;
	/* assumed intent: cover 2000..4000 so no bd_used range is skipped */
	else if((bd_used > 2000) && (bd_used <= 4000))
		fp->tx_pkts_q[4]++;
	else if((bd_used > 4000) && (bd_used <= 5000))
		fp->tx_pkts_q[5]++;
	/* assumed intent: cover 5000..7000 so no bd_used range is skipped */
	else if((bd_used > 5000) && (bd_used <= 7000))
		fp->tx_pkts_q[6]++;
	else if((bd_used > 7000) && (bd_used <= 8000))
		fp->tx_pkts_q[7]++;
	else if((bd_used > 8000) && (bd_used <= 9000))
		fp->tx_pkts_q[8]++;
	else if((bd_used > 9000) && (bd_used <= 10000))
		fp->tx_pkts_q[9]++;
	else if((bd_used > 10000) && (bd_used <= 11000))
		fp->tx_pkts_q[10]++;
	else if((bd_used > 11000) && (bd_used <= 12000))
		fp->tx_pkts_q[11]++;
	else if((bd_used > 12000) && (bd_used <= 13000))
		fp->tx_pkts_q[12]++;
	else if((bd_used > 13000) && (bd_used <= 14000))
		fp->tx_pkts_q[13]++;
	else if((bd_used > 14000) && (bd_used <= 15000))
		fp->tx_pkts_q[14]++;
	else if((bd_used > 15000) && (bd_used <= 16000))
		fp->tx_pkts_q[15]++;
	else
		fp->tx_pkts_q[16]++;
}
3603 #endif /* end of QLNX_TRACE_PERF_DATA */
3605 if ((nsegs + QLNX_TX_ELEM_RESERVE) >
3606 (int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))) {
3608 QL_DPRINT1(ha, "(%d, 0x%x) insuffient BDs"
3609 " in chain[%d] trying to free packets\n",
3610 nsegs, elem_left, fp->rss_id);
3612 fp->tx_nsegs_gt_elem_left++;
3614 (void)qlnx_tx_int(ha, fp, txq);
3616 if ((nsegs + QLNX_TX_ELEM_RESERVE) > (int)(elem_left =
3617 ecore_chain_get_elem_left(&txq->tx_pbl))) {
3620 "(%d, 0x%x) insuffient BDs in chain[%d]\n",
3621 nsegs, elem_left, fp->rss_id);
3623 fp->err_tx_nsegs_gt_elem_left++;
3624 fp->tx_ring_full = 1;
3625 if (ha->storm_stats_enable)
3626 ha->storm_stats_gather = 1;
3631 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);
3633 txq->sw_tx_ring[idx].mp = m_head;
3635 first_bd = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
3637 memset(first_bd, 0, sizeof(*first_bd));
3639 first_bd->data.bd_flags.bitfields =
3640 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
3642 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, segs->ds_len);
3646 if (m_head->m_pkthdr.csum_flags & CSUM_IP) {
3647 first_bd->data.bd_flags.bitfields |=
3648 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
3651 if (m_head->m_pkthdr.csum_flags &
3652 (CSUM_UDP | CSUM_TCP | CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) {
3653 first_bd->data.bd_flags.bitfields |=
3654 (1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT);
3657 if (m_head->m_flags & M_VLANTAG) {
3658 first_bd->data.vlan = m_head->m_pkthdr.ether_vtag;
3659 first_bd->data.bd_flags.bitfields |=
3660 (1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT);
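	/*
	 * For LSO the first three BDs carry the metadata: the first BD
	 * holds the protocol headers (spilling into the second BD when the
	 * headers cross a DMA segment boundary), and the third BD supplies
	 * the MSS and the number of BDs consumed by the headers.
	 */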
3663 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3665 first_bd->data.bd_flags.bitfields |=
3666 (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
3667 first_bd->data.bd_flags.bitfields |=
3668 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
3672 if (offset == segs->ds_len) {
3673 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
3677 second_bd = (struct eth_tx_2nd_bd *)
3678 ecore_chain_produce(&txq->tx_pbl);
3679 memset(second_bd, 0, sizeof(*second_bd));
3682 if (seg_idx < nsegs) {
3683 BD_SET_UNMAP_ADDR_LEN(second_bd, \
3684 (segs->ds_addr), (segs->ds_len));
3689 third_bd = (struct eth_tx_3rd_bd *)
3690 ecore_chain_produce(&txq->tx_pbl);
3691 memset(third_bd, 0, sizeof(*third_bd));
3692 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3693 third_bd->data.bitfields |=
3694 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3697 if (seg_idx < nsegs) {
3698 BD_SET_UNMAP_ADDR_LEN(third_bd, \
3699 (segs->ds_addr), (segs->ds_len));
3704 for (; seg_idx < nsegs; seg_idx++) {
3705 tx_data_bd = (struct eth_tx_bd *)
3706 ecore_chain_produce(&txq->tx_pbl);
3707 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3708 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \
3715 } else if (offset < segs->ds_len) {
3716 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
3718 second_bd = (struct eth_tx_2nd_bd *)
3719 ecore_chain_produce(&txq->tx_pbl);
3720 memset(second_bd, 0, sizeof(*second_bd));
3721 BD_SET_UNMAP_ADDR_LEN(second_bd, \
3722 (segs->ds_addr + offset),\
3723 (segs->ds_len - offset));
3727 third_bd = (struct eth_tx_3rd_bd *)
3728 ecore_chain_produce(&txq->tx_pbl);
3729 memset(third_bd, 0, sizeof(*third_bd));
3731 BD_SET_UNMAP_ADDR_LEN(third_bd, \
3734 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3735 third_bd->data.bitfields |=
3736 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3740 for (seg_idx = 2; seg_idx < nsegs; seg_idx++) {
3741 tx_data_bd = (struct eth_tx_bd *)
3742 ecore_chain_produce(&txq->tx_pbl);
3743 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3744 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \
3752 offset = offset - segs->ds_len;
3755 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) {
3760 tx_data_bd = (struct eth_tx_bd *)
3761 ecore_chain_produce(&txq->tx_pbl);
3762 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3764 if (second_bd == NULL) {
3765 second_bd = (struct eth_tx_2nd_bd *)
3767 } else if (third_bd == NULL) {
3768 third_bd = (struct eth_tx_3rd_bd *)
3772 if (offset && (offset < segs->ds_len)) {
3773 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3774 segs->ds_addr, offset);
3776 tx_data_bd = (struct eth_tx_bd *)
3777 ecore_chain_produce(&txq->tx_pbl);
3779 memset(tx_data_bd, 0,
3780 sizeof(*tx_data_bd));
3782 if (second_bd == NULL) {
3784 (struct eth_tx_2nd_bd *)tx_data_bd;
3785 } else if (third_bd == NULL) {
3787 (struct eth_tx_3rd_bd *)tx_data_bd;
3789 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3790 (segs->ds_addr + offset), \
3791 (segs->ds_len - offset));
3796 offset = offset - segs->ds_len;
3797 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3798 segs->ds_addr, segs->ds_len);
3804 if (third_bd == NULL) {
3805 third_bd = (struct eth_tx_3rd_bd *)
3806 ecore_chain_produce(&txq->tx_pbl);
3807 memset(third_bd, 0, sizeof(*third_bd));
3810 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3811 third_bd->data.bitfields |=
3812 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3817 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) {
3818 tx_data_bd = (struct eth_tx_bd *)
3819 ecore_chain_produce(&txq->tx_pbl);
3820 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3821 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, segs->ds_addr,\
3826 first_bd->data.bitfields =
3827 (m_head->m_pkthdr.len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
3828 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
3829 first_bd->data.bitfields =
3830 htole16(first_bd->data.bitfields);
3831 fp->tx_non_tso_pkts++;
3835 first_bd->data.nbds = nbd;
3837 if (ha->dbg_trace_tso_pkt_len) {
3838 if (fp->tx_tso_max_nsegs < nsegs)
3839 fp->tx_tso_max_nsegs = nsegs;
3841 if ((nsegs < fp->tx_tso_min_nsegs) || (!fp->tx_tso_min_nsegs))
3842 fp->tx_tso_min_nsegs = nsegs;
3845 txq->sw_tx_ring[idx].nsegs = nsegs;
3846 txq->sw_tx_prod = (txq->sw_tx_prod + 1) & (TX_RING_SIZE - 1);
3848 txq->tx_db.data.bd_prod =
3849 htole16(ecore_chain_get_prod_idx(&txq->tx_pbl));
3851 qlnx_txq_doorbell_wr32(ha, txq->doorbell_addr, txq->tx_db.raw);
3853 QL_DPRINT8(ha, "exit[%d]\n", fp->rss_id);
3858 qlnx_stop(qlnx_host_t *ha)
3860 struct ifnet *ifp = ha->ifp;
3866 ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
3869 * We simply lock and unlock each fp->tx_mtx to
3870 * propagate the if_drv_flags
3871 * state to each tx thread
3873 QL_DPRINT1(ha, "QLNX STATE = %d\n",ha->state);
3875 if (ha->state == QLNX_STATE_OPEN) {
3876 for (i = 0; i < ha->num_rss; i++) {
3877 struct qlnx_fastpath *fp = &ha->fp_array[i];
3879 mtx_lock(&fp->tx_mtx);
3880 mtx_unlock(&fp->tx_mtx);
3882 if (fp->fp_taskqueue != NULL)
3883 taskqueue_enqueue(fp->fp_taskqueue,
3887 #ifdef QLNX_ENABLE_IWARP
3888 if (qlnx_vf_device(ha) != 0) {
3889 qlnx_rdma_dev_close(ha);
3891 #endif /* #ifdef QLNX_ENABLE_IWARP */
3899 qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha)
3901 return(TX_RING_SIZE - 1);
3905 qlnx_get_mac_addr(qlnx_host_t *ha)
3907 struct ecore_hwfn *p_hwfn;
3908 unsigned char mac[ETHER_ADDR_LEN];
3909 uint8_t p_is_forced;
3911 p_hwfn = &ha->cdev.hwfns[0];
3913 if (qlnx_vf_device(ha) != 0)
3914 return (p_hwfn->hw_info.hw_mac_addr);
3916 ecore_vf_read_bulletin(p_hwfn, &p_is_forced);
3917 if (ecore_vf_bulletin_get_forced_mac(p_hwfn, mac, &p_is_forced) ==
3919 device_printf(ha->pci_dev, "%s: p_is_forced = %d"
3920 " mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n", __func__,
3921 p_is_forced, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3922 memcpy(ha->primary_mac, mac, ETH_ALEN);
3925 return (ha->primary_mac);
3929 qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link)
3931 uint32_t ifm_type = 0;
3933 switch (if_link->media_type) {
3935 case MEDIA_MODULE_FIBER:
3936 case MEDIA_UNSPECIFIED:
3937 if (if_link->speed == (100 * 1000))
3938 ifm_type = QLNX_IFM_100G_SR4;
3939 else if (if_link->speed == (40 * 1000))
3940 ifm_type = IFM_40G_SR4;
3941 else if (if_link->speed == (25 * 1000))
3942 ifm_type = QLNX_IFM_25G_SR;
3943 else if (if_link->speed == (10 * 1000))
3944 ifm_type = (IFM_10G_LR | IFM_10G_SR);
3945 else if (if_link->speed == (1 * 1000))
3946 ifm_type = (IFM_1000_SX | IFM_1000_LX);
3950 case MEDIA_DA_TWINAX:
3951 if (if_link->speed == (100 * 1000))
3952 ifm_type = QLNX_IFM_100G_CR4;
3953 else if (if_link->speed == (40 * 1000))
3954 ifm_type = IFM_40G_CR4;
3955 else if (if_link->speed == (25 * 1000))
3956 ifm_type = QLNX_IFM_25G_CR;
3957 else if (if_link->speed == (10 * 1000))
3958 ifm_type = IFM_10G_TWINAX;
3963 ifm_type = IFM_UNKNOWN;
3971 /*****************************************************************************
3972 * Interrupt Service Functions
3973 *****************************************************************************/
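
/*
 * qlnx_rx_jumbo_chain
 * Collects the continuation buffers of a frame that spans multiple Rx
 * BDs into an mbuf chain appended to mp_head, replenishing the ring (or
 * reusing the buffer and dropping the packet on allocation failure).
 */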
3976 qlnx_rx_jumbo_chain(qlnx_host_t *ha, struct qlnx_fastpath *fp,
3977 struct mbuf *mp_head, uint16_t len)
3979 struct mbuf *mp, *mpf, *mpl;
3980 struct sw_rx_data *sw_rx_data;
3981 struct qlnx_rx_queue *rxq;
3982 uint16_t len_in_buffer;
3985 mpf = mpl = mp = NULL;
3989 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3991 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3992 mp = sw_rx_data->data;
3995 QL_DPRINT1(ha, "mp = NULL\n");
3996 fp->err_rx_mp_null++;
3998 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4005 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4006 BUS_DMASYNC_POSTREAD);
4008 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4010 QL_DPRINT1(ha, "New buffer allocation failed, dropping"
4011 " incoming packet and reusing its buffer\n");
4013 qlnx_reuse_rx_data(rxq);
4014 fp->err_rx_alloc_errors++;
4021 ecore_chain_consume(&rxq->rx_bd_ring);
4023 if (len > rxq->rx_buf_size)
4024 len_in_buffer = rxq->rx_buf_size;
4026 len_in_buffer = len;
4028 len = len - len_in_buffer;
4030 mp->m_flags &= ~M_PKTHDR;
4032 mp->m_len = len_in_buffer;
4043 mp_head->m_next = mpf;
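
/*
 * TPA (transparent packet aggregation) is the adapter's hardware LRO:
 * firmware opens an aggregation with a TPA_START CQE, extends it with
 * TPA_CONT CQEs and closes it with a TPA_END CQE. qlnx_tpa_start parks
 * the head mbuf in rxq->tpa_info[agg_index] and replenishes the Rx ring
 * for every BD the firmware has consumed.
 */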
4049 qlnx_tpa_start(qlnx_host_t *ha,
4050 struct qlnx_fastpath *fp,
4051 struct qlnx_rx_queue *rxq,
4052 struct eth_fast_path_rx_tpa_start_cqe *cqe)
4055 struct ifnet *ifp = ha->ifp;
4057 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL;
4058 struct sw_rx_data *sw_rx_data;
4061 struct eth_rx_bd *rx_bd;
4064 #if __FreeBSD_version >= 1100000
4066 #endif /* #if __FreeBSD_version >= 1100000 */
4069 agg_index = cqe->tpa_agg_index;
4071 QL_DPRINT7(ha, "[rss_id = %d]: enter\n \
4073 \t bitfields = 0x%x\n \
4074 \t seg_len = 0x%x\n \
4075 \t pars_flags = 0x%x\n \
4076 \t vlan_tag = 0x%x\n \
4077 \t rss_hash = 0x%x\n \
4078 \t len_on_first_bd = 0x%x\n \
4079 \t placement_offset = 0x%x\n \
4080 \t tpa_agg_index = 0x%x\n \
4081 \t header_len = 0x%x\n \
4082 \t ext_bd_len_list[0] = 0x%x\n \
4083 \t ext_bd_len_list[1] = 0x%x\n \
4084 \t ext_bd_len_list[2] = 0x%x\n \
4085 \t ext_bd_len_list[3] = 0x%x\n \
4086 \t ext_bd_len_list[4] = 0x%x\n",
4087 fp->rss_id, cqe->type, cqe->bitfields, cqe->seg_len,
4088 cqe->pars_flags.flags, cqe->vlan_tag,
4089 cqe->rss_hash, cqe->len_on_first_bd, cqe->placement_offset,
4090 cqe->tpa_agg_index, cqe->header_len,
4091 cqe->ext_bd_len_list[0], cqe->ext_bd_len_list[1],
4092 cqe->ext_bd_len_list[2], cqe->ext_bd_len_list[3],
4093 cqe->ext_bd_len_list[4]);
4095 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
4096 fp->err_rx_tpa_invalid_agg_num++;
4100 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4101 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD);
4102 mp = sw_rx_data->data;
4104 QL_DPRINT7(ha, "[rss_id = %d]: mp = %p \n ", fp->rss_id, mp);
4107 QL_DPRINT7(ha, "[%d]: mp = NULL\n", fp->rss_id);
4108 fp->err_rx_mp_null++;
4109 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4114 if ((le16toh(cqe->pars_flags.flags)) & CQE_FLAGS_ERR) {
4116 QL_DPRINT7(ha, "[%d]: CQE in CONS = %u has error,"
4117 " flags = %x, dropping incoming packet\n", fp->rss_id,
4118 rxq->sw_rx_cons, le16toh(cqe->pars_flags.flags));
4120 fp->err_rx_hw_errors++;
4122 qlnx_reuse_rx_data(rxq);
4124 QLNX_INC_IERRORS(ifp);
4129 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4131 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4132 " dropping incoming packet and reusing its buffer\n",
4135 fp->err_rx_alloc_errors++;
4136 QLNX_INC_IQDROPS(ifp);
4139 * Load the tpa mbuf into the rx ring and save the
4143 map = sw_rx_data->map;
4144 addr = sw_rx_data->dma_addr;
4146 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
4148 sw_rx_data->data = rxq->tpa_info[agg_index].rx_buf.data;
4149 sw_rx_data->dma_addr = rxq->tpa_info[agg_index].rx_buf.dma_addr;
4150 sw_rx_data->map = rxq->tpa_info[agg_index].rx_buf.map;
4152 rxq->tpa_info[agg_index].rx_buf.data = mp;
4153 rxq->tpa_info[agg_index].rx_buf.dma_addr = addr;
4154 rxq->tpa_info[agg_index].rx_buf.map = map;
4156 rx_bd = (struct eth_rx_bd *)
4157 ecore_chain_produce(&rxq->rx_bd_ring);
4159 rx_bd->addr.hi = htole32(U64_HI(sw_rx_data->dma_addr));
4160 rx_bd->addr.lo = htole32(U64_LO(sw_rx_data->dma_addr));
4162 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4163 BUS_DMASYNC_PREREAD);
4165 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
4166 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4168 ecore_chain_consume(&rxq->rx_bd_ring);
4170 /* Now reuse any buffers posted in ext_bd_len_list */
4171 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
4173 if (cqe->ext_bd_len_list[i] == 0)
4176 qlnx_reuse_rx_data(rxq);
4179 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
4183 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
4185 QL_DPRINT7(ha, "[%d]: invalid aggregation state,"
4186 " dropping incoming packet and reusing its buffer\n",
4189 QLNX_INC_IQDROPS(ifp);
4191 /* if we already have mbuf head in aggregation free it */
4192 if (rxq->tpa_info[agg_index].mpf) {
4193 m_freem(rxq->tpa_info[agg_index].mpf);
4194 rxq->tpa_info[agg_index].mpl = NULL;
4196 rxq->tpa_info[agg_index].mpf = mp;
4197 rxq->tpa_info[agg_index].mpl = NULL;
4199 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4200 ecore_chain_consume(&rxq->rx_bd_ring);
4202 /* Now reuse any buffers posted in ext_bd_len_list */
4203 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
4205 if (cqe->ext_bd_len_list[i] == 0)
4208 qlnx_reuse_rx_data(rxq);
4210 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
4216 * first process the ext_bd_len_list
4217 * if this fails then we simply drop the packet
4219 ecore_chain_consume(&rxq->rx_bd_ring);
4220 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4222 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
4224 QL_DPRINT7(ha, "[%d]: 4\n ", fp->rss_id);
4226 if (cqe->ext_bd_len_list[i] == 0)
4229 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4230 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4231 BUS_DMASYNC_POSTREAD);
4233 mpc = sw_rx_data->data;
4236 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4237 fp->err_rx_mp_null++;
4241 rxq->tpa_info[agg_index].agg_state =
4242 QLNX_AGG_STATE_ERROR;
4243 ecore_chain_consume(&rxq->rx_bd_ring);
4245 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4249 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4250 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4251 " dropping incoming packet and reusing its"
4252 " buffer\n", fp->rss_id);
4254 qlnx_reuse_rx_data(rxq);
4260 rxq->tpa_info[agg_index].agg_state =
4261 QLNX_AGG_STATE_ERROR;
4263 ecore_chain_consume(&rxq->rx_bd_ring);
4265 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4270 mpc->m_flags &= ~M_PKTHDR;
4272 mpc->m_len = cqe->ext_bd_len_list[i];
4278 mpl->m_len = ha->rx_buf_size;
4283 ecore_chain_consume(&rxq->rx_bd_ring);
4285 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4288 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
4290 QL_DPRINT7(ha, "[%d]: invalid aggregation state, dropping"
4291 " incoming packet and reusing its buffer\n",
4294 QLNX_INC_IQDROPS(ifp);
4296 rxq->tpa_info[agg_index].mpf = mp;
4297 rxq->tpa_info[agg_index].mpl = NULL;
4302 rxq->tpa_info[agg_index].placement_offset = cqe->placement_offset;
4305 mp->m_len = ha->rx_buf_size;
4307 rxq->tpa_info[agg_index].mpf = mp;
4308 rxq->tpa_info[agg_index].mpl = mpl;
4310 mp->m_len = cqe->len_on_first_bd + cqe->placement_offset;
4311 rxq->tpa_info[agg_index].mpf = mp;
4312 rxq->tpa_info[agg_index].mpl = mp;
4316 mp->m_flags |= M_PKTHDR;
/* assign packet to this network interface */
mp->m_pkthdr.rcvif = ifp;

/* assume no hardware checksum has been completed */
4322 mp->m_pkthdr.csum_flags = 0;
4324 //mp->m_pkthdr.flowid = fp->rss_id;
4325 mp->m_pkthdr.flowid = cqe->rss_hash;
4327 #if __FreeBSD_version >= 1100000
4329 hash_type = cqe->bitfields &
4330 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
4331 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);
4333 switch (hash_type) {
4335 case RSS_HASH_TYPE_IPV4:
4336 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
4339 case RSS_HASH_TYPE_TCP_IPV4:
4340 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
4343 case RSS_HASH_TYPE_IPV6:
4344 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
4347 case RSS_HASH_TYPE_TCP_IPV6:
4348 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
4352 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
4357 mp->m_flags |= M_FLOWID;
4360 mp->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID |
4361 CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4363 mp->m_pkthdr.csum_data = 0xFFFF;
4365 if (CQE_HAS_VLAN(cqe->pars_flags.flags)) {
4366 mp->m_pkthdr.ether_vtag = le16toh(cqe->vlan_tag);
4367 mp->m_flags |= M_VLANTAG;
4370 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_START;
4372 QL_DPRINT7(ha, "[%d]: 5\n\tagg_state = %d\n\t mpf = %p mpl = %p\n",
4373 fp->rss_id, rxq->tpa_info[agg_index].agg_state,
4374 rxq->tpa_info[agg_index].mpf, rxq->tpa_info[agg_index].mpl);
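
/*
 * qlnx_tpa_cont
 * Handles a TPA continuation CQE: each non-zero len_list entry maps to
 * one Rx BD whose mbuf is appended to the in-progress aggregation.
 */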
4380 qlnx_tpa_cont(qlnx_host_t *ha, struct qlnx_fastpath *fp,
4381 struct qlnx_rx_queue *rxq,
4382 struct eth_fast_path_rx_tpa_cont_cqe *cqe)
4384 struct sw_rx_data *sw_rx_data;
4386 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL;
4393 QL_DPRINT7(ha, "[%d]: enter\n \
4395 \t tpa_agg_index = 0x%x\n \
4396 \t len_list[0] = 0x%x\n \
4397 \t len_list[1] = 0x%x\n \
4398 \t len_list[2] = 0x%x\n \
4399 \t len_list[3] = 0x%x\n \
4400 \t len_list[4] = 0x%x\n \
4401 \t len_list[5] = 0x%x\n",
4402 fp->rss_id, cqe->type, cqe->tpa_agg_index,
4403 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
4404 cqe->len_list[3], cqe->len_list[4], cqe->len_list[5]);
4406 agg_index = cqe->tpa_agg_index;
4408 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
4409 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
4410 fp->err_rx_tpa_invalid_agg_num++;
4415 for (i = 0; i < ETH_TPA_CQE_CONT_LEN_LIST_SIZE; i++) {
4417 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);
4419 if (cqe->len_list[i] == 0)
4422 if (rxq->tpa_info[agg_index].agg_state !=
4423 QLNX_AGG_STATE_START) {
4424 qlnx_reuse_rx_data(rxq);
4428 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4429 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4430 BUS_DMASYNC_POSTREAD);
4432 mpc = sw_rx_data->data;
4436 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4438 fp->err_rx_mp_null++;
4442 rxq->tpa_info[agg_index].agg_state =
4443 QLNX_AGG_STATE_ERROR;
4444 ecore_chain_consume(&rxq->rx_bd_ring);
4446 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4450 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4452 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4453 " dropping incoming packet and reusing its"
4454 " buffer\n", fp->rss_id);
4456 qlnx_reuse_rx_data(rxq);
4462 rxq->tpa_info[agg_index].agg_state =
4463 QLNX_AGG_STATE_ERROR;
4465 ecore_chain_consume(&rxq->rx_bd_ring);
4467 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4472 mpc->m_flags &= ~M_PKTHDR;
4474 mpc->m_len = cqe->len_list[i];
4480 mpl->m_len = ha->rx_buf_size;
4485 ecore_chain_consume(&rxq->rx_bd_ring);
4487 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4490 QL_DPRINT7(ha, "[%d]: 2\n" "\tmpf = %p mpl = %p\n",
4491 fp->rss_id, mpf, mpl);
4494 mp = rxq->tpa_info[agg_index].mpl;
4495 mp->m_len = ha->rx_buf_size;
4497 rxq->tpa_info[agg_index].mpl = mpl;
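
/*
 * qlnx_tpa_end
 * Closes an aggregation: appends any trailing len_list buffers, trims
 * the chain to total_packet_len, finishes the packet header and passes
 * the coalesced frame to the stack; returns the number of coalesced
 * segments for the caller's Rx budget accounting.
 */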
4504 qlnx_tpa_end(qlnx_host_t *ha, struct qlnx_fastpath *fp,
4505 struct qlnx_rx_queue *rxq,
4506 struct eth_fast_path_rx_tpa_end_cqe *cqe)
4508 struct sw_rx_data *sw_rx_data;
4510 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL;
4514 struct ifnet *ifp = ha->ifp;
4519 QL_DPRINT7(ha, "[%d]: enter\n \
4521 \t tpa_agg_index = 0x%x\n \
4522 \t total_packet_len = 0x%x\n \
4523 \t num_of_bds = 0x%x\n \
4524 \t end_reason = 0x%x\n \
4525 \t num_of_coalesced_segs = 0x%x\n \
4526 \t ts_delta = 0x%x\n \
4527 \t len_list[0] = 0x%x\n \
4528 \t len_list[1] = 0x%x\n \
4529 \t len_list[2] = 0x%x\n \
4530 \t len_list[3] = 0x%x\n",
4531 fp->rss_id, cqe->type, cqe->tpa_agg_index,
4532 cqe->total_packet_len, cqe->num_of_bds,
4533 cqe->end_reason, cqe->num_of_coalesced_segs, cqe->ts_delta,
4534 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
4537 agg_index = cqe->tpa_agg_index;
4539 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
4541 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
4543 fp->err_rx_tpa_invalid_agg_num++;
4548 for (i = 0; i < ETH_TPA_CQE_END_LEN_LIST_SIZE; i++) {
4550 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);
4552 if (cqe->len_list[i] == 0)
4555 if (rxq->tpa_info[agg_index].agg_state !=
4556 QLNX_AGG_STATE_START) {
4558 QL_DPRINT7(ha, "[%d]: 2\n ", fp->rss_id);
4560 qlnx_reuse_rx_data(rxq);
4564 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4565 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4566 BUS_DMASYNC_POSTREAD);
4568 mpc = sw_rx_data->data;
4572 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4574 fp->err_rx_mp_null++;
4578 rxq->tpa_info[agg_index].agg_state =
4579 QLNX_AGG_STATE_ERROR;
4580 ecore_chain_consume(&rxq->rx_bd_ring);
4582 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4586 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4587 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4588 " dropping incoming packet and reusing its"
4589 " buffer\n", fp->rss_id);
4591 qlnx_reuse_rx_data(rxq);
4597 rxq->tpa_info[agg_index].agg_state =
4598 QLNX_AGG_STATE_ERROR;
4600 ecore_chain_consume(&rxq->rx_bd_ring);
4602 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4607 mpc->m_flags &= ~M_PKTHDR;
4609 mpc->m_len = cqe->len_list[i];
4615 mpl->m_len = ha->rx_buf_size;
4620 ecore_chain_consume(&rxq->rx_bd_ring);
4622 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4625 QL_DPRINT7(ha, "[%d]: 5\n ", fp->rss_id);
4629 QL_DPRINT7(ha, "[%d]: 6\n ", fp->rss_id);
4631 mp = rxq->tpa_info[agg_index].mpl;
4632 mp->m_len = ha->rx_buf_size;
4636 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_START) {
4638 QL_DPRINT7(ha, "[%d]: 7\n ", fp->rss_id);
4640 if (rxq->tpa_info[agg_index].mpf != NULL)
4641 m_freem(rxq->tpa_info[agg_index].mpf);
4642 rxq->tpa_info[agg_index].mpf = NULL;
4643 rxq->tpa_info[agg_index].mpl = NULL;
4644 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
4648 mp = rxq->tpa_info[agg_index].mpf;
4649 m_adj(mp, rxq->tpa_info[agg_index].placement_offset);
4650 mp->m_pkthdr.len = cqe->total_packet_len;
4652 if (mp->m_next == NULL)
4653 mp->m_len = mp->m_pkthdr.len;
4655 /* compute the total packet length */
4657 while (mpf != NULL) {
4662 if (cqe->total_packet_len > len) {
4663 mpl = rxq->tpa_info[agg_index].mpl;
4664 mpl->m_len += (cqe->total_packet_len - len);
4668 QLNX_INC_IPACKETS(ifp);
4669 QLNX_INC_IBYTES(ifp, (cqe->total_packet_len));
4671 QL_DPRINT7(ha, "[%d]: 8 csum_data = 0x%x csum_flags = 0x%" PRIu64 "\n \
4672 m_len = 0x%x m_pkthdr_len = 0x%x\n",
4673 fp->rss_id, mp->m_pkthdr.csum_data,
4674 (uint64_t)mp->m_pkthdr.csum_flags, mp->m_len, mp->m_pkthdr.len);
4676 (*ifp->if_input)(ifp, mp);
4678 rxq->tpa_info[agg_index].mpf = NULL;
4679 rxq->tpa_info[agg_index].mpl = NULL;
4680 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
4682 return (cqe->num_of_coalesced_segs);
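
/*
 * qlnx_rx_int
 * Drains up to "budget" entries from the Rx completion ring: slow path
 * CQEs are handed back to ecore, TPA CQEs are dispatched to the
 * qlnx_tpa_*() handlers above, and regular CQEs become mbufs (tagged
 * with checksum, VLAN and RSS metadata) delivered to the stack or the
 * soft LRO engine.
 */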
4686 qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
4689 uint16_t hw_comp_cons, sw_comp_cons;
4691 struct qlnx_rx_queue *rxq = fp->rxq;
4692 struct ifnet *ifp = ha->ifp;
4693 struct ecore_dev *cdev = &ha->cdev;
4694 struct ecore_hwfn *p_hwfn;
4696 #ifdef QLNX_SOFT_LRO
4697 struct lro_ctrl *lro;
4700 #endif /* #ifdef QLNX_SOFT_LRO */
4702 hw_comp_cons = le16toh(*rxq->hw_cons_ptr);
4703 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
4705 p_hwfn = &ha->cdev.hwfns[(fp->rss_id % cdev->num_hwfns)];
4707 /* Memory barrier to prevent the CPU from doing speculative reads of CQE
4708 * / BD in the while-loop before reading hw_comp_cons. If the CQE is
4709 * read before it is written by FW, then FW writes CQE and SB, and then
4710 * the CPU reads the hw_comp_cons, it will use an old CQE.
4713 /* Loop to complete all indicated BDs */
4714 while (sw_comp_cons != hw_comp_cons) {
4715 union eth_rx_cqe *cqe;
4716 struct eth_fast_path_rx_reg_cqe *fp_cqe;
4717 struct sw_rx_data *sw_rx_data;
4718 register struct mbuf *mp;
4719 enum eth_rx_cqe_type cqe_type;
4720 uint16_t len, pad, len_on_first_bd;
4722 #if __FreeBSD_version >= 1100000
4724 #endif /* #if __FreeBSD_version >= 1100000 */
4726 /* Get the CQE from the completion ring */
4727 cqe = (union eth_rx_cqe *)
4728 ecore_chain_consume(&rxq->rx_comp_ring);
4729 cqe_type = cqe->fast_path_regular.type;
4731 if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) {
4732 QL_DPRINT3(ha, "Got a slowath CQE\n");
4734 ecore_eth_cqe_completion(p_hwfn,
4735 (struct eth_slow_path_rx_cqe *)cqe);
4739 if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) {
4743 case ETH_RX_CQE_TYPE_TPA_START:
4744 qlnx_tpa_start(ha, fp, rxq,
4745 &cqe->fast_path_tpa_start);
4749 case ETH_RX_CQE_TYPE_TPA_CONT:
4750 qlnx_tpa_cont(ha, fp, rxq,
4751 &cqe->fast_path_tpa_cont);
4755 case ETH_RX_CQE_TYPE_TPA_END:
4756 rx_pkt += qlnx_tpa_end(ha, fp, rxq,
4757 &cqe->fast_path_tpa_end);
4768 /* Get the data from the SW ring */
4769 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4770 mp = sw_rx_data->data;
4773 QL_DPRINT1(ha, "mp = NULL\n");
4774 fp->err_rx_mp_null++;
4776 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4779 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4780 BUS_DMASYNC_POSTREAD);
4783 fp_cqe = &cqe->fast_path_regular;/* MK CR TPA check assembly */
4784 len = le16toh(fp_cqe->pkt_len);
4785 pad = fp_cqe->placement_offset;
4787 QL_DPRINT3(ha, "CQE type = %x, flags = %x, vlan = %x,"
4788 " len %u, parsing flags = %d pad = %d\n",
4789 cqe_type, fp_cqe->bitfields,
4790 le16toh(fp_cqe->vlan_tag),
4791 len, le16toh(fp_cqe->pars_flags.flags), pad);
4793 data = mtod(mp, uint8_t *);
4797 qlnx_dump_buf8(ha, __func__, data, len);
4799 /* For every Rx BD consumed, we allocate a new BD so the BD ring
4800 * is always with a fixed size. If allocation fails, we take the
4801 * consumed BD and return it to the ring in the PROD position.
4802 * The packet that was received on that BD will be dropped (and
4803 * not passed to the upper stack).
4805 /* If this is an error packet then drop it */
4806 if ((le16toh(cqe->fast_path_regular.pars_flags.flags)) &
4809 QL_DPRINT1(ha, "CQE in CONS = %u has error, flags = %x,"
4810 " dropping incoming packet\n", sw_comp_cons,
4811 le16toh(cqe->fast_path_regular.pars_flags.flags));
4812 fp->err_rx_hw_errors++;
4814 qlnx_reuse_rx_data(rxq);
4816 QLNX_INC_IERRORS(ifp);
4821 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4823 QL_DPRINT1(ha, "New buffer allocation failed, dropping"
4824 " incoming packet and reusing its buffer\n");
4825 qlnx_reuse_rx_data(rxq);
4827 fp->err_rx_alloc_errors++;
4829 QLNX_INC_IQDROPS(ifp);
4834 ecore_chain_consume(&rxq->rx_bd_ring);
4836 len_on_first_bd = fp_cqe->len_on_first_bd;
4838 mp->m_pkthdr.len = len;
4840 if ((len > 60 ) && (len > len_on_first_bd)) {
4842 mp->m_len = len_on_first_bd;
4844 if (qlnx_rx_jumbo_chain(ha, fp, mp,
4845 (len - len_on_first_bd)) != 0) {
4849 QLNX_INC_IQDROPS(ifp);
4854 } else if (len_on_first_bd < len) {
4855 fp->err_rx_jumbo_chain_pkts++;
4860 mp->m_flags |= M_PKTHDR;
/* assign packet to this network interface */
mp->m_pkthdr.rcvif = ifp;

/* assume no hardware checksum has been completed */
4866 mp->m_pkthdr.csum_flags = 0;
4868 mp->m_pkthdr.flowid = fp_cqe->rss_hash;
4870 #if __FreeBSD_version >= 1100000
4872 hash_type = fp_cqe->bitfields &
4873 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
4874 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);
4876 switch (hash_type) {
4878 case RSS_HASH_TYPE_IPV4:
4879 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
4882 case RSS_HASH_TYPE_TCP_IPV4:
4883 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
4886 case RSS_HASH_TYPE_IPV6:
4887 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
4890 case RSS_HASH_TYPE_TCP_IPV6:
4891 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
4895 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
4900 mp->m_flags |= M_FLOWID;
4903 if (CQE_L3_PACKET(fp_cqe->pars_flags.flags)) {
4904 mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
4907 if (!(CQE_IP_HDR_ERR(fp_cqe->pars_flags.flags))) {
4908 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4911 if (CQE_L4_HAS_CSUM(fp_cqe->pars_flags.flags)) {
4912 mp->m_pkthdr.csum_data = 0xFFFF;
4913 mp->m_pkthdr.csum_flags |=
4914 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4917 if (CQE_HAS_VLAN(fp_cqe->pars_flags.flags)) {
4918 mp->m_pkthdr.ether_vtag = le16toh(fp_cqe->vlan_tag);
4919 mp->m_flags |= M_VLANTAG;
4922 QLNX_INC_IPACKETS(ifp);
4923 QLNX_INC_IBYTES(ifp, len);
4925 #ifdef QLNX_SOFT_LRO
4929 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
4931 tcp_lro_queue_mbuf(lro, mp);
4935 if (tcp_lro_rx(lro, mp, 0))
4936 (*ifp->if_input)(ifp, mp);
4938 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
4941 (*ifp->if_input)(ifp, mp);
4945 (*ifp->if_input)(ifp, mp);
4947 #endif /* #ifdef QLNX_SOFT_LRO */
4951 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4953 next_cqe: /* don't consume bd rx buffer */
4954 ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
4955 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
4957 /* CR TPA - revisit how to handle budget in TPA perhaps
4958 increase on "end" */
if (rx_pkt == budget)
	break;
4961 } /* repeat while sw_comp_cons != hw_comp_cons... */
4963 /* Update producers */
4964 qlnx_update_rx_prod(p_hwfn, rxq);
4971 * fast path interrupt
4975 qlnx_fp_isr(void *arg)
4977 qlnx_ivec_t *ivec = arg;
4979 struct qlnx_fastpath *fp = NULL;
4984 if (ha->state != QLNX_STATE_OPEN) {
idx = ivec->rss_idx;

if (idx >= ha->num_rss) {
4991 QL_DPRINT1(ha, "illegal interrupt[%d]\n", idx);
4992 ha->err_illegal_intr++;
4995 fp = &ha->fp_array[idx];
5000 int rx_int = 0, total_rx_count = 0;
5002 struct qlnx_tx_queue *txq;
5005 lro_enable = ha->ifp->if_capenable & IFCAP_LRO;
5007 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
5010 for (tc = 0; tc < ha->num_tc; tc++) {
5014 if((int)(elem_left =
5015 ecore_chain_get_elem_left(&txq->tx_pbl)) <
5016 QLNX_TX_ELEM_THRESH) {
5018 if (mtx_trylock(&fp->tx_mtx)) {
5019 #ifdef QLNX_TRACE_PERF_DATA
5020 tx_compl = fp->tx_pkts_completed;
5023 qlnx_tx_int(ha, fp, fp->txq[tc]);
5024 #ifdef QLNX_TRACE_PERF_DATA
5025 fp->tx_pkts_compl_intr +=
5026 (fp->tx_pkts_completed - tx_compl);
5027 if ((fp->tx_pkts_completed - tx_compl) <= 32)
5029 else if (((fp->tx_pkts_completed - tx_compl) > 32) &&
5030 ((fp->tx_pkts_completed - tx_compl) <= 64))
5032 else if(((fp->tx_pkts_completed - tx_compl) > 64) &&
5033 ((fp->tx_pkts_completed - tx_compl) <= 128))
5035 else if(((fp->tx_pkts_completed - tx_compl) > 128))
5038 mtx_unlock(&fp->tx_mtx);
5043 rx_int = qlnx_rx_int(ha, fp, ha->rx_pkt_threshold,
5047 fp->rx_pkts += rx_int;
5048 total_rx_count += rx_int;
5053 #ifdef QLNX_SOFT_LRO
5055 struct lro_ctrl *lro;
5057 lro = &fp->rxq->lro;
5059 if (lro_enable && total_rx_count) {
5061 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
5063 #ifdef QLNX_TRACE_LRO_CNT
5064 if (lro->lro_mbuf_count & ~1023)
5066 else if (lro->lro_mbuf_count & ~511)
5068 else if (lro->lro_mbuf_count & ~255)
5070 else if (lro->lro_mbuf_count & ~127)
5072 else if (lro->lro_mbuf_count & ~63)
5074 #endif /* #ifdef QLNX_TRACE_LRO_CNT */
5076 tcp_lro_flush_all(lro);
5079 struct lro_entry *queued;
5081 while ((!SLIST_EMPTY(&lro->lro_active))) {
5082 queued = SLIST_FIRST(&lro->lro_active);
5083 SLIST_REMOVE_HEAD(&lro->lro_active, \
5085 tcp_lro_flush(lro, queued);
5087 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
5090 #endif /* #ifdef QLNX_SOFT_LRO */
5092 ecore_sb_update_sb_idx(fp->sb_info);
5094 ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
5102 * slow path interrupt processing function
5103 * can be invoked in polled mode or in interrupt mode via taskqueue.
5106 qlnx_sp_isr(void *arg)
5108 struct ecore_hwfn *p_hwfn;
5113 ha = (qlnx_host_t *)p_hwfn->p_dev;
5115 ha->sp_interrupts++;
5117 QL_DPRINT2(ha, "enter\n");
5119 ecore_int_sp_dpc(p_hwfn);
5121 QL_DPRINT2(ha, "exit\n");
5126 /*****************************************************************************
5127 * Support Functions for DMA'able Memory
5128 *****************************************************************************/
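/*
 * qlnx_dmamap_callback() follows the standard bus_dmamap_load(9) contract:
 * busdma invokes it with the resolved segment list once the mapping is
 * complete. Since every mapping made here uses a single segment, the
 * callback simply stores segs[0].ds_addr into the caller-supplied
 * bus_addr_t, leaving it zeroed when the load reports an error.
 */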
5131 qlnx_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
5133 *((bus_addr_t *)arg) = 0;
5136 printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
5140 *((bus_addr_t *)arg) = segs[0].ds_addr;
5146 qlnx_alloc_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
5154 ret = bus_dma_tag_create(
5155 ha->parent_tag,/* parent */
5157 ((bus_size_t)(1ULL << 32)),/* boundary */
5158 BUS_SPACE_MAXADDR, /* lowaddr */
5159 BUS_SPACE_MAXADDR, /* highaddr */
5160 NULL, NULL, /* filter, filterarg */
5161 dma_buf->size, /* maxsize */
5163 dma_buf->size, /* maxsegsize */
5165 NULL, NULL, /* lockfunc, lockarg */
5169 QL_DPRINT1(ha, "could not create dma tag\n");
5170 goto qlnx_alloc_dmabuf_exit;
5172 ret = bus_dmamem_alloc(dma_buf->dma_tag,
5173 (void **)&dma_buf->dma_b,
5174 (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
5177 bus_dma_tag_destroy(dma_buf->dma_tag);
5178 QL_DPRINT1(ha, "bus_dmamem_alloc failed\n");
5179 goto qlnx_alloc_dmabuf_exit;
5182 ret = bus_dmamap_load(dma_buf->dma_tag,
5186 qlnx_dmamap_callback,
5187 &b_addr, BUS_DMA_NOWAIT);
5189 if (ret || !b_addr) {
5190 bus_dma_tag_destroy(dma_buf->dma_tag);
5191 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
5194 goto qlnx_alloc_dmabuf_exit;
5197 dma_buf->dma_addr = b_addr;
5199 qlnx_alloc_dmabuf_exit:
5205 qlnx_free_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
5207 bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
5208 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
5209 bus_dma_tag_destroy(dma_buf->dma_tag);
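/*
 * qlnx_dma_alloc_coherent()/qlnx_dma_free_coherent() implement the ecore
 * OSAL coherent-memory hooks on top of busdma. The allocator rounds the
 * request up to a page, over-allocates by one extra page, and stashes a
 * copy of its qlnx_dma_t bookkeeping (tag, map, virtual and bus addresses)
 * immediately past the requested size. The free routine recovers that
 * bookkeeping from the virtual address alone, which is all the ecore
 * layer hands back.
 */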
5214 qlnx_dma_alloc_coherent(void *ecore_dev, bus_addr_t *phys, uint32_t size)
5221 ha = (qlnx_host_t *)ecore_dev;
5224 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
5226 memset(&dma_buf, 0, sizeof (qlnx_dma_t));
5228 dma_buf.size = size + PAGE_SIZE;
5229 dma_buf.alignment = 8;
5231 if (qlnx_alloc_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf) != 0)
5233 bzero((uint8_t *)dma_buf.dma_b, dma_buf.size);
5235 *phys = dma_buf.dma_addr;
5237 dma_p = (qlnx_dma_t *)((uint8_t *)dma_buf.dma_b + size);
5239 memcpy(dma_p, &dma_buf, sizeof(qlnx_dma_t));
5241 QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
5242 (void *)dma_buf.dma_map, (void *)dma_buf.dma_tag,
5243 dma_buf.dma_b, (void *)dma_buf.dma_addr, size);
5245 return (dma_buf.dma_b);
5249 qlnx_dma_free_coherent(void *ecore_dev, void *v_addr, bus_addr_t phys,
5252 qlnx_dma_t dma_buf, *dma_p;
5256 ha = (qlnx_host_t *)ecore_dev;
5262 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
5264 dma_p = (qlnx_dma_t *)((uint8_t *)v_addr + size);
5266 QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
5267 (void *)dma_p->dma_map, (void *)dma_p->dma_tag,
5268 dma_p->dma_b, (void *)dma_p->dma_addr, size);
5272 if (!ha->qlnxr_debug)
5273 qlnx_free_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf);
5278 qlnx_alloc_parent_dma_tag(qlnx_host_t *ha)
5286 * Allocate parent DMA Tag
5288 ret = bus_dma_tag_create(
5289 bus_get_dma_tag(dev), /* parent */
5290 1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
5291 BUS_SPACE_MAXADDR, /* lowaddr */
5292 BUS_SPACE_MAXADDR, /* highaddr */
5293 NULL, NULL, /* filter, filterarg */
5294 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
5296 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
5298 NULL, NULL, /* lockfunc, lockarg */
5302 QL_DPRINT1(ha, "could not create parent dma tag\n");
5306 ha->flags.parent_tag = 1;
5312 qlnx_free_parent_dma_tag(qlnx_host_t *ha)
5314 if (ha->parent_tag != NULL) {
5315 bus_dma_tag_destroy(ha->parent_tag);
5316 ha->parent_tag = NULL;
5322 qlnx_alloc_tx_dma_tag(qlnx_host_t *ha)
5324 if (bus_dma_tag_create(NULL, /* parent */
5325 1, 0, /* alignment, bounds */
5326 BUS_SPACE_MAXADDR, /* lowaddr */
5327 BUS_SPACE_MAXADDR, /* highaddr */
5328 NULL, NULL, /* filter, filterarg */
5329 QLNX_MAX_TSO_FRAME_SIZE, /* maxsize */
5330 QLNX_MAX_SEGMENTS, /* nsegments */
5331 QLNX_MAX_TX_MBUF_SIZE, /* maxsegsize */
5333 NULL, /* lockfunc */
5334 NULL, /* lockfuncarg */
5337 QL_DPRINT1(ha, "tx_tag alloc failed\n");
5345 qlnx_free_tx_dma_tag(qlnx_host_t *ha)
5347 if (ha->tx_tag != NULL) {
5348 bus_dma_tag_destroy(ha->tx_tag);
5355 qlnx_alloc_rx_dma_tag(qlnx_host_t *ha)
5357 if (bus_dma_tag_create(NULL, /* parent */
5358 1, 0, /* alignment, bounds */
5359 BUS_SPACE_MAXADDR, /* lowaddr */
5360 BUS_SPACE_MAXADDR, /* highaddr */
5361 NULL, NULL, /* filter, filterarg */
5362 MJUM9BYTES, /* maxsize */
5364 MJUM9BYTES, /* maxsegsize */
5366 NULL, /* lockfunc */
5367 NULL, /* lockfuncarg */
5370 QL_DPRINT1(ha, " rx_tag alloc failed\n");
5378 qlnx_free_rx_dma_tag(qlnx_host_t *ha)
5380 if (ha->rx_tag != NULL) {
5381 bus_dma_tag_destroy(ha->rx_tag);
5387 /*********************************
5388 * Exported functions
5389 *********************************/
5391 qlnx_pci_bus_get_bar_size(void *ecore_dev, uint8_t bar_id)
5395 bar_id = bar_id * 2;
5397 bar_size = bus_get_resource_count(((qlnx_host_t *)ecore_dev)->pci_dev,
5405 qlnx_pci_read_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t *reg_value)
5407 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5413 qlnx_pci_read_config_word(void *ecore_dev, uint32_t pci_reg,
5414 uint16_t *reg_value)
5416 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5422 qlnx_pci_read_config_dword(void *ecore_dev, uint32_t pci_reg,
5423 uint32_t *reg_value)
5425 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5431 qlnx_pci_write_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t reg_value)
5433 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5434 pci_reg, reg_value, 1);
5439 qlnx_pci_write_config_word(void *ecore_dev, uint32_t pci_reg,
5442 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5443 pci_reg, reg_value, 2);
5448 qlnx_pci_write_config_dword(void *ecore_dev, uint32_t pci_reg,
5451 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5452 pci_reg, reg_value, 4);
5457 qlnx_pci_find_capability(void *ecore_dev, int cap)
5464 if (pci_find_cap(ha->pci_dev, PCIY_EXPRESS, &reg) == 0)
5467 QL_DPRINT1(ha, "failed\n");
5473 qlnx_pci_find_ext_capability(void *ecore_dev, int ext_cap)
5480 if (pci_find_extcap(ha->pci_dev, ext_cap, &reg) == 0)
5483 QL_DPRINT1(ha, "failed\n");
5489 qlnx_reg_rd32(void *hwfn, uint32_t reg_addr)
5492 struct ecore_hwfn *p_hwfn;
5496 data32 = bus_read_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \
5497 (bus_size_t)(p_hwfn->reg_offset + reg_addr));
5503 qlnx_reg_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
5505 struct ecore_hwfn *p_hwfn = hwfn;
5507 bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \
5508 (bus_size_t)(p_hwfn->reg_offset + reg_addr), value);
5514 qlnx_reg_wr16(void *hwfn, uint32_t reg_addr, uint16_t value)
5516 struct ecore_hwfn *p_hwfn = hwfn;
5518 bus_write_2(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \
5519 (bus_size_t)(p_hwfn->reg_offset + reg_addr), value);
5524 qlnx_dbell_wr32_db(void *hwfn, void *reg_addr, uint32_t value)
5526 struct ecore_dev *cdev;
5527 struct ecore_hwfn *p_hwfn;
5532 cdev = p_hwfn->p_dev;
5534 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(p_hwfn->doorbells));
5535 bus_write_4(((qlnx_host_t *)cdev)->pci_dbells, offset, value);
5541 qlnx_dbell_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
5543 struct ecore_hwfn *p_hwfn = hwfn;
5545 bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_dbells, \
5546 (bus_size_t)(p_hwfn->db_offset + reg_addr), value);
5552 qlnx_direct_reg_rd32(void *p_hwfn, uint32_t *reg_addr)
5556 struct ecore_dev *cdev;
5558 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5559 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5561 data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, offset);
5567 qlnx_direct_reg_wr32(void *p_hwfn, void *reg_addr, uint32_t value)
5570 struct ecore_dev *cdev;
5572 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5573 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5575 bus_write_4(((qlnx_host_t *)cdev)->pci_reg, offset, value);
5581 qlnx_direct_reg_wr64(void *p_hwfn, void *reg_addr, uint64_t value)
5584 struct ecore_dev *cdev;
5586 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5587 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5589 bus_write_8(((qlnx_host_t *)cdev)->pci_reg, offset, value);
5594 qlnx_zalloc(uint32_t size)
5598 va = malloc((unsigned long)size, M_QLNXBUF, M_NOWAIT);
5600 return ((void *)va);
5604 qlnx_barrier(void *p_hwfn)
5608 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
5609 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_WRITE);
5613 qlnx_link_update(void *p_hwfn)
5616 int prev_link_state;
5618 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
5620 qlnx_fill_link(ha, p_hwfn, &ha->if_link);
5622 prev_link_state = ha->link_up;
5623 ha->link_up = ha->if_link.link_up;
5625 if (prev_link_state != ha->link_up) {
5627 if_link_state_change(ha->ifp, LINK_STATE_UP);
5629 if_link_state_change(ha->ifp, LINK_STATE_DOWN);
5633 #ifdef CONFIG_ECORE_SRIOV
5635 if (qlnx_vf_device(ha) != 0) {
5636 if (ha->sriov_initialized)
5637 qlnx_inform_vf_link_state(p_hwfn, ha);
5640 #endif /* #ifdef CONFIG_ECORE_SRIOV */
5641 #endif /* #ifdef QLNX_VF */
5647 __qlnx_osal_vf_fill_acquire_resc_req(struct ecore_hwfn *p_hwfn,
5648 struct ecore_vf_acquire_sw_info *p_sw_info)
5650 p_sw_info->driver_version = (QLNX_VERSION_MAJOR << 24) |
5651 (QLNX_VERSION_MINOR << 16) |
5653 p_sw_info->os_type = VFPF_ACQUIRE_OS_FREEBSD;
5659 qlnx_osal_vf_fill_acquire_resc_req(void *p_hwfn, void *p_resc_req,
5662 __qlnx_osal_vf_fill_acquire_resc_req(p_hwfn, p_sw_info);
5668 qlnx_fill_link(qlnx_host_t *ha, struct ecore_hwfn *hwfn,
5669 struct qlnx_link_output *if_link)
5671 struct ecore_mcp_link_params link_params;
5672 struct ecore_mcp_link_state link_state;
5674 struct ecore_ptt *p_ptt = NULL;
5677 memset(if_link, 0, sizeof(*if_link));
5678 memset(&link_params, 0, sizeof(struct ecore_mcp_link_params));
5679 memset(&link_state, 0, sizeof(struct ecore_mcp_link_state));
5681 ha = (qlnx_host_t *)hwfn->p_dev;
5683 /* Prepare source inputs */
5684 /* we only deal with physical functions */
5685 if (qlnx_vf_device(ha) != 0) {
5687 p_ptt = ecore_ptt_acquire(hwfn);
5689 if (p_ptt == NULL) {
5690 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
5694 ecore_mcp_get_media_type(hwfn, p_ptt, &if_link->media_type);
5695 ecore_ptt_release(hwfn, p_ptt);
5697 memcpy(&link_params, ecore_mcp_get_link_params(hwfn),
5698 sizeof(link_params));
5699 memcpy(&link_state, ecore_mcp_get_link_state(hwfn),
5700 sizeof(link_state));
5702 ecore_mcp_get_media_type(hwfn, NULL, &if_link->media_type);
5703 ecore_vf_read_bulletin(hwfn, &p_change);
5704 ecore_vf_get_link_params(hwfn, &link_params);
5705 ecore_vf_get_link_state(hwfn, &link_state);
5708 /* Set the link parameters to pass to protocol driver */
5709 if (link_state.link_up) {
5710 if_link->link_up = true;
5711 if_link->speed = link_state.speed;
5714 if_link->supported_caps = QLNX_LINK_CAP_FIBRE;
5716 if (link_params.speed.autoneg)
5717 if_link->supported_caps |= QLNX_LINK_CAP_Autoneg;
5719 if (link_params.pause.autoneg ||
5720 (link_params.pause.forced_rx && link_params.pause.forced_tx))
5721 if_link->supported_caps |= QLNX_LINK_CAP_Asym_Pause;
5723 if (link_params.pause.autoneg || link_params.pause.forced_rx ||
5724 link_params.pause.forced_tx)
5725 if_link->supported_caps |= QLNX_LINK_CAP_Pause;
5727 if (link_params.speed.advertised_speeds &
5728 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
5729 if_link->supported_caps |= QLNX_LINK_CAP_1000baseT_Half |
5730 QLNX_LINK_CAP_1000baseT_Full;
5732 if (link_params.speed.advertised_speeds &
5733 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
5734 if_link->supported_caps |= QLNX_LINK_CAP_10000baseKR_Full;
5736 if (link_params.speed.advertised_speeds &
5737 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
5738 if_link->supported_caps |= QLNX_LINK_CAP_25000baseKR_Full;
5740 if (link_params.speed.advertised_speeds &
5741 NVM_CFG1_PORT_DRV_LINK_SPEED_40G)
5742 if_link->supported_caps |= QLNX_LINK_CAP_40000baseLR4_Full;
5744 if (link_params.speed.advertised_speeds &
5745 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
5746 if_link->supported_caps |= QLNX_LINK_CAP_50000baseKR2_Full;
5748 if (link_params.speed.advertised_speeds &
5749 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
5750 if_link->supported_caps |= QLNX_LINK_CAP_100000baseKR4_Full;
5752 if_link->advertised_caps = if_link->supported_caps;
5754 if_link->autoneg = link_params.speed.autoneg;
5755 if_link->duplex = QLNX_LINK_DUPLEX;
5757 /* Link partner capabilities */
5759 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_HD)
5760 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Half;
5762 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_FD)
5763 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Full;
5765 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_10G)
5766 if_link->link_partner_caps |= QLNX_LINK_CAP_10000baseKR_Full;
5768 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_25G)
5769 if_link->link_partner_caps |= QLNX_LINK_CAP_25000baseKR_Full;
5771 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_40G)
5772 if_link->link_partner_caps |= QLNX_LINK_CAP_40000baseLR4_Full;
5774 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_50G)
5775 if_link->link_partner_caps |= QLNX_LINK_CAP_50000baseKR2_Full;
5777 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_100G)
5778 if_link->link_partner_caps |= QLNX_LINK_CAP_100000baseKR4_Full;
5780 if (link_state.an_complete)
5781 if_link->link_partner_caps |= QLNX_LINK_CAP_Autoneg;
5783 if (link_state.partner_adv_pause)
5784 if_link->link_partner_caps |= QLNX_LINK_CAP_Pause;
5786 if ((link_state.partner_adv_pause ==
5787 ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE) ||
5788 (link_state.partner_adv_pause ==
5789 ECORE_LINK_PARTNER_BOTH_PAUSE))
5790 if_link->link_partner_caps |= QLNX_LINK_CAP_Asym_Pause;
5796 qlnx_schedule_recovery(void *p_hwfn)
5800 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
5802 if (qlnx_vf_device(ha) != 0) {
5803 taskqueue_enqueue(ha->err_taskqueue, &ha->err_task);
5810 qlnx_nic_setup(struct ecore_dev *cdev, struct ecore_pf_params *func_params)
5814 for (i = 0; i < cdev->num_hwfns; i++) {
5815 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
5816 p_hwfn->pf_params = *func_params;
5818 #ifdef QLNX_ENABLE_IWARP
5819 if (qlnx_vf_device((qlnx_host_t *)cdev) != 0) {
5820 p_hwfn->using_ll2 = true;
5822 #endif /* #ifdef QLNX_ENABLE_IWARP */
5826 rc = ecore_resc_alloc(cdev);
5828 goto qlnx_nic_setup_exit;
5830 ecore_resc_setup(cdev);
5832 qlnx_nic_setup_exit:
5838 qlnx_nic_start(struct ecore_dev *cdev)
5841 struct ecore_hw_init_params params;
5843 bzero(&params, sizeof (struct ecore_hw_init_params));
5845 params.p_tunn = NULL;
5846 params.b_hw_start = true;
5847 params.int_mode = cdev->int_mode;
5848 params.allow_npar_tx_switch = true;
5849 params.bin_fw_data = NULL;
5851 rc = ecore_hw_init(cdev, &params);
5853 ecore_resc_free(cdev);
5861 qlnx_slowpath_start(qlnx_host_t *ha)
5863 struct ecore_dev *cdev;
5864 struct ecore_pf_params pf_params;
5867 memset(&pf_params, 0, sizeof(struct ecore_pf_params));
5868 pf_params.eth_pf_params.num_cons =
5869 (ha->num_rss) * (ha->num_tc + 1);
5871 #ifdef QLNX_ENABLE_IWARP
5872 if (qlnx_vf_device(ha) != 0) {
5873 if(ha->personality == ECORE_PCI_ETH_IWARP) {
5874 device_printf(ha->pci_dev, "setting parameters required by iWARP dev\n");
5875 pf_params.rdma_pf_params.num_qps = 1024;
5876 pf_params.rdma_pf_params.num_srqs = 1024;
5877 pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX;
5878 pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_IWARP;
5879 } else if(ha->personality == ECORE_PCI_ETH_ROCE) {
5880 device_printf(ha->pci_dev, "setting parameters required by RoCE dev\n");
5881 pf_params.rdma_pf_params.num_qps = 8192;
5882 pf_params.rdma_pf_params.num_srqs = 8192;
5884 pf_params.rdma_pf_params.min_dpis = 8;
5885 pf_params.rdma_pf_params.roce_edpm_mode = 0;
5886 pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX;
5887 pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_ROCE;
5890 #endif /* #ifdef QLNX_ENABLE_IWARP */
5894 rc = qlnx_nic_setup(cdev, &pf_params);
5896 goto qlnx_slowpath_start_exit;
5898 cdev->int_mode = ECORE_INT_MODE_MSIX;
5899 cdev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE;
5901 #ifdef QLNX_MAX_COALESCE
5902 cdev->rx_coalesce_usecs = 255;
5903 cdev->tx_coalesce_usecs = 255;
5906 rc = qlnx_nic_start(cdev);
5908 ha->rx_coalesce_usecs = cdev->rx_coalesce_usecs;
5909 ha->tx_coalesce_usecs = cdev->tx_coalesce_usecs;
5911 #ifdef QLNX_USER_LLDP
5912 (void)qlnx_set_lldp_tlvx(ha, NULL);
5913 #endif /* #ifdef QLNX_USER_LLDP */
5915 qlnx_slowpath_start_exit:
5921 qlnx_slowpath_stop(qlnx_host_t *ha)
5923 struct ecore_dev *cdev;
5924 device_t dev = ha->pci_dev;
5929 ecore_hw_stop(cdev);
5931 for (i = 0; i < ha->cdev.num_hwfns; i++) {
5933 if (ha->sp_handle[i])
5934 (void)bus_teardown_intr(dev, ha->sp_irq[i],
5937 ha->sp_handle[i] = NULL;
5940 (void) bus_release_resource(dev, SYS_RES_IRQ,
5941 ha->sp_irq_rid[i], ha->sp_irq[i]);
5942 ha->sp_irq[i] = NULL;
5945 ecore_resc_free(cdev);
5951 qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
5952 char ver_str[VER_SIZE])
5956 memcpy(cdev->name, name, NAME_SIZE);
5958 for_each_hwfn(cdev, i) {
5959 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
5962 cdev->drv_type = DRV_ID_DRV_TYPE_FREEBSD;
5968 qlnx_get_protocol_stats(void *cdev, int proto_type, void *proto_stats)
5970 enum ecore_mcp_protocol_type type;
5971 union ecore_mcp_protocol_stats *stats;
5972 struct ecore_eth_stats eth_stats;
5976 stats = proto_stats;
5981 case ECORE_MCP_LAN_STATS:
5982 ecore_get_vport_stats((struct ecore_dev *)cdev, &eth_stats);
5983 stats->lan_stats.ucast_rx_pkts = eth_stats.common.rx_ucast_pkts;
5984 stats->lan_stats.ucast_tx_pkts = eth_stats.common.tx_ucast_pkts;
5985 stats->lan_stats.fcs_err = -1;
5989 ha->err_get_proto_invalid_type++;
5991 QL_DPRINT1(ha, "invalid protocol type 0x%x\n", type);
5998 qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver)
6000 struct ecore_hwfn *p_hwfn;
6001 struct ecore_ptt *p_ptt;
6003 p_hwfn = &ha->cdev.hwfns[0];
6004 p_ptt = ecore_ptt_acquire(p_hwfn);
6006 if (p_ptt == NULL) {
6007 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
6010 ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, mfw_ver, NULL);
6012 ecore_ptt_release(p_hwfn, p_ptt);
6018 qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size)
6020 struct ecore_hwfn *p_hwfn;
6021 struct ecore_ptt *p_ptt;
6023 p_hwfn = &ha->cdev.hwfns[0];
6024 p_ptt = ecore_ptt_acquire(p_hwfn);
6026 if (p_ptt == NULL) {
6027 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
6030 ecore_mcp_get_flash_size(p_hwfn, p_ptt, flash_size);
6032 ecore_ptt_release(p_hwfn, p_ptt);
6038 qlnx_alloc_mem_arrays(qlnx_host_t *ha)
6040 struct ecore_dev *cdev;
6044 bzero(&ha->txq_array[0], (sizeof(struct qlnx_tx_queue) * QLNX_MAX_RSS));
6045 bzero(&ha->rxq_array[0], (sizeof(struct qlnx_rx_queue) * QLNX_MAX_RSS));
6046 bzero(&ha->sb_array[0], (sizeof(struct ecore_sb_info) * QLNX_MAX_RSS));
6052 qlnx_init_fp(qlnx_host_t *ha)
6054 int rss_id, txq_array_index, tc;
6056 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
6058 struct qlnx_fastpath *fp = &ha->fp_array[rss_id];
6060 fp->rss_id = rss_id;
6062 fp->sb_info = &ha->sb_array[rss_id];
6063 fp->rxq = &ha->rxq_array[rss_id];
6064 fp->rxq->rxq_id = rss_id;
6066 for (tc = 0; tc < ha->num_tc; tc++) {
6067 txq_array_index = tc * ha->num_rss + rss_id;
6068 fp->txq[tc] = &ha->txq_array[txq_array_index];
6069 fp->txq[tc]->index = txq_array_index;
6072 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", qlnx_name_str,
6075 fp->tx_ring_full = 0;
6077 /* reset all the statistics counters */
6079 fp->tx_pkts_processed = 0;
6080 fp->tx_pkts_freed = 0;
6081 fp->tx_pkts_transmitted = 0;
6082 fp->tx_pkts_completed = 0;
6084 #ifdef QLNX_TRACE_PERF_DATA
6085 fp->tx_pkts_trans_ctx = 0;
6086 fp->tx_pkts_compl_ctx = 0;
6087 fp->tx_pkts_trans_fp = 0;
6088 fp->tx_pkts_compl_fp = 0;
6089 fp->tx_pkts_compl_intr = 0;
6091 fp->tx_lso_wnd_min_len = 0;
6093 fp->tx_nsegs_gt_elem_left = 0;
6094 fp->tx_tso_max_nsegs = 0;
6095 fp->tx_tso_min_nsegs = 0;
6096 fp->err_tx_nsegs_gt_elem_left = 0;
6097 fp->err_tx_dmamap_create = 0;
6098 fp->err_tx_defrag_dmamap_load = 0;
6099 fp->err_tx_non_tso_max_seg = 0;
6100 fp->err_tx_dmamap_load = 0;
6101 fp->err_tx_defrag = 0;
6102 fp->err_tx_free_pkt_null = 0;
6103 fp->err_tx_cons_idx_conflict = 0;
6106 fp->err_m_getcl = 0;
6107 fp->err_m_getjcl = 0;
6113 qlnx_free_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info)
6115 struct ecore_dev *cdev;
6119 if (sb_info->sb_virt) {
6120 OSAL_DMA_FREE_COHERENT(cdev, ((void *)sb_info->sb_virt),
6121 (sb_info->sb_phys), (sizeof(*sb_info->sb_virt)));
6122 sb_info->sb_virt = NULL;
6127 qlnx_sb_init(struct ecore_dev *cdev, struct ecore_sb_info *sb_info,
6128 void *sb_virt_addr, bus_addr_t sb_phy_addr, u16 sb_id)
6130 struct ecore_hwfn *p_hwfn;
6134 hwfn_index = sb_id % cdev->num_hwfns;
6135 p_hwfn = &cdev->hwfns[hwfn_index];
6136 rel_sb_id = sb_id / cdev->num_hwfns;
6138 QL_DPRINT2(((qlnx_host_t *)cdev),
6139 "hwfn_index = %d p_hwfn = %p sb_id = 0x%x rel_sb_id = 0x%x \
6140 sb_info = %p sb_virt_addr = %p sb_phy_addr = %p\n",
6141 hwfn_index, p_hwfn, sb_id, rel_sb_id, sb_info,
6142 sb_virt_addr, (void *)sb_phy_addr);
6144 rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
6145 sb_virt_addr, sb_phy_addr, rel_sb_id);
6150 /* This function allocates fast-path status block memory */
6152 qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info, u16 sb_id)
6154 struct status_block_e4 *sb_virt;
6158 struct ecore_dev *cdev;
6162 size = sizeof(*sb_virt);
6163 sb_virt = OSAL_DMA_ALLOC_COHERENT(cdev, (&sb_phys), size);
6166 QL_DPRINT1(ha, "Status block allocation failed\n");
6170 rc = qlnx_sb_init(cdev, sb_info, sb_virt, sb_phys, sb_id);
6172 OSAL_DMA_FREE_COHERENT(cdev, sb_virt, sb_phys, size);
6179 qlnx_free_rx_buffers(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
6182 struct sw_rx_data *rx_buf;
6184 for (i = 0; i < rxq->num_rx_buffers; i++) {
6186 rx_buf = &rxq->sw_rx_ring[i];
6188 if (rx_buf->data != NULL) {
6189 if (rx_buf->map != NULL) {
6190 bus_dmamap_unload(ha->rx_tag, rx_buf->map);
6191 bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
6194 m_freem(rx_buf->data);
6195 rx_buf->data = NULL;
6202 qlnx_free_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
6204 struct ecore_dev *cdev;
6209 qlnx_free_rx_buffers(ha, rxq);
6211 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
6212 qlnx_free_tpa_mbuf(ha, &rxq->tpa_info[i]);
6213 if (rxq->tpa_info[i].mpf != NULL)
6214 m_freem(rxq->tpa_info[i].mpf);
6217 bzero((void *)&rxq->sw_rx_ring[0],
6218 (sizeof (struct sw_rx_data) * RX_RING_SIZE));
6220 /* Free the real RQ ring used by FW */
6221 if (rxq->rx_bd_ring.p_virt_addr) {
6222 ecore_chain_free(cdev, &rxq->rx_bd_ring);
6223 rxq->rx_bd_ring.p_virt_addr = NULL;
6226 /* Free the real completion ring used by FW */
6227 if (rxq->rx_comp_ring.p_virt_addr &&
6228 rxq->rx_comp_ring.pbl_sp.p_virt_table) {
6229 ecore_chain_free(cdev, &rxq->rx_comp_ring);
6230 rxq->rx_comp_ring.p_virt_addr = NULL;
6231 rxq->rx_comp_ring.pbl_sp.p_virt_table = NULL;
6234 #ifdef QLNX_SOFT_LRO
6236 struct lro_ctrl *lro;
6241 #endif /* #ifdef QLNX_SOFT_LRO */
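/*
 * Allocate and post a single receive buffer: grab a cluster mbuf sized to
 * rxq->rx_buf_size, DMA-map it, record the mapping in the software ring
 * at sw_rx_prod, publish the bus address into the next hardware BD (split
 * into hi/lo 32-bit halves, little-endian), sync the map for PREREAD and
 * advance the software producer index.
 */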
6247 qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
6249 register struct mbuf *mp;
6250 uint16_t rx_buf_size;
6251 struct sw_rx_data *sw_rx_data;
6252 struct eth_rx_bd *rx_bd;
6253 dma_addr_t dma_addr;
6255 bus_dma_segment_t segs[1];
6258 struct ecore_dev *cdev;
6262 rx_buf_size = rxq->rx_buf_size;
6264 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);
6267 QL_DPRINT1(ha, "Failed to allocate Rx data\n");
6271 mp->m_len = mp->m_pkthdr.len = rx_buf_size;
6273 map = (bus_dmamap_t)0;
6275 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
6277 dma_addr = segs[0].ds_addr;
6279 if (ret || !dma_addr || (nsegs != 1)) {
6281 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
6282 ret, (long long unsigned int)dma_addr, nsegs);
6286 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
6287 sw_rx_data->data = mp;
6288 sw_rx_data->dma_addr = dma_addr;
6289 sw_rx_data->map = map;
6291 /* Advance PROD and get BD pointer */
6292 rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
6293 rx_bd->addr.hi = htole32(U64_HI(dma_addr));
6294 rx_bd->addr.lo = htole32(U64_LO(dma_addr));
6295 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);
6297 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
6303 qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
6304 struct qlnx_agg_info *tpa)
6307 dma_addr_t dma_addr;
6309 bus_dma_segment_t segs[1];
6312 struct sw_rx_data *rx_buf;
6314 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);
6317 QL_DPRINT1(ha, "Failed to allocate Rx data\n");
6321 mp->m_len = mp->m_pkthdr.len = rx_buf_size;
6323 map = (bus_dmamap_t)0;
6325 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
6327 dma_addr = segs[0].ds_addr;
6329 if (ret || !dma_addr || (nsegs != 1)) {
6331 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
6332 ret, (long long unsigned int)dma_addr, nsegs);
6336 rx_buf = &tpa->rx_buf;
6338 memset(rx_buf, 0, sizeof (struct sw_rx_data));
6341 rx_buf->dma_addr = dma_addr;
6344 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);
6350 qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa)
6352 struct sw_rx_data *rx_buf;
6354 rx_buf = &tpa->rx_buf;
6356 if (rx_buf->data != NULL) {
6357 if (rx_buf->map != NULL) {
6358 bus_dmamap_unload(ha->rx_tag, rx_buf->map);
6359 bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
6362 m_freem(rx_buf->data);
6363 rx_buf->data = NULL;
6368 /* This function allocates all memory needed per Rx queue */
6370 qlnx_alloc_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
6372 int i, rc, num_allocated;
6374 struct ecore_dev *cdev;
6379 rxq->num_rx_buffers = RX_RING_SIZE;
6381 rxq->rx_buf_size = ha->rx_buf_size;
6383 /* Allocate the parallel driver ring for Rx buffers */
6384 bzero((void *)&rxq->sw_rx_ring[0],
6385 (sizeof (struct sw_rx_data) * RX_RING_SIZE));
6387 /* Allocate FW Rx ring */
6389 rc = ecore_chain_alloc(cdev,
6390 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
6391 ECORE_CHAIN_MODE_NEXT_PTR,
6392 ECORE_CHAIN_CNT_TYPE_U16,
6394 sizeof(struct eth_rx_bd),
6395 &rxq->rx_bd_ring, NULL);
6400 /* Allocate FW completion ring */
6401 rc = ecore_chain_alloc(cdev,
6402 ECORE_CHAIN_USE_TO_CONSUME,
6403 ECORE_CHAIN_MODE_PBL,
6404 ECORE_CHAIN_CNT_TYPE_U16,
6406 sizeof(union eth_rx_cqe),
6407 &rxq->rx_comp_ring, NULL);
6412 /* Allocate buffers for the Rx ring */
6414 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
6415 rc = qlnx_alloc_tpa_mbuf(ha, rxq->rx_buf_size,
6422 for (i = 0; i < rxq->num_rx_buffers; i++) {
6423 rc = qlnx_alloc_rx_buffer(ha, rxq);
6428 if (!num_allocated) {
6429 QL_DPRINT1(ha, "Rx buffers allocation failed\n");
6431 } else if (num_allocated < rxq->num_rx_buffers) {
6432 QL_DPRINT1(ha, "Allocated less buffers than"
6433 " desired (%d allocated)\n", num_allocated);
6436 #ifdef QLNX_SOFT_LRO
6439 struct lro_ctrl *lro;
6443 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
6444 if (tcp_lro_init_args(lro, ifp, 0, rxq->num_rx_buffers)) {
6445 QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n",
6450 if (tcp_lro_init(lro)) {
6451 QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n",
6455 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
6459 #endif /* #ifdef QLNX_SOFT_LRO */
6463 qlnx_free_mem_rxq(ha, rxq);
6469 qlnx_free_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6470 struct qlnx_tx_queue *txq)
6472 struct ecore_dev *cdev;
6476 bzero((void *)&txq->sw_tx_ring[0],
6477 (sizeof (struct sw_tx_bd) * TX_RING_SIZE));
6479 /* Free the real RQ ring used by FW */
6480 if (txq->tx_pbl.p_virt_addr) {
6481 ecore_chain_free(cdev, &txq->tx_pbl);
6482 txq->tx_pbl.p_virt_addr = NULL;
6487 /* This function allocates all memory needed per Tx queue */
6489 qlnx_alloc_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6490 struct qlnx_tx_queue *txq)
6492 int ret = ECORE_SUCCESS;
6493 union eth_tx_bd_types *p_virt;
6494 struct ecore_dev *cdev;
6498 bzero((void *)&txq->sw_tx_ring[0],
6499 (sizeof (struct sw_tx_bd) * TX_RING_SIZE));
6501 /* Allocate the real Tx ring to be used by FW */
6502 ret = ecore_chain_alloc(cdev,
6503 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
6504 ECORE_CHAIN_MODE_PBL,
6505 ECORE_CHAIN_CNT_TYPE_U16,
6508 &txq->tx_pbl, NULL);
6510 if (ret != ECORE_SUCCESS) {
6514 txq->num_tx_buffers = TX_RING_SIZE;
6519 qlnx_free_mem_txq(ha, fp, txq);
6524 qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6527 struct ifnet *ifp = ha->ifp;
6529 if (mtx_initialized(&fp->tx_mtx)) {
6531 if (fp->tx_br != NULL) {
6533 mtx_lock(&fp->tx_mtx);
6535 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
6536 fp->tx_pkts_freed++;
6540 mtx_unlock(&fp->tx_mtx);
6542 buf_ring_free(fp->tx_br, M_DEVBUF);
6545 mtx_destroy(&fp->tx_mtx);
6551 qlnx_free_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6555 qlnx_free_mem_sb(ha, fp->sb_info);
6557 qlnx_free_mem_rxq(ha, fp->rxq);
6559 for (tc = 0; tc < ha->num_tc; tc++)
6560 qlnx_free_mem_txq(ha, fp, fp->txq[tc]);
6566 qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6568 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
6569 "qlnx%d_fp%d_tx_mq_lock", ha->dev_unit, fp->rss_id);
6571 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
6573 fp->tx_br = buf_ring_alloc(TX_RING_SIZE, M_DEVBUF,
6574 M_NOWAIT, &fp->tx_mtx);
6575 if (fp->tx_br == NULL) {
6576 QL_DPRINT1(ha, "buf_ring_alloc failed for fp[%d, %d]\n",
6577 ha->dev_unit, fp->rss_id);
6584 qlnx_alloc_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6588 rc = qlnx_alloc_mem_sb(ha, fp->sb_info, fp->rss_id);
6592 if (ha->rx_jumbo_buf_eq_mtu) {
6593 if (ha->max_frame_size <= MCLBYTES)
6594 ha->rx_buf_size = MCLBYTES;
6595 else if (ha->max_frame_size <= MJUMPAGESIZE)
6596 ha->rx_buf_size = MJUMPAGESIZE;
6597 else if (ha->max_frame_size <= MJUM9BYTES)
6598 ha->rx_buf_size = MJUM9BYTES;
6599 else if (ha->max_frame_size <= MJUM16BYTES)
6600 ha->rx_buf_size = MJUM16BYTES;
6602 if (ha->max_frame_size <= MCLBYTES)
6603 ha->rx_buf_size = MCLBYTES;
6605 ha->rx_buf_size = MJUMPAGESIZE;
6608 rc = qlnx_alloc_mem_rxq(ha, fp->rxq);
6612 for (tc = 0; tc < ha->num_tc; tc++) {
6613 rc = qlnx_alloc_mem_txq(ha, fp, fp->txq[tc]);
6621 qlnx_free_mem_fp(ha, fp);
6626 qlnx_free_mem_load(qlnx_host_t *ha)
6629 struct ecore_dev *cdev;
6633 for (i = 0; i < ha->num_rss; i++) {
6634 struct qlnx_fastpath *fp = &ha->fp_array[i];
6636 qlnx_free_mem_fp(ha, fp);
6642 qlnx_alloc_mem_load(qlnx_host_t *ha)
6646 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
6647 struct qlnx_fastpath *fp = &ha->fp_array[rss_id];
6649 rc = qlnx_alloc_mem_fp(ha, fp);
6657 qlnx_start_vport(struct ecore_dev *cdev,
6661 u8 inner_vlan_removal_en_flg,
6666 struct ecore_sp_vport_start_params vport_start_params = { 0 };
6669 ha = (qlnx_host_t *)cdev;
6671 vport_start_params.remove_inner_vlan = inner_vlan_removal_en_flg;
6672 vport_start_params.tx_switching = 0;
6673 vport_start_params.handle_ptp_pkts = 0;
6674 vport_start_params.only_untagged = 0;
6675 vport_start_params.drop_ttl0 = drop_ttl0_flg;
6677 vport_start_params.tpa_mode =
6678 (hw_lro_enable ? ECORE_TPA_MODE_RSC : ECORE_TPA_MODE_NONE);
6679 vport_start_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;
6681 vport_start_params.vport_id = vport_id;
6682 vport_start_params.mtu = mtu;
6685 QL_DPRINT2(ha, "Setting mtu to %d and VPORT ID = %d\n", mtu, vport_id);
6687 for_each_hwfn(cdev, i) {
6688 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
6690 vport_start_params.concrete_fid = p_hwfn->hw_info.concrete_fid;
6691 vport_start_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
6693 rc = ecore_sp_vport_start(p_hwfn, &vport_start_params);
6696 QL_DPRINT1(ha, "Failed to start VPORT V-PORT %d"
6697 " with MTU %d\n" , vport_id, mtu);
6701 ecore_hw_start_fastpath(p_hwfn);
6703 QL_DPRINT2(ha, "Started V-PORT %d with MTU %d\n",
6711 qlnx_update_vport(struct ecore_dev *cdev,
6712 struct qlnx_update_vport_params *params)
6714 struct ecore_sp_vport_update_params sp_params;
6715 int rc, i, j, fp_index;
6716 struct ecore_hwfn *p_hwfn;
6717 struct ecore_rss_params *rss;
6718 qlnx_host_t *ha = (qlnx_host_t *)cdev;
6719 struct qlnx_fastpath *fp;
6721 memset(&sp_params, 0, sizeof(sp_params));
6722 /* Translate protocol params into sp params */
6723 sp_params.vport_id = params->vport_id;
6725 sp_params.update_vport_active_rx_flg =
6726 params->update_vport_active_rx_flg;
6727 sp_params.vport_active_rx_flg = params->vport_active_rx_flg;
6729 sp_params.update_vport_active_tx_flg =
6730 params->update_vport_active_tx_flg;
6731 sp_params.vport_active_tx_flg = params->vport_active_tx_flg;
6733 sp_params.update_inner_vlan_removal_flg =
6734 params->update_inner_vlan_removal_flg;
6735 sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg;
6737 sp_params.sge_tpa_params = params->sge_tpa_params;
6739 /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns.
6740 * We need to re-fix the rss values per engine for CMT.
6742 if (params->rss_params->update_rss_config)
6743 sp_params.rss_params = params->rss_params;
6745 sp_params.rss_params = NULL;
6747 for_each_hwfn(cdev, i) {
6749 p_hwfn = &cdev->hwfns[i];
6751 if ((cdev->num_hwfns > 1) &&
6752 params->rss_params->update_rss_config &&
6753 params->rss_params->rss_enable) {
6755 rss = params->rss_params;
6757 for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE; j++) {
6759 fp_index = ((cdev->num_hwfns * j) + i) %
6762 fp = &ha->fp_array[fp_index];
6763 rss->rss_ind_table[j] = fp->rxq->handle;
6766 for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE;) {
6767 QL_DPRINT3(ha, "%p %p %p %p %p %p %p %p \n",
6768 rss->rss_ind_table[j],
6769 rss->rss_ind_table[j+1],
6770 rss->rss_ind_table[j+2],
6771 rss->rss_ind_table[j+3],
6772 rss->rss_ind_table[j+4],
6773 rss->rss_ind_table[j+5],
6774 rss->rss_ind_table[j+6],
6775 rss->rss_ind_table[j+7]);
6780 sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
6782 QL_DPRINT1(ha, "Update sp vport ID=%d\n", params->vport_id);
6784 rc = ecore_sp_vport_update(p_hwfn, &sp_params,
6785 ECORE_SPQ_MODE_EBLOCK, NULL);
6787 QL_DPRINT1(ha, "Failed to update VPORT\n");
6791 QL_DPRINT2(ha, "Updated V-PORT %d: tx_active_flag %d, \
6792 rx_active_flag %d [tx_update %d], [rx_update %d]\n",
6793 params->vport_id, params->vport_active_tx_flg,
6794 params->vport_active_rx_flg,
6795 params->update_vport_active_tx_flg,
6796 params->update_vport_active_rx_flg);
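/*
 * Recycle path for allocation failures: rather than leaving a hole in the
 * RX ring, copy the just-consumed BD back into the producer slot so the
 * same mbuf is re-posted to the hardware, and advance both software
 * indices in lock step.
 */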
6803 qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq)
6805 struct eth_rx_bd *rx_bd_cons =
6806 ecore_chain_consume(&rxq->rx_bd_ring);
6807 struct eth_rx_bd *rx_bd_prod =
6808 ecore_chain_produce(&rxq->rx_bd_ring);
6809 struct sw_rx_data *sw_rx_data_cons =
6810 &rxq->sw_rx_ring[rxq->sw_rx_cons];
6811 struct sw_rx_data *sw_rx_data_prod =
6812 &rxq->sw_rx_ring[rxq->sw_rx_prod];
6814 sw_rx_data_prod->data = sw_rx_data_cons->data;
6815 memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd));
6817 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
6818 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
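/*
 * Publish both producers to the device: the BD and CQE producer indices
 * are packed into a single eth_rx_prod_data image and written together to
 * the queue's producer address in internal RAM in one internal_ram_wr()
 * call, after the barrier below has made the BD contents visible.
 */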
6824 qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, struct qlnx_rx_queue *rxq)
6830 struct eth_rx_prod_data rx_prod_data;
6834 bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
6835 cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
6837 /* Update producers */
6838 rx_prods.rx_prod_data.bd_prod = htole16(bd_prod);
6839 rx_prods.rx_prod_data.cqe_prod = htole16(cqe_prod);
6841 /* Make sure that the BD and SGE data is updated before updating the
6842 * producers since FW might read the BD/SGE right after the producer
6847 internal_ram_wr(p_hwfn, rxq->hw_rxq_prod_addr,
6848 sizeof(rx_prods), &rx_prods.data32);
6850 /* mmiowb is needed to synchronize doorbell writes from more than one
6851 * processor. It guarantees that the write arrives to the device before
6852 * the napi lock is released and another qlnx_poll is called (possibly
6853 * on another CPU). Without this barrier, the next doorbell can bypass
6854 * this doorbell. This is applicable to IA64/Altix systems.
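/*
 * Default 40-byte Toeplitz RSS hash key (the well-known key published in
 * the Microsoft RSS specification), packed big-endian into ten 32-bit
 * words.
 */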
6861 static uint32_t qlnx_hash_key[] = {
6862 ((0x6d << 24)|(0x5a << 16)|(0x56 << 8)|0xda),
6863 ((0x25 << 24)|(0x5b << 16)|(0x0e << 8)|0xc2),
6864 ((0x41 << 24)|(0x67 << 16)|(0x25 << 8)|0x3d),
6865 ((0x43 << 24)|(0xa3 << 16)|(0x8f << 8)|0xb0),
6866 ((0xd0 << 24)|(0xca << 16)|(0x2b << 8)|0xcb),
6867 ((0xae << 24)|(0x7b << 16)|(0x30 << 8)|0xb4),
6868 ((0x77 << 24)|(0xcb << 16)|(0x2d << 8)|0xa3),
6869 ((0x80 << 24)|(0x30 << 16)|(0xf2 << 8)|0x0c),
6870 ((0x6a << 24)|(0x42 << 16)|(0xb7 << 8)|0x3b),
6871 ((0xbe << 24)|(0xac << 16)|(0x01 << 8)|0xfa)};
6874 qlnx_start_queues(qlnx_host_t *ha)
6876 int rc, tc, i, vport_id = 0,
6877 drop_ttl0_flg = 1, vlan_removal_en = 1,
6878 tx_switching = 0, hw_lro_enable = 0;
6879 struct ecore_dev *cdev = &ha->cdev;
6880 struct ecore_rss_params *rss_params = &ha->rss_params;
6881 struct qlnx_update_vport_params vport_update_params;
6883 struct ecore_hwfn *p_hwfn;
6884 struct ecore_sge_tpa_params tpa_params;
6885 struct ecore_queue_start_common_params qparams;
6886 struct qlnx_fastpath *fp;
6890 QL_DPRINT1(ha, "Num RSS = %d\n", ha->num_rss);
6893 QL_DPRINT1(ha, "Cannot update V-VPORT as active as there"
6894 " are no Rx queues\n");
6898 #ifndef QLNX_SOFT_LRO
6899 hw_lro_enable = ifp->if_capenable & IFCAP_LRO;
6900 #endif /* #ifndef QLNX_SOFT_LRO */
6902 rc = qlnx_start_vport(cdev, vport_id, ifp->if_mtu, drop_ttl0_flg,
6903 vlan_removal_en, tx_switching, hw_lro_enable);
6906 QL_DPRINT1(ha, "Start V-PORT failed %d\n", rc);
6910 QL_DPRINT2(ha, "Start vport ramrod passed, "
6911 "vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
6912 vport_id, (int)(ifp->if_mtu + 0xe), vlan_removal_en);
6915 struct ecore_rxq_start_ret_params rx_ret_params;
6916 struct ecore_txq_start_ret_params tx_ret_params;
6918 fp = &ha->fp_array[i];
6919 p_hwfn = &cdev->hwfns[(fp->rss_id % cdev->num_hwfns)];
6921 bzero(&qparams, sizeof(struct ecore_queue_start_common_params));
6922 bzero(&rx_ret_params,
6923 sizeof (struct ecore_rxq_start_ret_params));
6925 qparams.queue_id = i ;
6926 qparams.vport_id = vport_id;
6927 qparams.stats_id = vport_id;
6928 qparams.p_sb = fp->sb_info;
6929 qparams.sb_idx = RX_PI;
6932 rc = ecore_eth_rx_queue_start(p_hwfn,
6933 p_hwfn->hw_info.opaque_fid,
6935 fp->rxq->rx_buf_size, /* bd_max_bytes */
6936 /* bd_chain_phys_addr */
6937 fp->rxq->rx_bd_ring.p_phys_addr,
6939 ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring),
6941 ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring),
6945 QL_DPRINT1(ha, "Start RXQ #%d failed %d\n", i, rc);
6949 fp->rxq->hw_rxq_prod_addr = rx_ret_params.p_prod;
6950 fp->rxq->handle = rx_ret_params.p_handle;
6951 fp->rxq->hw_cons_ptr =
6952 &fp->sb_info->sb_virt->pi_array[RX_PI];
6954 qlnx_update_rx_prod(p_hwfn, fp->rxq);
6956 for (tc = 0; tc < ha->num_tc; tc++) {
6957 struct qlnx_tx_queue *txq = fp->txq[tc];
6960 sizeof(struct ecore_queue_start_common_params));
6961 bzero(&tx_ret_params,
6962 sizeof (struct ecore_txq_start_ret_params));
6964 qparams.queue_id = txq->index / cdev->num_hwfns ;
6965 qparams.vport_id = vport_id;
6966 qparams.stats_id = vport_id;
6967 qparams.p_sb = fp->sb_info;
6968 qparams.sb_idx = TX_PI(tc);
6970 rc = ecore_eth_tx_queue_start(p_hwfn,
6971 p_hwfn->hw_info.opaque_fid,
6973 /* bd_chain_phys_addr */
6974 ecore_chain_get_pbl_phys(&txq->tx_pbl),
6975 ecore_chain_get_page_cnt(&txq->tx_pbl),
6979 QL_DPRINT1(ha, "Start TXQ #%d failed %d\n",
6984 txq->doorbell_addr = tx_ret_params.p_doorbell;
6985 txq->handle = tx_ret_params.p_handle;
6988 &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
6989 SET_FIELD(txq->tx_db.data.params,
6990 ETH_DB_DATA_DEST, DB_DEST_XCM);
6991 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
6993 SET_FIELD(txq->tx_db.data.params,
6994 ETH_DB_DATA_AGG_VAL_SEL,
6995 DQ_XCM_ETH_TX_BD_PROD_CMD);
6997 txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
7001 /* Fill struct with RSS params */
7002 if (ha->num_rss > 1) {
7004 rss_params->update_rss_config = 1;
7005 rss_params->rss_enable = 1;
7006 rss_params->update_rss_capabilities = 1;
7007 rss_params->update_rss_ind_table = 1;
7008 rss_params->update_rss_key = 1;
7009 rss_params->rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 |
7010 ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP;
7011 rss_params->rss_table_size_log = 7; /* 2^7 = 128 */
7013 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
7014 fp = &ha->fp_array[(i % ha->num_rss)];
7015 rss_params->rss_ind_table[i] = fp->rxq->handle;
7018 for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
7019 rss_params->rss_key[i] = (__le32)qlnx_hash_key[i];
7022 memset(rss_params, 0, sizeof(*rss_params));
7026 /* Prepare and send the vport enable */
7027 memset(&vport_update_params, 0, sizeof(vport_update_params));
7028 vport_update_params.vport_id = vport_id;
7029 vport_update_params.update_vport_active_tx_flg = 1;
7030 vport_update_params.vport_active_tx_flg = 1;
7031 vport_update_params.update_vport_active_rx_flg = 1;
7032 vport_update_params.vport_active_rx_flg = 1;
7033 vport_update_params.rss_params = rss_params;
7034 vport_update_params.update_inner_vlan_removal_flg = 1;
7035 vport_update_params.inner_vlan_removal_flg = 1;
7037 if (hw_lro_enable) {
7038 memset(&tpa_params, 0, sizeof (struct ecore_sge_tpa_params));
7040 tpa_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;
7042 tpa_params.update_tpa_en_flg = 1;
7043 tpa_params.tpa_ipv4_en_flg = 1;
7044 tpa_params.tpa_ipv6_en_flg = 1;
7046 tpa_params.update_tpa_param_flg = 1;
7047 tpa_params.tpa_pkt_split_flg = 0;
7048 tpa_params.tpa_hdr_data_split_flg = 0;
7049 tpa_params.tpa_gro_consistent_flg = 0;
7050 tpa_params.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
7051 tpa_params.tpa_max_size = (uint16_t)(-1);
7052 tpa_params.tpa_min_size_to_start = ifp->if_mtu/2;
7053 tpa_params.tpa_min_size_to_cont = ifp->if_mtu/2;
7055 vport_update_params.sge_tpa_params = &tpa_params;
7058 rc = qlnx_update_vport(cdev, &vport_update_params);
7060 QL_DPRINT1(ha, "Update V-PORT failed %d\n", rc);
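/*
 * Drain a TX queue prior to stopping it: poll the status block's hardware
 * consumer against the chain's consumer index, reaping completions via
 * qlnx_tx_int() under the fastpath TX lock and sleeping 2ms between
 * passes until the two indices converge.
 */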
7068 qlnx_drain_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
7069 struct qlnx_tx_queue *txq)
7071 uint16_t hw_bd_cons;
7072 uint16_t ecore_cons_idx;
7074 QL_DPRINT2(ha, "enter\n");
7076 hw_bd_cons = le16toh(*txq->hw_cons_ptr);
7078 while (hw_bd_cons !=
7079 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
7081 mtx_lock(&fp->tx_mtx);
7083 (void)qlnx_tx_int(ha, fp, txq);
7085 mtx_unlock(&fp->tx_mtx);
7087 qlnx_mdelay(__func__, 2);
7089 hw_bd_cons = le16toh(*txq->hw_cons_ptr);
7092 QL_DPRINT2(ha, "[%d, %d]: done\n", fp->rss_id, txq->index);
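/*
 * Queue teardown mirrors bring-up in reverse: first disable the vport
 * (TX/RX inactive, RSS off), then drain every TX queue, stop the TX and
 * RX queues from the highest RSS index down, and finally issue the
 * vport-stop ramrod on each hwfn.
 */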
7098 qlnx_stop_queues(qlnx_host_t *ha)
7100 struct qlnx_update_vport_params vport_update_params;
7101 struct ecore_dev *cdev;
7102 struct qlnx_fastpath *fp;
7107 /* Disable the vport */
7109 memset(&vport_update_params, 0, sizeof(vport_update_params));
7111 vport_update_params.vport_id = 0;
7112 vport_update_params.update_vport_active_tx_flg = 1;
7113 vport_update_params.vport_active_tx_flg = 0;
7114 vport_update_params.update_vport_active_rx_flg = 1;
7115 vport_update_params.vport_active_rx_flg = 0;
7116 vport_update_params.rss_params = &ha->rss_params;
7117 vport_update_params.rss_params->update_rss_config = 0;
7118 vport_update_params.rss_params->rss_enable = 0;
7119 vport_update_params.update_inner_vlan_removal_flg = 0;
7120 vport_update_params.inner_vlan_removal_flg = 0;
7122 QL_DPRINT1(ha, "Update vport ID= %d\n", vport_update_params.vport_id);
7124 rc = qlnx_update_vport(cdev, &vport_update_params);
7126 QL_DPRINT1(ha, "Failed to update vport\n");
7130 /* Flush Tx queues. If needed, request drain from MCP */
7132 fp = &ha->fp_array[i];
7134 for (tc = 0; tc < ha->num_tc; tc++) {
7135 struct qlnx_tx_queue *txq = fp->txq[tc];
7137 rc = qlnx_drain_txq(ha, fp, txq);
7143 /* Stop all Queues in reverse order*/
7144 for (i = ha->num_rss - 1; i >= 0; i--) {
7146 struct ecore_hwfn *p_hwfn = &cdev->hwfns[(i % cdev->num_hwfns)];
7148 fp = &ha->fp_array[i];
7150 /* Stop the Tx Queue(s)*/
7151 for (tc = 0; tc < ha->num_tc; tc++) {
7154 tx_queue_id = tc * ha->num_rss + i;
7155 rc = ecore_eth_tx_queue_stop(p_hwfn,
7156 fp->txq[tc]->handle);
7159 QL_DPRINT1(ha, "Failed to stop TXQ #%d\n",
7165 /* Stop the Rx Queue*/
7166 rc = ecore_eth_rx_queue_stop(p_hwfn, fp->rxq->handle, false,
7169 QL_DPRINT1(ha, "Failed to stop RXQ #%d\n", i);
7174 /* Stop the vport */
7175 for_each_hwfn(cdev, i) {
7177 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
7179 rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 0);
7182 QL_DPRINT1(ha, "Failed to stop VPORT\n");
7191 qlnx_set_ucast_rx_mac(qlnx_host_t *ha,
7192 enum ecore_filter_opcode opcode,
7193 unsigned char mac[ETH_ALEN])
7195 struct ecore_filter_ucast ucast;
7196 struct ecore_dev *cdev;
7201 bzero(&ucast, sizeof(struct ecore_filter_ucast));
7203 ucast.opcode = opcode;
7204 ucast.type = ECORE_FILTER_MAC;
7205 ucast.is_rx_filter = 1;
7206 ucast.vport_to_add_to = 0;
7207 memcpy(&ucast.mac[0], mac, ETH_ALEN);
7209 rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);
7215 qlnx_remove_all_ucast_mac(qlnx_host_t *ha)
7217 struct ecore_filter_ucast ucast;
7218 struct ecore_dev *cdev;
7221 bzero(&ucast, sizeof(struct ecore_filter_ucast));
7223 ucast.opcode = ECORE_FILTER_REPLACE;
7224 ucast.type = ECORE_FILTER_MAC;
7225 ucast.is_rx_filter = 1;
7229 rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);
7235 qlnx_remove_all_mcast_mac(qlnx_host_t *ha)
7237 struct ecore_filter_mcast *mcast;
7238 struct ecore_dev *cdev;
7243 mcast = &ha->ecore_mcast;
7244 bzero(mcast, sizeof(struct ecore_filter_mcast));
7246 mcast->opcode = ECORE_FILTER_REMOVE;
7248 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
7250 if (ha->mcast[i].addr[0] || ha->mcast[i].addr[1] ||
7251 ha->mcast[i].addr[2] || ha->mcast[i].addr[3] ||
7252 ha->mcast[i].addr[4] || ha->mcast[i].addr[5]) {
7254 memcpy(&mcast->mac[i][0], &ha->mcast[i].addr[0], ETH_ALEN);
7255 mcast->num_mc_addrs++;
7258 mcast = &ha->ecore_mcast;
7260 rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);
7262 bzero(ha->mcast, (sizeof(qlnx_mcast_t) * QLNX_MAX_NUM_MULTICAST_ADDRS));
7269 qlnx_clean_filters(qlnx_host_t *ha)
7273 /* Remove all unicast macs */
7274 rc = qlnx_remove_all_ucast_mac(ha);
7278 /* Remove all multicast macs */
7279 rc = qlnx_remove_all_mcast_mac(ha);
7283 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_FLUSH, ha->primary_mac);
7289 qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter)
7291 struct ecore_filter_accept_flags accept;
7293 struct ecore_dev *cdev;
7297 bzero(&accept, sizeof(struct ecore_filter_accept_flags));
7299 accept.update_rx_mode_config = 1;
7300 accept.rx_accept_filter = filter;
7302 accept.update_tx_mode_config = 1;
7303 accept.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
7304 ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST;
7306 rc = ecore_filter_accept_cmd(cdev, 0, accept, false, false,
7307 ECORE_SPQ_MODE_CB, NULL);
7313 qlnx_set_rx_mode(qlnx_host_t *ha)
7318 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, ha->primary_mac);
7322 rc = qlnx_remove_all_mcast_mac(ha);
7326 filter = ECORE_ACCEPT_UCAST_MATCHED |
7327 ECORE_ACCEPT_MCAST_MATCHED |
7330 if (qlnx_vf_device(ha) == 0) {
7331 filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
7332 filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
7334 ha->filter = filter;
7336 rc = qlnx_set_rx_accept_filter(ha, filter);
7342 qlnx_set_link(qlnx_host_t *ha, bool link_up)
7345 struct ecore_dev *cdev;
7346 struct ecore_hwfn *hwfn;
7347 struct ecore_ptt *ptt;
7349 if (qlnx_vf_device(ha) == 0)
7354 for_each_hwfn(cdev, i) {
7356 hwfn = &cdev->hwfns[i];
7358 ptt = ecore_ptt_acquire(hwfn);
7362 rc = ecore_mcp_set_link(hwfn, ptt, link_up);
7364 ecore_ptt_release(hwfn, ptt);
7372 #if __FreeBSD_version >= 1100000
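/*
 * if_get_counter() method (FreeBSD 11 and later): interface counters are
 * synthesized from the most recent ecore vport statistics snapshot by
 * summing the unicast/multicast/broadcast components; counters the
 * hardware does not track fall through to if_get_counter_default().
 */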
7374 qlnx_get_counter(if_t ifp, ift_counter cnt)
7379 ha = (qlnx_host_t *)if_getsoftc(ifp);
7383 case IFCOUNTER_IPACKETS:
7384 count = ha->hw_stats.common.rx_ucast_pkts +
7385 ha->hw_stats.common.rx_mcast_pkts +
7386 ha->hw_stats.common.rx_bcast_pkts;
7389 case IFCOUNTER_IERRORS:
7390 count = ha->hw_stats.common.rx_crc_errors +
7391 ha->hw_stats.common.rx_align_errors +
7392 ha->hw_stats.common.rx_oversize_packets +
7393 ha->hw_stats.common.rx_undersize_packets;
7396 case IFCOUNTER_OPACKETS:
7397 count = ha->hw_stats.common.tx_ucast_pkts +
7398 ha->hw_stats.common.tx_mcast_pkts +
7399 ha->hw_stats.common.tx_bcast_pkts;
7402 case IFCOUNTER_OERRORS:
7403 count = ha->hw_stats.common.tx_err_drop_pkts;
7406 case IFCOUNTER_COLLISIONS:
7409 case IFCOUNTER_IBYTES:
7410 count = ha->hw_stats.common.rx_ucast_bytes +
7411 ha->hw_stats.common.rx_mcast_bytes +
7412 ha->hw_stats.common.rx_bcast_bytes;
7415 case IFCOUNTER_OBYTES:
7416 count = ha->hw_stats.common.tx_ucast_bytes +
7417 ha->hw_stats.common.tx_mcast_bytes +
7418 ha->hw_stats.common.tx_bcast_bytes;
7421 case IFCOUNTER_IMCASTS:
7422 count = ha->hw_stats.common.rx_mcast_pkts;
7425 case IFCOUNTER_OMCASTS:
7426 count = ha->hw_stats.common.tx_mcast_pkts;
7429 case IFCOUNTER_IQDROPS:
7430 case IFCOUNTER_OQDROPS:
7431 case IFCOUNTER_NOPROTO:
7434 return (if_get_counter_default(ifp, cnt));
7442 qlnx_timer(void *arg)
7446 ha = (qlnx_host_t *)arg;
7448 if (ha->error_recovery) {
7449 ha->error_recovery = 0;
7450 taskqueue_enqueue(ha->err_taskqueue, &ha->err_task);
7454 ecore_get_vport_stats(&ha->cdev, &ha->hw_stats);
7456 if (ha->storm_stats_gather)
7457 qlnx_sample_storm_stats(ha);
7459 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
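/*
 * Bring-up sequence: size the per-RSS arrays, allocate fastpath memory,
 * hook and CPU-bind one interrupt per RSS queue, start the vport/RX/TX
 * queues, program the primary MAC and RX filters, request link-up, and
 * arm the one-second stats/recovery timer.
 */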
7465 qlnx_load(qlnx_host_t *ha)
7469 struct ecore_dev *cdev;
7475 QL_DPRINT2(ha, "enter\n");
7477 rc = qlnx_alloc_mem_arrays(ha);
7479 goto qlnx_load_exit0;
7483 rc = qlnx_alloc_mem_load(ha);
7485 goto qlnx_load_exit1;
7487 QL_DPRINT2(ha, "Allocated %d RSS queues on %d TC/s\n",
7488 ha->num_rss, ha->num_tc);
7490 for (i = 0; i < ha->num_rss; i++) {
7492 if ((rc = bus_setup_intr(dev, ha->irq_vec[i].irq,
7493 (INTR_TYPE_NET | INTR_MPSAFE),
7494 NULL, qlnx_fp_isr, &ha->irq_vec[i],
7495 &ha->irq_vec[i].handle))) {
7497 QL_DPRINT1(ha, "could not setup interrupt\n");
7498 goto qlnx_load_exit2;
7501 QL_DPRINT2(ha, "rss_id = %d irq_rid %d \
7502 irq %p handle %p\n", i,
7503 ha->irq_vec[i].irq_rid,
7504 ha->irq_vec[i].irq, ha->irq_vec[i].handle);
7506 bus_bind_intr(dev, ha->irq_vec[i].irq, (i % mp_ncpus));
7509 rc = qlnx_start_queues(ha);
7511 goto qlnx_load_exit2;
7513 QL_DPRINT2(ha, "Start VPORT, RXQ and TXQ succeeded\n");
7515 /* Add primary mac and set Rx filters */
7516 rc = qlnx_set_rx_mode(ha);
7518 goto qlnx_load_exit2;
7520 /* Ask for link-up using current configuration */
7521 qlnx_set_link(ha, true);
7523 if (qlnx_vf_device(ha) == 0)
7524 qlnx_link_update(&ha->cdev.hwfns[0]);
7526 ha->state = QLNX_STATE_OPEN;
7528 bzero(&ha->hw_stats, sizeof(struct ecore_eth_stats));
7530 if (ha->flags.callout_init)
7531 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
7533 goto qlnx_load_exit0;
7536 qlnx_free_mem_load(ha);
7542 QL_DPRINT2(ha, "exit [%d]\n", rc);
7547 qlnx_drain_soft_lro(qlnx_host_t *ha)
7549 #ifdef QLNX_SOFT_LRO
7557 if (ifp->if_capenable & IFCAP_LRO) {
7559 for (i = 0; i < ha->num_rss; i++) {
7561 struct qlnx_fastpath *fp = &ha->fp_array[i];
7562 struct lro_ctrl *lro;
7564 lro = &fp->rxq->lro;
7566 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
7568 tcp_lro_flush_all(lro);
7571 struct lro_entry *queued;
7573 while ((!SLIST_EMPTY(&lro->lro_active))){
7574 queued = SLIST_FIRST(&lro->lro_active);
7575 SLIST_REMOVE_HEAD(&lro->lro_active, next);
7576 tcp_lro_flush(lro, queued);
7579 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
7584 #endif /* #ifdef QLNX_SOFT_LRO */
7590 qlnx_unload(qlnx_host_t *ha)
7592 struct ecore_dev *cdev;
7599 QL_DPRINT2(ha, "enter\n");
7600 QL_DPRINT1(ha, " QLNX STATE = %d\n",ha->state);
7602 if (ha->state == QLNX_STATE_OPEN) {
7604 qlnx_set_link(ha, false);
7605 qlnx_clean_filters(ha);
7606 qlnx_stop_queues(ha);
7607 ecore_hw_stop_fastpath(cdev);
7609 for (i = 0; i < ha->num_rss; i++) {
7610 if (ha->irq_vec[i].handle) {
7611 (void)bus_teardown_intr(dev,
7613 ha->irq_vec[i].handle);
7614 ha->irq_vec[i].handle = NULL;
7618 qlnx_drain_fp_taskqueues(ha);
7619 qlnx_drain_soft_lro(ha);
7620 qlnx_free_mem_load(ha);
7623 if (ha->flags.callout_init)
7624 callout_drain(&ha->qlnx_callout);
7626 qlnx_mdelay(__func__, 1000);
7628 ha->state = QLNX_STATE_CLOSED;
7630 QL_DPRINT2(ha, "exit\n");
7635 qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
7638 struct ecore_hwfn *p_hwfn;
7639 struct ecore_ptt *p_ptt;
7641 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
7643 p_hwfn = &ha->cdev.hwfns[hwfn_index];
7644 p_ptt = ecore_ptt_acquire(p_hwfn);
7647 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
7651 rval = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);
7653 if (rval == DBG_STATUS_OK)
7656 QL_DPRINT1(ha, "ecore_dbg_grc_get_dump_buf_size failed"
7660 ecore_ptt_release(p_hwfn, p_ptt);
7666 qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
7669 struct ecore_hwfn *p_hwfn;
7670 struct ecore_ptt *p_ptt;
7672 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
7674 p_hwfn = &ha->cdev.hwfns[hwfn_index];
7675 p_ptt = ecore_ptt_acquire(p_hwfn);
7678 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
7682 rval = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);
7684 if (rval == DBG_STATUS_OK)
7687 QL_DPRINT1(ha, "ecore_dbg_idle_chk_get_dump_buf_size failed"
7691 ecore_ptt_release(p_hwfn, p_ptt);
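/*
 * Snapshot the storm processor activity counters: for each hwfn, read the
 * active/stall/sleeping/inactive cycle counts from the X, Y, P, T, M and
 * U storm SEM fast-memory regions into the next slot of ha->storm_stats[],
 * and stop gathering once QLNX_STORM_STATS_SAMPLES_PER_HWFN samples have
 * been collected.
 */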
7698 qlnx_sample_storm_stats(qlnx_host_t *ha)
7701 struct ecore_dev *cdev;
7702 qlnx_storm_stats_t *s_stats;
7704 struct ecore_ptt *p_ptt;
7705 struct ecore_hwfn *hwfn;
7707 if (ha->storm_stats_index >= QLNX_STORM_STATS_SAMPLES_PER_HWFN) {
7708 ha->storm_stats_gather = 0;
7714 for_each_hwfn(cdev, i) {
7716 hwfn = &cdev->hwfns[i];
7718 p_ptt = ecore_ptt_acquire(hwfn);
7722 index = ha->storm_stats_index +
7723 (i * QLNX_STORM_STATS_SAMPLES_PER_HWFN);
7725 s_stats = &ha->storm_stats[index];
		reg = XSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
		s_stats->xstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = XSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
		s_stats->xstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = XSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
		s_stats->xstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = XSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
		s_stats->xstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = YSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
		s_stats->ystorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = YSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
		s_stats->ystorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = YSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
		s_stats->ystorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = YSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
		s_stats->ystorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = PSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
		s_stats->pstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = PSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
		s_stats->pstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = PSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
		s_stats->pstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = PSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
		s_stats->pstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = TSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
		s_stats->tstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = TSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
		s_stats->tstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = TSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
		s_stats->tstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = TSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
		s_stats->tstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = MSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
		s_stats->mstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = MSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
		s_stats->mstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = MSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
		s_stats->mstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = MSEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
		s_stats->mstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = USEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
		s_stats->ustorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = USEM_REG_FAST_MEMORY +
				SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
		s_stats->ustorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = USEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
		s_stats->ustorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = USEM_REG_FAST_MEMORY +
				SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
		s_stats->ustorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
		ecore_ptt_release(hwfn, p_ptt);
	}

	ha->storm_stats_index++;

	return;
}
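/*
 * Storage layout note: samples are grouped per hwfn in contiguous blocks of
 * QLNX_STORM_STATS_SAMPLES_PER_HWFN entries, i.e. sample s taken on hwfn h
 * lands at storm_stats[s + h * QLNX_STORM_STATS_SAMPLES_PER_HWFN]. With 32
 * samples per hwfn (an illustrative value), sample 5 of hwfn 1 is entry 37.
 */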
/*
 * Name: qlnx_dump_buf8
 * Function: dumps a buffer as bytes
 */
static void
qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, uint32_t len)
{
	device_t	dev;
	uint32_t	i = 0;
	uint8_t		*buf;

	dev = ha->pci_dev;
	buf = dbuf;

	device_printf(dev, "%s: %s 0x%x dump start\n", __func__, msg, len);

	while (len >= 16) {
		device_printf(dev,"0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x %02x"
			" %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3],
			buf[4], buf[5], buf[6], buf[7],
			buf[8], buf[9], buf[10], buf[11],
			buf[12], buf[13], buf[14], buf[15]);

		buf += 16;
		len -= 16;
		i += 16;
	}

	switch (len) {
	case 1:
		device_printf(dev,"0x%08x: %02x\n", i, buf[0]);
		break;
	case 2:
		device_printf(dev,"0x%08x: %02x %02x\n", i, buf[0], buf[1]);
		break;
	case 3:
		device_printf(dev,"0x%08x: %02x %02x %02x\n",
			i, buf[0], buf[1], buf[2]);
		break;
	case 4:
		device_printf(dev,"0x%08x: %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3]);
		break;
	case 5:
		device_printf(dev,"0x%08x:"
			" %02x %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4]);
		break;
	case 6:
		device_printf(dev,"0x%08x:"
			" %02x %02x %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
		break;
	case 7:
		device_printf(dev,"0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]);
		break;
	case 8:
		device_printf(dev,"0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
			buf[7]);
		break;
	case 9:
		device_printf(dev,"0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x %02x"
			" %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
			buf[7], buf[8]);
		break;
	case 10:
		device_printf(dev,"0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x %02x"
			" %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
			buf[7], buf[8], buf[9]);
		break;
	case 11:
		device_printf(dev,"0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x %02x"
			" %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
			buf[7], buf[8], buf[9], buf[10]);
		break;
	case 12:
		device_printf(dev,"0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x %02x"
			" %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
			buf[7], buf[8], buf[9], buf[10], buf[11]);
		break;
	case 13:
		device_printf(dev,"0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x %02x"
			" %02x %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
			buf[7], buf[8], buf[9], buf[10], buf[11], buf[12]);
		break;
	case 14:
		device_printf(dev,"0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x %02x"
			" %02x %02x %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
			buf[7], buf[8], buf[9], buf[10], buf[11], buf[12],
			buf[13]);
		break;
	case 15:
		device_printf(dev,"0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x %02x"
			" %02x %02x %02x %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
			buf[7], buf[8], buf[9], buf[10], buf[11], buf[12],
			buf[13], buf[14]);
		break;
	default:
		break;
	}

	device_printf(dev, "%s: %s dump end\n", __func__, msg);

	return;
}
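/*
 * The fifteen tail cases above unroll what is logically one loop over the
 * remaining (len % 16) bytes. An illustrative equivalent for the tail (a
 * sketch for reference, not the driver's code path):
 */
#if 0
	uint32_t j;
	char line[80], *p = line;

	for (j = 0; j < len; j++)
		p += snprintf(p, sizeof(line) - (p - line), " %02x", buf[j]);
	if (len)
		device_printf(dev, "0x%08x:%s\n", i, line);
#endif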
#ifdef CONFIG_ECORE_SRIOV

static void
__qlnx_osal_iov_vf_cleanup(struct ecore_hwfn *p_hwfn, uint8_t rel_vf_id)
{
	struct ecore_public_vf_info *vf_info;

	vf_info = ecore_iov_get_public_vf_info(p_hwfn, rel_vf_id, false);

	if (!vf_info)
		return;

	/* Clear the VF mac */
	memset(vf_info->forced_mac, 0, ETH_ALEN);

	vf_info->forced_vlan = 0;

	return;
}

void
qlnx_osal_iov_vf_cleanup(void *p_hwfn, uint8_t relative_vf_id)
{
	__qlnx_osal_iov_vf_cleanup(p_hwfn, relative_vf_id);

	return;
}
static int
__qlnx_iov_chk_ucast(struct ecore_hwfn *p_hwfn, int vfid,
	struct ecore_filter_ucast *params)
{
	struct ecore_public_vf_info *vf;

	if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) {
		QL_DPRINT1(((qlnx_host_t *)p_hwfn->p_dev),
			"VF[%d] vport not initialized\n", vfid);
		return (ECORE_INVAL);
	}

	vf = ecore_iov_get_public_vf_info(p_hwfn, vfid, true);
	if (!vf)
		return (ECORE_INVAL);

	/* No real decision to make; Store the configured MAC */
	if (params->type == ECORE_FILTER_MAC ||
	    params->type == ECORE_FILTER_MAC_VLAN)
		memcpy(params->mac, vf->forced_mac, ETH_ALEN);

	return (0);
}

int
qlnx_iov_chk_ucast(void *p_hwfn, int vfid, void *params)
{
	return (__qlnx_iov_chk_ucast(p_hwfn, vfid, params));
}
static int
__qlnx_iov_update_vport(struct ecore_hwfn *hwfn, uint8_t vfid,
	struct ecore_sp_vport_update_params *params, uint16_t *tlvs)
{
	uint8_t mask;
	struct ecore_filter_accept_flags *flags;

	if (!ecore_iov_vf_has_vport_instance(hwfn, vfid)) {
		QL_DPRINT1(((qlnx_host_t *)hwfn->p_dev),
			"VF[%d] vport not initialized\n", vfid);
		return (ECORE_INVAL);
	}

	/* Untrusted VFs can't even be trusted to know that fact.
	 * Simply indicate everything is configured fine, and trace
	 * configuration 'behind their back'.
	 */
	mask = ECORE_ACCEPT_UCAST_UNMATCHED | ECORE_ACCEPT_MCAST_UNMATCHED;
	flags = &params->accept_flags;

	if (!(*tlvs & BIT(ECORE_IOV_VP_UPDATE_ACCEPT_PARAM)))
		return (0);

	/* Strip the 'accept unmatched' bits the VF may have requested. */
	if (flags->update_rx_mode_config)
		flags->rx_accept_filter &= ~mask;
	if (flags->update_tx_mode_config)
		flags->tx_accept_filter &= ~mask;

	return (0);
}

int
qlnx_iov_update_vport(void *hwfn, uint8_t vfid, void *params, uint16_t *tlvs)
{
	return (__qlnx_iov_update_vport(hwfn, vfid, params, tlvs));
}
static int
qlnx_find_hwfn_index(struct ecore_hwfn *p_hwfn)
{
	int			i;
	struct ecore_dev	*cdev;

	cdev = p_hwfn->p_dev;

	for (i = 0; i < cdev->num_hwfns; i++) {
		if (&cdev->hwfns[i] == p_hwfn)
			break;
	}

	if (i >= cdev->num_hwfns)
		return (-1);

	return (i);
}
static int
__qlnx_pf_vf_msg(struct ecore_hwfn *p_hwfn, uint16_t rel_vf_id)
{
	qlnx_host_t	*ha = (qlnx_host_t *)p_hwfn->p_dev;
	int		i;

	QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p rel_vf_id = %d\n",
		ha, p_hwfn->p_dev, p_hwfn, rel_vf_id);

	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
		return (-1);

	if (ha->sriov_task[i].pf_taskqueue != NULL) {
		atomic_testandset_32(&ha->sriov_task[i].flags,
			QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG);

		taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
			&ha->sriov_task[i].pf_task);
	}

	return (ECORE_SUCCESS);
}

int
qlnx_pf_vf_msg(void *p_hwfn, uint16_t relative_vf_id)
{
	return (__qlnx_pf_vf_msg(p_hwfn, relative_vf_id));
}
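/*
 * These OSAL-facing handlers (and the FLR/bulletin ones below) run in
 * contexts where a PTT window cannot safely be acquired, so they only set an
 * atomic flag and enqueue the per-hwfn PF task; the real work happens later
 * in qlnx_pf_taskqueue(), in taskqueue-thread context.
 */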
static void
__qlnx_vf_flr_update(struct ecore_hwfn *p_hwfn)
{
	qlnx_host_t	*ha = (qlnx_host_t *)p_hwfn->p_dev;
	int		i;

	if (!ha->sriov_initialized)
		return;

	QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p \n",
		ha, p_hwfn->p_dev, p_hwfn);

	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
		return;

	if (ha->sriov_task[i].pf_taskqueue != NULL) {
		atomic_testandset_32(&ha->sriov_task[i].flags,
			QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE);

		taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
			&ha->sriov_task[i].pf_task);
	}

	return;
}

void
qlnx_vf_flr_update(void *p_hwfn)
{
	__qlnx_vf_flr_update(p_hwfn);

	return;
}
void
qlnx_vf_bulleting_update(struct ecore_hwfn *p_hwfn)
{
	qlnx_host_t	*ha = (qlnx_host_t *)p_hwfn->p_dev;
	int		i;

	QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p \n",
		ha, p_hwfn->p_dev, p_hwfn);

	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
		return;

	QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p i = %d\n",
		ha, p_hwfn->p_dev, p_hwfn, i);

	if (ha->sriov_task[i].pf_taskqueue != NULL) {
		atomic_testandset_32(&ha->sriov_task[i].flags,
			QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE);

		taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
			&ha->sriov_task[i].pf_task);
	}

	return;
}
static void
qlnx_initialize_sriov(qlnx_host_t *ha)
{
	device_t	dev;
	nvlist_t	*pf_schema, *vf_schema;
	int		iov_error;

	dev = ha->pci_dev;

	pf_schema = pci_iov_schema_alloc_node();
	vf_schema = pci_iov_schema_alloc_node();

	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
		IOV_SCHEMA_HASDEFAULT, FALSE);
	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
		IOV_SCHEMA_HASDEFAULT, FALSE);
	pci_iov_schema_add_uint16(vf_schema, "num-queues",
		IOV_SCHEMA_HASDEFAULT, 1);

	iov_error = pci_iov_attach(dev, pf_schema, vf_schema);

	if (iov_error != 0) {
		ha->sriov_initialized = 0;
	} else {
		device_printf(dev, "SRIOV initialized\n");
		ha->sriov_initialized = 1;
	}

	return;
}
static void
qlnx_sriov_disable(qlnx_host_t *ha)
{
	struct ecore_dev	*cdev;
	int			i, j;

	cdev = &ha->cdev;

	ecore_iov_set_vfs_to_disable(cdev, true);

	for_each_hwfn(cdev, i) {
		struct ecore_hwfn *hwfn = &cdev->hwfns[i];
		struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn);

		if (!ptt) {
			QL_DPRINT1(ha, "Failed to acquire ptt\n");
			return;
		}

		/* Clean WFQ db and configure equal weight for all vports */
		ecore_clean_wfq_db(hwfn, ptt);

		ecore_for_each_vf(hwfn, j) {
			int k = 0;

			if (!ecore_iov_is_valid_vfid(hwfn, j, true, false))
				continue;

			if (ecore_iov_is_vf_started(hwfn, j)) {
				/* Wait until VF is disabled before releasing */
				for (k = 0; k < 100; k++) {
					if (!ecore_iov_is_vf_stopped(hwfn, j))
						qlnx_mdelay(__func__, 10);
					else
						break;
				}
			}

			if (k < 100)
				ecore_iov_release_hw_for_vf(&cdev->hwfns[i],
					ptt, j);
			else
				QL_DPRINT1(ha,
					"Timeout waiting for VF's FLR to end\n");
		}

		ecore_ptt_release(hwfn, ptt);
	}

	ecore_iov_set_vfs_to_disable(cdev, false);

	return;
}
static void
qlnx_sriov_enable_qid_config(struct ecore_hwfn *hwfn, u16 vfid,
	struct ecore_iov_vf_init_params *params)
{
	u16 base, i;

	/* Since we have an equal resource distribution per-VF, and we assume
	 * PF has acquired the ECORE_PF_L2_QUE first queues, we start setting
	 * sequentially from there.
	 */
	base = FEAT_NUM(hwfn, ECORE_PF_L2_QUE) + vfid * params->num_queues;

	params->rel_vf_id = vfid;

	for (i = 0; i < params->num_queues; i++) {
		params->req_rx_queue[i] = base + i;
		params->req_tx_queue[i] = base + i;
	}

	/* PF uses indices 0 for itself; Set vport/RSS afterwards */
	params->vport_id = vfid + 1;
	params->rss_eng_id = vfid + 1;

	return;
}
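/*
 * Worked example of the queue numbering above: if the PF owns the first
 * FEAT_NUM(hwfn, ECORE_PF_L2_QUE) = 16 L2 queues (an illustrative value)
 * and each VF gets params->num_queues = 4, then VF 0 is assigned hardware
 * queues 16..19, VF 1 gets 20..23, VF 2 gets 24..27, and so on; VF n uses
 * vport/RSS engine (n + 1), since the PF keeps index 0 for itself.
 */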
static int
qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *nvlist_params)
{
	qlnx_host_t				*ha;
	struct ecore_dev			*cdev;
	struct ecore_iov_vf_init_params		params;
	int					ret, j, i;
	uint32_t				max_vfs;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "%s: cannot get softc\n", __func__);
		return (-1);
	}

	if (qlnx_create_pf_taskqueues(ha) != 0)
		goto qlnx_iov_init_err0;

	cdev = &ha->cdev;

	max_vfs = RESC_NUM(&cdev->hwfns[0], ECORE_VPORT);

	QL_DPRINT2(ha, " dev = %p enter num_vfs = %d max_vfs = %d\n",
		dev, num_vfs, max_vfs);

	if (num_vfs >= max_vfs) {
		QL_DPRINT1(ha, "Can start at most %d VFs\n",
			(RESC_NUM(&cdev->hwfns[0], ECORE_VPORT) - 1));
		goto qlnx_iov_init_err0;
	}

	ha->vf_attr = malloc(((sizeof (qlnx_vf_attr_t) * num_vfs)), M_QLNXBUF,
				M_NOWAIT);

	if (ha->vf_attr == NULL)
		goto qlnx_iov_init_err0;

	memset(&params, 0, sizeof(params));

	/* Initialize HW for VF access */
	for_each_hwfn(cdev, j) {
		struct ecore_hwfn *hwfn = &cdev->hwfns[j];
		struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn);

		/* Make sure not to use more than 16 queues per VF */
		params.num_queues = min_t(int,
			(FEAT_NUM(hwfn, ECORE_VF_L2_QUE) / num_vfs),
			16);

		if (!ptt) {
			QL_DPRINT1(ha, "Failed to acquire ptt\n");
			goto qlnx_iov_init_err1;
		}

		for (i = 0; i < num_vfs; i++) {
			if (!ecore_iov_is_valid_vfid(hwfn, i, false, true))
				continue;

			qlnx_sriov_enable_qid_config(hwfn, i, &params);

			ret = ecore_iov_init_hw_for_vf(hwfn, ptt, &params);

			if (ret) {
				QL_DPRINT1(ha, "Failed to enable VF[%d]\n", i);
				ecore_ptt_release(hwfn, ptt);
				goto qlnx_iov_init_err1;
			}
		}

		ecore_ptt_release(hwfn, ptt);
	}

	ha->num_vfs = num_vfs;
	qlnx_inform_vf_link_state(&cdev->hwfns[0], ha);

	QL_DPRINT2(ha, " dev = %p exit num_vfs = %d\n", dev, num_vfs);

	return (0);

qlnx_iov_init_err1:
	qlnx_sriov_disable(ha);

qlnx_iov_init_err0:
	qlnx_destroy_pf_taskqueues(ha);
	ha->num_vfs = 0;

	return (-1);
}
static void
qlnx_iov_uninit(device_t dev)
{
	qlnx_host_t	*ha;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "%s: cannot get softc\n", __func__);
		return;
	}

	QL_DPRINT2(ha, " dev = %p enter\n", dev);

	qlnx_sriov_disable(ha);
	qlnx_destroy_pf_taskqueues(ha);

	free(ha->vf_attr, M_QLNXBUF);
	ha->vf_attr = NULL;

	ha->num_vfs = 0;

	QL_DPRINT2(ha, " dev = %p exit\n", dev);

	return;
}
static int
qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
{
	qlnx_host_t		*ha;
	qlnx_vf_attr_t		*vf_attr;
	unsigned const char	*mac;
	size_t			size;
	struct ecore_hwfn	*p_hwfn;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "%s: cannot get softc\n", __func__);
		return (-1);
	}

	QL_DPRINT2(ha, " dev = %p enter vfnum = %d\n", dev, vfnum);

	if (vfnum > (ha->num_vfs - 1)) {
		QL_DPRINT1(ha, " VF[%d] is greater than max allowed [%d]\n",
			vfnum, (ha->num_vfs - 1));
		return (-1);
	}

	vf_attr = &ha->vf_attr[vfnum];

	if (nvlist_exists_binary(params, "mac-addr")) {
		mac = nvlist_get_binary(params, "mac-addr", &size);
		bcopy(mac, vf_attr->mac_addr, ETHER_ADDR_LEN);

		device_printf(dev,
			"%s: mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
			__func__, vf_attr->mac_addr[0],
			vf_attr->mac_addr[1], vf_attr->mac_addr[2],
			vf_attr->mac_addr[3], vf_attr->mac_addr[4],
			vf_attr->mac_addr[5]);

		p_hwfn = &ha->cdev.hwfns[0];
		ecore_iov_bulletin_set_mac(p_hwfn, vf_attr->mac_addr,
			vfnum);
	}

	QL_DPRINT2(ha, " dev = %p exit vfnum = %d\n", dev, vfnum);

	return (0);
}
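/*
 * The nvlist keys consumed above match the VF schema registered in
 * qlnx_initialize_sriov(). A hypothetical iovctl.conf(5) fragment that would
 * exercise the "mac-addr" path (device name and address are made-up
 * examples):
 *
 *	PF {
 *		device : "ql0";
 *		num_vfs : 2;
 *	}
 *
 *	VF-0 {
 *		mac-addr : "00:0e:1e:00:00:01";
 *	}
 */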
static void
qlnx_handle_vf_msg(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
{
	uint64_t		events[ECORE_VF_ARRAY_LENGTH];
	struct ecore_ptt	*ptt;
	int			i;

	ptt = ecore_ptt_acquire(p_hwfn);

	if (!ptt) {
		QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
		__qlnx_pf_vf_msg(p_hwfn, 0);
		return;
	}

	ecore_iov_pf_get_pending_events(p_hwfn, events);

	QL_DPRINT2(ha, "Event mask of VF events:"
		" 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 "\n",
		events[0], events[1], events[2]);

	ecore_for_each_vf(p_hwfn, i) {
		/* Skip VFs with no pending messages */
		if (!(events[i / 64] & (1ULL << (i % 64))))
			continue;

		QL_DPRINT2(ha,
			"Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
			i, p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i);

		/* Copy VF's message to PF's request buffer for that VF */
		if (ecore_iov_copy_vf_msg(p_hwfn, ptt, i))
			continue;

		ecore_iov_process_mbx_req(p_hwfn, ptt, i);
	}

	ecore_ptt_release(p_hwfn, ptt);

	return;
}
static void
qlnx_handle_vf_flr_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
{
	struct ecore_ptt	*ptt;
	int			ret;

	ptt = ecore_ptt_acquire(p_hwfn);

	if (!ptt) {
		QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
		__qlnx_vf_flr_update(p_hwfn);
		return;
	}

	ret = ecore_iov_vf_flr_cleanup(p_hwfn, ptt);

	if (ret) {
		QL_DPRINT1(ha,
			"ecore_iov_vf_flr_cleanup failed; re-scheduling\n");
	}

	ecore_ptt_release(p_hwfn, ptt);

	return;
}
static void
qlnx_handle_bulletin_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
{
	struct ecore_ptt	*ptt;
	int			i;

	ptt = ecore_ptt_acquire(p_hwfn);

	if (!ptt) {
		QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
		qlnx_vf_bulleting_update(p_hwfn);
		return;
	}

	ecore_for_each_vf(p_hwfn, i) {
		QL_DPRINT1(ha, "ecore_iov_post_vf_bulletin[%p, %d]\n",
			p_hwfn, i);
		ecore_iov_post_vf_bulletin(p_hwfn, i, ptt);
	}

	ecore_ptt_release(p_hwfn, ptt);

	return;
}
static void
qlnx_pf_taskqueue(void *context, int pending)
{
	struct ecore_hwfn	*p_hwfn;
	qlnx_host_t		*ha;
	int			i;

	p_hwfn = context;

	if (p_hwfn == NULL)
		return;

	ha = (qlnx_host_t *)(p_hwfn->p_dev);

	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
		return;

	if (atomic_testandclear_32(&ha->sriov_task[i].flags,
		QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG))
		qlnx_handle_vf_msg(ha, p_hwfn);

	if (atomic_testandclear_32(&ha->sriov_task[i].flags,
		QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE))
		qlnx_handle_vf_flr_update(ha, p_hwfn);

	if (atomic_testandclear_32(&ha->sriov_task[i].flags,
		QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE))
		qlnx_handle_bulletin_update(ha, p_hwfn);

	return;
}
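/*
 * atomic_testandclear_32() makes each flag a one-shot edge: if a producer
 * sets the same bit again while its handler is running, the task is simply
 * re-enqueued and the handler runs once more, so no event is lost.
 */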
static int
qlnx_create_pf_taskqueues(qlnx_host_t *ha)
{
	int	i;
	char	tq_name[32];

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];

		bzero(tq_name, sizeof (tq_name));
		snprintf(tq_name, sizeof (tq_name), "ql_pf_tq_%d", i);

		TASK_INIT(&ha->sriov_task[i].pf_task, 0, qlnx_pf_taskqueue,
			p_hwfn);

		ha->sriov_task[i].pf_taskqueue = taskqueue_create(tq_name,
			M_NOWAIT, taskqueue_thread_enqueue,
			&ha->sriov_task[i].pf_taskqueue);

		if (ha->sriov_task[i].pf_taskqueue == NULL)
			return (-1);

		taskqueue_start_threads(&ha->sriov_task[i].pf_taskqueue, 1,
			PI_NET, "%s", tq_name);

		QL_DPRINT1(ha, "%p\n", ha->sriov_task[i].pf_taskqueue);
	}

	return (0);
}
static void
qlnx_destroy_pf_taskqueues(qlnx_host_t *ha)
{
	int	i;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		if (ha->sriov_task[i].pf_taskqueue != NULL) {
			taskqueue_drain(ha->sriov_task[i].pf_taskqueue,
				&ha->sriov_task[i].pf_task);
			taskqueue_free(ha->sriov_task[i].pf_taskqueue);
			ha->sriov_task[i].pf_taskqueue = NULL;
		}
	}

	return;
}
static void
qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha)
{
	struct ecore_mcp_link_capabilities caps;
	struct ecore_mcp_link_params params;
	struct ecore_mcp_link_state link;
	int i;

	if (!p_hwfn->pf_iov_info)
		return;

	memset(&params, 0, sizeof(struct ecore_mcp_link_params));
	memset(&link, 0, sizeof(struct ecore_mcp_link_state));
	memset(&caps, 0, sizeof(struct ecore_mcp_link_capabilities));

	memcpy(&caps, ecore_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
	memcpy(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link));
	memcpy(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params));

	QL_DPRINT2(ha, "called\n");

	/* Update bulletin of all future possible VFs with link configuration */
	for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) {
		/* Modify link according to the VF's configured link state */
		link.link_up = false;

		if (ha->link_up) {
			link.link_up = true;
			/* Set speed according to maximum supported by HW.
			 * That is 40G for regular devices and 100G for CMT
			 * mode devices.
			 */
			link.speed = (p_hwfn->p_dev->num_hwfns > 1) ?
					100000 : link.speed;
		}

		QL_DPRINT2(ha, "link [%d] = %d\n", i, link.link_up);

		ecore_iov_set_link(p_hwfn, i, &params, &link, &caps);
	}

	qlnx_vf_bulleting_update(p_hwfn);

	return;
}
#endif /* #ifndef QLNX_VF */
#endif /* #ifdef CONFIG_ECORE_SRIOV */