/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */


/*
 * File: qlnx_os.c
 * Author: David C Somayajulu, Cavium, Inc., San Jose, CA 95131.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "qlnx_os.h"
#include "bcm_osal.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore.h"
#include "ecore_chain.h"
#include "ecore_status.h"
#include "ecore_hw.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_int.h"
#include "ecore_cxt.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"
#include "ecore_dev_api.h"
#include "ecore_l2_api.h"
#include "ecore_mcp.h"
#include "ecore_hw_defs.h"
#include "mcp_public.h"
#include "ecore_iro.h"
#include "nvm_cfg.h"
#include "ecore_dbg_fw_funcs.h"

#include "qlnx_ioctl.h"
#include "qlnx_def.h"
#include "qlnx_ver.h"
#include <sys/smp.h>


/*
 * static functions
 */
/*
 * ioctl related functions
 */
static void qlnx_add_sysctls(qlnx_host_t *ha);

/*
 * main driver
 */
static void qlnx_release(qlnx_host_t *ha);
static void qlnx_fp_isr(void *arg);
static void qlnx_init_ifnet(device_t dev, qlnx_host_t *ha);
static void qlnx_init(void *arg);
static void qlnx_init_locked(qlnx_host_t *ha);
static int qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi);
static int qlnx_set_promisc(qlnx_host_t *ha);
static int qlnx_set_allmulti(qlnx_host_t *ha);
static int qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int qlnx_media_change(struct ifnet *ifp);
static void qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
static void qlnx_stop(qlnx_host_t *ha);
static int qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp,
		struct mbuf **m_headp);
static int qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha);
static uint32_t qlnx_get_optics(qlnx_host_t *ha,
			struct qlnx_link_output *if_link);
static int qlnx_transmit(struct ifnet *ifp, struct mbuf *mp);
static void qlnx_qflush(struct ifnet *ifp);

static int qlnx_alloc_parent_dma_tag(qlnx_host_t *ha);
static void qlnx_free_parent_dma_tag(qlnx_host_t *ha);
static int qlnx_alloc_tx_dma_tag(qlnx_host_t *ha);
static void qlnx_free_tx_dma_tag(qlnx_host_t *ha);
static int qlnx_alloc_rx_dma_tag(qlnx_host_t *ha);
static void qlnx_free_rx_dma_tag(qlnx_host_t *ha);

static int qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver);
static int qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size);

static int qlnx_nic_setup(struct ecore_dev *cdev,
		struct ecore_pf_params *func_params);
static int qlnx_nic_start(struct ecore_dev *cdev);
static int qlnx_slowpath_start(qlnx_host_t *ha);
static int qlnx_slowpath_stop(qlnx_host_t *ha);
static int qlnx_init_hw(qlnx_host_t *ha);
static void qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
		char ver_str[VER_SIZE]);
static void qlnx_unload(qlnx_host_t *ha);
static int qlnx_load(qlnx_host_t *ha);
static void qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
		uint32_t add_mac);
static void qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf,
		uint32_t len);
static int qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq);
static void qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq);
static void qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn,
		struct qlnx_rx_queue *rxq);
static int qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter);
static int qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords,
		int hwfn_index);
static int qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords,
		int hwfn_index);
static void qlnx_timer(void *arg);
static int qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
static void qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
static void qlnx_trigger_dump(qlnx_host_t *ha);
static void qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
		struct qlnx_tx_queue *txq);
static int qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
		int lro_enable);
static void qlnx_fp_taskqueue(void *context, int pending);
static void qlnx_sample_storm_stats(qlnx_host_t *ha);
static int qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
		struct qlnx_agg_info *tpa);
static void qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa);

#if __FreeBSD_version >= 1100000
static uint64_t qlnx_get_counter(if_t ifp, ift_counter cnt);
#endif


/*
 * Hooks to the Operating Systems
 */
static int qlnx_pci_probe(device_t);
static int qlnx_pci_attach(device_t);
static int qlnx_pci_detach(device_t);

static device_method_t qlnx_pci_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe, qlnx_pci_probe),
        DEVMETHOD(device_attach, qlnx_pci_attach),
        DEVMETHOD(device_detach, qlnx_pci_detach),
        { 0, 0 }
};

static driver_t qlnx_pci_driver = {
        "ql", qlnx_pci_methods, sizeof (qlnx_host_t),
};

static devclass_t qlnx_devclass;

MODULE_VERSION(if_qlnxe, 1);
DRIVER_MODULE(if_qlnxe, pci, qlnx_pci_driver, qlnx_devclass, 0, 0);

MODULE_DEPEND(if_qlnxe, pci, 1, 1, 1);
MODULE_DEPEND(if_qlnxe, ether, 1, 1, 1);

MALLOC_DEFINE(M_QLNXBUF, "qlnxbuf", "Buffers for qlnx driver");


char qlnx_dev_str[64];
char qlnx_ver_str[VER_SIZE];
char qlnx_name_str[NAME_SIZE];

/*
 * Some PCI Configuration Space Related Defines
 */

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC               0x1077
#endif

/* 40G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1634
#define QLOGIC_PCI_DEVICE_ID_1634       0x1634
#endif

/* 100G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1644
#define QLOGIC_PCI_DEVICE_ID_1644       0x1644
#endif

/* 25G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1656
#define QLOGIC_PCI_DEVICE_ID_1656       0x1656
#endif

/* 50G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1654
#define QLOGIC_PCI_DEVICE_ID_1654       0x1654
#endif

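/*
 * Name:        qlnx_valid_device
 * Function:    Returns 0 if the PCI device id belongs to one of the supported
 *              QLE45xxx adapters; -1 otherwise
 */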
static int
qlnx_valid_device(device_t dev)
{
        uint16_t        device_id;

        device_id = pci_get_device(dev);

        if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
                (device_id == QLOGIC_PCI_DEVICE_ID_1644) ||
                (device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
                (device_id == QLOGIC_PCI_DEVICE_ID_1654))
                return 0;

        return -1;
}

/*
 * Name:        qlnx_pci_probe
 * Function:    Validate that the PCI device is a QLE45xxx adapter
 */
static int
qlnx_pci_probe(device_t dev)
{
        snprintf(qlnx_ver_str, sizeof(qlnx_ver_str), "v%d.%d.%d",
                QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, QLNX_VERSION_BUILD);
        snprintf(qlnx_name_str, sizeof(qlnx_name_str), "qlnx");

        if (pci_get_vendor(dev) != PCI_VENDOR_QLOGIC) {
                return (ENXIO);
        }

        switch (pci_get_device(dev)) {

        case QLOGIC_PCI_DEVICE_ID_1644:
                snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
                        "Qlogic 100GbE PCI CNA Adapter-Ethernet Function",
                        QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
                        QLNX_VERSION_BUILD);
                device_set_desc_copy(dev, qlnx_dev_str);

                break;

        case QLOGIC_PCI_DEVICE_ID_1634:
                snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
                        "Qlogic 40GbE PCI CNA Adapter-Ethernet Function",
                        QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
                        QLNX_VERSION_BUILD);
                device_set_desc_copy(dev, qlnx_dev_str);

                break;

        case QLOGIC_PCI_DEVICE_ID_1656:
                snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
                        "Qlogic 25GbE PCI CNA Adapter-Ethernet Function",
                        QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
                        QLNX_VERSION_BUILD);
                device_set_desc_copy(dev, qlnx_dev_str);

                break;

        case QLOGIC_PCI_DEVICE_ID_1654:
                snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
                        "Qlogic 50GbE PCI CNA Adapter-Ethernet Function",
                        QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
                        QLNX_VERSION_BUILD);
                device_set_desc_copy(dev, qlnx_dev_str);

                break;

        default:
                return (ENXIO);
        }

        return (BUS_PROBE_DEFAULT);
}

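/*
 * Name:        qlnx_sp_intr
 * Function:    Slowpath interrupt handler; queues the slowpath task of the
 *              hw function that raised the interrupt
 */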
static void
qlnx_sp_intr(void *arg)
{
        struct ecore_hwfn       *p_hwfn;
        qlnx_host_t             *ha;
        int                     i;

        p_hwfn = arg;

        if (p_hwfn == NULL) {
                printf("%s: spurious slowpath intr\n", __func__);
                return;
        }

        ha = (qlnx_host_t *)p_hwfn->p_dev;

        QL_DPRINT2(ha, "enter\n");

        for (i = 0; i < ha->cdev.num_hwfns; i++) {
                if (&ha->cdev.hwfns[i] == p_hwfn) {
                        taskqueue_enqueue(ha->sp_taskqueue[i], &ha->sp_task[i]);
                        break;
                }
        }
        QL_DPRINT2(ha, "exit\n");

        return;
}

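/*
 * Name:        qlnx_sp_taskqueue
 * Function:    Runs the deferred slowpath work (qlnx_sp_isr) in taskqueue
 *              context
 */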
static void
qlnx_sp_taskqueue(void *context, int pending)
{
        struct ecore_hwfn       *p_hwfn;

        p_hwfn = context;

        if (p_hwfn != NULL) {
                qlnx_sp_isr(p_hwfn);
        }
        return;
}

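/*
 * Name:        qlnx_create_sp_taskqueues
 * Function:    Creates one single-threaded slowpath taskqueue per hw function
 */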
static int
qlnx_create_sp_taskqueues(qlnx_host_t *ha)
{
        int     i;
        char    tq_name[32];

        for (i = 0; i < ha->cdev.num_hwfns; i++) {

                struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];

                bzero(tq_name, sizeof (tq_name));
                snprintf(tq_name, sizeof (tq_name), "ql_sp_tq_%d", i);

                TASK_INIT(&ha->sp_task[i], 0, qlnx_sp_taskqueue, p_hwfn);

                ha->sp_taskqueue[i] = taskqueue_create_fast(tq_name, M_NOWAIT,
                        taskqueue_thread_enqueue, &ha->sp_taskqueue[i]);

                if (ha->sp_taskqueue[i] == NULL)
                        return (-1);

                taskqueue_start_threads(&ha->sp_taskqueue[i], 1, PI_NET, "%s",
                        tq_name);

                QL_DPRINT1(ha, "%p\n", ha->sp_taskqueue[i]);
        }

        return (0);
}

static void
qlnx_destroy_sp_taskqueues(qlnx_host_t *ha)
{
        int     i;

        for (i = 0; i < ha->cdev.num_hwfns; i++) {
                if (ha->sp_taskqueue[i] != NULL) {
                        taskqueue_drain(ha->sp_taskqueue[i], &ha->sp_task[i]);
                        taskqueue_free(ha->sp_taskqueue[i]);
                }
        }
        return;
}

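/*
 * Name:        qlnx_fp_taskqueue
 * Function:    Fastpath task; binds its thread to the queue's RSS cpu,
 *              processes receive completions, flushes LRO, drains the tx
 *              buf_ring and either reschedules itself or re-arms the
 *              status block interrupt
 */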
static void
qlnx_fp_taskqueue(void *context, int pending)
{
        struct qlnx_fastpath    *fp;
        qlnx_host_t             *ha;
        struct ifnet            *ifp;
        struct mbuf             *mp;
        int                     ret = 0;
        int                     lro_enable, tc;
        int                     rx_int = 0, total_rx_count = 0;
        struct thread           *cthread;

        fp = context;

        if (fp == NULL)
                return;

        cthread = curthread;

        thread_lock(cthread);

        if (!sched_is_bound(cthread))
                sched_bind(cthread, fp->rss_id);

        thread_unlock(cthread);

        ha = (qlnx_host_t *)fp->edev;

        ifp = ha->ifp;

        lro_enable = ha->ifp->if_capenable & IFCAP_LRO;

        rx_int = qlnx_rx_int(ha, fp, ha->rx_pkt_threshold, lro_enable);

        if (rx_int) {
                fp->rx_pkts += rx_int;
                total_rx_count += rx_int;
        }

#ifdef QLNX_SOFT_LRO
        {
                struct lro_ctrl *lro;

                lro = &fp->rxq->lro;

                if (lro_enable && total_rx_count) {

#if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)

                        if (ha->dbg_trace_lro_cnt) {
                                if (lro->lro_mbuf_count & ~1023)
                                        fp->lro_cnt_1024++;
                                else if (lro->lro_mbuf_count & ~511)
                                        fp->lro_cnt_512++;
                                else if (lro->lro_mbuf_count & ~255)
                                        fp->lro_cnt_256++;
                                else if (lro->lro_mbuf_count & ~127)
                                        fp->lro_cnt_128++;
                                else if (lro->lro_mbuf_count & ~63)
                                        fp->lro_cnt_64++;
                        }
                        tcp_lro_flush_all(lro);

#else
                        struct lro_entry *queued;

                        while ((!SLIST_EMPTY(&lro->lro_active))) {
                                queued = SLIST_FIRST(&lro->lro_active);
                                SLIST_REMOVE_HEAD(&lro->lro_active, next);
                                tcp_lro_flush(lro, queued);
                        }
#endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
                }
        }
#endif /* #ifdef QLNX_SOFT_LRO */

        ecore_sb_update_sb_idx(fp->sb_info);
        rmb();

        mtx_lock(&fp->tx_mtx);

        if (((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
                IFF_DRV_RUNNING) || (!ha->link_up)) {

                mtx_unlock(&fp->tx_mtx);
                goto qlnx_fp_taskqueue_exit;
        }

        for (tc = 0; tc < ha->num_tc; tc++) {
                (void)qlnx_tx_int(ha, fp, fp->txq[tc]);
        }

        mp = drbr_peek(ifp, fp->tx_br);

        while (mp != NULL) {

                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        ret = qlnx_send(ha, fp, &mp);
                } else {
                        ret = -1;
                }

                if (ret) {

                        if (mp != NULL) {
                                drbr_putback(ifp, fp->tx_br, mp);
                        } else {
                                fp->tx_pkts_processed++;
                                drbr_advance(ifp, fp->tx_br);
                        }

                        mtx_unlock(&fp->tx_mtx);

                        goto qlnx_fp_taskqueue_exit;

                } else {
                        drbr_advance(ifp, fp->tx_br);
                        fp->tx_pkts_transmitted++;
                        fp->tx_pkts_processed++;
                }

                if (fp->tx_ring_full)
                        break;

                mp = drbr_peek(ifp, fp->tx_br);
        }

        for (tc = 0; tc < ha->num_tc; tc++) {
                (void)qlnx_tx_int(ha, fp, fp->txq[tc]);
        }

        mtx_unlock(&fp->tx_mtx);

qlnx_fp_taskqueue_exit:
        if (rx_int) {
                if (fp->fp_taskqueue != NULL)
                        taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
        } else {
                if (fp->tx_ring_full) {
                        qlnx_mdelay(__func__, 100);
                }
                ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
        }

        QL_DPRINT2(ha, "exit ret = %d\n", ret);
        return;
}

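/*
 * Name:        qlnx_create_fp_taskqueues
 * Function:    Creates one single-threaded fastpath taskqueue per RSS queue
 */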
static int
qlnx_create_fp_taskqueues(qlnx_host_t *ha)
{
        int     i;
        char    tq_name[32];
        struct qlnx_fastpath *fp;

        for (i = 0; i < ha->num_rss; i++) {

                fp = &ha->fp_array[i];

                bzero(tq_name, sizeof (tq_name));
                snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i);

                TASK_INIT(&fp->fp_task, 0, qlnx_fp_taskqueue, fp);

                fp->fp_taskqueue = taskqueue_create_fast(tq_name, M_NOWAIT,
                                        taskqueue_thread_enqueue,
                                        &fp->fp_taskqueue);

                if (fp->fp_taskqueue == NULL)
                        return (-1);

                taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
                        tq_name);

                QL_DPRINT1(ha, "%p\n", fp->fp_taskqueue);
        }

        return (0);
}

static void
qlnx_destroy_fp_taskqueues(qlnx_host_t *ha)
{
        int                     i;
        struct qlnx_fastpath    *fp;

        for (i = 0; i < ha->num_rss; i++) {

                fp = &ha->fp_array[i];

                if (fp->fp_taskqueue != NULL) {

                        taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
                        taskqueue_free(fp->fp_taskqueue);
                        fp->fp_taskqueue = NULL;
                }
        }
        return;
}

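/*
 * Name:        qlnx_drain_fp_taskqueues
 * Function:    Drains every fastpath taskqueue; the ha lock is released
 *              around each drain so an in-flight task that needs it can
 *              complete
 */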
static void
qlnx_drain_fp_taskqueues(qlnx_host_t *ha)
{
        int                     i;
        struct qlnx_fastpath    *fp;

        for (i = 0; i < ha->num_rss; i++) {
                fp = &ha->fp_array[i];

                if (fp->fp_taskqueue != NULL) {
                        QLNX_UNLOCK(ha);
                        taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
                        QLNX_LOCK(ha);
                }
        }
        return;
}

/*
 * Name:        qlnx_pci_attach
 * Function:    Attaches the device to the operating system
 */
static int
qlnx_pci_attach(device_t dev)
{
        qlnx_host_t     *ha = NULL;
        uint32_t        rsrc_len_reg = 0;
        uint32_t        rsrc_len_dbells = 0;
        uint32_t        rsrc_len_msix = 0;
        int             i;
        uint32_t        mfw_ver;

        if ((ha = device_get_softc(dev)) == NULL) {
                device_printf(dev, "cannot get softc\n");
                return (ENOMEM);
        }

        memset(ha, 0, sizeof (qlnx_host_t));

        if (qlnx_valid_device(dev) != 0) {
                device_printf(dev, "not a supported device\n");
                return (ENXIO);
        }
        ha->pci_func = pci_get_function(dev);

        ha->pci_dev = dev;

        mtx_init(&ha->hw_lock, "qlnx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);

        ha->flags.lock_init = 1;

        pci_enable_busmaster(dev);

        /*
         * map the PCI BARs
         */

        ha->reg_rid = PCIR_BAR(0);
        ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
                                RF_ACTIVE);

        if (ha->pci_reg == NULL) {
                device_printf(dev, "unable to map BAR0\n");
                goto qlnx_pci_attach_err;
        }

        rsrc_len_reg = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
                                        ha->reg_rid);

        ha->dbells_rid = PCIR_BAR(2);
        ha->pci_dbells = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
                        &ha->dbells_rid, RF_ACTIVE);

        if (ha->pci_dbells == NULL) {
                device_printf(dev, "unable to map BAR1\n");
                goto qlnx_pci_attach_err;
        }

        rsrc_len_dbells = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
                                        ha->dbells_rid);

        ha->dbells_phys_addr = (uint64_t)
                bus_get_resource_start(dev, SYS_RES_MEMORY, ha->dbells_rid);
        ha->dbells_size = rsrc_len_dbells;

        ha->msix_rid = PCIR_BAR(4);
        ha->msix_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
                        &ha->msix_rid, RF_ACTIVE);

        if (ha->msix_bar == NULL) {
                device_printf(dev, "unable to map BAR2\n");
                goto qlnx_pci_attach_err;
        }

        rsrc_len_msix = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
                                        ha->msix_rid);
        /*
         * allocate dma tags
         */

        if (qlnx_alloc_parent_dma_tag(ha))
                goto qlnx_pci_attach_err;

        if (qlnx_alloc_tx_dma_tag(ha))
                goto qlnx_pci_attach_err;

        if (qlnx_alloc_rx_dma_tag(ha))
                goto qlnx_pci_attach_err;

        if (qlnx_init_hw(ha) != 0)
                goto qlnx_pci_attach_err;

        /*
         * Allocate MSI-x vectors
         */
        ha->num_rss = QLNX_MAX_RSS;
        ha->num_tc = QLNX_MAX_TC;

        ha->msix_count = pci_msix_count(dev);

        if (ha->msix_count > (mp_ncpus + ha->cdev.num_hwfns))
                ha->msix_count = mp_ncpus + ha->cdev.num_hwfns;

        if (!ha->msix_count ||
                (ha->msix_count < (ha->cdev.num_hwfns + 1))) {
                device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
                        ha->msix_count);
                goto qlnx_pci_attach_err;
        }

        if (ha->msix_count > (ha->num_rss + ha->cdev.num_hwfns))
                ha->msix_count = ha->num_rss + ha->cdev.num_hwfns;
        else
                ha->num_rss = ha->msix_count - ha->cdev.num_hwfns;

        QL_DPRINT1(ha, "\n\t\t\tpci_reg [%p, 0x%08x 0x%08x]"
                "\n\t\t\tdbells [%p, 0x%08x 0x%08x]"
                "\n\t\t\tmsix [%p, 0x%08x 0x%08x 0x%x 0x%x]"
                "\n\t\t\t[ncpus = %d][num_rss = 0x%x] [num_tc = 0x%x]\n",
                ha->pci_reg, rsrc_len_reg,
                ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
                ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
                ha->msix_count, mp_ncpus, ha->num_rss, ha->num_tc);

        if (pci_alloc_msix(dev, &ha->msix_count)) {
                device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
                        ha->msix_count);
                ha->msix_count = 0;
                goto qlnx_pci_attach_err;
        }

        /*
         * Initialize slow path interrupt and task queue
         */
        if (qlnx_create_sp_taskqueues(ha) != 0)
                goto qlnx_pci_attach_err;

        for (i = 0; i < ha->cdev.num_hwfns; i++) {

                struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];

                ha->sp_irq_rid[i] = i + 1;
                ha->sp_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ,
                                &ha->sp_irq_rid[i],
                                (RF_ACTIVE | RF_SHAREABLE));
                if (ha->sp_irq[i] == NULL) {
                        device_printf(dev,
                                "could not allocate slowpath interrupt\n");
                        goto qlnx_pci_attach_err;
                }

                if (bus_setup_intr(dev, ha->sp_irq[i],
                                (INTR_TYPE_NET | INTR_MPSAFE), NULL,
                                qlnx_sp_intr, p_hwfn, &ha->sp_handle[i])) {
                        device_printf(dev,
                                "could not setup slow path interrupt\n");
                        goto qlnx_pci_attach_err;
                }

                QL_DPRINT1(ha, "p_hwfn [%p] sp_irq_rid %d"
                        " sp_irq %p sp_handle %p\n", p_hwfn,
                        ha->sp_irq_rid[i], ha->sp_irq[i], ha->sp_handle[i]);
        }

        /*
         * initialize fast path interrupt
         */
        if (qlnx_create_fp_taskqueues(ha) != 0)
                goto qlnx_pci_attach_err;

        for (i = 0; i < ha->num_rss; i++) {
                ha->irq_vec[i].rss_idx = i;
                ha->irq_vec[i].ha = ha;
                ha->irq_vec[i].irq_rid = (1 + ha->cdev.num_hwfns) + i;

                ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
                                &ha->irq_vec[i].irq_rid,
                                (RF_ACTIVE | RF_SHAREABLE));

                if (ha->irq_vec[i].irq == NULL) {
                        device_printf(dev,
                                "could not allocate interrupt[%d]\n", i);
                        goto qlnx_pci_attach_err;
                }

                if (qlnx_alloc_tx_br(ha, &ha->fp_array[i])) {
                        device_printf(dev, "could not allocate tx_br[%d]\n", i);
                        goto qlnx_pci_attach_err;
                }
        }

        callout_init(&ha->qlnx_callout, 1);
        ha->flags.callout_init = 1;

        for (i = 0; i < ha->cdev.num_hwfns; i++) {

                if (qlnx_grc_dumpsize(ha, &ha->grcdump_size[i], i) != 0)
                        goto qlnx_pci_attach_err;
                if (ha->grcdump_size[i] == 0)
                        goto qlnx_pci_attach_err;

                ha->grcdump_size[i] = ha->grcdump_size[i] << 2;
                QL_DPRINT1(ha, "grcdump_size[%d] = 0x%08x\n",
                        i, ha->grcdump_size[i]);

                ha->grcdump[i] = qlnx_zalloc(ha->grcdump_size[i]);
                if (ha->grcdump[i] == NULL) {
                        device_printf(dev, "grcdump alloc[%d] failed\n", i);
                        goto qlnx_pci_attach_err;
                }

                if (qlnx_idle_chk_size(ha, &ha->idle_chk_size[i], i) != 0)
                        goto qlnx_pci_attach_err;
                if (ha->idle_chk_size[i] == 0)
                        goto qlnx_pci_attach_err;

                ha->idle_chk_size[i] = ha->idle_chk_size[i] << 2;
                QL_DPRINT1(ha, "idle_chk_size[%d] = 0x%08x\n",
                        i, ha->idle_chk_size[i]);

                ha->idle_chk[i] = qlnx_zalloc(ha->idle_chk_size[i]);

                if (ha->idle_chk[i] == NULL) {
                        device_printf(dev, "idle_chk alloc failed\n");
                        goto qlnx_pci_attach_err;
                }
        }

        if (qlnx_slowpath_start(ha) != 0) {

                qlnx_mdelay(__func__, 1000);
                qlnx_trigger_dump(ha);

                goto qlnx_pci_attach_err0;
        } else
                ha->flags.slowpath_start = 1;

        if (qlnx_get_flash_size(ha, &ha->flash_size) != 0) {
                qlnx_mdelay(__func__, 1000);
                qlnx_trigger_dump(ha);

                goto qlnx_pci_attach_err0;
        }

        if (qlnx_get_mfw_version(ha, &mfw_ver) != 0) {
                qlnx_mdelay(__func__, 1000);
                qlnx_trigger_dump(ha);

                goto qlnx_pci_attach_err0;
        }

        snprintf(ha->mfw_ver, sizeof(ha->mfw_ver), "%d.%d.%d.%d",
                ((mfw_ver >> 24) & 0xFF), ((mfw_ver >> 16) & 0xFF),
                ((mfw_ver >> 8) & 0xFF), (mfw_ver & 0xFF));
        snprintf(ha->stormfw_ver, sizeof(ha->stormfw_ver), "%d.%d.%d.%d",
                FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
                FW_ENGINEERING_VERSION);

        QL_DPRINT1(ha, "STORM_FW version %s MFW version %s\n",
                ha->stormfw_ver, ha->mfw_ver);

        qlnx_init_ifnet(dev, ha);

        /*
         * add sysctls
         */
        qlnx_add_sysctls(ha);

qlnx_pci_attach_err0:
        /*
         * create ioctl device interface
         */
        if (qlnx_make_cdev(ha)) {
                device_printf(dev, "%s: ql_make_cdev failed\n", __func__);
                goto qlnx_pci_attach_err;
        }

        QL_DPRINT2(ha, "success\n");

        return (0);

qlnx_pci_attach_err:

        qlnx_release(ha);

        return (ENXIO);
}

/*
 * Name:        qlnx_pci_detach
 * Function:    Unhooks the device from the operating system
 */
static int
qlnx_pci_detach(device_t dev)
{
        qlnx_host_t     *ha = NULL;

        if ((ha = device_get_softc(dev)) == NULL) {
                device_printf(dev, "cannot get softc\n");
                return (ENOMEM);
        }

        QLNX_LOCK(ha);
        qlnx_stop(ha);
        QLNX_UNLOCK(ha);

        qlnx_release(ha);

        return (0);
}

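/*
 * Name:        qlnx_init_hw
 * Function:    Initializes the ecore dev structure, points it at the mapped
 *              register/doorbell BARs and runs ecore_hw_prepare()
 */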
static int
qlnx_init_hw(qlnx_host_t *ha)
{
        int                             rval = 0;
        struct ecore_hw_prepare_params  params;

        ecore_init_struct(&ha->cdev);

        /* ha->dp_module = ECORE_MSG_PROBE |
                                ECORE_MSG_INTR |
                                ECORE_MSG_SP |
                                ECORE_MSG_LINK |
                                ECORE_MSG_SPQ |
                                ECORE_MSG_RDMA;
        ha->dp_level = ECORE_LEVEL_VERBOSE;*/
        ha->dp_level = ECORE_LEVEL_NOTICE;

        ecore_init_dp(&ha->cdev, ha->dp_module, ha->dp_level, ha->pci_dev);

        ha->cdev.regview = ha->pci_reg;
        ha->cdev.doorbells = ha->pci_dbells;
        ha->cdev.db_phys_addr = ha->dbells_phys_addr;
        ha->cdev.db_size = ha->dbells_size;

        bzero(&params, sizeof (struct ecore_hw_prepare_params));

        ha->personality = ECORE_PCI_DEFAULT;

        params.personality = ha->personality;

        params.drv_resc_alloc = false;
        params.chk_reg_fifo = false;
        params.initiate_pf_flr = true;
        params.epoch = 0;

        ecore_hw_prepare(&ha->cdev, &params);

        qlnx_set_id(&ha->cdev, qlnx_name_str, qlnx_ver_str);

        return (rval);
}

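/*
 * Name:        qlnx_release
 * Function:    Frees all resources acquired during attach: dump buffers,
 *              callout, slowpath, ioctl device, DMA tags, interrupts,
 *              taskqueues, MSI-X vectors and PCI BARs
 */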
static void
qlnx_release(qlnx_host_t *ha)
{
        device_t        dev;
        int             i;

        dev = ha->pci_dev;

        QL_DPRINT2(ha, "enter\n");

        for (i = 0; i < QLNX_MAX_HW_FUNCS; i++) {
                if (ha->idle_chk[i] != NULL) {
                        free(ha->idle_chk[i], M_QLNXBUF);
                        ha->idle_chk[i] = NULL;
                }

                if (ha->grcdump[i] != NULL) {
                        free(ha->grcdump[i], M_QLNXBUF);
                        ha->grcdump[i] = NULL;
                }
        }

        if (ha->flags.callout_init)
                callout_drain(&ha->qlnx_callout);

        if (ha->flags.slowpath_start) {
                qlnx_slowpath_stop(ha);
        }

        ecore_hw_remove(&ha->cdev);

        qlnx_del_cdev(ha);

        if (ha->ifp != NULL)
                ether_ifdetach(ha->ifp);

        qlnx_free_tx_dma_tag(ha);

        qlnx_free_rx_dma_tag(ha);

        qlnx_free_parent_dma_tag(ha);

        for (i = 0; i < ha->num_rss; i++) {
                struct qlnx_fastpath *fp = &ha->fp_array[i];

                if (ha->irq_vec[i].handle) {
                        (void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
                                        ha->irq_vec[i].handle);
                }

                if (ha->irq_vec[i].irq) {
                        (void)bus_release_resource(dev, SYS_RES_IRQ,
                                ha->irq_vec[i].irq_rid,
                                ha->irq_vec[i].irq);
                }

                qlnx_free_tx_br(ha, fp);
        }
        qlnx_destroy_fp_taskqueues(ha);

        for (i = 0; i < ha->cdev.num_hwfns; i++) {
                if (ha->sp_handle[i])
                        (void)bus_teardown_intr(dev, ha->sp_irq[i],
                                ha->sp_handle[i]);

                if (ha->sp_irq[i])
                        (void) bus_release_resource(dev, SYS_RES_IRQ,
                                ha->sp_irq_rid[i], ha->sp_irq[i]);
        }

        qlnx_destroy_sp_taskqueues(ha);

        if (ha->msix_count)
                pci_release_msi(dev);

        if (ha->flags.lock_init) {
                mtx_destroy(&ha->hw_lock);
        }

        if (ha->pci_reg)
                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
                                ha->pci_reg);

        if (ha->pci_dbells)
                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->dbells_rid,
                                ha->pci_dbells);

        if (ha->msix_bar)
                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->msix_rid,
                                ha->msix_bar);

        QL_DPRINT2(ha, "exit\n");
        return;
}

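/*
 * Name:        qlnx_trigger_dump
 * Function:    Marks the interface down and captures a grcdump and idle
 *              check buffer for each hw function
 */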
static void
qlnx_trigger_dump(qlnx_host_t *ha)
{
        int     i;

        if (ha->ifp != NULL)
                ha->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);

        QL_DPRINT2(ha, "enter\n");

        for (i = 0; i < ha->cdev.num_hwfns; i++) {
                qlnx_grc_dump(ha, &ha->grcdump_dwords[i], i);
                qlnx_idle_chk(ha, &ha->idle_chk_dwords[i], i);
        }

        QL_DPRINT2(ha, "exit\n");

        return;
}

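/*
 * Name:        qlnx_trigger_dump_sysctl
 * Function:    Sysctl handler; writing 1 triggers a firmware dump
 */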
static int
qlnx_trigger_dump_sysctl(SYSCTL_HANDLER_ARGS)
{
        int             err, ret = 0;
        qlnx_host_t     *ha;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        if (ret == 1) {
                ha = (qlnx_host_t *)arg1;
                qlnx_trigger_dump(ha);
        }
        return (err);
}

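/*
 * Name:        qlnx_set_tx_coalesce
 * Function:    Sysctl handler; sets the tx interrupt coalescing interval
 *              (1-255 usecs) on all tx queues
 */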
static int
qlnx_set_tx_coalesce(SYSCTL_HANDLER_ARGS)
{
        int                     err, i, ret = 0, usecs = 0;
        qlnx_host_t             *ha;
        struct ecore_hwfn       *p_hwfn;
        struct qlnx_fastpath    *fp;

        err = sysctl_handle_int(oidp, &usecs, 0, req);

        if (err || !req->newptr || !usecs || (usecs > 255))
                return (err);

        ha = (qlnx_host_t *)arg1;

        for (i = 0; i < ha->num_rss; i++) {

                p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];

                fp = &ha->fp_array[i];

                if (fp->txq[0]->handle != NULL) {
                        ret = ecore_set_queue_coalesce(p_hwfn, 0,
                                        (uint16_t)usecs, fp->txq[0]->handle);
                }
        }

        if (!ret)
                ha->tx_coalesce_usecs = (uint8_t)usecs;

        return (err);
}

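/*
 * Name:        qlnx_set_rx_coalesce
 * Function:    Sysctl handler; sets the rx interrupt coalescing interval
 *              (1-255 usecs) on all rx queues
 */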
static int
qlnx_set_rx_coalesce(SYSCTL_HANDLER_ARGS)
{
        int                     err, i, ret = 0, usecs = 0;
        qlnx_host_t             *ha;
        struct ecore_hwfn       *p_hwfn;
        struct qlnx_fastpath    *fp;

        err = sysctl_handle_int(oidp, &usecs, 0, req);

        if (err || !req->newptr || !usecs || (usecs > 255))
                return (err);

        ha = (qlnx_host_t *)arg1;

        for (i = 0; i < ha->num_rss; i++) {

                p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];

                fp = &ha->fp_array[i];

                if (fp->rxq->handle != NULL) {
                        ret = ecore_set_queue_coalesce(p_hwfn, (uint16_t)usecs,
                                         0, fp->rxq->handle);
                }
        }

        if (!ret)
                ha->rx_coalesce_usecs = (uint8_t)usecs;

        return (err);
}

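/*
 * Name:        qlnx_add_sp_stats_sysctls
 * Function:    Registers the slowpath statistics under the "spstat" node
 */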
static void
qlnx_add_sp_stats_sysctls(qlnx_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid       *ctx_oid;

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "spstat",
                        CTLFLAG_RD, NULL, "spstat");
        children = SYSCTL_CHILDREN(ctx_oid);

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "sp_interrupts",
                CTLFLAG_RD, &ha->sp_interrupts,
                "No. of slowpath interrupts");

        return;
}

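/*
 * Name:        qlnx_add_fp_stats_sysctls
 * Function:    Registers the per-RSS-queue fastpath statistics under
 *              "fpstat.<queue>"
 */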
static void
qlnx_add_fp_stats_sysctls(qlnx_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid_list  *node_children;
        struct sysctl_oid       *ctx_oid;
        int                     i, j;
        char                    name_str[16];

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fpstat",
                        CTLFLAG_RD, NULL, "fpstat");
        children = SYSCTL_CHILDREN(ctx_oid);

        for (i = 0; i < ha->num_rss; i++) {

                bzero(name_str, sizeof(name_str));
                snprintf(name_str, sizeof(name_str), "%d", i);

                ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
                        CTLFLAG_RD, NULL, name_str);
                node_children = SYSCTL_CHILDREN(ctx_oid);

                /* Tx Related */

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "tx_pkts_processed",
                        CTLFLAG_RD, &ha->fp_array[i].tx_pkts_processed,
                        "No. of packets processed for transmission");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "tx_pkts_freed",
                        CTLFLAG_RD, &ha->fp_array[i].tx_pkts_freed,
                        "No. of freed packets");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "tx_pkts_transmitted",
                        CTLFLAG_RD, &ha->fp_array[i].tx_pkts_transmitted,
                        "No. of transmitted packets");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "tx_pkts_completed",
                        CTLFLAG_RD, &ha->fp_array[i].tx_pkts_completed,
                        "No. of transmit completions");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "tx_lso_wnd_min_len",
                        CTLFLAG_RD, &ha->fp_array[i].tx_lso_wnd_min_len,
                        "tx_lso_wnd_min_len");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "tx_defrag",
                        CTLFLAG_RD, &ha->fp_array[i].tx_defrag,
                        "tx_defrag");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "tx_nsegs_gt_elem_left",
                        CTLFLAG_RD, &ha->fp_array[i].tx_nsegs_gt_elem_left,
                        "tx_nsegs_gt_elem_left");

                SYSCTL_ADD_UINT(ctx, node_children,
                        OID_AUTO, "tx_tso_max_nsegs",
                        CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_nsegs,
                        ha->fp_array[i].tx_tso_max_nsegs, "tx_tso_max_nsegs");

                SYSCTL_ADD_UINT(ctx, node_children,
                        OID_AUTO, "tx_tso_min_nsegs",
                        CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_nsegs,
                        ha->fp_array[i].tx_tso_min_nsegs, "tx_tso_min_nsegs");

                SYSCTL_ADD_UINT(ctx, node_children,
                        OID_AUTO, "tx_tso_max_pkt_len",
                        CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_pkt_len,
                        ha->fp_array[i].tx_tso_max_pkt_len,
                        "tx_tso_max_pkt_len");

                SYSCTL_ADD_UINT(ctx, node_children,
                        OID_AUTO, "tx_tso_min_pkt_len",
                        CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_pkt_len,
                        ha->fp_array[i].tx_tso_min_pkt_len,
                        "tx_tso_min_pkt_len");

                for (j = 0; j < QLNX_FP_MAX_SEGS; j++) {

                        bzero(name_str, sizeof(name_str));
                        snprintf(name_str, sizeof(name_str),
                                "tx_pkts_nseg_%02d", (j+1));

                        SYSCTL_ADD_QUAD(ctx, node_children,
                                OID_AUTO, name_str, CTLFLAG_RD,
                                &ha->fp_array[i].tx_pkts[j], name_str);
                }

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "err_tx_nsegs_gt_elem_left",
                        CTLFLAG_RD, &ha->fp_array[i].err_tx_nsegs_gt_elem_left,
                        "err_tx_nsegs_gt_elem_left");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "err_tx_dmamap_create",
                        CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_create,
                        "err_tx_dmamap_create");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "err_tx_defrag_dmamap_load",
                        CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag_dmamap_load,
                        "err_tx_defrag_dmamap_load");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "err_tx_non_tso_max_seg",
                        CTLFLAG_RD, &ha->fp_array[i].err_tx_non_tso_max_seg,
                        "err_tx_non_tso_max_seg");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "err_tx_dmamap_load",
                        CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_load,
                        "err_tx_dmamap_load");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "err_tx_defrag",
                        CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag,
                        "err_tx_defrag");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "err_tx_free_pkt_null",
                        CTLFLAG_RD, &ha->fp_array[i].err_tx_free_pkt_null,
                        "err_tx_free_pkt_null");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "err_tx_cons_idx_conflict",
                        CTLFLAG_RD, &ha->fp_array[i].err_tx_cons_idx_conflict,
                        "err_tx_cons_idx_conflict");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "lro_cnt_64",
                        CTLFLAG_RD, &ha->fp_array[i].lro_cnt_64,
                        "lro_cnt_64");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "lro_cnt_128",
                        CTLFLAG_RD, &ha->fp_array[i].lro_cnt_128,
                        "lro_cnt_128");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "lro_cnt_256",
                        CTLFLAG_RD, &ha->fp_array[i].lro_cnt_256,
                        "lro_cnt_256");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "lro_cnt_512",
                        CTLFLAG_RD, &ha->fp_array[i].lro_cnt_512,
                        "lro_cnt_512");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "lro_cnt_1024",
                        CTLFLAG_RD, &ha->fp_array[i].lro_cnt_1024,
                        "lro_cnt_1024");

                /* Rx Related */

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "rx_pkts",
                        CTLFLAG_RD, &ha->fp_array[i].rx_pkts,
                        "No. of received packets");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "tpa_start",
                        CTLFLAG_RD, &ha->fp_array[i].tpa_start,
                        "No. of tpa_start packets");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "tpa_cont",
                        CTLFLAG_RD, &ha->fp_array[i].tpa_cont,
                        "No. of tpa_cont packets");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "tpa_end",
                        CTLFLAG_RD, &ha->fp_array[i].tpa_end,
                        "No. of tpa_end packets");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "err_m_getcl",
                        CTLFLAG_RD, &ha->fp_array[i].err_m_getcl,
                        "err_m_getcl");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "err_m_getjcl",
                        CTLFLAG_RD, &ha->fp_array[i].err_m_getjcl,
                        "err_m_getjcl");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "err_rx_hw_errors",
                        CTLFLAG_RD, &ha->fp_array[i].err_rx_hw_errors,
                        "err_rx_hw_errors");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "err_rx_alloc_errors",
                        CTLFLAG_RD, &ha->fp_array[i].err_rx_alloc_errors,
                        "err_rx_alloc_errors");
        }

        return;
}

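/*
 * Name:        qlnx_add_hw_stats_sysctls
 * Function:    Registers the device hardware/MAC statistics under "hwstat"
 */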
static void
qlnx_add_hw_stats_sysctls(qlnx_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid       *ctx_oid;

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "hwstat",
                        CTLFLAG_RD, NULL, "hwstat");
        children = SYSCTL_CHILDREN(ctx_oid);

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "no_buff_discards",
                CTLFLAG_RD, &ha->hw_stats.common.no_buff_discards,
                "No. of packets discarded due to lack of buffer");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "packet_too_big_discard",
                CTLFLAG_RD, &ha->hw_stats.common.packet_too_big_discard,
                "No. of packets discarded because packet was too big");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "ttl0_discard",
                CTLFLAG_RD, &ha->hw_stats.common.ttl0_discard,
                "ttl0_discard");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rx_ucast_bytes",
                CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_bytes,
                "rx_ucast_bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rx_mcast_bytes",
                CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_bytes,
                "rx_mcast_bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rx_bcast_bytes",
                CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_bytes,
                "rx_bcast_bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rx_ucast_pkts",
                CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_pkts,
                "rx_ucast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rx_mcast_pkts",
                CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_pkts,
                "rx_mcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rx_bcast_pkts",
                CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_pkts,
                "rx_bcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "mftag_filter_discards",
                CTLFLAG_RD, &ha->hw_stats.common.mftag_filter_discards,
                "mftag_filter_discards");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "mac_filter_discards",
                CTLFLAG_RD, &ha->hw_stats.common.mac_filter_discards,
                "mac_filter_discards");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "tx_ucast_bytes",
                CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_bytes,
                "tx_ucast_bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "tx_mcast_bytes",
                CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_bytes,
                "tx_mcast_bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "tx_bcast_bytes",
                CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_bytes,
                "tx_bcast_bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "tx_ucast_pkts",
                CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_pkts,
                "tx_ucast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "tx_mcast_pkts",
                CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_pkts,
                "tx_mcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "tx_bcast_pkts",
                CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_pkts,
                "tx_bcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "tx_err_drop_pkts",
                CTLFLAG_RD, &ha->hw_stats.common.tx_err_drop_pkts,
                "tx_err_drop_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "tpa_coalesced_pkts",
                CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_pkts,
                "tpa_coalesced_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "tpa_coalesced_events",
                CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_events,
                "tpa_coalesced_events");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "tpa_aborts_num",
                CTLFLAG_RD, &ha->hw_stats.common.tpa_aborts_num,
                "tpa_aborts_num");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "tpa_not_coalesced_pkts",
                CTLFLAG_RD, &ha->hw_stats.common.tpa_not_coalesced_pkts,
                "tpa_not_coalesced_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "tpa_coalesced_bytes",
                CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_bytes,
                "tpa_coalesced_bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rx_64_byte_packets",
                CTLFLAG_RD, &ha->hw_stats.common.rx_64_byte_packets,
                "rx_64_byte_packets");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rx_65_to_127_byte_packets",
                CTLFLAG_RD, &ha->hw_stats.common.rx_65_to_127_byte_packets,
                "rx_65_to_127_byte_packets");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rx_128_to_255_byte_packets",
                CTLFLAG_RD, &ha->hw_stats.common.rx_128_to_255_byte_packets,
                "rx_128_to_255_byte_packets");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rx_256_to_511_byte_packets",
                CTLFLAG_RD, &ha->hw_stats.common.rx_256_to_511_byte_packets,
                "rx_256_to_511_byte_packets");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rx_512_to_1023_byte_packets",
                CTLFLAG_RD, &ha->hw_stats.common.rx_512_to_1023_byte_packets,
                "rx_512_to_1023_byte_packets");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rx_1024_to_1518_byte_packets",
                CTLFLAG_RD, &ha->hw_stats.common.rx_1024_to_1518_byte_packets,
                "rx_1024_to_1518_byte_packets");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rx_1519_to_1522_byte_packets",
                CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_1522_byte_packets,
                "rx_1519_to_1522_byte_packets");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rx_1523_to_2047_byte_packets",
                CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_2047_byte_packets,
                "rx_1523_to_2047_byte_packets");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rx_2048_to_4095_byte_packets",
                CTLFLAG_RD, &ha->hw_stats.bb.rx_2048_to_4095_byte_packets,
                "rx_2048_to_4095_byte_packets");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rx_4096_to_9216_byte_packets",
                CTLFLAG_RD, &ha->hw_stats.bb.rx_4096_to_9216_byte_packets,
                "rx_4096_to_9216_byte_packets");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rx_9217_to_16383_byte_packets",
                CTLFLAG_RD, &ha->hw_stats.bb.rx_9217_to_16383_byte_packets,
                "rx_9217_to_16383_byte_packets");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rx_crc_errors",
                CTLFLAG_RD, &ha->hw_stats.common.rx_crc_errors,
                "rx_crc_errors");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rx_mac_crtl_frames",
                CTLFLAG_RD, &ha->hw_stats.common.rx_mac_crtl_frames,
                "rx_mac_crtl_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rx_pause_frames",
                CTLFLAG_RD, &ha->hw_stats.common.rx_pause_frames,
                "rx_pause_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rx_pfc_frames",
                CTLFLAG_RD, &ha->hw_stats.common.rx_pfc_frames,
                "rx_pfc_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rx_align_errors",
                CTLFLAG_RD, &ha->hw_stats.common.rx_align_errors,
                "rx_align_errors");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rx_carrier_errors",
                CTLFLAG_RD, &ha->hw_stats.common.rx_carrier_errors,
                "rx_carrier_errors");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rx_oversize_packets",
                CTLFLAG_RD, &ha->hw_stats.common.rx_oversize_packets,
                "rx_oversize_packets");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rx_jabbers",
                CTLFLAG_RD, &ha->hw_stats.common.rx_jabbers,
                "rx_jabbers");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rx_undersize_packets",
1604                 CTLFLAG_RD, &ha->hw_stats.common.rx_undersize_packets,
1605                 "rx_undersize_packets");
1606
1607         SYSCTL_ADD_QUAD(ctx, children,
1608                 OID_AUTO, "rx_fragments",
1609                 CTLFLAG_RD, &ha->hw_stats.common.rx_fragments,
1610                 "rx_fragments");
1611
1612         SYSCTL_ADD_QUAD(ctx, children,
1613                 OID_AUTO, "tx_64_byte_packets",
1614                 CTLFLAG_RD, &ha->hw_stats.common.tx_64_byte_packets,
1615                 "tx_64_byte_packets");
1616
1617         SYSCTL_ADD_QUAD(ctx, children,
1618                 OID_AUTO, "tx_65_to_127_byte_packets",
1619                 CTLFLAG_RD, &ha->hw_stats.common.tx_65_to_127_byte_packets,
1620                 "tx_65_to_127_byte_packets");
1621
1622         SYSCTL_ADD_QUAD(ctx, children,
1623                 OID_AUTO, "tx_128_to_255_byte_packets",
1624                 CTLFLAG_RD, &ha->hw_stats.common.tx_128_to_255_byte_packets,
1625                 "tx_128_to_255_byte_packets");
1626
1627         SYSCTL_ADD_QUAD(ctx, children,
1628                 OID_AUTO, "tx_256_to_511_byte_packets",
1629                 CTLFLAG_RD, &ha->hw_stats.common.tx_256_to_511_byte_packets,
1630                 "tx_256_to_511_byte_packets");
1631
1632         SYSCTL_ADD_QUAD(ctx, children,
1633                 OID_AUTO, "tx_512_to_1023_byte_packets",
1634                 CTLFLAG_RD, &ha->hw_stats.common.tx_512_to_1023_byte_packets,
1635                 "tx_512_to_1023_byte_packets");
1636
1637         SYSCTL_ADD_QUAD(ctx, children,
1638                 OID_AUTO, "tx_1024_to_1518_byte_packets",
1639                 CTLFLAG_RD, &ha->hw_stats.common.tx_1024_to_1518_byte_packets,
1640                 "tx_1024_to_1518_byte_packets");
1641
1642         SYSCTL_ADD_QUAD(ctx, children,
1643                 OID_AUTO, "tx_1519_to_2047_byte_packets",
1644                 CTLFLAG_RD, &ha->hw_stats.bb.tx_1519_to_2047_byte_packets,
1645                 "tx_1519_to_2047_byte_packets");
1646
1647         SYSCTL_ADD_QUAD(ctx, children,
1648                 OID_AUTO, "tx_2048_to_4095_byte_packets",
1649                 CTLFLAG_RD, &ha->hw_stats.bb.tx_2048_to_4095_byte_packets,
1650                 "tx_2048_to_4095_byte_packets");
1651
1652         SYSCTL_ADD_QUAD(ctx, children,
1653                 OID_AUTO, "tx_4096_to_9216_byte_packets",
1654                 CTLFLAG_RD, &ha->hw_stats.bb.tx_4096_to_9216_byte_packets,
1655                 "tx_4096_to_9216_byte_packets");
1656
1657         SYSCTL_ADD_QUAD(ctx, children,
1658                 OID_AUTO, "tx_9217_to_16383_byte_packets",
1659                 CTLFLAG_RD, &ha->hw_stats.bb.tx_9217_to_16383_byte_packets,
1660                 "tx_9217_to_16383_byte_packets");
1661
1662         SYSCTL_ADD_QUAD(ctx, children,
1663                 OID_AUTO, "tx_pause_frames",
1664                 CTLFLAG_RD, &ha->hw_stats.common.tx_pause_frames,
1665                 "tx_pause_frames");
1666
1667         SYSCTL_ADD_QUAD(ctx, children,
1668                 OID_AUTO, "tx_pfc_frames",
1669                 CTLFLAG_RD, &ha->hw_stats.common.tx_pfc_frames,
1670                 "tx_pfc_frames");
1671
1672         SYSCTL_ADD_QUAD(ctx, children,
1673                 OID_AUTO, "tx_lpi_entry_count",
1674                 CTLFLAG_RD, &ha->hw_stats.bb.tx_lpi_entry_count,
1675                 "tx_lpi_entry_count");
1676
1677         SYSCTL_ADD_QUAD(ctx, children,
1678                 OID_AUTO, "tx_total_collisions",
1679                 CTLFLAG_RD, &ha->hw_stats.bb.tx_total_collisions,
1680                 "tx_total_collisions");
1681
1682         SYSCTL_ADD_QUAD(ctx, children,
1683                 OID_AUTO, "brb_truncates",
1684                 CTLFLAG_RD, &ha->hw_stats.common.brb_truncates,
1685                 "brb_truncates");
1686
1687         SYSCTL_ADD_QUAD(ctx, children,
1688                 OID_AUTO, "brb_discards",
1689                 CTLFLAG_RD, &ha->hw_stats.common.brb_discards,
1690                 "brb_discards");
1691
1692         SYSCTL_ADD_QUAD(ctx, children,
1693                 OID_AUTO, "rx_mac_bytes",
1694                 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bytes,
1695                 "rx_mac_bytes");
1696
1697         SYSCTL_ADD_QUAD(ctx, children,
1698                 OID_AUTO, "rx_mac_uc_packets",
1699                 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_uc_packets,
1700                 "rx_mac_uc_packets");
1701
1702         SYSCTL_ADD_QUAD(ctx, children,
1703                 OID_AUTO, "rx_mac_mc_packets",
1704                 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_mc_packets,
1705                 "rx_mac_mc_packets");
1706
1707         SYSCTL_ADD_QUAD(ctx, children,
1708                 OID_AUTO, "rx_mac_bc_packets",
1709                 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bc_packets,
1710                 "rx_mac_bc_packets");
1711
1712         SYSCTL_ADD_QUAD(ctx, children,
1713                 OID_AUTO, "rx_mac_frames_ok",
1714                 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_frames_ok,
1715                 "rx_mac_frames_ok");
1716
1717         SYSCTL_ADD_QUAD(ctx, children,
1718                 OID_AUTO, "tx_mac_bytes",
1719                 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bytes,
1720                 "tx_mac_bytes");
1721
1722         SYSCTL_ADD_QUAD(ctx, children,
1723                 OID_AUTO, "tx_mac_uc_packets",
1724                 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_uc_packets,
1725                 "tx_mac_uc_packets");
1726
1727         SYSCTL_ADD_QUAD(ctx, children,
1728                 OID_AUTO, "tx_mac_mc_packets",
1729                 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_mc_packets,
1730                 "tx_mac_mc_packets");
1731
1732         SYSCTL_ADD_QUAD(ctx, children,
1733                 OID_AUTO, "tx_mac_bc_packets",
1734                 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bc_packets,
1735                 "tx_mac_bc_packets");
1736
1737         SYSCTL_ADD_QUAD(ctx, children,
1738                 OID_AUTO, "tx_mac_ctrl_frames",
1739                 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_ctrl_frames,
1740                 "tx_mac_ctrl_frames");
1741         return;
1742 }
1743
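/*
 * Name: qlnx_add_sysctls
 * Function: Registers the per-device sysctl nodes: the fastpath, slowpath
 *      and hardware statistics trees plus the firmware-version strings and
 *      debug/tunable knobs below, all under this device's sysctl tree.
 *      Illustrative usage (<driver>/<unit> depend on the attachment):
 *
 *          sysctl dev.<driver>.<unit>.debug=0x1
 *          sysctl dev.<driver>.<unit>.rx_pkt_threshold=64
 */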
1744 static void
1745 qlnx_add_sysctls(qlnx_host_t *ha)
1746 {
1747         device_t                dev = ha->pci_dev;
1748         struct sysctl_ctx_list  *ctx;
1749         struct sysctl_oid_list  *children;
1750
1751         ctx = device_get_sysctl_ctx(dev);
1752         children = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
1753
1754         qlnx_add_fp_stats_sysctls(ha);
1755         qlnx_add_sp_stats_sysctls(ha);
1756         qlnx_add_hw_stats_sysctls(ha);
1757
1758         SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Driver_Version",
1759                 CTLFLAG_RD, qlnx_ver_str, 0,
1760                 "Driver Version");
1761
1762         SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "STORMFW_Version",
1763                 CTLFLAG_RD, ha->stormfw_ver, 0,
1764                 "STORM Firmware Version");
1765
1766         SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "MFW_Version",
1767                 CTLFLAG_RD, ha->mfw_ver, 0,
1768                 "Management Firmware Version");
1769
1770         SYSCTL_ADD_UINT(ctx, children,
1771                 OID_AUTO, "personality", CTLFLAG_RD,
1772                 &ha->personality, ha->personality,
1773                 "\tpersonality = 0 => Ethernet Only\n"
1774                 "\tpersonality = 3 => Ethernet and RoCE\n"
1775                 "\tpersonality = 4 => Ethernet and iWARP\n"
1776                 "\tpersonality = 6 => Default in Shared Memory\n");
1777
1778         ha->dbg_level = 0;
1779         SYSCTL_ADD_UINT(ctx, children,
1780                 OID_AUTO, "debug", CTLFLAG_RW,
1781                 &ha->dbg_level, ha->dbg_level, "Debug Level");
1782
1783         ha->dp_level = 0x01;
1784         SYSCTL_ADD_UINT(ctx, children,
1785                 OID_AUTO, "dp_level", CTLFLAG_RW,
1786                 &ha->dp_level, ha->dp_level, "DP Level");
1787
1788         ha->dbg_trace_lro_cnt = 0;
1789         SYSCTL_ADD_UINT(ctx, children,
1790                 OID_AUTO, "dbg_trace_lro_cnt", CTLFLAG_RW,
1791                 &ha->dbg_trace_lro_cnt, ha->dbg_trace_lro_cnt,
1792                 "Trace LRO Counts");
1793
1794         ha->dbg_trace_tso_pkt_len = 0;
1795         SYSCTL_ADD_UINT(ctx, children,
1796                 OID_AUTO, "dbg_trace_tso_pkt_len", CTLFLAG_RW,
1797                 &ha->dbg_trace_tso_pkt_len, ha->dbg_trace_tso_pkt_len,
1798                 "Trace TSO packet lengths");
1799
1800         ha->dp_module = 0;
1801         SYSCTL_ADD_UINT(ctx, children,
1802                 OID_AUTO, "dp_module", CTLFLAG_RW,
1803                 &ha->dp_module, ha->dp_module, "DP Module");
1804
1805         ha->err_inject = 0;
1806
1807         SYSCTL_ADD_UINT(ctx, children,
1808                 OID_AUTO, "err_inject", CTLFLAG_RW,
1809                 &ha->err_inject, ha->err_inject, "Error Inject");
1810
1811         ha->storm_stats_enable = 0;
1812
1813         SYSCTL_ADD_UINT(ctx, children,
1814                 OID_AUTO, "storm_stats_enable", CTLFLAG_RW,
1815                 &ha->storm_stats_enable, ha->storm_stats_enable,
1816                 "Enable Storm Statistics Gathering");
1817
1818         ha->storm_stats_index = 0;
1819
1820         SYSCTL_ADD_UINT(ctx, children,
1821                 OID_AUTO, "storm_stats_index", CTLFLAG_RD,
1822                 &ha->storm_stats_index, ha->storm_stats_index,
1823                 "Storm Statistics Current Index");
1824
1825         ha->grcdump_taken = 0;
1826         SYSCTL_ADD_UINT(ctx, children,
1827                 OID_AUTO, "grcdump_taken", CTLFLAG_RD,
1828                 &ha->grcdump_taken, ha->grcdump_taken, "grcdump_taken");
1829
1830         ha->idle_chk_taken = 0;
1831         SYSCTL_ADD_UINT(ctx, children,
1832                 OID_AUTO, "idle_chk_taken", CTLFLAG_RD,
1833                 &ha->idle_chk_taken, ha->idle_chk_taken, "idle_chk_taken");
1834
1835         SYSCTL_ADD_UINT(ctx, children,
1836                 OID_AUTO, "rx_coalesce_usecs", CTLFLAG_RD,
1837                 &ha->rx_coalesce_usecs, ha->rx_coalesce_usecs,
1838                 "rx_coalesce_usecs");
1839
1840         SYSCTL_ADD_UINT(ctx, children,
1841                 OID_AUTO, "tx_coalesce_usecs", CTLFLAG_RD,
1842                 &ha->tx_coalesce_usecs, ha->tx_coalesce_usecs,
1843                 "tx_coalesce_usecs");
1844
1845         ha->rx_pkt_threshold = 128;
1846         SYSCTL_ADD_UINT(ctx, children,
1847                 OID_AUTO, "rx_pkt_threshold", CTLFLAG_RW,
1848                 &ha->rx_pkt_threshold, ha->rx_pkt_threshold,
1849                 "No. of Rx Pkts to process at a time");
1850
1851         ha->rx_jumbo_buf_eq_mtu = 0;
1852         SYSCTL_ADD_UINT(ctx, children,
1853                 OID_AUTO, "rx_jumbo_buf_eq_mtu", CTLFLAG_RW,
1854                 &ha->rx_jumbo_buf_eq_mtu, ha->rx_jumbo_buf_eq_mtu,
1855                 "== 0 => Rx Jumbo buffers are capped to 4Kbytes\n"
1856                 "otherwise Rx Jumbo buffers are set to >= MTU size\n");
1857
1858         SYSCTL_ADD_PROC(ctx, children,
1859                 OID_AUTO, "trigger_dump", CTLTYPE_INT | CTLFLAG_RW,
1860                 (void *)ha, 0,
1861                 qlnx_trigger_dump_sysctl, "I", "trigger_dump");
1862
1863         SYSCTL_ADD_PROC(ctx, children,
1864                 OID_AUTO, "set_rx_coalesce_usecs", CTLTYPE_INT | CTLFLAG_RW,
1865                 (void *)ha, 0,
1866                 qlnx_set_rx_coalesce, "I",
1867                 "rx interrupt coalesce period microseconds");
1868
1869         SYSCTL_ADD_PROC(ctx, children,
1870                 OID_AUTO, "set_tx_coalesce_usecs", CTLTYPE_INT | CTLFLAG_RW,
1871                 (void *)ha, 0,
1872                 qlnx_set_tx_coalesce, "I",
1873                 "tx interrupt coalesce period microseconds");
1874
1875         SYSCTL_ADD_QUAD(ctx, children,
1876                 OID_AUTO, "err_illegal_intr", CTLFLAG_RD,
1877                 &ha->err_illegal_intr, "err_illegal_intr");
1878
1879         SYSCTL_ADD_QUAD(ctx, children,
1880                 OID_AUTO, "err_fp_null", CTLFLAG_RD,
1881                 &ha->err_fp_null, "err_fp_null");
1882
1883         SYSCTL_ADD_QUAD(ctx, children,
1884                 OID_AUTO, "err_get_proto_invalid_type", CTLFLAG_RD,
1885                 &ha->err_get_proto_invalid_type, "err_get_proto_invalid_type");
1886         return;
1887 }
1888
1889
1890
1891 /*****************************************************************************
1892  * Operating System Network Interface Functions
1893  *****************************************************************************/
1894
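/*
 * Name: qlnx_init_ifnet
 * Function: Allocates and attaches the ifnet: picks the link speed from the
 *      PCI device id, wires up the init/ioctl/transmit/qflush entry points,
 *      advertises checksum/TSO/LRO/VLAN capabilities and populates the
 *      supported media list before defaulting the media to autoselect.
 */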
1895 static void
1896 qlnx_init_ifnet(device_t dev, qlnx_host_t *ha)
1897 {
1898         uint16_t        device_id;
1899         struct ifnet    *ifp;
1900
1901         ifp = ha->ifp = if_alloc(IFT_ETHER);
1902
1903         if (ifp == NULL)
1904                 panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));
1905
1906         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1907
1908         device_id = pci_get_device(ha->pci_dev);
1909
1910 #if __FreeBSD_version >= 1000000
1911
1912         if (device_id == QLOGIC_PCI_DEVICE_ID_1634) 
1913                 ifp->if_baudrate = IF_Gbps(40);
1914         else if (device_id == QLOGIC_PCI_DEVICE_ID_1656)
1915                 ifp->if_baudrate = IF_Gbps(25);
1916         else if (device_id == QLOGIC_PCI_DEVICE_ID_1654)
1917                 ifp->if_baudrate = IF_Gbps(50);
1918         else if (device_id == QLOGIC_PCI_DEVICE_ID_1644)
1919                 ifp->if_baudrate = IF_Gbps(100);
1920
1921         ifp->if_capabilities = IFCAP_LINKSTATE;
1922 #else
1923         ifp->if_mtu = ETHERMTU;
1924         ifp->if_baudrate = (1 * 1000 * 1000 * 1000);
1925
1926 #endif /* #if __FreeBSD_version >= 1000000 */
1927
1928         ifp->if_init = qlnx_init;
1929         ifp->if_softc = ha;
1930         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1931         ifp->if_ioctl = qlnx_ioctl;
1932         ifp->if_transmit = qlnx_transmit;
1933         ifp->if_qflush = qlnx_qflush;
1934
1935         IFQ_SET_MAXLEN(&ifp->if_snd, qlnx_get_ifq_snd_maxlen(ha));
1936         ifp->if_snd.ifq_drv_maxlen = qlnx_get_ifq_snd_maxlen(ha);
1937         IFQ_SET_READY(&ifp->if_snd);
1938
1939 #if __FreeBSD_version >= 1100036
1940         if_setgetcounterfn(ifp, qlnx_get_counter);
1941 #endif
1942
1943         ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1944
1945         memcpy(ha->primary_mac, qlnx_get_mac_addr(ha), ETH_ALEN);
1946         ether_ifattach(ifp, ha->primary_mac);
1947         bcopy(IF_LLADDR(ha->ifp), ha->primary_mac, ETHER_ADDR_LEN);
1948
1949         ifp->if_capabilities |= IFCAP_HWCSUM; /* keep IFCAP_LINKSTATE set above */
1950         ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1951
1952         ifp->if_capabilities |= IFCAP_VLAN_MTU;
1953         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
1954         ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
1955         ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
1956         ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
1957         ifp->if_capabilities |= IFCAP_TSO4;
1958         ifp->if_capabilities |= IFCAP_TSO6;
1959         ifp->if_capabilities |= IFCAP_LRO;
1960
1961         ifp->if_capenable = ifp->if_capabilities;
1962
1963         ifp->if_hwassist = CSUM_IP;
1964         ifp->if_hwassist |= CSUM_TCP | CSUM_UDP;
1965         ifp->if_hwassist |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6;
1966         ifp->if_hwassist |= CSUM_TSO;
1967
1968         ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1969
1970         ifmedia_init(&ha->media, IFM_IMASK, qlnx_media_change,
1971                 qlnx_media_status);
1972
1973         if (device_id == QLOGIC_PCI_DEVICE_ID_1634) {
1974                 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_LR4), 0, NULL);
1975                 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_SR4), 0, NULL);
1976                 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_CR4), 0, NULL);
1977         } else if (device_id == QLOGIC_PCI_DEVICE_ID_1656) {
1978                 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_SR), 0, NULL);
1979                 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_CR), 0, NULL);
1980         } else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) {
1981                 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_KR2), 0, NULL);
1982                 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_CR2), 0, NULL);
1983         } else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) {
1984                 ifmedia_add(&ha->media,
1985                         (IFM_ETHER | QLNX_IFM_100G_LR4), 0, NULL);
1986                 ifmedia_add(&ha->media,
1987                         (IFM_ETHER | QLNX_IFM_100G_SR4), 0, NULL);
1988                 ifmedia_add(&ha->media,
1989                         (IFM_ETHER | QLNX_IFM_100G_CR4), 0, NULL);
1990         }
1991
1992         ifmedia_add(&ha->media, (IFM_ETHER | IFM_FDX), 0, NULL);
1993         ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);
1994
1995
1996         ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));
1997
1998         QL_DPRINT2(ha, "exit\n");
1999
2000         return;
2001 }
2002
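/*
 * Name: qlnx_init_locked
 * Function: (Re)initializes the interface with the QLNX lock held: any
 *      previous instance is stopped first, and IFF_DRV_RUNNING is set only
 *      if qlnx_load() succeeds.
 */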
2003 static void
2004 qlnx_init_locked(qlnx_host_t *ha)
2005 {
2006         struct ifnet    *ifp = ha->ifp;
2007
2008         QL_DPRINT1(ha, "Driver Initialization start \n");
2009
2010         qlnx_stop(ha);
2011
2012         if (qlnx_load(ha) == 0) {
2013                 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2014                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2015         }
2016
2017         return;
2018 }
2019
2020 static void
2021 qlnx_init(void *arg)
2022 {
2023         qlnx_host_t     *ha;
2024
2025         ha = (qlnx_host_t *)arg;
2026
2027         QL_DPRINT2(ha, "enter\n");
2028
2029         QLNX_LOCK(ha);
2030         qlnx_init_locked(ha);
2031         QLNX_UNLOCK(ha);
2032
2033         QL_DPRINT2(ha, "exit\n");
2034
2035         return;
2036 }
2037
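/*
 * Name: qlnx_config_mcast_mac_addr
 * Function: Issues a single-address ECORE_FILTER_ADD/ECORE_FILTER_REMOVE
 *      multicast filter command to the firmware via the slowpath queue
 *      in callback completion mode (ECORE_SPQ_MODE_CB).
 */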
2038 static int
2039 qlnx_config_mcast_mac_addr(qlnx_host_t *ha, uint8_t *mac_addr, uint32_t add_mac)
2040 {
2041         struct ecore_filter_mcast       *mcast;
2042         struct ecore_dev                *cdev;
2043         int                             rc;
2044
2045         cdev = &ha->cdev;
2046
2047         mcast = &ha->ecore_mcast;
2048         bzero(mcast, sizeof(struct ecore_filter_mcast));
2049
2050         if (add_mac)
2051                 mcast->opcode = ECORE_FILTER_ADD;
2052         else
2053                 mcast->opcode = ECORE_FILTER_REMOVE;
2054
2055         mcast->num_mc_addrs = 1;
2056         memcpy(mcast->mac, mac_addr, ETH_ALEN);
2057
2058         rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);
2059
2060         return (rc);
2061 }
2062
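/*
 * Name: qlnx_hw_add_mcast
 * Function: Adds one multicast MAC to the hardware filter and mirrors it
 *      in the ha->mcast[] shadow table; an all-zero entry marks a free
 *      slot, and an address already present is silently ignored.
 */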
2063 static int
2064 qlnx_hw_add_mcast(qlnx_host_t *ha, uint8_t *mta)
2065 {
2066         int     i;
2067
2068         for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2069
2070                 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0)
2071                         return 0; /* it has already been added */
2072         }
2073
2074         for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2075
2076                 if ((ha->mcast[i].addr[0] == 0) &&
2077                         (ha->mcast[i].addr[1] == 0) &&
2078                         (ha->mcast[i].addr[2] == 0) &&
2079                         (ha->mcast[i].addr[3] == 0) &&
2080                         (ha->mcast[i].addr[4] == 0) &&
2081                         (ha->mcast[i].addr[5] == 0)) {
2082
2083                         if (qlnx_config_mcast_mac_addr(ha, mta, 1))
2084                                 return (-1);
2085
2086                         bcopy(mta, ha->mcast[i].addr, ETH_ALEN);
2087                         ha->nmcast++;
2088
2089                         return 0;
2090                 }
2091         }
2092         return 0;
2093 }
2094
2095 static int
2096 qlnx_hw_del_mcast(qlnx_host_t *ha, uint8_t *mta)
2097 {
2098         int     i;
2099
2100         for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2101                 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) {
2102
2103                         if (qlnx_config_mcast_mac_addr(ha, mta, 0))
2104                                 return (-1);
2105
2106                         ha->mcast[i].addr[0] = 0;
2107                         ha->mcast[i].addr[1] = 0;
2108                         ha->mcast[i].addr[2] = 0;
2109                         ha->mcast[i].addr[3] = 0;
2110                         ha->mcast[i].addr[4] = 0;
2111                         ha->mcast[i].addr[5] = 0;
2112
2113                         ha->nmcast--;
2114
2115                         return 0;
2116                 }
2117         }
2118         return 0;
2119 }
2120
2121 /*
2122  * Name: qlnx_hw_set_multi
2123  * Function: Sets the Multicast Addresses provided by the host O.S. into the
2124  *      hardware (for the given interface)
2125  */
2126 static void
2127 qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
2128         uint32_t add_mac)
2129 {
2130         int     i;
2131
2132         for (i = 0; i < mcnt; i++) {
2133                 if (add_mac) {
2134                         if (qlnx_hw_add_mcast(ha, mta))
2135                                 break;
2136                 } else {
2137                         if (qlnx_hw_del_mcast(ha, mta))
2138                                 break;
2139                 }
2140
2141                 mta += ETHER_HDR_LEN;
2142         }
2143         return;
2144 }
2145
2146
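/*
 * The mta[] scratch table below is strided by ETHER_HDR_LEN (14) rather
 * than ETHER_ADDR_LEN (6); only the first 6 bytes of each slot carry the
 * MAC address. The stride must match the one used by qlnx_hw_set_multi()
 * and qlnx_set_multi() when walking the table.
 */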
2147 #define QLNX_MCAST_ADDRS_SIZE (QLNX_MAX_NUM_MULTICAST_ADDRS * ETHER_HDR_LEN)
2148 static int
2149 qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi)
2150 {
2151         uint8_t                 mta[QLNX_MCAST_ADDRS_SIZE];
2152         struct ifmultiaddr      *ifma;
2153         int                     mcnt = 0;
2154         struct ifnet            *ifp = ha->ifp;
2155         int                     ret = 0;
2156
2157         if_maddr_rlock(ifp);
2158
2159         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2160
2161                 if (ifma->ifma_addr->sa_family != AF_LINK)
2162                         continue;
2163
2164                 if (mcnt == QLNX_MAX_NUM_MULTICAST_ADDRS)
2165                         break;
2166
2167                 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
2168                         &mta[mcnt * ETHER_HDR_LEN], ETHER_HDR_LEN);
2169
2170                 mcnt++;
2171         }
2172
2173         if_maddr_runlock(ifp);
2174
2175         QLNX_LOCK(ha);
2176         qlnx_hw_set_multi(ha, mta, mcnt, add_multi);
2177         QLNX_UNLOCK(ha);
2178
2179         return (ret);
2180 }
2181
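/*
 * qlnx_set_promisc()/qlnx_set_allmulti() widen the current Rx accept
 * filter: promiscuous mode also accepts unicast frames that match no MAC
 * filter, while allmulti only accepts unmatched multicast frames.
 */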
2182 static int
2183 qlnx_set_promisc(qlnx_host_t *ha)
2184 {
2185         int     rc = 0;
2186         uint8_t filter;
2187
2188         filter = ha->filter;
2189         filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
2190         filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
2191
2192         rc = qlnx_set_rx_accept_filter(ha, filter);
2193         return (rc);
2194 }
2195
2196 static int
2197 qlnx_set_allmulti(qlnx_host_t *ha)
2198 {
2199         int     rc = 0;
2200         uint8_t filter;
2201
2202         filter = ha->filter;
2203         filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
2204         rc = qlnx_set_rx_accept_filter(ha, filter);
2205
2206         return (rc);
2207 }
2208
2209
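/*
 * Name: qlnx_ioctl
 * Function: Interface ioctl handler; address/MTU/flags/capability changes
 *      that affect the hardware re-run qlnx_init_locked(), and anything
 *      not handled here falls through to ether_ioctl().
 */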
2210 static int
2211 qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2212 {
2213         int             ret = 0, mask;
2214         struct ifreq    *ifr = (struct ifreq *)data;
2215         struct ifaddr   *ifa = (struct ifaddr *)data;
2216         qlnx_host_t     *ha;
2217
2218         ha = (qlnx_host_t *)ifp->if_softc;
2219
2220         switch (cmd) {
2221         case SIOCSIFADDR:
2222                 QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx)\n", cmd);
2223
2224                 if (ifa->ifa_addr->sa_family == AF_INET) {
2225                         ifp->if_flags |= IFF_UP;
2226                         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2227                                 QLNX_LOCK(ha);
2228                                 qlnx_init_locked(ha);
2229                                 QLNX_UNLOCK(ha);
2230                         }
2231                         QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
2232                                    cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr));
2233
2234                         arp_ifinit(ifp, ifa);
2235                 } else {
2236                         ether_ioctl(ifp, cmd, data);
2237                 }
2238                 break;
2239
2240         case SIOCSIFMTU:
2241                 QL_DPRINT4(ha, "SIOCSIFMTU (0x%lx)\n", cmd);
2242
2243                 if (ifr->ifr_mtu > QLNX_MAX_MTU) {
2244                         ret = EINVAL;
2245                 } else {
2246                         QLNX_LOCK(ha);
2247                         ifp->if_mtu = ifr->ifr_mtu;
2248                         ha->max_frame_size =
2249                                 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2250                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2251                                 qlnx_init_locked(ha);
2252                         }
2253
2254                         QLNX_UNLOCK(ha);
2255                 }
2256
2257                 break;
2258
2259         case SIOCSIFFLAGS:
2260                 QL_DPRINT4(ha, "SIOCSIFFLAGS (0x%lx)\n", cmd);
2261
2262                 QLNX_LOCK(ha);
2263
2264                 if (ifp->if_flags & IFF_UP) {
2265                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2266                                 if ((ifp->if_flags ^ ha->if_flags) &
2267                                         IFF_PROMISC) {
2268                                         ret = qlnx_set_promisc(ha);
2269                                 } else if ((ifp->if_flags ^ ha->if_flags) &
2270                                         IFF_ALLMULTI) {
2271                                         ret = qlnx_set_allmulti(ha);
2272                                 }
2273                         } else {
2274                                 ha->max_frame_size = ifp->if_mtu +
2275                                         ETHER_HDR_LEN + ETHER_CRC_LEN;
2276                                 qlnx_init_locked(ha);
2277                         }
2278                 } else {
2279                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2280                                 qlnx_stop(ha);
2281                         ha->if_flags = ifp->if_flags;
2282                 }
2283
2284                 QLNX_UNLOCK(ha);
2285                 break;
2286
2287         case SIOCADDMULTI:
2288                 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCADDMULTI", cmd);
2289
2290                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2291                         if (qlnx_set_multi(ha, 1))
2292                                 ret = EINVAL;
2293                 }
2294                 break;
2295
2296         case SIOCDELMULTI:
2297                 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCDELMULTI", cmd);
2298
2299                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2300                         if (qlnx_set_multi(ha, 0))
2301                                 ret = EINVAL;
2302                 }
2303                 break;
2304
2305         case SIOCSIFMEDIA:
2306         case SIOCGIFMEDIA:
2307                 QL_DPRINT4(ha, "SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", cmd);
2308
2309                 ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
2310                 break;
2311
2312         case SIOCSIFCAP:
2313                 
2314                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2315
2316                 QL_DPRINT4(ha, "SIOCSIFCAP (0x%lx)\n", cmd);
2317
2318                 if (mask & IFCAP_HWCSUM)
2319                         ifp->if_capenable ^= IFCAP_HWCSUM;
2320                 if (mask & IFCAP_TSO4)
2321                         ifp->if_capenable ^= IFCAP_TSO4;
2322                 if (mask & IFCAP_TSO6)
2323                         ifp->if_capenable ^= IFCAP_TSO6;
2324                 if (mask & IFCAP_VLAN_HWTAGGING)
2325                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2326                 if (mask & IFCAP_VLAN_HWTSO)
2327                         ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
2328                 if (mask & IFCAP_LRO)
2329                         ifp->if_capenable ^= IFCAP_LRO;
2330
2331                 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
2332                         qlnx_init(ha);
2333
2334                 VLAN_CAPABILITIES(ifp);
2335                 break;
2336
2337 #if (__FreeBSD_version >= 1100101)
2338
2339         case SIOCGI2C:
2340         {
2341                 struct ifi2creq i2c;
2342                 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0];
2343                 struct ecore_ptt *p_ptt;
2344
2345                 ret = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
2346
2347                 if (ret)
2348                         break;
2349
2350                 if ((i2c.len > sizeof (i2c.data)) ||
2351                         (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2)) {
2352                         ret = EINVAL;
2353                         break;
2354                 }
2355
2356                 p_ptt = ecore_ptt_acquire(p_hwfn);
2357
2358                 if (!p_ptt) {
2359                         QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
2360                         ret = -1;
2361                         break;
2362                 }
2363
2364                 ret = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt,
2365                         (ha->pci_func & 0x1), i2c.dev_addr, i2c.offset,
2366                         i2c.len, &i2c.data[0]);
2367
2368                 ecore_ptt_release(p_hwfn, p_ptt);
2369
2370                 if (ret) {
2371                         ret = -1;
2372                         break;
2373                 }
2374
2375                 ret = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
2376
2377                 QL_DPRINT8(ha, "SIOCGI2C copyout ret = %d "
2378                         "len = %d addr = 0x%02x offset = 0x%04x "
2379                         "data[0..7]=0x%02x 0x%02x 0x%02x 0x%02x 0x%02x "
2380                         "0x%02x 0x%02x 0x%02x\n",
2381                         ret, i2c.len, i2c.dev_addr, i2c.offset,
2382                         i2c.data[0], i2c.data[1], i2c.data[2], i2c.data[3],
2383                         i2c.data[4], i2c.data[5], i2c.data[6], i2c.data[7]);
2384                 break;
2385         }
2386 #endif /* #if (__FreeBSD_version >= 1100101) */
2387
2388         default:
2389                 QL_DPRINT4(ha, "default (0x%lx)\n", cmd);
2390                 ret = ether_ioctl(ifp, cmd, data);
2391                 break;
2392         }
2393
2394         return (ret);
2395 }
2396
2397 static int
2398 qlnx_media_change(struct ifnet *ifp)
2399 {
2400         qlnx_host_t     *ha;
2401         struct ifmedia  *ifm;
2402         int             ret = 0;
2403
2404         ha = (qlnx_host_t *)ifp->if_softc;
2405
2406         QL_DPRINT2(ha, "enter\n");
2407
2408         ifm = &ha->media;
2409
2410         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2411                 ret = EINVAL;
2412
2413         QL_DPRINT2(ha, "exit\n");
2414
2415         return (ret);
2416 }
2417
2418 static void
2419 qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2420 {
2421         qlnx_host_t             *ha;
2422
2423         ha = (qlnx_host_t *)ifp->if_softc;
2424
2425         QL_DPRINT2(ha, "enter\n");
2426
2427         ifmr->ifm_status = IFM_AVALID;
2428         ifmr->ifm_active = IFM_ETHER;
2429
2430         if (ha->link_up) {
2431                 ifmr->ifm_status |= IFM_ACTIVE;
2432                 ifmr->ifm_active |=
2433                         (IFM_FDX | qlnx_get_optics(ha, &ha->if_link));
2434
2435                 if (ha->if_link.link_partner_caps &
2436                         (QLNX_LINK_CAP_Pause | QLNX_LINK_CAP_Asym_Pause))
2437                         ifmr->ifm_active |=
2438                                 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE);
2439         }
2440
2441         QL_DPRINT2(ha, "exit (%s)\n", (ha->link_up ? "link_up" : "link_down"));
2442
2443         return;
2444 }
2445
2446
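/*
 * Name: qlnx_free_tx_pkt
 * Function: Reclaims the transmit entry at sw_tx_cons: syncs and unloads
 *      the DMA map, frees the mbuf chain, and consumes all nbds buffer
 *      descriptors of the packet from the tx descriptor chain (tx_pbl).
 */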
2447 static void
2448 qlnx_free_tx_pkt(qlnx_host_t *ha, struct qlnx_fastpath *fp,
2449         struct qlnx_tx_queue *txq)
2450 {
2451         u16                     idx;
2452         struct mbuf             *mp;
2453         bus_dmamap_t            map;
2454         int                     i;
2455         struct eth_tx_bd        *tx_data_bd;
2456         struct eth_tx_1st_bd    *first_bd;
2457         int                     nbds = 0;
2458
2459         idx = txq->sw_tx_cons;
2460         mp = txq->sw_tx_ring[idx].mp;
2461         map = txq->sw_tx_ring[idx].map;
2462
2463         if ((mp == NULL) || QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL)){
2464
2465                 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL);
2466
2467                 QL_DPRINT1(ha, "(mp == NULL) "
2468                         " tx_idx = 0x%x"
2469                         " ecore_prod_idx = 0x%x"
2470                         " ecore_cons_idx = 0x%x"
2471                         " hw_bd_cons = 0x%x"
2472                         " txq_db_last = 0x%x"
2473                         " elem_left = 0x%x\n",
2474                         fp->rss_id,
2475                         ecore_chain_get_prod_idx(&txq->tx_pbl),
2476                         ecore_chain_get_cons_idx(&txq->tx_pbl),
2477                         le16toh(*txq->hw_cons_ptr),
2478                         txq->tx_db.raw,
2479                         ecore_chain_get_elem_left(&txq->tx_pbl));
2480
2481                 fp->err_tx_free_pkt_null++;
2482
2483                 //DEBUG
2484                 qlnx_trigger_dump(ha);
2485
2486                 return;
2487         } else {
2488
2489                 QLNX_INC_OPACKETS((ha->ifp));
2490                 QLNX_INC_OBYTES((ha->ifp), (mp->m_pkthdr.len));
2491
2492                 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_POSTWRITE);
2493                 bus_dmamap_unload(ha->tx_tag, map);
2494
2495                 fp->tx_pkts_freed++;
2496                 fp->tx_pkts_completed++;
2497
2498                 m_freem(mp);
2499         }
2500
2501         first_bd = (struct eth_tx_1st_bd *)ecore_chain_consume(&txq->tx_pbl);
2502         nbds = first_bd->data.nbds;
2503
2504 //      BD_SET_UNMAP_ADDR_LEN(first_bd, 0, 0);
2505
2506         for (i = 1; i < nbds; i++) {
2507                 tx_data_bd = ecore_chain_consume(&txq->tx_pbl);
2508 //              BD_SET_UNMAP_ADDR_LEN(tx_data_bd, 0, 0);
2509         }
2510         txq->sw_tx_ring[idx].flags = 0;
2511         txq->sw_tx_ring[idx].mp = NULL;
2512         txq->sw_tx_ring[idx].map = (bus_dmamap_t)0;
2513
2514         return;
2515 }
2516
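/*
 * Name: qlnx_tx_int
 * Function: Processes transmit completions until the driver's consumer
 *      index catches up with the hardware's. The indices are 16-bit and
 *      wrap, so the distance is computed modulo 2^16; e.g. with
 *      ecore_cons_idx == 0xfffe and hw_bd_cons == 0x0002,
 *      diff = (1 << 16) - (0xfffe - 0x0002) = 4 outstanding completions.
 */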
2517 static void
2518 qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
2519         struct qlnx_tx_queue *txq)
2520 {
2521         u16 hw_bd_cons;
2522         u16 ecore_cons_idx;
2523         uint16_t diff;
2524
2525         hw_bd_cons = le16toh(*txq->hw_cons_ptr);
2526
2527         while (hw_bd_cons !=
2528                 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
2529
2530                 if (hw_bd_cons < ecore_cons_idx) {
2531                         diff = (1 << 16) - (ecore_cons_idx - hw_bd_cons);
2532                 } else {
2533                         diff = hw_bd_cons - ecore_cons_idx;
2534                 }
2535                 if ((diff > TX_RING_SIZE) ||
2536                         QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF)){
2537
2538                         QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF);
2539
2540                         QL_DPRINT1(ha, "(diff = 0x%x) "
2541                                 " tx_idx = 0x%x"
2542                                 " ecore_prod_idx = 0x%x"
2543                                 " ecore_cons_idx = 0x%x"
2544                                 " hw_bd_cons = 0x%x"
2545                                 " txq_db_last = 0x%x"
2546                                 " elem_left = 0x%x\n",
2547                                 diff,
2548                                 fp->rss_id,
2549                                 ecore_chain_get_prod_idx(&txq->tx_pbl),
2550                                 ecore_chain_get_cons_idx(&txq->tx_pbl),
2551                                 le16toh(*txq->hw_cons_ptr),
2552                                 txq->tx_db.raw,
2553                                 ecore_chain_get_elem_left(&txq->tx_pbl));
2554
2555                         fp->err_tx_cons_idx_conflict++;
2556
2557                         //DEBUG
2558                         qlnx_trigger_dump(ha);
2559                 }
2560
2561                 qlnx_free_tx_pkt(ha, fp, txq);
2562
2563                 txq->sw_tx_cons = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1);
2564         }
2565         return;
2566 }
2567
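/*
 * Name: qlnx_transmit
 * Function: if_transmit entry point. Spreads traffic across the fastpaths
 *      by flowid (RSS hash) modulo num_rss, enqueues the mbuf on that
 *      fastpath's buf_ring and kicks its taskqueue. Note that a failed
 *      drbr_enqueue() is not propagated (ret is reset to 0 below);
 *      drbr_enqueue() frees the mbuf on failure.
 */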
2568 static int
2569 qlnx_transmit(struct ifnet *ifp, struct mbuf  *mp)
2570 {
2571         qlnx_host_t             *ha = (qlnx_host_t *)ifp->if_softc;
2572         struct qlnx_fastpath    *fp;
2573         int                     rss_id = 0, ret = 0;
2574
2575         QL_DPRINT2(ha, "enter\n");
2576
2577 #if __FreeBSD_version >= 1100000
2578         if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE)
2579 #else
2580         if (mp->m_flags & M_FLOWID)
2581 #endif
2582                 rss_id = (mp->m_pkthdr.flowid % ECORE_RSS_IND_TABLE_SIZE) %
2583                                         ha->num_rss;
2584
2585         fp = &ha->fp_array[rss_id];
2586
2587         if (fp->tx_br == NULL) {
2588                 ret = EINVAL;
2589                 goto qlnx_transmit_exit;
2590         }
2591
2592         if (mp != NULL) {
2593                 ret = drbr_enqueue(ifp, fp->tx_br, mp);
2594         }
2595
2596         if (fp->fp_taskqueue != NULL)
2597                 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
2598
2599         ret = 0;
2600
2601 qlnx_transmit_exit:
2602
2603         QL_DPRINT2(ha, "exit ret = %d\n", ret);
2604         return ret;
2605 }
2606
2607 static void
2608 qlnx_qflush(struct ifnet *ifp)
2609 {
2610         int                     rss_id;
2611         struct qlnx_fastpath    *fp;
2612         struct mbuf             *mp;
2613         qlnx_host_t             *ha;
2614
2615         ha = (qlnx_host_t *)ifp->if_softc;
2616
2617         QL_DPRINT2(ha, "enter\n");
2618
2619         for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
2620
2621                 fp = &ha->fp_array[rss_id];
2622
2623                 if (fp == NULL)
2624                         continue;
2625
2626                 if (fp->tx_br) {
2627                         mtx_lock(&fp->tx_mtx);
2628
2629                         while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) { 
2630                                 fp->tx_pkts_freed++;
2631                                 m_freem(mp);                    
2632                         }
2633                         mtx_unlock(&fp->tx_mtx);
2634                 }
2635         }
2636         QL_DPRINT2(ha, "exit\n");
2637
2638         return;
2639 }
2640
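/*
 * Name: qlnx_txq_doorbell_wr32
 * Function: Writes a Tx doorbell record into the doorbell BAR and issues
 *      read barriers on both register BARs to force the posted write out
 *      before the caller proceeds.
 */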
2641 static void
2642 qlnx_txq_doorbell_wr32(qlnx_host_t *ha, void *reg_addr, uint32_t value)
2643 {
2644         struct ecore_dev        *cdev;
2645         uint32_t                offset;
2646
2647         cdev = &ha->cdev;
2648                 
2649         offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)cdev->doorbells);
2650
2651         bus_write_4(ha->pci_dbells, offset, value);
2652         bus_barrier(ha->pci_reg,  0, 0, BUS_SPACE_BARRIER_READ);
2653         bus_barrier(ha->pci_dbells,  0, 0, BUS_SPACE_BARRIER_READ);
2654
2655         return;
2656 }
2657
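/*
 * Name: qlnx_tcp_offset
 * Function: Returns the total L2 + L3 + L4 header length of a TSO frame
 *      (Ethernet/VLAN header, IPv4 or IPv6 header, and TCP header
 *      including options), or 0 for other ethertypes. The caller uses it
 *      to split the headers from the payload across buffer descriptors;
 *      the TCP header is assumed to be contiguous in the first mbuf.
 */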
2658 static uint32_t
2659 qlnx_tcp_offset(qlnx_host_t *ha, struct mbuf *mp)
2660 {
2661         struct ether_vlan_header        *eh = NULL;
2662         struct ip                       *ip = NULL;
2663         struct ip6_hdr                  *ip6 = NULL;
2664         struct tcphdr                   *th = NULL;
2665         uint32_t                        ehdrlen = 0, ip_hlen = 0, offset = 0;
2666         uint16_t                        etype = 0;
2667         device_t                        dev;
2668         uint8_t                         buf[sizeof(struct ip6_hdr)];
2669
2670         dev = ha->pci_dev;
2671
2672         eh = mtod(mp, struct ether_vlan_header *);
2673
2674         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2675                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2676                 etype = ntohs(eh->evl_proto);
2677         } else {
2678                 ehdrlen = ETHER_HDR_LEN;
2679                 etype = ntohs(eh->evl_encap_proto);
2680         }
2681
2682         switch (etype) {
2683
2684                 case ETHERTYPE_IP:
2685                         ip = (struct ip *)(mp->m_data + ehdrlen);
2686
2687                         ip_hlen = sizeof (struct ip);
2688
2689                         if (mp->m_len < (ehdrlen + ip_hlen)) {
2690                                 m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
2691                                 ip = (struct ip *)buf;
2692                         }
2693
2694                         th = (struct tcphdr *)(ip + 1);
2695                         offset = ip_hlen + ehdrlen + (th->th_off << 2);
2696                 break;
2697
2698                 case ETHERTYPE_IPV6:
2699                         ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2700
2701                         ip_hlen = sizeof(struct ip6_hdr);
2702
2703                         if (mp->m_len < (ehdrlen + ip_hlen)) {
2704                                 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
2705                                         buf);
2706                                 ip6 = (struct ip6_hdr *)buf;
2707                         }
2708                         th = (struct tcphdr *)(ip6 + 1);
2709                         offset = ip_hlen + ehdrlen + (th->th_off << 2);
2710                 break;
2711
2712                 default:
2713                 break;
2714         }
2715
2716         return (offset);
2717 }
2718
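/*
 * Name: qlnx_tso_check
 * Function: Enforces the firmware LSO sliding-window rule: within any
 *      ETH_TX_LSO_WINDOW_BDS_NUM consecutive BDs (less those spanned by
 *      the TCP/IP headers) at least ETH_TX_LSO_WINDOW_MIN_LEN bytes of
 *      payload must be present. Returns -1 if a window is too short, in
 *      which case the caller defragments the mbuf chain.
 */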
2719 static __inline int
2720 qlnx_tso_check(struct qlnx_fastpath *fp, bus_dma_segment_t *segs, int nsegs,
2721         uint32_t offset)
2722 {
2723         int                     i;
2724         uint32_t                sum, nbds_in_hdr = 1;
2725         bus_dma_segment_t       *t_segs = segs;
2726
2727         /* count the number of segments spanned by TCP header */
2728
2729         i = 0;
2730         while ((i < nsegs) && (offset > t_segs->ds_len)) {
2731                 nbds_in_hdr++;
2732                 offset = offset - t_segs->ds_len;
2733                 t_segs++;
2734                 i++;
2735         }
2736
2737         while (nsegs >= QLNX_MAX_SEGMENTS_NON_TSO) {
2738
2739                 sum = 0;
2740
2741                 for (i = 0; i < (ETH_TX_LSO_WINDOW_BDS_NUM - nbds_in_hdr); i++){
2742                         sum += segs->ds_len;
2743                         segs++;
2744                 }
2745
2746                 if (sum < ETH_TX_LSO_WINDOW_MIN_LEN) {
2747                         fp->tx_lso_wnd_min_len++;
2748                         return (-1);
2749                 }
2750
2751                 nsegs -= QLNX_MAX_SEGMENTS_NON_TSO;
2752         }
2753
2754         return (0);
2755 }
2756
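/*
 * Name: qlnx_send
 * Function: Maps the mbuf chain for DMA and builds the buffer-descriptor
 *      chain for one packet: the first BD carries the offload flags (and,
 *      for TSO, only the headers), the second and third BDs carry TSO
 *      parameters such as the MSS, and any remaining segments become
 *      plain data BDs. Falls back to m_defrag() when the segment count
 *      exceeds what a non-TSO packet may use, and backs off with ENOBUFS
 *      when the ring is short on descriptors.
 */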
2757 static int
2758 qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct mbuf **m_headp)
2759 {
2760         bus_dma_segment_t       *segs;
2761         bus_dmamap_t            map = 0;
2762         uint32_t                nsegs = 0;
2763         int                     ret = -1;
2764         struct mbuf             *m_head = *m_headp;
2765         uint16_t                idx = 0;
2766         uint16_t                elem_left;
2767
2768         uint8_t                 nbd = 0;
2769         struct qlnx_tx_queue    *txq;
2770
2771         struct eth_tx_1st_bd    *first_bd;
2772         struct eth_tx_2nd_bd    *second_bd;
2773         struct eth_tx_3rd_bd    *third_bd;
2774         struct eth_tx_bd        *tx_data_bd;
2775
2776         int                     seg_idx = 0;
2777         uint32_t                nbds_in_hdr = 0;
2778         uint32_t                offset = 0;
2779
2780         QL_DPRINT8(ha, "enter\n");
2781
2782         if (!ha->link_up)
2783                 return (-1);
2784
2785         first_bd        = NULL;
2786         second_bd       = NULL;
2787         third_bd        = NULL;
2788         tx_data_bd      = NULL;
2789
2790         txq = fp->txq[0];
2791
2792         if (fp->tx_ring_full) {
2793                 elem_left = ecore_chain_get_elem_left(&txq->tx_pbl);
2794
2795                 if (elem_left < (TX_RING_SIZE >> 4)) 
2796                         return (-1);
2797                 else 
2798                         fp->tx_ring_full = 0;
2799         }
2800
2801         idx = txq->sw_tx_prod;
2802
2803         map = txq->sw_tx_ring[idx].map;
2804         segs = txq->segs;
2805
2806         ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
2807                         BUS_DMA_NOWAIT);
2808
2809         if (ha->dbg_trace_tso_pkt_len) {
2810                 if (!fp->tx_tso_min_pkt_len) {
2811                         fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len;
2812                         fp->tx_tso_max_pkt_len = m_head->m_pkthdr.len;
2813                 } else {
2814                         if (fp->tx_tso_min_pkt_len > m_head->m_pkthdr.len)
2815                                 fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len;
2816                         if (fp->tx_tso_max_pkt_len < m_head->m_pkthdr.len)
2817                                 fp->tx_tso_max_pkt_len = m_head->m_pkthdr.len;
2818                 }
2819         }
2820
2821         if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
2822                 offset = qlnx_tcp_offset(ha, m_head);
2823
2824         if ((ret == EFBIG) ||
2825                 ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && (
2826                         (!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) ||
2827                 ((m_head->m_pkthdr.csum_flags & CSUM_TSO) &&
2828                         qlnx_tso_check(fp, segs, nsegs, offset))))) {
2829
2830                 struct mbuf *m;
2831
2832                 QL_DPRINT8(ha, "EFBIG [%d]\n", m_head->m_pkthdr.len);
2833
2834                 fp->tx_defrag++;
2835
2836                 m = m_defrag(m_head, M_NOWAIT);
2837                 if (m == NULL) {
2838                         fp->err_tx_defrag++;
2839                         fp->tx_pkts_freed++;
2840                         m_freem(m_head);
2841                         *m_headp = NULL;
2842                         QL_DPRINT1(ha, "m_defrag() = NULL [%d]\n", ret);
2843                         return (ENOBUFS);
2844                 }
2845
2846                 m_head = m;
2847                 *m_headp = m_head;
2848
2849                 if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
2850                                 segs, &nsegs, BUS_DMA_NOWAIT))) {
2851
2852                         fp->err_tx_defrag_dmamap_load++;
2853
2854                         QL_DPRINT1(ha,
2855                                 "bus_dmamap_load_mbuf_sg failed0 [%d, %d]\n",
2856                                 ret, m_head->m_pkthdr.len);
2857
2858                         fp->tx_pkts_freed++;
2859                         m_freem(m_head);
2860                         *m_headp = NULL;
2861
2862                         return (ret);
2863                 }
2864
2865                 if ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) &&
2866                         !(m_head->m_pkthdr.csum_flags & CSUM_TSO)) {
2867
2868                         fp->err_tx_non_tso_max_seg++;
2869
2870                         QL_DPRINT1(ha,
2871                                 "(%d) too many segments for non-TSO [%d, %d]\n",
2872                                 ret, nsegs, m_head->m_pkthdr.len);
2873
2874                         fp->tx_pkts_freed++;
2875                         m_freem(m_head);
2876                         *m_headp = NULL;
2877
2878                         return (ret);
2879                 }
2880                 if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
2881                         offset = qlnx_tcp_offset(ha, m_head);
2882
2883         } else if (ret) {
2884
2885                 fp->err_tx_dmamap_load++;
2886
2887                 QL_DPRINT1(ha, "bus_dmamap_load_mbuf_sg failed1 [%d, %d]\n",
2888                            ret, m_head->m_pkthdr.len);
2889                 fp->tx_pkts_freed++;
2890                 m_freem(m_head);
2891                 *m_headp = NULL;
2892                 return (ret);
2893         }
2894
2895         QL_ASSERT(ha, (nsegs != 0), ("qlnx_send: empty packet"));
2896
2897         if (ha->dbg_trace_tso_pkt_len) {
2898                 if (nsegs < QLNX_FP_MAX_SEGS)
2899                         fp->tx_pkts[(nsegs - 1)]++;
2900                 else
2901                         fp->tx_pkts[(QLNX_FP_MAX_SEGS - 1)]++; 
2902         }
2903
2904         if ((nsegs + QLNX_TX_ELEM_RESERVE) >
2905                 (int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))) {
2906
2907                 QL_DPRINT1(ha, "(%d, 0x%x) insufficient BDs"
2908                         " in chain[%d] trying to free packets\n",
2909                         nsegs, elem_left, fp->rss_id);
2910
2911                 fp->tx_nsegs_gt_elem_left++;
2912
2913                 (void)qlnx_tx_int(ha, fp, txq);
2914
2915                 if ((nsegs + QLNX_TX_ELEM_RESERVE) > (int)(elem_left =
2916                         ecore_chain_get_elem_left(&txq->tx_pbl))) {
2917
2918                         QL_DPRINT1(ha,
2919                                 "(%d, 0x%x) insufficient BDs in chain[%d]\n",
2920                                 nsegs, elem_left, fp->rss_id);
2921
2922                         fp->err_tx_nsegs_gt_elem_left++;
2923                         fp->tx_ring_full = 1;
2924                         ha->storm_stats_enable = 1;
2925                         return (ENOBUFS);
2926                 }
2927         }
2928
2929         bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);
2930
2931         txq->sw_tx_ring[idx].mp = m_head;
2932
2933         first_bd = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
2934
2935         memset(first_bd, 0, sizeof(*first_bd));
2936
2937         first_bd->data.bd_flags.bitfields =
2938                 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
2939
2940         BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, segs->ds_len);
2941
2942         nbd++;
2943
2944         if (m_head->m_pkthdr.csum_flags & CSUM_IP) {
2945                 first_bd->data.bd_flags.bitfields |=
2946                         (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
2947         }
2948
2949         if (m_head->m_pkthdr.csum_flags &
2950                 (CSUM_UDP | CSUM_TCP | CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) {
2951                 first_bd->data.bd_flags.bitfields |=
2952                         (1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT);
2953         }
2954
2955         if (m_head->m_flags & M_VLANTAG) {
2956                 first_bd->data.vlan = m_head->m_pkthdr.ether_vtag;
2957                 first_bd->data.bd_flags.bitfields |=
2958                         (1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT);
2959         }
2960
2961         if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
2962
2963                 first_bd->data.bd_flags.bitfields |=
2964                         (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
2965                 first_bd->data.bd_flags.bitfields |=
2966                         (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
2967
2968                 nbds_in_hdr = 1;
2969
2970                 if (offset == segs->ds_len) {
2971                         BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
2972                         segs++;
2973                         seg_idx++;
2974
2975                         second_bd = (struct eth_tx_2nd_bd *)
2976                                         ecore_chain_produce(&txq->tx_pbl);
2977                         memset(second_bd, 0, sizeof(*second_bd));
2978                         nbd++;
2979
2980                         if (seg_idx < nsegs) {
2981                                 BD_SET_UNMAP_ADDR_LEN(second_bd, \
2982                                         (segs->ds_addr), (segs->ds_len));
2983                                 segs++;
2984                                 seg_idx++;
2985                         }
2986
2987                         third_bd = (struct eth_tx_3rd_bd *)
2988                                         ecore_chain_produce(&txq->tx_pbl);
2989                         memset(third_bd, 0, sizeof(*third_bd));
2990                         third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
2991                         third_bd->data.bitfields |=
2992                                 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
2993                         nbd++;
2994
2995                         if (seg_idx < nsegs) {
2996                                 BD_SET_UNMAP_ADDR_LEN(third_bd, \
2997                                         (segs->ds_addr), (segs->ds_len));
2998                                 segs++;
2999                                 seg_idx++;
3000                         }
3001
3002                         for (; seg_idx < nsegs; seg_idx++) {
3003                                 tx_data_bd = (struct eth_tx_bd *)
3004                                         ecore_chain_produce(&txq->tx_pbl);
3005                                 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3006                                 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \
3007                                         segs->ds_addr,\
3008                                         segs->ds_len);
3009                                 segs++;
3010                                 nbd++;
3011                         }
3012
3013                 } else if (offset < segs->ds_len) {
3014                         BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
3015
3016                         second_bd = (struct eth_tx_2nd_bd *)
3017                                         ecore_chain_produce(&txq->tx_pbl);
3018                         memset(second_bd, 0, sizeof(*second_bd));
3019                         BD_SET_UNMAP_ADDR_LEN(second_bd, \
3020                                 (segs->ds_addr + offset),\
3021                                 (segs->ds_len - offset));
3022                         nbd++;
3023                         segs++;
3024
3025                         third_bd = (struct eth_tx_3rd_bd *)
3026                                         ecore_chain_produce(&txq->tx_pbl);
3027                         memset(third_bd, 0, sizeof(*third_bd));
3028
3029                         BD_SET_UNMAP_ADDR_LEN(third_bd, \
3030                                         segs->ds_addr,\
3031                                         segs->ds_len);
3032                         third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3033                         third_bd->data.bitfields |=
3034                                 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3035                         segs++;
3036                         nbd++;
3037
3038                         for (seg_idx = 2; seg_idx < nsegs; seg_idx++) {
3039                                 tx_data_bd = (struct eth_tx_bd *)
3040                                         ecore_chain_produce(&txq->tx_pbl);
3041                                 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3042                                 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \
3043                                         segs->ds_addr,\
3044                                         segs->ds_len);
3045                                 segs++;
3046                                 nbd++;
3047                         }
3048
3049                 } else {
3050                         offset = offset - segs->ds_len;
3051                         segs++;
3052
3053                         for (seg_idx = 1; seg_idx < nsegs; seg_idx++) {
3054
3055                                 if (offset)
3056                                         nbds_in_hdr++;
3057
3058                                 tx_data_bd = (struct eth_tx_bd *)
3059                                         ecore_chain_produce(&txq->tx_pbl);
3060                                 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3061
3062                                 if (second_bd == NULL) {
3063                                         second_bd = (struct eth_tx_2nd_bd *)
3064                                                                 tx_data_bd;
3065                                 } else if (third_bd == NULL) {
3066                                         third_bd = (struct eth_tx_3rd_bd *)
3067                                                                 tx_data_bd;
3068                                 }
3069                                 
3070                                 if (offset && (offset < segs->ds_len)) {
3071                                         BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3072                                                 segs->ds_addr, offset);
3073
3074                                         tx_data_bd = (struct eth_tx_bd *)
3075                                         ecore_chain_produce(&txq->tx_pbl);
3076
3077                                         memset(tx_data_bd, 0,
3078                                                 sizeof(*tx_data_bd));
3079
3080                                         if (second_bd == NULL) {
3081                                                 second_bd =
3082                                         (struct eth_tx_2nd_bd *)tx_data_bd;
3083                                         } else if (third_bd == NULL) {
3084                                                 third_bd =
3085                                         (struct eth_tx_3rd_bd *)tx_data_bd;
3086                                         }
3087                                         BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3088                                                 (segs->ds_addr + offset), \
3089                                                 (segs->ds_len - offset));
3090                                         nbd++;
3091                                         offset = 0;
3092                                 } else {
3093                                         if (offset)
3094                                                 offset = offset - segs->ds_len;
3095                                         BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3096                                                 segs->ds_addr, segs->ds_len);
3097                                 }
3098                                 segs++;
3099                                 nbd++;
3100                         }
3101
3102                         if (third_bd == NULL) {
3103                                 third_bd = (struct eth_tx_3rd_bd *)
3104                                         ecore_chain_produce(&txq->tx_pbl);
3105                                 memset(third_bd, 0, sizeof(*third_bd));
3106                         }
3107
3108                         third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3109                         third_bd->data.bitfields |=
3110                                 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3111                 }
3112         } else {
3113                 segs++;
3114                 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) {
3115                         tx_data_bd = (struct eth_tx_bd *)
3116                                         ecore_chain_produce(&txq->tx_pbl);
3117                         memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3118                         BD_SET_UNMAP_ADDR_LEN(tx_data_bd, segs->ds_addr,\
3119                                 segs->ds_len);
3120                         segs++;
3121                         nbd++;
3122                 }
3123                 first_bd->data.bitfields =
3124                         (m_head->m_pkthdr.len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
3125                                  << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
3126                 first_bd->data.bitfields =
3127                         htole16(first_bd->data.bitfields);
3128         }
3129
3130
3131         first_bd->data.nbds = nbd;
3132
3133         if (ha->dbg_trace_tso_pkt_len) {
3134                 if (fp->tx_tso_max_nsegs < nsegs)
3135                         fp->tx_tso_max_nsegs = nsegs;
3136
3137                 if ((nsegs < fp->tx_tso_min_nsegs) || (!fp->tx_tso_min_nsegs))
3138                         fp->tx_tso_min_nsegs = nsegs;
3139         }
3140
3141         txq->sw_tx_ring[idx].nsegs = nsegs;
3142         txq->sw_tx_prod = (txq->sw_tx_prod + 1) & (TX_RING_SIZE - 1);
3143
3144         txq->tx_db.data.bd_prod =
3145                 htole16(ecore_chain_get_prod_idx(&txq->tx_pbl));
3146
3147         qlnx_txq_doorbell_wr32(ha, txq->doorbell_addr, txq->tx_db.raw);
3148    
3149         QL_DPRINT8(ha, "exit\n");
3150         return (0);
3151 }
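/*
 * Minimal sketch (hypothetical helper, not driver code): how the TSO path
 * above decides how many BDs the packet header spans.  nbds_in_hdr starts
 * at 1 and grows each time the remaining header offset outruns a DMA
 * segment; only that arithmetic is mirrored here.
 */
#if 0
static int
example_nbds_in_hdr(const bus_dma_segment_t *segs, int nsegs,
        bus_size_t hdr_len)
{
        int     nbds = 1;       /* the first BD always carries header bytes */
        int     i;

        for (i = 0; (i < nsegs) && (hdr_len > segs[i].ds_len); i++) {
                hdr_len -= segs[i].ds_len;
                nbds++;         /* header spills into the next segment */
        }
        return (nbds);
}
#endif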
3152
3153 static void
3154 qlnx_stop(qlnx_host_t *ha)
3155 {
3156         struct ifnet    *ifp = ha->ifp;
3157         device_t        dev;
3158         int             i;
3159
3160         dev = ha->pci_dev;
3161
3162         ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
3163
3164         /*
3165          * We simply lock and unlock each fp->tx_mtx to
3166          * propagate the if_drv_flags
3167          * state to each tx thread
3168          */
3169         QL_DPRINT1(ha, "QLNX STATE = %d\n",ha->state);
3170
3171         if (ha->state == QLNX_STATE_OPEN) {
3172                 for (i = 0; i < ha->num_rss; i++) {
3173                         struct qlnx_fastpath *fp = &ha->fp_array[i];
3174
3175                         mtx_lock(&fp->tx_mtx);
3176                         mtx_unlock(&fp->tx_mtx);
3177
3178                         if (fp->fp_taskqueue != NULL)
3179                                 taskqueue_enqueue(fp->fp_taskqueue,
3180                                         &fp->fp_task);
3181                 }
3182         }
3183
3184         qlnx_unload(ha);
3185
3186         return;
3187 }
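/*
 * The lock/unlock pairs above act as a barrier; a minimal sketch of the
 * idiom with hypothetical names.  Any tx thread holding the mutex when the
 * flag is cleared finishes its pass before the lock is granted, so every
 * later pass observes IFF_DRV_RUNNING clear.
 */
#if 0
static void
example_quiesce(struct mtx *m, volatile int *running)
{
        *running = 0;   /* cleared before the barrier */
        mtx_lock(m);    /* blocks until the current holder is done */
        mtx_unlock(m);  /* subsequent holders now see *running == 0 */
}
#endif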
3188
3189 static int
3190 qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha)
3191 {
3192         return (TX_RING_SIZE - 1);
3193 }
3194
3195 uint8_t *
3196 qlnx_get_mac_addr(qlnx_host_t *ha)
3197 {
3198         struct ecore_hwfn       *p_hwfn;
3199
3200         p_hwfn = &ha->cdev.hwfns[0];
3201         return (p_hwfn->hw_info.hw_mac_addr);
3202 }
3203
3204 static uint32_t
3205 qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link)
3206 {
3207         uint32_t        ifm_type = 0;
3208
3209         switch (if_link->media_type) {
3210
3211         case MEDIA_MODULE_FIBER:
3212         case MEDIA_UNSPECIFIED:
3213                 if (if_link->speed == (100 * 1000))
3214                         ifm_type = QLNX_IFM_100G_SR4;
3215                 else if (if_link->speed == (40 * 1000))
3216                         ifm_type = IFM_40G_SR4;
3217                 else if (if_link->speed == (25 * 1000))
3218                         ifm_type = QLNX_IFM_25G_SR;
3219                 break;
3220
3221         case MEDIA_DA_TWINAX:
3222                 if (if_link->speed == (100 * 1000))
3223                         ifm_type = QLNX_IFM_100G_CR4;
3224                 else if (if_link->speed == (40 * 1000))
3225                         ifm_type = IFM_40G_CR4;
3226                 else if (if_link->speed == (25 * 1000))
3227                         ifm_type = QLNX_IFM_25G_CR;
3228                 break;
3229
3230         default:
3231                 ifm_type = IFM_UNKNOWN;
3232                 break;
3233         }
3234         return (ifm_type);
3235 }
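/*
 * Minimal usage sketch (hypothetical caller): the value returned by
 * qlnx_get_optics() is an ifmedia(4) subtype and is OR'ed into the
 * active-media word reported to the stack.
 */
#if 0
static void
example_report_media(qlnx_host_t *ha, struct ifmediareq *ifmr,
        struct qlnx_link_output *if_link)
{
        ifmr->ifm_active = IFM_ETHER | IFM_FDX;
        ifmr->ifm_active |= qlnx_get_optics(ha, if_link);
}
#endif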
3236
3237
3238
3239 /*****************************************************************************
3240  * Interrupt Service Functions
3241  *****************************************************************************/
3242
3243 static int
3244 qlnx_rx_jumbo_chain(qlnx_host_t *ha, struct qlnx_fastpath *fp,
3245         struct mbuf *mp_head, uint16_t len)
3246 {
3247         struct mbuf             *mp, *mpf, *mpl;
3248         struct sw_rx_data       *sw_rx_data;
3249         struct qlnx_rx_queue    *rxq;
3250         uint16_t                len_in_buffer;
3251
3252         rxq = fp->rxq;
3253         mpf = mpl = mp = NULL;
3254
3255         while (len) {
3256
3257                 rxq->sw_rx_cons  = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3258
3259                 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3260                 mp = sw_rx_data->data;
3261
3262                 if (mp == NULL) {
3263                         QL_DPRINT1(ha, "mp = NULL\n");
3264                         fp->err_rx_mp_null++;
3265                         rxq->sw_rx_cons  =
3266                                 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3267
3268                         if (mpf != NULL)
3269                                 m_freem(mpf);
3270
3271                         return (-1);
3272                 }
3273                 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3274                         BUS_DMASYNC_POSTREAD);
3275
3276                 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3277
3278                         QL_DPRINT1(ha, "New buffer allocation failed, dropping"
3279                                 " incoming packet and reusing its buffer\n");
3280
3281                         qlnx_reuse_rx_data(rxq);
3282                         fp->err_rx_alloc_errors++;
3283
3284                         if (mpf != NULL)
3285                                 m_freem(mpf);
3286
3287                         return (-1);
3288                 }
3289                 ecore_chain_consume(&rxq->rx_bd_ring);
3290
3291                 if (len > rxq->rx_buf_size)
3292                         len_in_buffer = rxq->rx_buf_size;
3293                 else
3294                         len_in_buffer = len;
3295
3296                 len = len - len_in_buffer;
3297
3298                 mp->m_flags &= ~M_PKTHDR;
3299                 mp->m_next = NULL;
3300                 mp->m_len = len_in_buffer;
3301
3302                 if (mpf == NULL)
3303                         mpf = mpl = mp;
3304                 else {
3305                         mpl->m_next = mp;
3306                         mpl = mp;
3307                 }
3308         }
3309
3310         if (mpf != NULL)
3311                 mp_head->m_next = mpf;
3312
3313         return (0);
3314 }
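/*
 * Minimal sketch of the length-splitting rule used above: every ring
 * buffer contributes at most rxq->rx_buf_size bytes and the final buffer
 * carries the remainder.  Hypothetical helper for illustration only.
 */
#if 0
static uint16_t
example_len_in_buffer(uint16_t len, uint16_t rx_buf_size)
{
        return ((len > rx_buf_size) ? rx_buf_size : len);
}
#endif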
3315
3316 static void
3317 qlnx_tpa_start(qlnx_host_t *ha,
3318         struct qlnx_fastpath *fp,
3319         struct qlnx_rx_queue *rxq,
3320         struct eth_fast_path_rx_tpa_start_cqe *cqe)
3321 {
3322         uint32_t                agg_index;
3323         struct ifnet            *ifp = ha->ifp;
3324         struct mbuf             *mp;
3325         struct mbuf             *mpf = NULL, *mpl = NULL, *mpc = NULL;
3326         struct sw_rx_data       *sw_rx_data;
3327         dma_addr_t              addr;
3328         bus_dmamap_t            map;
3329         struct eth_rx_bd        *rx_bd;
3330         int                     i;
3331         device_t                dev;
3332 #if __FreeBSD_version >= 1100000
3333         uint8_t                 hash_type;
3334 #endif /* #if __FreeBSD_version >= 1100000 */
3335
3336         dev = ha->pci_dev;
3337         agg_index = cqe->tpa_agg_index;
3338
3339         QL_DPRINT7(ha, "[rss_id = %d]: enter\n \
3340                 \t type = 0x%x\n \
3341                 \t bitfields = 0x%x\n \
3342                 \t seg_len = 0x%x\n \
3343                 \t pars_flags = 0x%x\n \
3344                 \t vlan_tag = 0x%x\n \
3345                 \t rss_hash = 0x%x\n \
3346                 \t len_on_first_bd = 0x%x\n \
3347                 \t placement_offset = 0x%x\n \
3348                 \t tpa_agg_index = 0x%x\n \
3349                 \t header_len = 0x%x\n \
3350                 \t ext_bd_len_list[0] = 0x%x\n \
3351                 \t ext_bd_len_list[1] = 0x%x\n \
3352                 \t ext_bd_len_list[2] = 0x%x\n \
3353                 \t ext_bd_len_list[3] = 0x%x\n \
3354                 \t ext_bd_len_list[4] = 0x%x\n",
3355                 fp->rss_id, cqe->type, cqe->bitfields, cqe->seg_len,
3356                 cqe->pars_flags.flags, cqe->vlan_tag,
3357                 cqe->rss_hash, cqe->len_on_first_bd, cqe->placement_offset,
3358                 cqe->tpa_agg_index, cqe->header_len,
3359                 cqe->ext_bd_len_list[0], cqe->ext_bd_len_list[1],
3360                 cqe->ext_bd_len_list[2], cqe->ext_bd_len_list[3],
3361                 cqe->ext_bd_len_list[4]);
3362
3363         if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
3364                 fp->err_rx_tpa_invalid_agg_num++;
3365                 return;
3366         }
3367
3368         sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3369         bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD);
3370         mp = sw_rx_data->data;
3371
3372         QL_DPRINT7(ha, "[rss_id = %d]: mp = %p \n ", fp->rss_id, mp);
3373
3374         if (mp == NULL) {
3375                 QL_DPRINT7(ha, "[%d]: mp = NULL\n", fp->rss_id);
3376                 fp->err_rx_mp_null++;
3377                 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3378
3379                 return;
3380         }
3381
3382         if ((le16toh(cqe->pars_flags.flags)) & CQE_FLAGS_ERR) {
3383
3384                 QL_DPRINT7(ha, "[%d]: CQE in CONS = %u has error,"
3385                         " flags = %x, dropping incoming packet\n", fp->rss_id,
3386                         rxq->sw_rx_cons, le16toh(cqe->pars_flags.flags));
3387
3388                 fp->err_rx_hw_errors++;
3389
3390                 qlnx_reuse_rx_data(rxq);
3391
3392                 QLNX_INC_IERRORS(ifp);
3393
3394                 return;
3395         }
3396
3397         if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3398
3399                 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
3400                         " dropping incoming packet and reusing its buffer\n",
3401                         fp->rss_id);
3402
3403                 fp->err_rx_alloc_errors++;
3404                 QLNX_INC_IQDROPS(ifp);
3405
3406                 /*
3407                  * Load the tpa mbuf into the rx ring and save the 
3408                  * posted mbuf
3409                  */
3410
3411                 map = sw_rx_data->map;
3412                 addr = sw_rx_data->dma_addr;
3413
3414                 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
3415
3416                 sw_rx_data->data = rxq->tpa_info[agg_index].rx_buf.data;
3417                 sw_rx_data->dma_addr = rxq->tpa_info[agg_index].rx_buf.dma_addr;
3418                 sw_rx_data->map = rxq->tpa_info[agg_index].rx_buf.map;
3419
3420                 rxq->tpa_info[agg_index].rx_buf.data = mp;
3421                 rxq->tpa_info[agg_index].rx_buf.dma_addr = addr;
3422                 rxq->tpa_info[agg_index].rx_buf.map = map;
3423
3424                 rx_bd = (struct eth_rx_bd *)
3425                                 ecore_chain_produce(&rxq->rx_bd_ring);
3426
3427                 rx_bd->addr.hi = htole32(U64_HI(sw_rx_data->dma_addr));
3428                 rx_bd->addr.lo = htole32(U64_LO(sw_rx_data->dma_addr));
3429
3430                 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3431                         BUS_DMASYNC_PREREAD);
3432
3433                 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
3434                 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3435
3436                 ecore_chain_consume(&rxq->rx_bd_ring);
3437
3438                 /* Now reuse any buffers posted in ext_bd_len_list */
3439                 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
3440
3441                         if (cqe->ext_bd_len_list[i] == 0)
3442                                 break;
3443
3444                         qlnx_reuse_rx_data(rxq);
3445                 }
3446
3447                 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
3448                 return;
3449         }
3450
3451         if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
3452
3453                 QL_DPRINT7(ha, "[%d]: invalid aggregation state,"
3454                         " dropping incoming packet and reusing its buffer\n",
3455                         fp->rss_id);
3456
3457                 QLNX_INC_IQDROPS(ifp);
3458
3459                 /* if we already have an mbuf head in the aggregation, free it */
3460                 if (rxq->tpa_info[agg_index].mpf) {
3461                         m_freem(rxq->tpa_info[agg_index].mpf);
3462                         rxq->tpa_info[agg_index].mpl = NULL;
3463                 }
3464                 rxq->tpa_info[agg_index].mpf = mp;
3465                 rxq->tpa_info[agg_index].mpl = NULL;
3466
3467                 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3468                 ecore_chain_consume(&rxq->rx_bd_ring);
3469
3470                 /* Now reuse any buffers posted in ext_bd_len_list */
3471                 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
3472
3473                         if (cqe->ext_bd_len_list[i] == 0)
3474                                 break;
3475
3476                         qlnx_reuse_rx_data(rxq);
3477                 }
3478                 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
3479
3480                 return;
3481         }
3482
3483         /*
3484          * first process the ext_bd_len_list 
3485          * if this fails then we simply drop the packet
3486          */
3487         ecore_chain_consume(&rxq->rx_bd_ring);
3488         rxq->sw_rx_cons  = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3489
3490         for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
3491
3492                 QL_DPRINT7(ha, "[%d]: 4\n ", fp->rss_id);
3493
3494                 if (cqe->ext_bd_len_list[i] == 0)
3495                         break;
3496
3497                 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3498                 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3499                         BUS_DMASYNC_POSTREAD);
3500
3501                 mpc = sw_rx_data->data;
3502
3503                 if (mpc == NULL) {
3504                         QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
3505                         fp->err_rx_mp_null++;
3506                         if (mpf != NULL)
3507                                 m_freem(mpf);
3508                         mpf = mpl = NULL;
3509                         rxq->tpa_info[agg_index].agg_state =
3510                                                 QLNX_AGG_STATE_ERROR;
3511                         ecore_chain_consume(&rxq->rx_bd_ring);
3512                         rxq->sw_rx_cons =
3513                                 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3514                         continue;
3515                 }
3516
3517                 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3518                         QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
3519                                 " dropping incoming packet and reusing its"
3520                                 " buffer\n", fp->rss_id);
3521
3522                         qlnx_reuse_rx_data(rxq);
3523
3524                         if (mpf != NULL)
3525                                 m_freem(mpf);
3526                         mpf = mpl = NULL;
3527
3528                         rxq->tpa_info[agg_index].agg_state =
3529                                                 QLNX_AGG_STATE_ERROR;
3530
3531                         ecore_chain_consume(&rxq->rx_bd_ring);
3532                         rxq->sw_rx_cons =
3533                                 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3534
3535                         continue;
3536                 }
3537
3538                 mpc->m_flags &= ~M_PKTHDR;
3539                 mpc->m_next = NULL;
3540                 mpc->m_len = cqe->ext_bd_len_list[i];
3541
3542
3543                 if (mpf == NULL) {
3544                         mpf = mpl = mpc;
3545                 } else {
3546                         mpl->m_len = ha->rx_buf_size;
3547                         mpl->m_next = mpc;
3548                         mpl = mpc;
3549                 }
3550
3551                 ecore_chain_consume(&rxq->rx_bd_ring);
3552                 rxq->sw_rx_cons =
3553                         (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3554         }
3555
3556         if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
3557
3558                 QL_DPRINT7(ha, "[%d]: invalid aggregation state, dropping"
3559                         " incoming packet and reusing its buffer\n",
3560                         fp->rss_id);
3561
3562                 QLNX_INC_IQDROPS(ifp);
3563
3564                 rxq->tpa_info[agg_index].mpf = mp;
3565                 rxq->tpa_info[agg_index].mpl = NULL;
3566
3567                 return;
3568         }
3569            
3570         rxq->tpa_info[agg_index].placement_offset = cqe->placement_offset;
3571
3572         if (mpf != NULL) {
3573                 mp->m_len = ha->rx_buf_size;
3574                 mp->m_next = mpf;
3575                 rxq->tpa_info[agg_index].mpf = mp;
3576                 rxq->tpa_info[agg_index].mpl = mpl;
3577         } else {
3578                 mp->m_len = cqe->len_on_first_bd + cqe->placement_offset;
3579                 rxq->tpa_info[agg_index].mpf = mp;
3580                 rxq->tpa_info[agg_index].mpl = mp;
3581                 mp->m_next = NULL;
3582         }
3583
3584         mp->m_flags |= M_PKTHDR;
3585
3586         /* assign packet to this interface */
3587         mp->m_pkthdr.rcvif = ifp;
3588
3589         /* assume no hardware checksum has completed */
3590         mp->m_pkthdr.csum_flags = 0;
3591
3592         //mp->m_pkthdr.flowid = fp->rss_id;
3593         mp->m_pkthdr.flowid = cqe->rss_hash;
3594
3595 #if __FreeBSD_version >= 1100000
3596
3597         hash_type = cqe->bitfields &
3598                         (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
3599                         ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);
3600
3601         switch (hash_type) {
3602
3603         case RSS_HASH_TYPE_IPV4:
3604                 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
3605                 break;
3606
3607         case RSS_HASH_TYPE_TCP_IPV4:
3608                 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
3609                 break;
3610
3611         case RSS_HASH_TYPE_IPV6:
3612                 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
3613                 break;
3614
3615         case RSS_HASH_TYPE_TCP_IPV6:
3616                 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
3617                 break;
3618
3619         default:
3620                 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
3621                 break;
3622         }
3623
3624 #else
3625         mp->m_flags |= M_FLOWID;
3626 #endif
3627
3628         mp->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID |
3629                                         CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3630
3631         mp->m_pkthdr.csum_data = 0xFFFF;
3632
3633         if (CQE_HAS_VLAN(cqe->pars_flags.flags)) {
3634                 mp->m_pkthdr.ether_vtag = le16toh(cqe->vlan_tag);
3635                 mp->m_flags |= M_VLANTAG;
3636         }
3637
3638         rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_START;
3639
3640         QL_DPRINT7(ha, "[%d]: 5\n\tagg_state = %d\n\t mpf = %p mpl = %p\n",
3641                 fp->rss_id, rxq->tpa_info[agg_index].agg_state,
3642                 rxq->tpa_info[agg_index].mpf, rxq->tpa_info[agg_index].mpl);
3643
3644         return;
3645 }
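/*
 * Sketch of the per-aggregation bookkeeping shared by the three TPA
 * handlers (field names below are hypothetical; the state values are the
 * driver's own): tpa_start moves NONE -> START and stashes the chain
 * head, tpa_cont appends to the tail, and tpa_end either hands the chain
 * to the stack or frees it if the state went to ERROR.
 */
#if 0
struct example_tpa_agg {
        struct mbuf     *mpf;             /* head of the aggregated chain */
        struct mbuf     *mpl;             /* tail, where tpa_cont appends */
        uint8_t         agg_state;        /* QLNX_AGG_STATE_{NONE,START,ERROR} */
        uint8_t         placement_offset; /* stripped via m_adj() in tpa_end */
};
#endif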
3646
3647 static void
3648 qlnx_tpa_cont(qlnx_host_t *ha, struct qlnx_fastpath *fp,
3649         struct qlnx_rx_queue *rxq,
3650         struct eth_fast_path_rx_tpa_cont_cqe *cqe)
3651 {
3652         struct sw_rx_data       *sw_rx_data;
3653         int                     i;
3654         struct mbuf             *mpf = NULL, *mpl = NULL, *mpc = NULL;
3655         struct mbuf             *mp;
3656         uint32_t                agg_index;
3657         device_t                dev;
3658
3659         dev = ha->pci_dev;
3660
3661         QL_DPRINT7(ha, "[%d]: enter\n \
3662                 \t type = 0x%x\n \
3663                 \t tpa_agg_index = 0x%x\n \
3664                 \t len_list[0] = 0x%x\n \
3665                 \t len_list[1] = 0x%x\n \
3666                 \t len_list[2] = 0x%x\n \
3667                 \t len_list[3] = 0x%x\n \
3668                 \t len_list[4] = 0x%x\n \
3669                 \t len_list[5] = 0x%x\n",
3670                 fp->rss_id, cqe->type, cqe->tpa_agg_index,
3671                 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
3672                 cqe->len_list[3], cqe->len_list[4], cqe->len_list[5]);
3673
3674         agg_index = cqe->tpa_agg_index;
3675
3676         if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
3677                 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
3678                 fp->err_rx_tpa_invalid_agg_num++;
3679                 return;
3680         }
3681
3682
3683         for (i = 0; i < ETH_TPA_CQE_CONT_LEN_LIST_SIZE; i++) {
3684
3685                 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);
3686
3687                 if (cqe->len_list[i] == 0)
3688                         break;
3689
3690                 if (rxq->tpa_info[agg_index].agg_state != 
3691                         QLNX_AGG_STATE_START) {
3692                         qlnx_reuse_rx_data(rxq);
3693                         continue;
3694                 }
3695
3696                 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3697                 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3698                         BUS_DMASYNC_POSTREAD);
3699
3700                 mpc = sw_rx_data->data;
3701
3702                 if (mpc == NULL) {
3703
3704                         QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
3705
3706                         fp->err_rx_mp_null++;
3707                         if (mpf != NULL)
3708                                 m_freem(mpf);
3709                         mpf = mpl = NULL;
3710                         rxq->tpa_info[agg_index].agg_state =
3711                                                 QLNX_AGG_STATE_ERROR;
3712                         ecore_chain_consume(&rxq->rx_bd_ring);
3713                         rxq->sw_rx_cons =
3714                                 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3715                         continue;
3716                 }
3717
3718                 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3719
3720                         QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
3721                                 " dropping incoming packet and reusing its"
3722                                 " buffer\n", fp->rss_id);
3723
3724                         qlnx_reuse_rx_data(rxq);
3725
3726                         if (mpf != NULL)
3727                                 m_freem(mpf);
3728                         mpf = mpl = NULL;
3729
3730                         rxq->tpa_info[agg_index].agg_state =
3731                                                 QLNX_AGG_STATE_ERROR;
3732
3733                         ecore_chain_consume(&rxq->rx_bd_ring);
3734                         rxq->sw_rx_cons =
3735                                 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3736
3737                         continue;
3738                 }
3739
3740                 mpc->m_flags &= ~M_PKTHDR;
3741                 mpc->m_next = NULL;
3742                 mpc->m_len = cqe->len_list[i];
3743
3744
3745                 if (mpf == NULL) {
3746                         mpf = mpl = mpc;
3747                 } else {
3748                         mpl->m_len = ha->rx_buf_size;
3749                         mpl->m_next = mpc;
3750                         mpl = mpc;
3751                 }
3752
3753                 ecore_chain_consume(&rxq->rx_bd_ring);
3754                 rxq->sw_rx_cons =
3755                         (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3756         }
3757
3758         QL_DPRINT7(ha, "[%d]: 2\n" "\tmpf = %p mpl = %p\n",
3759                   fp->rss_id, mpf, mpl);
3760
3761         if (mpf != NULL) {
3762                 mp = rxq->tpa_info[agg_index].mpl;
3763                 mp->m_len = ha->rx_buf_size;
3764                 mp->m_next = mpf;
3765                 rxq->tpa_info[agg_index].mpl = mpl;
3766         }
3767
3768         return;
3769 }
3770
3771 static int
3772 qlnx_tpa_end(qlnx_host_t *ha, struct qlnx_fastpath *fp,
3773         struct qlnx_rx_queue *rxq,
3774         struct eth_fast_path_rx_tpa_end_cqe *cqe)
3775 {
3776         struct sw_rx_data       *sw_rx_data;
3777         int                     i;
3778         struct mbuf             *mpf = NULL, *mpl = NULL, *mpc = NULL;
3779         struct mbuf             *mp;
3780         uint32_t                agg_index;
3781         uint32_t                len = 0;
3782         struct ifnet            *ifp = ha->ifp;
3783         device_t                dev;
3784
3785         dev = ha->pci_dev;
3786
3787         QL_DPRINT7(ha, "[%d]: enter\n \
3788                 \t type = 0x%x\n \
3789                 \t tpa_agg_index = 0x%x\n \
3790                 \t total_packet_len = 0x%x\n \
3791                 \t num_of_bds = 0x%x\n \
3792                 \t end_reason = 0x%x\n \
3793                 \t num_of_coalesced_segs = 0x%x\n \
3794                 \t ts_delta = 0x%x\n \
3795                 \t len_list[0] = 0x%x\n \
3796                 \t len_list[1] = 0x%x\n \
3797                 \t len_list[2] = 0x%x\n \
3798                 \t len_list[3] = 0x%x\n",
3799                  fp->rss_id, cqe->type, cqe->tpa_agg_index,
3800                 cqe->total_packet_len, cqe->num_of_bds,
3801                 cqe->end_reason, cqe->num_of_coalesced_segs, cqe->ts_delta,
3802                 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
3803                 cqe->len_list[3]);
3804
3805         agg_index = cqe->tpa_agg_index;
3806
3807         if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
3808
3809                 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
3810
3811                 fp->err_rx_tpa_invalid_agg_num++;
3812                 return (0);
3813         }
3814
3815
3816         for (i = 0; i < ETH_TPA_CQE_END_LEN_LIST_SIZE; i++) {
3817
3818                 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);
3819
3820                 if (cqe->len_list[i] == 0)
3821                         break;
3822
3823                 if (rxq->tpa_info[agg_index].agg_state != 
3824                         QLNX_AGG_STATE_START) {
3825
3826                         QL_DPRINT7(ha, "[%d]: 2\n ", fp->rss_id);
3827         
3828                         qlnx_reuse_rx_data(rxq);
3829                         continue;
3830                 }
3831
3832                 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3833                 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3834                         BUS_DMASYNC_POSTREAD);
3835
3836                 mpc = sw_rx_data->data;
3837
3838                 if (mpc == NULL) {
3839
3840                         QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
3841
3842                         fp->err_rx_mp_null++;
3843                         if (mpf != NULL)
3844                                 m_freem(mpf);
3845                         mpf = mpl = NULL;
3846                         rxq->tpa_info[agg_index].agg_state =
3847                                                 QLNX_AGG_STATE_ERROR;
3848                         ecore_chain_consume(&rxq->rx_bd_ring);
3849                         rxq->sw_rx_cons =
3850                                 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3851                         continue;
3852                 }
3853
3854                 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3855                         QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
3856                                 " dropping incoming packet and reusing its"
3857                                 " buffer\n", fp->rss_id);
3858
3859                         qlnx_reuse_rx_data(rxq);
3860
3861                         if (mpf != NULL)
3862                                 m_freem(mpf);
3863                         mpf = mpl = NULL;
3864
3865                         rxq->tpa_info[agg_index].agg_state =
3866                                                 QLNX_AGG_STATE_ERROR;
3867
3868                         ecore_chain_consume(&rxq->rx_bd_ring);
3869                         rxq->sw_rx_cons =
3870                                 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3871
3872                         continue;
3873                 }
3874
3875                 mpc->m_flags &= ~M_PKTHDR;
3876                 mpc->m_next = NULL;
3877                 mpc->m_len = cqe->len_list[i];
3878
3879
3880                 if (mpf == NULL) {
3881                         mpf = mpl = mpc;
3882                 } else {
3883                         mpl->m_len = ha->rx_buf_size;
3884                         mpl->m_next = mpc;
3885                         mpl = mpc;
3886                 }
3887
3888                 ecore_chain_consume(&rxq->rx_bd_ring);
3889                 rxq->sw_rx_cons =
3890                         (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3891         }
3892
3893         QL_DPRINT7(ha, "[%d]: 5\n ", fp->rss_id);
3894
3895         if (mpf != NULL) {
3896
3897                 QL_DPRINT7(ha, "[%d]: 6\n ", fp->rss_id);
3898
3899                 mp = rxq->tpa_info[agg_index].mpl;
3900                 mp->m_len = ha->rx_buf_size;
3901                 mp->m_next = mpf;
3902         }
3903
3904         if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_START) {
3905
3906                 QL_DPRINT7(ha, "[%d]: 7\n ", fp->rss_id);
3907
3908                 if (rxq->tpa_info[agg_index].mpf != NULL)
3909                         m_freem(rxq->tpa_info[agg_index].mpf);
3910                 rxq->tpa_info[agg_index].mpf = NULL;
3911                 rxq->tpa_info[agg_index].mpl = NULL;
3912                 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
3913                 return (0);
3914         }
3915
3916         mp = rxq->tpa_info[agg_index].mpf;
3917         m_adj(mp, rxq->tpa_info[agg_index].placement_offset);
3918         mp->m_pkthdr.len = cqe->total_packet_len;
3919
3920         if (mp->m_next  == NULL)
3921                 mp->m_len = mp->m_pkthdr.len;
3922         else {
3923                 /* compute the length of the mbuf chain */
3924                 mpf = mp;
3925                 while (mpf != NULL) {
3926                         len += mpf->m_len;
3927                         mpf = mpf->m_next;
3928                 }
3929
3930                 if (cqe->total_packet_len > len) {
3931                         mpl = rxq->tpa_info[agg_index].mpl;
3932                         mpl->m_len += (cqe->total_packet_len - len);
3933                 }
3934         }
3935
3936         QLNX_INC_IPACKETS(ifp);
3937         QLNX_INC_IBYTES(ifp, (cqe->total_packet_len));
3938
3939         QL_DPRINT7(ha, "[%d]: 8 csum_data = 0x%x csum_flags = 0x%lx\n \
3940                 m_len = 0x%x m_pkthdr_len = 0x%x\n",
3941                 fp->rss_id, mp->m_pkthdr.csum_data,
3942                 mp->m_pkthdr.csum_flags, mp->m_len, mp->m_pkthdr.len);
3943
3944         (*ifp->if_input)(ifp, mp);
3945
3946         rxq->tpa_info[agg_index].mpf = NULL;
3947         rxq->tpa_info[agg_index].mpl = NULL;
3948         rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
3949
3950         return (cqe->num_of_coalesced_segs);
3951 }
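/*
 * Minimal sketch of the length fix-up at the tail of qlnx_tpa_end():
 * when the CQE's total_packet_len exceeds the sum of the chained mbuf
 * lengths, the difference is credited to the tail mbuf.  Hypothetical
 * helper, assuming head/tail are the aggregation's mpf/mpl.
 */
#if 0
static void
example_tpa_fixup_len(struct mbuf *head, struct mbuf *tail,
        uint32_t total_len)
{
        struct mbuf     *m;
        uint32_t        len = 0;

        for (m = head; m != NULL; m = m->m_next)
                len += m->m_len;

        if (total_len > len)
                tail->m_len += (total_len - len);
}
#endif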
3952
3953 static int
3954 qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
3955         int lro_enable)
3956 {
3957         uint16_t                hw_comp_cons, sw_comp_cons;
3958         int                     rx_pkt = 0;
3959         struct qlnx_rx_queue    *rxq = fp->rxq;
3960         struct ifnet            *ifp = ha->ifp;
3961         struct ecore_dev        *cdev = &ha->cdev;
3962         struct ecore_hwfn       *p_hwfn;
3963
3964 #ifdef QLNX_SOFT_LRO
3965         struct lro_ctrl         *lro;
3966
3967         lro = &rxq->lro;
3968 #endif /* #ifdef QLNX_SOFT_LRO */
3969
3970         hw_comp_cons = le16toh(*rxq->hw_cons_ptr);
3971         sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
3972
3973         p_hwfn = &ha->cdev.hwfns[(fp->rss_id % cdev->num_hwfns)];
3974
3975         /* Memory barrier to prevent the CPU from speculatively reading the
3976          * CQE/BD in the while-loop before reading hw_comp_cons.  If the CQE
3977          * were read before FW writes it, and FW then wrote the CQE and SB
3978          * before the CPU read hw_comp_cons, the CPU would use a stale CQE.
3979          */
3980
3981         /* Loop to complete all indicated BDs */
3982         while (sw_comp_cons != hw_comp_cons) {
3983                 union eth_rx_cqe                *cqe;
3984                 struct eth_fast_path_rx_reg_cqe *fp_cqe;
3985                 struct sw_rx_data               *sw_rx_data;
3986                 register struct mbuf            *mp;
3987                 enum eth_rx_cqe_type            cqe_type;
3988                 uint16_t                        len, pad, len_on_first_bd;
3989                 uint8_t                         *data;
3990 #if __FreeBSD_version >= 1100000
3991                 uint8_t                         hash_type;
3992 #endif /* #if __FreeBSD_version >= 1100000 */
3993
3994                 /* Get the CQE from the completion ring */
3995                 cqe = (union eth_rx_cqe *)
3996                         ecore_chain_consume(&rxq->rx_comp_ring);
3997                 cqe_type = cqe->fast_path_regular.type;
3998
3999                 if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) {
4000                         QL_DPRINT3(ha, "Got a slowpath CQE\n");
4001
4002                         ecore_eth_cqe_completion(p_hwfn,
4003                                         (struct eth_slow_path_rx_cqe *)cqe);
4004                         goto next_cqe;
4005                 }
4006
4007                 if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) {
4008
4009                         switch (cqe_type) {
4010
4011                         case ETH_RX_CQE_TYPE_TPA_START:
4012                                 qlnx_tpa_start(ha, fp, rxq,
4013                                         &cqe->fast_path_tpa_start);
4014                                 fp->tpa_start++;
4015                                 break;
4016
4017                         case ETH_RX_CQE_TYPE_TPA_CONT:
4018                                 qlnx_tpa_cont(ha, fp, rxq,
4019                                         &cqe->fast_path_tpa_cont);
4020                                 fp->tpa_cont++;
4021                                 break;
4022
4023                         case ETH_RX_CQE_TYPE_TPA_END:
4024                                 rx_pkt += qlnx_tpa_end(ha, fp, rxq,
4025                                                 &cqe->fast_path_tpa_end);
4026                                 fp->tpa_end++;
4027                                 break;
4028
4029                         default:
4030                                 break;
4031                         }
4032
4033                         goto next_cqe;
4034                 }
4035
4036                 /* Get the data from the SW ring */
4037                 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4038                 mp = sw_rx_data->data;
4039
4040                 if (mp == NULL) {
4041                         QL_DPRINT1(ha, "mp = NULL\n");
4042                         fp->err_rx_mp_null++;
4043                         rxq->sw_rx_cons  =
4044                                 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4045                         goto next_cqe;
4046                 }
4047                 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4048                         BUS_DMASYNC_POSTREAD);
4049
4050                 /* non GRO */
4051                 fp_cqe = &cqe->fast_path_regular;/* MK CR TPA check assembly */
4052                 len =  le16toh(fp_cqe->pkt_len);
4053                 pad = fp_cqe->placement_offset;
4054
4055                 QL_DPRINT3(ha, "CQE type = %x, flags = %x, vlan = %x,"
4056                         " len %u, parsing flags = %d pad  = %d\n",
4057                         cqe_type, fp_cqe->bitfields,
4058                         le16toh(fp_cqe->vlan_tag),
4059                         len, le16toh(fp_cqe->pars_flags.flags), pad);
4060
4061                 data = mtod(mp, uint8_t *);
4062                 data = data + pad;
4063
4064                 if (0)
4065                         qlnx_dump_buf8(ha, __func__, data, len);
4066
4067                 /* For every Rx BD consumed, we allocate a new BD so the BD ring
4068                  * is always with a fixed size. If allocation fails, we take the
4069                  * consumed BD and return it to the ring in the PROD position.
4070                  * The packet that was received on that BD will be dropped (and
4071                  * not passed to the upper stack).
4072                  */
4073                 /* If this is an error packet then drop it */
4074                 if ((le16toh(cqe->fast_path_regular.pars_flags.flags)) &
4075                         CQE_FLAGS_ERR) {
4076
4077                         QL_DPRINT1(ha, "CQE in CONS = %u has error, flags = %x,"
4078                                 " dropping incoming packet\n", sw_comp_cons,
4079                         le16toh(cqe->fast_path_regular.pars_flags.flags));
4080                         fp->err_rx_hw_errors++;
4081
4082                         qlnx_reuse_rx_data(rxq);
4083
4084                         QLNX_INC_IERRORS(ifp);
4085
4086                         goto next_cqe;
4087                 }
4088
4089                 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4090
4091                         QL_DPRINT1(ha, "New buffer allocation failed, dropping"
4092                                 " incoming packet and reusing its buffer\n");
4093                         qlnx_reuse_rx_data(rxq);
4094
4095                         fp->err_rx_alloc_errors++;
4096
4097                         QLNX_INC_IQDROPS(ifp);
4098
4099                         goto next_cqe;
4100                 }
4101
4102                 ecore_chain_consume(&rxq->rx_bd_ring);
4103
4104                 len_on_first_bd = fp_cqe->len_on_first_bd;
4105                 m_adj(mp, pad);
4106                 mp->m_pkthdr.len = len;
4107
4108                 QL_DPRINT1(ha, "len = %d len_on_first_bd = %d\n",
4109                            len, len_on_first_bd);
4110                 if ((len > 60) && (len > len_on_first_bd)) {
4111
4112                         mp->m_len = len_on_first_bd;
4113
4114                         if (qlnx_rx_jumbo_chain(ha, fp, mp,
4115                                 (len - len_on_first_bd)) != 0) {
4116
4117                                 m_freem(mp);
4118
4119                                 QLNX_INC_IQDROPS(ifp);
4120
4121                                 goto next_cqe;
4122                         }
4123
4124                 } else if (len_on_first_bd < len) {
4125                         fp->err_rx_jumbo_chain_pkts++;
4126                 } else {
4127                         mp->m_len = len;
4128                 }
4129
4130                 mp->m_flags |= M_PKTHDR;
4131
4132                 /* assign packet to this interface */
4133                 mp->m_pkthdr.rcvif = ifp;
4134
4135                 /* assume no hardware checksum has completed */
4136                 mp->m_pkthdr.csum_flags = 0;
4137
4138                 mp->m_pkthdr.flowid = fp_cqe->rss_hash;
4139
4140 #if __FreeBSD_version >= 1100000
4141
4142                 hash_type = fp_cqe->bitfields &
4143                                 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
4144                                 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);
4145
4146                 switch (hash_type) {
4147
4148                 case RSS_HASH_TYPE_IPV4:
4149                         M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
4150                         break;
4151
4152                 case RSS_HASH_TYPE_TCP_IPV4:
4153                         M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
4154                         break;
4155
4156                 case RSS_HASH_TYPE_IPV6:
4157                         M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
4158                         break;
4159
4160                 case RSS_HASH_TYPE_TCP_IPV6:
4161                         M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
4162                         break;
4163
4164                 default:
4165                         M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
4166                         break;
4167                 }
4168
4169 #else
4170                 mp->m_flags |= M_FLOWID;
4171 #endif
4172
4173                 if (CQE_L3_PACKET(fp_cqe->pars_flags.flags)) {
4174                         mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
4175                 }
4176
4177                 if (!(CQE_IP_HDR_ERR(fp_cqe->pars_flags.flags))) {
4178                         mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4179                 }
4180
4181                 if (CQE_L4_HAS_CSUM(fp_cqe->pars_flags.flags)) {
4182                         mp->m_pkthdr.csum_data = 0xFFFF;
4183                         mp->m_pkthdr.csum_flags |=
4184                                 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4185                 }
4186
4187                 if (CQE_HAS_VLAN(fp_cqe->pars_flags.flags)) {
4188                         mp->m_pkthdr.ether_vtag = le16toh(fp_cqe->vlan_tag);
4189                         mp->m_flags |= M_VLANTAG;
4190                 }
4191
4192                 QLNX_INC_IPACKETS(ifp);
4193                 QLNX_INC_IBYTES(ifp, len);
4194
4195 #ifdef QLNX_SOFT_LRO
4196
4197                 if (lro_enable) {
4198
4199 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
4200
4201                         tcp_lro_queue_mbuf(lro, mp);
4202
4203 #else
4204
4205                         if (tcp_lro_rx(lro, mp, 0))
4206                                 (*ifp->if_input)(ifp, mp);
4207
4208 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
4209
4210                 } else {
4211                         (*ifp->if_input)(ifp, mp);
4212                 }
4213 #else
4214
4215                 (*ifp->if_input)(ifp, mp);
4216
4217 #endif /* #ifdef QLNX_SOFT_LRO */
4218
4219                 rx_pkt++;
4220
4221                 rxq->sw_rx_cons  = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4222
4223 next_cqe:       /* don't consume bd rx buffer */
4224                 ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
4225                 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
4226
4227                 /* CR TPA - revisit how to handle budget in TPA perhaps
4228                    increase on "end" */
4229                 if (rx_pkt == budget)
4230                         break;
4231         } /* repeat while sw_comp_cons != hw_comp_cons... */
4232
4233         /* Update producers */
4234         qlnx_update_rx_prod(p_hwfn, rxq);
4235
4236         return (rx_pkt);
4237 }
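/*
 * Sketch of the ring-index arithmetic used throughout qlnx_rx_int():
 * RX_RING_SIZE is a power of two, so advancing a producer/consumer index
 * is a mask rather than a modulo.  Hypothetical helper.
 */
#if 0
static __inline uint16_t
example_ring_next(uint16_t idx)
{
        return ((idx + 1) & (RX_RING_SIZE - 1));
}
#endif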
4238
4239 /*
4240  * fast path interrupt
4241  */
4242
4243 static void
4244 qlnx_fp_isr(void *arg)
4245 {
4246         qlnx_ivec_t             *ivec = arg;
4247         qlnx_host_t             *ha;
4248         struct qlnx_fastpath    *fp = NULL;
4249         int                     idx;
4250
4251         ha = ivec->ha;
4252
4253         if (ha->state != QLNX_STATE_OPEN) {
4254                 return;
4255         }
4256
4257         idx = ivec->rss_idx;
4258
4259         if (idx >= ha->num_rss) {
4260                 QL_DPRINT1(ha, "illegal interrupt[%d]\n", idx);
4261                 ha->err_illegal_intr++;
4262                 return;
4263         }
4264         fp = &ha->fp_array[idx];
4265
4266         if (fp == NULL) {
4267                 ha->err_fp_null++;
4268         } else {
4269                 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
4270                 if (fp->fp_taskqueue != NULL)
4271                         taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
4272         }
4273
4274         return;
4275 }
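/*
 * qlnx_fp_isr() follows the usual FreeBSD deferred-work pattern: silence
 * the interrupt source in the handler, then push the heavy lifting to a
 * taskqueue.  A generic sketch with hypothetical names:
 */
#if 0
static void
example_isr(void *arg)
{
        struct example_softc *sc = arg;

        example_intr_disable(sc);               /* cheap, ISR context */
        taskqueue_enqueue(sc->tq, &sc->task);   /* defer the real work */
}
#endif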
4276
4277
4278 /*
4279  * slow path interrupt processing function
4280  * can be invoked in polled mode or in interrupt mode via taskqueue.
4281  */
4282 void
4283 qlnx_sp_isr(void *arg)
4284 {
4285         struct ecore_hwfn       *p_hwfn;
4286         qlnx_host_t             *ha;
4287         
4288         p_hwfn = arg;
4289
4290         ha = (qlnx_host_t *)p_hwfn->p_dev;
4291
4292         ha->sp_interrupts++;
4293
4294         QL_DPRINT2(ha, "enter\n");
4295
4296         ecore_int_sp_dpc(p_hwfn);
4297
4298         QL_DPRINT2(ha, "exit\n");
4299         
4300         return;
4301 }
4302
4303 /*****************************************************************************
4304  * Support Functions for DMA'able Memory
4305  *****************************************************************************/
4306
4307 static void
4308 qlnx_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
4309 {
4310         *((bus_addr_t *)arg) = 0;
4311
4312         if (error) {
4313                 printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
4314                 return;
4315         }
4316
4317         *((bus_addr_t *)arg) = segs[0].ds_addr;
4318
4319         return;
4320 }
4321
4322 static int
4323 qlnx_alloc_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
4324 {
4325         int             ret = 0;
4326         device_t        dev;
4327         bus_addr_t      b_addr;
4328
4329         dev = ha->pci_dev;
4330
4331         ret = bus_dma_tag_create(
4332                         ha->parent_tag,/* parent */
4333                         dma_buf->alignment,
4334                         ((bus_size_t)(1ULL << 32)),/* boundary */
4335                         BUS_SPACE_MAXADDR,      /* lowaddr */
4336                         BUS_SPACE_MAXADDR,      /* highaddr */
4337                         NULL, NULL,             /* filter, filterarg */
4338                         dma_buf->size,          /* maxsize */
4339                         1,                      /* nsegments */
4340                         dma_buf->size,          /* maxsegsize */
4341                         0,                      /* flags */
4342                         NULL, NULL,             /* lockfunc, lockarg */
4343                         &dma_buf->dma_tag);
4344
4345         if (ret) {
4346                 QL_DPRINT1(ha, "could not create dma tag\n");
4347                 goto qlnx_alloc_dmabuf_exit;
4348         }
4349         ret = bus_dmamem_alloc(dma_buf->dma_tag,
4350                         (void **)&dma_buf->dma_b,
4351                         (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
4352                         &dma_buf->dma_map);
4353         if (ret) {
4354                 bus_dma_tag_destroy(dma_buf->dma_tag);
4355                 QL_DPRINT1(ha, "bus_dmamem_alloc failed\n");
4356                 goto qlnx_alloc_dmabuf_exit;
4357         }
4358
4359         ret = bus_dmamap_load(dma_buf->dma_tag,
4360                         dma_buf->dma_map,
4361                         dma_buf->dma_b,
4362                         dma_buf->size,
4363                         qlnx_dmamap_callback,
4364                         &b_addr, BUS_DMA_NOWAIT);
4365
4366         if (ret || !b_addr) {
4367                 bus_dma_tag_destroy(dma_buf->dma_tag);
4368                 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
4369                         dma_buf->dma_map);
4370                 ret = -1;
4371                 goto qlnx_alloc_dmabuf_exit;
4372         }
4373
4374         dma_buf->dma_addr = b_addr;
4375
4376 qlnx_alloc_dmabuf_exit:
4377
4378         return ret;
4379 }
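/*
 * Hypothetical caller of qlnx_alloc_dmabuf(): the helper runs the usual
 * busdma tag-create/alloc/load sequence, after which dma_b is the CPU
 * mapping and dma_addr the bus address programmed into the device.
 */
#if 0
static int
example_alloc(qlnx_host_t *ha, qlnx_dma_t *buf)
{
        buf->size = PAGE_SIZE;
        buf->alignment = 8;

        if (qlnx_alloc_dmabuf(ha, buf) != 0)
                return (ENOMEM);

        /* buf->dma_b: kernel virtual address; buf->dma_addr: device view */
        return (0);
}
#endif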
4380
4381 static void
4382 qlnx_free_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
4383 {
4384         bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
4385         bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
4386         bus_dma_tag_destroy(dma_buf->dma_tag);
4387         return;
4388 }
4389
4390 void *
4391 qlnx_dma_alloc_coherent(void *ecore_dev, bus_addr_t *phys, uint32_t size)
4392 {
4393         qlnx_dma_t      dma_buf;
4394         qlnx_dma_t      *dma_p;
4395         qlnx_host_t     *ha;
4396         device_t        dev;
4397
4398         ha = (qlnx_host_t *)ecore_dev;
4399         dev = ha->pci_dev;
4400
4401         size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
4402
4403         memset(&dma_buf, 0, sizeof (qlnx_dma_t));
4404
4405         dma_buf.size = size + PAGE_SIZE;
4406         dma_buf.alignment = 8;
4407
4408         if (qlnx_alloc_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf) != 0)
4409                 return (NULL);
4410         bzero((uint8_t *)dma_buf.dma_b, dma_buf.size);
4411
4412         *phys = dma_buf.dma_addr;
4413
4414         dma_p = (qlnx_dma_t *)((uint8_t *)dma_buf.dma_b + size);
4415
4416         memcpy(dma_p, &dma_buf, sizeof(qlnx_dma_t));
4417 /*
4418         QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
4419                 (void *)dma_buf.dma_map, (void *)dma_buf.dma_tag,
4420                 dma_buf.dma_b, (void *)dma_buf.dma_addr, size);
4421 */
4422         return (dma_buf.dma_b);
4423 }
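/*
 * Sketch of the trailer trick used above: a copy of the qlnx_dma_t
 * descriptor is stashed just past the page-rounded payload, so the free
 * path below (qlnx_dma_free_coherent()) can recover the tag/map from the
 * virtual address and size alone.
 *
 *      [ payload: size bytes, page rounded | qlnx_dma_t copy ]
 */
#if 0
size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);     /* same rounding */
dma_p = (qlnx_dma_t *)((uint8_t *)v_addr + size);       /* recover on free */
#endif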
4424
4425 void
4426 qlnx_dma_free_coherent(void *ecore_dev, void *v_addr, bus_addr_t phys,
4427         uint32_t size)
4428 {
4429         qlnx_dma_t dma_buf, *dma_p;
4430         qlnx_host_t     *ha;
4431         device_t        dev;
4432
4433         ha = (qlnx_host_t *)ecore_dev;
4434         dev = ha->pci_dev;
4435
4436         if (v_addr == NULL)
4437                 return;
4438
4439         size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
4440
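        /* recover the bookkeeping copy stashed in the trailing page at alloc time */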
4441         dma_p = (qlnx_dma_t *)((uint8_t *)v_addr + size);
4442 /*
4443         QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
4444                 (void *)dma_p->dma_map, (void *)dma_p->dma_tag,
4445                 dma_p->dma_b, (void *)dma_p->dma_addr, size);
4446 */
4447         dma_buf = *dma_p;
4448
4449         qlnx_free_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf);
4450         return;
4451 }
4452
4453 static int
4454 qlnx_alloc_parent_dma_tag(qlnx_host_t *ha)
4455 {
4456         int             ret;
4457         device_t        dev;
4458
4459         dev = ha->pci_dev;
4460
4461         /*
4462          * Allocate parent DMA Tag
4463          */
4464         ret = bus_dma_tag_create(
4465                         bus_get_dma_tag(dev),   /* parent */
4466                         1, ((bus_size_t)(1ULL << 32)), /* alignment, boundary */
4467                         BUS_SPACE_MAXADDR,      /* lowaddr */
4468                         BUS_SPACE_MAXADDR,      /* highaddr */
4469                         NULL, NULL,             /* filter, filterarg */
4470                         BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
4471                         0,                      /* nsegments */
4472                         BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
4473                         0,                      /* flags */
4474                         NULL, NULL,             /* lockfunc, lockarg */
4475                         &ha->parent_tag);
4476
4477         if (ret) {
4478                 QL_DPRINT1(ha, "could not create parent dma tag\n");
4479                 return (-1);
4480         }
4481
4482         ha->flags.parent_tag = 1;
4483
4484         return (0);
4485 }
4486
4487 static void
4488 qlnx_free_parent_dma_tag(qlnx_host_t *ha)
4489 {
4490         if (ha->parent_tag != NULL) {
4491                 bus_dma_tag_destroy(ha->parent_tag);
4492                 ha->parent_tag = NULL;
4493         }
4494         return;
4495 }
4496
4497 static int
4498 qlnx_alloc_tx_dma_tag(qlnx_host_t *ha)
4499 {
4500         if (bus_dma_tag_create(NULL,    /* parent */
4501                 1, 0,    /* alignment, bounds */
4502                 BUS_SPACE_MAXADDR,       /* lowaddr */
4503                 BUS_SPACE_MAXADDR,       /* highaddr */
4504                 NULL, NULL,      /* filter, filterarg */
4505                 QLNX_MAX_TSO_FRAME_SIZE,     /* maxsize */
4506                 QLNX_MAX_SEGMENTS,        /* nsegments */
4507                 (PAGE_SIZE * 4),        /* maxsegsize */
4508                 BUS_DMA_ALLOCNOW,        /* flags */
4509                 NULL,    /* lockfunc */
4510                 NULL,    /* lockfuncarg */
4511                 &ha->tx_tag)) {
4512
4513                 QL_DPRINT1(ha, "tx_tag alloc failed\n");
4514                 return (-1);
4515         }
4516
4517         return (0);
4518 }
4519
4520 static void
4521 qlnx_free_tx_dma_tag(qlnx_host_t *ha)
4522 {
4523         if (ha->tx_tag != NULL) {
4524                 bus_dma_tag_destroy(ha->tx_tag);
4525                 ha->tx_tag = NULL;
4526         }
4527         return;
4528 }
4529
4530 static int
4531 qlnx_alloc_rx_dma_tag(qlnx_host_t *ha)
4532 {
4533         if (bus_dma_tag_create(NULL,    /* parent */
4534                         1, 0,    /* alignment, bounds */
4535                         BUS_SPACE_MAXADDR,       /* lowaddr */
4536                         BUS_SPACE_MAXADDR,       /* highaddr */
4537                         NULL, NULL,      /* filter, filterarg */
4538                         MJUM9BYTES,     /* maxsize */
4539                         1,        /* nsegments */
4540                         MJUM9BYTES,        /* maxsegsize */
4541                         BUS_DMA_ALLOCNOW,        /* flags */
4542                         NULL,    /* lockfunc */
4543                         NULL,    /* lockfuncarg */
4544                         &ha->rx_tag)) {
4545
4546                 QL_DPRINT1(ha, "rx_tag alloc failed\n");
4547
4548                 return (-1);
4549         }
4550         return (0);
4551 }
4552
4553 static void
4554 qlnx_free_rx_dma_tag(qlnx_host_t *ha)
4555 {
4556         if (ha->rx_tag != NULL) {
4557                 bus_dma_tag_destroy(ha->rx_tag);
4558                 ha->rx_tag = NULL;
4559         }
4560         return;
4561 }
4562
4563 /*********************************
4564  * Exported functions
4565  *********************************/
4566 uint32_t
4567 qlnx_pci_bus_get_bar_size(void *ecore_dev, uint8_t bar_id)
4568 {
4569         uint32_t bar_size;
4570
4571         bar_id = bar_id * 2;
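        /* the adapter's BARs are 64-bit, so each logical BAR spans two PCI BAR registers */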
4572
4573         bar_size = bus_get_resource_count(((qlnx_host_t *)ecore_dev)->pci_dev,
4574                                 SYS_RES_MEMORY,
4575                                 PCIR_BAR(bar_id));
4576
4577         return (bar_size);
4578 }
4579
4580 uint32_t
4581 qlnx_pci_read_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t *reg_value)
4582 {
4583         *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
4584                                 pci_reg, 1);
4585         return 0;
4586 }
4587
4588 uint32_t
4589 qlnx_pci_read_config_word(void *ecore_dev, uint32_t pci_reg,
4590         uint16_t *reg_value)
4591 {
4592         *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
4593                                 pci_reg, 2);
4594         return 0;
4595 }
4596
4597 uint32_t
4598 qlnx_pci_read_config_dword(void *ecore_dev, uint32_t pci_reg,
4599         uint32_t *reg_value)
4600 {
4601         *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
4602                                 pci_reg, 4);
4603         return 0;
4604 }
4605
4606 void
4607 qlnx_pci_write_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t reg_value)
4608 {
4609         pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
4610                 pci_reg, reg_value, 1);
4611         return;
4612 }
4613
4614 void
4615 qlnx_pci_write_config_word(void *ecore_dev, uint32_t pci_reg,
4616         uint16_t reg_value)
4617 {
4618         pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
4619                 pci_reg, reg_value, 2);
4620         return;
4621 }
4622
4623 void
4624 qlnx_pci_write_config_dword(void *ecore_dev, uint32_t pci_reg,
4625         uint32_t reg_value)
4626 {
4627         pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
4628                 pci_reg, reg_value, 4);
4629         return;
4630 }
4631
4632
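/*
 * Note: the 'cap' argument is unused; this routine always looks up the
 * PCI Express capability.
 */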
4633 int
4634 qlnx_pci_find_capability(void *ecore_dev, int cap)
4635 {
4636         int             reg;
4637         qlnx_host_t     *ha;
4638
4639         ha = ecore_dev;
4640
4641         if (pci_find_cap(ha->pci_dev, PCIY_EXPRESS, &reg) == 0)
4642                 return reg;
4643         else {
4644                 QL_DPRINT1(ha, "failed\n");
4645                 return 0;
4646         }
4647 }
4648
4649 uint32_t
4650 qlnx_reg_rd32(void *hwfn, uint32_t reg_addr)
4651 {
4652         uint32_t                data32;
4653         struct ecore_dev        *cdev;
4654         struct ecore_hwfn       *p_hwfn;
4655
4656         p_hwfn = hwfn;
4657
4658         cdev = p_hwfn->p_dev;
4659
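        /* rebase the hwfn-relative offset onto the device-wide register BAR mapping */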
4660         reg_addr = (uint32_t)((uint8_t *)(p_hwfn->regview) -
4661                         (uint8_t *)(cdev->regview)) + reg_addr;
4662
4663         data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, reg_addr);
4664
4665         return (data32);
4666 }
4667
4668 void
4669 qlnx_reg_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
4670 {
4671         struct ecore_dev        *cdev;
4672         struct ecore_hwfn       *p_hwfn;
4673
4674         p_hwfn = hwfn;
4675
4676         cdev = p_hwfn->p_dev;
4677
4678         reg_addr = (uint32_t)((uint8_t *)(p_hwfn->regview) -
4679                         (uint8_t *)(cdev->regview)) + reg_addr;
4680
4681         bus_write_4(((qlnx_host_t *)cdev)->pci_reg, reg_addr, value);
4682
4683         return;
4684 }
4685
4686 void
4687 qlnx_reg_wr16(void *hwfn, uint32_t reg_addr, uint16_t value)
4688 {
4689         struct ecore_dev        *cdev;
4690         struct ecore_hwfn       *p_hwfn;
4691
4692         p_hwfn = hwfn;
4693
4694         cdev = p_hwfn->p_dev;
4695
4696         reg_addr = (uint32_t)((uint8_t *)(p_hwfn->regview) -
4697                         (uint8_t *)(cdev->regview)) + reg_addr;
4698
4699         bus_write_2(((qlnx_host_t *)cdev)->pci_reg, reg_addr, value);
4700
4701         return;
4702 }
4703
4704 void
4705 qlnx_dbell_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
4706 {
4707         struct ecore_dev        *cdev;
4708         struct ecore_hwfn       *p_hwfn;
4709
4710         p_hwfn = hwfn;
4711
4712         cdev = p_hwfn->p_dev;
4713
4714         reg_addr = (uint32_t)((uint8_t *)(p_hwfn->doorbells) -
4715                         (uint8_t *)(cdev->doorbells)) + reg_addr;
4716
4717         bus_write_4(((qlnx_host_t *)cdev)->pci_dbells, reg_addr, value);
4718
4719         return;
4720 }
4721
4722 uint32_t
4723 qlnx_direct_reg_rd32(void *p_hwfn, uint32_t *reg_addr)
4724 {
4725         uint32_t                data32;
4726         uint32_t                offset;
4727         struct ecore_dev        *cdev;
4728
4729         cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
4730         offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
4731
4732         data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, offset);
4733
4734         return (data32);
4735 }
4736
4737 void
4738 qlnx_direct_reg_wr32(void *p_hwfn, void *reg_addr, uint32_t value)
4739 {
4740         uint32_t                offset;
4741         struct ecore_dev        *cdev;
4742
4743         cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
4744         offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
4745
4746         bus_write_4(((qlnx_host_t *)cdev)->pci_reg, offset, value);
4747
4748         return;
4749 }
4750
4751 void *
4752 qlnx_zalloc(uint32_t size)
4753 {
4754         caddr_t va;
4755
4756         /* M_ZERO: avoids bzero()'ing a NULL pointer if the allocation fails */
4757         va = malloc((unsigned long)size, M_QLNXBUF, (M_NOWAIT | M_ZERO));
4758         return ((void *)va);
4759 }
4760
4761 void
4762 qlnx_barrier(void *p_hwfn)
4763 {
4764         qlnx_host_t     *ha;
4765
4766         ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
4767         bus_barrier(ha->pci_reg,  0, 0, BUS_SPACE_BARRIER_WRITE);
4768 }
4769
4770 void
4771 qlnx_link_update(void *p_hwfn)
4772 {
4773         qlnx_host_t     *ha;
4774         int             prev_link_state;
4775
4776         ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
4777
4778         qlnx_fill_link(p_hwfn, &ha->if_link);
4779
4780         prev_link_state = ha->link_up;
4781         ha->link_up = ha->if_link.link_up;
4782
4783         if (prev_link_state !=  ha->link_up) {
4784                 if (ha->link_up) {
4785                         if_link_state_change(ha->ifp, LINK_STATE_UP);
4786                 } else {
4787                         if_link_state_change(ha->ifp, LINK_STATE_DOWN);
4788                 }
4789         }
4790         return;
4791 }
4792
4793 void
4794 qlnx_fill_link(struct ecore_hwfn *hwfn, struct qlnx_link_output *if_link)
4795 {
4796         struct ecore_mcp_link_params    link_params;
4797         struct ecore_mcp_link_state     link_state;
4798
4799         memset(if_link, 0, sizeof(*if_link));
4800         memset(&link_params, 0, sizeof(struct ecore_mcp_link_params));
4801         memset(&link_state, 0, sizeof(struct ecore_mcp_link_state));
4802
4803         /* Prepare source inputs */
4804         /* we only deal with physical functions */
4805         memcpy(&link_params, ecore_mcp_get_link_params(hwfn),
4806                 sizeof(link_params));
4807         memcpy(&link_state, ecore_mcp_get_link_state(hwfn),
4808                 sizeof(link_state));
4809
4810         ecore_mcp_get_media_type(hwfn->p_dev, &if_link->media_type);
4811
4812         /* Set the link parameters to pass to protocol driver */
4813         if (link_state.link_up) {
4814                 if_link->link_up = true;
4815                 if_link->speed = link_state.speed;
4816         }
4817
4818         if_link->supported_caps = QLNX_LINK_CAP_FIBRE;
4819
4820         if (link_params.speed.autoneg)
4821                 if_link->supported_caps |= QLNX_LINK_CAP_Autoneg;
4822
4823         if (link_params.pause.autoneg ||
4824                 (link_params.pause.forced_rx && link_params.pause.forced_tx))
4825                 if_link->supported_caps |= QLNX_LINK_CAP_Asym_Pause;
4826
4827         if (link_params.pause.autoneg || link_params.pause.forced_rx ||
4828                 link_params.pause.forced_tx)
4829                 if_link->supported_caps |= QLNX_LINK_CAP_Pause;
4830
4831         if (link_params.speed.advertised_speeds &
4832                 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
4833                 if_link->supported_caps |= QLNX_LINK_CAP_1000baseT_Half |
4834                                            QLNX_LINK_CAP_1000baseT_Full;
4835
4836         if (link_params.speed.advertised_speeds &
4837                 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
4838                 if_link->supported_caps |= QLNX_LINK_CAP_10000baseKR_Full;
4839
4840         if (link_params.speed.advertised_speeds &
4841                 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
4842                 if_link->supported_caps |= QLNX_LINK_CAP_25000baseKR_Full;
4843
4844         if (link_params.speed.advertised_speeds &
4845                 NVM_CFG1_PORT_DRV_LINK_SPEED_40G)
4846                 if_link->supported_caps |= QLNX_LINK_CAP_40000baseLR4_Full;
4847
4848         if (link_params.speed.advertised_speeds &
4849                 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
4850                 if_link->supported_caps |= QLNX_LINK_CAP_50000baseKR2_Full;
4851
4852         if (link_params.speed.advertised_speeds &
4853                 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
4854                 if_link->supported_caps |= QLNX_LINK_CAP_100000baseKR4_Full;
4855
4856         if_link->advertised_caps = if_link->supported_caps;
4857
4858         if_link->autoneg = link_params.speed.autoneg;
4859         if_link->duplex = QLNX_LINK_DUPLEX;
4860
4861         /* Link partner capabilities */
4862
4863         if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_HD)
4864                 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Half;
4865
4866         if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_FD)
4867                 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Full;
4868
4869         if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_10G)
4870                 if_link->link_partner_caps |= QLNX_LINK_CAP_10000baseKR_Full;
4871
4872         if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_25G)
4873                 if_link->link_partner_caps |= QLNX_LINK_CAP_25000baseKR_Full;
4874
4875         if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_40G)
4876                 if_link->link_partner_caps |= QLNX_LINK_CAP_40000baseLR4_Full;
4877
4878         if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_50G)
4879                 if_link->link_partner_caps |= QLNX_LINK_CAP_50000baseKR2_Full;
4880
4881         if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_100G)
4882                 if_link->link_partner_caps |= QLNX_LINK_CAP_100000baseKR4_Full;
4883
4884         if (link_state.an_complete)
4885                 if_link->link_partner_caps |= QLNX_LINK_CAP_Autoneg;
4886
4887         if (link_state.partner_adv_pause)
4888                 if_link->link_partner_caps |= QLNX_LINK_CAP_Pause;
4889
4890         if ((link_state.partner_adv_pause ==
4891                 ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE) ||
4892                 (link_state.partner_adv_pause ==
4893                         ECORE_LINK_PARTNER_BOTH_PAUSE))
4894                 if_link->link_partner_caps |= QLNX_LINK_CAP_Asym_Pause;
4895
4896         return;
4897 }
4898
4899 static int
4900 qlnx_nic_setup(struct ecore_dev *cdev, struct ecore_pf_params *func_params)
4901 {
4902         int     rc, i;
4903
4904         for (i = 0; i < cdev->num_hwfns; i++) {
4905                 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
4906                 p_hwfn->pf_params = *func_params;
4907         }
4908
4909         rc = ecore_resc_alloc(cdev);
4910         if (rc)
4911                 goto qlnx_nic_setup_exit;
4912
4913         ecore_resc_setup(cdev);
4914
4915 qlnx_nic_setup_exit:
4916
4917         return rc;
4918 }
4919
4920 static int
4921 qlnx_nic_start(struct ecore_dev *cdev)
4922 {
4923         int                             rc;
4924         struct ecore_hw_init_params     params;
4925
4926         bzero(&params, sizeof (struct ecore_hw_init_params));
4927
4928         params.p_tunn = NULL;
4929         params.b_hw_start = true;
4930         params.int_mode = cdev->int_mode;
4931         params.allow_npar_tx_switch = true;
4932         params.bin_fw_data = NULL;
4933
4934         rc = ecore_hw_init(cdev, &params);
4935         if (rc) {
4936                 ecore_resc_free(cdev);
4937                 return rc;
4938         }
4939
4940         return 0;
4941 }
4942
4943 static int
4944 qlnx_slowpath_start(qlnx_host_t *ha)
4945 {
4946         struct ecore_dev        *cdev;
4947         struct ecore_pf_params  pf_params;
4948         int                     rc;
4949
4950         memset(&pf_params, 0, sizeof(struct ecore_pf_params));
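        /* one Rx connection plus num_tc Tx connections for every RSS queue */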
4951         pf_params.eth_pf_params.num_cons  =
4952                 (ha->num_rss) * (ha->num_tc + 1);
4953
4954         cdev = &ha->cdev;
4955
4956         rc = qlnx_nic_setup(cdev, &pf_params);
4957         if (rc)
4958                 goto qlnx_slowpath_start_exit;
4959
4960         cdev->int_mode = ECORE_INT_MODE_MSIX;
4961         cdev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE;
4962
4963 #ifdef QLNX_MAX_COALESCE
4964         cdev->rx_coalesce_usecs = 255;
4965         cdev->tx_coalesce_usecs = 255;
4966 #endif
4967
4968         rc = qlnx_nic_start(cdev);
4969
4970         ha->rx_coalesce_usecs = cdev->rx_coalesce_usecs;
4971         ha->tx_coalesce_usecs = cdev->tx_coalesce_usecs;
4972
4973 qlnx_slowpath_start_exit:
4974
4975         return (rc);
4976 }
4977
4978 static int
4979 qlnx_slowpath_stop(qlnx_host_t *ha)
4980 {
4981         struct ecore_dev        *cdev;
4982         device_t                dev = ha->pci_dev;
4983         int                     i;
4984
4985         cdev = &ha->cdev;
4986
4987         ecore_hw_stop(cdev);
4988
4989         for (i = 0; i < ha->cdev.num_hwfns; i++) {
4990
4991                 if (ha->sp_handle[i])
4992                         (void)bus_teardown_intr(dev, ha->sp_irq[i],
4993                                 ha->sp_handle[i]);
4994
4995                 ha->sp_handle[i] = NULL;
4996
4997                 if (ha->sp_irq[i])
4998                         (void) bus_release_resource(dev, SYS_RES_IRQ,
4999                                 ha->sp_irq_rid[i], ha->sp_irq[i]);
5000                 ha->sp_irq[i] = NULL;
5001         }
5002
5003         ecore_resc_free(cdev);
5004
5005         return 0;
5006 }
5007
5008 static void
5009 qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
5010         char ver_str[VER_SIZE])
5011 {
5012         int     i;
5013
5014         memcpy(cdev->name, name, NAME_SIZE);
5015
5016         for_each_hwfn(cdev, i) {
5017                 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
5018         }
5019
5020         cdev->drv_type = DRV_ID_DRV_TYPE_FREEBSD;
5021
5022         return;
5023 }
5024
5025 void
5026 qlnx_get_protocol_stats(void *cdev, int proto_type, void *proto_stats)
5027 {
5028         enum ecore_mcp_protocol_type    type;
5029         union ecore_mcp_protocol_stats  *stats;
5030         struct ecore_eth_stats          eth_stats;
5031         qlnx_host_t                     *ha;
5032
5033         ha = cdev;
5034         stats = proto_stats;
5035         type = proto_type;
5036
5037         switch (type) {
5038
5039         case ECORE_MCP_LAN_STATS:
5040                 ecore_get_vport_stats((struct ecore_dev *)cdev, &eth_stats);
5041                 stats->lan_stats.ucast_rx_pkts = eth_stats.common.rx_ucast_pkts;
5042                 stats->lan_stats.ucast_tx_pkts = eth_stats.common.tx_ucast_pkts;
5043                 stats->lan_stats.fcs_err = -1;
5044                 break;
5045
5046         default:
5047                 ha->err_get_proto_invalid_type++;
5048
5049                 QL_DPRINT1(ha, "invalid protocol type 0x%x\n", type);
5050                 break;
5051         }
5052         return;
5053 }
5054
5055 static int
5056 qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver)
5057 {
5058         struct ecore_hwfn       *p_hwfn;
5059         struct ecore_ptt        *p_ptt;
5060
5061         p_hwfn = &ha->cdev.hwfns[0];
5062         p_ptt = ecore_ptt_acquire(p_hwfn);
5063
5064         if (p_ptt == NULL) {
5065                 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
5066                 return (-1);
5067         }
5068         ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, mfw_ver, NULL);
5069         
5070         ecore_ptt_release(p_hwfn, p_ptt);
5071
5072         return (0);
5073 }
5074
5075 static int
5076 qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size)
5077 {
5078         struct ecore_hwfn       *p_hwfn;
5079         struct ecore_ptt        *p_ptt;
5080
5081         p_hwfn = &ha->cdev.hwfns[0];
5082         p_ptt = ecore_ptt_acquire(p_hwfn);
5083
5084         if (p_ptt == NULL) {
5085                 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
5086                 return (-1);
5087         }
5088         ecore_mcp_get_flash_size(p_hwfn, p_ptt, flash_size);
5089         
5090         ecore_ptt_release(p_hwfn, p_ptt);
5091
5092         return (0);
5093 }
5094
5095 static int
5096 qlnx_alloc_mem_arrays(qlnx_host_t *ha)
5097 {
5098         struct ecore_dev        *cdev;
5099
5100         cdev = &ha->cdev;
5101
5102         bzero(&ha->txq_array[0], (sizeof(struct qlnx_tx_queue) * QLNX_MAX_RSS));
5103         bzero(&ha->rxq_array[0], (sizeof(struct qlnx_rx_queue) * QLNX_MAX_RSS));
5104         bzero(&ha->sb_array[0], (sizeof(struct ecore_sb_info) * QLNX_MAX_RSS));
5105
5106         return 0;
5107 }
5108
5109 static void
5110 qlnx_init_fp(qlnx_host_t *ha)
5111 {
5112         int rss_id, txq_array_index, tc;
5113
5114         for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
5115
5116                 struct qlnx_fastpath *fp = &ha->fp_array[rss_id];
5117
5118                 fp->rss_id = rss_id;
5119                 fp->edev = ha;
5120                 fp->sb_info = &ha->sb_array[rss_id];
5121                 fp->rxq = &ha->rxq_array[rss_id];
5122                 fp->rxq->rxq_id = rss_id;
5123
5124                 for (tc = 0; tc < ha->num_tc; tc++) {
5125                         txq_array_index = tc * ha->num_rss + rss_id;
5126                         fp->txq[tc] = &ha->txq_array[txq_array_index];
5127                         fp->txq[tc]->index = txq_array_index;
5128                 }
5129
5130                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", qlnx_name_str,
5131                         rss_id);
5132
5133                 fp->tx_ring_full = 0;
5134
5135                 /* reset all the statistics counters */
5136
5137                 fp->tx_pkts_processed = 0;
5138                 fp->tx_pkts_freed = 0;
5139                 fp->tx_pkts_transmitted = 0;
5140                 fp->tx_pkts_completed = 0;
5141                 fp->tx_lso_wnd_min_len = 0;
5142                 fp->tx_defrag = 0;
5143                 fp->tx_nsegs_gt_elem_left = 0;
5144                 fp->tx_tso_max_nsegs = 0;
5145                 fp->tx_tso_min_nsegs = 0;
5146                 fp->err_tx_nsegs_gt_elem_left = 0;
5147                 fp->err_tx_dmamap_create = 0;
5148                 fp->err_tx_defrag_dmamap_load = 0;
5149                 fp->err_tx_non_tso_max_seg = 0;
5150                 fp->err_tx_dmamap_load = 0;
5151                 fp->err_tx_defrag = 0;
5152                 fp->err_tx_free_pkt_null = 0;
5153                 fp->err_tx_cons_idx_conflict = 0;
5154
5155                 fp->rx_pkts = 0;
5156                 fp->err_m_getcl = 0;
5157                 fp->err_m_getjcl = 0;
5158         }
5159         return;
5160 }
5161
5162 static void
5163 qlnx_free_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info)
5164 {
5165         struct ecore_dev        *cdev;
5166
5167         cdev = &ha->cdev;
5168
5169         if (sb_info->sb_virt) {
5170                 OSAL_DMA_FREE_COHERENT(cdev, ((void *)sb_info->sb_virt),
5171                         (sb_info->sb_phys), (sizeof(*sb_info->sb_virt)));
5172                 sb_info->sb_virt = NULL;
5173         }
5174 }
5175
5176 static int
5177 qlnx_sb_init(struct ecore_dev *cdev, struct ecore_sb_info *sb_info,
5178         void *sb_virt_addr, bus_addr_t sb_phy_addr, u16 sb_id)
5179 {
5180         struct ecore_hwfn       *p_hwfn;
5181         int                     hwfn_index, rc;
5182         u16                     rel_sb_id;
5183
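        /*
         * Status blocks are distributed round-robin across the hw-functions
         * (CMT devices have more than one): sb_id modulo num_hwfns selects
         * the engine, the quotient is the id relative to that engine.
         */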
5184         hwfn_index = sb_id % cdev->num_hwfns;
5185         p_hwfn = &cdev->hwfns[hwfn_index];
5186         rel_sb_id = sb_id / cdev->num_hwfns;
5187
5188         QL_DPRINT2(((qlnx_host_t *)cdev),
5189                 "hwfn_index = %d p_hwfn = %p sb_id = 0x%x rel_sb_id = 0x%x "
5190                 "sb_info = %p sb_virt_addr = %p sb_phy_addr = %p\n",
5191                 hwfn_index, p_hwfn, sb_id, rel_sb_id, sb_info,
5192                 sb_virt_addr, (void *)sb_phy_addr);
5193
5194         rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
5195                              sb_virt_addr, sb_phy_addr, rel_sb_id);
5196
5197         return rc;
5198 }
5199
5200 /* This function allocates fast-path status block memory */
5201 static int
5202 qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info, u16 sb_id)
5203 {
5204         struct status_block     *sb_virt;
5205         bus_addr_t              sb_phys;
5206         int                     rc;
5207         uint32_t                size;
5208         struct ecore_dev        *cdev;
5209
5210         cdev = &ha->cdev;
5211
5212         size = sizeof(*sb_virt);
5213         sb_virt = OSAL_DMA_ALLOC_COHERENT(cdev, (&sb_phys), size);
5214
5215         if (!sb_virt) {
5216                 QL_DPRINT1(ha, "Status block allocation failed\n");
5217                 return -ENOMEM;
5218         }
5219
5220         rc = qlnx_sb_init(cdev, sb_info, sb_virt, sb_phys, sb_id);
5221         if (rc) {
5222                 OSAL_DMA_FREE_COHERENT(cdev, sb_virt, sb_phys, size);
5223         }
5224
5225         return rc;
5226 }
5227
5228 static void
5229 qlnx_free_rx_buffers(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
5230 {
5231         int                     i;
5232         struct sw_rx_data       *rx_buf;
5233
5234         for (i = 0; i < rxq->num_rx_buffers; i++) {
5235
5236                 rx_buf = &rxq->sw_rx_ring[i];
5237
5238                 if (rx_buf->data != NULL) {
5239                         if (rx_buf->map != NULL) {
5240                                 bus_dmamap_unload(ha->rx_tag, rx_buf->map);
5241                                 bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
5242                                 rx_buf->map = NULL;
5243                         }
5244                         m_freem(rx_buf->data);
5245                         rx_buf->data = NULL;
5246                 }
5247         }
5248         return;
5249 }
5250
5251 static void
5252 qlnx_free_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
5253 {
5254         struct ecore_dev        *cdev;
5255         int                     i;
5256
5257         cdev = &ha->cdev;
5258
5259         qlnx_free_rx_buffers(ha, rxq);
5260
5261         for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
5262                 qlnx_free_tpa_mbuf(ha, &rxq->tpa_info[i]);
5263                 if (rxq->tpa_info[i].mpf != NULL)
5264                         m_freem(rxq->tpa_info[i].mpf);
5265         }
5266
5267         bzero((void *)&rxq->sw_rx_ring[0],
5268                 (sizeof (struct sw_rx_data) * RX_RING_SIZE));
5269
5270         /* Free the real RQ ring used by FW */
5271         if (rxq->rx_bd_ring.p_virt_addr) {
5272                 ecore_chain_free(cdev, &rxq->rx_bd_ring);
5273                 rxq->rx_bd_ring.p_virt_addr = NULL;
5274         }
5275
5276         /* Free the real completion ring used by FW */
5277         if (rxq->rx_comp_ring.p_virt_addr &&
5278                         rxq->rx_comp_ring.pbl_sp.p_virt_table) {
5279                 ecore_chain_free(cdev, &rxq->rx_comp_ring);
5280                 rxq->rx_comp_ring.p_virt_addr = NULL;
5281                 rxq->rx_comp_ring.pbl_sp.p_virt_table = NULL;
5282         }
5283
5284 #ifdef QLNX_SOFT_LRO
5285         {
5286                 struct lro_ctrl *lro;
5287
5288                 lro = &rxq->lro;
5289                 tcp_lro_free(lro);
5290         }
5291 #endif /* #ifdef QLNX_SOFT_LRO */
5292
5293         return;
5294 }
5295
5296 static int
5297 qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
5298 {
5299         register struct mbuf    *mp;
5300         uint16_t                rx_buf_size;
5301         struct sw_rx_data       *sw_rx_data;
5302         struct eth_rx_bd        *rx_bd;
5303         dma_addr_t              dma_addr;
5304         bus_dmamap_t            map;
5305         bus_dma_segment_t       segs[1];
5306         int                     nsegs;
5307         int                     ret;
5308         struct ecore_dev        *cdev;
5309
5310         cdev = &ha->cdev;
5311
5312         rx_buf_size = rxq->rx_buf_size;
5313
5314         mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);
5315
5316         if (mp == NULL) {
5317                 QL_DPRINT1(ha, "Failed to allocate Rx data\n");
5318                 return -ENOMEM;
5319         }
5320
5321         mp->m_len = mp->m_pkthdr.len = rx_buf_size;
5322
5323         map = (bus_dmamap_t)0;
5324
5325         ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
5326                         BUS_DMA_NOWAIT);
5327         dma_addr = (ret == 0) ? segs[0].ds_addr : 0; /* segs[] is valid only on success */
5328
5329         if (ret || !dma_addr || (nsegs != 1)) {
5330                 m_freem(mp);
5331                 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
5332                            ret, (long long unsigned int)dma_addr, nsegs);
5333                 return -ENOMEM;
5334         }
5335
5336         sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
5337         sw_rx_data->data = mp;
5338         sw_rx_data->dma_addr = dma_addr;
5339         sw_rx_data->map = map;
5340
5341         /* Advance PROD and get BD pointer */
5342         rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
5343         rx_bd->addr.hi = htole32(U64_HI(dma_addr));
5344         rx_bd->addr.lo = htole32(U64_LO(dma_addr));
5345         bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);
5346
5347         rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
5348
5349         return 0;
5350 }
5351
5352 static int
5353 qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
5354         struct qlnx_agg_info *tpa)
5355 {
5356         struct mbuf             *mp;
5357         dma_addr_t              dma_addr;
5358         bus_dmamap_t            map;
5359         bus_dma_segment_t       segs[1];
5360         int                     nsegs;
5361         int                     ret;
5362         struct sw_rx_data       *rx_buf;
5363
5364         mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);
5365
5366         if (mp == NULL) {
5367                 QL_DPRINT1(ha, "Failed to allocate Rx data\n");
5368                 return -ENOMEM;
5369         }
5370
5371         mp->m_len = mp->m_pkthdr.len = rx_buf_size;
5372
5373         map = (bus_dmamap_t)0;
5374
5375         ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
5376                         BUS_DMA_NOWAIT);
5377         dma_addr = (ret == 0) ? segs[0].ds_addr : 0; /* segs[] is valid only on success */
5378
5379         if (ret || !dma_addr || (nsegs != 1)) {
5380                 m_freem(mp);
5381                 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
5382                         ret, (long long unsigned int)dma_addr, nsegs);
5383                 return -ENOMEM;
5384         }
5385
5386         rx_buf = &tpa->rx_buf;
5387
5388         memset(rx_buf, 0, sizeof (struct sw_rx_data));
5389
5390         rx_buf->data = mp;
5391         rx_buf->dma_addr = dma_addr;
5392         rx_buf->map = map;
5393
5394         bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);
5395
5396         return (0);
5397 }
5398
5399 static void
5400 qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa)
5401 {
5402         struct sw_rx_data       *rx_buf;
5403
5404         rx_buf = &tpa->rx_buf;
5405
5406         if (rx_buf->data != NULL) {
5407                 if (rx_buf->map != NULL) {
5408                         bus_dmamap_unload(ha->rx_tag, rx_buf->map);
5409                         bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
5410                         rx_buf->map = NULL;
5411                 }
5412                 m_freem(rx_buf->data);
5413                 rx_buf->data = NULL;
5414         }
5415         return;
5416 }
5417
5418 /* This function allocates all memory needed per Rx queue */
5419 static int
5420 qlnx_alloc_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
5421 {
5422         int                     i, rc, num_allocated;
5423         struct ifnet            *ifp;
5424         struct ecore_dev         *cdev;
5425
5426         cdev = &ha->cdev;
5427         ifp = ha->ifp;
5428
5429         rxq->num_rx_buffers = RX_RING_SIZE;
5430
5431         rxq->rx_buf_size = ha->rx_buf_size;
5432
5433         /* Allocate the parallel driver ring for Rx buffers */
5434         bzero((void *)&rxq->sw_rx_ring[0],
5435                 (sizeof (struct sw_rx_data) * RX_RING_SIZE));
5436
5437         /* Allocate FW Rx ring  */
5438
5439         rc = ecore_chain_alloc(cdev,
5440                         ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
5441                         ECORE_CHAIN_MODE_NEXT_PTR,
5442                         ECORE_CHAIN_CNT_TYPE_U16,
5443                         RX_RING_SIZE,
5444                         sizeof(struct eth_rx_bd),
5445                         &rxq->rx_bd_ring, NULL);
5446
5447         if (rc)
5448                 goto err;
5449
5450         /* Allocate FW completion ring */
5451         rc = ecore_chain_alloc(cdev,
5452                         ECORE_CHAIN_USE_TO_CONSUME,
5453                         ECORE_CHAIN_MODE_PBL,
5454                         ECORE_CHAIN_CNT_TYPE_U16,
5455                         RX_RING_SIZE,
5456                         sizeof(union eth_rx_cqe),
5457                         &rxq->rx_comp_ring, NULL);
5458
5459         if (rc)
5460                 goto err;
5461
5462         /* Allocate buffers for the Rx ring */
5463
5464         for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
5465                 rc = qlnx_alloc_tpa_mbuf(ha, rxq->rx_buf_size,
5466                         &rxq->tpa_info[i]);
5467                 if (rc)
5468                         break;
5469
5470         }
5471
5472         for (i = 0; i < rxq->num_rx_buffers; i++) {
5473                 rc = qlnx_alloc_rx_buffer(ha, rxq);
5474                 if (rc)
5475                         break;
5476         }
5477         num_allocated = i;
5478         if (!num_allocated) {
5479                 QL_DPRINT1(ha, "Rx buffers allocation failed\n");
5480                 goto err;
5481         } else if (num_allocated < rxq->num_rx_buffers) {
5482                 QL_DPRINT1(ha, "Allocated fewer buffers than"
5483                         " desired (%d allocated)\n", num_allocated);
5484         }
5485
5486 #ifdef QLNX_SOFT_LRO
5487
5488         {
5489                 struct lro_ctrl *lro;
5490
5491                 lro = &rxq->lro;
5492
5493 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
5494                 if (tcp_lro_init_args(lro, ifp, 0, rxq->num_rx_buffers)) {
5495                         QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n",
5496                                    rxq->rxq_id);
5497                         goto err;
5498                 }
5499 #else
5500                 if (tcp_lro_init(lro)) {
5501                         QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n",
5502                                    rxq->rxq_id);
5503                         goto err;
5504                 }
5505 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
5506
5507                 lro->ifp = ha->ifp;
5508         }
5509 #endif /* #ifdef QLNX_SOFT_LRO */
5510         return 0;
5511
5512 err:
5513         qlnx_free_mem_rxq(ha, rxq);
5514         return -ENOMEM;
5515 }
5516
5517
5518 static void
5519 qlnx_free_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
5520         struct qlnx_tx_queue *txq)
5521 {
5522         struct ecore_dev        *cdev;
5523
5524         cdev = &ha->cdev;
5525
5526         bzero((void *)&txq->sw_tx_ring[0],
5527                 (sizeof (struct sw_tx_bd) * TX_RING_SIZE));
5528
5529         /* Free the real RQ ring used by FW */
5530         if (txq->tx_pbl.p_virt_addr) {
5531                 ecore_chain_free(cdev, &txq->tx_pbl);
5532                 txq->tx_pbl.p_virt_addr = NULL;
5533         }
5534         return;
5535 }
5536
5537 /* This function allocates all memory needed per Tx queue */
5538 static int
5539 qlnx_alloc_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, 
5540         struct qlnx_tx_queue *txq)
5541 {
5542         int                     ret = ECORE_SUCCESS;
5543         union eth_tx_bd_types   *p_virt;
5544         struct ecore_dev        *cdev;
5545
5546         cdev = &ha->cdev;
5547
5548         bzero((void *)&txq->sw_tx_ring[0],
5549                 (sizeof (struct sw_tx_bd) * TX_RING_SIZE));
5550
5551         /* Allocate the real Tx ring to be used by FW */
5552         ret = ecore_chain_alloc(cdev,
5553                         ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
5554                         ECORE_CHAIN_MODE_PBL,
5555                         ECORE_CHAIN_CNT_TYPE_U16,
5556                         TX_RING_SIZE,
5557                         sizeof(*p_virt),
5558                         &txq->tx_pbl, NULL);
5559
5560         if (ret != ECORE_SUCCESS) {
5561                 goto err;
5562         }
5563
5564         txq->num_tx_buffers = TX_RING_SIZE;
5565
5566         return 0;
5567
5568 err:
5569         qlnx_free_mem_txq(ha, fp, txq);
5570         return -ENOMEM;
5571 }
5572
5573 static void
5574 qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
5575 {
5576         struct mbuf     *mp;
5577         struct ifnet    *ifp = ha->ifp;
5578
5579         if (mtx_initialized(&fp->tx_mtx)) {
5580
5581                 if (fp->tx_br != NULL) {
5582
5583                         mtx_lock(&fp->tx_mtx);
5584
5585                         while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
5586                                 fp->tx_pkts_freed++;
5587                                 m_freem(mp);
5588                         }
5589
5590                         mtx_unlock(&fp->tx_mtx);
5591
5592                         buf_ring_free(fp->tx_br, M_DEVBUF);
5593                         fp->tx_br = NULL;
5594                 }
5595                 mtx_destroy(&fp->tx_mtx);
5596         }
5597         return;
5598 }
5599
5600 static void
5601 qlnx_free_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
5602 {
5603         int     tc;
5604
5605         qlnx_free_mem_sb(ha, fp->sb_info);
5606
5607         qlnx_free_mem_rxq(ha, fp->rxq);
5608
5609         for (tc = 0; tc < ha->num_tc; tc++)
5610                 qlnx_free_mem_txq(ha, fp, fp->txq[tc]);
5611
5612         return;
5613 }
5614
5615 static int
5616 qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
5617 {
5618         snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
5619                 "qlnx%d_fp%d_tx_mq_lock", ha->dev_unit, fp->rss_id);
5620
5621         mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
5622
5623         fp->tx_br = buf_ring_alloc(TX_RING_SIZE, M_DEVBUF,
5624                                    M_NOWAIT, &fp->tx_mtx);
5625         if (fp->tx_br == NULL) {
5626                 QL_DPRINT1(ha, "buf_ring_alloc failed for fp[%d, %d]\n",
5627                         ha->dev_unit, fp->rss_id);
5628                 return -ENOMEM;
5629         }
5630         return 0;
5631 }
5632
5633 static int
5634 qlnx_alloc_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
5635 {
5636         int     rc, tc;
5637
5638         rc = qlnx_alloc_mem_sb(ha, fp->sb_info, fp->rss_id);
5639         if (rc)
5640                 goto err;
5641
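        /*
         * Size receive buffers to the MTU: either the smallest cluster that
         * fits the frame (jumbo-buffer-equals-MTU mode) or at most a
         * page-sized jumbo cluster.
         */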
5642         if (ha->rx_jumbo_buf_eq_mtu) {
5643                 if (ha->max_frame_size <= MCLBYTES)
5644                         ha->rx_buf_size = MCLBYTES;
5645                 else if (ha->max_frame_size <= MJUMPAGESIZE)
5646                         ha->rx_buf_size = MJUMPAGESIZE;
5647                 else if (ha->max_frame_size <= MJUM9BYTES)
5648                         ha->rx_buf_size = MJUM9BYTES;
5649                 else if (ha->max_frame_size <= MJUM16BYTES)
5650                         ha->rx_buf_size = MJUM16BYTES;
5651         } else {
5652                 if (ha->max_frame_size <= MCLBYTES)
5653                         ha->rx_buf_size = MCLBYTES;
5654                 else
5655                         ha->rx_buf_size = MJUMPAGESIZE;
5656         }
5657
5658         rc = qlnx_alloc_mem_rxq(ha, fp->rxq);
5659         if (rc)
5660                 goto err;
5661
5662         for (tc = 0; tc < ha->num_tc; tc++) {
5663                 rc = qlnx_alloc_mem_txq(ha, fp, fp->txq[tc]);
5664                 if (rc)
5665                         goto err;
5666         }
5667
5668         return 0;
5669
5670 err:
5671         qlnx_free_mem_fp(ha, fp);
5672         return -ENOMEM;
5673 }
5674
5675 static void
5676 qlnx_free_mem_load(qlnx_host_t *ha)
5677 {
5678         int                     i;
5679         struct ecore_dev        *cdev;
5680
5681         cdev = &ha->cdev;
5682
5683         for (i = 0; i < ha->num_rss; i++) {
5684                 struct qlnx_fastpath *fp = &ha->fp_array[i];
5685
5686                 qlnx_free_mem_fp(ha, fp);
5687         }
5688         return;
5689 }
5690
5691 static int
5692 qlnx_alloc_mem_load(qlnx_host_t *ha)
5693 {
5694         int     rc = 0, rss_id;
5695
5696         for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
5697                 struct qlnx_fastpath *fp = &ha->fp_array[rss_id];
5698
5699                 rc = qlnx_alloc_mem_fp(ha, fp);
5700                 if (rc)
5701                         break;
5702         }
5703         return (rc);
5704 }
5705
5706 static int
5707 qlnx_start_vport(struct ecore_dev *cdev,
5708                 u8 vport_id,
5709                 u16 mtu,
5710                 u8 drop_ttl0_flg,
5711                 u8 inner_vlan_removal_en_flg,
5712                 u8 tx_switching,
5713                 u8 hw_lro_enable)
5714 {
5715         int                                     rc, i;
5716         struct ecore_sp_vport_start_params      vport_start_params = { 0 };
5717         qlnx_host_t                             *ha;
5718
5719         ha = (qlnx_host_t *)cdev;
5720
5721         vport_start_params.remove_inner_vlan = inner_vlan_removal_en_flg;
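        /* note: the tx_switching argument is not honored; tx switching stays disabled */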
5722         vport_start_params.tx_switching = 0;
5723         vport_start_params.handle_ptp_pkts = 0;
5724         vport_start_params.only_untagged = 0;
5725         vport_start_params.drop_ttl0 = drop_ttl0_flg;
5726
5727         vport_start_params.tpa_mode =
5728                 (hw_lro_enable ? ECORE_TPA_MODE_RSC : ECORE_TPA_MODE_NONE);
5729         vport_start_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;
5730
5731         vport_start_params.vport_id = vport_id;
5732         vport_start_params.mtu = mtu;
5733
5734
5735         QL_DPRINT2(ha, "Setting mtu to %d and VPORT ID = %d\n", mtu, vport_id);
5736
5737         for_each_hwfn(cdev, i) {
5738                 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
5739
5740                 vport_start_params.concrete_fid = p_hwfn->hw_info.concrete_fid;
5741                 vport_start_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
5742
5743                 rc = ecore_sp_vport_start(p_hwfn, &vport_start_params);
5744
5745                 if (rc) {
5746                         QL_DPRINT1(ha, "Failed to start V-PORT %d"
5747                                 " with MTU %d\n", vport_id, mtu);
5748                         return -ENOMEM;
5749                 }
5750
5751                 ecore_hw_start_fastpath(p_hwfn);
5752
5753                 QL_DPRINT2(ha, "Started V-PORT %d with MTU %d\n",
5754                         vport_id, mtu);
5755         }
5756         return 0;
5757 }
5758
5759
5760 static int
5761 qlnx_update_vport(struct ecore_dev *cdev,
5762         struct qlnx_update_vport_params *params)
5763 {
5764         struct ecore_sp_vport_update_params     sp_params;
5765         int                                     rc, i, j, fp_index;
5766         struct ecore_hwfn                       *p_hwfn;
5767         struct ecore_rss_params                 *rss;
5768         qlnx_host_t                             *ha = (qlnx_host_t *)cdev;
5769         struct qlnx_fastpath                    *fp;
5770
5771         memset(&sp_params, 0, sizeof(sp_params));
5772         /* Translate protocol params into sp params */
5773         sp_params.vport_id = params->vport_id;
5774
5775         sp_params.update_vport_active_rx_flg =
5776                 params->update_vport_active_rx_flg;
5777         sp_params.vport_active_rx_flg = params->vport_active_rx_flg;
5778
5779         sp_params.update_vport_active_tx_flg =
5780                 params->update_vport_active_tx_flg;
5781         sp_params.vport_active_tx_flg = params->vport_active_tx_flg;
5782
5783         sp_params.update_inner_vlan_removal_flg =
5784                 params->update_inner_vlan_removal_flg;
5785         sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg;
5786
5787         sp_params.sge_tpa_params = params->sge_tpa_params;
5788
5789         /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns.
5790          * We need to re-fix the rss values per engine for CMT.
5791          */
5792         if (params->rss_params->update_rss_config)
5793                 sp_params.rss_params = params->rss_params;
5794         else
5795                 sp_params.rss_params = NULL;
5796
5797         for_each_hwfn(cdev, i) {
5798
5799                 p_hwfn = &cdev->hwfns[i];
5800
5801                 if ((cdev->num_hwfns > 1) &&
5802                         params->rss_params->update_rss_config &&
5803                         params->rss_params->rss_enable) {
5804
5805                         rss = params->rss_params;
5806
5807                         for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE; j++) {
5808
5809                                 fp_index = ((cdev->num_hwfns * j) + i) %
5810                                                 ha->num_rss;
5811
5812                                 fp = &ha->fp_array[fp_index];
5813                                 rss->rss_ind_table[j] = fp->rxq->handle;
5814                         }
5815
5816                         for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE;) {
5817                                 QL_DPRINT3(ha, "%p %p %p %p %p %p %p %p \n",
5818                                         rss->rss_ind_table[j],
5819                                         rss->rss_ind_table[j+1],
5820                                         rss->rss_ind_table[j+2],
5821                                         rss->rss_ind_table[j+3],
5822                                         rss->rss_ind_table[j+4],
5823                                         rss->rss_ind_table[j+5],
5824                                         rss->rss_ind_table[j+6],
5825                                         rss->rss_ind_table[j+7]);
5826                                 j += 8;
5827                         }
5828                 }
5829
5830                 sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
5831
5832                 QL_DPRINT1(ha, "Update sp vport ID=%d\n", params->vport_id);
5833
5834                 rc = ecore_sp_vport_update(p_hwfn, &sp_params,
5835                                            ECORE_SPQ_MODE_EBLOCK, NULL);
5836                 if (rc) {
5837                         QL_DPRINT1(ha, "Failed to update VPORT\n");
5838                         return rc;
5839                 }
5840
5841                 QL_DPRINT2(ha, "Updated V-PORT %d: tx_active_flag %d, "
5842                         "rx_active_flag %d [tx_update %d], [rx_update %d]\n",
5843                         params->vport_id, params->vport_active_tx_flg,
5844                         params->vport_active_rx_flg,
5845                         params->update_vport_active_tx_flg,
5846                         params->update_vport_active_rx_flg);
5847         }
5848
5849         return 0;
5850 }
5851
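/*
 * Recycle the rx buffer at the consumer index back onto the producer side
 * of the BD ring; used when a replacement mbuf cannot be allocated.
 */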
5852 static void
5853 qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq)
5854 {
5855         struct eth_rx_bd        *rx_bd_cons =
5856                                         ecore_chain_consume(&rxq->rx_bd_ring);
5857         struct eth_rx_bd        *rx_bd_prod =
5858                                         ecore_chain_produce(&rxq->rx_bd_ring);
5859         struct sw_rx_data       *sw_rx_data_cons =
5860                                         &rxq->sw_rx_ring[rxq->sw_rx_cons];
5861         struct sw_rx_data       *sw_rx_data_prod =
5862                                         &rxq->sw_rx_ring[rxq->sw_rx_prod];
5863
5864         sw_rx_data_prod->data = sw_rx_data_cons->data;
5865         memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd));
5866
5867         rxq->sw_rx_cons  = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
5868         rxq->sw_rx_prod  = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
5869
5870         return;
5871 }
5872
5873 static void
5874 qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, struct qlnx_rx_queue *rxq)
5875 {
5876
5877         uint16_t                bd_prod;
5878         uint16_t                cqe_prod;
5879         union {
5880                 struct eth_rx_prod_data rx_prod_data;
5881                 uint32_t                data32;
5882         } rx_prods;
5883
5884         bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
5885         cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
5886
5887         /* Update producers */
5888         rx_prods.rx_prod_data.bd_prod = htole16(bd_prod);
5889         rx_prods.rx_prod_data.cqe_prod = htole16(cqe_prod);
5890
5891         /* Make sure that the BD and SGE data is updated before updating the
5892          * producers since FW might read the BD/SGE right after the producer
5893          * is updated.
5894          */
5895         wmb();
5896
5897         internal_ram_wr(p_hwfn, rxq->hw_rxq_prod_addr,
5898                 sizeof(rx_prods), &rx_prods.data32);
5899
5900         /* mmiowb is needed to synchronize doorbell writes from more than one
5901          * processor. It guarantees that the write arrives to the device before
5902          * the napi lock is released and another qlnx_poll is called (possibly
5903          * on another CPU). Without this barrier, the next doorbell can bypass
5904          * this doorbell. This is applicable to IA64/Altix systems.
5905          */
5906         wmb();
5907
5908         return;
5909 }
5910
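/*
 * Default 40-byte Toeplitz RSS hash key (the well-known Microsoft default
 * key), packed as big-endian 32-bit words.
 */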
5911 static uint32_t qlnx_hash_key[] = {
5912                 ((0x6d << 24)|(0x5a << 16)|(0x56 << 8)|0xda),
5913                 ((0x25 << 24)|(0x5b << 16)|(0x0e << 8)|0xc2),
5914                 ((0x41 << 24)|(0x67 << 16)|(0x25 << 8)|0x3d),
5915                 ((0x43 << 24)|(0xa3 << 16)|(0x8f << 8)|0xb0),
5916                 ((0xd0 << 24)|(0xca << 16)|(0x2b << 8)|0xcb),
5917                 ((0xae << 24)|(0x7b << 16)|(0x30 << 8)|0xb4),
5918                 ((0x77 << 24)|(0xcb << 16)|(0x2d << 8)|0xa3),
5919                 ((0x80 << 24)|(0x30 << 16)|(0xf2 << 8)|0x0c),
5920                 ((0x6a << 24)|(0x42 << 16)|(0xb7 << 8)|0x3b),
5921                 ((0xbe << 24)|(0xac << 16)|(0x01 << 8)|0xfa)};
5922
5923 static int
5924 qlnx_start_queues(qlnx_host_t *ha)
5925 {
5926         int                             rc, tc, i, vport_id = 0,
5927                                         drop_ttl0_flg = 1, vlan_removal_en = 1,
5928                                         tx_switching = 0, hw_lro_enable = 0;
5929         struct ecore_dev                *cdev = &ha->cdev;
5930         struct ecore_rss_params         *rss_params = &ha->rss_params;
5931         struct qlnx_update_vport_params vport_update_params;
5932         struct ifnet                    *ifp;
5933         struct ecore_hwfn               *p_hwfn;
5934         struct ecore_sge_tpa_params     tpa_params;
5935         struct ecore_queue_start_common_params qparams;
5936         struct qlnx_fastpath            *fp;
5937
5938         ifp = ha->ifp;
5939
5940         QL_DPRINT1(ha, "Num RSS = %d\n", ha->num_rss);
5941
5942         if (!ha->num_rss) {
5943                 QL_DPRINT1(ha, "Cannot update V-PORT to active as there"
5944                         " are no Rx queues\n");
5945                 return -EINVAL;
5946         }
5947
5948 #ifndef QLNX_SOFT_LRO
5949         hw_lro_enable = ifp->if_capenable & IFCAP_LRO;
5950 #endif /* #ifndef QLNX_SOFT_LRO */
5951
5952         rc = qlnx_start_vport(cdev, vport_id, ifp->if_mtu, drop_ttl0_flg,
5953                         vlan_removal_en, tx_switching, hw_lro_enable);
5954
5955         if (rc) {
5956                 QL_DPRINT1(ha, "Start V-PORT failed %d\n", rc);
5957                 return rc;
5958         }
5959
5960         QL_DPRINT2(ha, "Start vport ramrod passed, "
5961                 "vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
5962                 vport_id, (int)(ifp->if_mtu + 0xe), vlan_removal_en);
5963
5964         for_each_rss(i) {
5965                 struct ecore_rxq_start_ret_params rx_ret_params;
5966                 struct ecore_txq_start_ret_params tx_ret_params;
5967
5968                 fp = &ha->fp_array[i];
5969                 p_hwfn = &cdev->hwfns[(fp->rss_id % cdev->num_hwfns)];
5970
5971                 bzero(&qparams, sizeof(struct ecore_queue_start_common_params));
5972                 bzero(&rx_ret_params,
5973                         sizeof (struct ecore_rxq_start_ret_params));
5974
5975                 qparams.queue_id = i;
5976                 qparams.vport_id = vport_id;
5977                 qparams.stats_id = vport_id;
5978                 qparams.p_sb = fp->sb_info;
5979                 qparams.sb_idx = RX_PI;
5980                 
5981
5982                 rc = ecore_eth_rx_queue_start(p_hwfn,
5983                         p_hwfn->hw_info.opaque_fid,
5984                         &qparams,
5985                         fp->rxq->rx_buf_size,   /* bd_max_bytes */
5986                         /* bd_chain_phys_addr */
5987                         fp->rxq->rx_bd_ring.p_phys_addr,
5988                         /* cqe_pbl_addr */
5989                         ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring),
5990                         /* cqe_pbl_size */
5991                         ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring),
5992                         &rx_ret_params);
5993
5994                 if (rc) {
5995                         QL_DPRINT1(ha, "Start RXQ #%d failed %d\n", i, rc);
5996                         return rc;
5997                 }
5998
5999                 fp->rxq->hw_rxq_prod_addr       = rx_ret_params.p_prod;
6000                 fp->rxq->handle                 = rx_ret_params.p_handle;
6001                 fp->rxq->hw_cons_ptr            =
6002                                 &fp->sb_info->sb_virt->pi_array[RX_PI];
6003
6004                 qlnx_update_rx_prod(p_hwfn, fp->rxq);
6005
6006                 for (tc = 0; tc < ha->num_tc; tc++) {
6007                         struct qlnx_tx_queue *txq = fp->txq[tc];
6008                 
6009                         bzero(&qparams,
6010                                 sizeof(struct ecore_queue_start_common_params));
6011                         bzero(&tx_ret_params,
6012                                 sizeof (struct ecore_txq_start_ret_params));
6013
6014                         qparams.queue_id = txq->index / cdev->num_hwfns;
6015                         qparams.vport_id = vport_id;
6016                         qparams.stats_id = vport_id;
6017                         qparams.p_sb = fp->sb_info;
6018                         qparams.sb_idx = TX_PI(tc);
6019
6020                         rc = ecore_eth_tx_queue_start(p_hwfn,
6021                                 p_hwfn->hw_info.opaque_fid,
6022                                 &qparams, tc,
6023                                 /* bd_chain_phys_addr */
6024                                 ecore_chain_get_pbl_phys(&txq->tx_pbl),
6025                                 ecore_chain_get_page_cnt(&txq->tx_pbl),
6026                                 &tx_ret_params);
6027
6028                         if (rc) {
6029                                 QL_DPRINT1(ha, "Start TXQ #%d failed %d\n",
6030                                            txq->index, rc);
6031                                 return rc;
6032                         }
6033
6034                         txq->doorbell_addr = tx_ret_params.p_doorbell;
6035                         txq->handle = tx_ret_params.p_handle;
6036
6037                         txq->hw_cons_ptr =
6038                                 &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
6039                         SET_FIELD(txq->tx_db.data.params,
6040                                   ETH_DB_DATA_DEST, DB_DEST_XCM);
6041                         SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
6042                                   DB_AGG_CMD_SET);
6043                         SET_FIELD(txq->tx_db.data.params,
6044                                   ETH_DB_DATA_AGG_VAL_SEL,
6045                                   DQ_XCM_ETH_TX_BD_PROD_CMD);
6046
6047                         txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
6048                 }
6049         }
6050
6051         /* Fill struct with RSS params */
6052         if (ha->num_rss > 1) {
6053
6054                 rss_params->update_rss_config = 1;
6055                 rss_params->rss_enable = 1;
6056                 rss_params->update_rss_capabilities = 1;
6057                 rss_params->update_rss_ind_table = 1;
6058                 rss_params->update_rss_key = 1;
6059                 rss_params->rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 |
6060                                        ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP;
6061                 rss_params->rss_table_size_log = 7; /* 2^7 = 128 */
6062
6063                 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
6064                         fp = &ha->fp_array[(i % ha->num_rss)];
6065                         rss_params->rss_ind_table[i] = fp->rxq->handle;
6066                 }
6067
6068                 for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
6069                         rss_params->rss_key[i] = (__le32)qlnx_hash_key[i];
6070
6071         } else {
6072                 memset(rss_params, 0, sizeof(*rss_params));
6073         }
6074
6075
6076         /* Prepare and send the vport enable */
6077         memset(&vport_update_params, 0, sizeof(vport_update_params));
6078         vport_update_params.vport_id = vport_id;
6079         vport_update_params.update_vport_active_tx_flg = 1;
6080         vport_update_params.vport_active_tx_flg = 1;
6081         vport_update_params.update_vport_active_rx_flg = 1;
6082         vport_update_params.vport_active_rx_flg = 1;
6083         vport_update_params.rss_params = rss_params;
6084         vport_update_params.update_inner_vlan_removal_flg = 1;
6085         vport_update_params.inner_vlan_removal_flg = 1;
6086
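        /* Configure hardware TPA (LRO) parameters for the vport update */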
6087         if (hw_lro_enable) {
6088                 memset(&tpa_params, 0, sizeof (struct ecore_sge_tpa_params));
6089
6090                 tpa_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;
6091
6092                 tpa_params.update_tpa_en_flg = 1;
6093                 tpa_params.tpa_ipv4_en_flg = 1;
6094                 tpa_params.tpa_ipv6_en_flg = 1;
6095
6096                 tpa_params.update_tpa_param_flg = 1;
6097                 tpa_params.tpa_pkt_split_flg = 0;
6098                 tpa_params.tpa_hdr_data_split_flg = 0;
6099                 tpa_params.tpa_gro_consistent_flg = 0;
6100                 tpa_params.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
6101                 tpa_params.tpa_max_size = (uint16_t)(-1);
6102                 tpa_params.tpa_min_size_to_start = ifp->if_mtu/2;
6103                 tpa_params.tpa_min_size_to_cont = ifp->if_mtu/2;
6104
6105                 vport_update_params.sge_tpa_params = &tpa_params;
6106         }
6107
6108         rc = qlnx_update_vport(cdev, &vport_update_params);
6109         if (rc) {
6110                 QL_DPRINT1(ha, "Update V-PORT failed %d\n", rc);
6111                 return rc;
6112         }
6113
6114         return 0;
6115 }
6116
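/*
 * Name: qlnx_drain_txq
 * Function: waits for the Tx completion ring to drain by reaping
 *      completions via qlnx_tx_int() every 2ms until the chain's
 *      consumer index matches the hardware consumer. Assumes no new
 *      frames are being queued; otherwise the loop may not terminate.
 */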
6117 static int
6118 qlnx_drain_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6119         struct qlnx_tx_queue *txq)
6120 {
6121         uint16_t        hw_bd_cons;
6122         uint16_t        ecore_cons_idx;
6123
6124         QL_DPRINT2(ha, "enter\n");
6125
6126         hw_bd_cons = le16toh(*txq->hw_cons_ptr);
6127
6128         while (hw_bd_cons !=
6129                 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
6130
6131                 mtx_lock(&fp->tx_mtx);
6132
6133                 (void)qlnx_tx_int(ha, fp, txq);
6134
6135                 mtx_unlock(&fp->tx_mtx);
6136
6137                 qlnx_mdelay(__func__, 2);
6138
6139                 hw_bd_cons = le16toh(*txq->hw_cons_ptr);
6140         }
6141
6142         QL_DPRINT2(ha, "[%d, %d]: done\n", fp->rss_id, txq->index);
6143
6144         return 0;
6145 }
6146
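/*
 * Name: qlnx_stop_queues
 * Function: deactivates the vport, drains every Tx queue, stops the
 *      Tx/Rx queues in reverse order of creation and finally stops the
 *      vport on each hw function.
 */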
6147 static int
6148 qlnx_stop_queues(qlnx_host_t *ha)
6149 {
6150         struct qlnx_update_vport_params vport_update_params;
6151         struct ecore_dev                *cdev;
6152         struct qlnx_fastpath            *fp;
6153         int                             rc, tc, i;
6154
6155         cdev = &ha->cdev;
6156
6157         /* Disable the vport */
6158
6159         memset(&vport_update_params, 0, sizeof(vport_update_params));
6160
6161         vport_update_params.vport_id = 0;
6162         vport_update_params.update_vport_active_tx_flg = 1;
6163         vport_update_params.vport_active_tx_flg = 0;
6164         vport_update_params.update_vport_active_rx_flg = 1;
6165         vport_update_params.vport_active_rx_flg = 0;
6166         vport_update_params.rss_params = &ha->rss_params;
6167         vport_update_params.rss_params->update_rss_config = 0;
6168         vport_update_params.rss_params->rss_enable = 0;
6169         vport_update_params.update_inner_vlan_removal_flg = 0;
6170         vport_update_params.inner_vlan_removal_flg = 0;
6171
6172         QL_DPRINT1(ha, "Update vport ID = %d\n", vport_update_params.vport_id);
6173
6174         rc = qlnx_update_vport(cdev, &vport_update_params);
6175         if (rc) {
6176                 QL_DPRINT1(ha, "Failed to update vport\n");
6177                 return rc;
6178         }
6179
6180         /* Flush Tx queues. If needed, request drain from MCP */
6181         for_each_rss(i) {
6182                 fp = &ha->fp_array[i];
6183
6184                 for (tc = 0; tc < ha->num_tc; tc++) {
6185                         struct qlnx_tx_queue *txq = fp->txq[tc];
6186
6187                         rc = qlnx_drain_txq(ha, fp, txq);
6188                         if (rc)
6189                                 return rc;
6190                 }
6191         }
6192
6193         /* Stop all Queues in reverse order */
6194         for (i = ha->num_rss - 1; i >= 0; i--) {
6195
6196                 struct ecore_hwfn *p_hwfn = &cdev->hwfns[(i % cdev->num_hwfns)];
6197
6198                 fp = &ha->fp_array[i];
6199
6200                 /* Stop the Tx Queue(s) */
6201                 for (tc = 0; tc < ha->num_tc; tc++) {
6202                         int tx_queue_id;
6203
6204                         tx_queue_id = tc * ha->num_rss + i;
6205                         rc = ecore_eth_tx_queue_stop(p_hwfn,
6206                                         fp->txq[tc]->handle);
6207                                         
6208                         if (rc) {
6209                                 QL_DPRINT1(ha, "Failed to stop TXQ #%d\n",
6210                                            tx_queue_id);
6211                                 return rc;
6212                         }
6213                 }
6214
6215                 /* Stop the Rx Queue */
6216                 rc = ecore_eth_rx_queue_stop(p_hwfn, fp->rxq->handle, false,
6217                                 false);
6218                 if (rc) {
6219                         QL_DPRINT1(ha, "Failed to stop RXQ #%d\n", i);
6220                         return rc;
6221                 }
6222         }
6223
6224         /* Stop the vport */
6225         for_each_hwfn(cdev, i) {
6226
6227                 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
6228
6229                 rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 0);
6230
6231                 if (rc) {
6232                         QL_DPRINT1(ha, "Failed to stop VPORT\n");
6233                         return rc;
6234                 }
6235         }
6236
6237         return rc;
6238 }
6239
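/*
 * Name: qlnx_set_ucast_rx_mac
 * Function: issues a unicast MAC filter command (add/remove/replace/
 *      flush) for the given address on vport 0.
 */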
6240 static int
6241 qlnx_set_ucast_rx_mac(qlnx_host_t *ha,
6242         enum ecore_filter_opcode opcode,
6243         unsigned char mac[ETH_ALEN])
6244 {
6245         struct ecore_filter_ucast       ucast;
6246         struct ecore_dev                *cdev;
6247         int                             rc;
6248
6249         cdev = &ha->cdev;
6250
6251         bzero(&ucast, sizeof(struct ecore_filter_ucast));
6252
6253         ucast.opcode = opcode;
6254         ucast.type = ECORE_FILTER_MAC;
6255         ucast.is_rx_filter = 1;
6256         ucast.vport_to_add_to = 0;
6257         memcpy(&ucast.mac[0], mac, ETH_ALEN);
6258
6259         rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);
6260
6261         return (rc);
6262 }
6263
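/*
 * Name: qlnx_remove_all_ucast_mac
 * Function: sends ECORE_FILTER_REPLACE with a zeroed MAC address,
 *      which appears to drop all previously configured unicast filters.
 */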
6264 static int
6265 qlnx_remove_all_ucast_mac(qlnx_host_t *ha)
6266 {
6267         struct ecore_filter_ucast       ucast;
6268         struct ecore_dev                *cdev;
6269         int                             rc;
6270
6271         bzero(&ucast, sizeof(struct ecore_filter_ucast));
6272
6273         ucast.opcode = ECORE_FILTER_REPLACE;
6274         ucast.type = ECORE_FILTER_MAC; 
6275         ucast.is_rx_filter = 1;
6276
6277         cdev = &ha->cdev;
6278
6279         rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);
6280
6281         return (rc);
6282 }
6283
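/*
 * Name: qlnx_remove_all_mcast_mac
 * Function: builds a remove command from every non-zero entry in the
 *      cached multicast table, sends it to the firmware and clears the
 *      cache.
 */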
6284 static int
6285 qlnx_remove_all_mcast_mac(qlnx_host_t *ha)
6286 {
6287         struct ecore_filter_mcast       *mcast;
6288         struct ecore_dev                *cdev;
6289         int                             rc, i;
6290
6291         cdev = &ha->cdev;
6292
6293         mcast = &ha->ecore_mcast;
6294         bzero(mcast, sizeof(struct ecore_filter_mcast));
6295
6296         mcast->opcode = ECORE_FILTER_REMOVE;
6297
6298         for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
6299
6300                 if (ha->mcast[i].addr[0] || ha->mcast[i].addr[1] ||
6301                         ha->mcast[i].addr[2] || ha->mcast[i].addr[3] ||
6302                         ha->mcast[i].addr[4] || ha->mcast[i].addr[5]) {
6303
6304                         memcpy(&mcast->mac[i], &ha->mcast[i].addr[0], ETH_ALEN);
6305                         mcast->num_mc_addrs++;
6306                 }
6307         }
6308
6309
6310         rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);
6311
6312         bzero(ha->mcast, (sizeof(qlnx_mcast_t) * QLNX_MAX_NUM_MULTICAST_ADDRS));
6313         ha->nmcast = 0;
6314
6315         return (rc);
6316 }
6317
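/*
 * Name: qlnx_clean_filters
 * Function: removes all unicast and multicast MAC filters and flushes
 *      the Rx filter state for the primary MAC.
 */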
6318 static int
6319 qlnx_clean_filters(qlnx_host_t *ha)
6320 {
6321         int     rc = 0;
6322
6323         /* Remove all unicast macs */
6324         rc = qlnx_remove_all_ucast_mac(ha);
6325         if (rc)
6326                 return rc;
6327
6328         /* Remove all multicast macs */
6329         rc = qlnx_remove_all_mcast_mac(ha);
6330         if (rc)
6331                 return rc;
6332
6333         rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_FLUSH, ha->primary_mac);
6334
6335         return (rc);
6336 }
6337
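/*
 * Name: qlnx_set_rx_accept_filter
 * Function: programs the Rx accept flags with the given filter while
 *      leaving Tx at matched unicast/multicast plus broadcast.
 */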
6338 static int
6339 qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter)
6340 {
6341         struct ecore_filter_accept_flags        accept;
6342         int                                     rc = 0;
6343         struct ecore_dev                        *cdev;
6344
6345         cdev = &ha->cdev;
6346
6347         bzero(&accept, sizeof(struct ecore_filter_accept_flags));
6348
6349         accept.update_rx_mode_config = 1;
6350         accept.rx_accept_filter = filter;
6351
6352         accept.update_tx_mode_config = 1;
6353         accept.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
6354                 ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST;
6355
6356         rc = ecore_filter_accept_cmd(cdev, 0, accept, false, false,
6357                         ECORE_SPQ_MODE_CB, NULL);
6358
6359         return (rc);
6360 }
6361
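/*
 * Name: qlnx_set_rx_mode
 * Function: installs the primary MAC, clears stale multicast filters
 *      and programs the default accept mode (matched ucast/mcast plus
 *      bcast).
 */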
6362 static int
6363 qlnx_set_rx_mode(qlnx_host_t *ha)
6364 {
6365         int     rc = 0;
6366         uint8_t filter;
6367
6368         rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, ha->primary_mac);
6369         if (rc)
6370                 return rc;
6371
6372         rc = qlnx_remove_all_mcast_mac(ha);
6373         if (rc)
6374                 return rc;
6375
6376         filter = ECORE_ACCEPT_UCAST_MATCHED |
6377                         ECORE_ACCEPT_MCAST_MATCHED |
6378                         ECORE_ACCEPT_BCAST;
6379         ha->filter = filter;
6380
6381         rc = qlnx_set_rx_accept_filter(ha, filter);
6382
6383         return (rc);
6384 }
6385
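/*
 * Name: qlnx_set_link
 * Function: requests a link up/down transition from the management FW
 *      on every hw function.
 */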
6386 static int
6387 qlnx_set_link(qlnx_host_t *ha, bool link_up)
6388 {
6389         int                     i, rc = 0;
6390         struct ecore_dev        *cdev;
6391         struct ecore_hwfn       *hwfn;
6392         struct ecore_ptt        *ptt;
6393
6394         cdev = &ha->cdev;
6395
6396         for_each_hwfn(cdev, i) {
6397
6398                 hwfn = &cdev->hwfns[i];
6399
6400                 ptt = ecore_ptt_acquire(hwfn);
6401                 if (!ptt)
6402                         return -EBUSY;
6403
6404                 rc = ecore_mcp_set_link(hwfn, ptt, link_up);
6405
6406                 ecore_ptt_release(hwfn, ptt);
6407
6408                 if (rc)
6409                         return rc;
6410         }
6411         return (rc);
6412 }
6413
6414 #if __FreeBSD_version >= 1100000
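/*
 * Name: qlnx_get_counter
 * Function: if_get_counter() handler; derives the interface counters
 *      from the cached hardware vport statistics.
 */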
6415 static uint64_t
6416 qlnx_get_counter(if_t ifp, ift_counter cnt)
6417 {
6418         qlnx_host_t *ha;
6419         uint64_t count;
6420
6421         ha = (qlnx_host_t *)if_getsoftc(ifp);
6422
6423         switch (cnt) {
6424
6425         case IFCOUNTER_IPACKETS:
6426                 count = ha->hw_stats.common.rx_ucast_pkts +
6427                         ha->hw_stats.common.rx_mcast_pkts +
6428                         ha->hw_stats.common.rx_bcast_pkts;
6429                 break;
6430
6431         case IFCOUNTER_IERRORS:
6432                 count = ha->hw_stats.common.rx_crc_errors +
6433                         ha->hw_stats.common.rx_align_errors +
6434                         ha->hw_stats.common.rx_oversize_packets +
6435                         ha->hw_stats.common.rx_undersize_packets;
6436                 break;
6437
6438         case IFCOUNTER_OPACKETS:
6439                 count = ha->hw_stats.common.tx_ucast_pkts +
6440                         ha->hw_stats.common.tx_mcast_pkts +
6441                         ha->hw_stats.common.tx_bcast_pkts;
6442                 break;
6443
6444         case IFCOUNTER_OERRORS:
6445                 count = ha->hw_stats.common.tx_err_drop_pkts;
6446                 break;
6447
6448         case IFCOUNTER_COLLISIONS:
6449                 return (0);
6450
6451         case IFCOUNTER_IBYTES:
6452                 count = ha->hw_stats.common.rx_ucast_bytes +
6453                         ha->hw_stats.common.rx_mcast_bytes +
6454                         ha->hw_stats.common.rx_bcast_bytes;
6455                 break;
6456
6457         case IFCOUNTER_OBYTES:
6458                 count = ha->hw_stats.common.tx_ucast_bytes +
6459                         ha->hw_stats.common.tx_mcast_bytes +
6460                         ha->hw_stats.common.tx_bcast_bytes;
6461                 break;
6462
6463         case IFCOUNTER_IMCASTS:
6464                 count = ha->hw_stats.common.rx_mcast_pkts;
6465                 break;
6466
6467         case IFCOUNTER_OMCASTS:
6468                 count = ha->hw_stats.common.tx_mcast_pkts;
6469                 break;
6470
6471         case IFCOUNTER_IQDROPS:
6472         case IFCOUNTER_OQDROPS:
6473         case IFCOUNTER_NOPROTO:
6474
6475         default:
6476                 return (if_get_counter_default(ifp, cnt));
6477         }
6478         return (count);
6479 }
6480 #endif
6481
6482
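/*
 * Name: qlnx_timer
 * Function: one second callout; refreshes the cached vport statistics
 *      and, when enabled, samples the storm processor counters.
 */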
6483 static void
6484 qlnx_timer(void *arg)
6485 {
6486         qlnx_host_t     *ha;
6487
6488         ha = (qlnx_host_t *)arg;
6489
6490         ecore_get_vport_stats(&ha->cdev, &ha->hw_stats);
6491
6492         if (ha->storm_stats_enable)
6493                 qlnx_sample_storm_stats(ha);
6494
6495         callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
6496
6497         return;
6498 }
6499
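/*
 * Name: qlnx_load
 * Function: brings the port up: allocates the fastpath resources,
 *      sets up and binds the per-RSS interrupts, starts the vport and
 *      its queues, programs the Rx mode and requests link up.
 */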
6500 static int
6501 qlnx_load(qlnx_host_t *ha)
6502 {
6503         int                     i;
6504         int                     rc = 0;
6505         struct ecore_dev        *cdev;
6506         device_t                dev;
6507
6508         cdev = &ha->cdev;
6509         dev = ha->pci_dev;
6510
6511         QL_DPRINT2(ha, "enter\n");
6512
6513         rc = qlnx_alloc_mem_arrays(ha);
6514         if (rc)
6515                 goto qlnx_load_exit0;
6516
6517         qlnx_init_fp(ha);
6518
6519         rc = qlnx_alloc_mem_load(ha);
6520         if (rc)
6521                 goto qlnx_load_exit1;
6522
6523         QL_DPRINT2(ha, "Allocated %d RSS queues on %d TC(s)\n",
6524                    ha->num_rss, ha->num_tc);
6525
6526         for (i = 0; i < ha->num_rss; i++) {
6527
6528                 if ((rc = bus_setup_intr(dev, ha->irq_vec[i].irq,
6529                         (INTR_TYPE_NET | INTR_MPSAFE),
6530                         NULL, qlnx_fp_isr, &ha->irq_vec[i],
6531                         &ha->irq_vec[i].handle))) {
6532
6533                         QL_DPRINT1(ha, "could not setup interrupt\n");
6534                         goto qlnx_load_exit2;
6535                 }
6536
6537                 QL_DPRINT2(ha, "rss_id = %d irq_rid %d"
6538                         " irq %p handle %p\n", i,
6539                         ha->irq_vec[i].irq_rid,
6540                         ha->irq_vec[i].irq, ha->irq_vec[i].handle);
6541
6542                 bus_bind_intr(dev, ha->irq_vec[i].irq, (i % mp_ncpus));
6543         }
6544
6545         rc = qlnx_start_queues(ha);
6546         if (rc)
6547                 goto qlnx_load_exit2;
6548
6549         QL_DPRINT2(ha, "Start VPORT, RXQ and TXQ succeeded\n");
6550
6551         /* Add primary mac and set Rx filters */
6552         rc = qlnx_set_rx_mode(ha);
6553         if (rc)
6554                 goto qlnx_load_exit2;
6555
6556         /* Ask for link-up using current configuration */
6557         qlnx_set_link(ha, true);
6558
6559         ha->state = QLNX_STATE_OPEN;
6560
6561         bzero(&ha->hw_stats, sizeof(struct ecore_eth_stats));
6562
6563         if (ha->flags.callout_init)
6564                 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
6565
6566         goto qlnx_load_exit0;
6567
6568 qlnx_load_exit2:
6569         qlnx_free_mem_load(ha);
6570
6571 qlnx_load_exit1:
6572         ha->num_rss = 0;
6573
6574 qlnx_load_exit0:
6575         QL_DPRINT2(ha, "exit [%d]\n", rc);
6576         return rc;
6577 }
6578
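/*
 * Name: qlnx_drain_soft_lro
 * Function: flushes any pending aggregations in the software LRO
 *      engine of every Rx queue; compiled out unless QLNX_SOFT_LRO is
 *      defined.
 */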
6579 static void
6580 qlnx_drain_soft_lro(qlnx_host_t *ha)
6581 {
6582 #ifdef QLNX_SOFT_LRO
6583
6584         struct ifnet    *ifp;
6585         int             i;
6586
6587         ifp = ha->ifp;
6588
6589
6590         if (ifp->if_capenable & IFCAP_LRO) {
6591
6592                 for (i = 0; i < ha->num_rss; i++) {
6593
6594                         struct qlnx_fastpath *fp = &ha->fp_array[i];
6595                         struct lro_ctrl *lro;
6596
6597                         lro = &fp->rxq->lro;
6598
6599 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
6600
6601                         tcp_lro_flush_all(lro);
6602
6603 #else
6604                         struct lro_entry *queued;
6605
6606                         while (!SLIST_EMPTY(&lro->lro_active)) {
6607                                 queued = SLIST_FIRST(&lro->lro_active);
6608                                 SLIST_REMOVE_HEAD(&lro->lro_active, next);
6609                                 tcp_lro_flush(lro, queued);
6610                         }
6611
6612 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
6613
6614                 }
6615         }
6616
6617 #endif /* #ifdef QLNX_SOFT_LRO */
6618
6619         return;
6620 }
6621
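/*
 * Name: qlnx_unload
 * Function: tears down an open port: forces link down, removes MAC
 *      filters, stops the queues, detaches the fastpath interrupts and
 *      frees the load-time memory before marking the device closed.
 */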
6622 static void
6623 qlnx_unload(qlnx_host_t *ha)
6624 {
6625         struct ecore_dev        *cdev;
6626         device_t                dev;
6627         int                     i;
6628
6629         cdev = &ha->cdev;
6630         dev = ha->pci_dev;
6631
6632         QL_DPRINT2(ha, "enter\n");
6633         QL_DPRINT1(ha, "QLNX STATE = %d\n", ha->state);
6634
6635         if (ha->state == QLNX_STATE_OPEN) {
6636
6637                 qlnx_set_link(ha, false);
6638                 qlnx_clean_filters(ha);
6639                 qlnx_stop_queues(ha);
6640                 ecore_hw_stop_fastpath(cdev);
6641
6642                 for (i = 0; i < ha->num_rss; i++) {
6643                         if (ha->irq_vec[i].handle) {
6644                                 (void)bus_teardown_intr(dev,
6645                                         ha->irq_vec[i].irq,
6646                                         ha->irq_vec[i].handle);
6647                                 ha->irq_vec[i].handle = NULL;
6648                         }
6649                 }
6650
6651                 qlnx_drain_fp_taskqueues(ha);
6652                 qlnx_drain_soft_lro(ha);
6653                 qlnx_free_mem_load(ha);
6654         }
6655
6656         if (ha->flags.callout_init)
6657                 callout_drain(&ha->qlnx_callout);
6658
6659         qlnx_mdelay(__func__, 1000);
6660
6661         ha->state = QLNX_STATE_CLOSED;
6662
6663         QL_DPRINT2(ha, "exit\n");
6664         return;
6665 }
6666
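/*
 * Name: qlnx_grc_dumpsize
 * Function: returns, via num_dwords, the buffer size needed for a GRC
 *      dump of the given hw function.
 */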
6667 static int
6668 qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
6669 {
6670         int                     rval = -1;
6671         struct ecore_hwfn       *p_hwfn;
6672         struct ecore_ptt        *p_ptt;
6673
6674         ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
6675
6676         p_hwfn = &ha->cdev.hwfns[hwfn_index];
6677         p_ptt = ecore_ptt_acquire(p_hwfn);
6678
6679         if (!p_ptt) {
6680                 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
6681                 return (rval);
6682         }
6683
6684         rval = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);
6685
6686         if (rval == DBG_STATUS_OK)
6687                 rval = 0;
6688         else {
6689                 QL_DPRINT1(ha, "ecore_dbg_grc_get_dump_buf_size failed"
6690                         " [0x%x]\n", rval);
6691         }
6692
6693         ecore_ptt_release(p_hwfn, p_ptt);
6694
6695         return (rval);
6696 }
6697
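/*
 * Name: qlnx_idle_chk_size
 * Function: returns, via num_dwords, the buffer size needed for an
 *      idle-check dump of the given hw function.
 */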
6698 static int
6699 qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
6700 {
6701         int                     rval = -1;
6702         struct ecore_hwfn       *p_hwfn;
6703         struct ecore_ptt        *p_ptt;
6704
6705         ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
6706
6707         p_hwfn = &ha->cdev.hwfns[hwfn_index];
6708         p_ptt = ecore_ptt_acquire(p_hwfn);
6709
6710         if (!p_ptt) {
6711                 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
6712                 return (rval);
6713         }
6714
6715         rval = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);
6716
6717         if (rval == DBG_STATUS_OK)
6718                 rval = 0;
6719         else {
6720                 QL_DPRINT1(ha, "ecore_dbg_idle_chk_get_dump_buf_size failed"
6721                         " [0x%x]\n", rval);
6722         }
6723
6724         ecore_ptt_release(p_hwfn, p_ptt);
6725
6726         return (rval);
6727 }
6728
6729
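/*
 * Name: qlnx_sample_storm_stats
 * Function: reads the active/stall/sleeping/inactive cycle counters of
 *      each storm processor (X/Y/P/T/M/U) from SEM fast memory and
 *      stores one sample per hw function until the sample buffer is
 *      full.
 */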
6730 static void
6731 qlnx_sample_storm_stats(qlnx_host_t *ha)
6732 {
6733         int                     i, index;
6734         struct ecore_dev        *cdev;
6735         qlnx_storm_stats_t      *s_stats;
6736         uint32_t                reg;
6737         struct ecore_ptt        *p_ptt;
6738         struct ecore_hwfn       *hwfn;
6739
6740         if (ha->storm_stats_index >= QLNX_STORM_STATS_SAMPLES_PER_HWFN) {
6741                 ha->storm_stats_enable = 0;
6742                 return;
6743         }
6744
6745         cdev = &ha->cdev;
6746
6747         for_each_hwfn(cdev, i) {
6748
6749                 hwfn = &cdev->hwfns[i];
6750
6751                 p_ptt = ecore_ptt_acquire(hwfn);
6752                 if (!p_ptt)
6753                         return;
6754
6755                 index = ha->storm_stats_index +
6756                                 (i * QLNX_STORM_STATS_SAMPLES_PER_HWFN);
6757
6758                 s_stats = &ha->storm_stats[index];
6759
6760                 /* XSTORM */
6761                 reg = XSEM_REG_FAST_MEMORY +
6762                                 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
6763                 s_stats->xstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 
6764
6765                 reg = XSEM_REG_FAST_MEMORY +
6766                                 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
6767                 s_stats->xstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 
6768
6769                 reg = XSEM_REG_FAST_MEMORY +
6770                                 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
6771                 s_stats->xstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 
6772
6773                 reg = XSEM_REG_FAST_MEMORY +
6774                                 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
6775                 s_stats->xstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 
6776
6777                 /* YSTORM */
6778                 reg = YSEM_REG_FAST_MEMORY +
6779                                 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
6780                 s_stats->ystorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 
6781
6782                 reg = YSEM_REG_FAST_MEMORY +
6783                                 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
6784                 s_stats->ystorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 
6785
6786                 reg = YSEM_REG_FAST_MEMORY +
6787                                 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
6788                 s_stats->ystorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 
6789
6790                 reg = YSEM_REG_FAST_MEMORY +
6791                                 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
6792                 s_stats->ystorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 
6793
6794                 /* PSTORM */
6795                 reg = PSEM_REG_FAST_MEMORY +
6796                                 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
6797                 s_stats->pstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 
6798
6799                 reg = PSEM_REG_FAST_MEMORY +
6800                                 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
6801                 s_stats->pstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 
6802
6803                 reg = PSEM_REG_FAST_MEMORY +
6804                                 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
6805                 s_stats->pstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 
6806
6807                 reg = PSEM_REG_FAST_MEMORY +
6808                                 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
6809                 s_stats->pstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 
6810
6811                 /* TSTORM */
6812                 reg = TSEM_REG_FAST_MEMORY +
6813                                 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
6814                 s_stats->tstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 
6815
6816                 reg = TSEM_REG_FAST_MEMORY +
6817                                 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
6818                 s_stats->tstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 
6819
6820                 reg = TSEM_REG_FAST_MEMORY +
6821                                 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
6822                 s_stats->tstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 
6823
6824                 reg = TSEM_REG_FAST_MEMORY +
6825                                 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
6826                 s_stats->tstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 
6827
6828                 /* MSTORM */
6829                 reg = MSEM_REG_FAST_MEMORY +
6830                                 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
6831                 s_stats->mstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 
6832
6833                 reg = MSEM_REG_FAST_MEMORY +
6834                                 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
6835                 s_stats->mstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 
6836
6837                 reg = MSEM_REG_FAST_MEMORY +
6838                                 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
6839                 s_stats->mstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 
6840
6841                 reg = MSEM_REG_FAST_MEMORY +
6842                                 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
6843                 s_stats->mstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 
6844
6845                 /* USTORM */
6846                 reg = USEM_REG_FAST_MEMORY +
6847                                 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
6848                 s_stats->ustorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 
6849
6850                 reg = USEM_REG_FAST_MEMORY +
6851                                 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
6852                 s_stats->ustorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 
6853
6854                 reg = USEM_REG_FAST_MEMORY +
6855                                 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
6856                 s_stats->ustorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 
6857
6858                 reg = USEM_REG_FAST_MEMORY +
6859                                 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
6860                 s_stats->ustorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 
6861
6862                 ecore_ptt_release(hwfn, p_ptt);
6863         }
6864
6865         ha->storm_stats_index++;
6866
6867         return;
6868 }
6869
6870 /*
6871  * Name: qlnx_dump_buf8
6872  * Function: dumps a buffer as bytes
6873  */
6874 static void
6875 qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, uint32_t len)
6876 {
6877         device_t        dev;
6878         uint32_t        i = 0;
6879         uint8_t         *buf;
6880
6881         dev = ha->pci_dev;
6882         buf = dbuf;
6883
6884         device_printf(dev, "%s: %s 0x%x dump start\n", __func__, msg, len);
6885
6886         while (len >= 16) {
6887                 device_printf(dev,"0x%08x:"
6888                         " %02x %02x %02x %02x %02x %02x %02x %02x"
6889                         " %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
6890                         buf[0], buf[1], buf[2], buf[3],
6891                         buf[4], buf[5], buf[6], buf[7],
6892                         buf[8], buf[9], buf[10], buf[11],
6893                         buf[12], buf[13], buf[14], buf[15]);
6894                 i += 16;
6895                 len -= 16;
6896                 buf += 16;
6897         }
6898         switch (len) {
6899         case 1:
6900                 device_printf(dev,"0x%08x: %02x\n", i, buf[0]);
6901                 break;
6902         case 2:
6903                 device_printf(dev,"0x%08x: %02x %02x\n", i, buf[0], buf[1]);
6904                 break;
6905         case 3:
6906                 device_printf(dev,"0x%08x: %02x %02x %02x\n",
6907                         i, buf[0], buf[1], buf[2]);
6908                 break;
6909         case 4:
6910                 device_printf(dev,"0x%08x: %02x %02x %02x %02x\n", i,
6911                         buf[0], buf[1], buf[2], buf[3]);
6912                 break;
6913         case 5:
6914                 device_printf(dev,"0x%08x:"
6915                         " %02x %02x %02x %02x %02x\n", i,
6916                         buf[0], buf[1], buf[2], buf[3], buf[4]);
6917                 break;
6918         case 6:
6919                 device_printf(dev,"0x%08x:"
6920                         " %02x %02x %02x %02x %02x %02x\n", i,
6921                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
6922                 break;
6923         case 7:
6924                 device_printf(dev,"0x%08x:"
6925                         " %02x %02x %02x %02x %02x %02x %02x\n", i,
6926                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]);
6927                 break;
6928         case 8:
6929                 device_printf(dev,"0x%08x:"
6930                         " %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
6931                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
6932                         buf[7]);
6933                 break;
6934         case 9:
6935                 device_printf(dev,"0x%08x:"
6936                         " %02x %02x %02x %02x %02x %02x %02x %02x"
6937                         " %02x\n", i,
6938                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
6939                         buf[7], buf[8]);
6940                 break;
6941         case 10:
6942                 device_printf(dev,"0x%08x:"
6943                         " %02x %02x %02x %02x %02x %02x %02x %02x"
6944                         " %02x %02x\n", i,
6945                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
6946                         buf[7], buf[8], buf[9]);
6947                 break;
6948         case 11:
6949                 device_printf(dev,"0x%08x:"
6950                         " %02x %02x %02x %02x %02x %02x %02x %02x"
6951                         " %02x %02x %02x\n", i,
6952                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
6953                         buf[7], buf[8], buf[9], buf[10]);
6954                 break;
6955         case 12:
6956                 device_printf(dev,"0x%08x:"
6957                         " %02x %02x %02x %02x %02x %02x %02x %02x"
6958                         " %02x %02x %02x %02x\n", i,
6959                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
6960                         buf[7], buf[8], buf[9], buf[10], buf[11]);
6961                 break;
6962         case 13:
6963                 device_printf(dev,"0x%08x:"
6964                         " %02x %02x %02x %02x %02x %02x %02x %02x"
6965                         " %02x %02x %02x %02x %02x\n", i,
6966                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
6967                         buf[7], buf[8], buf[9], buf[10], buf[11], buf[12]);
6968                 break;
6969         case 14:
6970                 device_printf(dev,"0x%08x:"
6971                         " %02x %02x %02x %02x %02x %02x %02x %02x"
6972                         " %02x %02x %02x %02x %02x %02x\n", i,
6973                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
6974                         buf[7], buf[8], buf[9], buf[10], buf[11], buf[12],
6975                         buf[13]);
6976                 break;
6977         case 15:
6978                 device_printf(dev,"0x%08x:"
6979                         " %02x %02x %02x %02x %02x %02x %02x %02x"
6980                         " %02x %02x %02x %02x %02x %02x %02x\n", i,
6981                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
6982                         buf[7], buf[8], buf[9], buf[10], buf[11], buf[12],
6983                         buf[13], buf[14]);
6984                 break;
6985         default:
6986                 break;
6987         }
6988
6989         device_printf(dev, "%s: %s dump end\n", __func__, msg);
6990
6991         return;
6992 }
6993