]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/dev/qlnx/qlnxe/qlnx_os.c
Merge llvm, clang, lld, lldb, compiler-rt and libc++ r307894, and update
[FreeBSD/FreeBSD.git] / sys / dev / qlnx / qlnxe / qlnx_os.c
1 /*
2  * Copyright (c) 2017-2018 Cavium, Inc. 
3  * All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions
7  *  are met:
8  *
9  *  1. Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  *  2. Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  *
15  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  *  POSSIBILITY OF SUCH DAMAGE.
26  */
27
28
29 /*
30  * File: qlnx_os.c
31  * Author : David C Somayajulu, Cavium, Inc., San Jose, CA 95131.
32  */
33
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36
37 #include "qlnx_os.h"
38 #include "bcm_osal.h"
39 #include "reg_addr.h"
40 #include "ecore_gtt_reg_addr.h"
41 #include "ecore.h"
42 #include "ecore_chain.h"
43 #include "ecore_status.h"
44 #include "ecore_hw.h"
45 #include "ecore_rt_defs.h"
46 #include "ecore_init_ops.h"
47 #include "ecore_int.h"
48 #include "ecore_cxt.h"
49 #include "ecore_spq.h"
50 #include "ecore_init_fw_funcs.h"
51 #include "ecore_sp_commands.h"
52 #include "ecore_dev_api.h"
53 #include "ecore_l2_api.h"
54 #include "ecore_mcp.h"
55 #include "ecore_hw_defs.h"
56 #include "mcp_public.h"
57 #include "ecore_iro.h"
58 #include "nvm_cfg.h"
59 #include "ecore_dev_api.h"
60 #include "ecore_dbg_fw_funcs.h"
61
62 #include "qlnx_ioctl.h"
63 #include "qlnx_def.h"
64 #include "qlnx_ver.h"
65 #include <sys/smp.h>
66
67
68 /*
69  * static functions
70  */
71 /*
72  * ioctl related functions
73  */
74 static void qlnx_add_sysctls(qlnx_host_t *ha);
75
76 /*
77  * main driver
78  */
79 static void qlnx_release(qlnx_host_t *ha);
80 static void qlnx_fp_isr(void *arg);
81 static void qlnx_init_ifnet(device_t dev, qlnx_host_t *ha);
82 static void qlnx_init(void *arg);
83 static void qlnx_init_locked(qlnx_host_t *ha);
84 static int qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi);
85 static int qlnx_set_promisc(qlnx_host_t *ha);
86 static int qlnx_set_allmulti(qlnx_host_t *ha);
87 static int qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
88 static int qlnx_media_change(struct ifnet *ifp);
89 static void qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
90 static void qlnx_stop(qlnx_host_t *ha);
91 static int qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp,
92                 struct mbuf **m_headp);
93 static int qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha);
94 static uint32_t qlnx_get_optics(qlnx_host_t *ha,
95                         struct qlnx_link_output *if_link);
96 static int qlnx_transmit(struct ifnet *ifp, struct mbuf  *mp);
97 static void qlnx_qflush(struct ifnet *ifp);
98
99 static int qlnx_alloc_parent_dma_tag(qlnx_host_t *ha);
100 static void qlnx_free_parent_dma_tag(qlnx_host_t *ha);
101 static int qlnx_alloc_tx_dma_tag(qlnx_host_t *ha);
102 static void qlnx_free_tx_dma_tag(qlnx_host_t *ha);
103 static int qlnx_alloc_rx_dma_tag(qlnx_host_t *ha);
104 static void qlnx_free_rx_dma_tag(qlnx_host_t *ha);
105
106 static int qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver);
107 static int qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size);
108
109 static int qlnx_nic_setup(struct ecore_dev *cdev,
110                 struct ecore_pf_params *func_params);
111 static int qlnx_nic_start(struct ecore_dev *cdev);
112 static int qlnx_slowpath_start(qlnx_host_t *ha);
113 static int qlnx_slowpath_stop(qlnx_host_t *ha);
114 static int qlnx_init_hw(qlnx_host_t *ha);
115 static void qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
116                 char ver_str[VER_SIZE]);
117 static void qlnx_unload(qlnx_host_t *ha);
118 static int qlnx_load(qlnx_host_t *ha);
119 static void qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
120                 uint32_t add_mac);
121 static void qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf,
122                 uint32_t len);
123 static int qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq);
124 static void qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq);
125 static void qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn,
126                 struct qlnx_rx_queue *rxq);
127 static int qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter);
128 static int qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords,
129                 int hwfn_index);
130 static int qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords,
131                 int hwfn_index);
132 static void qlnx_timer(void *arg);
133 static int qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
134 static void qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
135 static void qlnx_trigger_dump(qlnx_host_t *ha);
136 static void qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
137                 struct qlnx_tx_queue *txq);
138 static int qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
139                 int lro_enable);
140 static void qlnx_fp_taskqueue(void *context, int pending);
141 static void qlnx_sample_storm_stats(qlnx_host_t *ha);
142 static int qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
143                 struct qlnx_agg_info *tpa);
144 static void qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa);
145
146 #if __FreeBSD_version >= 1100000
147 static uint64_t qlnx_get_counter(if_t ifp, ift_counter cnt);
148 #endif
149
150
151 /*
152  * Hooks to the Operating Systems
153  */
154 static int qlnx_pci_probe (device_t);
155 static int qlnx_pci_attach (device_t);
156 static int qlnx_pci_detach (device_t);
157
158 static device_method_t qlnx_pci_methods[] = {
159         /* Device interface */
160         DEVMETHOD(device_probe, qlnx_pci_probe),
161         DEVMETHOD(device_attach, qlnx_pci_attach),
162         DEVMETHOD(device_detach, qlnx_pci_detach),
163         { 0, 0 }
164 };
165
166 static driver_t qlnx_pci_driver = {
167         "ql", qlnx_pci_methods, sizeof (qlnx_host_t),
168 };
169
170 static devclass_t qlnx_devclass;
171
172 MODULE_VERSION(if_qlnxe,1);
173 DRIVER_MODULE(if_qlnxe, pci, qlnx_pci_driver, qlnx_devclass, 0, 0);
174
175 MODULE_DEPEND(if_qlnxe, pci, 1, 1, 1);
176 MODULE_DEPEND(if_qlnxe, ether, 1, 1, 1);
177
178 MALLOC_DEFINE(M_QLNXBUF, "qlnxbuf", "Buffers for qlnx driver");
179
180
181 char qlnx_dev_str[64];
182 char qlnx_ver_str[VER_SIZE];
183 char qlnx_name_str[NAME_SIZE];
184
185 /*
186  * Some PCI Configuration Space Related Defines
187  */
188
189 #ifndef PCI_VENDOR_QLOGIC
190 #define PCI_VENDOR_QLOGIC               0x1077
191 #endif
192
193 /* 40G Adapter QLE45xxx*/
194 #ifndef QLOGIC_PCI_DEVICE_ID_1634
195 #define QLOGIC_PCI_DEVICE_ID_1634       0x1634
196 #endif
197
198 /* 100G Adapter QLE45xxx*/
199 #ifndef QLOGIC_PCI_DEVICE_ID_1644
200 #define QLOGIC_PCI_DEVICE_ID_1644       0x1644
201 #endif
202
203 /* 25G Adapter QLE45xxx*/
204 #ifndef QLOGIC_PCI_DEVICE_ID_1656
205 #define QLOGIC_PCI_DEVICE_ID_1656       0x1656
206 #endif
207
208 /* 50G Adapter QLE45xxx*/
209 #ifndef QLOGIC_PCI_DEVICE_ID_1654
210 #define QLOGIC_PCI_DEVICE_ID_1654       0x1654
211 #endif
212
213 /* 10G/25G/40G Adapter QLE41xxx*/
214 #ifndef QLOGIC_PCI_DEVICE_ID_8070
215 #define QLOGIC_PCI_DEVICE_ID_8070       0x8070
216 #endif
217
218 static int
219 qlnx_valid_device(device_t dev)
220 {
221         uint16_t        device_id;
222
223         device_id = pci_get_device(dev);
224
225         if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
226                 (device_id == QLOGIC_PCI_DEVICE_ID_1644) ||
227                 (device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
228                 (device_id == QLOGIC_PCI_DEVICE_ID_1654) ||
229                 (device_id == QLOGIC_PCI_DEVICE_ID_8070))
230                 return 0;
231
232         return -1;
233 }
234
235 /*
236  * Name:        qlnx_pci_probe
237  * Function:    Validate the PCI device to be a QLA80XX device
238  */
239 static int
240 qlnx_pci_probe(device_t dev)
241 {
242         snprintf(qlnx_ver_str, sizeof(qlnx_ver_str), "v%d.%d.%d",
243                 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, QLNX_VERSION_BUILD);
244         snprintf(qlnx_name_str, sizeof(qlnx_name_str), "qlnx");
245
246         if (pci_get_vendor(dev) != PCI_VENDOR_QLOGIC) {
247                 return (ENXIO);
248         }
249
250         switch (pci_get_device(dev)) {
251
252         case QLOGIC_PCI_DEVICE_ID_1644:
253                 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
254                         "Qlogic 100GbE PCI CNA Adapter-Ethernet Function",
255                         QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
256                         QLNX_VERSION_BUILD);
257                 device_set_desc_copy(dev, qlnx_dev_str);
258
259                 break;
260
261         case QLOGIC_PCI_DEVICE_ID_1634:
262                 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
263                         "Qlogic 40GbE PCI CNA Adapter-Ethernet Function",
264                         QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
265                         QLNX_VERSION_BUILD);
266                 device_set_desc_copy(dev, qlnx_dev_str);
267
268                 break;
269
270         case QLOGIC_PCI_DEVICE_ID_1656:
271                 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
272                         "Qlogic 25GbE PCI CNA Adapter-Ethernet Function",
273                         QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
274                         QLNX_VERSION_BUILD);
275                 device_set_desc_copy(dev, qlnx_dev_str);
276
277                 break;
278
279         case QLOGIC_PCI_DEVICE_ID_1654:
280                 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
281                         "Qlogic 50GbE PCI CNA Adapter-Ethernet Function",
282                         QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
283                         QLNX_VERSION_BUILD);
284                 device_set_desc_copy(dev, qlnx_dev_str);
285
286                 break;
287
288         case QLOGIC_PCI_DEVICE_ID_8070:
289                 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
290                         "Qlogic 10GbE/25GbE/40GbE PCI CNA (AH) "
291                         "Adapter-Ethernet Function",
292                         QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
293                         QLNX_VERSION_BUILD);
294                 device_set_desc_copy(dev, qlnx_dev_str);
295
296                 break;
297
298         default:
299                 return (ENXIO);
300         }
301
302         return (BUS_PROBE_DEFAULT);
303 }
304
305
306 static void
307 qlnx_sp_intr(void *arg)
308 {
309         struct ecore_hwfn       *p_hwfn;
310         qlnx_host_t             *ha;
311         int                     i;
312         
313         p_hwfn = arg;
314
315         if (p_hwfn == NULL) {
316                 printf("%s: spurious slowpath intr\n", __func__);
317                 return;
318         }
319
320         ha = (qlnx_host_t *)p_hwfn->p_dev;
321
322         QL_DPRINT2(ha, "enter\n");
323
324         for (i = 0; i < ha->cdev.num_hwfns; i++) {
325                 if (&ha->cdev.hwfns[i] == p_hwfn) {
326                         taskqueue_enqueue(ha->sp_taskqueue[i], &ha->sp_task[i]);
327                         break;
328                 }
329         }
330         QL_DPRINT2(ha, "exit\n");
331         
332         return;
333 }
334
335 static void
336 qlnx_sp_taskqueue(void *context, int pending)
337 {
338         struct ecore_hwfn       *p_hwfn;
339
340         p_hwfn = context;
341
342         if (p_hwfn != NULL) {
343                 qlnx_sp_isr(p_hwfn);
344         }
345         return;
346 }
347
348 static int
349 qlnx_create_sp_taskqueues(qlnx_host_t *ha)
350 {
351         int     i;
352         uint8_t tq_name[32];
353
354         for (i = 0; i < ha->cdev.num_hwfns; i++) {
355
356                 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];
357
358                 bzero(tq_name, sizeof (tq_name));
359                 snprintf(tq_name, sizeof (tq_name), "ql_sp_tq_%d", i);
360
361                 TASK_INIT(&ha->sp_task[i], 0, qlnx_sp_taskqueue, p_hwfn);
362
363                 ha->sp_taskqueue[i] = taskqueue_create_fast(tq_name, M_NOWAIT,
364                          taskqueue_thread_enqueue, &ha->sp_taskqueue[i]);
365
366                 if (ha->sp_taskqueue[i] == NULL) 
367                         return (-1);
368
369                 taskqueue_start_threads(&ha->sp_taskqueue[i], 1, PI_NET, "%s",
370                         tq_name);
371
372                 QL_DPRINT1(ha, "%p\n", ha->sp_taskqueue[i]);
373         }
374
375         return (0);
376 }
377
378 static void
379 qlnx_destroy_sp_taskqueues(qlnx_host_t *ha)
380 {
381         int     i;
382
383         for (i = 0; i < ha->cdev.num_hwfns; i++) {
384                 if (ha->sp_taskqueue[i] != NULL) {
385                         taskqueue_drain(ha->sp_taskqueue[i], &ha->sp_task[i]);
386                         taskqueue_free(ha->sp_taskqueue[i]);
387                 }
388         }
389         return;
390 }
391
392 static void
393 qlnx_fp_taskqueue(void *context, int pending)
394 {
395         struct qlnx_fastpath    *fp;
396         qlnx_host_t             *ha;
397         struct ifnet            *ifp;
398         struct mbuf             *mp;
399         int                     ret;
400         int                     lro_enable;
401         int                     rx_int = 0, total_rx_count = 0;
402         struct thread           *cthread;
403
404         fp = context;
405
406         if (fp == NULL)
407                 return;
408
409         cthread = curthread;
410
411         thread_lock(cthread);
412
413         if (!sched_is_bound(cthread))
414                 sched_bind(cthread, fp->rss_id);
415
416         thread_unlock(cthread);
417
418         ha = (qlnx_host_t *)fp->edev;
419
420         ifp = ha->ifp;
421
422         lro_enable = ha->ifp->if_capenable & IFCAP_LRO;
423
424         rx_int = qlnx_rx_int(ha, fp, ha->rx_pkt_threshold, lro_enable);
425
426         if (rx_int) {
427                 fp->rx_pkts += rx_int;
428                 total_rx_count += rx_int;
429         }
430
431 #ifdef QLNX_SOFT_LRO
432         {
433                 struct lro_ctrl *lro;
434
435                 lro = &fp->rxq->lro;
436
437                 if (lro_enable && total_rx_count) {
438
439 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
440
441                         if (ha->dbg_trace_lro_cnt) {
442                                 if (lro->lro_mbuf_count & ~1023)
443                                         fp->lro_cnt_1024++;
444                                 else if (lro->lro_mbuf_count & ~511)
445                                         fp->lro_cnt_512++;
446                                 else if (lro->lro_mbuf_count & ~255)
447                                         fp->lro_cnt_256++;
448                                 else if (lro->lro_mbuf_count & ~127)
449                                         fp->lro_cnt_128++;
450                                 else if (lro->lro_mbuf_count & ~63)
451                                         fp->lro_cnt_64++;
452                         }
453                         tcp_lro_flush_all(lro);
454
455 #else
456                         struct lro_entry *queued;
457
458                         while ((!SLIST_EMPTY(&lro->lro_active))) {
459                                 queued = SLIST_FIRST(&lro->lro_active);
460                                 SLIST_REMOVE_HEAD(&lro->lro_active, next);
461                                 tcp_lro_flush(lro, queued);
462                         }
463 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
464                 }
465         }
466 #endif /* #ifdef QLNX_SOFT_LRO */
467
468         ecore_sb_update_sb_idx(fp->sb_info);
469         rmb();
470
471         mtx_lock(&fp->tx_mtx);
472
473         if (((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
474                 IFF_DRV_RUNNING) || (!ha->link_up)) {
475
476                 mtx_unlock(&fp->tx_mtx);
477                 goto qlnx_fp_taskqueue_exit;
478         }
479
480 //      for (tc = 0; tc < ha->num_tc; tc++) {
481 //              (void)qlnx_tx_int(ha, fp, fp->txq[tc]);
482 //      }
483
484         mp = drbr_peek(ifp, fp->tx_br);
485
486         while (mp != NULL) {
487
488                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
489                         ret = qlnx_send(ha, fp, &mp);
490                 } else {
491                         ret = -1;
492                 }
493
494                 if (ret) {
495
496                         if (mp != NULL) {
497                                 drbr_putback(ifp, fp->tx_br, mp);
498                         } else {
499                                 fp->tx_pkts_processed++;
500                                 drbr_advance(ifp, fp->tx_br);
501                         }
502
503                         mtx_unlock(&fp->tx_mtx);
504
505                         goto qlnx_fp_taskqueue_exit;
506
507                 } else {
508                         drbr_advance(ifp, fp->tx_br);
509                         fp->tx_pkts_transmitted++;
510                         fp->tx_pkts_processed++;
511                 }
512
513                 if (fp->tx_ring_full)
514                         break;
515
516                 mp = drbr_peek(ifp, fp->tx_br);
517         }
518
519 //      for (tc = 0; tc < ha->num_tc; tc++) {
520 //              (void)qlnx_tx_int(ha, fp, fp->txq[tc]);
521 //      }
522
523         mtx_unlock(&fp->tx_mtx);
524
525 qlnx_fp_taskqueue_exit:
526         if (rx_int) {
527                 if (fp->fp_taskqueue != NULL)
528                         taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
529         } else {
530                 if (fp->tx_ring_full) {
531                         qlnx_mdelay(__func__, 100);
532                 }
533                 ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
534         }
535
536         QL_DPRINT2(ha, "exit ret = %d\n", ret);
537         return;
538 }
539
540 static int
541 qlnx_create_fp_taskqueues(qlnx_host_t *ha)
542 {
543         int     i;
544         uint8_t tq_name[32];
545         struct qlnx_fastpath *fp;
546
547         for (i = 0; i < ha->num_rss; i++) {
548
549                 fp = &ha->fp_array[i];
550
551                 bzero(tq_name, sizeof (tq_name));
552                 snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i);
553
554                 TASK_INIT(&fp->fp_task, 0, qlnx_fp_taskqueue, fp);
555
556                 fp->fp_taskqueue = taskqueue_create_fast(tq_name, M_NOWAIT,
557                                         taskqueue_thread_enqueue,
558                                         &fp->fp_taskqueue);
559
560                 if (fp->fp_taskqueue == NULL) 
561                         return (-1);
562
563                 taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
564                         tq_name);
565
566                 QL_DPRINT1(ha, "%p\n",fp->fp_taskqueue);
567         }
568
569         return (0);
570 }
571
572 static void
573 qlnx_destroy_fp_taskqueues(qlnx_host_t *ha)
574 {
575         int                     i;
576         struct qlnx_fastpath    *fp;
577
578         for (i = 0; i < ha->num_rss; i++) {
579
580                 fp = &ha->fp_array[i];
581
582                 if (fp->fp_taskqueue != NULL) {
583
584                         taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
585                         taskqueue_free(fp->fp_taskqueue);
586                         fp->fp_taskqueue = NULL;
587                 }
588         }
589         return;
590 }
591
592 static void
593 qlnx_drain_fp_taskqueues(qlnx_host_t *ha)
594 {
595         int                     i;
596         struct qlnx_fastpath    *fp;
597
598         for (i = 0; i < ha->num_rss; i++) {
599                 fp = &ha->fp_array[i];
600
601                 if (fp->fp_taskqueue != NULL) {
602                         QLNX_UNLOCK(ha);
603                         taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
604                         QLNX_LOCK(ha);
605                 }
606         }
607         return;
608 }
609
610 /*
611  * Name:        qlnx_pci_attach
612  * Function:    attaches the device to the operating system
613  */
614 static int
615 qlnx_pci_attach(device_t dev)
616 {
617         qlnx_host_t     *ha = NULL;
618         uint32_t        rsrc_len_reg = 0;
619         uint32_t        rsrc_len_dbells = 0;
620         uint32_t        rsrc_len_msix = 0;
621         int             i;
622         uint32_t        mfw_ver;
623
624         if ((ha = device_get_softc(dev)) == NULL) {
625                 device_printf(dev, "cannot get softc\n");
626                 return (ENOMEM);
627         }
628
629         memset(ha, 0, sizeof (qlnx_host_t));
630
631         if (qlnx_valid_device(dev) != 0) {
632                 device_printf(dev, "device is not valid device\n");
633                 return (ENXIO);
634         }
635         ha->pci_func = pci_get_function(dev);
636
637         ha->pci_dev = dev;
638
639         mtx_init(&ha->hw_lock, "qlnx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
640
641         ha->flags.lock_init = 1;
642
643         pci_enable_busmaster(dev);
644
645         /*
646          * map the PCI BARs
647          */
648
649         ha->reg_rid = PCIR_BAR(0);
650         ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
651                                 RF_ACTIVE);
652
653         if (ha->pci_reg == NULL) {
654                 device_printf(dev, "unable to map BAR0\n");
655                 goto qlnx_pci_attach_err;
656         }
657
658         rsrc_len_reg = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
659                                         ha->reg_rid);
660
661         ha->dbells_rid = PCIR_BAR(2);
662         ha->pci_dbells = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
663                         &ha->dbells_rid, RF_ACTIVE);
664
665         if (ha->pci_dbells == NULL) {
666                 device_printf(dev, "unable to map BAR1\n");
667                 goto qlnx_pci_attach_err;
668         }
669
670         rsrc_len_dbells = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
671                                         ha->dbells_rid);
672
673         ha->dbells_phys_addr = (uint64_t)
674                 bus_get_resource_start(dev, SYS_RES_MEMORY, ha->dbells_rid);;
675         ha->dbells_size = rsrc_len_dbells;
676
677         ha->msix_rid = PCIR_BAR(4);
678         ha->msix_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
679                         &ha->msix_rid, RF_ACTIVE);
680
681         if (ha->msix_bar == NULL) {
682                 device_printf(dev, "unable to map BAR2\n");
683                 goto qlnx_pci_attach_err;
684         }
685
686         rsrc_len_msix = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
687                                         ha->msix_rid);
688         /*
689          * allocate dma tags
690          */
691
692         if (qlnx_alloc_parent_dma_tag(ha))
693                 goto qlnx_pci_attach_err;
694
695         if (qlnx_alloc_tx_dma_tag(ha))
696                 goto qlnx_pci_attach_err;
697
698         if (qlnx_alloc_rx_dma_tag(ha))
699                 goto qlnx_pci_attach_err;
700                 
701
702         if (qlnx_init_hw(ha) != 0)
703                 goto qlnx_pci_attach_err;
704
705         /*
706          * Allocate MSI-x vectors
707          */
708         ha->num_rss = QLNX_MAX_RSS;
709         ha->num_tc = QLNX_MAX_TC;
710
711         ha->msix_count = pci_msix_count(dev);
712
713         if (ha->msix_count > (mp_ncpus + ha->cdev.num_hwfns))
714                 ha->msix_count = mp_ncpus + ha->cdev.num_hwfns;
715
716         if (!ha->msix_count ||
717                 (ha->msix_count < (ha->cdev.num_hwfns + 1 ))) {
718                 device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
719                         ha->msix_count);
720                 goto qlnx_pci_attach_err;
721         }
722
723         if (ha->msix_count > (ha->num_rss + ha->cdev.num_hwfns ))
724                 ha->msix_count = ha->num_rss + ha->cdev.num_hwfns;
725         else
726                 ha->num_rss = ha->msix_count - ha->cdev.num_hwfns;
727
728         QL_DPRINT1(ha, "\n\t\t\tpci_reg [%p, 0x%08x 0x%08x]"
729                 "\n\t\t\tdbells [%p, 0x%08x 0x%08x]"
730                 "\n\t\t\tmsix [%p, 0x%08x 0x%08x 0x%x 0x%x]"
731                 "\n\t\t\t[ncpus = %d][num_rss = 0x%x] [num_tc = 0x%x]\n",
732                  ha->pci_reg, rsrc_len_reg,
733                 ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
734                 ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
735                 ha->msix_count, mp_ncpus, ha->num_rss, ha->num_tc);
736         if (pci_alloc_msix(dev, &ha->msix_count)) {
737                 device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
738                         ha->msix_count);
739                 ha->msix_count = 0;
740                 goto qlnx_pci_attach_err;
741         }
742
743         /*
744          * Initialize slow path interrupt and task queue
745          */
746         if (qlnx_create_sp_taskqueues(ha) != 0)
747                 goto qlnx_pci_attach_err;
748
749         for (i = 0; i < ha->cdev.num_hwfns; i++) {
750
751                 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];
752
753                 ha->sp_irq_rid[i] = i + 1;
754                 ha->sp_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ,
755                                 &ha->sp_irq_rid[i],
756                                 (RF_ACTIVE | RF_SHAREABLE));
757                 if (ha->sp_irq[i] == NULL) {
758                         device_printf(dev,
759                                 "could not allocate mbx interrupt\n");
760                         goto qlnx_pci_attach_err;
761                 }
762
763                 if (bus_setup_intr(dev, ha->sp_irq[i],
764                                 (INTR_TYPE_NET | INTR_MPSAFE), NULL,
765                                 qlnx_sp_intr, p_hwfn, &ha->sp_handle[i])) {
766                         device_printf(dev,
767                                 "could not setup slow path interrupt\n");
768                         goto qlnx_pci_attach_err;
769                 }
770
771                 QL_DPRINT1(ha, "p_hwfn [%p] sp_irq_rid %d"
772                         " sp_irq %p sp_handle %p\n", p_hwfn,
773                         ha->sp_irq_rid[i], ha->sp_irq[i], ha->sp_handle[i]);
774
775         }
776
777         /*
778          * initialize fast path interrupt
779          */
780         if (qlnx_create_fp_taskqueues(ha) != 0)
781                 goto qlnx_pci_attach_err;
782
783         for (i = 0; i < ha->num_rss; i++) {
784                 ha->irq_vec[i].rss_idx = i;
785                 ha->irq_vec[i].ha = ha;
786                 ha->irq_vec[i].irq_rid = (1 + ha->cdev.num_hwfns) + i;
787
788                 ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
789                                 &ha->irq_vec[i].irq_rid,
790                                 (RF_ACTIVE | RF_SHAREABLE));
791
792                 if (ha->irq_vec[i].irq == NULL) {
793                         device_printf(dev,
794                                 "could not allocate interrupt[%d]\n", i);
795                         goto qlnx_pci_attach_err;
796                 }
797                 
798                 if (qlnx_alloc_tx_br(ha, &ha->fp_array[i])) {
799                         device_printf(dev, "could not allocate tx_br[%d]\n", i);
800                         goto qlnx_pci_attach_err;
801
802                 }
803         }
804
805         callout_init(&ha->qlnx_callout, 1);
806         ha->flags.callout_init = 1;
807
808         for (i = 0; i < ha->cdev.num_hwfns; i++) {
809
810                 if (qlnx_grc_dumpsize(ha, &ha->grcdump_size[i], i) != 0)
811                         goto qlnx_pci_attach_err;
812                 if (ha->grcdump_size[i] == 0)
813                         goto qlnx_pci_attach_err;
814
815                 ha->grcdump_size[i] = ha->grcdump_size[i] << 2;
816                 QL_DPRINT1(ha, "grcdump_size[%d] = 0x%08x\n",
817                         i, ha->grcdump_size[i]);
818
819                 ha->grcdump[i] = qlnx_zalloc(ha->grcdump_size[i]);
820                 if (ha->grcdump[i] == NULL) {
821                         device_printf(dev, "grcdump alloc[%d] failed\n", i);
822                         goto qlnx_pci_attach_err;
823                 }
824
825                 if (qlnx_idle_chk_size(ha, &ha->idle_chk_size[i], i) != 0)
826                         goto qlnx_pci_attach_err;
827                 if (ha->idle_chk_size[i] == 0)
828                         goto qlnx_pci_attach_err;
829
830                 ha->idle_chk_size[i] = ha->idle_chk_size[i] << 2;
831                 QL_DPRINT1(ha, "idle_chk_size[%d] = 0x%08x\n",
832                         i, ha->idle_chk_size[i]);
833
834                 ha->idle_chk[i] = qlnx_zalloc(ha->idle_chk_size[i]);
835
836                 if (ha->idle_chk[i] == NULL) {
837                         device_printf(dev, "idle_chk alloc failed\n");
838                         goto qlnx_pci_attach_err;
839                 }
840         }
841
842         if (qlnx_slowpath_start(ha) != 0) {
843
844                 qlnx_mdelay(__func__, 1000);
845                 qlnx_trigger_dump(ha);
846
847                 goto qlnx_pci_attach_err0;
848         } else
849                 ha->flags.slowpath_start = 1;
850
851         if (qlnx_get_flash_size(ha, &ha->flash_size) != 0) {
852                 qlnx_mdelay(__func__, 1000);
853                 qlnx_trigger_dump(ha);
854
855                 goto qlnx_pci_attach_err0;
856         }
857
858         if (qlnx_get_mfw_version(ha, &mfw_ver) != 0) {
859                 qlnx_mdelay(__func__, 1000);
860                 qlnx_trigger_dump(ha);
861
862                 goto qlnx_pci_attach_err0;
863         }
864         snprintf(ha->mfw_ver, sizeof(ha->mfw_ver), "%d.%d.%d.%d",
865                 ((mfw_ver >> 24) & 0xFF), ((mfw_ver >> 16) & 0xFF),
866                 ((mfw_ver >> 8) & 0xFF), (mfw_ver & 0xFF));
867         snprintf(ha->stormfw_ver, sizeof(ha->stormfw_ver), "%d.%d.%d.%d",
868                 FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
869                 FW_ENGINEERING_VERSION);
870
871         QL_DPRINT1(ha, "STORM_FW version %s MFW version %s\n",
872                  ha->stormfw_ver, ha->mfw_ver);
873
874         qlnx_init_ifnet(dev, ha);
875
876         /*
877          * add sysctls
878          */ 
879         qlnx_add_sysctls(ha);
880
881 qlnx_pci_attach_err0:
882         /*
883          * create ioctl device interface
884          */
885         if (qlnx_make_cdev(ha)) {
886                 device_printf(dev, "%s: ql_make_cdev failed\n", __func__);
887                 goto qlnx_pci_attach_err;
888         }
889
890         QL_DPRINT2(ha, "success\n");
891
892         return (0);
893
894 qlnx_pci_attach_err:
895
896         qlnx_release(ha);
897
898         return (ENXIO);
899 }
900
901 /*
902  * Name:        qlnx_pci_detach
903  * Function:    Unhooks the device from the operating system
904  */
905 static int
906 qlnx_pci_detach(device_t dev)
907 {
908         qlnx_host_t     *ha = NULL;
909
910         if ((ha = device_get_softc(dev)) == NULL) {
911                 device_printf(dev, "cannot get softc\n");
912                 return (ENOMEM);
913         }
914
915         QLNX_LOCK(ha);
916         qlnx_stop(ha);
917         QLNX_UNLOCK(ha);
918
919         qlnx_release(ha);
920
921         return (0);
922 }
923
924 static int
925 qlnx_init_hw(qlnx_host_t *ha)
926 {
927         int                             rval = 0;
928         struct ecore_hw_prepare_params  params;
929
930         ecore_init_struct(&ha->cdev);
931
932         /* ha->dp_module = ECORE_MSG_PROBE |
933                                 ECORE_MSG_INTR |
934                                 ECORE_MSG_SP |
935                                 ECORE_MSG_LINK |
936                                 ECORE_MSG_SPQ |
937                                 ECORE_MSG_RDMA;
938         ha->dp_level = ECORE_LEVEL_VERBOSE;*/
939         ha->dp_level = ECORE_LEVEL_NOTICE;
940
941         ecore_init_dp(&ha->cdev, ha->dp_module, ha->dp_level, ha->pci_dev);
942
943         ha->cdev.regview = ha->pci_reg;
944         ha->cdev.doorbells = ha->pci_dbells;
945         ha->cdev.db_phys_addr = ha->dbells_phys_addr;
946         ha->cdev.db_size = ha->dbells_size;
947
948         bzero(&params, sizeof (struct ecore_hw_prepare_params));
949
950         ha->personality = ECORE_PCI_DEFAULT;
951
952         params.personality = ha->personality;
953
954         params.drv_resc_alloc = false;
955         params.chk_reg_fifo = false;
956         params.initiate_pf_flr = true;
957         params.epoch = 0;
958
959         ecore_hw_prepare(&ha->cdev, &params);
960
961         qlnx_set_id(&ha->cdev, qlnx_name_str, qlnx_ver_str);
962
963         return (rval);
964 }
965
966 static void
967 qlnx_release(qlnx_host_t *ha)
968 {
969         device_t        dev;
970         int             i;
971
972         dev = ha->pci_dev;
973
974         QL_DPRINT2(ha, "enter\n");
975
976         for (i = 0; i < QLNX_MAX_HW_FUNCS; i++) {
977                 if (ha->idle_chk[i] != NULL) {
978                         free(ha->idle_chk[i], M_QLNXBUF);
979                         ha->idle_chk[i] = NULL;
980                 }
981
982                 if (ha->grcdump[i] != NULL) {
983                         free(ha->grcdump[i], M_QLNXBUF);
984                         ha->grcdump[i] = NULL;
985                 }
986         }
987
988         if (ha->flags.callout_init)
989                 callout_drain(&ha->qlnx_callout);
990
991         if (ha->flags.slowpath_start) {
992                 qlnx_slowpath_stop(ha);
993         }
994
995         ecore_hw_remove(&ha->cdev);
996
997         qlnx_del_cdev(ha);
998
999         if (ha->ifp != NULL)
1000                 ether_ifdetach(ha->ifp);
1001
1002         qlnx_free_tx_dma_tag(ha);
1003
1004         qlnx_free_rx_dma_tag(ha);
1005
1006         qlnx_free_parent_dma_tag(ha);
1007
1008         for (i = 0; i < ha->num_rss; i++) {
1009                 struct qlnx_fastpath *fp = &ha->fp_array[i];
1010
1011                 if (ha->irq_vec[i].handle) {
1012                         (void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
1013                                         ha->irq_vec[i].handle);
1014                 }
1015
1016                 if (ha->irq_vec[i].irq) {
1017                         (void)bus_release_resource(dev, SYS_RES_IRQ,
1018                                 ha->irq_vec[i].irq_rid,
1019                                 ha->irq_vec[i].irq);
1020                 }
1021
1022                 qlnx_free_tx_br(ha, fp);
1023         }
1024         qlnx_destroy_fp_taskqueues(ha);
1025
1026         for (i = 0; i < ha->cdev.num_hwfns; i++) {
1027                 if (ha->sp_handle[i])
1028                         (void)bus_teardown_intr(dev, ha->sp_irq[i],
1029                                 ha->sp_handle[i]);
1030
1031                 if (ha->sp_irq[i])
1032                         (void) bus_release_resource(dev, SYS_RES_IRQ,
1033                                 ha->sp_irq_rid[i], ha->sp_irq[i]);
1034         }
1035
1036         qlnx_destroy_sp_taskqueues(ha);
1037
1038         if (ha->msix_count)
1039                 pci_release_msi(dev);
1040
1041         if (ha->flags.lock_init) {
1042                 mtx_destroy(&ha->hw_lock);
1043         }
1044
1045         if (ha->pci_reg)
1046                 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
1047                                 ha->pci_reg);
1048
1049         if (ha->pci_dbells)
1050                 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->dbells_rid,
1051                                 ha->pci_dbells);
1052
1053         if (ha->msix_bar)
1054                 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->msix_rid,
1055                                 ha->msix_bar);
1056
1057         QL_DPRINT2(ha, "exit\n");
1058         return;
1059 }
1060
1061 static void
1062 qlnx_trigger_dump(qlnx_host_t *ha)
1063 {
1064         int     i;
1065
1066         if (ha->ifp != NULL)
1067                 ha->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
1068
1069         QL_DPRINT2(ha, "enter\n");
1070
1071         for (i = 0; i < ha->cdev.num_hwfns; i++) {
1072                 qlnx_grc_dump(ha, &ha->grcdump_dwords[i], i);
1073                 qlnx_idle_chk(ha, &ha->idle_chk_dwords[i], i);
1074         }
1075
1076         QL_DPRINT2(ha, "exit\n");
1077
1078         return;
1079 }
1080
1081 static int
1082 qlnx_trigger_dump_sysctl(SYSCTL_HANDLER_ARGS)
1083 {
1084         int             err, ret = 0;
1085         qlnx_host_t     *ha;
1086
1087         err = sysctl_handle_int(oidp, &ret, 0, req);
1088
1089         if (err || !req->newptr)
1090                 return (err);
1091
1092         if (ret == 1) {
1093                 ha = (qlnx_host_t *)arg1;
1094                 qlnx_trigger_dump(ha);
1095         }
1096         return (err);
1097 }
1098
1099 static int
1100 qlnx_set_tx_coalesce(SYSCTL_HANDLER_ARGS)
1101 {
1102         int                     err, i, ret = 0, usecs = 0;
1103         qlnx_host_t             *ha;
1104         struct ecore_hwfn       *p_hwfn;
1105         struct qlnx_fastpath    *fp;
1106
1107         err = sysctl_handle_int(oidp, &usecs, 0, req);
1108
1109         if (err || !req->newptr || !usecs || (usecs > 255))
1110                 return (err);
1111
1112         ha = (qlnx_host_t *)arg1;
1113
1114         for (i = 0; i < ha->num_rss; i++) {
1115
1116                 p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];
1117
1118                 fp = &ha->fp_array[i];
1119
1120                 if (fp->txq[0]->handle != NULL) {
1121                         ret = ecore_set_queue_coalesce(p_hwfn, 0,
1122                                         (uint16_t)usecs, fp->txq[0]->handle);
1123                 }
1124         }
1125
1126         if (!ret)
1127                 ha->tx_coalesce_usecs = (uint8_t)usecs;
1128
1129         return (err);
1130 }
1131
1132 static int
1133 qlnx_set_rx_coalesce(SYSCTL_HANDLER_ARGS)
1134 {
1135         int                     err, i, ret = 0, usecs = 0;
1136         qlnx_host_t             *ha;
1137         struct ecore_hwfn       *p_hwfn;
1138         struct qlnx_fastpath    *fp;
1139
1140         err = sysctl_handle_int(oidp, &usecs, 0, req);
1141
1142         if (err || !req->newptr || !usecs || (usecs > 255))
1143                 return (err);
1144
1145         ha = (qlnx_host_t *)arg1;
1146
1147         for (i = 0; i < ha->num_rss; i++) {
1148
1149                 p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];
1150
1151                 fp = &ha->fp_array[i];
1152
1153                 if (fp->rxq->handle != NULL) {
1154                         ret = ecore_set_queue_coalesce(p_hwfn, (uint16_t)usecs,
1155                                          0, fp->rxq->handle);
1156                 }
1157         }
1158
1159         if (!ret)
1160                 ha->rx_coalesce_usecs = (uint8_t)usecs;
1161
1162         return (err);
1163 }
1164
1165 static void
1166 qlnx_add_sp_stats_sysctls(qlnx_host_t *ha)
1167 {
1168         struct sysctl_ctx_list  *ctx;
1169         struct sysctl_oid_list  *children;
1170         struct sysctl_oid       *ctx_oid;
1171
1172         ctx = device_get_sysctl_ctx(ha->pci_dev);
1173         children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
1174
1175         ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "spstat",
1176                         CTLFLAG_RD, NULL, "spstat");
1177         children = SYSCTL_CHILDREN(ctx_oid);
1178
1179         SYSCTL_ADD_QUAD(ctx, children,
1180                 OID_AUTO, "sp_interrupts",
1181                 CTLFLAG_RD, &ha->sp_interrupts,
1182                 "No. of slowpath interrupts");
1183
1184         return;
1185 }
1186
1187 static void
1188 qlnx_add_fp_stats_sysctls(qlnx_host_t *ha)
1189 {
1190         struct sysctl_ctx_list  *ctx;
1191         struct sysctl_oid_list  *children;
1192         struct sysctl_oid_list  *node_children;
1193         struct sysctl_oid       *ctx_oid;
1194         int                     i, j;
1195         uint8_t                 name_str[16];
1196
1197         ctx = device_get_sysctl_ctx(ha->pci_dev);
1198         children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
1199
1200         ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fpstat",
1201                         CTLFLAG_RD, NULL, "fpstat");
1202         children = SYSCTL_CHILDREN(ctx_oid);
1203
1204         for (i = 0; i < ha->num_rss; i++) {
1205
1206                 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1207                 snprintf(name_str, sizeof(name_str), "%d", i);
1208
1209                 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
1210                         CTLFLAG_RD, NULL, name_str);
1211                 node_children = SYSCTL_CHILDREN(ctx_oid);
1212
1213                 /* Tx Related */
1214
1215                 SYSCTL_ADD_QUAD(ctx, node_children,
1216                         OID_AUTO, "tx_pkts_processed",
1217                         CTLFLAG_RD, &ha->fp_array[i].tx_pkts_processed,
1218                         "No. of packets processed for transmission");
1219
1220                 SYSCTL_ADD_QUAD(ctx, node_children,
1221                         OID_AUTO, "tx_pkts_freed",
1222                         CTLFLAG_RD, &ha->fp_array[i].tx_pkts_freed,
1223                         "No. of freed packets");
1224
1225                 SYSCTL_ADD_QUAD(ctx, node_children,
1226                         OID_AUTO, "tx_pkts_transmitted",
1227                         CTLFLAG_RD, &ha->fp_array[i].tx_pkts_transmitted,
1228                         "No. of transmitted packets");
1229
1230                 SYSCTL_ADD_QUAD(ctx, node_children,
1231                         OID_AUTO, "tx_pkts_completed",
1232                         CTLFLAG_RD, &ha->fp_array[i].tx_pkts_completed,
1233                         "No. of transmit completions");
1234
1235                 SYSCTL_ADD_QUAD(ctx, node_children,
1236                         OID_AUTO, "tx_lso_wnd_min_len",
1237                         CTLFLAG_RD, &ha->fp_array[i].tx_lso_wnd_min_len,
1238                         "tx_lso_wnd_min_len");
1239
1240                 SYSCTL_ADD_QUAD(ctx, node_children,
1241                         OID_AUTO, "tx_defrag",
1242                         CTLFLAG_RD, &ha->fp_array[i].tx_defrag,
1243                         "tx_defrag");
1244
1245                 SYSCTL_ADD_QUAD(ctx, node_children,
1246                         OID_AUTO, "tx_nsegs_gt_elem_left",
1247                         CTLFLAG_RD, &ha->fp_array[i].tx_nsegs_gt_elem_left,
1248                         "tx_nsegs_gt_elem_left");
1249
1250                 SYSCTL_ADD_UINT(ctx, node_children,
1251                         OID_AUTO, "tx_tso_max_nsegs",
1252                         CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_nsegs,
1253                         ha->fp_array[i].tx_tso_max_nsegs, "tx_tso_max_nsegs");
1254
1255                 SYSCTL_ADD_UINT(ctx, node_children,
1256                         OID_AUTO, "tx_tso_min_nsegs",
1257                         CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_nsegs,
1258                         ha->fp_array[i].tx_tso_min_nsegs, "tx_tso_min_nsegs");
1259
1260                 SYSCTL_ADD_UINT(ctx, node_children,
1261                         OID_AUTO, "tx_tso_max_pkt_len",
1262                         CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_pkt_len,
1263                         ha->fp_array[i].tx_tso_max_pkt_len,
1264                         "tx_tso_max_pkt_len");
1265
1266                 SYSCTL_ADD_UINT(ctx, node_children,
1267                         OID_AUTO, "tx_tso_min_pkt_len",
1268                         CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_pkt_len,
1269                         ha->fp_array[i].tx_tso_min_pkt_len,
1270                         "tx_tso_min_pkt_len");
1271
1272                 for (j = 0; j < QLNX_FP_MAX_SEGS; j++) {
1273
1274                         bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
1275                         snprintf(name_str, sizeof(name_str),
1276                                 "tx_pkts_nseg_%02d", (j+1));
1277
1278                         SYSCTL_ADD_QUAD(ctx, node_children,
1279                                 OID_AUTO, name_str, CTLFLAG_RD,
1280                                 &ha->fp_array[i].tx_pkts[j], name_str);
1281                 }
1282
1283                 SYSCTL_ADD_QUAD(ctx, node_children,
1284                         OID_AUTO, "err_tx_nsegs_gt_elem_left",
1285                         CTLFLAG_RD, &ha->fp_array[i].err_tx_nsegs_gt_elem_left,
1286                         "err_tx_nsegs_gt_elem_left");
1287
1288                 SYSCTL_ADD_QUAD(ctx, node_children,
1289                         OID_AUTO, "err_tx_dmamap_create",
1290                         CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_create,
1291                         "err_tx_dmamap_create");
1292
1293                 SYSCTL_ADD_QUAD(ctx, node_children,
1294                         OID_AUTO, "err_tx_defrag_dmamap_load",
1295                         CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag_dmamap_load,
1296                         "err_tx_defrag_dmamap_load");
1297
1298                 SYSCTL_ADD_QUAD(ctx, node_children,
1299                         OID_AUTO, "err_tx_non_tso_max_seg",
1300                         CTLFLAG_RD, &ha->fp_array[i].err_tx_non_tso_max_seg,
1301                         "err_tx_non_tso_max_seg");
1302
1303                 SYSCTL_ADD_QUAD(ctx, node_children,
1304                         OID_AUTO, "err_tx_dmamap_load",
1305                         CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_load,
1306                         "err_tx_dmamap_load");
1307
1308                 SYSCTL_ADD_QUAD(ctx, node_children,
1309                         OID_AUTO, "err_tx_defrag",
1310                         CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag,
1311                         "err_tx_defrag");
1312
1313                 SYSCTL_ADD_QUAD(ctx, node_children,
1314                         OID_AUTO, "err_tx_free_pkt_null",
1315                         CTLFLAG_RD, &ha->fp_array[i].err_tx_free_pkt_null,
1316                         "err_tx_free_pkt_null");
1317
1318                 SYSCTL_ADD_QUAD(ctx, node_children,
1319                         OID_AUTO, "err_tx_cons_idx_conflict",
1320                         CTLFLAG_RD, &ha->fp_array[i].err_tx_cons_idx_conflict,
1321                         "err_tx_cons_idx_conflict");
1322
1323                 SYSCTL_ADD_QUAD(ctx, node_children,
1324                         OID_AUTO, "lro_cnt_64",
1325                         CTLFLAG_RD, &ha->fp_array[i].lro_cnt_64,
1326                         "lro_cnt_64");
1327
1328                 SYSCTL_ADD_QUAD(ctx, node_children,
1329                         OID_AUTO, "lro_cnt_128",
1330                         CTLFLAG_RD, &ha->fp_array[i].lro_cnt_128,
1331                         "lro_cnt_128");
1332
1333                 SYSCTL_ADD_QUAD(ctx, node_children,
1334                         OID_AUTO, "lro_cnt_256",
1335                         CTLFLAG_RD, &ha->fp_array[i].lro_cnt_256,
1336                         "lro_cnt_256");
1337
1338                 SYSCTL_ADD_QUAD(ctx, node_children,
1339                         OID_AUTO, "lro_cnt_512",
1340                         CTLFLAG_RD, &ha->fp_array[i].lro_cnt_512,
1341                         "lro_cnt_512");
1342
1343                 SYSCTL_ADD_QUAD(ctx, node_children,
1344                         OID_AUTO, "lro_cnt_1024",
1345                         CTLFLAG_RD, &ha->fp_array[i].lro_cnt_1024,
1346                         "lro_cnt_1024");
1347
1348                 /* Rx Related */
1349
1350                 SYSCTL_ADD_QUAD(ctx, node_children,
1351                         OID_AUTO, "rx_pkts",
1352                         CTLFLAG_RD, &ha->fp_array[i].rx_pkts,
1353                         "No. of received packets");
1354
1355                 SYSCTL_ADD_QUAD(ctx, node_children,
1356                         OID_AUTO, "tpa_start",
1357                         CTLFLAG_RD, &ha->fp_array[i].tpa_start,
1358                         "No. of tpa_start packets");
1359
1360                 SYSCTL_ADD_QUAD(ctx, node_children,
1361                         OID_AUTO, "tpa_cont",
1362                         CTLFLAG_RD, &ha->fp_array[i].tpa_cont,
1363                         "No. of tpa_cont packets");
1364
1365                 SYSCTL_ADD_QUAD(ctx, node_children,
1366                         OID_AUTO, "tpa_end",
1367                         CTLFLAG_RD, &ha->fp_array[i].tpa_end,
1368                         "No. of tpa_end packets");
1369
1370                 SYSCTL_ADD_QUAD(ctx, node_children,
1371                         OID_AUTO, "err_m_getcl",
1372                         CTLFLAG_RD, &ha->fp_array[i].err_m_getcl,
1373                         "err_m_getcl");
1374
1375                 SYSCTL_ADD_QUAD(ctx, node_children,
1376                         OID_AUTO, "err_m_getjcl",
1377                         CTLFLAG_RD, &ha->fp_array[i].err_m_getjcl,
1378                         "err_m_getjcl");
1379
1380                 SYSCTL_ADD_QUAD(ctx, node_children,
1381                         OID_AUTO, "err_rx_hw_errors",
1382                         CTLFLAG_RD, &ha->fp_array[i].err_rx_hw_errors,
1383                         "err_rx_hw_errors");
1384
1385                 SYSCTL_ADD_QUAD(ctx, node_children,
1386                         OID_AUTO, "err_rx_alloc_errors",
1387                         CTLFLAG_RD, &ha->fp_array[i].err_rx_alloc_errors,
1388                         "err_rx_alloc_errors");
1389         }
1390
1391         return;
1392 }
1393
1394 static void
1395 qlnx_add_hw_stats_sysctls(qlnx_host_t *ha)
1396 {
1397         struct sysctl_ctx_list  *ctx;
1398         struct sysctl_oid_list  *children;
1399         struct sysctl_oid       *ctx_oid;
1400
1401         ctx = device_get_sysctl_ctx(ha->pci_dev);
1402         children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
1403
1404         ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "hwstat",
1405                         CTLFLAG_RD, NULL, "hwstat");
1406         children = SYSCTL_CHILDREN(ctx_oid);
1407
1408         SYSCTL_ADD_QUAD(ctx, children,
1409                 OID_AUTO, "no_buff_discards",
1410                 CTLFLAG_RD, &ha->hw_stats.common.no_buff_discards,
1411                 "No. of packets discarded due to lack of buffer");
1412
1413         SYSCTL_ADD_QUAD(ctx, children,
1414                 OID_AUTO, "packet_too_big_discard",
1415                 CTLFLAG_RD, &ha->hw_stats.common.packet_too_big_discard,
1416                 "No. of packets discarded because packet was too big");
1417
1418         SYSCTL_ADD_QUAD(ctx, children,
1419                 OID_AUTO, "ttl0_discard",
1420                 CTLFLAG_RD, &ha->hw_stats.common.ttl0_discard,
1421                 "ttl0_discard");
1422
1423         SYSCTL_ADD_QUAD(ctx, children,
1424                 OID_AUTO, "rx_ucast_bytes",
1425                 CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_bytes,
1426                 "rx_ucast_bytes");
1427
1428         SYSCTL_ADD_QUAD(ctx, children,
1429                 OID_AUTO, "rx_mcast_bytes",
1430                 CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_bytes,
1431                 "rx_mcast_bytes");
1432
1433         SYSCTL_ADD_QUAD(ctx, children,
1434                 OID_AUTO, "rx_bcast_bytes",
1435                 CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_bytes,
1436                 "rx_bcast_bytes");
1437
1438         SYSCTL_ADD_QUAD(ctx, children,
1439                 OID_AUTO, "rx_ucast_pkts",
1440                 CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_pkts,
1441                 "rx_ucast_pkts");
1442
1443         SYSCTL_ADD_QUAD(ctx, children,
1444                 OID_AUTO, "rx_mcast_pkts",
1445                 CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_pkts,
1446                 "rx_mcast_pkts");
1447
1448         SYSCTL_ADD_QUAD(ctx, children,
1449                 OID_AUTO, "rx_bcast_pkts",
1450                 CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_pkts,
1451                 "rx_bcast_pkts");
1452
1453         SYSCTL_ADD_QUAD(ctx, children,
1454                 OID_AUTO, "mftag_filter_discards",
1455                 CTLFLAG_RD, &ha->hw_stats.common.mftag_filter_discards,
1456                 "mftag_filter_discards");
1457
1458         SYSCTL_ADD_QUAD(ctx, children,
1459                 OID_AUTO, "mac_filter_discards",
1460                 CTLFLAG_RD, &ha->hw_stats.common.mac_filter_discards,
1461                 "mac_filter_discards");
1462
1463         SYSCTL_ADD_QUAD(ctx, children,
1464                 OID_AUTO, "tx_ucast_bytes",
1465                 CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_bytes,
1466                 "tx_ucast_bytes");
1467
1468         SYSCTL_ADD_QUAD(ctx, children,
1469                 OID_AUTO, "tx_mcast_bytes",
1470                 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_bytes,
1471                 "tx_mcast_bytes");
1472
1473         SYSCTL_ADD_QUAD(ctx, children,
1474                 OID_AUTO, "tx_bcast_bytes",
1475                 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_bytes,
1476                 "tx_bcast_bytes");
1477
1478         SYSCTL_ADD_QUAD(ctx, children,
1479                 OID_AUTO, "tx_ucast_pkts",
1480                 CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_pkts,
1481                 "tx_ucast_pkts");
1482
1483         SYSCTL_ADD_QUAD(ctx, children,
1484                 OID_AUTO, "tx_mcast_pkts",
1485                 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_pkts,
1486                 "tx_mcast_pkts");
1487
1488         SYSCTL_ADD_QUAD(ctx, children,
1489                 OID_AUTO, "tx_bcast_pkts",
1490                 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_pkts,
1491                 "tx_bcast_pkts");
1492
1493         SYSCTL_ADD_QUAD(ctx, children,
1494                 OID_AUTO, "tx_err_drop_pkts",
1495                 CTLFLAG_RD, &ha->hw_stats.common.tx_err_drop_pkts,
1496                 "tx_err_drop_pkts");
1497
1498         SYSCTL_ADD_QUAD(ctx, children,
1499                 OID_AUTO, "tpa_coalesced_pkts",
1500                 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_pkts,
1501                 "tpa_coalesced_pkts");
1502
1503         SYSCTL_ADD_QUAD(ctx, children,
1504                 OID_AUTO, "tpa_coalesced_events",
1505                 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_events,
1506                 "tpa_coalesced_events");
1507
1508         SYSCTL_ADD_QUAD(ctx, children,
1509                 OID_AUTO, "tpa_aborts_num",
1510                 CTLFLAG_RD, &ha->hw_stats.common.tpa_aborts_num,
1511                 "tpa_aborts_num");
1512
1513         SYSCTL_ADD_QUAD(ctx, children,
1514                 OID_AUTO, "tpa_not_coalesced_pkts",
1515                 CTLFLAG_RD, &ha->hw_stats.common.tpa_not_coalesced_pkts,
1516                 "tpa_not_coalesced_pkts");
1517
1518         SYSCTL_ADD_QUAD(ctx, children,
1519                 OID_AUTO, "tpa_coalesced_bytes",
1520                 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_bytes,
1521                 "tpa_coalesced_bytes");
1522
1523         SYSCTL_ADD_QUAD(ctx, children,
1524                 OID_AUTO, "rx_64_byte_packets",
1525                 CTLFLAG_RD, &ha->hw_stats.common.rx_64_byte_packets,
1526                 "rx_64_byte_packets");
1527
1528         SYSCTL_ADD_QUAD(ctx, children,
1529                 OID_AUTO, "rx_65_to_127_byte_packets",
1530                 CTLFLAG_RD, &ha->hw_stats.common.rx_65_to_127_byte_packets,
1531                 "rx_65_to_127_byte_packets");
1532
1533         SYSCTL_ADD_QUAD(ctx, children,
1534                 OID_AUTO, "rx_128_to_255_byte_packets",
1535                 CTLFLAG_RD, &ha->hw_stats.common.rx_128_to_255_byte_packets,
1536                 "rx_128_to_255_byte_packets");
1537
1538         SYSCTL_ADD_QUAD(ctx, children,
1539                 OID_AUTO, "rx_256_to_511_byte_packets",
1540                 CTLFLAG_RD, &ha->hw_stats.common.rx_256_to_511_byte_packets,
1541                 "rx_256_to_511_byte_packets");
1542
1543         SYSCTL_ADD_QUAD(ctx, children,
1544                 OID_AUTO, "rx_512_to_1023_byte_packets",
1545                 CTLFLAG_RD, &ha->hw_stats.common.rx_512_to_1023_byte_packets,
1546                 "rx_512_to_1023_byte_packets");
1547
1548         SYSCTL_ADD_QUAD(ctx, children,
1549                 OID_AUTO, "rx_1024_to_1518_byte_packets",
1550                 CTLFLAG_RD, &ha->hw_stats.common.rx_1024_to_1518_byte_packets,
1551                 "rx_1024_to_1518_byte_packets");
1552
1553         SYSCTL_ADD_QUAD(ctx, children,
1554                 OID_AUTO, "rx_1519_to_1522_byte_packets",
1555                 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_1522_byte_packets,
1556                 "rx_1519_to_1522_byte_packets");
1557
1558         SYSCTL_ADD_QUAD(ctx, children,
1559                 OID_AUTO, "rx_1523_to_2047_byte_packets",
1560                 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_2047_byte_packets,
1561                 "rx_1523_to_2047_byte_packets");
1562
1563         SYSCTL_ADD_QUAD(ctx, children,
1564                 OID_AUTO, "rx_2048_to_4095_byte_packets",
1565                 CTLFLAG_RD, &ha->hw_stats.bb.rx_2048_to_4095_byte_packets,
1566                 "rx_2048_to_4095_byte_packets");
1567
1568         SYSCTL_ADD_QUAD(ctx, children,
1569                 OID_AUTO, "rx_4096_to_9216_byte_packets",
1570                 CTLFLAG_RD, &ha->hw_stats.bb.rx_4096_to_9216_byte_packets,
1571                 "rx_4096_to_9216_byte_packets");
1572
1573         SYSCTL_ADD_QUAD(ctx, children,
1574                 OID_AUTO, "rx_9217_to_16383_byte_packets",
1575                 CTLFLAG_RD, &ha->hw_stats.bb.rx_9217_to_16383_byte_packets,
1576                 "rx_9217_to_16383_byte_packets");
1577
1578         SYSCTL_ADD_QUAD(ctx, children,
1579                 OID_AUTO, "rx_crc_errors",
1580                 CTLFLAG_RD, &ha->hw_stats.common.rx_crc_errors,
1581                 "rx_crc_errors");
1582
1583         SYSCTL_ADD_QUAD(ctx, children,
1584                 OID_AUTO, "rx_mac_crtl_frames",
1585                 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_crtl_frames,
1586                 "rx_mac_crtl_frames");
1587
1588         SYSCTL_ADD_QUAD(ctx, children,
1589                 OID_AUTO, "rx_pause_frames",
1590                 CTLFLAG_RD, &ha->hw_stats.common.rx_pause_frames,
1591                 "rx_pause_frames");
1592
1593         SYSCTL_ADD_QUAD(ctx, children,
1594                 OID_AUTO, "rx_pfc_frames",
1595                 CTLFLAG_RD, &ha->hw_stats.common.rx_pfc_frames,
1596                 "rx_pfc_frames");
1597
1598         SYSCTL_ADD_QUAD(ctx, children,
1599                 OID_AUTO, "rx_align_errors",
1600                 CTLFLAG_RD, &ha->hw_stats.common.rx_align_errors,
1601                 "rx_align_errors");
1602
1603         SYSCTL_ADD_QUAD(ctx, children,
1604                 OID_AUTO, "rx_carrier_errors",
1605                 CTLFLAG_RD, &ha->hw_stats.common.rx_carrier_errors,
1606                 "rx_carrier_errors");
1607
1608         SYSCTL_ADD_QUAD(ctx, children,
1609                 OID_AUTO, "rx_oversize_packets",
1610                 CTLFLAG_RD, &ha->hw_stats.common.rx_oversize_packets,
1611                 "rx_oversize_packets");
1612
1613         SYSCTL_ADD_QUAD(ctx, children,
1614                 OID_AUTO, "rx_jabbers",
1615                 CTLFLAG_RD, &ha->hw_stats.common.rx_jabbers,
1616                 "rx_jabbers");
1617
1618         SYSCTL_ADD_QUAD(ctx, children,
1619                 OID_AUTO, "rx_undersize_packets",
1620                 CTLFLAG_RD, &ha->hw_stats.common.rx_undersize_packets,
1621                 "rx_undersize_packets");
1622
1623         SYSCTL_ADD_QUAD(ctx, children,
1624                 OID_AUTO, "rx_fragments",
1625                 CTLFLAG_RD, &ha->hw_stats.common.rx_fragments,
1626                 "rx_fragments");
1627
1628         SYSCTL_ADD_QUAD(ctx, children,
1629                 OID_AUTO, "tx_64_byte_packets",
1630                 CTLFLAG_RD, &ha->hw_stats.common.tx_64_byte_packets,
1631                 "tx_64_byte_packets");
1632
1633         SYSCTL_ADD_QUAD(ctx, children,
1634                 OID_AUTO, "tx_65_to_127_byte_packets",
1635                 CTLFLAG_RD, &ha->hw_stats.common.tx_65_to_127_byte_packets,
1636                 "tx_65_to_127_byte_packets");
1637
1638         SYSCTL_ADD_QUAD(ctx, children,
1639                 OID_AUTO, "tx_128_to_255_byte_packets",
1640                 CTLFLAG_RD, &ha->hw_stats.common.tx_128_to_255_byte_packets,
1641                 "tx_128_to_255_byte_packets");
1642
1643         SYSCTL_ADD_QUAD(ctx, children,
1644                 OID_AUTO, "tx_256_to_511_byte_packets",
1645                 CTLFLAG_RD, &ha->hw_stats.common.tx_256_to_511_byte_packets,
1646                 "tx_256_to_511_byte_packets");
1647
1648         SYSCTL_ADD_QUAD(ctx, children,
1649                 OID_AUTO, "tx_512_to_1023_byte_packets",
1650                 CTLFLAG_RD, &ha->hw_stats.common.tx_512_to_1023_byte_packets,
1651                 "tx_512_to_1023_byte_packets");
1652
1653         SYSCTL_ADD_QUAD(ctx, children,
1654                 OID_AUTO, "tx_1024_to_1518_byte_packets",
1655                 CTLFLAG_RD, &ha->hw_stats.common.tx_1024_to_1518_byte_packets,
1656                 "tx_1024_to_1518_byte_packets");
1657
1658         SYSCTL_ADD_QUAD(ctx, children,
1659                 OID_AUTO, "tx_1519_to_2047_byte_packets",
1660                 CTLFLAG_RD, &ha->hw_stats.bb.tx_1519_to_2047_byte_packets,
1661                 "tx_1519_to_2047_byte_packets");
1662
1663         SYSCTL_ADD_QUAD(ctx, children,
1664                 OID_AUTO, "tx_2048_to_4095_byte_packets",
1665                 CTLFLAG_RD, &ha->hw_stats.bb.tx_2048_to_4095_byte_packets,
1666                 "tx_2048_to_4095_byte_packets");
1667
1668         SYSCTL_ADD_QUAD(ctx, children,
1669                 OID_AUTO, "tx_4096_to_9216_byte_packets",
1670                 CTLFLAG_RD, &ha->hw_stats.bb.tx_4096_to_9216_byte_packets,
1671                 "tx_4096_to_9216_byte_packets");
1672
1673         SYSCTL_ADD_QUAD(ctx, children,
1674                 OID_AUTO, "tx_9217_to_16383_byte_packets",
1675                 CTLFLAG_RD, &ha->hw_stats.bb.tx_9217_to_16383_byte_packets,
1676                 "tx_9217_to_16383_byte_packets");
1677
1678         SYSCTL_ADD_QUAD(ctx, children,
1679                 OID_AUTO, "tx_pause_frames",
1680                 CTLFLAG_RD, &ha->hw_stats.common.tx_pause_frames,
1681                 "tx_pause_frames");
1682
1683         SYSCTL_ADD_QUAD(ctx, children,
1684                 OID_AUTO, "tx_pfc_frames",
1685                 CTLFLAG_RD, &ha->hw_stats.common.tx_pfc_frames,
1686                 "tx_pfc_frames");
1687
1688         SYSCTL_ADD_QUAD(ctx, children,
1689                 OID_AUTO, "tx_lpi_entry_count",
1690                 CTLFLAG_RD, &ha->hw_stats.bb.tx_lpi_entry_count,
1691                 "tx_lpi_entry_count");
1692
1693         SYSCTL_ADD_QUAD(ctx, children,
1694                 OID_AUTO, "tx_total_collisions",
1695                 CTLFLAG_RD, &ha->hw_stats.bb.tx_total_collisions,
1696                 "tx_total_collisions");
1697
1698         SYSCTL_ADD_QUAD(ctx, children,
1699                 OID_AUTO, "brb_truncates",
1700                 CTLFLAG_RD, &ha->hw_stats.common.brb_truncates,
1701                 "brb_truncates");
1702
1703         SYSCTL_ADD_QUAD(ctx, children,
1704                 OID_AUTO, "brb_discards",
1705                 CTLFLAG_RD, &ha->hw_stats.common.brb_discards,
1706                 "brb_discards");
1707
1708         SYSCTL_ADD_QUAD(ctx, children,
1709                 OID_AUTO, "rx_mac_bytes",
1710                 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bytes,
1711                 "rx_mac_bytes");
1712
1713         SYSCTL_ADD_QUAD(ctx, children,
1714                 OID_AUTO, "rx_mac_uc_packets",
1715                 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_uc_packets,
1716                 "rx_mac_uc_packets");
1717
1718         SYSCTL_ADD_QUAD(ctx, children,
1719                 OID_AUTO, "rx_mac_mc_packets",
1720                 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_mc_packets,
1721                 "rx_mac_mc_packets");
1722
1723         SYSCTL_ADD_QUAD(ctx, children,
1724                 OID_AUTO, "rx_mac_bc_packets",
1725                 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bc_packets,
1726                 "rx_mac_bc_packets");
1727
1728         SYSCTL_ADD_QUAD(ctx, children,
1729                 OID_AUTO, "rx_mac_frames_ok",
1730                 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_frames_ok,
1731                 "rx_mac_frames_ok");
1732
1733         SYSCTL_ADD_QUAD(ctx, children,
1734                 OID_AUTO, "tx_mac_bytes",
1735                 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bytes,
1736                 "tx_mac_bytes");
1737
1738         SYSCTL_ADD_QUAD(ctx, children,
1739                 OID_AUTO, "tx_mac_uc_packets",
1740                 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_uc_packets,
1741                 "tx_mac_uc_packets");
1742
1743         SYSCTL_ADD_QUAD(ctx, children,
1744                 OID_AUTO, "tx_mac_mc_packets",
1745                 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_mc_packets,
1746                 "tx_mac_mc_packets");
1747
1748         SYSCTL_ADD_QUAD(ctx, children,
1749                 OID_AUTO, "tx_mac_bc_packets",
1750                 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bc_packets,
1751                 "tx_mac_bc_packets");
1752
1753         SYSCTL_ADD_QUAD(ctx, children,
1754                 OID_AUTO, "tx_mac_ctrl_frames",
1755                 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_ctrl_frames,
1756                 "tx_mac_ctrl_frames");
1757         return;
1758 }
1759
1760 static void
1761 qlnx_add_sysctls(qlnx_host_t *ha)
1762 {
1763         device_t                dev = ha->pci_dev;
1764         struct sysctl_ctx_list  *ctx;
1765         struct sysctl_oid_list  *children;
1766
1767         ctx = device_get_sysctl_ctx(dev);
1768         children = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
1769
1770         qlnx_add_fp_stats_sysctls(ha);
1771         qlnx_add_sp_stats_sysctls(ha);
1772         qlnx_add_hw_stats_sysctls(ha);
1773
1774         SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Driver_Version",
1775                 CTLFLAG_RD, qlnx_ver_str, 0,
1776                 "Driver Version");
1777
1778         SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "STORMFW_Version",
1779                 CTLFLAG_RD, ha->stormfw_ver, 0,
1780                 "STORM Firmware Version");
1781
1782         SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "MFW_Version",
1783                 CTLFLAG_RD, ha->mfw_ver, 0,
1784                 "Management Firmware Version");
1785
1786         SYSCTL_ADD_UINT(ctx, children,
1787                 OID_AUTO, "personality", CTLFLAG_RD,
1788                 &ha->personality, ha->personality,
1789                 "\tpersonality = 0 => Ethernet Only\n"
1790                 "\tpersonality = 3 => Ethernet and RoCE\n"
1791                 "\tpersonality = 4 => Ethernet and iWARP\n"
1792                 "\tpersonality = 6 => Default in Shared Memory\n");
1793
1794         ha->dbg_level = 0;
1795         SYSCTL_ADD_UINT(ctx, children,
1796                 OID_AUTO, "debug", CTLFLAG_RW,
1797                 &ha->dbg_level, ha->dbg_level, "Debug Level");
1798
1799         ha->dp_level = 0x01;
1800         SYSCTL_ADD_UINT(ctx, children,
1801                 OID_AUTO, "dp_level", CTLFLAG_RW,
1802                 &ha->dp_level, ha->dp_level, "DP Level");
1803
1804         ha->dbg_trace_lro_cnt = 0;
1805         SYSCTL_ADD_UINT(ctx, children,
1806                 OID_AUTO, "dbg_trace_lro_cnt", CTLFLAG_RW,
1807                 &ha->dbg_trace_lro_cnt, ha->dbg_trace_lro_cnt,
1808                 "Trace LRO Counts");
1809
1810         ha->dbg_trace_tso_pkt_len = 0;
1811         SYSCTL_ADD_UINT(ctx, children,
1812                 OID_AUTO, "dbg_trace_tso_pkt_len", CTLFLAG_RW,
1813                 &ha->dbg_trace_tso_pkt_len, ha->dbg_trace_tso_pkt_len,
1814                 "Trace TSO packet lengths");
1815
1816         ha->dp_module = 0;
1817         SYSCTL_ADD_UINT(ctx, children,
1818                 OID_AUTO, "dp_module", CTLFLAG_RW,
1819                 &ha->dp_module, ha->dp_module, "DP Module");
1820
1821         ha->err_inject = 0;
1822
1823         SYSCTL_ADD_UINT(ctx, children,
1824                 OID_AUTO, "err_inject", CTLFLAG_RW,
1825                 &ha->err_inject, ha->err_inject, "Error Inject");
1826
1827         ha->storm_stats_enable = 0;
1828
1829         SYSCTL_ADD_UINT(ctx, children,
1830                 OID_AUTO, "storm_stats_enable", CTLFLAG_RW,
1831                 &ha->storm_stats_enable, ha->storm_stats_enable,
1832                 "Enable Storm Statistics Gathering");
1833
1834         ha->storm_stats_index = 0;
1835
1836         SYSCTL_ADD_UINT(ctx, children,
1837                 OID_AUTO, "storm_stats_index", CTLFLAG_RD,
1838                 &ha->storm_stats_index, ha->storm_stats_index,
1839                 "Enable Storm Statistics Gathering Current Index");
1840
1841         ha->grcdump_taken = 0;
1842         SYSCTL_ADD_UINT(ctx, children,
1843                 OID_AUTO, "grcdump_taken", CTLFLAG_RD,
1844                 &ha->grcdump_taken, ha->grcdump_taken, "grcdump_taken");
1845
1846         ha->idle_chk_taken = 0;
1847         SYSCTL_ADD_UINT(ctx, children,
1848                 OID_AUTO, "idle_chk_taken", CTLFLAG_RD,
1849                 &ha->idle_chk_taken, ha->idle_chk_taken, "idle_chk_taken");
1850
1851         SYSCTL_ADD_UINT(ctx, children,
1852                 OID_AUTO, "rx_coalesce_usecs", CTLFLAG_RD,
1853                 &ha->rx_coalesce_usecs, ha->rx_coalesce_usecs,
1854                 "rx_coalesce_usecs");
1855
1856         SYSCTL_ADD_UINT(ctx, children,
1857                 OID_AUTO, "tx_coalesce_usecs", CTLFLAG_RD,
1858                 &ha->tx_coalesce_usecs, ha->tx_coalesce_usecs,
1859                 "tx_coalesce_usecs");
1860
1861         ha->rx_pkt_threshold = 128;
1862         SYSCTL_ADD_UINT(ctx, children,
1863                 OID_AUTO, "rx_pkt_threshold", CTLFLAG_RW,
1864                 &ha->rx_pkt_threshold, ha->rx_pkt_threshold,
1865                 "No. of Rx Pkts to process at a time");
1866
1867         ha->rx_jumbo_buf_eq_mtu = 0;
1868         SYSCTL_ADD_UINT(ctx, children,
1869                 OID_AUTO, "rx_jumbo_buf_eq_mtu", CTLFLAG_RW,
1870                 &ha->rx_jumbo_buf_eq_mtu, ha->rx_jumbo_buf_eq_mtu,
1871                 "== 0 => Rx Jumbo buffers are capped to 4Kbytes\n"
1872                 "otherwise Rx Jumbo buffers are set to >= MTU size\n");
1873
1874         SYSCTL_ADD_PROC(ctx, children,
1875                 OID_AUTO, "trigger_dump", CTLTYPE_INT | CTLFLAG_RW,
1876                 (void *)ha, 0,
1877                 qlnx_trigger_dump_sysctl, "I", "trigger_dump");
1878
1879         SYSCTL_ADD_PROC(ctx, children,
1880                 OID_AUTO, "set_rx_coalesce_usecs", CTLTYPE_INT | CTLFLAG_RW,
1881                 (void *)ha, 0,
1882                 qlnx_set_rx_coalesce, "I",
1883                 "rx interrupt coalesce period microseconds");
1884
1885         SYSCTL_ADD_PROC(ctx, children,
1886                 OID_AUTO, "set_tx_coalesce_usecs", CTLTYPE_INT | CTLFLAG_RW,
1887                 (void *)ha, 0,
1888                 qlnx_set_tx_coalesce, "I",
1889                 "tx interrupt coalesce period microseconds");
1890
1891         SYSCTL_ADD_QUAD(ctx, children,
1892                 OID_AUTO, "err_illegal_intr", CTLFLAG_RD,
1893                 &ha->err_illegal_intr, "err_illegal_intr");
1894
1895         SYSCTL_ADD_QUAD(ctx, children,
1896                 OID_AUTO, "err_fp_null", CTLFLAG_RD,
1897                 &ha->err_fp_null, "err_fp_null");
1898
1899         SYSCTL_ADD_QUAD(ctx, children,
1900                 OID_AUTO, "err_get_proto_invalid_type", CTLFLAG_RD,
1901                 &ha->err_get_proto_invalid_type, "err_get_proto_invalid_type");
1902         return;
1903 }
1904
1905
1906
1907 /*****************************************************************************
1908  * Operating System Network Interface Functions
1909  *****************************************************************************/
1910
1911 static void
1912 qlnx_init_ifnet(device_t dev, qlnx_host_t *ha)
1913 {
1914         uint16_t        device_id;
1915         struct ifnet    *ifp;
1916
1917         ifp = ha->ifp = if_alloc(IFT_ETHER);
1918
1919         if (ifp == NULL)
1920                 panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));
1921
1922         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1923
1924         device_id = pci_get_device(ha->pci_dev);
1925
1926 #if __FreeBSD_version >= 1000000
1927
1928         if (device_id == QLOGIC_PCI_DEVICE_ID_1634) 
1929                 ifp->if_baudrate = IF_Gbps(40);
1930         else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
1931                         (device_id == QLOGIC_PCI_DEVICE_ID_8070))
1932                 ifp->if_baudrate = IF_Gbps(25);
1933         else if (device_id == QLOGIC_PCI_DEVICE_ID_1654)
1934                 ifp->if_baudrate = IF_Gbps(50);
1935         else if (device_id == QLOGIC_PCI_DEVICE_ID_1644)
1936                 ifp->if_baudrate = IF_Gbps(100);
1937
1938         ifp->if_capabilities = IFCAP_LINKSTATE;
1939 #else
1940         ifp->if_mtu = ETHERMTU;
1941         ifp->if_baudrate = (1 * 1000 * 1000 *1000);
1942
1943 #endif /* #if __FreeBSD_version >= 1000000 */
1944
1945         ifp->if_init = qlnx_init;
1946         ifp->if_softc = ha;
1947         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1948         ifp->if_ioctl = qlnx_ioctl;
1949         ifp->if_transmit = qlnx_transmit;
1950         ifp->if_qflush = qlnx_qflush;
1951
1952         IFQ_SET_MAXLEN(&ifp->if_snd, qlnx_get_ifq_snd_maxlen(ha));
1953         ifp->if_snd.ifq_drv_maxlen = qlnx_get_ifq_snd_maxlen(ha);
1954         IFQ_SET_READY(&ifp->if_snd);
1955
1956 #if __FreeBSD_version >= 1100036
1957         if_setgetcounterfn(ifp, qlnx_get_counter);
1958 #endif
1959
1960         ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1961
1962         memcpy(ha->primary_mac, qlnx_get_mac_addr(ha), ETH_ALEN);
1963         ether_ifattach(ifp, ha->primary_mac);
1964         bcopy(IF_LLADDR(ha->ifp), ha->primary_mac, ETHER_ADDR_LEN);
1965
1966         ifp->if_capabilities = IFCAP_HWCSUM;
1967         ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1968
1969         ifp->if_capabilities |= IFCAP_VLAN_MTU;
1970         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
1971         ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
1972         ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
1973         ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
1974         ifp->if_capabilities |= IFCAP_TSO4;
1975         ifp->if_capabilities |= IFCAP_TSO6;
1976         ifp->if_capabilities |= IFCAP_LRO;
1977
1978         ifp->if_capenable = ifp->if_capabilities;
1979
1980         ifp->if_hwassist = CSUM_IP;
1981         ifp->if_hwassist |= CSUM_TCP | CSUM_UDP;
1982         ifp->if_hwassist |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6;
1983         ifp->if_hwassist |= CSUM_TSO;
1984
1985         ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1986
1987         ifmedia_init(&ha->media, IFM_IMASK, qlnx_media_change,\
1988                 qlnx_media_status);
1989
1990         if (device_id == QLOGIC_PCI_DEVICE_ID_1634) {
1991                 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_LR4), 0, NULL);
1992                 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_SR4), 0, NULL);
1993                 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_CR4), 0, NULL);
1994         } else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
1995                         (device_id == QLOGIC_PCI_DEVICE_ID_8070)) {
1996                 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_SR), 0, NULL);
1997                 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_CR), 0, NULL);
1998         } else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) {
1999                 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_KR2), 0, NULL);
2000                 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_CR2), 0, NULL);
2001         } else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) {
2002                 ifmedia_add(&ha->media,
2003                         (IFM_ETHER | QLNX_IFM_100G_LR4), 0, NULL);
2004                 ifmedia_add(&ha->media,
2005                         (IFM_ETHER | QLNX_IFM_100G_SR4), 0, NULL);
2006                 ifmedia_add(&ha->media,
2007                         (IFM_ETHER | QLNX_IFM_100G_CR4), 0, NULL);
2008         }
2009
2010         ifmedia_add(&ha->media, (IFM_ETHER | IFM_FDX), 0, NULL);
2011         ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);
2012
2013
2014         ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));
2015
2016         QL_DPRINT2(ha, "exit\n");
2017
2018         return;
2019 }
2020
2021 static void
2022 qlnx_init_locked(qlnx_host_t *ha)
2023 {
2024         struct ifnet    *ifp = ha->ifp;
2025
2026         QL_DPRINT1(ha, "Driver Initialization start \n");
2027
2028         qlnx_stop(ha);
2029
2030         if (qlnx_load(ha) == 0) {
2031                 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2032                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2033         }
2034
2035         return;
2036 }
2037
2038 static void
2039 qlnx_init(void *arg)
2040 {
2041         qlnx_host_t     *ha;
2042
2043         ha = (qlnx_host_t *)arg;
2044
2045         QL_DPRINT2(ha, "enter\n");
2046
2047         QLNX_LOCK(ha);
2048         qlnx_init_locked(ha);
2049         QLNX_UNLOCK(ha);
2050
2051         QL_DPRINT2(ha, "exit\n");
2052
2053         return;
2054 }
2055
2056 static int
2057 qlnx_config_mcast_mac_addr(qlnx_host_t *ha, uint8_t *mac_addr, uint32_t add_mac)
2058 {
2059         struct ecore_filter_mcast       *mcast;
2060         struct ecore_dev                *cdev;
2061         int                             rc;
2062
2063         cdev = &ha->cdev;
2064
2065         mcast = &ha->ecore_mcast;
2066         bzero(mcast, sizeof(struct ecore_filter_mcast));
2067
2068         if (add_mac)
2069                 mcast->opcode = ECORE_FILTER_ADD;
2070         else
2071                 mcast->opcode = ECORE_FILTER_REMOVE;
2072
2073         mcast->num_mc_addrs = 1;
2074         memcpy(mcast->mac, mac_addr, ETH_ALEN);
2075
2076         rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);
2077
2078         return (rc);
2079 }
2080
2081 static int
2082 qlnx_hw_add_mcast(qlnx_host_t *ha, uint8_t *mta)
2083 {
2084         int     i;
2085
2086         for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2087
2088                 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0)
2089                         return 0; /* its been already added */
2090         }
2091
2092         for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2093
2094                 if ((ha->mcast[i].addr[0] == 0) &&
2095                         (ha->mcast[i].addr[1] == 0) &&
2096                         (ha->mcast[i].addr[2] == 0) &&
2097                         (ha->mcast[i].addr[3] == 0) &&
2098                         (ha->mcast[i].addr[4] == 0) &&
2099                         (ha->mcast[i].addr[5] == 0)) {
2100
2101                         if (qlnx_config_mcast_mac_addr(ha, mta, 1))
2102                                 return (-1);
2103
2104                         bcopy(mta, ha->mcast[i].addr, ETH_ALEN);
2105                         ha->nmcast++;
2106
2107                         return 0;
2108                 }
2109         }
2110         return 0;
2111 }
2112
2113 static int
2114 qlnx_hw_del_mcast(qlnx_host_t *ha, uint8_t *mta)
2115 {
2116         int     i;
2117
2118         for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2119                 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) {
2120
2121                         if (qlnx_config_mcast_mac_addr(ha, mta, 0))
2122                                 return (-1);
2123
2124                         ha->mcast[i].addr[0] = 0;
2125                         ha->mcast[i].addr[1] = 0;
2126                         ha->mcast[i].addr[2] = 0;
2127                         ha->mcast[i].addr[3] = 0;
2128                         ha->mcast[i].addr[4] = 0;
2129                         ha->mcast[i].addr[5] = 0;
2130
2131                         ha->nmcast--;
2132
2133                         return 0;
2134                 }
2135         }
2136         return 0;
2137 }
2138
2139 /*
2140  * Name: qls_hw_set_multi
2141  * Function: Sets the Multicast Addresses provided the host O.S into the
2142  *      hardware (for the given interface)
2143  */
2144 static void
2145 qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
2146         uint32_t add_mac)
2147 {
2148         int     i;
2149
2150         for (i = 0; i < mcnt; i++) {
2151                 if (add_mac) {
2152                         if (qlnx_hw_add_mcast(ha, mta))
2153                                 break;
2154                 } else {
2155                         if (qlnx_hw_del_mcast(ha, mta))
2156                                 break;
2157                 }
2158
2159                 mta += ETHER_HDR_LEN;
2160         }
2161         return;
2162 }
2163
2164
2165 #define QLNX_MCAST_ADDRS_SIZE (QLNX_MAX_NUM_MULTICAST_ADDRS * ETHER_HDR_LEN)
2166 static int
2167 qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi)
2168 {
2169         uint8_t                 mta[QLNX_MCAST_ADDRS_SIZE];
2170         struct ifmultiaddr      *ifma;
2171         int                     mcnt = 0;
2172         struct ifnet            *ifp = ha->ifp;
2173         int                     ret = 0;
2174
2175         if_maddr_rlock(ifp);
2176
2177         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2178
2179                 if (ifma->ifma_addr->sa_family != AF_LINK)
2180                         continue;
2181
2182                 if (mcnt == QLNX_MAX_NUM_MULTICAST_ADDRS)
2183                         break;
2184
2185                 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
2186                         &mta[mcnt * ETHER_HDR_LEN], ETHER_HDR_LEN);
2187
2188                 mcnt++;
2189         }
2190
2191         if_maddr_runlock(ifp);
2192
2193         QLNX_LOCK(ha);
2194         qlnx_hw_set_multi(ha, mta, mcnt, add_multi);
2195         QLNX_UNLOCK(ha);
2196
2197         return (ret);
2198 }
2199
2200 static int
2201 qlnx_set_promisc(qlnx_host_t *ha)
2202 {
2203         int     rc = 0;
2204         uint8_t filter;
2205
2206         filter = ha->filter;
2207         filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
2208         filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
2209
2210         rc = qlnx_set_rx_accept_filter(ha, filter);
2211         return (rc);
2212 }
2213
2214 static int
2215 qlnx_set_allmulti(qlnx_host_t *ha)
2216 {
2217         int     rc = 0;
2218         uint8_t filter;
2219
2220         filter = ha->filter;
2221         filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
2222         rc = qlnx_set_rx_accept_filter(ha, filter);
2223
2224         return (rc);
2225 }
2226
2227
2228 static int
2229 qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2230 {
2231         int             ret = 0, mask;
2232         struct ifreq    *ifr = (struct ifreq *)data;
2233         struct ifaddr   *ifa = (struct ifaddr *)data;
2234         qlnx_host_t     *ha;
2235
2236         ha = (qlnx_host_t *)ifp->if_softc;
2237
2238         switch (cmd) {
2239         case SIOCSIFADDR:
2240                 QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx)\n", cmd);
2241
2242                 if (ifa->ifa_addr->sa_family == AF_INET) {
2243                         ifp->if_flags |= IFF_UP;
2244                         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2245                                 QLNX_LOCK(ha);
2246                                 qlnx_init_locked(ha);
2247                                 QLNX_UNLOCK(ha);
2248                         }
2249                         QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
2250                                    cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr));
2251
2252                         arp_ifinit(ifp, ifa);
2253                 } else {
2254                         ether_ioctl(ifp, cmd, data);
2255                 }
2256                 break;
2257
2258         case SIOCSIFMTU:
2259                 QL_DPRINT4(ha, "SIOCSIFMTU (0x%lx)\n", cmd);
2260
2261                 if (ifr->ifr_mtu > QLNX_MAX_MTU) {
2262                         ret = EINVAL;
2263                 } else {
2264                         QLNX_LOCK(ha);
2265                         ifp->if_mtu = ifr->ifr_mtu;
2266                         ha->max_frame_size =
2267                                 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2268                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2269                                 qlnx_init_locked(ha);
2270                         }
2271
2272                         QLNX_UNLOCK(ha);
2273                 }
2274
2275                 break;
2276
2277         case SIOCSIFFLAGS:
2278                 QL_DPRINT4(ha, "SIOCSIFFLAGS (0x%lx)\n", cmd);
2279
2280                 QLNX_LOCK(ha);
2281
2282                 if (ifp->if_flags & IFF_UP) {
2283                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2284                                 if ((ifp->if_flags ^ ha->if_flags) &
2285                                         IFF_PROMISC) {
2286                                         ret = qlnx_set_promisc(ha);
2287                                 } else if ((ifp->if_flags ^ ha->if_flags) &
2288                                         IFF_ALLMULTI) {
2289                                         ret = qlnx_set_allmulti(ha);
2290                                 }
2291                         } else {
2292                                 ha->max_frame_size = ifp->if_mtu +
2293                                         ETHER_HDR_LEN + ETHER_CRC_LEN;
2294                                 qlnx_init_locked(ha);
2295                         }
2296                 } else {
2297                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2298                                 qlnx_stop(ha);
2299                         ha->if_flags = ifp->if_flags;
2300                 }
2301
2302                 QLNX_UNLOCK(ha);
2303                 break;
2304
2305         case SIOCADDMULTI:
2306                 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCADDMULTI", cmd);
2307
2308                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2309                         if (qlnx_set_multi(ha, 1))
2310                                 ret = EINVAL;
2311                 }
2312                 break;
2313
2314         case SIOCDELMULTI:
2315                 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCDELMULTI", cmd);
2316
2317                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2318                         if (qlnx_set_multi(ha, 0))
2319                                 ret = EINVAL;
2320                 }
2321                 break;
2322
2323         case SIOCSIFMEDIA:
2324         case SIOCGIFMEDIA:
2325                 QL_DPRINT4(ha, "SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", cmd);
2326
2327                 ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
2328                 break;
2329
2330         case SIOCSIFCAP:
2331                 
2332                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2333
2334                 QL_DPRINT4(ha, "SIOCSIFCAP (0x%lx)\n", cmd);
2335
2336                 if (mask & IFCAP_HWCSUM)
2337                         ifp->if_capenable ^= IFCAP_HWCSUM;
2338                 if (mask & IFCAP_TSO4)
2339                         ifp->if_capenable ^= IFCAP_TSO4;
2340                 if (mask & IFCAP_TSO6)
2341                         ifp->if_capenable ^= IFCAP_TSO6;
2342                 if (mask & IFCAP_VLAN_HWTAGGING)
2343                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2344                 if (mask & IFCAP_VLAN_HWTSO)
2345                         ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
2346                 if (mask & IFCAP_LRO)
2347                         ifp->if_capenable ^= IFCAP_LRO;
2348
2349                 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
2350                         qlnx_init(ha);
2351
2352                 VLAN_CAPABILITIES(ifp);
2353                 break;
2354
2355 #if (__FreeBSD_version >= 1100101)
2356
2357         case SIOCGI2C:
2358         {
2359                 struct ifi2creq i2c;
2360                 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0];
2361                 struct ecore_ptt *p_ptt;
2362
2363                 ret = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
2364
2365                 if (ret)
2366                         break;
2367
2368                 if ((i2c.len > sizeof (i2c.data)) ||
2369                         (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2)) {
2370                         ret = EINVAL;
2371                         break;
2372                 }
2373
2374                 p_ptt = ecore_ptt_acquire(p_hwfn);
2375
2376                 if (!p_ptt) {
2377                         QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
2378                         ret = -1;
2379                         break;
2380                 }
2381
2382                 ret = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt,
2383                         (ha->pci_func & 0x1), i2c.dev_addr, i2c.offset,
2384                         i2c.len, &i2c.data[0]);
2385
2386                 ecore_ptt_release(p_hwfn, p_ptt);
2387
2388                 if (ret) {
2389                         ret = -1;
2390                         break;
2391                 }
2392
2393                 ret = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
2394
2395                 QL_DPRINT8(ha, "SIOCGI2C copyout ret = %d \
2396                          len = %d addr = 0x%02x offset = 0x%04x \
2397                          data[0..7]=0x%02x 0x%02x 0x%02x 0x%02x 0x%02x \
2398                          0x%02x 0x%02x 0x%02x\n",
2399                         ret, i2c.len, i2c.dev_addr, i2c.offset,
2400                         i2c.data[0], i2c.data[1], i2c.data[2], i2c.data[3],
2401                         i2c.data[4], i2c.data[5], i2c.data[6], i2c.data[7]);
2402                 break;
2403         }
2404 #endif /* #if (__FreeBSD_version >= 1100101) */
2405
2406         default:
2407                 QL_DPRINT4(ha, "default (0x%lx)\n", cmd);
2408                 ret = ether_ioctl(ifp, cmd, data);
2409                 break;
2410         }
2411
2412         return (ret);
2413 }
2414
2415 static int
2416 qlnx_media_change(struct ifnet *ifp)
2417 {
2418         qlnx_host_t     *ha;
2419         struct ifmedia  *ifm;
2420         int             ret = 0;
2421
2422         ha = (qlnx_host_t *)ifp->if_softc;
2423
2424         QL_DPRINT2(ha, "enter\n");
2425
2426         ifm = &ha->media;
2427
2428         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2429                 ret = EINVAL;
2430
2431         QL_DPRINT2(ha, "exit\n");
2432
2433         return (ret);
2434 }
2435
2436 static void
2437 qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2438 {
2439         qlnx_host_t             *ha;
2440
2441         ha = (qlnx_host_t *)ifp->if_softc;
2442
2443         QL_DPRINT2(ha, "enter\n");
2444
2445         ifmr->ifm_status = IFM_AVALID;
2446         ifmr->ifm_active = IFM_ETHER;
2447
2448         if (ha->link_up) {
2449                 ifmr->ifm_status |= IFM_ACTIVE;
2450                 ifmr->ifm_active |=
2451                         (IFM_FDX | qlnx_get_optics(ha, &ha->if_link));
2452
2453                 if (ha->if_link.link_partner_caps &
2454                         (QLNX_LINK_CAP_Pause | QLNX_LINK_CAP_Asym_Pause))
2455                         ifmr->ifm_active |=
2456                                 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE);
2457         }
2458
2459         QL_DPRINT2(ha, "exit (%s)\n", (ha->link_up ? "link_up" : "link_down"));
2460
2461         return;
2462 }
2463
2464
2465 static void
2466 qlnx_free_tx_pkt(qlnx_host_t *ha, struct qlnx_fastpath *fp,
2467         struct qlnx_tx_queue *txq)
2468 {
2469         u16                     idx;
2470         struct mbuf             *mp;
2471         bus_dmamap_t            map;
2472         int                     i;
2473         struct eth_tx_bd        *tx_data_bd;
2474         struct eth_tx_1st_bd    *first_bd;
2475         int                     nbds = 0;
2476
2477         idx = txq->sw_tx_cons;
2478         mp = txq->sw_tx_ring[idx].mp;
2479         map = txq->sw_tx_ring[idx].map;
2480
2481         if ((mp == NULL) || QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL)){
2482
2483                 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL);
2484
2485                 QL_DPRINT1(ha, "(mp == NULL) "
2486                         " tx_idx = 0x%x"
2487                         " ecore_prod_idx = 0x%x"
2488                         " ecore_cons_idx = 0x%x"
2489                         " hw_bd_cons = 0x%x"
2490                         " txq_db_last = 0x%x"
2491                         " elem_left = 0x%x\n",
2492                         fp->rss_id,
2493                         ecore_chain_get_prod_idx(&txq->tx_pbl),
2494                         ecore_chain_get_cons_idx(&txq->tx_pbl),
2495                         le16toh(*txq->hw_cons_ptr),
2496                         txq->tx_db.raw,
2497                         ecore_chain_get_elem_left(&txq->tx_pbl));
2498
2499                 fp->err_tx_free_pkt_null++;
2500
2501                 //DEBUG
2502                 qlnx_trigger_dump(ha);
2503
2504                 return;
2505         } else {
2506
2507                 QLNX_INC_OPACKETS((ha->ifp));
2508                 QLNX_INC_OBYTES((ha->ifp), (mp->m_pkthdr.len));
2509
2510                 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_POSTWRITE);
2511                 bus_dmamap_unload(ha->tx_tag, map);
2512
2513                 fp->tx_pkts_freed++;
2514                 fp->tx_pkts_completed++;
2515
2516                 m_freem(mp);
2517         }
2518
2519         first_bd = (struct eth_tx_1st_bd *)ecore_chain_consume(&txq->tx_pbl);
2520         nbds = first_bd->data.nbds;
2521
2522 //      BD_SET_UNMAP_ADDR_LEN(first_bd, 0, 0);
2523
2524         for (i = 1; i < nbds; i++) {
2525                 tx_data_bd = ecore_chain_consume(&txq->tx_pbl);
2526 //              BD_SET_UNMAP_ADDR_LEN(tx_data_bd, 0, 0);
2527         }
2528         txq->sw_tx_ring[idx].flags = 0;
2529         txq->sw_tx_ring[idx].mp = NULL;
2530         txq->sw_tx_ring[idx].map = (bus_dmamap_t)0;
2531
2532         return;
2533 }
2534
2535 static void
2536 qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
2537         struct qlnx_tx_queue *txq)
2538 {
2539         u16 hw_bd_cons;
2540         u16 ecore_cons_idx;
2541         uint16_t diff;
2542
2543         hw_bd_cons = le16toh(*txq->hw_cons_ptr);
2544
2545         while (hw_bd_cons !=
2546                 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
2547
2548                 if (hw_bd_cons < ecore_cons_idx) {
2549                         diff = (1 << 16) - (ecore_cons_idx - hw_bd_cons);
2550                 } else {
2551                         diff = hw_bd_cons - ecore_cons_idx;
2552                 }
2553                 if ((diff > TX_RING_SIZE) ||
2554                         QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF)){
2555
2556                         QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF);
2557
2558                         QL_DPRINT1(ha, "(diff = 0x%x) "
2559                                 " tx_idx = 0x%x"
2560                                 " ecore_prod_idx = 0x%x"
2561                                 " ecore_cons_idx = 0x%x"
2562                                 " hw_bd_cons = 0x%x"
2563                                 " txq_db_last = 0x%x"
2564                                 " elem_left = 0x%x\n",
2565                                 diff,
2566                                 fp->rss_id,
2567                                 ecore_chain_get_prod_idx(&txq->tx_pbl),
2568                                 ecore_chain_get_cons_idx(&txq->tx_pbl),
2569                                 le16toh(*txq->hw_cons_ptr),
2570                                 txq->tx_db.raw,
2571                                 ecore_chain_get_elem_left(&txq->tx_pbl));
2572
2573                         fp->err_tx_cons_idx_conflict++;
2574
2575                         //DEBUG
2576                         qlnx_trigger_dump(ha);
2577                 }
2578
2579                 qlnx_free_tx_pkt(ha, fp, txq);
2580
2581                 txq->sw_tx_cons = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1);
2582         }
2583         return;
2584 }
2585
2586 static int
2587 qlnx_transmit(struct ifnet *ifp, struct mbuf  *mp)
2588 {
2589         qlnx_host_t             *ha = (qlnx_host_t *)ifp->if_softc;
2590         struct qlnx_fastpath    *fp;
2591         int                     rss_id = 0, ret = 0;
2592
2593         QL_DPRINT2(ha, "enter\n");
2594
2595 #if __FreeBSD_version >= 1100000
2596         if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE)
2597 #else
2598         if (mp->m_flags & M_FLOWID)
2599 #endif
2600                 rss_id = (mp->m_pkthdr.flowid % ECORE_RSS_IND_TABLE_SIZE) %
2601                                         ha->num_rss;
2602
2603         fp = &ha->fp_array[rss_id];
2604
2605         if (fp->tx_br == NULL) {
2606                 ret = EINVAL;
2607                 goto qlnx_transmit_exit;
2608         }
2609
2610         if (mp != NULL) {
2611                 ret = drbr_enqueue(ifp, fp->tx_br, mp);
2612         }
2613
2614         if (fp->fp_taskqueue != NULL)
2615                 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
2616
2617         ret = 0;
2618
2619 qlnx_transmit_exit:
2620
2621         QL_DPRINT2(ha, "exit ret = %d\n", ret);
2622         return ret;
2623 }
2624
2625 static void
2626 qlnx_qflush(struct ifnet *ifp)
2627 {
2628         int                     rss_id;
2629         struct qlnx_fastpath    *fp;
2630         struct mbuf             *mp;
2631         qlnx_host_t             *ha;
2632
2633         ha = (qlnx_host_t *)ifp->if_softc;
2634
2635         QL_DPRINT2(ha, "enter\n");
2636
2637         for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
2638
2639                 fp = &ha->fp_array[rss_id];
2640
2641                 if (fp == NULL)
2642                         continue;
2643
2644                 if (fp->tx_br) {
2645                         mtx_lock(&fp->tx_mtx);
2646
2647                         while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) { 
2648                                 fp->tx_pkts_freed++;
2649                                 m_freem(mp);                    
2650                         }
2651                         mtx_unlock(&fp->tx_mtx);
2652                 }
2653         }
2654         QL_DPRINT2(ha, "exit\n");
2655
2656         return;
2657 }
2658
2659 static void
2660 qlnx_txq_doorbell_wr32(qlnx_host_t *ha, void *reg_addr, uint32_t value)
2661 {
2662         struct ecore_dev        *cdev;
2663         uint32_t                offset;
2664
2665         cdev = &ha->cdev;
2666                 
2667         offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)cdev->doorbells);
2668
2669         bus_write_4(ha->pci_dbells, offset, value);
2670         bus_barrier(ha->pci_reg,  0, 0, BUS_SPACE_BARRIER_READ);
2671         bus_barrier(ha->pci_dbells,  0, 0, BUS_SPACE_BARRIER_READ);
2672
2673         return;
2674 }
2675
2676 static uint32_t
2677 qlnx_tcp_offset(qlnx_host_t *ha, struct mbuf *mp)
2678 {
2679         struct ether_vlan_header        *eh = NULL;
2680         struct ip                       *ip = NULL;
2681         struct ip6_hdr                  *ip6 = NULL;
2682         struct tcphdr                   *th = NULL;
2683         uint32_t                        ehdrlen = 0, ip_hlen = 0, offset = 0;
2684         uint16_t                        etype = 0;
2685         device_t                        dev;
2686         uint8_t                         buf[sizeof(struct ip6_hdr)];
2687
2688         dev = ha->pci_dev;
2689
2690         eh = mtod(mp, struct ether_vlan_header *);
2691
2692         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2693                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2694                 etype = ntohs(eh->evl_proto);
2695         } else {
2696                 ehdrlen = ETHER_HDR_LEN;
2697                 etype = ntohs(eh->evl_encap_proto);
2698         }
2699
2700         switch (etype) {
2701
2702                 case ETHERTYPE_IP:
2703                         ip = (struct ip *)(mp->m_data + ehdrlen);
2704
2705                         ip_hlen = sizeof (struct ip);
2706
2707                         if (mp->m_len < (ehdrlen + ip_hlen)) {
2708                                 m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
2709                                 ip = (struct ip *)buf;
2710                         }
2711
2712                         th = (struct tcphdr *)(ip + 1);
2713                         offset = ip_hlen + ehdrlen + (th->th_off << 2);
2714                 break;
2715
2716                 case ETHERTYPE_IPV6:
2717                         ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2718
2719                         ip_hlen = sizeof(struct ip6_hdr);
2720
2721                         if (mp->m_len < (ehdrlen + ip_hlen)) {
2722                                 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
2723                                         buf);
2724                                 ip6 = (struct ip6_hdr *)buf;
2725                         }
2726                         th = (struct tcphdr *)(ip6 + 1);
2727                         offset = ip_hlen + ehdrlen + (th->th_off << 2);
2728                 break;
2729
2730                 default:
2731                 break;
2732         }
2733
2734         return (offset);
2735 }
2736
2737 static __inline int
2738 qlnx_tso_check(struct qlnx_fastpath *fp, bus_dma_segment_t *segs, int nsegs,
2739         uint32_t offset)
2740 {
2741         int                     i;
2742         uint32_t                sum, nbds_in_hdr = 1;
2743         bus_dma_segment_t       *t_segs = segs;
2744
2745         /* count the number of segments spanned by TCP header */
2746
2747         i = 0;
2748         while ((i < nsegs) && (offset > t_segs->ds_len)) {
2749                 nbds_in_hdr++;
2750                 offset = offset - t_segs->ds_len;
2751                 t_segs++;
2752                 i++;
2753         }
2754
2755         while (nsegs >= QLNX_MAX_SEGMENTS_NON_TSO) {
2756
2757                 sum = 0;
2758
2759                 for (i = 0; i < (ETH_TX_LSO_WINDOW_BDS_NUM - nbds_in_hdr); i++){
2760                         sum += segs->ds_len;
2761                         segs++;
2762                 }
2763
2764                 if (sum < ETH_TX_LSO_WINDOW_MIN_LEN) {
2765                         fp->tx_lso_wnd_min_len++;
2766                         return (-1);
2767                 }
2768
2769                 nsegs -= QLNX_MAX_SEGMENTS_NON_TSO;
2770         }
2771
2772         return (0);
2773 }
2774
2775 static int
2776 qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct mbuf **m_headp)
2777 {
2778         bus_dma_segment_t       *segs;
2779         bus_dmamap_t            map = 0;
2780         uint32_t                nsegs = 0;
2781         int                     ret = -1;
2782         struct mbuf             *m_head = *m_headp;
2783         uint16_t                idx = 0;
2784         uint16_t                elem_left;
2785
2786         uint8_t                 nbd = 0;
2787         struct qlnx_tx_queue    *txq;
2788
2789         struct eth_tx_1st_bd    *first_bd;
2790         struct eth_tx_2nd_bd    *second_bd;
2791         struct eth_tx_3rd_bd    *third_bd;
2792         struct eth_tx_bd        *tx_data_bd;
2793
2794         int                     seg_idx = 0;
2795         uint32_t                nbds_in_hdr = 0;
2796         uint32_t                offset = 0;
2797
2798         QL_DPRINT8(ha, "enter\n");
2799
2800         if (!ha->link_up)
2801                 return (-1);
2802
2803         first_bd        = NULL;
2804         second_bd       = NULL;
2805         third_bd        = NULL;
2806         tx_data_bd      = NULL;
2807
2808         txq = fp->txq[0];
2809
2810         if (fp->tx_ring_full) {
2811                 elem_left = ecore_chain_get_elem_left(&txq->tx_pbl);
2812
2813                 if (elem_left < (TX_RING_SIZE >> 4)) 
2814                         return (-1);
2815                 else 
2816                         fp->tx_ring_full = 0;
2817         }
2818
2819         idx = txq->sw_tx_prod;
2820
2821         map = txq->sw_tx_ring[idx].map;
2822         segs = txq->segs;
2823
2824         ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
2825                         BUS_DMA_NOWAIT);
2826
2827         if (ha->dbg_trace_tso_pkt_len) {
2828                 if (!fp->tx_tso_min_pkt_len) {
2829                         fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len;
2830                         fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len;
2831                 } else {
2832                         if (fp->tx_tso_min_pkt_len > m_head->m_pkthdr.len)
2833                                 fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len;
2834                         if (fp->tx_tso_max_pkt_len < m_head->m_pkthdr.len)
2835                                 fp->tx_tso_max_pkt_len = m_head->m_pkthdr.len;
2836                 }
2837         }
2838
2839         if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
2840                 offset = qlnx_tcp_offset(ha, m_head);
2841
2842         if ((ret == EFBIG) ||
2843                 ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && (
2844                         (!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) ||
2845                 ((m_head->m_pkthdr.csum_flags & CSUM_TSO) &&
2846                         qlnx_tso_check(fp, segs, nsegs, offset))))) {
2847
2848                 struct mbuf *m;
2849
2850                 QL_DPRINT8(ha, "EFBIG [%d]\n", m_head->m_pkthdr.len);
2851
2852                 fp->tx_defrag++;
2853
2854                 m = m_defrag(m_head, M_NOWAIT);
2855                 if (m == NULL) {
2856                         fp->err_tx_defrag++;
2857                         fp->tx_pkts_freed++;
2858                         m_freem(m_head);
2859                         *m_headp = NULL;
2860                         QL_DPRINT1(ha, "m_defrag() = NULL [%d]\n", ret);
2861                         return (ENOBUFS);
2862                 }
2863
2864                 m_head = m;
2865                 *m_headp = m_head;
2866
2867                 if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
2868                                 segs, &nsegs, BUS_DMA_NOWAIT))) {
2869
2870                         fp->err_tx_defrag_dmamap_load++;
2871
2872                         QL_DPRINT1(ha,
2873                                 "bus_dmamap_load_mbuf_sg failed0 [%d, %d]\n",
2874                                 ret, m_head->m_pkthdr.len);
2875
2876                         fp->tx_pkts_freed++;
2877                         m_freem(m_head);
2878                         *m_headp = NULL;
2879
2880                         return (ret);
2881                 }
2882
2883                 if ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) &&
2884                         !(m_head->m_pkthdr.csum_flags & CSUM_TSO)) {
2885
2886                         fp->err_tx_non_tso_max_seg++;
2887
2888                         QL_DPRINT1(ha,
2889                                 "(%d) nsegs too many for non-TSO [%d, %d]\n",
2890                                 ret, nsegs, m_head->m_pkthdr.len);
2891
2892                         fp->tx_pkts_freed++;
2893                         m_freem(m_head);
2894                         *m_headp = NULL;
2895
2896                         return (ret);
2897                 }
2898                 if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
2899                         offset = qlnx_tcp_offset(ha, m_head);
2900
2901         } else if (ret) {
2902
2903                 fp->err_tx_dmamap_load++;
2904
2905                 QL_DPRINT1(ha, "bus_dmamap_load_mbuf_sg failed1 [%d, %d]\n",
2906                            ret, m_head->m_pkthdr.len);
2907                 fp->tx_pkts_freed++;
2908                 m_freem(m_head);
2909                 *m_headp = NULL;
2910                 return (ret);
2911         }
2912
2913         QL_ASSERT(ha, (nsegs != 0), ("qlnx_send: empty packet"));
2914
2915         if (ha->dbg_trace_tso_pkt_len) {
2916                 if (nsegs < QLNX_FP_MAX_SEGS)
2917                         fp->tx_pkts[(nsegs - 1)]++;
2918                 else
2919                         fp->tx_pkts[(QLNX_FP_MAX_SEGS - 1)]++; 
2920         }
2921
2922         if ((nsegs + QLNX_TX_ELEM_RESERVE) >
2923                 (int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))) {
2924
2925                 QL_DPRINT1(ha, "(%d, 0x%x) insuffient BDs"
2926                         " in chain[%d] trying to free packets\n",
2927                         nsegs, elem_left, fp->rss_id);
2928
2929                 fp->tx_nsegs_gt_elem_left++;
2930
2931                 (void)qlnx_tx_int(ha, fp, txq);
2932
2933                 if ((nsegs + QLNX_TX_ELEM_RESERVE) > (int)(elem_left =
2934                         ecore_chain_get_elem_left(&txq->tx_pbl))) {
2935
2936                         QL_DPRINT1(ha,
2937                                 "(%d, 0x%x) insuffient BDs in chain[%d]\n",
2938                                 nsegs, elem_left, fp->rss_id);
2939
2940                         fp->err_tx_nsegs_gt_elem_left++;
2941                         fp->tx_ring_full = 1;
2942                         ha->storm_stats_enable = 1;
2943                         return (ENOBUFS);
2944                 }
2945         }
2946
2947         bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);
2948
2949         txq->sw_tx_ring[idx].mp = m_head;
2950
2951         first_bd = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
2952
2953         memset(first_bd, 0, sizeof(*first_bd));
2954
2955         first_bd->data.bd_flags.bitfields =
2956                 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
2957
2958         BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, segs->ds_len);
2959
2960         nbd++;
2961
2962         if (m_head->m_pkthdr.csum_flags & CSUM_IP) {
2963                 first_bd->data.bd_flags.bitfields |=
2964                         (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
2965         }
2966
2967         if (m_head->m_pkthdr.csum_flags &
2968                 (CSUM_UDP | CSUM_TCP | CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) {
2969                 first_bd->data.bd_flags.bitfields |=
2970                         (1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT);
2971         }
2972
2973         if (m_head->m_flags & M_VLANTAG) {
2974                 first_bd->data.vlan = m_head->m_pkthdr.ether_vtag;
2975                 first_bd->data.bd_flags.bitfields |=
2976                         (1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT);
2977         }
2978
2979         if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
2980
2981                 first_bd->data.bd_flags.bitfields |=
2982                         (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
2983                 first_bd->data.bd_flags.bitfields |=
2984                         (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
2985
2986                 nbds_in_hdr = 1;
2987
2988                 if (offset == segs->ds_len) {
2989                         BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
2990                         segs++;
2991                         seg_idx++;
2992
2993                         second_bd = (struct eth_tx_2nd_bd *)
2994                                         ecore_chain_produce(&txq->tx_pbl);
2995                         memset(second_bd, 0, sizeof(*second_bd));
2996                         nbd++;
2997
2998                         if (seg_idx < nsegs) {
2999                                 BD_SET_UNMAP_ADDR_LEN(second_bd, \
3000                                         (segs->ds_addr), (segs->ds_len));
3001                                 segs++;
3002                                 seg_idx++;
3003                         }
3004
3005                         third_bd = (struct eth_tx_3rd_bd *)
3006                                         ecore_chain_produce(&txq->tx_pbl);
3007                         memset(third_bd, 0, sizeof(*third_bd));
3008                         third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3009                         third_bd->data.bitfields |=
3010                                 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3011                         nbd++;
3012
3013                         if (seg_idx < nsegs) {
3014                                 BD_SET_UNMAP_ADDR_LEN(third_bd, \
3015                                         (segs->ds_addr), (segs->ds_len));
3016                                 segs++;
3017                                 seg_idx++;
3018                         }
3019
3020                         for (; seg_idx < nsegs; seg_idx++) {
3021                                 tx_data_bd = (struct eth_tx_bd *)
3022                                         ecore_chain_produce(&txq->tx_pbl);
3023                                 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3024                                 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \
3025                                         segs->ds_addr,\
3026                                         segs->ds_len);
3027                                 segs++;
3028                                 nbd++;
3029                         }
3030
3031                 } else if (offset < segs->ds_len) {
3032                         BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
3033
3034                         second_bd = (struct eth_tx_2nd_bd *)
3035                                         ecore_chain_produce(&txq->tx_pbl);
3036                         memset(second_bd, 0, sizeof(*second_bd));
3037                         BD_SET_UNMAP_ADDR_LEN(second_bd, \
3038                                 (segs->ds_addr + offset),\
3039                                 (segs->ds_len - offset));
3040                         nbd++;
3041                         segs++;
3042
3043                         third_bd = (struct eth_tx_3rd_bd *)
3044                                         ecore_chain_produce(&txq->tx_pbl);
3045                         memset(third_bd, 0, sizeof(*third_bd));
3046
3047                         BD_SET_UNMAP_ADDR_LEN(third_bd, \
3048                                         segs->ds_addr,\
3049                                         segs->ds_len);
3050                         third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3051                         third_bd->data.bitfields |=
3052                                 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3053                         segs++;
3054                         nbd++;
3055
3056                         for (seg_idx = 2; seg_idx < nsegs; seg_idx++) {
3057                                 tx_data_bd = (struct eth_tx_bd *)
3058                                         ecore_chain_produce(&txq->tx_pbl);
3059                                 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3060                                 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \
3061                                         segs->ds_addr,\
3062                                         segs->ds_len);
3063                                 segs++;
3064                                 nbd++;
3065                         }
3066
3067                 } else {
3068                         offset = offset - segs->ds_len;
3069                         segs++;
3070
3071                         for (seg_idx = 1; seg_idx < nsegs; seg_idx++) {
3072
3073                                 if (offset)
3074                                         nbds_in_hdr++;
3075
3076                                 tx_data_bd = (struct eth_tx_bd *)
3077                                         ecore_chain_produce(&txq->tx_pbl);
3078                                 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3079
3080                                 if (second_bd == NULL) {
3081                                         second_bd = (struct eth_tx_2nd_bd *)
3082                                                                 tx_data_bd;
3083                                 } else if (third_bd == NULL) {
3084                                         third_bd = (struct eth_tx_3rd_bd *)
3085                                                                 tx_data_bd;
3086                                 }
3087                                 
3088                                 if (offset && (offset < segs->ds_len)) {
3089                                         BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3090                                                 segs->ds_addr, offset);
3091
3092                                         tx_data_bd = (struct eth_tx_bd *)
3093                                         ecore_chain_produce(&txq->tx_pbl);
3094
3095                                         memset(tx_data_bd, 0,
3096                                                 sizeof(*tx_data_bd));
3097
3098                                         if (second_bd == NULL) {
3099                                                 second_bd =
3100                                         (struct eth_tx_2nd_bd *)tx_data_bd;
3101                                         } else if (third_bd == NULL) {
3102                                                 third_bd =
3103                                         (struct eth_tx_3rd_bd *)tx_data_bd;
3104                                         }
3105                                         BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3106                                                 (segs->ds_addr + offset), \
3107                                                 (segs->ds_len - offset));
3108                                         nbd++;
3109                                         offset = 0;
3110                                 } else {
3111                                         if (offset)
3112                                                 offset = offset - segs->ds_len;
3113                                         BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
3114                                                 segs->ds_addr, segs->ds_len);
3115                                 }
3116                                 segs++;
3117                                 nbd++;
3118                         }
3119
3120                         if (third_bd == NULL) {
3121                                 third_bd = (struct eth_tx_3rd_bd *)
3122                                         ecore_chain_produce(&txq->tx_pbl);
3123                                 memset(third_bd, 0, sizeof(*third_bd));
3124                         }
3125
3126                         third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3127                         third_bd->data.bitfields |=
3128                                 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
3129                 }
3130         } else {
3131                 segs++;
3132                 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) {
3133                         tx_data_bd = (struct eth_tx_bd *)
3134                                         ecore_chain_produce(&txq->tx_pbl);
3135                         memset(tx_data_bd, 0, sizeof(*tx_data_bd));
3136                         BD_SET_UNMAP_ADDR_LEN(tx_data_bd, segs->ds_addr,\
3137                                 segs->ds_len);
3138                         segs++;
3139                         nbd++;
3140                 }
3141                 first_bd->data.bitfields =
3142                         (m_head->m_pkthdr.len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
3143                                  << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
3144                 first_bd->data.bitfields =
3145                         htole16(first_bd->data.bitfields);
3146         }
3147
3148
3149         first_bd->data.nbds = nbd;
3150
3151         if (ha->dbg_trace_tso_pkt_len) {
3152                 if (fp->tx_tso_max_nsegs < nsegs)
3153                         fp->tx_tso_max_nsegs = nsegs;
3154
3155                 if ((nsegs < fp->tx_tso_min_nsegs) || (!fp->tx_tso_min_nsegs))
3156                         fp->tx_tso_min_nsegs = nsegs;
3157         }
3158
3159         txq->sw_tx_ring[idx].nsegs = nsegs;
3160         txq->sw_tx_prod = (txq->sw_tx_prod + 1) & (TX_RING_SIZE - 1);
3161
3162         txq->tx_db.data.bd_prod =
3163                 htole16(ecore_chain_get_prod_idx(&txq->tx_pbl));
3164
3165         qlnx_txq_doorbell_wr32(ha, txq->doorbell_addr, txq->tx_db.raw);
3166    
3167         QL_DPRINT8(ha, "exit\n");
3168         return (0);
3169 }
3170
3171 static void
3172 qlnx_stop(qlnx_host_t *ha)
3173 {
3174         struct ifnet    *ifp = ha->ifp;
3175         device_t        dev;
3176         int             i;
3177
3178         dev = ha->pci_dev;
3179
3180         ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
3181
3182         /*
3183          * We simply lock and unlock each fp->tx_mtx to
3184          * propagate the if_drv_flags
3185          * state to each tx thread
3186          */
3187         QL_DPRINT1(ha, "QLNX STATE = %d\n",ha->state);
3188
3189         if (ha->state == QLNX_STATE_OPEN) {
3190                 for (i = 0; i < ha->num_rss; i++) {
3191                         struct qlnx_fastpath *fp = &ha->fp_array[i];
3192
3193                         mtx_lock(&fp->tx_mtx);
3194                         mtx_unlock(&fp->tx_mtx);
3195
3196                         if (fp->fp_taskqueue != NULL)
3197                                 taskqueue_enqueue(fp->fp_taskqueue,
3198                                         &fp->fp_task);
3199                 }
3200         }
3201
3202         qlnx_unload(ha);
3203
3204         return;
3205 }
3206
3207 static int
3208 qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha)
3209 {
3210         return(TX_RING_SIZE - 1);
3211 }
3212
3213 uint8_t *
3214 qlnx_get_mac_addr(qlnx_host_t *ha)
3215 {
3216         struct ecore_hwfn       *p_hwfn;
3217
3218         p_hwfn = &ha->cdev.hwfns[0];
3219         return (p_hwfn->hw_info.hw_mac_addr);
3220 }
3221
3222 static uint32_t
3223 qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link)
3224 {
3225         uint32_t        ifm_type = 0;
3226
3227         switch (if_link->media_type) {
3228
3229         case MEDIA_MODULE_FIBER:
3230         case MEDIA_UNSPECIFIED:
3231                 if (if_link->speed == (100 * 1000))
3232                         ifm_type = QLNX_IFM_100G_SR4;
3233                 else if (if_link->speed == (40 * 1000))
3234                         ifm_type = IFM_40G_SR4;
3235                 else if (if_link->speed == (25 * 1000))
3236                         ifm_type = QLNX_IFM_25G_SR;
3237                 else if (if_link->speed == (10 * 1000))
3238                         ifm_type = (IFM_10G_LR | IFM_10G_SR);
3239                 else if (if_link->speed == (1 * 1000))
3240                         ifm_type = (IFM_1000_SX | IFM_1000_LX);
3241
3242                 break;
3243
3244         case MEDIA_DA_TWINAX:
3245                 if (if_link->speed == (100 * 1000))
3246                         ifm_type = QLNX_IFM_100G_CR4;
3247                 else if (if_link->speed == (40 * 1000))
3248                         ifm_type = IFM_40G_CR4;
3249                 else if (if_link->speed == (25 * 1000))
3250                         ifm_type = QLNX_IFM_25G_CR;
3251                 else if (if_link->speed == (10 * 1000))
3252                         ifm_type = IFM_10G_TWINAX;
3253
3254                 break;
3255
3256         default :
3257                 ifm_type = IFM_UNKNOWN;
3258                 break;
3259         }
3260         return (ifm_type);
3261 }
3262
3263
3264
3265 /*****************************************************************************
3266  * Interrupt Service Functions
3267  *****************************************************************************/
3268
3269 static int
3270 qlnx_rx_jumbo_chain(qlnx_host_t *ha, struct qlnx_fastpath *fp,
3271         struct mbuf *mp_head, uint16_t len)
3272 {
3273         struct mbuf             *mp, *mpf, *mpl;
3274         struct sw_rx_data       *sw_rx_data;
3275         struct qlnx_rx_queue    *rxq;
3276         uint16_t                len_in_buffer;
3277
3278         rxq = fp->rxq;
3279         mpf = mpl = mp = NULL;
3280
3281         while (len) {
3282
3283                 rxq->sw_rx_cons  = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3284
3285                 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3286                 mp = sw_rx_data->data;
3287
3288                 if (mp == NULL) {
3289                         QL_DPRINT1(ha, "mp = NULL\n");
3290                         fp->err_rx_mp_null++;
3291                         rxq->sw_rx_cons  =
3292                                 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3293
3294                         if (mpf != NULL)
3295                                 m_freem(mpf);
3296
3297                         return (-1);
3298                 }
3299                 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3300                         BUS_DMASYNC_POSTREAD);
3301
3302                 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3303
3304                         QL_DPRINT1(ha, "New buffer allocation failed, dropping"
3305                                 " incoming packet and reusing its buffer\n");
3306
3307                         qlnx_reuse_rx_data(rxq);
3308                         fp->err_rx_alloc_errors++;
3309
3310                         if (mpf != NULL)
3311                                 m_freem(mpf);
3312
3313                         return (-1);
3314                 }
3315                 ecore_chain_consume(&rxq->rx_bd_ring);
3316
3317                 if (len > rxq->rx_buf_size)
3318                         len_in_buffer = rxq->rx_buf_size;
3319                 else
3320                         len_in_buffer = len;
3321
3322                 len = len - len_in_buffer;
3323
3324                 mp->m_flags &= ~M_PKTHDR;
3325                 mp->m_next = NULL;
3326                 mp->m_len = len_in_buffer;
3327
3328                 if (mpf == NULL)
3329                         mpf = mpl = mp;
3330                 else {
3331                         mpl->m_next = mp;
3332                         mpl = mp;
3333                 }
3334         }
3335
3336         if (mpf != NULL)
3337                 mp_head->m_next = mpf;
3338
3339         return (0);
3340 }
3341
3342 static void
3343 qlnx_tpa_start(qlnx_host_t *ha,
3344         struct qlnx_fastpath *fp,
3345         struct qlnx_rx_queue *rxq,
3346         struct eth_fast_path_rx_tpa_start_cqe *cqe)
3347 {
3348         uint32_t                agg_index;
3349         struct ifnet            *ifp = ha->ifp;
3350         struct mbuf             *mp;
3351         struct mbuf             *mpf = NULL, *mpl = NULL, *mpc = NULL;
3352         struct sw_rx_data       *sw_rx_data;
3353         dma_addr_t              addr;
3354         bus_dmamap_t            map;
3355         struct eth_rx_bd        *rx_bd;
3356         int                     i;
3357         device_t                dev;
3358 #if __FreeBSD_version >= 1100000
3359         uint8_t                 hash_type;
3360 #endif /* #if __FreeBSD_version >= 1100000 */
3361
3362         dev = ha->pci_dev;
3363         agg_index = cqe->tpa_agg_index;
3364
3365         QL_DPRINT7(ha, "[rss_id = %d]: enter\n \
3366                 \t type = 0x%x\n \
3367                 \t bitfields = 0x%x\n \
3368                 \t seg_len = 0x%x\n \
3369                 \t pars_flags = 0x%x\n \
3370                 \t vlan_tag = 0x%x\n \
3371                 \t rss_hash = 0x%x\n \
3372                 \t len_on_first_bd = 0x%x\n \
3373                 \t placement_offset = 0x%x\n \
3374                 \t tpa_agg_index = 0x%x\n \
3375                 \t header_len = 0x%x\n \
3376                 \t ext_bd_len_list[0] = 0x%x\n \
3377                 \t ext_bd_len_list[1] = 0x%x\n \
3378                 \t ext_bd_len_list[2] = 0x%x\n \
3379                 \t ext_bd_len_list[3] = 0x%x\n \
3380                 \t ext_bd_len_list[4] = 0x%x\n",
3381                 fp->rss_id, cqe->type, cqe->bitfields, cqe->seg_len,
3382                 cqe->pars_flags.flags, cqe->vlan_tag,
3383                 cqe->rss_hash, cqe->len_on_first_bd, cqe->placement_offset,
3384                 cqe->tpa_agg_index, cqe->header_len,
3385                 cqe->ext_bd_len_list[0], cqe->ext_bd_len_list[1],
3386                 cqe->ext_bd_len_list[2], cqe->ext_bd_len_list[3],
3387                 cqe->ext_bd_len_list[4]);
3388
3389         if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
3390                 fp->err_rx_tpa_invalid_agg_num++;
3391                 return;
3392         }
3393
3394         sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3395         bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD);
3396         mp = sw_rx_data->data;
3397
3398         QL_DPRINT7(ha, "[rss_id = %d]: mp = %p \n ", fp->rss_id, mp);
3399
3400         if (mp == NULL) {
3401                 QL_DPRINT7(ha, "[%d]: mp = NULL\n", fp->rss_id);
3402                 fp->err_rx_mp_null++;
3403                 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3404
3405                 return;
3406         }
3407
3408         if ((le16toh(cqe->pars_flags.flags)) & CQE_FLAGS_ERR) {
3409
3410                 QL_DPRINT7(ha, "[%d]: CQE in CONS = %u has error,"
3411                         " flags = %x, dropping incoming packet\n", fp->rss_id,
3412                         rxq->sw_rx_cons, le16toh(cqe->pars_flags.flags));
3413
3414                 fp->err_rx_hw_errors++;
3415
3416                 qlnx_reuse_rx_data(rxq);
3417
3418                 QLNX_INC_IERRORS(ifp);
3419
3420                 return;
3421         }
3422
3423         if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3424
3425                 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
3426                         " dropping incoming packet and reusing its buffer\n",
3427                         fp->rss_id);
3428
3429                 fp->err_rx_alloc_errors++;
3430                 QLNX_INC_IQDROPS(ifp);
3431
3432                 /*
3433                  * Load the tpa mbuf into the rx ring and save the 
3434                  * posted mbuf
3435                  */
3436
3437                 map = sw_rx_data->map;
3438                 addr = sw_rx_data->dma_addr;
3439
3440                 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
3441
3442                 sw_rx_data->data = rxq->tpa_info[agg_index].rx_buf.data;
3443                 sw_rx_data->dma_addr = rxq->tpa_info[agg_index].rx_buf.dma_addr;
3444                 sw_rx_data->map = rxq->tpa_info[agg_index].rx_buf.map;
3445
3446                 rxq->tpa_info[agg_index].rx_buf.data = mp;
3447                 rxq->tpa_info[agg_index].rx_buf.dma_addr = addr;
3448                 rxq->tpa_info[agg_index].rx_buf.map = map;
3449
3450                 rx_bd = (struct eth_rx_bd *)
3451                                 ecore_chain_produce(&rxq->rx_bd_ring);
3452
3453                 rx_bd->addr.hi = htole32(U64_HI(sw_rx_data->dma_addr));
3454                 rx_bd->addr.lo = htole32(U64_LO(sw_rx_data->dma_addr));
3455
3456                 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3457                         BUS_DMASYNC_PREREAD);
3458
3459                 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
3460                 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3461
3462                 ecore_chain_consume(&rxq->rx_bd_ring);
3463
3464                 /* Now reuse any buffers posted in ext_bd_len_list */
3465                 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
3466
3467                         if (cqe->ext_bd_len_list[i] == 0)
3468                                 break;
3469
3470                         qlnx_reuse_rx_data(rxq);
3471                 }
3472
3473                 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
3474                 return;
3475         }
3476
3477         if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
3478
3479                 QL_DPRINT7(ha, "[%d]: invalid aggregation state,"
3480                         " dropping incoming packet and reusing its buffer\n",
3481                         fp->rss_id);
3482
3483                 QLNX_INC_IQDROPS(ifp);
3484
3485                 /* if we already have mbuf head in aggregation free it */
3486                 if (rxq->tpa_info[agg_index].mpf) {
3487                         m_freem(rxq->tpa_info[agg_index].mpf);
3488                         rxq->tpa_info[agg_index].mpl = NULL;
3489                 }
3490                 rxq->tpa_info[agg_index].mpf = mp;
3491                 rxq->tpa_info[agg_index].mpl = NULL;
3492
3493                 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3494                 ecore_chain_consume(&rxq->rx_bd_ring);
3495
3496                 /* Now reuse any buffers posted in ext_bd_len_list */
3497                 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
3498
3499                         if (cqe->ext_bd_len_list[i] == 0)
3500                                 break;
3501
3502                         qlnx_reuse_rx_data(rxq);
3503                 }
3504                 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
3505
3506                 return;
3507         }
3508
3509         /*
3510          * first process the ext_bd_len_list 
3511          * if this fails then we simply drop the packet
3512          */
3513         ecore_chain_consume(&rxq->rx_bd_ring);
3514         rxq->sw_rx_cons  = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3515
3516         for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
3517
3518                 QL_DPRINT7(ha, "[%d]: 4\n ", fp->rss_id);
3519
3520                 if (cqe->ext_bd_len_list[i] == 0)
3521                         break;
3522
3523                 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3524                 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3525                         BUS_DMASYNC_POSTREAD);
3526
3527                 mpc = sw_rx_data->data;
3528
3529                 if (mpc == NULL) {
3530                         QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
3531                         fp->err_rx_mp_null++;
3532                         if (mpf != NULL)
3533                                 m_freem(mpf);
3534                         mpf = mpl = NULL;
3535                         rxq->tpa_info[agg_index].agg_state =
3536                                                 QLNX_AGG_STATE_ERROR;
3537                         ecore_chain_consume(&rxq->rx_bd_ring);
3538                         rxq->sw_rx_cons =
3539                                 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3540                         continue;
3541                 }
3542
3543                 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3544                         QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
3545                                 " dropping incoming packet and reusing its"
3546                                 " buffer\n", fp->rss_id);
3547
3548                         qlnx_reuse_rx_data(rxq);
3549
3550                         if (mpf != NULL)
3551                                 m_freem(mpf);
3552                         mpf = mpl = NULL;
3553
3554                         rxq->tpa_info[agg_index].agg_state =
3555                                                 QLNX_AGG_STATE_ERROR;
3556
3557                         ecore_chain_consume(&rxq->rx_bd_ring);
3558                         rxq->sw_rx_cons =
3559                                 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3560
3561                         continue;
3562                 }
3563
3564                 mpc->m_flags &= ~M_PKTHDR;
3565                 mpc->m_next = NULL;
3566                 mpc->m_len = cqe->ext_bd_len_list[i];
3567
3568
3569                 if (mpf == NULL) {
3570                         mpf = mpl = mpc;
3571                 } else {
3572                         mpl->m_len = ha->rx_buf_size;
3573                         mpl->m_next = mpc;
3574                         mpl = mpc;
3575                 }
3576
3577                 ecore_chain_consume(&rxq->rx_bd_ring);
3578                 rxq->sw_rx_cons =
3579                         (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3580         }
3581
3582         if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
3583
3584                 QL_DPRINT7(ha, "[%d]: invalid aggregation state, dropping"
3585                         " incoming packet and reusing its buffer\n",
3586                         fp->rss_id);
3587
3588                 QLNX_INC_IQDROPS(ifp);
3589
3590                 rxq->tpa_info[agg_index].mpf = mp;
3591                 rxq->tpa_info[agg_index].mpl = NULL;
3592
3593                 return;
3594         }
3595            
3596         rxq->tpa_info[agg_index].placement_offset = cqe->placement_offset;
3597
3598         if (mpf != NULL) {
3599                 mp->m_len = ha->rx_buf_size;
3600                 mp->m_next = mpf;
3601                 rxq->tpa_info[agg_index].mpf = mp;
3602                 rxq->tpa_info[agg_index].mpl = mpl;
3603         } else {
3604                 mp->m_len = cqe->len_on_first_bd + cqe->placement_offset;
3605                 rxq->tpa_info[agg_index].mpf = mp;
3606                 rxq->tpa_info[agg_index].mpl = mp;
3607                 mp->m_next = NULL;
3608         }
3609
3610         mp->m_flags |= M_PKTHDR;
3611
3612         /* assign packet to this interface interface */
3613         mp->m_pkthdr.rcvif = ifp;
3614
3615         /* assume no hardware checksum has complated */
3616         mp->m_pkthdr.csum_flags = 0;
3617
3618         //mp->m_pkthdr.flowid = fp->rss_id;
3619         mp->m_pkthdr.flowid = cqe->rss_hash;
3620
3621 #if __FreeBSD_version >= 1100000
3622
3623         hash_type = cqe->bitfields &
3624                         (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
3625                         ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);
3626
3627         switch (hash_type) {
3628
3629         case RSS_HASH_TYPE_IPV4:
3630                 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
3631                 break;
3632
3633         case RSS_HASH_TYPE_TCP_IPV4:
3634                 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
3635                 break;
3636
3637         case RSS_HASH_TYPE_IPV6:
3638                 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
3639                 break;
3640
3641         case RSS_HASH_TYPE_TCP_IPV6:
3642                 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
3643                 break;
3644
3645         default:
3646                 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
3647                 break;
3648         }
3649
3650 #else
3651         mp->m_flags |= M_FLOWID;
3652 #endif
3653
3654         mp->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID |
3655                                         CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3656
3657         mp->m_pkthdr.csum_data = 0xFFFF;
3658
3659         if (CQE_HAS_VLAN(cqe->pars_flags.flags)) {
3660                 mp->m_pkthdr.ether_vtag = le16toh(cqe->vlan_tag);
3661                 mp->m_flags |= M_VLANTAG;
3662         }
3663
3664         rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_START;
3665
3666         QL_DPRINT7(ha, "[%d]: 5\n\tagg_state = %d\n\t mpf = %p mpl = %p\n",
3667                 fp->rss_id, rxq->tpa_info[agg_index].agg_state,
3668                 rxq->tpa_info[agg_index].mpf, rxq->tpa_info[agg_index].mpl);
3669
3670         return;
3671 }
3672
3673 static void
3674 qlnx_tpa_cont(qlnx_host_t *ha, struct qlnx_fastpath *fp,
3675         struct qlnx_rx_queue *rxq,
3676         struct eth_fast_path_rx_tpa_cont_cqe *cqe)
3677 {
3678         struct sw_rx_data       *sw_rx_data;
3679         int                     i;
3680         struct mbuf             *mpf = NULL, *mpl = NULL, *mpc = NULL;
3681         struct mbuf             *mp;
3682         uint32_t                agg_index;
3683         device_t                dev;
3684
3685         dev = ha->pci_dev;
3686
3687         QL_DPRINT7(ha, "[%d]: enter\n \
3688                 \t type = 0x%x\n \
3689                 \t tpa_agg_index = 0x%x\n \
3690                 \t len_list[0] = 0x%x\n \
3691                 \t len_list[1] = 0x%x\n \
3692                 \t len_list[2] = 0x%x\n \
3693                 \t len_list[3] = 0x%x\n \
3694                 \t len_list[4] = 0x%x\n \
3695                 \t len_list[5] = 0x%x\n",
3696                 fp->rss_id, cqe->type, cqe->tpa_agg_index,
3697                 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
3698                 cqe->len_list[3], cqe->len_list[4], cqe->len_list[5]);
3699
3700         agg_index = cqe->tpa_agg_index;
3701
3702         if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
3703                 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
3704                 fp->err_rx_tpa_invalid_agg_num++;
3705                 return;
3706         }
3707
3708
3709         for (i = 0; i < ETH_TPA_CQE_CONT_LEN_LIST_SIZE; i++) {
3710
3711                 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);
3712
3713                 if (cqe->len_list[i] == 0)
3714                         break;
3715
3716                 if (rxq->tpa_info[agg_index].agg_state != 
3717                         QLNX_AGG_STATE_START) {
3718                         qlnx_reuse_rx_data(rxq);
3719                         continue;
3720                 }
3721
3722                 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3723                 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3724                         BUS_DMASYNC_POSTREAD);
3725
3726                 mpc = sw_rx_data->data;
3727
3728                 if (mpc == NULL) {
3729
3730                         QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
3731
3732                         fp->err_rx_mp_null++;
3733                         if (mpf != NULL)
3734                                 m_freem(mpf);
3735                         mpf = mpl = NULL;
3736                         rxq->tpa_info[agg_index].agg_state =
3737                                                 QLNX_AGG_STATE_ERROR;
3738                         ecore_chain_consume(&rxq->rx_bd_ring);
3739                         rxq->sw_rx_cons =
3740                                 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3741                         continue;
3742                 }
3743
3744                 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3745
3746                         QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
3747                                 " dropping incoming packet and reusing its"
3748                                 " buffer\n", fp->rss_id);
3749
3750                         qlnx_reuse_rx_data(rxq);
3751
3752                         if (mpf != NULL)
3753                                 m_freem(mpf);
3754                         mpf = mpl = NULL;
3755
3756                         rxq->tpa_info[agg_index].agg_state =
3757                                                 QLNX_AGG_STATE_ERROR;
3758
3759                         ecore_chain_consume(&rxq->rx_bd_ring);
3760                         rxq->sw_rx_cons =
3761                                 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3762
3763                         continue;
3764                 }
3765
3766                 mpc->m_flags &= ~M_PKTHDR;
3767                 mpc->m_next = NULL;
3768                 mpc->m_len = cqe->len_list[i];
3769
3770
3771                 if (mpf == NULL) {
3772                         mpf = mpl = mpc;
3773                 } else {
3774                         mpl->m_len = ha->rx_buf_size;
3775                         mpl->m_next = mpc;
3776                         mpl = mpc;
3777                 }
3778
3779                 ecore_chain_consume(&rxq->rx_bd_ring);
3780                 rxq->sw_rx_cons =
3781                         (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3782         }
3783
3784         QL_DPRINT7(ha, "[%d]: 2\n" "\tmpf = %p mpl = %p\n",
3785                   fp->rss_id, mpf, mpl);
3786
3787         if (mpf != NULL) {
3788                 mp = rxq->tpa_info[agg_index].mpl;
3789                 mp->m_len = ha->rx_buf_size;
3790                 mp->m_next = mpf;
3791                 rxq->tpa_info[agg_index].mpl = mpl;
3792         }
3793
3794         return;
3795 }
3796
3797 static int
3798 qlnx_tpa_end(qlnx_host_t *ha, struct qlnx_fastpath *fp,
3799         struct qlnx_rx_queue *rxq,
3800         struct eth_fast_path_rx_tpa_end_cqe *cqe)
3801 {
3802         struct sw_rx_data       *sw_rx_data;
3803         int                     i;
3804         struct mbuf             *mpf = NULL, *mpl = NULL, *mpc = NULL;
3805         struct mbuf             *mp;
3806         uint32_t                agg_index;
3807         uint32_t                len = 0;
3808         struct ifnet            *ifp = ha->ifp;
3809         device_t                dev;
3810
3811         dev = ha->pci_dev;
3812
3813         QL_DPRINT7(ha, "[%d]: enter\n \
3814                 \t type = 0x%x\n \
3815                 \t tpa_agg_index = 0x%x\n \
3816                 \t total_packet_len = 0x%x\n \
3817                 \t num_of_bds = 0x%x\n \
3818                 \t end_reason = 0x%x\n \
3819                 \t num_of_coalesced_segs = 0x%x\n \
3820                 \t ts_delta = 0x%x\n \
3821                 \t len_list[0] = 0x%x\n \
3822                 \t len_list[1] = 0x%x\n \
3823                 \t len_list[2] = 0x%x\n \
3824                 \t len_list[3] = 0x%x\n",
3825                  fp->rss_id, cqe->type, cqe->tpa_agg_index,
3826                 cqe->total_packet_len, cqe->num_of_bds,
3827                 cqe->end_reason, cqe->num_of_coalesced_segs, cqe->ts_delta,
3828                 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
3829                 cqe->len_list[3]);
3830
3831         agg_index = cqe->tpa_agg_index;
3832
3833         if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
3834
3835                 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
3836
3837                 fp->err_rx_tpa_invalid_agg_num++;
3838                 return (0);
3839         }
3840
3841
3842         for (i = 0; i < ETH_TPA_CQE_END_LEN_LIST_SIZE; i++) {
3843
3844                 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);
3845
3846                 if (cqe->len_list[i] == 0)
3847                         break;
3848
3849                 if (rxq->tpa_info[agg_index].agg_state != 
3850                         QLNX_AGG_STATE_START) {
3851
3852                         QL_DPRINT7(ha, "[%d]: 2\n ", fp->rss_id);
3853         
3854                         qlnx_reuse_rx_data(rxq);
3855                         continue;
3856                 }
3857
3858                 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3859                 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3860                         BUS_DMASYNC_POSTREAD);
3861
3862                 mpc = sw_rx_data->data;
3863
3864                 if (mpc == NULL) {
3865
3866                         QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
3867
3868                         fp->err_rx_mp_null++;
3869                         if (mpf != NULL)
3870                                 m_freem(mpf);
3871                         mpf = mpl = NULL;
3872                         rxq->tpa_info[agg_index].agg_state =
3873                                                 QLNX_AGG_STATE_ERROR;
3874                         ecore_chain_consume(&rxq->rx_bd_ring);
3875                         rxq->sw_rx_cons =
3876                                 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3877                         continue;
3878                 }
3879
3880                 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3881                         QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
3882                                 " dropping incoming packet and reusing its"
3883                                 " buffer\n", fp->rss_id);
3884
3885                         qlnx_reuse_rx_data(rxq);
3886
3887                         if (mpf != NULL)
3888                                 m_freem(mpf);
3889                         mpf = mpl = NULL;
3890
3891                         rxq->tpa_info[agg_index].agg_state =
3892                                                 QLNX_AGG_STATE_ERROR;
3893
3894                         ecore_chain_consume(&rxq->rx_bd_ring);
3895                         rxq->sw_rx_cons =
3896                                 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3897
3898                         continue;
3899                 }
3900
3901                 mpc->m_flags &= ~M_PKTHDR;
3902                 mpc->m_next = NULL;
3903                 mpc->m_len = cqe->len_list[i];
3904
3905
3906                 if (mpf == NULL) {
3907                         mpf = mpl = mpc;
3908                 } else {
3909                         mpl->m_len = ha->rx_buf_size;
3910                         mpl->m_next = mpc;
3911                         mpl = mpc;
3912                 }
3913
3914                 ecore_chain_consume(&rxq->rx_bd_ring);
3915                 rxq->sw_rx_cons =
3916                         (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3917         }
3918
3919         QL_DPRINT7(ha, "[%d]: 5\n ", fp->rss_id);
3920
3921         if (mpf != NULL) {
3922
3923                 QL_DPRINT7(ha, "[%d]: 6\n ", fp->rss_id);
3924
3925                 mp = rxq->tpa_info[agg_index].mpl;
3926                 mp->m_len = ha->rx_buf_size;
3927                 mp->m_next = mpf;
3928         }
3929
3930         if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_START) {
3931
3932                 QL_DPRINT7(ha, "[%d]: 7\n ", fp->rss_id);
3933
3934                 if (rxq->tpa_info[agg_index].mpf != NULL)
3935                         m_freem(rxq->tpa_info[agg_index].mpf);
3936                 rxq->tpa_info[agg_index].mpf = NULL;
3937                 rxq->tpa_info[agg_index].mpl = NULL;
3938                 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
3939                 return (0);
3940         }
3941
3942         mp = rxq->tpa_info[agg_index].mpf;
3943         m_adj(mp, rxq->tpa_info[agg_index].placement_offset);
3944         mp->m_pkthdr.len = cqe->total_packet_len;
3945
3946         if (mp->m_next  == NULL)
3947                 mp->m_len = mp->m_pkthdr.len;
3948         else {
3949                 /* compute the total packet length */
3950                 mpf = mp;
3951                 while (mpf != NULL) {
3952                         len += mpf->m_len;
3953                         mpf = mpf->m_next;
3954                 }
3955
3956                 if (cqe->total_packet_len > len) {
3957                         mpl = rxq->tpa_info[agg_index].mpl;
3958                         mpl->m_len += (cqe->total_packet_len - len);
3959                 }
3960         }
3961
3962         QLNX_INC_IPACKETS(ifp);
3963         QLNX_INC_IBYTES(ifp, (cqe->total_packet_len));
3964
3965         QL_DPRINT7(ha, "[%d]: 8 csum_data = 0x%x csum_flags = 0x%lx\n \
3966                 m_len = 0x%x m_pkthdr_len = 0x%x\n",
3967                 fp->rss_id, mp->m_pkthdr.csum_data,
3968                 mp->m_pkthdr.csum_flags, mp->m_len, mp->m_pkthdr.len);
3969
3970         (*ifp->if_input)(ifp, mp);
3971
3972         rxq->tpa_info[agg_index].mpf = NULL;
3973         rxq->tpa_info[agg_index].mpl = NULL;
3974         rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
3975
3976         return (cqe->num_of_coalesced_segs);
3977 }
3978
3979 static int
3980 qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
3981         int lro_enable)
3982 {
3983         uint16_t                hw_comp_cons, sw_comp_cons;
3984         int                     rx_pkt = 0;
3985         struct qlnx_rx_queue    *rxq = fp->rxq;
3986         struct ifnet            *ifp = ha->ifp;
3987         struct ecore_dev        *cdev = &ha->cdev;
3988         struct ecore_hwfn       *p_hwfn;
3989
3990 #ifdef QLNX_SOFT_LRO
3991         struct lro_ctrl         *lro;
3992
3993         lro = &rxq->lro;
3994 #endif /* #ifdef QLNX_SOFT_LRO */
3995
3996         hw_comp_cons = le16toh(*rxq->hw_cons_ptr);
3997         sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
3998
3999         p_hwfn = &ha->cdev.hwfns[(fp->rss_id % cdev->num_hwfns)];
4000
4001         /* Memory barrier to prevent the CPU from doing speculative reads of CQE
4002          * / BD in the while-loop before reading hw_comp_cons. If the CQE is
4003          * read before it is written by FW, then FW writes CQE and SB, and then
4004          * the CPU reads the hw_comp_cons, it will use an old CQE.
4005          */
4006
4007         /* Loop to complete all indicated BDs */
4008         while (sw_comp_cons != hw_comp_cons) {
4009                 union eth_rx_cqe                *cqe;
4010                 struct eth_fast_path_rx_reg_cqe *fp_cqe;
4011                 struct sw_rx_data               *sw_rx_data;
4012                 register struct mbuf            *mp;
4013                 enum eth_rx_cqe_type            cqe_type;
4014                 uint16_t                        len, pad, len_on_first_bd;
4015                 uint8_t                         *data;
4016 #if __FreeBSD_version >= 1100000
4017                 uint8_t                         hash_type;
4018 #endif /* #if __FreeBSD_version >= 1100000 */
4019
4020                 /* Get the CQE from the completion ring */
4021                 cqe = (union eth_rx_cqe *)
4022                         ecore_chain_consume(&rxq->rx_comp_ring);
4023                 cqe_type = cqe->fast_path_regular.type;
4024
4025                 if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) {
4026                         QL_DPRINT3(ha, "Got a slowath CQE\n");
4027
4028                         ecore_eth_cqe_completion(p_hwfn,
4029                                         (struct eth_slow_path_rx_cqe *)cqe);
4030                         goto next_cqe;
4031                 }
4032
4033                 if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) {
4034
4035                         switch (cqe_type) {
4036
4037                         case ETH_RX_CQE_TYPE_TPA_START:
4038                                 qlnx_tpa_start(ha, fp, rxq,
4039                                         &cqe->fast_path_tpa_start);
4040                                 fp->tpa_start++;
4041                                 break;
4042
4043                         case ETH_RX_CQE_TYPE_TPA_CONT:
4044                                 qlnx_tpa_cont(ha, fp, rxq,
4045                                         &cqe->fast_path_tpa_cont);
4046                                 fp->tpa_cont++;
4047                                 break;
4048
4049                         case ETH_RX_CQE_TYPE_TPA_END:
4050                                 rx_pkt += qlnx_tpa_end(ha, fp, rxq,
4051                                                 &cqe->fast_path_tpa_end);
4052                                 fp->tpa_end++;
4053                                 break;
4054
4055                         default:
4056                                 break;
4057                         }
4058
4059                         goto next_cqe;
4060                 }
4061
4062                 /* Get the data from the SW ring */
4063                 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4064                 mp = sw_rx_data->data;
4065
4066                 if (mp == NULL) {
4067                         QL_DPRINT1(ha, "mp = NULL\n");
4068                         fp->err_rx_mp_null++;
4069                         rxq->sw_rx_cons  =
4070                                 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4071                         goto next_cqe;
4072                 }
4073                 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4074                         BUS_DMASYNC_POSTREAD);
4075
4076                 /* non GRO */
4077                 fp_cqe = &cqe->fast_path_regular;/* MK CR TPA check assembly */
4078                 len =  le16toh(fp_cqe->pkt_len);
4079                 pad = fp_cqe->placement_offset;
4080
4081                 QL_DPRINT3(ha, "CQE type = %x, flags = %x, vlan = %x,"
4082                         " len %u, parsing flags = %d pad  = %d\n",
4083                         cqe_type, fp_cqe->bitfields,
4084                         le16toh(fp_cqe->vlan_tag),
4085                         len, le16toh(fp_cqe->pars_flags.flags), pad);
4086
4087                 data = mtod(mp, uint8_t *);
4088                 data = data + pad;
4089
4090                 if (0)
4091                         qlnx_dump_buf8(ha, __func__, data, len);
4092
4093                 /* For every Rx BD consumed, we allocate a new BD so the BD ring
4094                  * is always with a fixed size. If allocation fails, we take the
4095                  * consumed BD and return it to the ring in the PROD position.
4096                  * The packet that was received on that BD will be dropped (and
4097                  * not passed to the upper stack).
4098                  */
4099                 /* If this is an error packet then drop it */
4100                 if ((le16toh(cqe->fast_path_regular.pars_flags.flags)) &
4101                         CQE_FLAGS_ERR) {
4102
4103                         QL_DPRINT1(ha, "CQE in CONS = %u has error, flags = %x,"
4104                                 " dropping incoming packet\n", sw_comp_cons,
4105                         le16toh(cqe->fast_path_regular.pars_flags.flags));
4106                         fp->err_rx_hw_errors++;
4107
4108                         qlnx_reuse_rx_data(rxq);
4109
4110                         QLNX_INC_IERRORS(ifp);
4111
4112                         goto next_cqe;
4113                 }
4114
4115                 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4116
4117                         QL_DPRINT1(ha, "New buffer allocation failed, dropping"
4118                                 " incoming packet and reusing its buffer\n");
4119                         qlnx_reuse_rx_data(rxq);
4120
4121                         fp->err_rx_alloc_errors++;
4122
4123                         QLNX_INC_IQDROPS(ifp);
4124
4125                         goto next_cqe;
4126                 }
4127
4128                 ecore_chain_consume(&rxq->rx_bd_ring);
4129
4130                 len_on_first_bd = fp_cqe->len_on_first_bd;
4131                 m_adj(mp, pad);
4132                 mp->m_pkthdr.len = len;
4133
4134                 QL_DPRINT1(ha, "len = %d len_on_first_bd = %d\n",
4135                            len, len_on_first_bd);
4136                 if ((len > 60 ) && (len > len_on_first_bd)) {
4137
4138                         mp->m_len = len_on_first_bd;
4139
4140                         if (qlnx_rx_jumbo_chain(ha, fp, mp,
4141                                 (len - len_on_first_bd)) != 0) {
4142
4143                                 m_freem(mp);
4144
4145                                 QLNX_INC_IQDROPS(ifp);
4146
4147                                 goto next_cqe;
4148                         }
4149
4150                 } else if (len_on_first_bd < len) {
4151                         fp->err_rx_jumbo_chain_pkts++;
4152                 } else {
4153                         mp->m_len = len;
4154                 }
4155
4156                 mp->m_flags |= M_PKTHDR;
4157
4158                 /* assign packet to this interface interface */
4159                 mp->m_pkthdr.rcvif = ifp;
4160
4161                 /* assume no hardware checksum has complated */
4162                 mp->m_pkthdr.csum_flags = 0;
4163
4164                 mp->m_pkthdr.flowid = fp_cqe->rss_hash;
4165
4166 #if __FreeBSD_version >= 1100000
4167
4168                 hash_type = fp_cqe->bitfields &
4169                                 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
4170                                 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);
4171
4172                 switch (hash_type) {
4173
4174                 case RSS_HASH_TYPE_IPV4:
4175                         M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
4176                         break;
4177
4178                 case RSS_HASH_TYPE_TCP_IPV4:
4179                         M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
4180                         break;
4181
4182                 case RSS_HASH_TYPE_IPV6:
4183                         M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
4184                         break;
4185
4186                 case RSS_HASH_TYPE_TCP_IPV6:
4187                         M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
4188                         break;
4189
4190                 default:
4191                         M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
4192                         break;
4193                 }
4194
4195 #else
4196                 mp->m_flags |= M_FLOWID;
4197 #endif
4198
4199                 if (CQE_L3_PACKET(fp_cqe->pars_flags.flags)) {
4200                         mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
4201                 }
4202
4203                 if (!(CQE_IP_HDR_ERR(fp_cqe->pars_flags.flags))) {
4204                         mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4205                 }
4206
4207                 if (CQE_L4_HAS_CSUM(fp_cqe->pars_flags.flags)) {
4208                         mp->m_pkthdr.csum_data = 0xFFFF;
4209                         mp->m_pkthdr.csum_flags |=
4210                                 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4211                 }
4212
4213                 if (CQE_HAS_VLAN(fp_cqe->pars_flags.flags)) {
4214                         mp->m_pkthdr.ether_vtag = le16toh(fp_cqe->vlan_tag);
4215                         mp->m_flags |= M_VLANTAG;
4216                 }
4217
4218                 QLNX_INC_IPACKETS(ifp);
4219                 QLNX_INC_IBYTES(ifp, len);
4220
4221 #ifdef QLNX_SOFT_LRO
4222
4223                 if (lro_enable) {
4224
4225 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
4226
4227                         tcp_lro_queue_mbuf(lro, mp);
4228
4229 #else
4230
4231                         if (tcp_lro_rx(lro, mp, 0))
4232                                 (*ifp->if_input)(ifp, mp);
4233
4234 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
4235
4236                 } else {
4237                         (*ifp->if_input)(ifp, mp);
4238                 }
4239 #else
4240
4241                 (*ifp->if_input)(ifp, mp);
4242
4243 #endif /* #ifdef QLNX_SOFT_LRO */
4244
4245                 rx_pkt++;
4246
4247                 rxq->sw_rx_cons  = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4248
4249 next_cqe:       /* don't consume bd rx buffer */
4250                 ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
4251                 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
4252
4253                 /* CR TPA - revisit how to handle budget in TPA perhaps
4254                    increase on "end" */
4255                 if (rx_pkt == budget)
4256                         break;
4257         } /* repeat while sw_comp_cons != hw_comp_cons... */
4258
4259         /* Update producers */
4260         qlnx_update_rx_prod(p_hwfn, rxq);
4261
4262         return rx_pkt;
4263 }
4264
4265 /*
4266  * fast path interrupt
4267  */
4268
4269 static void
4270 qlnx_fp_isr(void *arg)
4271 {
4272         qlnx_ivec_t             *ivec = arg;
4273         qlnx_host_t             *ha;
4274         struct qlnx_fastpath    *fp = NULL;
4275         int                     idx;
4276
4277         ha = ivec->ha;
4278
4279         if (ha->state != QLNX_STATE_OPEN) {
4280                 return;
4281         }
4282
4283         idx = ivec->rss_idx;
4284
4285         if ((idx = ivec->rss_idx) >= ha->num_rss) {
4286                 QL_DPRINT1(ha, "illegal interrupt[%d]\n", idx);
4287                 ha->err_illegal_intr++;
4288                 return;
4289         }
4290         fp = &ha->fp_array[idx];
4291
4292         if (fp == NULL) {
4293                 ha->err_fp_null++;
4294         } else {
4295                 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
4296                 if (fp->fp_taskqueue != NULL)
4297                         taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
4298         }
4299
4300         return;
4301 }
4302
4303
4304 /*
4305  * slow path interrupt processing function
4306  * can be invoked in polled mode or in interrupt mode via taskqueue.
4307  */
4308 void
4309 qlnx_sp_isr(void *arg)
4310 {
4311         struct ecore_hwfn       *p_hwfn;
4312         qlnx_host_t             *ha;
4313         
4314         p_hwfn = arg;
4315
4316         ha = (qlnx_host_t *)p_hwfn->p_dev;
4317
4318         ha->sp_interrupts++;
4319
4320         QL_DPRINT2(ha, "enter\n");
4321
4322         ecore_int_sp_dpc(p_hwfn);
4323
4324         QL_DPRINT2(ha, "exit\n");
4325         
4326         return;
4327 }
4328
4329 /*****************************************************************************
4330  * Support Functions for DMA'able Memory
4331  *****************************************************************************/
4332
4333 static void
4334 qlnx_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
4335 {
4336         *((bus_addr_t *)arg) = 0;
4337
4338         if (error) {
4339                 printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
4340                 return;
4341         }
4342
4343         *((bus_addr_t *)arg) = segs[0].ds_addr;
4344
4345         return;
4346 }
4347
4348 static int
4349 qlnx_alloc_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
4350 {
4351         int             ret = 0;
4352         device_t        dev;
4353         bus_addr_t      b_addr;
4354
4355         dev = ha->pci_dev;
4356
4357         ret = bus_dma_tag_create(
4358                         ha->parent_tag,/* parent */
4359                         dma_buf->alignment,
4360                         ((bus_size_t)(1ULL << 32)),/* boundary */
4361                         BUS_SPACE_MAXADDR,      /* lowaddr */
4362                         BUS_SPACE_MAXADDR,      /* highaddr */
4363                         NULL, NULL,             /* filter, filterarg */
4364                         dma_buf->size,          /* maxsize */
4365                         1,                      /* nsegments */
4366                         dma_buf->size,          /* maxsegsize */
4367                         0,                      /* flags */
4368                         NULL, NULL,             /* lockfunc, lockarg */
4369                         &dma_buf->dma_tag);
4370
4371         if (ret) {
4372                 QL_DPRINT1(ha, "could not create dma tag\n");
4373                 goto qlnx_alloc_dmabuf_exit;
4374         }
4375         ret = bus_dmamem_alloc(dma_buf->dma_tag,
4376                         (void **)&dma_buf->dma_b,
4377                         (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
4378                         &dma_buf->dma_map);
4379         if (ret) {
4380                 bus_dma_tag_destroy(dma_buf->dma_tag);
4381                 QL_DPRINT1(ha, "bus_dmamem_alloc failed\n");
4382                 goto qlnx_alloc_dmabuf_exit;
4383         }
4384
4385         ret = bus_dmamap_load(dma_buf->dma_tag,
4386                         dma_buf->dma_map,
4387                         dma_buf->dma_b,
4388                         dma_buf->size,
4389                         qlnx_dmamap_callback,
4390                         &b_addr, BUS_DMA_NOWAIT);
4391
4392         if (ret || !b_addr) {
4393                 bus_dma_tag_destroy(dma_buf->dma_tag);
4394                 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
4395                         dma_buf->dma_map);
4396                 ret = -1;
4397                 goto qlnx_alloc_dmabuf_exit;
4398         }
4399
4400         dma_buf->dma_addr = b_addr;
4401
4402 qlnx_alloc_dmabuf_exit:
4403
4404         return ret;
4405 }
4406
4407 static void
4408 qlnx_free_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
4409 {
4410         bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
4411         bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
4412         bus_dma_tag_destroy(dma_buf->dma_tag);
4413         return;
4414 }
4415
4416 void *
4417 qlnx_dma_alloc_coherent(void *ecore_dev, bus_addr_t *phys, uint32_t size)
4418 {
4419         qlnx_dma_t      dma_buf;
4420         qlnx_dma_t      *dma_p;
4421         qlnx_host_t     *ha;
4422         device_t        dev;
4423
4424         ha = (qlnx_host_t *)ecore_dev;
4425         dev = ha->pci_dev;
4426
4427         size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
4428
4429         memset(&dma_buf, 0, sizeof (qlnx_dma_t));
4430
4431         dma_buf.size = size + PAGE_SIZE;
4432         dma_buf.alignment = 8;
4433
4434         if (qlnx_alloc_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf) != 0)
4435                 return (NULL);
4436         bzero((uint8_t *)dma_buf.dma_b, dma_buf.size);
4437
4438         *phys = dma_buf.dma_addr;
4439
4440         dma_p = (qlnx_dma_t *)((uint8_t *)dma_buf.dma_b + size);
4441
4442         memcpy(dma_p, &dma_buf, sizeof(qlnx_dma_t));
4443 /*
4444         QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
4445                 (void *)dma_buf.dma_map, (void *)dma_buf.dma_tag,
4446                 dma_buf.dma_b, (void *)dma_buf.dma_addr, size);
4447 */
4448         return (dma_buf.dma_b);
4449 }
4450
4451 void
4452 qlnx_dma_free_coherent(void *ecore_dev, void *v_addr, bus_addr_t phys,
4453         uint32_t size)
4454 {
4455         qlnx_dma_t dma_buf, *dma_p;
4456         qlnx_host_t     *ha;
4457         device_t        dev;
4458
4459         ha = (qlnx_host_t *)ecore_dev;
4460         dev = ha->pci_dev;
4461
4462         if (v_addr == NULL)
4463                 return;
4464
4465         size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
4466
4467         dma_p = (qlnx_dma_t *)((uint8_t *)v_addr + size);
4468 /*
4469         QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
4470                 (void *)dma_p->dma_map, (void *)dma_p->dma_tag,
4471                 dma_p->dma_b, (void *)dma_p->dma_addr, size);
4472 */
4473         dma_buf = *dma_p;
4474
4475         qlnx_free_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf);
4476         return;
4477 }
4478
4479 static int
4480 qlnx_alloc_parent_dma_tag(qlnx_host_t *ha)
4481 {
4482         int             ret;
4483         device_t        dev;
4484
4485         dev = ha->pci_dev;
4486
4487         /*
4488          * Allocate parent DMA Tag
4489          */
4490         ret = bus_dma_tag_create(
4491                         bus_get_dma_tag(dev),   /* parent */
4492                         1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
4493                         BUS_SPACE_MAXADDR,      /* lowaddr */
4494                         BUS_SPACE_MAXADDR,      /* highaddr */
4495                         NULL, NULL,             /* filter, filterarg */
4496                         BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
4497                         0,                      /* nsegments */
4498                         BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
4499                         0,                      /* flags */
4500                         NULL, NULL,             /* lockfunc, lockarg */
4501                         &ha->parent_tag);
4502
4503         if (ret) {
4504                 QL_DPRINT1(ha, "could not create parent dma tag\n");
4505                 return (-1);
4506         }
4507
4508         ha->flags.parent_tag = 1;
4509
4510         return (0);
4511 }
4512
4513 static void
4514 qlnx_free_parent_dma_tag(qlnx_host_t *ha)
4515 {
4516         if (ha->parent_tag != NULL) {
4517                 bus_dma_tag_destroy(ha->parent_tag);
4518                 ha->parent_tag = NULL;
4519         }
4520         return;
4521 }
4522
4523 static int
4524 qlnx_alloc_tx_dma_tag(qlnx_host_t *ha)
4525 {
4526         if (bus_dma_tag_create(NULL,    /* parent */
4527                 1, 0,    /* alignment, bounds */
4528                 BUS_SPACE_MAXADDR,       /* lowaddr */
4529                 BUS_SPACE_MAXADDR,       /* highaddr */
4530                 NULL, NULL,      /* filter, filterarg */
4531                 QLNX_MAX_TSO_FRAME_SIZE,     /* maxsize */
4532                 QLNX_MAX_SEGMENTS,        /* nsegments */
4533                 (PAGE_SIZE * 4),        /* maxsegsize */
4534                 BUS_DMA_ALLOCNOW,        /* flags */
4535                 NULL,    /* lockfunc */
4536                 NULL,    /* lockfuncarg */
4537                 &ha->tx_tag)) {
4538
4539                 QL_DPRINT1(ha, "tx_tag alloc failed\n");
4540                 return (-1);
4541         }
4542
4543         return (0);
4544 }
4545
4546 static void
4547 qlnx_free_tx_dma_tag(qlnx_host_t *ha)
4548 {
4549         if (ha->tx_tag != NULL) {
4550                 bus_dma_tag_destroy(ha->tx_tag);
4551                 ha->tx_tag = NULL;
4552         }
4553         return;
4554 }
4555
4556 static int
4557 qlnx_alloc_rx_dma_tag(qlnx_host_t *ha)
4558 {
4559         if (bus_dma_tag_create(NULL,    /* parent */
4560                         1, 0,    /* alignment, bounds */
4561                         BUS_SPACE_MAXADDR,       /* lowaddr */
4562                         BUS_SPACE_MAXADDR,       /* highaddr */
4563                         NULL, NULL,      /* filter, filterarg */
4564                         MJUM9BYTES,     /* maxsize */
4565                         1,        /* nsegments */
4566                         MJUM9BYTES,        /* maxsegsize */
4567                         BUS_DMA_ALLOCNOW,        /* flags */
4568                         NULL,    /* lockfunc */
4569                         NULL,    /* lockfuncarg */
4570                         &ha->rx_tag)) {
4571
4572                 QL_DPRINT1(ha, " rx_tag alloc failed\n");
4573
4574                 return (-1);
4575         }
4576         return (0);
4577 }
4578
4579 static void
4580 qlnx_free_rx_dma_tag(qlnx_host_t *ha)
4581 {
4582         if (ha->rx_tag != NULL) {
4583                 bus_dma_tag_destroy(ha->rx_tag);
4584                 ha->rx_tag = NULL;
4585         }
4586         return;
4587 }
4588
4589 /*********************************
4590  * Exported functions
4591  *********************************/
4592 uint32_t
4593 qlnx_pci_bus_get_bar_size(void *ecore_dev, uint8_t bar_id)
4594 {
4595         uint32_t bar_size;
4596
4597         bar_id = bar_id * 2;
4598
4599         bar_size = bus_get_resource_count(((qlnx_host_t *)ecore_dev)->pci_dev,
4600                                 SYS_RES_MEMORY,
4601                                 PCIR_BAR(bar_id));
4602
4603         return (bar_size);
4604 }
4605
4606 uint32_t
4607 qlnx_pci_read_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t *reg_value)
4608 {
4609         *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
4610                                 pci_reg, 1);
4611         return 0;
4612 }
4613
4614 uint32_t
4615 qlnx_pci_read_config_word(void *ecore_dev, uint32_t pci_reg,
4616         uint16_t *reg_value)
4617 {
4618         *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
4619                                 pci_reg, 2);
4620         return 0;
4621 }
4622
4623 uint32_t
4624 qlnx_pci_read_config_dword(void *ecore_dev, uint32_t pci_reg,
4625         uint32_t *reg_value)
4626 {
4627         *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
4628                                 pci_reg, 4);
4629         return 0;
4630 }
4631
4632 void
4633 qlnx_pci_write_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t reg_value)
4634 {
4635         pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
4636                 pci_reg, reg_value, 1);
4637         return;
4638 }
4639
4640 void
4641 qlnx_pci_write_config_word(void *ecore_dev, uint32_t pci_reg,
4642         uint16_t reg_value)
4643 {
4644         pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
4645                 pci_reg, reg_value, 2);
4646         return;
4647 }
4648
4649 void
4650 qlnx_pci_write_config_dword(void *ecore_dev, uint32_t pci_reg,
4651         uint32_t reg_value)
4652 {
4653         pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
4654                 pci_reg, reg_value, 4);
4655         return;
4656 }
4657
4658
4659 int
4660 qlnx_pci_find_capability(void *ecore_dev, int cap)
4661 {
4662         int             reg;
4663         qlnx_host_t     *ha;
4664
4665         ha = ecore_dev;
4666
4667         if (pci_find_cap(ha->pci_dev, PCIY_EXPRESS, &reg) == 0)
4668                 return reg;
4669         else {
4670                 QL_DPRINT1(ha, "failed\n");
4671                 return 0;
4672         }
4673 }
4674
4675 uint32_t
4676 qlnx_reg_rd32(void *hwfn, uint32_t reg_addr)
4677 {
4678         uint32_t                data32;
4679         struct ecore_dev        *cdev;
4680         struct ecore_hwfn       *p_hwfn;
4681
4682         p_hwfn = hwfn;
4683
4684         cdev = p_hwfn->p_dev;
4685
4686         reg_addr = (uint32_t)((uint8_t *)(p_hwfn->regview) -
4687                         (uint8_t *)(cdev->regview)) + reg_addr;
4688
4689         data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, reg_addr);
4690
4691         return (data32);
4692 }
4693
4694 void
4695 qlnx_reg_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
4696 {
4697         struct ecore_dev        *cdev;
4698         struct ecore_hwfn       *p_hwfn;
4699
4700         p_hwfn = hwfn;
4701
4702         cdev = p_hwfn->p_dev;
4703
4704         reg_addr = (uint32_t)((uint8_t *)(p_hwfn->regview) -
4705                         (uint8_t *)(cdev->regview)) + reg_addr;
4706
4707         bus_write_4(((qlnx_host_t *)cdev)->pci_reg, reg_addr, value);
4708
4709         return;
4710 }
4711
4712 void
4713 qlnx_reg_wr16(void *hwfn, uint32_t reg_addr, uint16_t value)
4714 {
4715         struct ecore_dev        *cdev;
4716         struct ecore_hwfn       *p_hwfn;
4717
4718         p_hwfn = hwfn;
4719
4720         cdev = p_hwfn->p_dev;
4721
4722         reg_addr = (uint32_t)((uint8_t *)(p_hwfn->regview) -
4723                         (uint8_t *)(cdev->regview)) + reg_addr;
4724
4725         bus_write_2(((qlnx_host_t *)cdev)->pci_reg, reg_addr, value);
4726
4727         return;
4728 }
4729
4730 void
4731 qlnx_dbell_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
4732 {
4733         struct ecore_dev        *cdev;
4734         struct ecore_hwfn       *p_hwfn;
4735
4736         p_hwfn = hwfn;
4737
4738         cdev = p_hwfn->p_dev;
4739
4740         reg_addr = (uint32_t)((uint8_t *)(p_hwfn->doorbells) -
4741                         (uint8_t *)(cdev->doorbells)) + reg_addr;
4742
4743         bus_write_4(((qlnx_host_t *)cdev)->pci_dbells, reg_addr, value);
4744
4745         return;
4746 }
4747
4748 uint32_t
4749 qlnx_direct_reg_rd32(void *p_hwfn, uint32_t *reg_addr)
4750 {
4751         uint32_t                data32;
4752         uint32_t                offset;
4753         struct ecore_dev        *cdev;
4754
4755         cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
4756         offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
4757
4758         data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, offset);
4759
4760         return (data32);
4761 }
4762
4763 void
4764 qlnx_direct_reg_wr32(void *p_hwfn, void *reg_addr, uint32_t value)
4765 {
4766         uint32_t                offset;
4767         struct ecore_dev        *cdev;
4768
4769         cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
4770         offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
4771
4772         bus_write_4(((qlnx_host_t *)cdev)->pci_reg, offset, value);
4773
4774         return;
4775 }
4776
4777 void
4778 qlnx_direct_reg_wr64(void *p_hwfn, void *reg_addr, uint64_t value)
4779 {
4780         uint32_t                offset;
4781         struct ecore_dev        *cdev;
4782
4783         cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
4784         offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
4785
4786         bus_write_8(((qlnx_host_t *)cdev)->pci_reg, offset, value);
4787         return;
4788 }
4789
4790 void *
4791 qlnx_zalloc(uint32_t size)
4792 {
4793         caddr_t va;
4794
4795         va = malloc((unsigned long)size, M_QLNXBUF, M_NOWAIT);
4796         bzero(va, size);
4797         return ((void *)va);
4798 }
4799
4800 void
4801 qlnx_barrier(void *p_hwfn)
4802 {
4803         qlnx_host_t     *ha;
4804
4805         ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
4806         bus_barrier(ha->pci_reg,  0, 0, BUS_SPACE_BARRIER_WRITE);
4807 }
4808
4809 void
4810 qlnx_link_update(void *p_hwfn)
4811 {
4812         qlnx_host_t     *ha;
4813         int             prev_link_state;
4814
4815         ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
4816
4817         qlnx_fill_link(p_hwfn, &ha->if_link);
4818
4819         prev_link_state = ha->link_up;
4820         ha->link_up = ha->if_link.link_up;
4821
4822         if (prev_link_state !=  ha->link_up) {
4823                 if (ha->link_up) {
4824                         if_link_state_change(ha->ifp, LINK_STATE_UP);
4825                 } else {
4826                         if_link_state_change(ha->ifp, LINK_STATE_DOWN);
4827                 }
4828         }
4829         return;
4830 }
4831
4832 void
4833 qlnx_fill_link(struct ecore_hwfn *hwfn, struct qlnx_link_output *if_link)
4834 {
4835         struct ecore_mcp_link_params    link_params;
4836         struct ecore_mcp_link_state     link_state;
4837
4838         memset(if_link, 0, sizeof(*if_link));
4839         memset(&link_params, 0, sizeof(struct ecore_mcp_link_params));
4840         memset(&link_state, 0, sizeof(struct ecore_mcp_link_state));
4841
4842         /* Prepare source inputs */
4843         /* we only deal with physical functions */
4844         memcpy(&link_params, ecore_mcp_get_link_params(hwfn),
4845                 sizeof(link_params));
4846         memcpy(&link_state, ecore_mcp_get_link_state(hwfn),
4847                 sizeof(link_state));
4848
4849         ecore_mcp_get_media_type(hwfn->p_dev, &if_link->media_type);
4850
4851         /* Set the link parameters to pass to protocol driver */
4852         if (link_state.link_up) {
4853                 if_link->link_up = true;
4854                 if_link->speed = link_state.speed;
4855         }
4856
4857         if_link->supported_caps = QLNX_LINK_CAP_FIBRE;
4858
4859         if (link_params.speed.autoneg)
4860                 if_link->supported_caps |= QLNX_LINK_CAP_Autoneg;
4861
4862         if (link_params.pause.autoneg ||
4863                 (link_params.pause.forced_rx && link_params.pause.forced_tx))
4864                 if_link->supported_caps |= QLNX_LINK_CAP_Asym_Pause;
4865
4866         if (link_params.pause.autoneg || link_params.pause.forced_rx ||
4867                 link_params.pause.forced_tx)
4868                 if_link->supported_caps |= QLNX_LINK_CAP_Pause;
4869
4870         if (link_params.speed.advertised_speeds &
4871                 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
4872                 if_link->supported_caps |= QLNX_LINK_CAP_1000baseT_Half |
4873                                            QLNX_LINK_CAP_1000baseT_Full;
4874
4875         if (link_params.speed.advertised_speeds &
4876                 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
4877                 if_link->supported_caps |= QLNX_LINK_CAP_10000baseKR_Full;
4878
4879         if (link_params.speed.advertised_speeds &
4880                 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
4881                 if_link->supported_caps |= QLNX_LINK_CAP_25000baseKR_Full;
4882
4883         if (link_params.speed.advertised_speeds &
4884                 NVM_CFG1_PORT_DRV_LINK_SPEED_40G)
4885                 if_link->supported_caps |= QLNX_LINK_CAP_40000baseLR4_Full;
4886
4887         if (link_params.speed.advertised_speeds &
4888                 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
4889                 if_link->supported_caps |= QLNX_LINK_CAP_50000baseKR2_Full;
4890
4891         if (link_params.speed.advertised_speeds &
4892                 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
4893                 if_link->supported_caps |= QLNX_LINK_CAP_100000baseKR4_Full;
4894
4895         if_link->advertised_caps = if_link->supported_caps;
4896
4897         if_link->autoneg = link_params.speed.autoneg;
4898         if_link->duplex = QLNX_LINK_DUPLEX;
4899
4900         /* Link partner capabilities */
4901
4902         if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_HD)
4903                 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Half;
4904
4905         if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_FD)
4906                 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Full;
4907
4908         if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_10G)
4909                 if_link->link_partner_caps |= QLNX_LINK_CAP_10000baseKR_Full;
4910
4911         if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_25G)
4912                 if_link->link_partner_caps |= QLNX_LINK_CAP_25000baseKR_Full;
4913
4914         if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_40G)
4915                 if_link->link_partner_caps |= QLNX_LINK_CAP_40000baseLR4_Full;
4916
4917         if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_50G)
4918                 if_link->link_partner_caps |= QLNX_LINK_CAP_50000baseKR2_Full;
4919
4920         if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_100G)
4921                 if_link->link_partner_caps |= QLNX_LINK_CAP_100000baseKR4_Full;
4922
4923         if (link_state.an_complete)
4924                 if_link->link_partner_caps |= QLNX_LINK_CAP_Autoneg;
4925
4926         if (link_state.partner_adv_pause)
4927                 if_link->link_partner_caps |= QLNX_LINK_CAP_Pause;
4928
4929         if ((link_state.partner_adv_pause ==
4930                 ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE) ||
4931                 (link_state.partner_adv_pause ==
4932                         ECORE_LINK_PARTNER_BOTH_PAUSE))
4933                 if_link->link_partner_caps |= QLNX_LINK_CAP_Asym_Pause;
4934
4935         return;
4936 }
4937
4938 static int
4939 qlnx_nic_setup(struct ecore_dev *cdev, struct ecore_pf_params *func_params)
4940 {
4941         int     rc, i;
4942
4943         for (i = 0; i < cdev->num_hwfns; i++) {
4944                 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
4945                 p_hwfn->pf_params = *func_params;
4946         }
4947
4948         rc = ecore_resc_alloc(cdev);
4949         if (rc)
4950                 goto qlnx_nic_setup_exit;
4951
4952         ecore_resc_setup(cdev);
4953
4954 qlnx_nic_setup_exit:
4955
4956         return rc;
4957 }
4958
4959 static int
4960 qlnx_nic_start(struct ecore_dev *cdev)
4961 {
4962         int                             rc;
4963         struct ecore_hw_init_params     params;
4964
4965         bzero(&params, sizeof (struct ecore_hw_init_params));
4966
4967         params.p_tunn = NULL;
4968         params.b_hw_start = true;
4969         params.int_mode = cdev->int_mode;
4970         params.allow_npar_tx_switch = true;
4971         params.bin_fw_data = NULL;
4972
4973         rc = ecore_hw_init(cdev, &params);
4974         if (rc) {
4975                 ecore_resc_free(cdev);
4976                 return rc;
4977         }
4978
4979         return 0;
4980 }
4981
4982 static int
4983 qlnx_slowpath_start(qlnx_host_t *ha)
4984 {
4985         struct ecore_dev        *cdev;
4986         struct ecore_pf_params  pf_params;
4987         int                     rc;
4988
4989         memset(&pf_params, 0, sizeof(struct ecore_pf_params));
4990         pf_params.eth_pf_params.num_cons  =
4991                 (ha->num_rss) * (ha->num_tc + 1);
4992
4993         cdev = &ha->cdev;
4994
4995         rc = qlnx_nic_setup(cdev, &pf_params);
4996         if (rc)
4997                 goto qlnx_slowpath_start_exit;
4998
4999         cdev->int_mode = ECORE_INT_MODE_MSIX;
5000         cdev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE;
5001
5002 #ifdef QLNX_MAX_COALESCE
5003         cdev->rx_coalesce_usecs = 255;
5004         cdev->tx_coalesce_usecs = 255;
5005 #endif
5006
5007         rc = qlnx_nic_start(cdev);
5008
5009         ha->rx_coalesce_usecs = cdev->rx_coalesce_usecs;
5010         ha->tx_coalesce_usecs = cdev->tx_coalesce_usecs;
5011
5012 qlnx_slowpath_start_exit:
5013
5014         return (rc);
5015 }
5016
5017 static int
5018 qlnx_slowpath_stop(qlnx_host_t *ha)
5019 {
5020         struct ecore_dev        *cdev;
5021         device_t                dev = ha->pci_dev;
5022         int                     i;
5023
5024         cdev = &ha->cdev;
5025
5026         ecore_hw_stop(cdev);
5027
5028         for (i = 0; i < ha->cdev.num_hwfns; i++) {
5029
5030                 if (ha->sp_handle[i])
5031                         (void)bus_teardown_intr(dev, ha->sp_irq[i],
5032                                 ha->sp_handle[i]);
5033
5034                 ha->sp_handle[i] = NULL;
5035
5036                 if (ha->sp_irq[i])
5037                         (void) bus_release_resource(dev, SYS_RES_IRQ,
5038                                 ha->sp_irq_rid[i], ha->sp_irq[i]);
5039                 ha->sp_irq[i] = NULL;
5040         }
5041
5042         ecore_resc_free(cdev);
5043
5044         return 0;
5045 }
5046
5047 static void
5048 qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
5049         char ver_str[VER_SIZE])
5050 {
5051         int     i;
5052
5053         memcpy(cdev->name, name, NAME_SIZE);
5054
5055         for_each_hwfn(cdev, i) {
5056                 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
5057         }
5058
5059         cdev->drv_type = DRV_ID_DRV_TYPE_FREEBSD;
5060
5061         return ;
5062 }
5063
5064 void
5065 qlnx_get_protocol_stats(void *cdev, int proto_type, void *proto_stats)
5066 {
5067         enum ecore_mcp_protocol_type    type;
5068         union ecore_mcp_protocol_stats  *stats;
5069         struct ecore_eth_stats          eth_stats;
5070         qlnx_host_t                     *ha;
5071
5072         ha = cdev;
5073         stats = proto_stats;
5074         type = proto_type;
5075
5076         switch (type) {
5077
5078         case ECORE_MCP_LAN_STATS:
5079                 ecore_get_vport_stats((struct ecore_dev *)cdev, &eth_stats);
5080                 stats->lan_stats.ucast_rx_pkts = eth_stats.common.rx_ucast_pkts;
5081                 stats->lan_stats.ucast_tx_pkts = eth_stats.common.tx_ucast_pkts;
5082                 stats->lan_stats.fcs_err = -1;
5083                 break;
5084
5085         default:
5086                 ha->err_get_proto_invalid_type++;
5087
5088                 QL_DPRINT1(ha, "invalid protocol type 0x%x\n", type);
5089                 break;
5090         }
5091         return;
5092 }
5093
5094 static int
5095 qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver)
5096 {
5097         struct ecore_hwfn       *p_hwfn;
5098         struct ecore_ptt        *p_ptt;
5099
5100         p_hwfn = &ha->cdev.hwfns[0];
5101         p_ptt = ecore_ptt_acquire(p_hwfn);
5102
5103         if (p_ptt ==  NULL) {
5104                 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
5105                 return (-1);
5106         }
5107         ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, mfw_ver, NULL);
5108         
5109         ecore_ptt_release(p_hwfn, p_ptt);
5110
5111         return (0);
5112 }
5113
5114 static int
5115 qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size)
5116 {
5117         struct ecore_hwfn       *p_hwfn;
5118         struct ecore_ptt        *p_ptt;
5119
5120         p_hwfn = &ha->cdev.hwfns[0];
5121         p_ptt = ecore_ptt_acquire(p_hwfn);
5122
5123         if (p_ptt ==  NULL) {
5124                 QL_DPRINT1(ha,"ecore_ptt_acquire failed\n");
5125                 return (-1);
5126         }
5127         ecore_mcp_get_flash_size(p_hwfn, p_ptt, flash_size);
5128         
5129         ecore_ptt_release(p_hwfn, p_ptt);
5130
5131         return (0);
5132 }
5133
5134 static int
5135 qlnx_alloc_mem_arrays(qlnx_host_t *ha)
5136 {
5137         struct ecore_dev        *cdev;
5138
5139         cdev = &ha->cdev;
5140
5141         bzero(&ha->txq_array[0], (sizeof(struct qlnx_tx_queue) * QLNX_MAX_RSS));
5142         bzero(&ha->rxq_array[0], (sizeof(struct qlnx_rx_queue) * QLNX_MAX_RSS));
5143         bzero(&ha->sb_array[0], (sizeof(struct ecore_sb_info) * QLNX_MAX_RSS));
5144
5145         return 0;
5146 }
5147
5148 static void
5149 qlnx_init_fp(qlnx_host_t *ha)
5150 {
5151         int rss_id, txq_array_index, tc;
5152
5153         for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
5154
5155                 struct qlnx_fastpath *fp = &ha->fp_array[rss_id];
5156
5157                 fp->rss_id = rss_id;
5158                 fp->edev = ha;
5159                 fp->sb_info = &ha->sb_array[rss_id];
5160                 fp->rxq = &ha->rxq_array[rss_id];
5161                 fp->rxq->rxq_id = rss_id;
5162
5163                 for (tc = 0; tc < ha->num_tc; tc++) {
5164                         txq_array_index = tc * ha->num_rss + rss_id;
5165                         fp->txq[tc] = &ha->txq_array[txq_array_index];
5166                         fp->txq[tc]->index = txq_array_index;
5167                 }
5168
5169                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", qlnx_name_str,
5170                         rss_id);
5171
5172                 fp->tx_ring_full = 0;
5173
5174                 /* reset all the statistics counters */
5175
5176                 fp->tx_pkts_processed = 0;
5177                 fp->tx_pkts_freed = 0;
5178                 fp->tx_pkts_transmitted = 0;
5179                 fp->tx_pkts_completed = 0;
5180                 fp->tx_lso_wnd_min_len = 0;
5181                 fp->tx_defrag = 0;
5182                 fp->tx_nsegs_gt_elem_left = 0;
5183                 fp->tx_tso_max_nsegs = 0;
5184                 fp->tx_tso_min_nsegs = 0;
5185                 fp->err_tx_nsegs_gt_elem_left = 0;
5186                 fp->err_tx_dmamap_create = 0;
5187                 fp->err_tx_defrag_dmamap_load = 0;
5188                 fp->err_tx_non_tso_max_seg = 0;
5189                 fp->err_tx_dmamap_load = 0;
5190                 fp->err_tx_defrag = 0;
5191                 fp->err_tx_free_pkt_null = 0;
5192                 fp->err_tx_cons_idx_conflict = 0;
5193
5194                 fp->rx_pkts = 0;
5195                 fp->err_m_getcl = 0;
5196                 fp->err_m_getjcl = 0;
5197         }
5198         return;
5199 }
5200
5201 static void
5202 qlnx_free_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info)
5203 {
5204         struct ecore_dev        *cdev;
5205
5206         cdev = &ha->cdev;
5207
5208         if (sb_info->sb_virt) {
5209                 OSAL_DMA_FREE_COHERENT(cdev, ((void *)sb_info->sb_virt),
5210                         (sb_info->sb_phys), (sizeof(*sb_info->sb_virt)));
5211                 sb_info->sb_virt = NULL;
5212         }
5213 }
5214
5215 static int
5216 qlnx_sb_init(struct ecore_dev *cdev, struct ecore_sb_info *sb_info,
5217         void *sb_virt_addr, bus_addr_t sb_phy_addr, u16 sb_id)
5218 {
5219         struct ecore_hwfn       *p_hwfn;
5220         int                     hwfn_index, rc;
5221         u16                     rel_sb_id;
5222
5223         hwfn_index = sb_id % cdev->num_hwfns;
5224         p_hwfn = &cdev->hwfns[hwfn_index];
5225         rel_sb_id = sb_id / cdev->num_hwfns;
5226
5227         QL_DPRINT2(((qlnx_host_t *)cdev), 
5228                 "hwfn_index = %d p_hwfn = %p sb_id = 0x%x rel_sb_id = 0x%x \
5229                 sb_info = %p sb_virt_addr = %p sb_phy_addr = %p\n",
5230                 hwfn_index, p_hwfn, sb_id, rel_sb_id, sb_info,
5231                 sb_virt_addr, (void *)sb_phy_addr);
5232
5233         rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
5234                              sb_virt_addr, sb_phy_addr, rel_sb_id);
5235
5236         return rc;
5237 }
5238
5239 /* This function allocates fast-path status block memory */
5240 static int
5241 qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info, u16 sb_id)
5242 {
5243         struct status_block_e4  *sb_virt;
5244         bus_addr_t              sb_phys;
5245         int                     rc;
5246         uint32_t                size;
5247         struct ecore_dev        *cdev;
5248
5249         cdev = &ha->cdev;
5250
5251         size = sizeof(*sb_virt);
5252         sb_virt = OSAL_DMA_ALLOC_COHERENT(cdev, (&sb_phys), size);
5253
5254         if (!sb_virt) {
5255                 QL_DPRINT1(ha, "Status block allocation failed\n");
5256                 return -ENOMEM;
5257         }
5258
5259         rc = qlnx_sb_init(cdev, sb_info, sb_virt, sb_phys, sb_id);
5260         if (rc) {
5261                 OSAL_DMA_FREE_COHERENT(cdev, sb_virt, sb_phys, size);
5262         }
5263
5264         return rc;
5265 }
5266
5267 static void
5268 qlnx_free_rx_buffers(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
5269 {
5270         int                     i;
5271         struct sw_rx_data       *rx_buf;
5272
5273         for (i = 0; i < rxq->num_rx_buffers; i++) {
5274
5275                 rx_buf = &rxq->sw_rx_ring[i];
5276
5277                 if (rx_buf->data != NULL) {
5278                         if (rx_buf->map != NULL) {
5279                                 bus_dmamap_unload(ha->rx_tag, rx_buf->map);
5280                                 bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
5281                                 rx_buf->map = NULL;
5282                         }
5283                         m_freem(rx_buf->data);
5284                         rx_buf->data = NULL;
5285                 }
5286         }
5287         return;
5288 }
5289
5290 static void
5291 qlnx_free_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
5292 {
5293         struct ecore_dev        *cdev;
5294         int                     i;
5295
5296         cdev = &ha->cdev;
5297
5298         qlnx_free_rx_buffers(ha, rxq);
5299
5300         for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
5301                 qlnx_free_tpa_mbuf(ha, &rxq->tpa_info[i]);
5302                 if (rxq->tpa_info[i].mpf != NULL)
5303                         m_freem(rxq->tpa_info[i].mpf);
5304         }
5305
5306         bzero((void *)&rxq->sw_rx_ring[0],
5307                 (sizeof (struct sw_rx_data) * RX_RING_SIZE));
5308
5309         /* Free the real RQ ring used by FW */
5310         if (rxq->rx_bd_ring.p_virt_addr) {
5311                 ecore_chain_free(cdev, &rxq->rx_bd_ring);
5312                 rxq->rx_bd_ring.p_virt_addr = NULL;
5313         }
5314
5315         /* Free the real completion ring used by FW */
5316         if (rxq->rx_comp_ring.p_virt_addr &&
5317                         rxq->rx_comp_ring.pbl_sp.p_virt_table) {
5318                 ecore_chain_free(cdev, &rxq->rx_comp_ring);
5319                 rxq->rx_comp_ring.p_virt_addr = NULL;
5320                 rxq->rx_comp_ring.pbl_sp.p_virt_table = NULL;
5321         }
5322
5323 #ifdef QLNX_SOFT_LRO
5324         {
5325                 struct lro_ctrl *lro;
5326
5327                 lro = &rxq->lro;
5328                 tcp_lro_free(lro);
5329         }
5330 #endif /* #ifdef QLNX_SOFT_LRO */
5331
5332         return;
5333 }
5334
5335 static int
5336 qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
5337 {
5338         register struct mbuf    *mp;
5339         uint16_t                rx_buf_size;
5340         struct sw_rx_data       *sw_rx_data;
5341         struct eth_rx_bd        *rx_bd;
5342         dma_addr_t              dma_addr;
5343         bus_dmamap_t            map;
5344         bus_dma_segment_t       segs[1];
5345         int                     nsegs;
5346         int                     ret;
5347         struct ecore_dev        *cdev;
5348
5349         cdev = &ha->cdev;
5350
5351         rx_buf_size = rxq->rx_buf_size;
5352
5353         mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);
5354
5355         if (mp == NULL) {
5356                 QL_DPRINT1(ha, "Failed to allocate Rx data\n");
5357                 return -ENOMEM;
5358         }
5359
5360         mp->m_len = mp->m_pkthdr.len = rx_buf_size;
5361
5362         map = (bus_dmamap_t)0;
5363
5364         ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
5365                         BUS_DMA_NOWAIT);
5366         dma_addr = segs[0].ds_addr;
5367
5368         if (ret || !dma_addr || (nsegs != 1)) {
5369                 m_freem(mp);
5370                 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
5371                            ret, (long long unsigned int)dma_addr, nsegs);
5372                 return -ENOMEM;
5373         }
5374
5375         sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
5376         sw_rx_data->data = mp;
5377         sw_rx_data->dma_addr = dma_addr;
5378         sw_rx_data->map = map;
5379
5380         /* Advance PROD and get BD pointer */
5381         rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
5382         rx_bd->addr.hi = htole32(U64_HI(dma_addr));
5383         rx_bd->addr.lo = htole32(U64_LO(dma_addr));
5384         bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);
5385
5386         rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
5387
5388         return 0;
5389 }
5390
5391 static int
5392 qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
5393         struct qlnx_agg_info *tpa)
5394 {
5395         struct mbuf             *mp;
5396         dma_addr_t              dma_addr;
5397         bus_dmamap_t            map;
5398         bus_dma_segment_t       segs[1];
5399         int                     nsegs;
5400         int                     ret;
5401         struct sw_rx_data       *rx_buf;
5402
5403         mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);
5404
5405         if (mp == NULL) {
5406                 QL_DPRINT1(ha, "Failed to allocate Rx data\n");
5407                 return -ENOMEM;
5408         }
5409
5410         mp->m_len = mp->m_pkthdr.len = rx_buf_size;
5411
5412         map = (bus_dmamap_t)0;
5413
5414         ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
5415                         BUS_DMA_NOWAIT);
5416         dma_addr = segs[0].ds_addr;
5417
5418         if (ret || !dma_addr || (nsegs != 1)) {
5419                 m_freem(mp);
5420                 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
5421                         ret, (long long unsigned int)dma_addr, nsegs);
5422                 return -ENOMEM;
5423         }
5424
5425         rx_buf = &tpa->rx_buf;
5426
5427         memset(rx_buf, 0, sizeof (struct sw_rx_data));
5428
5429         rx_buf->data = mp;
5430         rx_buf->dma_addr = dma_addr;
5431         rx_buf->map = map;
5432
5433         bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);
5434
5435         return (0);
5436 }
5437
5438 static void
5439 qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa)
5440 {
5441         struct sw_rx_data       *rx_buf;
5442
5443         rx_buf = &tpa->rx_buf;
5444
5445         if (rx_buf->data != NULL) {
5446                 if (rx_buf->map != NULL) {
5447                         bus_dmamap_unload(ha->rx_tag, rx_buf->map);
5448                         bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
5449                         rx_buf->map = NULL;
5450                 }
5451                 m_freem(rx_buf->data);
5452                 rx_buf->data = NULL;
5453         }
5454         return;
5455 }
5456
5457 /* This function allocates all memory needed per Rx queue */
5458 static int
5459 qlnx_alloc_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
5460 {
5461         int                     i, rc, num_allocated;
5462         struct ifnet            *ifp;
5463         struct ecore_dev         *cdev;
5464
5465         cdev = &ha->cdev;
5466         ifp = ha->ifp;
5467
5468         rxq->num_rx_buffers = RX_RING_SIZE;
5469
5470         rxq->rx_buf_size = ha->rx_buf_size;
5471
5472         /* Allocate the parallel driver ring for Rx buffers */
5473         bzero((void *)&rxq->sw_rx_ring[0],
5474                 (sizeof (struct sw_rx_data) * RX_RING_SIZE));
5475
5476         /* Allocate FW Rx ring  */
5477
5478         rc = ecore_chain_alloc(cdev,
5479                         ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
5480                         ECORE_CHAIN_MODE_NEXT_PTR,
5481                         ECORE_CHAIN_CNT_TYPE_U16,
5482                         RX_RING_SIZE,
5483                         sizeof(struct eth_rx_bd),
5484                         &rxq->rx_bd_ring, NULL);
5485
5486         if (rc)
5487                 goto err;
5488
5489         /* Allocate FW completion ring */
5490         rc = ecore_chain_alloc(cdev,
5491                         ECORE_CHAIN_USE_TO_CONSUME,
5492                         ECORE_CHAIN_MODE_PBL,
5493                         ECORE_CHAIN_CNT_TYPE_U16,
5494                         RX_RING_SIZE,
5495                         sizeof(union eth_rx_cqe),
5496                         &rxq->rx_comp_ring, NULL);
5497
5498         if (rc)
5499                 goto err;
5500
5501         /* Allocate buffers for the Rx ring */
5502
5503         for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
5504                 rc = qlnx_alloc_tpa_mbuf(ha, rxq->rx_buf_size,
5505                         &rxq->tpa_info[i]);
5506                 if (rc)
5507                         break;
5508
5509         }
5510
5511         for (i = 0; i < rxq->num_rx_buffers; i++) {
5512                 rc = qlnx_alloc_rx_buffer(ha, rxq);
5513                 if (rc)
5514                         break;
5515         }
5516         num_allocated = i;
5517         if (!num_allocated) {
5518                 QL_DPRINT1(ha, "Rx buffers allocation failed\n");
5519                 goto err;
5520         } else if (num_allocated < rxq->num_rx_buffers) {
5521                 QL_DPRINT1(ha, "Allocated less buffers than"
5522                         " desired (%d allocated)\n", num_allocated);
5523         }
5524
5525 #ifdef QLNX_SOFT_LRO
5526
5527         {
5528                 struct lro_ctrl *lro;
5529
5530                 lro = &rxq->lro;
5531
5532 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
5533                 if (tcp_lro_init_args(lro, ifp, 0, rxq->num_rx_buffers)) {
5534                         QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n",
5535                                    rxq->rxq_id);
5536                         goto err;
5537                 }
5538 #else
5539                 if (tcp_lro_init(lro)) {
5540                         QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n",
5541                                    rxq->rxq_id);
5542                         goto err;
5543                 }
5544 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
5545
5546                 lro->ifp = ha->ifp;
5547         }
5548 #endif /* #ifdef QLNX_SOFT_LRO */
5549         return 0;
5550
5551 err:
5552         qlnx_free_mem_rxq(ha, rxq);
5553         return -ENOMEM;
5554 }
5555
5556
5557 static void
5558 qlnx_free_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
5559         struct qlnx_tx_queue *txq)
5560 {
5561         struct ecore_dev        *cdev;
5562
5563         cdev = &ha->cdev;
5564
5565         bzero((void *)&txq->sw_tx_ring[0],
5566                 (sizeof (struct sw_tx_bd) * TX_RING_SIZE));
5567
5568         /* Free the real RQ ring used by FW */
5569         if (txq->tx_pbl.p_virt_addr) {
5570                 ecore_chain_free(cdev, &txq->tx_pbl);
5571                 txq->tx_pbl.p_virt_addr = NULL;
5572         }
5573         return;
5574 }
5575
5576 /* This function allocates all memory needed per Tx queue */
5577 static int
5578 qlnx_alloc_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, 
5579         struct qlnx_tx_queue *txq)
5580 {
5581         int                     ret = ECORE_SUCCESS;
5582         union eth_tx_bd_types   *p_virt;
5583         struct ecore_dev        *cdev;
5584
5585         cdev = &ha->cdev;
5586
5587         bzero((void *)&txq->sw_tx_ring[0],
5588                 (sizeof (struct sw_tx_bd) * TX_RING_SIZE));
5589
5590         /* Allocate the real Tx ring to be used by FW */
5591         ret = ecore_chain_alloc(cdev,
5592                         ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
5593                         ECORE_CHAIN_MODE_PBL,
5594                         ECORE_CHAIN_CNT_TYPE_U16,
5595                         TX_RING_SIZE,
5596                         sizeof(*p_virt),
5597                         &txq->tx_pbl, NULL);
5598
5599         if (ret != ECORE_SUCCESS) {
5600                 goto err;
5601         }
5602
5603         txq->num_tx_buffers = TX_RING_SIZE;
5604
5605         return 0;
5606
5607 err:
5608         qlnx_free_mem_txq(ha, fp, txq);
5609         return -ENOMEM;
5610 }
5611
5612 static void
5613 qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
5614 {
5615         struct mbuf     *mp;
5616         struct ifnet    *ifp = ha->ifp;
5617
5618         if (mtx_initialized(&fp->tx_mtx)) {
5619
5620                 if (fp->tx_br != NULL) {
5621
5622                         mtx_lock(&fp->tx_mtx);
5623
5624                         while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
5625                                 fp->tx_pkts_freed++;
5626                                 m_freem(mp);
5627                         }
5628
5629                         mtx_unlock(&fp->tx_mtx);
5630
5631                         buf_ring_free(fp->tx_br, M_DEVBUF);
5632                         fp->tx_br = NULL;
5633                 }
5634                 mtx_destroy(&fp->tx_mtx);
5635         }
5636         return;
5637 }
5638
5639 static void
5640 qlnx_free_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
5641 {
5642         int     tc;
5643
5644         qlnx_free_mem_sb(ha, fp->sb_info);
5645
5646         qlnx_free_mem_rxq(ha, fp->rxq);
5647
5648         for (tc = 0; tc < ha->num_tc; tc++)
5649                 qlnx_free_mem_txq(ha, fp, fp->txq[tc]);
5650
5651         return;
5652 }
5653
5654 static int
5655 qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
5656 {
5657         snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
5658                 "qlnx%d_fp%d_tx_mq_lock", ha->dev_unit, fp->rss_id);
5659
5660         mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
5661
5662         fp->tx_br = buf_ring_alloc(TX_RING_SIZE, M_DEVBUF,
5663                                    M_NOWAIT, &fp->tx_mtx);
5664         if (fp->tx_br == NULL) {
5665                 QL_DPRINT1(ha, "buf_ring_alloc failed for fp[%d, %d]\n",
5666                         ha->dev_unit, fp->rss_id);
5667                 return -ENOMEM;
5668         }
5669         return 0;
5670 }
5671
5672 static int
5673 qlnx_alloc_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
5674 {
5675         int     rc, tc;
5676
5677         rc = qlnx_alloc_mem_sb(ha, fp->sb_info, fp->rss_id);
5678         if (rc)
5679                 goto err;
5680
5681         if (ha->rx_jumbo_buf_eq_mtu) {
5682                 if (ha->max_frame_size <= MCLBYTES)
5683                         ha->rx_buf_size = MCLBYTES;
5684                 else if (ha->max_frame_size <= MJUMPAGESIZE)
5685                         ha->rx_buf_size = MJUMPAGESIZE;
5686                 else if (ha->max_frame_size <= MJUM9BYTES)
5687                         ha->rx_buf_size = MJUM9BYTES;
5688                 else if (ha->max_frame_size <= MJUM16BYTES)
5689                         ha->rx_buf_size = MJUM16BYTES;
5690         } else {
5691                 if (ha->max_frame_size <= MCLBYTES)
5692                         ha->rx_buf_size = MCLBYTES;
5693                 else
5694                         ha->rx_buf_size = MJUMPAGESIZE;
5695         }
5696
5697         rc = qlnx_alloc_mem_rxq(ha, fp->rxq);
5698         if (rc)
5699                 goto err;
5700
5701         for (tc = 0; tc < ha->num_tc; tc++) {
5702                 rc = qlnx_alloc_mem_txq(ha, fp, fp->txq[tc]);
5703                 if (rc)
5704                         goto err;
5705         }
5706
5707         return 0;
5708
5709 err:
5710         qlnx_free_mem_fp(ha, fp);
5711         return -ENOMEM;
5712 }
5713
5714 static void
5715 qlnx_free_mem_load(qlnx_host_t *ha)
5716 {
5717         int                     i;
5718         struct ecore_dev        *cdev;
5719
5720         cdev = &ha->cdev;
5721
5722         for (i = 0; i < ha->num_rss; i++) {
5723                 struct qlnx_fastpath *fp = &ha->fp_array[i];
5724
5725                 qlnx_free_mem_fp(ha, fp);
5726         }
5727         return;
5728 }
5729
5730 static int
5731 qlnx_alloc_mem_load(qlnx_host_t *ha)
5732 {
5733         int     rc = 0, rss_id;
5734
5735         for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
5736                 struct qlnx_fastpath *fp = &ha->fp_array[rss_id];
5737
5738                 rc = qlnx_alloc_mem_fp(ha, fp);
5739                 if (rc)
5740                         break;
5741         }
5742         return (rc);
5743 }
5744
5745 static int
5746 qlnx_start_vport(struct ecore_dev *cdev,
5747                 u8 vport_id,
5748                 u16 mtu,
5749                 u8 drop_ttl0_flg,
5750                 u8 inner_vlan_removal_en_flg,
5751                 u8 tx_switching,
5752                 u8 hw_lro_enable)
5753 {
5754         int                                     rc, i;
5755         struct ecore_sp_vport_start_params      vport_start_params = { 0 };
5756         qlnx_host_t                             *ha;
5757
5758         ha = (qlnx_host_t *)cdev;
5759
5760         vport_start_params.remove_inner_vlan = inner_vlan_removal_en_flg;
5761         vport_start_params.tx_switching = 0;
5762         vport_start_params.handle_ptp_pkts = 0;
5763         vport_start_params.only_untagged = 0;
5764         vport_start_params.drop_ttl0 = drop_ttl0_flg;
5765
5766         vport_start_params.tpa_mode =
5767                 (hw_lro_enable ? ECORE_TPA_MODE_RSC : ECORE_TPA_MODE_NONE);
5768         vport_start_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;
5769
5770         vport_start_params.vport_id = vport_id;
5771         vport_start_params.mtu = mtu;
5772
5773
5774         QL_DPRINT2(ha, "Setting mtu to %d and VPORT ID = %d\n", mtu, vport_id);
5775
5776         for_each_hwfn(cdev, i) {
5777                 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
5778
5779                 vport_start_params.concrete_fid = p_hwfn->hw_info.concrete_fid;
5780                 vport_start_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
5781
5782                 rc = ecore_sp_vport_start(p_hwfn, &vport_start_params);
5783
5784                 if (rc) {
5785                         QL_DPRINT1(ha, "Failed to start VPORT V-PORT %d"
5786                                 " with MTU %d\n" , vport_id, mtu);
5787                         return -ENOMEM;
5788                 }
5789
5790                 ecore_hw_start_fastpath(p_hwfn);
5791
5792                 QL_DPRINT2(ha, "Started V-PORT %d with MTU %d\n",
5793                         vport_id, mtu);
5794         }
5795         return 0;
5796 }
5797
5798
5799 static int
5800 qlnx_update_vport(struct ecore_dev *cdev,
5801         struct qlnx_update_vport_params *params)
5802 {
5803         struct ecore_sp_vport_update_params     sp_params;
5804         int                                     rc, i, j, fp_index;
5805         struct ecore_hwfn                       *p_hwfn;
5806         struct ecore_rss_params                 *rss;
5807         qlnx_host_t                             *ha = (qlnx_host_t *)cdev;
5808         struct qlnx_fastpath                    *fp;
5809
5810         memset(&sp_params, 0, sizeof(sp_params));
5811         /* Translate protocol params into sp params */
5812         sp_params.vport_id = params->vport_id;
5813
5814         sp_params.update_vport_active_rx_flg =
5815                 params->update_vport_active_rx_flg;
5816         sp_params.vport_active_rx_flg = params->vport_active_rx_flg;
5817
5818         sp_params.update_vport_active_tx_flg =
5819                 params->update_vport_active_tx_flg;
5820         sp_params.vport_active_tx_flg = params->vport_active_tx_flg;
5821
5822         sp_params.update_inner_vlan_removal_flg =
5823                 params->update_inner_vlan_removal_flg;
5824         sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg;
5825
5826         sp_params.sge_tpa_params = params->sge_tpa_params;
5827
5828         /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns.
5829          * We need to re-fix the rss values per engine for CMT.
5830          */
5831         if (params->rss_params->update_rss_config)
5832         sp_params.rss_params = params->rss_params;
5833         else
5834                 sp_params.rss_params =  NULL;
5835
5836         for_each_hwfn(cdev, i) {
5837
5838                 p_hwfn = &cdev->hwfns[i];
5839
5840                 if ((cdev->num_hwfns > 1) &&
5841                         params->rss_params->update_rss_config &&
5842                         params->rss_params->rss_enable) {
5843
5844                         rss = params->rss_params;
5845
5846                         for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE; j++) {
5847
5848                                 fp_index = ((cdev->num_hwfns * j) + i) %
5849                                                 ha->num_rss;
5850
5851                                 fp = &ha->fp_array[fp_index];
5852                                 rss->rss_ind_table[j] = fp->rxq->handle;
5853                         }
5854
5855                         for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE;) {
5856                                 QL_DPRINT3(ha, "%p %p %p %p %p %p %p %p \n",
5857                                         rss->rss_ind_table[j],
5858                                         rss->rss_ind_table[j+1],
5859                                         rss->rss_ind_table[j+2],
5860                                         rss->rss_ind_table[j+3],
5861                                         rss->rss_ind_table[j+4],
5862                                         rss->rss_ind_table[j+5],
5863                                         rss->rss_ind_table[j+6],
5864                                         rss->rss_ind_table[j+7]);
5865                                         j += 8;
5866                         }
5867                 }
5868
5869                 sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
5870
5871                 QL_DPRINT1(ha, "Update sp vport ID=%d\n", params->vport_id);
5872
5873                 rc = ecore_sp_vport_update(p_hwfn, &sp_params,
5874                                            ECORE_SPQ_MODE_EBLOCK, NULL);
5875                 if (rc) {
5876                         QL_DPRINT1(ha, "Failed to update VPORT\n");
5877                         return rc;
5878                 }
5879
5880                 QL_DPRINT2(ha, "Updated V-PORT %d: tx_active_flag %d, \
5881                         rx_active_flag %d [tx_update %d], [rx_update %d]\n",
5882                         params->vport_id, params->vport_active_tx_flg,
5883                         params->vport_active_rx_flg,
5884                         params->update_vport_active_tx_flg,
5885                         params->update_vport_active_rx_flg);
5886         }
5887
5888         return 0;
5889 }
5890
5891 static void
5892 qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq)
5893 {
5894         struct eth_rx_bd        *rx_bd_cons =
5895                                         ecore_chain_consume(&rxq->rx_bd_ring);
5896         struct eth_rx_bd        *rx_bd_prod =
5897                                         ecore_chain_produce(&rxq->rx_bd_ring);
5898         struct sw_rx_data       *sw_rx_data_cons =
5899                                         &rxq->sw_rx_ring[rxq->sw_rx_cons];
5900         struct sw_rx_data       *sw_rx_data_prod =
5901                                         &rxq->sw_rx_ring[rxq->sw_rx_prod];
5902
5903         sw_rx_data_prod->data = sw_rx_data_cons->data;
5904         memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd));
5905
5906         rxq->sw_rx_cons  = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
5907         rxq->sw_rx_prod  = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
5908
5909         return;
5910 }
5911
5912 static void
5913 qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, struct qlnx_rx_queue *rxq)
5914 {
5915
5916         uint16_t                bd_prod;
5917         uint16_t                cqe_prod;
5918         union {
5919                 struct eth_rx_prod_data rx_prod_data;
5920                 uint32_t                data32;
5921         } rx_prods;
5922
5923         bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
5924         cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
5925
5926         /* Update producers */
5927         rx_prods.rx_prod_data.bd_prod = htole16(bd_prod);
5928         rx_prods.rx_prod_data.cqe_prod = htole16(cqe_prod);
5929
5930         /* Make sure that the BD and SGE data is updated before updating the
5931          * producers since FW might read the BD/SGE right after the producer
5932          * is updated.
5933          */
5934         wmb();
5935
5936         internal_ram_wr(p_hwfn, rxq->hw_rxq_prod_addr,
5937                 sizeof(rx_prods), &rx_prods.data32);
5938
5939         /* mmiowb is needed to synchronize doorbell writes from more than one
5940          * processor. It guarantees that the write arrives to the device before
5941          * the napi lock is released and another qlnx_poll is called (possibly
5942          * on another CPU). Without this barrier, the next doorbell can bypass
5943          * this doorbell. This is applicable to IA64/Altix systems.
5944          */
5945         wmb();
5946
5947         return;
5948 }
5949
5950 static uint32_t qlnx_hash_key[] = {
5951                 ((0x6d << 24)|(0x5a << 16)|(0x56 << 8)|0xda),
5952                 ((0x25 << 24)|(0x5b << 16)|(0x0e << 8)|0xc2),
5953                 ((0x41 << 24)|(0x67 << 16)|(0x25 << 8)|0x3d),
5954                 ((0x43 << 24)|(0xa3 << 16)|(0x8f << 8)|0xb0),
5955                 ((0xd0 << 24)|(0xca << 16)|(0x2b << 8)|0xcb),
5956                 ((0xae << 24)|(0x7b << 16)|(0x30 << 8)|0xb4),
5957                 ((0x77 << 24)|(0xcb << 16)|(0x2d << 8)|0xa3),
5958                 ((0x80 << 24)|(0x30 << 16)|(0xf2 << 8)|0x0c),
5959                 ((0x6a << 24)|(0x42 << 16)|(0xb7 << 8)|0x3b),
5960                 ((0xbe << 24)|(0xac << 16)|(0x01 << 8)|0xfa)};
5961
5962 static int
5963 qlnx_start_queues(qlnx_host_t *ha)
5964 {
5965         int                             rc, tc, i, vport_id = 0,
5966                                         drop_ttl0_flg = 1, vlan_removal_en = 1,
5967                                         tx_switching = 0, hw_lro_enable = 0;
5968         struct ecore_dev                *cdev = &ha->cdev;
5969         struct ecore_rss_params         *rss_params = &ha->rss_params;
5970         struct qlnx_update_vport_params vport_update_params;
5971         struct ifnet                    *ifp;
5972         struct ecore_hwfn               *p_hwfn;
5973         struct ecore_sge_tpa_params     tpa_params;
5974         struct ecore_queue_start_common_params qparams;
5975         struct qlnx_fastpath            *fp;
5976
5977         ifp = ha->ifp;
5978
5979         QL_DPRINT1(ha, "Num RSS = %d\n", ha->num_rss);
5980
5981         if (!ha->num_rss) {
5982                 QL_DPRINT1(ha, "Cannot update V-VPORT as active as there"
5983                         " are no Rx queues\n");
5984                 return -EINVAL;
5985         }
5986
5987 #ifndef QLNX_SOFT_LRO
5988         hw_lro_enable = ifp->if_capenable & IFCAP_LRO;
5989 #endif /* #ifndef QLNX_SOFT_LRO */
5990
5991         rc = qlnx_start_vport(cdev, vport_id, ifp->if_mtu, drop_ttl0_flg,
5992                         vlan_removal_en, tx_switching, hw_lro_enable);
5993
5994         if (rc) {
5995                 QL_DPRINT1(ha, "Start V-PORT failed %d\n", rc);
5996                 return rc;
5997         }
5998
5999         QL_DPRINT2(ha, "Start vport ramrod passed, "
6000                 "vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
6001                 vport_id, (int)(ifp->if_mtu + 0xe), vlan_removal_en);
6002
6003         for_each_rss(i) {
6004                 struct ecore_rxq_start_ret_params rx_ret_params;
6005                 struct ecore_txq_start_ret_params tx_ret_params;
6006
6007                 fp = &ha->fp_array[i];
6008                 p_hwfn = &cdev->hwfns[(fp->rss_id % cdev->num_hwfns)];
6009
6010                 bzero(&qparams, sizeof(struct ecore_queue_start_common_params));
6011                 bzero(&rx_ret_params,
6012                         sizeof (struct ecore_rxq_start_ret_params));
6013
6014                 qparams.queue_id = i ;
6015                 qparams.vport_id = vport_id;
6016                 qparams.stats_id = vport_id;
6017                 qparams.p_sb = fp->sb_info;
6018                 qparams.sb_idx = RX_PI;
6019                 
6020
6021                 rc = ecore_eth_rx_queue_start(p_hwfn,
6022                         p_hwfn->hw_info.opaque_fid,
6023                         &qparams,
6024                         fp->rxq->rx_buf_size,   /* bd_max_bytes */
6025                         /* bd_chain_phys_addr */
6026                         fp->rxq->rx_bd_ring.p_phys_addr,
6027                         /* cqe_pbl_addr */
6028                         ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring),
6029                         /* cqe_pbl_size */
6030                         ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring),
6031                         &rx_ret_params);
6032
6033                 if (rc) {
6034                         QL_DPRINT1(ha, "Start RXQ #%d failed %d\n", i, rc);
6035                         return rc;
6036                 }
6037
6038                 fp->rxq->hw_rxq_prod_addr       = rx_ret_params.p_prod;
6039                 fp->rxq->handle                 = rx_ret_params.p_handle;
6040                 fp->rxq->hw_cons_ptr            =
6041                                 &fp->sb_info->sb_virt->pi_array[RX_PI];
6042
6043                 qlnx_update_rx_prod(p_hwfn, fp->rxq);
6044
6045                 for (tc = 0; tc < ha->num_tc; tc++) {
6046                         struct qlnx_tx_queue *txq = fp->txq[tc];
6047                 
6048                         bzero(&qparams,
6049                                 sizeof(struct ecore_queue_start_common_params));
6050                         bzero(&tx_ret_params,
6051                                 sizeof (struct ecore_txq_start_ret_params));
6052
6053                         qparams.queue_id = txq->index / cdev->num_hwfns ;
6054                         qparams.vport_id = vport_id;
6055                         qparams.stats_id = vport_id;
6056                         qparams.p_sb = fp->sb_info;
6057                         qparams.sb_idx = TX_PI(tc);
6058
6059                         rc = ecore_eth_tx_queue_start(p_hwfn,
6060                                 p_hwfn->hw_info.opaque_fid,
6061                                 &qparams, tc,
6062                                 /* bd_chain_phys_addr */
6063                                 ecore_chain_get_pbl_phys(&txq->tx_pbl),
6064                                 ecore_chain_get_page_cnt(&txq->tx_pbl),
6065                                 &tx_ret_params);
6066
6067                         if (rc) {
6068                                 QL_DPRINT1(ha, "Start TXQ #%d failed %d\n",
6069                                            txq->index, rc);
6070                                 return rc;
6071                         }
6072
6073                         txq->doorbell_addr = tx_ret_params.p_doorbell;
6074                         txq->handle = tx_ret_params.p_handle;
6075
6076                         txq->hw_cons_ptr =
6077                                 &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
6078                         SET_FIELD(txq->tx_db.data.params,
6079                                   ETH_DB_DATA_DEST, DB_DEST_XCM);
6080                         SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
6081                                   DB_AGG_CMD_SET);
6082                         SET_FIELD(txq->tx_db.data.params,
6083                                   ETH_DB_DATA_AGG_VAL_SEL,
6084                                   DQ_XCM_ETH_TX_BD_PROD_CMD);
6085
6086                         txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
6087                 }
6088         }
6089
6090         /* Fill struct with RSS params */
6091         if (ha->num_rss > 1) {
6092
6093                 rss_params->update_rss_config = 1;
6094                 rss_params->rss_enable = 1;
6095                 rss_params->update_rss_capabilities = 1;
6096                 rss_params->update_rss_ind_table = 1;
6097                 rss_params->update_rss_key = 1;
6098                 rss_params->rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 |
6099                                        ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP;
6100                 rss_params->rss_table_size_log = 7; /* 2^7 = 128 */
6101
6102                 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
6103                         fp = &ha->fp_array[(i % ha->num_rss)];
6104                         rss_params->rss_ind_table[i] = fp->rxq->handle;
6105                 }
6106
6107                 for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
6108                         rss_params->rss_key[i] = (__le32)qlnx_hash_key[i];
6109
6110         } else {
6111                 memset(rss_params, 0, sizeof(*rss_params));
6112         }
6113
6114
6115         /* Prepare and send the vport enable */
6116         memset(&vport_update_params, 0, sizeof(vport_update_params));
6117         vport_update_params.vport_id = vport_id;
6118         vport_update_params.update_vport_active_tx_flg = 1;
6119         vport_update_params.vport_active_tx_flg = 1;
6120         vport_update_params.update_vport_active_rx_flg = 1;
6121         vport_update_params.vport_active_rx_flg = 1;
6122         vport_update_params.rss_params = rss_params;
6123         vport_update_params.update_inner_vlan_removal_flg = 1;
6124         vport_update_params.inner_vlan_removal_flg = 1;
6125
6126         if (hw_lro_enable) {
6127                 memset(&tpa_params, 0, sizeof (struct ecore_sge_tpa_params));
6128
6129                 tpa_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;
6130
6131                 tpa_params.update_tpa_en_flg = 1;
6132                 tpa_params.tpa_ipv4_en_flg = 1;
6133                 tpa_params.tpa_ipv6_en_flg = 1;
6134
6135                 tpa_params.update_tpa_param_flg = 1;
6136                 tpa_params.tpa_pkt_split_flg = 0;
6137                 tpa_params.tpa_hdr_data_split_flg = 0;
6138                 tpa_params.tpa_gro_consistent_flg = 0;
6139                 tpa_params.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
6140                 tpa_params.tpa_max_size = (uint16_t)(-1);
6141                 tpa_params.tpa_min_size_to_start = ifp->if_mtu/2;
6142                 tpa_params.tpa_min_size_to_cont = ifp->if_mtu/2;
6143
6144                 vport_update_params.sge_tpa_params = &tpa_params;
6145         }
6146
6147         rc = qlnx_update_vport(cdev, &vport_update_params);
6148         if (rc) {
6149                 QL_DPRINT1(ha, "Update V-PORT failed %d\n", rc);
6150                 return rc;
6151         }
6152
6153         return 0;
6154 }
6155
6156 static int
6157 qlnx_drain_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6158         struct qlnx_tx_queue *txq)
6159 {
6160         uint16_t        hw_bd_cons;
6161         uint16_t        ecore_cons_idx;
6162
6163         QL_DPRINT2(ha, "enter\n");
6164
6165         hw_bd_cons = le16toh(*txq->hw_cons_ptr);
6166
6167         while (hw_bd_cons !=
6168                 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
6169
6170                 mtx_lock(&fp->tx_mtx);
6171
6172                 (void)qlnx_tx_int(ha, fp, txq);
6173
6174                 mtx_unlock(&fp->tx_mtx);
6175
6176                 qlnx_mdelay(__func__, 2);
6177
6178                 hw_bd_cons = le16toh(*txq->hw_cons_ptr);
6179         }
6180
6181         QL_DPRINT2(ha, "[%d, %d]: done\n", fp->rss_id, txq->index);
6182
6183         return 0;
6184 }
6185
6186 static int
6187 qlnx_stop_queues(qlnx_host_t *ha)
6188 {
6189         struct qlnx_update_vport_params vport_update_params;
6190         struct ecore_dev                *cdev;
6191         struct qlnx_fastpath            *fp;
6192         int                             rc, tc, i;
6193
6194         cdev = &ha->cdev;
6195
6196         /* Disable the vport */
6197
6198         memset(&vport_update_params, 0, sizeof(vport_update_params));
6199
6200         vport_update_params.vport_id = 0;
6201         vport_update_params.update_vport_active_tx_flg = 1;
6202         vport_update_params.vport_active_tx_flg = 0;
6203         vport_update_params.update_vport_active_rx_flg = 1;
6204         vport_update_params.vport_active_rx_flg = 0;
6205         vport_update_params.rss_params = &ha->rss_params;
6206         vport_update_params.rss_params->update_rss_config = 0;
6207         vport_update_params.rss_params->rss_enable = 0;
6208         vport_update_params.update_inner_vlan_removal_flg = 0;
6209         vport_update_params.inner_vlan_removal_flg = 0;
6210
6211         QL_DPRINT1(ha, "Update vport ID= %d\n", vport_update_params.vport_id);
6212
6213         rc = qlnx_update_vport(cdev, &vport_update_params);
6214         if (rc) {
6215                 QL_DPRINT1(ha, "Failed to update vport\n");
6216                 return rc;
6217         }
6218
6219         /* Flush Tx queues. If needed, request drain from MCP */
6220         for_each_rss(i) {
6221                 fp = &ha->fp_array[i];
6222
6223                 for (tc = 0; tc < ha->num_tc; tc++) {
6224                         struct qlnx_tx_queue *txq = fp->txq[tc];
6225
6226                         rc = qlnx_drain_txq(ha, fp, txq);
6227                         if (rc)
6228                                 return rc;
6229                 }
6230         }
6231
6232         /* Stop all Queues in reverse order*/
6233         for (i = ha->num_rss - 1; i >= 0; i--) {
6234
6235                 struct ecore_hwfn *p_hwfn = &cdev->hwfns[(i % cdev->num_hwfns)];
6236
6237                 fp = &ha->fp_array[i];
6238
6239                 /* Stop the Tx Queue(s)*/
6240                 for (tc = 0; tc < ha->num_tc; tc++) {
6241                         int tx_queue_id;
6242
6243                         tx_queue_id = tc * ha->num_rss + i;
6244                         rc = ecore_eth_tx_queue_stop(p_hwfn,
6245                                         fp->txq[tc]->handle);
6246                                         
6247                         if (rc) {
6248                                 QL_DPRINT1(ha, "Failed to stop TXQ #%d\n",
6249                                            tx_queue_id);
6250                                 return rc;
6251                         }
6252                 }
6253
6254                 /* Stop the Rx Queue*/
6255                 rc = ecore_eth_rx_queue_stop(p_hwfn, fp->rxq->handle, false,
6256                                 false);
6257                 if (rc) {
6258                         QL_DPRINT1(ha, "Failed to stop RXQ #%d\n", i);
6259                         return rc;
6260                 }
6261         }
6262
6263         /* Stop the vport */
6264         for_each_hwfn(cdev, i) {
6265
6266                 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
6267
6268                 rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 0);
6269
6270                 if (rc) {
6271                         QL_DPRINT1(ha, "Failed to stop VPORT\n");
6272                         return rc;
6273                 }
6274         }
6275
6276         return rc;
6277 }
6278
6279 static int
6280 qlnx_set_ucast_rx_mac(qlnx_host_t *ha,
6281         enum ecore_filter_opcode opcode,
6282         unsigned char mac[ETH_ALEN])
6283 {
6284         struct ecore_filter_ucast       ucast;
6285         struct ecore_dev                *cdev;
6286         int                             rc;
6287
6288         cdev = &ha->cdev;
6289
6290         bzero(&ucast, sizeof(struct ecore_filter_ucast));
6291
6292         ucast.opcode = opcode;
6293         ucast.type = ECORE_FILTER_MAC;
6294         ucast.is_rx_filter = 1;
6295         ucast.vport_to_add_to = 0;
6296         memcpy(&ucast.mac[0], mac, ETH_ALEN);
6297
6298         rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);
6299
6300         return (rc);
6301 }
6302
6303 static int
6304 qlnx_remove_all_ucast_mac(qlnx_host_t *ha)
6305 {
6306         struct ecore_filter_ucast       ucast;
6307         struct ecore_dev                *cdev;
6308         int                             rc;
6309
6310         bzero(&ucast, sizeof(struct ecore_filter_ucast));
6311
6312         ucast.opcode = ECORE_FILTER_REPLACE;
6313         ucast.type = ECORE_FILTER_MAC; 
6314         ucast.is_rx_filter = 1;
6315
6316         cdev = &ha->cdev;
6317
6318         rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);
6319
6320         return (rc);
6321 }
6322
6323 static int
6324 qlnx_remove_all_mcast_mac(qlnx_host_t *ha)
6325 {
6326         struct ecore_filter_mcast       *mcast;
6327         struct ecore_dev                *cdev;
6328         int                             rc, i;
6329
6330         cdev = &ha->cdev;
6331
6332         mcast = &ha->ecore_mcast;
6333         bzero(mcast, sizeof(struct ecore_filter_mcast));
6334
6335         mcast->opcode = ECORE_FILTER_REMOVE;
6336
6337         for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
6338
6339                 if (ha->mcast[i].addr[0] || ha->mcast[i].addr[1] ||
6340                         ha->mcast[i].addr[2] || ha->mcast[i].addr[3] ||
6341                         ha->mcast[i].addr[4] || ha->mcast[i].addr[5]) {
6342
6343                         memcpy(&mcast->mac[i], &ha->mcast[i].addr[0], ETH_ALEN);
6344                         mcast->num_mc_addrs++;
6345                 }
6346         }
6347         mcast = &ha->ecore_mcast;
6348
6349         rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);
6350
6351         bzero(ha->mcast, (sizeof(qlnx_mcast_t) * QLNX_MAX_NUM_MULTICAST_ADDRS));
6352         ha->nmcast = 0;
6353
6354         return (rc);
6355 }
6356
6357 static int
6358 qlnx_clean_filters(qlnx_host_t *ha)
6359 {
6360         int     rc = 0;
6361
6362         /* Remove all unicast macs */
6363         rc = qlnx_remove_all_ucast_mac(ha);
6364         if (rc)
6365                 return rc;
6366
6367         /* Remove all multicast macs */
6368         rc = qlnx_remove_all_mcast_mac(ha);
6369         if (rc)
6370                 return rc;
6371
6372         rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_FLUSH, ha->primary_mac);
6373
6374         return (rc);
6375 }
6376
6377 static int
6378 qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter)
6379 {
6380         struct ecore_filter_accept_flags        accept;
6381         int                                     rc = 0;
6382         struct ecore_dev                        *cdev;
6383
6384         cdev = &ha->cdev;
6385
6386         bzero(&accept, sizeof(struct ecore_filter_accept_flags));
6387
6388         accept.update_rx_mode_config = 1;
6389         accept.rx_accept_filter = filter;
6390
6391         accept.update_tx_mode_config = 1;
6392         accept.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
6393                 ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST;
6394
6395         rc = ecore_filter_accept_cmd(cdev, 0, accept, false, false,
6396                         ECORE_SPQ_MODE_CB, NULL);
6397
6398         return (rc);
6399 }
6400
6401 static int
6402 qlnx_set_rx_mode(qlnx_host_t *ha)
6403 {
6404         int     rc = 0;
6405         uint8_t filter;
6406
6407         rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, ha->primary_mac);
6408         if (rc)
6409                 return rc;
6410
6411         rc = qlnx_remove_all_mcast_mac(ha);
6412         if (rc)
6413                 return rc;
6414
6415         filter = ECORE_ACCEPT_UCAST_MATCHED |
6416                         ECORE_ACCEPT_MCAST_MATCHED |
6417                         ECORE_ACCEPT_BCAST;
6418         ha->filter = filter;
6419
6420         rc = qlnx_set_rx_accept_filter(ha, filter);
6421
6422         return (rc);
6423 }
6424
6425 static int
6426 qlnx_set_link(qlnx_host_t *ha, bool link_up)
6427 {
6428         int                     i, rc = 0;
6429         struct ecore_dev        *cdev;
6430         struct ecore_hwfn       *hwfn;
6431         struct ecore_ptt        *ptt;
6432
6433         cdev = &ha->cdev;
6434
6435         for_each_hwfn(cdev, i) {
6436
6437                 hwfn = &cdev->hwfns[i];
6438
6439                 ptt = ecore_ptt_acquire(hwfn);
6440                 if (!ptt)
6441                         return -EBUSY;
6442
6443                 rc = ecore_mcp_set_link(hwfn, ptt, link_up);
6444
6445                 ecore_ptt_release(hwfn, ptt);
6446
6447                 if (rc)
6448                         return rc;
6449         }
6450         return (rc);
6451 }
6452
6453 #if __FreeBSD_version >= 1100000
6454 static uint64_t
6455 qlnx_get_counter(if_t ifp, ift_counter cnt)
6456 {
6457         qlnx_host_t *ha;
6458         uint64_t count;
6459
6460         ha = (qlnx_host_t *)if_getsoftc(ifp);
6461
6462         switch (cnt) {
6463
6464         case IFCOUNTER_IPACKETS:
6465                 count = ha->hw_stats.common.rx_ucast_pkts +
6466                         ha->hw_stats.common.rx_mcast_pkts +
6467                         ha->hw_stats.common.rx_bcast_pkts;
6468                 break;
6469
6470         case IFCOUNTER_IERRORS:
6471                 count = ha->hw_stats.common.rx_crc_errors +
6472                         ha->hw_stats.common.rx_align_errors +
6473                         ha->hw_stats.common.rx_oversize_packets +
6474                         ha->hw_stats.common.rx_undersize_packets;
6475                 break;
6476
6477         case IFCOUNTER_OPACKETS:
6478                 count = ha->hw_stats.common.tx_ucast_pkts +
6479                         ha->hw_stats.common.tx_mcast_pkts +
6480                         ha->hw_stats.common.tx_bcast_pkts;
6481                 break;
6482
6483         case IFCOUNTER_OERRORS:
6484                 count = ha->hw_stats.common.tx_err_drop_pkts;
6485                 break;
6486
6487         case IFCOUNTER_COLLISIONS:
6488                 return (0);
6489
6490         case IFCOUNTER_IBYTES:
6491                 count = ha->hw_stats.common.rx_ucast_bytes +
6492                         ha->hw_stats.common.rx_mcast_bytes +
6493                         ha->hw_stats.common.rx_bcast_bytes;
6494                 break;
6495
6496         case IFCOUNTER_OBYTES:
6497                 count = ha->hw_stats.common.tx_ucast_bytes +
6498                         ha->hw_stats.common.tx_mcast_bytes +
6499                         ha->hw_stats.common.tx_bcast_bytes;
6500                 break;
6501
6502         case IFCOUNTER_IMCASTS:
6503                 count = ha->hw_stats.common.rx_mcast_bytes;
6504                 break;
6505
6506         case IFCOUNTER_OMCASTS:
6507                 count = ha->hw_stats.common.tx_mcast_bytes;
6508                 break;
6509
6510         case IFCOUNTER_IQDROPS:
6511         case IFCOUNTER_OQDROPS:
6512         case IFCOUNTER_NOPROTO:
6513
6514         default:
6515                 return (if_get_counter_default(ifp, cnt));
6516         }
6517         return (count);
6518 }
6519 #endif
6520
6521
6522 static void
6523 qlnx_timer(void *arg)
6524 {
6525         qlnx_host_t     *ha;
6526
6527         ha = (qlnx_host_t *)arg;
6528
6529         ecore_get_vport_stats(&ha->cdev, &ha->hw_stats);
6530
6531         if (ha->storm_stats_enable)
6532                 qlnx_sample_storm_stats(ha);
6533
6534         callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
6535
6536         return;
6537 }
6538
6539 static int
6540 qlnx_load(qlnx_host_t *ha)
6541 {
6542         int                     i;
6543         int                     rc = 0;
6544         struct ecore_dev        *cdev;
6545         device_t                dev;
6546
6547         cdev = &ha->cdev;
6548         dev = ha->pci_dev;
6549
6550         QL_DPRINT2(ha, "enter\n");
6551
6552         rc = qlnx_alloc_mem_arrays(ha);
6553         if (rc)
6554                 goto qlnx_load_exit0;
6555
6556         qlnx_init_fp(ha);
6557
6558         rc = qlnx_alloc_mem_load(ha);
6559         if (rc)
6560                 goto qlnx_load_exit1;
6561
6562         QL_DPRINT2(ha, "Allocated %d RSS queues on %d TC/s\n",
6563                    ha->num_rss, ha->num_tc);
6564
6565         for (i = 0; i < ha->num_rss; i++) {
6566
6567                 if ((rc = bus_setup_intr(dev, ha->irq_vec[i].irq,
6568                         (INTR_TYPE_NET | INTR_MPSAFE),
6569                         NULL, qlnx_fp_isr, &ha->irq_vec[i],
6570                         &ha->irq_vec[i].handle))) {
6571
6572                         QL_DPRINT1(ha, "could not setup interrupt\n");
6573                         goto qlnx_load_exit2;
6574                 }
6575
6576                 QL_DPRINT2(ha, "rss_id = %d irq_rid %d \
6577                          irq %p handle %p\n", i,
6578                         ha->irq_vec[i].irq_rid,
6579                         ha->irq_vec[i].irq, ha->irq_vec[i].handle);
6580
6581                 bus_bind_intr(dev, ha->irq_vec[i].irq, (i % mp_ncpus));
6582         }
6583
6584         rc = qlnx_start_queues(ha);
6585         if (rc)
6586                 goto qlnx_load_exit2;
6587
6588         QL_DPRINT2(ha, "Start VPORT, RXQ and TXQ succeeded\n");
6589
6590         /* Add primary mac and set Rx filters */
6591         rc = qlnx_set_rx_mode(ha);
6592         if (rc)
6593                 goto qlnx_load_exit2;
6594
6595         /* Ask for link-up using current configuration */
6596         qlnx_set_link(ha, true);
6597
6598         ha->state = QLNX_STATE_OPEN;
6599
6600         bzero(&ha->hw_stats, sizeof(struct ecore_eth_stats));
6601
6602         if (ha->flags.callout_init)
6603                 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
6604
6605         goto qlnx_load_exit0;
6606
6607 qlnx_load_exit2:
6608         qlnx_free_mem_load(ha);
6609
6610 qlnx_load_exit1:
6611         ha->num_rss = 0;
6612
6613 qlnx_load_exit0:
6614         QL_DPRINT2(ha, "exit [%d]\n", rc);
6615         return rc;
6616 }
6617
6618 static void
6619 qlnx_drain_soft_lro(qlnx_host_t *ha)
6620 {
6621 #ifdef QLNX_SOFT_LRO
6622
6623         struct ifnet    *ifp;
6624         int             i;
6625
6626         ifp = ha->ifp;
6627
6628
6629         if (ifp->if_capenable & IFCAP_LRO) {
6630
6631                 for (i = 0; i < ha->num_rss; i++) {
6632
6633                         struct qlnx_fastpath *fp = &ha->fp_array[i];
6634                         struct lro_ctrl *lro;
6635
6636                         lro = &fp->rxq->lro;
6637
6638 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
6639
6640                         tcp_lro_flush_all(lro);
6641
6642 #else
6643                         struct lro_entry *queued;
6644
6645                         while ((!SLIST_EMPTY(&lro->lro_active))){
6646                                 queued = SLIST_FIRST(&lro->lro_active);
6647                                 SLIST_REMOVE_HEAD(&lro->lro_active, next);
6648                                 tcp_lro_flush(lro, queued);
6649                         }
6650
6651 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
6652
6653                 }
6654         }
6655
6656 #endif /* #ifdef QLNX_SOFT_LRO */
6657
6658         return;
6659 }
6660
6661 static void
6662 qlnx_unload(qlnx_host_t *ha)
6663 {
6664         struct ecore_dev        *cdev;
6665         device_t                dev;
6666         int                     i;
6667
6668         cdev = &ha->cdev;
6669         dev = ha->pci_dev;
6670
6671         QL_DPRINT2(ha, "enter\n");
6672         QL_DPRINT1(ha, " QLNX STATE = %d\n",ha->state);
6673
6674         if (ha->state == QLNX_STATE_OPEN) {
6675
6676                 qlnx_set_link(ha, false);
6677                 qlnx_clean_filters(ha);
6678                 qlnx_stop_queues(ha);
6679                 ecore_hw_stop_fastpath(cdev);
6680
6681                 for (i = 0; i < ha->num_rss; i++) {
6682                         if (ha->irq_vec[i].handle) {
6683                                 (void)bus_teardown_intr(dev,
6684                                         ha->irq_vec[i].irq,
6685                                         ha->irq_vec[i].handle);
6686                                 ha->irq_vec[i].handle = NULL;
6687                         }
6688                 }
6689
6690                 qlnx_drain_fp_taskqueues(ha);
6691                 qlnx_drain_soft_lro(ha);
6692                 qlnx_free_mem_load(ha);
6693         }
6694
6695         if (ha->flags.callout_init)
6696                 callout_drain(&ha->qlnx_callout);
6697
6698         qlnx_mdelay(__func__, 1000);
6699
6700         ha->state = QLNX_STATE_CLOSED;
6701
6702         QL_DPRINT2(ha, "exit\n");
6703         return;
6704 }
6705
6706 static int
6707 qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
6708 {
6709         int                     rval = -1;
6710         struct ecore_hwfn       *p_hwfn;
6711         struct ecore_ptt        *p_ptt;
6712
6713         ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
6714
6715         p_hwfn = &ha->cdev.hwfns[hwfn_index];
6716         p_ptt = ecore_ptt_acquire(p_hwfn);
6717
6718         if (!p_ptt) {
6719                 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
6720                 return (rval);
6721         }
6722
6723         rval = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);
6724
6725         if (rval == DBG_STATUS_OK)
6726                 rval = 0;
6727         else {
6728                 QL_DPRINT1(ha, "ecore_dbg_grc_get_dump_buf_size failed"
6729                         "[0x%x]\n", rval);
6730         }
6731
6732         ecore_ptt_release(p_hwfn, p_ptt);
6733
6734         return (rval);
6735 }
6736
6737 static int
6738 qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
6739 {
6740         int                     rval = -1;
6741         struct ecore_hwfn       *p_hwfn;
6742         struct ecore_ptt        *p_ptt;
6743
6744         ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
6745
6746         p_hwfn = &ha->cdev.hwfns[hwfn_index];
6747         p_ptt = ecore_ptt_acquire(p_hwfn);
6748
6749         if (!p_ptt) {
6750                 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
6751                 return (rval);
6752         }
6753
6754         rval = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);
6755
6756         if (rval == DBG_STATUS_OK)
6757                 rval = 0;
6758         else {
6759                 QL_DPRINT1(ha, "ecore_dbg_idle_chk_get_dump_buf_size failed"
6760                         " [0x%x]\n", rval);
6761         }
6762
6763         ecore_ptt_release(p_hwfn, p_ptt);
6764
6765         return (rval);
6766 }
6767
6768
6769 static void
6770 qlnx_sample_storm_stats(qlnx_host_t *ha)
6771 {
6772         int                     i, index;
6773         struct ecore_dev        *cdev;
6774         qlnx_storm_stats_t      *s_stats;
6775         uint32_t                reg;
6776         struct ecore_ptt        *p_ptt;
6777         struct ecore_hwfn       *hwfn;
6778
6779         if (ha->storm_stats_index >= QLNX_STORM_STATS_SAMPLES_PER_HWFN) {
6780                 ha->storm_stats_enable = 0;
6781                 return;
6782         }
6783
6784         cdev = &ha->cdev;
6785
6786         for_each_hwfn(cdev, i) {
6787
6788                 hwfn = &cdev->hwfns[i];
6789
6790                 p_ptt = ecore_ptt_acquire(hwfn);
6791                 if (!p_ptt)
6792                         return;
6793
6794                 index = ha->storm_stats_index +
6795                                 (i * QLNX_STORM_STATS_SAMPLES_PER_HWFN);
6796
6797                 s_stats = &ha->storm_stats[index];
6798
6799                 /* XSTORM */
6800                 reg = XSEM_REG_FAST_MEMORY +
6801                                 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
6802                 s_stats->xstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 
6803
6804                 reg = XSEM_REG_FAST_MEMORY +
6805                                 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
6806                 s_stats->xstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 
6807
6808                 reg = XSEM_REG_FAST_MEMORY +
6809                                 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
6810                 s_stats->xstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 
6811
6812                 reg = XSEM_REG_FAST_MEMORY +
6813                                 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
6814                 s_stats->xstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 
6815
6816                 /* YSTORM */
6817                 reg = YSEM_REG_FAST_MEMORY +
6818                                 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
6819                 s_stats->ystorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 
6820
6821                 reg = YSEM_REG_FAST_MEMORY +
6822                                 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
6823                 s_stats->ystorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 
6824
6825                 reg = YSEM_REG_FAST_MEMORY +
6826                                 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
6827                 s_stats->ystorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 
6828
6829                 reg = YSEM_REG_FAST_MEMORY +
6830                                 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
6831                 s_stats->ystorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 
6832
6833                 /* PSTORM */
6834                 reg = PSEM_REG_FAST_MEMORY +
6835                                 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
6836                 s_stats->pstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 
6837
6838                 reg = PSEM_REG_FAST_MEMORY +
6839                                 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
6840                 s_stats->pstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 
6841
6842                 reg = PSEM_REG_FAST_MEMORY +
6843                                 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
6844                 s_stats->pstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 
6845
6846                 reg = PSEM_REG_FAST_MEMORY +
6847                                 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
6848                 s_stats->pstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 
6849
6850                 /* TSTORM */
6851                 reg = TSEM_REG_FAST_MEMORY +
6852                                 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
6853                 s_stats->tstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 
6854
6855                 reg = TSEM_REG_FAST_MEMORY +
6856                                 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
6857                 s_stats->tstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 
6858
6859                 reg = TSEM_REG_FAST_MEMORY +
6860                                 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
6861                 s_stats->tstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 
6862
6863                 reg = TSEM_REG_FAST_MEMORY +
6864                                 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
6865                 s_stats->tstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 
6866
6867                 /* MSTORM */
6868                 reg = MSEM_REG_FAST_MEMORY +
6869                                 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
6870                 s_stats->mstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 
6871
6872                 reg = MSEM_REG_FAST_MEMORY +
6873                                 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
6874                 s_stats->mstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 
6875
6876                 reg = MSEM_REG_FAST_MEMORY +
6877                                 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
6878                 s_stats->mstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 
6879
6880                 reg = MSEM_REG_FAST_MEMORY +
6881                                 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
6882                 s_stats->mstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 
6883
6884                 /* USTORM */
6885                 reg = USEM_REG_FAST_MEMORY +
6886                                 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
6887                 s_stats->ustorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 
6888
6889                 reg = USEM_REG_FAST_MEMORY +
6890                                 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
6891                 s_stats->ustorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 
6892
6893                 reg = USEM_REG_FAST_MEMORY +
6894                                 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
6895                 s_stats->ustorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 
6896
6897                 reg = USEM_REG_FAST_MEMORY +
6898                                 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
6899                 s_stats->ustorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 
6900
6901                 ecore_ptt_release(hwfn, p_ptt);
6902         }
6903
6904         ha->storm_stats_index++;
6905
6906         return;
6907 }
6908
6909 /*
6910  * Name: qlnx_dump_buf8
6911  * Function: dumps a buffer as bytes
6912  */
6913 static void
6914 qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, uint32_t len)
6915 {
6916         device_t        dev;
6917         uint32_t        i = 0;
6918         uint8_t         *buf;
6919
6920         dev = ha->pci_dev;
6921         buf = dbuf;
6922
6923         device_printf(dev, "%s: %s 0x%x dump start\n", __func__, msg, len);
6924
6925         while (len >= 16) {
6926                 device_printf(dev,"0x%08x:"
6927                         " %02x %02x %02x %02x %02x %02x %02x %02x"
6928                         " %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
6929                         buf[0], buf[1], buf[2], buf[3],
6930                         buf[4], buf[5], buf[6], buf[7],
6931                         buf[8], buf[9], buf[10], buf[11],
6932                         buf[12], buf[13], buf[14], buf[15]);
6933                 i += 16;
6934                 len -= 16;
6935                 buf += 16;
6936         }
6937         switch (len) {
6938         case 1:
6939                 device_printf(dev,"0x%08x: %02x\n", i, buf[0]);
6940                 break;
6941         case 2:
6942                 device_printf(dev,"0x%08x: %02x %02x\n", i, buf[0], buf[1]);
6943                 break;
6944         case 3:
6945                 device_printf(dev,"0x%08x: %02x %02x %02x\n",
6946                         i, buf[0], buf[1], buf[2]);
6947                 break;
6948         case 4:
6949                 device_printf(dev,"0x%08x: %02x %02x %02x %02x\n", i,
6950                         buf[0], buf[1], buf[2], buf[3]);
6951                 break;
6952         case 5:
6953                 device_printf(dev,"0x%08x:"
6954                         " %02x %02x %02x %02x %02x\n", i,
6955                         buf[0], buf[1], buf[2], buf[3], buf[4]);
6956                 break;
6957         case 6:
6958                 device_printf(dev,"0x%08x:"
6959                         " %02x %02x %02x %02x %02x %02x\n", i,
6960                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
6961                 break;
6962         case 7:
6963                 device_printf(dev,"0x%08x:"
6964                         " %02x %02x %02x %02x %02x %02x %02x\n", i,
6965                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]);
6966                 break;
6967         case 8:
6968                 device_printf(dev,"0x%08x:"
6969                         " %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
6970                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
6971                         buf[7]);
6972                 break;
6973         case 9:
6974                 device_printf(dev,"0x%08x:"
6975                         " %02x %02x %02x %02x %02x %02x %02x %02x"
6976                         " %02x\n", i,
6977                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
6978                         buf[7], buf[8]);
6979                 break;
6980         case 10:
6981                 device_printf(dev,"0x%08x:"
6982                         " %02x %02x %02x %02x %02x %02x %02x %02x"
6983                         " %02x %02x\n", i,
6984                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
6985                         buf[7], buf[8], buf[9]);
6986                 break;
6987         case 11:
6988                 device_printf(dev,"0x%08x:"
6989                         " %02x %02x %02x %02x %02x %02x %02x %02x"
6990                         " %02x %02x %02x\n", i,
6991                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
6992                         buf[7], buf[8], buf[9], buf[10]);
6993                 break;
6994         case 12:
6995                 device_printf(dev,"0x%08x:"
6996                         " %02x %02x %02x %02x %02x %02x %02x %02x"
6997                         " %02x %02x %02x %02x\n", i,
6998                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
6999                         buf[7], buf[8], buf[9], buf[10], buf[11]);
7000                 break;
7001         case 13:
7002                 device_printf(dev,"0x%08x:"
7003                         " %02x %02x %02x %02x %02x %02x %02x %02x"
7004                         " %02x %02x %02x %02x %02x\n", i,
7005                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7006                         buf[7], buf[8], buf[9], buf[10], buf[11], buf[12]);
7007                 break;
7008         case 14:
7009                 device_printf(dev,"0x%08x:"
7010                         " %02x %02x %02x %02x %02x %02x %02x %02x"
7011                         " %02x %02x %02x %02x %02x %02x\n", i,
7012                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7013                         buf[7], buf[8], buf[9], buf[10], buf[11], buf[12],
7014                         buf[13]);
7015                 break;
7016         case 15:
7017                 device_printf(dev,"0x%08x:"
7018                         " %02x %02x %02x %02x %02x %02x %02x %02x"
7019                         " %02x %02x %02x %02x %02x %02x %02x\n", i,
7020                         buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
7021                         buf[7], buf[8], buf[9], buf[10], buf[11], buf[12],
7022                         buf[13], buf[14]);
7023                 break;
7024         default:
7025                 break;
7026         }
7027
7028         device_printf(dev, "%s: %s dump end\n", __func__, msg);
7029
7030         return;
7031 }
7032