/*
 * Copyright (c) 2013-2014 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: ql_hw.c
 * Author: David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 * Content: Contains hardware-dependent functions
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "ql_os.h"
#include "ql_hw.h"
#include "ql_def.h"
#include "ql_inline.h"
#include "ql_ver.h"
#include "ql_glbl.h"
#include "ql_dbg.h"

/*
 * Static Functions
 */

static void qla_del_rcv_cntxt(qla_host_t *ha);
static int qla_init_rcv_cntxt(qla_host_t *ha);
static void qla_del_xmt_cntxt(qla_host_t *ha);
static int qla_init_xmt_cntxt(qla_host_t *ha);
static void qla_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx);
static int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
        uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause);
static int qla_config_intr_cntxt(qla_host_t *ha, uint32_t num_intrs,
        uint32_t create);
static int qla_get_nic_partition(qla_host_t *ha);
static int qla_config_rss(qla_host_t *ha, uint16_t cntxt_id);
static int qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id,
        int tenable);
static int qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode);
static int qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id);

static int qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd,
                uint8_t *hdr);
static int qla_hw_add_all_mcast(qla_host_t *ha);
static int qla_hw_del_all_mcast(qla_host_t *ha);
static int qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx);

static int qla_minidump_init(qla_host_t *ha);
static void qla_minidump_free(qla_host_t *ha);


static int
qla_sysctl_get_drvr_stats(SYSCTL_HANDLER_ARGS)
{
        int err = 0, ret;
        qla_host_t *ha;
        uint32_t i;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        if (ret == 1) {

                ha = (qla_host_t *)arg1;

                for (i = 0; i < ha->hw.num_sds_rings; i++)
                        device_printf(ha->pci_dev,
                                "%s: sds_ring[%d] = %p\n", __func__,i,
                                (void *)ha->hw.sds[i].intr_count);

                for (i = 0; i < ha->hw.num_tx_rings; i++)
                        device_printf(ha->pci_dev,
                                "%s: tx[%d] = %p\n", __func__,i,
                                (void *)ha->tx_ring[i].count);

                for (i = 0; i < ha->hw.num_rds_rings; i++)
                        device_printf(ha->pci_dev,
                                "%s: rds_ring[%d] = %p\n", __func__,i,
                                (void *)ha->hw.rds[i].count);

                device_printf(ha->pci_dev, "%s: lro_pkt_count = %p\n", __func__,
                        (void *)ha->lro_pkt_count);

                device_printf(ha->pci_dev, "%s: lro_bytes = %p\n", __func__,
                        (void *)ha->lro_bytes);
        }
        return (err);
}

#ifdef QL_DBG

static void
qla_stop_pegs(qla_host_t *ha)
{
        uint32_t val = 1;

        ql_rdwr_indreg32(ha, Q8_CRB_PEG_0, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_1, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_2, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_3, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_4, &val, 0);
        device_printf(ha->pci_dev, "%s PEGS HALTED!!!!!\n", __func__);
}

static int
qla_sysctl_stop_pegs(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;

        err = sysctl_handle_int(oidp, &ret, 0, req);


        if (err || !req->newptr)
                return (err);

        if (ret == 1) {
                ha = (qla_host_t *)arg1;
                (void)QLA_LOCK(ha, __func__, 0);
                qla_stop_pegs(ha);
                QLA_UNLOCK(ha, __func__);
        }

        return err;
}
#endif /* #ifdef QL_DBG */

/*
 * Name: ql_hw_add_sysctls
 * Function: Add P3Plus specific sysctls
 */
void
ql_hw_add_sysctls(qla_host_t *ha)
{
        device_t        dev;

        dev = ha->pci_dev;

        ha->hw.num_sds_rings = MAX_SDS_RINGS;
        ha->hw.num_rds_rings = MAX_RDS_RINGS;
        ha->hw.num_tx_rings = NUM_TX_RINGS;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_rds_rings", CTLFLAG_RD, &ha->hw.num_rds_rings,
                ha->hw.num_rds_rings, "Number of Rcv Descriptor Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_sds_rings", CTLFLAG_RD, &ha->hw.num_sds_rings,
                ha->hw.num_sds_rings, "Number of Status Descriptor Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->hw.num_tx_rings,
                ha->hw.num_tx_rings, "Number of Transmit Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "tx_ring_index", CTLFLAG_RW, &ha->txr_idx,
                ha->txr_idx, "Tx Ring Used");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "drvr_stats", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_get_drvr_stats, "I", "Driver Maintained Statistics");

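        /*
         * Example: writing 1 to the "drvr_stats" node dumps the
         * driver-maintained per-ring counters to the console via
         * qla_sysctl_get_drvr_stats(). Assuming the device attached
         * as unit 0 of the "ql" driver (the sysctl prefix depends on
         * the actual device name):
         *
         *      sysctl dev.ql.0.drvr_stats=1
         */
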
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "max_tx_segs", CTLFLAG_RD, &ha->hw.max_tx_segs,
                ha->hw.max_tx_segs, "Max # of Segments in a non-TSO pkt");

        ha->hw.sds_cidx_thres = 32;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "sds_cidx_thres", CTLFLAG_RW, &ha->hw.sds_cidx_thres,
                ha->hw.sds_cidx_thres,
                "Number of SDS entries to process before updating"
                " SDS Ring Consumer Index");

        ha->hw.rds_pidx_thres = 32;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "rds_pidx_thres", CTLFLAG_RW, &ha->hw.rds_pidx_thres,
                ha->hw.rds_pidx_thres,
                "Number of Rcv Ring Entries to post before updating"
                " RDS Ring Producer Index");

        ha->hw.mdump_active = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "minidump_active", CTLFLAG_RW, &ha->hw.mdump_active,
                ha->hw.mdump_active,
                "Minidump Utility is Active\n"
                "\t 0 = Minidump Utility is not active\n"
                "\t 1 = Minidump Utility is retrieved on this port\n"
                "\t 2 = Minidump Utility is retrieved on the other port\n");

        ha->hw.mdump_start = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "minidump_start", CTLFLAG_RW,
                &ha->hw.mdump_start, ha->hw.mdump_start,
                "Minidump Utility can start minidump process");
#ifdef QL_DBG

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "err_inject",
                CTLFLAG_RW, &ha->err_inject, ha->err_inject,
                "Error to be injected\n"
                "\t\t\t 0: No Errors\n"
                "\t\t\t 1: rcv: rxb struct invalid\n"
                "\t\t\t 2: rcv: mp == NULL\n"
                "\t\t\t 3: lro: rxb struct invalid\n"
                "\t\t\t 4: lro: mp == NULL\n"
                "\t\t\t 5: rcv: num handles invalid\n"
                "\t\t\t 6: reg: indirect reg rd_wr failure\n"
                "\t\t\t 7: ocm: offchip memory rd_wr failure\n"
                "\t\t\t 8: mbx: mailbox command failure\n"
                "\t\t\t 9: heartbeat failure\n"
                "\t\t\t A: temperature failure\n");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "peg_stop", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_stop_pegs, "I", "Peg Stop");

#endif /* #ifdef QL_DBG */

}

void
ql_hw_link_status(qla_host_t *ha)
{
        device_printf(ha->pci_dev, "cable_oui\t\t 0x%08x\n", ha->hw.cable_oui);

        if (ha->hw.link_up) {
                device_printf(ha->pci_dev, "link Up\n");
        } else {
                device_printf(ha->pci_dev, "link Down\n");
        }

        if (ha->hw.flags.fduplex) {
                device_printf(ha->pci_dev, "Full Duplex\n");
        } else {
                device_printf(ha->pci_dev, "Half Duplex\n");
        }

        if (ha->hw.flags.autoneg) {
                device_printf(ha->pci_dev, "Auto Negotiation Enabled\n");
        } else {
                device_printf(ha->pci_dev, "Auto Negotiation Disabled\n");
        }

        switch (ha->hw.link_speed) {
        case 0x710:
                device_printf(ha->pci_dev, "link speed\t\t 10Gbps\n");
                break;

        case 0x3E8:
                device_printf(ha->pci_dev, "link speed\t\t 1Gbps\n");
                break;

        case 0x64:
                device_printf(ha->pci_dev, "link speed\t\t 100Mbps\n");
                break;

        default:
                device_printf(ha->pci_dev, "link speed\t\t Unknown\n");
                break;
        }

        switch (ha->hw.module_type) {

        case 0x01:
                device_printf(ha->pci_dev, "Module Type 10GBase-LRM\n");
                break;

        case 0x02:
                device_printf(ha->pci_dev, "Module Type 10GBase-LR\n");
                break;

        case 0x03:
                device_printf(ha->pci_dev, "Module Type 10GBase-SR\n");
                break;

        case 0x04:
                device_printf(ha->pci_dev,
                        "Module Type 10GE Passive Copper(Compliant)[%d m]\n",
                        ha->hw.cable_length);
                break;

        case 0x05:
                device_printf(ha->pci_dev, "Module Type 10GE Active"
                        " Limiting Copper(Compliant)[%d m]\n",
                        ha->hw.cable_length);
                break;

        case 0x06:
                device_printf(ha->pci_dev,
                        "Module Type 10GE Passive Copper"
                        " (Legacy, Best Effort)[%d m]\n",
                        ha->hw.cable_length);
                break;

        case 0x07:
                device_printf(ha->pci_dev, "Module Type 1000Base-SX\n");
                break;

        case 0x08:
                device_printf(ha->pci_dev, "Module Type 1000Base-LX\n");
                break;

        case 0x09:
                device_printf(ha->pci_dev, "Module Type 1000Base-CX\n");
                break;

        case 0x0A:
                device_printf(ha->pci_dev, "Module Type 1000Base-T\n");
                break;

        case 0x0B:
                device_printf(ha->pci_dev, "Module Type 1GE Passive Copper"
                        " (Legacy, Best Effort)\n");
                break;

        default:
                device_printf(ha->pci_dev, "Unknown Module Type 0x%x\n",
                        ha->hw.module_type);
                break;
        }

        if (ha->hw.link_faults == 1)
                device_printf(ha->pci_dev, "SFP Power Fault\n");
}

/*
 * Name: ql_free_dma
 * Function: Frees the DMA'able memory allocated in ql_alloc_dma()
 */
void
ql_free_dma(qla_host_t *ha)
{
        uint32_t i;

        if (ha->hw.dma_buf.flags.sds_ring) {
                for (i = 0; i < ha->hw.num_sds_rings; i++) {
                        ql_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]);
                }
                ha->hw.dma_buf.flags.sds_ring = 0;
        }

        if (ha->hw.dma_buf.flags.rds_ring) {
                for (i = 0; i < ha->hw.num_rds_rings; i++) {
                        ql_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]);
                }
                ha->hw.dma_buf.flags.rds_ring = 0;
        }

        if (ha->hw.dma_buf.flags.tx_ring) {
                ql_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring);
                ha->hw.dma_buf.flags.tx_ring = 0;
        }
        qla_minidump_free(ha);
}

/*
 * Name: ql_alloc_dma
 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
 */
int
ql_alloc_dma(qla_host_t *ha)
{
        device_t                dev;
        uint32_t                i, j, size, tx_ring_size;
        qla_hw_t                *hw;
        qla_hw_tx_cntxt_t       *tx_cntxt;
        uint8_t                 *vaddr;
        bus_addr_t              paddr;

        dev = ha->pci_dev;

        QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

        hw = &ha->hw;
        /*
         * Allocate Transmit Ring
         */
        tx_ring_size = (sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS);
        size = (tx_ring_size * ha->hw.num_tx_rings);

        hw->dma_buf.tx_ring.alignment = 8;
        hw->dma_buf.tx_ring.size = size + PAGE_SIZE;
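        /*
         * The Tx rings are laid out back to back in this one DMA
         * buffer; the extra PAGE_SIZE leaves room for the per-ring
         * 32-bit tx_cons consumer-index words, which the second loop
         * below places immediately after the last ring.
         */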

        if (ql_alloc_dmabuf(ha, &hw->dma_buf.tx_ring)) {
                device_printf(dev, "%s: tx ring alloc failed\n", __func__);
                goto ql_alloc_dma_exit;
        }

        vaddr = (uint8_t *)hw->dma_buf.tx_ring.dma_b;
        paddr = hw->dma_buf.tx_ring.dma_addr;

        for (i = 0; i < ha->hw.num_tx_rings; i++) {
                tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

                tx_cntxt->tx_ring_base = (q80_tx_cmd_t *)vaddr;
                tx_cntxt->tx_ring_paddr = paddr;

                vaddr += tx_ring_size;
                paddr += tx_ring_size;
        }

        for (i = 0; i < ha->hw.num_tx_rings; i++) {
                tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

                tx_cntxt->tx_cons = (uint32_t *)vaddr;
                tx_cntxt->tx_cons_paddr = paddr;

                vaddr += sizeof (uint32_t);
                paddr += sizeof (uint32_t);
        }

        ha->hw.dma_buf.flags.tx_ring = 1;

        QL_DPRINT2(ha, (dev, "%s: tx_ring phys %p virt %p\n",
                __func__, (void *)(hw->dma_buf.tx_ring.dma_addr),
                hw->dma_buf.tx_ring.dma_b));
        /*
         * Allocate Receive Descriptor Rings
         */

        for (i = 0; i < hw->num_rds_rings; i++) {

                hw->dma_buf.rds_ring[i].alignment = 8;
                hw->dma_buf.rds_ring[i].size =
                        (sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS;

                if (ql_alloc_dmabuf(ha, &hw->dma_buf.rds_ring[i])) {
                        device_printf(dev, "%s: rds ring[%d] alloc failed\n",
                                __func__, i);

                        for (j = 0; j < i; j++)
                                ql_free_dmabuf(ha, &hw->dma_buf.rds_ring[j]);

                        goto ql_alloc_dma_exit;
                }
                QL_DPRINT4(ha, (dev, "%s: rx_ring[%d] phys %p virt %p\n",
                        __func__, i, (void *)(hw->dma_buf.rds_ring[i].dma_addr),
                        hw->dma_buf.rds_ring[i].dma_b));
        }

        hw->dma_buf.flags.rds_ring = 1;

        /*
         * Allocate Status Descriptor Rings
         */

        for (i = 0; i < hw->num_sds_rings; i++) {
                hw->dma_buf.sds_ring[i].alignment = 8;
                hw->dma_buf.sds_ring[i].size =
                        (sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS;

                if (ql_alloc_dmabuf(ha, &hw->dma_buf.sds_ring[i])) {
                        device_printf(dev, "%s: sds ring alloc failed\n",
                                __func__);

                        for (j = 0; j < i; j++)
                                ql_free_dmabuf(ha, &hw->dma_buf.sds_ring[j]);

                        goto ql_alloc_dma_exit;
                }
                QL_DPRINT4(ha, (dev, "%s: sds_ring[%d] phys %p virt %p\n",
                        __func__, i,
                        (void *)(hw->dma_buf.sds_ring[i].dma_addr),
                        hw->dma_buf.sds_ring[i].dma_b));
        }
        for (i = 0; i < hw->num_sds_rings; i++) {
                hw->sds[i].sds_ring_base =
                        (q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b;
        }

        hw->dma_buf.flags.sds_ring = 1;

        return 0;

ql_alloc_dma_exit:
        ql_free_dma(ha);
        return -1;
}

#define Q8_MBX_MSEC_DELAY       5000

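/*
 * Mailbox handshake as implemented by qla_mbx_cmd() below:
 *
 *  1. Poll Q8_HOST_MBOX_CNTRL until it reads 0, i.e. the firmware has
 *     consumed the previous command (up to 1000 iterations when
 *     no_pause is set, otherwise up to Q8_MBX_MSEC_DELAY milliseconds).
 *  2. Write the n_hmbox command words at Q8_HOST_MBOX0 onwards and set
 *     Q8_HOST_MBOX_CNTRL to 1 to hand the command to the firmware.
 *  3. Poll Q8_FW_MBOX_CNTRL until the firmware posts a response whose
 *     first word at Q8_FW_MBOX0 is not an 0x8xxx code, copy out the
 *     n_fwmbox response words, then clear Q8_FW_MBOX_CNTRL and write 0
 *     to the mailbox interrupt mask register.
 *
 * Any timeout (and the INJCT_MBX_CMD_FAILURE error injection) sets
 * qla_initiate_recovery so the watchdog can reset the adapter.
 */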
static int
qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
        uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause)
{
        uint32_t i;
        uint32_t data;
        int ret = 0;

        if (QL_ERR_INJECT(ha, INJCT_MBX_CMD_FAILURE)) {
                ret = -3;
                ha->qla_initiate_recovery = 1;
                goto exit_qla_mbx_cmd;
        }

        if (no_pause)
                i = 1000;
        else
                i = Q8_MBX_MSEC_DELAY;

        while (i) {
                data = READ_REG32(ha, Q8_HOST_MBOX_CNTRL);
                if (data == 0)
                        break;
                if (no_pause) {
                        DELAY(1000);
                } else {
                        qla_mdelay(__func__, 1);
                }
                i--;
        }

        if (i == 0) {
                device_printf(ha->pci_dev, "%s: host_mbx_cntrl 0x%08x\n",
                        __func__, data);
                ret = -1;
                ha->qla_initiate_recovery = 1;
                goto exit_qla_mbx_cmd;
        }

        for (i = 0; i < n_hmbox; i++) {
                WRITE_REG32(ha, (Q8_HOST_MBOX0 + (i << 2)), *h_mbox);
                h_mbox++;
        }

        WRITE_REG32(ha, Q8_HOST_MBOX_CNTRL, 0x1);


        i = Q8_MBX_MSEC_DELAY;
        while (i) {
                data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);

                if ((data & 0x3) == 1) {
                        data = READ_REG32(ha, Q8_FW_MBOX0);
                        if ((data & 0xF000) != 0x8000)
                                break;
                }
                if (no_pause) {
                        DELAY(1000);
                } else {
                        qla_mdelay(__func__, 1);
                }
                i--;
        }
        if (i == 0) {
                device_printf(ha->pci_dev, "%s: fw_mbx_cntrl 0x%08x\n",
                        __func__, data);
                ret = -2;
                ha->qla_initiate_recovery = 1;
                goto exit_qla_mbx_cmd;
        }

        for (i = 0; i < n_fwmbox; i++) {
                *fw_mbox++ = READ_REG32(ha, (Q8_FW_MBOX0 + (i << 2)));
        }

        WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
        WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);

exit_qla_mbx_cmd:
        return (ret);
}

static int
qla_get_nic_partition(qla_host_t *ha)
{
        uint32_t *mbox, err;
        device_t dev = ha->pci_dev;

        bzero(ha->hw.mbox, (sizeof (uint32_t) * Q8_NUM_MBOX));

        mbox = ha->hw.mbox;

        mbox[0] = Q8_MBX_GET_NIC_PARTITION | (0x2 << 16) | (0x2 << 29);

        if (qla_mbx_cmd(ha, mbox, 2, mbox, 19, 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }
        err = mbox[0] >> 25;

        if ((err != 1) && (err != 0)) {
                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
                return (-1);
        }
        return 0;
}

static int
qla_config_intr_cntxt(qla_host_t *ha, uint32_t num_intrs, uint32_t create)
{
        uint32_t i, err;
        device_t dev = ha->pci_dev;
        q80_config_intr_t *c_intr;
        q80_config_intr_rsp_t *c_intr_rsp;

        c_intr = (q80_config_intr_t *)ha->hw.mbox;
        bzero(c_intr, (sizeof (q80_config_intr_t)));

        c_intr->opcode = Q8_MBX_CONFIG_INTR;

        c_intr->count_version = (sizeof (q80_config_intr_t) >> 2);
        c_intr->count_version |= Q8_MBX_CMD_VERSION;

        c_intr->nentries = num_intrs;

        for (i = 0; i < num_intrs; i++) {
                if (create) {
                        c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_CREATE;
                        c_intr->intr[i].msix_index = i + 1;
                } else {
                        c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_DELETE;
                        c_intr->intr[i].msix_index = ha->hw.intr_id[i];
                }

                c_intr->intr[i].cmd_type |= Q8_MBX_CONFIG_INTR_TYPE_MSI_X;
        }

        if (qla_mbx_cmd(ha, (uint32_t *)c_intr,
                (sizeof (q80_config_intr_t) >> 2),
                ha->hw.mbox, (sizeof (q80_config_intr_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }

        c_intr_rsp = (q80_config_intr_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(c_intr_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed1 [0x%08x, %d]\n", __func__, err,
                        c_intr_rsp->nentries);

                for (i = 0; i < c_intr_rsp->nentries; i++) {
                        device_printf(dev, "%s: [%d]:[0x%x 0x%x 0x%x]\n",
                                __func__, i,
                                c_intr_rsp->intr[i].status,
                                c_intr_rsp->intr[i].intr_id,
                                c_intr_rsp->intr[i].intr_src);
                }

                return (-1);
        }

        for (i = 0; ((i < num_intrs) && create); i++) {
                if (!c_intr_rsp->intr[i].status) {
                        ha->hw.intr_id[i] = c_intr_rsp->intr[i].intr_id;
                        ha->hw.intr_src[i] = c_intr_rsp->intr[i].intr_src;
                }
        }

        return (0);
}

/*
 * Name: qla_config_rss
 * Function: Configure RSS for the context/interface.
 */
static const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL,
                        0x8030f20c77cb2da3ULL,
                        0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
                        0x255b0ec26d5a56daULL };
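/*
 * The 40 bytes above appear to be the well-known Microsoft RSS
 * (Toeplitz) verification key, packed into five 64-bit words in the
 * byte order the firmware expects; qla_config_rss() copies them
 * verbatim into the rss_key fields of the Q8_MBX_CONFIG_RSS command.
 */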

static int
qla_config_rss(qla_host_t *ha, uint16_t cntxt_id)
{
        q80_config_rss_t        *c_rss;
        q80_config_rss_rsp_t    *c_rss_rsp;
        uint32_t                err, i;
        device_t                dev = ha->pci_dev;

        c_rss = (q80_config_rss_t *)ha->hw.mbox;
        bzero(c_rss, (sizeof (q80_config_rss_t)));

        c_rss->opcode = Q8_MBX_CONFIG_RSS;

        c_rss->count_version = (sizeof (q80_config_rss_t) >> 2);
        c_rss->count_version |= Q8_MBX_CMD_VERSION;

        c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP_IP |
                                Q8_MBX_RSS_HASH_TYPE_IPV6_TCP_IP);

        c_rss->flags = Q8_MBX_RSS_FLAGS_ENABLE_RSS;
        c_rss->flags |= Q8_MBX_RSS_FLAGS_USE_IND_TABLE;

        c_rss->indtbl_mask = Q8_MBX_RSS_INDTBL_MASK;

        c_rss->indtbl_mask |= Q8_MBX_RSS_FLAGS_MULTI_RSS_VALID;
        c_rss->flags |= Q8_MBX_RSS_FLAGS_TYPE_CRSS;

        c_rss->cntxt_id = cntxt_id;

        for (i = 0; i < 5; i++) {
                c_rss->rss_key[i] = rss_key[i];
        }

        if (qla_mbx_cmd(ha, (uint32_t *)c_rss,
                (sizeof (q80_config_rss_t) >> 2),
                ha->hw.mbox, (sizeof(q80_config_rss_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }
        c_rss_rsp = (q80_config_rss_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(c_rss_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
                return (-1);
        }
        return 0;
}

static uint8_t rss_ind_default_table[Q8_RSS_IND_TBL_SIZE];

static int
qla_set_rss_ind_table(qla_host_t *ha, uint32_t start_idx, uint32_t count,
        uint16_t cntxt_id, uint8_t *ind_table)
{
        q80_config_rss_ind_table_t      *c_rss_ind;
        q80_config_rss_ind_table_rsp_t  *c_rss_ind_rsp;
        uint32_t                        err;
        device_t                        dev = ha->pci_dev;

        if ((count > Q8_RSS_IND_TBL_SIZE) ||
                ((start_idx + count - 1) > Q8_RSS_IND_TBL_MAX_IDX)) {
                device_printf(dev, "%s: illegal count [%d, %d]\n", __func__,
                        start_idx, count);
                return (-1);
        }

        c_rss_ind = (q80_config_rss_ind_table_t *)ha->hw.mbox;
        bzero(c_rss_ind, sizeof (q80_config_rss_ind_table_t));

        c_rss_ind->opcode = Q8_MBX_CONFIG_RSS_TABLE;
        c_rss_ind->count_version = (sizeof (q80_config_rss_ind_table_t) >> 2);
        c_rss_ind->count_version |= Q8_MBX_CMD_VERSION;

        c_rss_ind->start_idx = start_idx;
        c_rss_ind->end_idx = start_idx + count - 1;
        c_rss_ind->cntxt_id = cntxt_id;
        bcopy(ind_table, c_rss_ind->ind_table, count);

        if (qla_mbx_cmd(ha, (uint32_t *)c_rss_ind,
                (sizeof (q80_config_rss_ind_table_t) >> 2), ha->hw.mbox,
                (sizeof(q80_config_rss_ind_table_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }

        c_rss_ind_rsp = (q80_config_rss_ind_table_rsp_t *)ha->hw.mbox;
        err = Q8_MBX_RSP_STATUS(c_rss_ind_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
                return (-1);
        }
        return 0;
}

/*
 * Name: qla_config_intr_coalesce
 * Function: Configure Interrupt Coalescing.
 */
static int
qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable)
{
        q80_config_intr_coalesc_t       *intrc;
        q80_config_intr_coalesc_rsp_t   *intrc_rsp;
        uint32_t                        err, i;
        device_t                        dev = ha->pci_dev;

        intrc = (q80_config_intr_coalesc_t *)ha->hw.mbox;
        bzero(intrc, (sizeof (q80_config_intr_coalesc_t)));

        intrc->opcode = Q8_MBX_CONFIG_INTR_COALESCE;
        intrc->count_version = (sizeof (q80_config_intr_coalesc_t) >> 2);
        intrc->count_version |= Q8_MBX_CMD_VERSION;

        intrc->flags = Q8_MBX_INTRC_FLAGS_RCV;
        intrc->cntxt_id = cntxt_id;

        intrc->max_pkts = 256;
        intrc->max_mswait = 3;

        if (tenable) {
                intrc->flags |= Q8_MBX_INTRC_FLAGS_PERIODIC;
                intrc->timer_type = Q8_MBX_INTRC_TIMER_PERIODIC;

                for (i = 0; i < ha->hw.num_sds_rings; i++) {
                        intrc->sds_ring_mask |= (1 << i);
                }
                intrc->ms_timeout = 1000;
        }

        if (qla_mbx_cmd(ha, (uint32_t *)intrc,
                (sizeof (q80_config_intr_coalesc_t) >> 2),
                ha->hw.mbox, (sizeof(q80_config_intr_coalesc_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }
        intrc_rsp = (q80_config_intr_coalesc_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(intrc_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
                return (-1);
        }

        return 0;
}


/*
 * Name: qla_config_mac_addr
 * Function: binds a MAC address to the context/interface.
 *      Can be unicast, multicast or broadcast.
 */
static int
qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac)
{
        q80_config_mac_addr_t           *cmac;
        q80_config_mac_addr_rsp_t       *cmac_rsp;
        uint32_t                        err;
        device_t                        dev = ha->pci_dev;

        cmac = (q80_config_mac_addr_t *)ha->hw.mbox;
        bzero(cmac, (sizeof (q80_config_mac_addr_t)));

        cmac->opcode = Q8_MBX_CONFIG_MAC_ADDR;
        cmac->count_version = sizeof (q80_config_mac_addr_t) >> 2;
        cmac->count_version |= Q8_MBX_CMD_VERSION;

        if (add_mac)
                cmac->cmd = Q8_MBX_CMAC_CMD_ADD_MAC_ADDR;
        else
                cmac->cmd = Q8_MBX_CMAC_CMD_DEL_MAC_ADDR;

        cmac->cmd |= Q8_MBX_CMAC_CMD_CAM_INGRESS;

        cmac->nmac_entries = 1;
        cmac->cntxt_id = ha->hw.rcv_cntxt_id;
        bcopy(mac_addr, cmac->mac_addr[0].addr, 6);

        if (qla_mbx_cmd(ha, (uint32_t *)cmac,
                (sizeof (q80_config_mac_addr_t) >> 2),
                ha->hw.mbox, (sizeof(q80_config_mac_addr_rsp_t) >> 2), 1)) {
                device_printf(dev, "%s: %s failed0\n", __func__,
                        (add_mac ? "Add" : "Del"));
                return (-1);
        }
        cmac_rsp = (q80_config_mac_addr_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(cmac_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: %s "
                        "%02x:%02x:%02x:%02x:%02x:%02x failed1 [0x%08x]\n",
                        __func__, (add_mac ? "Add" : "Del"),
                        mac_addr[0], mac_addr[1], mac_addr[2],
                        mac_addr[3], mac_addr[4], mac_addr[5], err);
                return (-1);
        }

        return 0;
}


/*
 * Name: qla_set_mac_rcv_mode
 * Function: Enable/Disable AllMulticast and Promiscuous Modes.
 */
static int
qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode)
{
        q80_config_mac_rcv_mode_t       *rcv_mode;
        uint32_t                        err;
        q80_config_mac_rcv_mode_rsp_t   *rcv_mode_rsp;
        device_t                        dev = ha->pci_dev;

        rcv_mode = (q80_config_mac_rcv_mode_t *)ha->hw.mbox;
        bzero(rcv_mode, (sizeof (q80_config_mac_rcv_mode_t)));

        rcv_mode->opcode = Q8_MBX_CONFIG_MAC_RX_MODE;
        rcv_mode->count_version = sizeof (q80_config_mac_rcv_mode_t) >> 2;
        rcv_mode->count_version |= Q8_MBX_CMD_VERSION;

        rcv_mode->mode = mode;

        rcv_mode->cntxt_id = ha->hw.rcv_cntxt_id;

        if (qla_mbx_cmd(ha, (uint32_t *)rcv_mode,
                (sizeof (q80_config_mac_rcv_mode_t) >> 2),
                ha->hw.mbox, (sizeof(q80_config_mac_rcv_mode_rsp_t) >> 2), 1)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }
        rcv_mode_rsp = (q80_config_mac_rcv_mode_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(rcv_mode_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
                return (-1);
        }

        return 0;
}

int
ql_set_promisc(qla_host_t *ha)
{
        int ret;

        ha->hw.mac_rcv_mode |= Q8_MBX_MAC_RCV_PROMISC_ENABLE;
        ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
        return (ret);
}

int
ql_set_allmulti(qla_host_t *ha)
{
        int ret;

        ha->hw.mac_rcv_mode |= Q8_MBX_MAC_ALL_MULTI_ENABLE;
        ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
        return (ret);
}


/*
 * Name: ql_set_max_mtu
 * Function:
 *      Sets the maximum transfer unit size for the specified rcv context.
 */
int
ql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id)
{
        device_t                dev;
        q80_set_max_mtu_t       *max_mtu;
        q80_set_max_mtu_rsp_t   *max_mtu_rsp;
        uint32_t                err;

        dev = ha->pci_dev;

        max_mtu = (q80_set_max_mtu_t *)ha->hw.mbox;
        bzero(max_mtu, (sizeof (q80_set_max_mtu_t)));

        max_mtu->opcode = Q8_MBX_SET_MAX_MTU;
        max_mtu->count_version = (sizeof (q80_set_max_mtu_t) >> 2);
        max_mtu->count_version |= Q8_MBX_CMD_VERSION;

        max_mtu->cntxt_id = cntxt_id;
        max_mtu->mtu = mtu;

        if (qla_mbx_cmd(ha, (uint32_t *)max_mtu,
                (sizeof (q80_set_max_mtu_t) >> 2),
                ha->hw.mbox, (sizeof (q80_set_max_mtu_rsp_t) >> 2), 1)) {
                device_printf(dev, "%s: failed\n", __func__);
                return -1;
        }

        max_mtu_rsp = (q80_set_max_mtu_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(max_mtu_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
        }

        return 0;
}

static int
qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id)
{
        device_t                dev;
        q80_link_event_t        *lnk;
        q80_link_event_rsp_t    *lnk_rsp;
        uint32_t                err;

        dev = ha->pci_dev;

        lnk = (q80_link_event_t *)ha->hw.mbox;
        bzero(lnk, (sizeof (q80_link_event_t)));

        lnk->opcode = Q8_MBX_LINK_EVENT_REQ;
        lnk->count_version = (sizeof (q80_link_event_t) >> 2);
        lnk->count_version |= Q8_MBX_CMD_VERSION;

        lnk->cntxt_id = cntxt_id;
        lnk->cmd = Q8_LINK_EVENT_CMD_ENABLE_ASYNC;

        if (qla_mbx_cmd(ha, (uint32_t *)lnk, (sizeof (q80_link_event_t) >> 2),
                ha->hw.mbox, (sizeof (q80_link_event_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed\n", __func__);
                return -1;
        }

        lnk_rsp = (q80_link_event_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(lnk_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
        }

        return 0;
}

static int
qla_config_fw_lro(qla_host_t *ha, uint16_t cntxt_id)
{
        device_t                dev;
        q80_config_fw_lro_t     *fw_lro;
        q80_config_fw_lro_rsp_t *fw_lro_rsp;
        uint32_t                err;

        dev = ha->pci_dev;

        fw_lro = (q80_config_fw_lro_t *)ha->hw.mbox;
        bzero(fw_lro, sizeof(q80_config_fw_lro_t));

        fw_lro->opcode = Q8_MBX_CONFIG_FW_LRO;
        fw_lro->count_version = (sizeof (q80_config_fw_lro_t) >> 2);
        fw_lro->count_version |= Q8_MBX_CMD_VERSION;

        fw_lro->flags |= Q8_MBX_FW_LRO_IPV4 | Q8_MBX_FW_LRO_IPV4_WO_DST_IP_CHK;

        fw_lro->cntxt_id = cntxt_id;

        if (qla_mbx_cmd(ha, (uint32_t *)fw_lro,
                (sizeof (q80_config_fw_lro_t) >> 2),
                ha->hw.mbox, (sizeof (q80_config_fw_lro_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed\n", __func__);
                return -1;
        }

        fw_lro_rsp = (q80_config_fw_lro_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(fw_lro_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
        }

        return 0;
}

static void
qla_xmt_stats(qla_host_t *ha, q80_xmt_stats_t *xstat)
{
        device_t dev = ha->pci_dev;

        device_printf(dev, "%s: total_bytes\t\t%" PRIu64 "\n", __func__,
                xstat->total_bytes);
        device_printf(dev, "%s: total_pkts\t\t%" PRIu64 "\n", __func__,
                xstat->total_pkts);
        device_printf(dev, "%s: errors\t\t%" PRIu64 "\n", __func__,
                xstat->errors);
        device_printf(dev, "%s: pkts_dropped\t%" PRIu64 "\n", __func__,
                xstat->pkts_dropped);
        device_printf(dev, "%s: switch_pkts\t\t%" PRIu64 "\n", __func__,
                xstat->switch_pkts);
        device_printf(dev, "%s: num_buffers\t\t%" PRIu64 "\n", __func__,
                xstat->num_buffers);
}

static void
qla_rcv_stats(qla_host_t *ha, q80_rcv_stats_t *rstat)
{
        device_t dev = ha->pci_dev;

        device_printf(dev, "%s: total_bytes\t\t\t%" PRIu64 "\n", __func__,
                rstat->total_bytes);
        device_printf(dev, "%s: total_pkts\t\t\t%" PRIu64 "\n", __func__,
                rstat->total_pkts);
        device_printf(dev, "%s: lro_pkt_count\t\t%" PRIu64 "\n", __func__,
                rstat->lro_pkt_count);
        device_printf(dev, "%s: sw_pkt_count\t\t%" PRIu64 "\n", __func__,
                rstat->sw_pkt_count);
        device_printf(dev, "%s: ip_chksum_err\t\t%" PRIu64 "\n", __func__,
                rstat->ip_chksum_err);
        device_printf(dev, "%s: pkts_wo_acntxts\t\t%" PRIu64 "\n", __func__,
                rstat->pkts_wo_acntxts);
        device_printf(dev, "%s: pkts_dropped_no_sds_card\t%" PRIu64 "\n",
                __func__, rstat->pkts_dropped_no_sds_card);
        device_printf(dev, "%s: pkts_dropped_no_sds_host\t%" PRIu64 "\n",
                __func__, rstat->pkts_dropped_no_sds_host);
        device_printf(dev, "%s: oversized_pkts\t\t%" PRIu64 "\n", __func__,
                rstat->oversized_pkts);
        device_printf(dev, "%s: pkts_dropped_no_rds\t\t%" PRIu64 "\n",
                __func__, rstat->pkts_dropped_no_rds);
        device_printf(dev, "%s: unxpctd_mcast_pkts\t\t%" PRIu64 "\n",
                __func__, rstat->unxpctd_mcast_pkts);
        device_printf(dev, "%s: re1_fbq_error\t\t%" PRIu64 "\n", __func__,
                rstat->re1_fbq_error);
        device_printf(dev, "%s: invalid_mac_addr\t\t%" PRIu64 "\n", __func__,
                rstat->invalid_mac_addr);
        device_printf(dev, "%s: rds_prime_trys\t\t%" PRIu64 "\n", __func__,
                rstat->rds_prime_trys);
        device_printf(dev, "%s: rds_prime_success\t\t%" PRIu64 "\n", __func__,
                rstat->rds_prime_success);
        device_printf(dev, "%s: lro_flows_added\t\t%" PRIu64 "\n", __func__,
                rstat->lro_flows_added);
        device_printf(dev, "%s: lro_flows_deleted\t\t%" PRIu64 "\n", __func__,
                rstat->lro_flows_deleted);
        device_printf(dev, "%s: lro_flows_active\t\t%" PRIu64 "\n", __func__,
                rstat->lro_flows_active);
        device_printf(dev, "%s: pkts_droped_unknown\t\t%" PRIu64 "\n",
                __func__, rstat->pkts_droped_unknown);
}

static void
qla_mac_stats(qla_host_t *ha, q80_mac_stats_t *mstat)
{
        device_t dev = ha->pci_dev;

        device_printf(dev, "%s: xmt_frames\t\t\t%" PRIu64 "\n", __func__,
                mstat->xmt_frames);
        device_printf(dev, "%s: xmt_bytes\t\t\t%" PRIu64 "\n", __func__,
                mstat->xmt_bytes);
        device_printf(dev, "%s: xmt_mcast_pkts\t\t%" PRIu64 "\n", __func__,
                mstat->xmt_mcast_pkts);
        device_printf(dev, "%s: xmt_bcast_pkts\t\t%" PRIu64 "\n", __func__,
                mstat->xmt_bcast_pkts);
        device_printf(dev, "%s: xmt_pause_frames\t\t%" PRIu64 "\n", __func__,
                mstat->xmt_pause_frames);
        device_printf(dev, "%s: xmt_cntrl_pkts\t\t%" PRIu64 "\n", __func__,
                mstat->xmt_cntrl_pkts);
        device_printf(dev, "%s: xmt_pkt_lt_64bytes\t\t%" PRIu64 "\n",
                __func__, mstat->xmt_pkt_lt_64bytes);
        device_printf(dev, "%s: xmt_pkt_lt_127bytes\t\t%" PRIu64 "\n",
                __func__, mstat->xmt_pkt_lt_127bytes);
        device_printf(dev, "%s: xmt_pkt_lt_255bytes\t\t%" PRIu64 "\n",
                __func__, mstat->xmt_pkt_lt_255bytes);
        device_printf(dev, "%s: xmt_pkt_lt_511bytes\t\t%" PRIu64 "\n",
                __func__, mstat->xmt_pkt_lt_511bytes);
        device_printf(dev, "%s: xmt_pkt_lt_1023bytes\t%" PRIu64 "\n",
                __func__, mstat->xmt_pkt_lt_1023bytes);
        device_printf(dev, "%s: xmt_pkt_lt_1518bytes\t%" PRIu64 "\n",
                __func__, mstat->xmt_pkt_lt_1518bytes);
        device_printf(dev, "%s: xmt_pkt_gt_1518bytes\t%" PRIu64 "\n",
                __func__, mstat->xmt_pkt_gt_1518bytes);

        device_printf(dev, "%s: rcv_frames\t\t\t%" PRIu64 "\n", __func__,
                mstat->rcv_frames);
        device_printf(dev, "%s: rcv_bytes\t\t\t%" PRIu64 "\n", __func__,
                mstat->rcv_bytes);
        device_printf(dev, "%s: rcv_mcast_pkts\t\t%" PRIu64 "\n", __func__,
                mstat->rcv_mcast_pkts);
        device_printf(dev, "%s: rcv_bcast_pkts\t\t%" PRIu64 "\n", __func__,
                mstat->rcv_bcast_pkts);
        device_printf(dev, "%s: rcv_pause_frames\t\t%" PRIu64 "\n", __func__,
                mstat->rcv_pause_frames);
        device_printf(dev, "%s: rcv_cntrl_pkts\t\t%" PRIu64 "\n", __func__,
                mstat->rcv_cntrl_pkts);
        device_printf(dev, "%s: rcv_pkt_lt_64bytes\t\t%" PRIu64 "\n",
                __func__, mstat->rcv_pkt_lt_64bytes);
        device_printf(dev, "%s: rcv_pkt_lt_127bytes\t\t%" PRIu64 "\n",
                __func__, mstat->rcv_pkt_lt_127bytes);
        device_printf(dev, "%s: rcv_pkt_lt_255bytes\t\t%" PRIu64 "\n",
                __func__, mstat->rcv_pkt_lt_255bytes);
        device_printf(dev, "%s: rcv_pkt_lt_511bytes\t\t%" PRIu64 "\n",
                __func__, mstat->rcv_pkt_lt_511bytes);
        device_printf(dev, "%s: rcv_pkt_lt_1023bytes\t%" PRIu64 "\n",
                __func__, mstat->rcv_pkt_lt_1023bytes);
        device_printf(dev, "%s: rcv_pkt_lt_1518bytes\t%" PRIu64 "\n",
                __func__, mstat->rcv_pkt_lt_1518bytes);
        device_printf(dev, "%s: rcv_pkt_gt_1518bytes\t%" PRIu64 "\n",
                __func__, mstat->rcv_pkt_gt_1518bytes);

        device_printf(dev, "%s: rcv_len_error\t\t%" PRIu64 "\n", __func__,
                mstat->rcv_len_error);
        device_printf(dev, "%s: rcv_len_small\t\t%" PRIu64 "\n", __func__,
                mstat->rcv_len_small);
        device_printf(dev, "%s: rcv_len_large\t\t%" PRIu64 "\n", __func__,
                mstat->rcv_len_large);
        device_printf(dev, "%s: rcv_jabber\t\t\t%" PRIu64 "\n", __func__,
                mstat->rcv_jabber);
        device_printf(dev, "%s: rcv_dropped\t\t\t%" PRIu64 "\n", __func__,
                mstat->rcv_dropped);
        device_printf(dev, "%s: fcs_error\t\t\t%" PRIu64 "\n", __func__,
                mstat->fcs_error);
        device_printf(dev, "%s: align_error\t\t\t%" PRIu64 "\n", __func__,
                mstat->align_error);
}


static int
qla_get_hw_stats(qla_host_t *ha, uint32_t cmd)
{
        device_t                dev;
        q80_get_stats_t         *stat;
        q80_get_stats_rsp_t     *stat_rsp;
        uint32_t                err;

        dev = ha->pci_dev;

        stat = (q80_get_stats_t *)ha->hw.mbox;
        bzero(stat, (sizeof (q80_get_stats_t)));

        stat->opcode = Q8_MBX_GET_STATS;
        stat->count_version = 2;
        stat->count_version |= Q8_MBX_CMD_VERSION;

        stat->cmd = cmd;

        if (qla_mbx_cmd(ha, (uint32_t *)stat, 2,
                ha->hw.mbox, (sizeof (q80_get_stats_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed\n", __func__);
                return -1;
        }

        stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(stat_rsp->regcnt_status);

        if (err) {
                return -1;
        }

        return 0;
}

void
ql_get_stats(qla_host_t *ha)
{
        q80_get_stats_rsp_t     *stat_rsp;
        q80_mac_stats_t         *mstat;
        q80_xmt_stats_t         *xstat;
        q80_rcv_stats_t         *rstat;
        uint32_t                cmd;

        stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
        /*
         * Get MAC Statistics
         */
        cmd = Q8_GET_STATS_CMD_TYPE_MAC;

        cmd |= ((ha->pci_func & 0x1) << 16);
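        /*
         * The upper 16 bits of the command word select the target of
         * the query: the PCI function (port) for the MAC statistics
         * here, and the receive/transmit context id for the
         * context-scoped statistics requested below.
         */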

        if (qla_get_hw_stats(ha, cmd) == 0) {
                mstat = (q80_mac_stats_t *)&stat_rsp->u.mac;
                qla_mac_stats(ha, mstat);
        } else {
                device_printf(ha->pci_dev, "%s: mac failed [0x%08x]\n",
                        __func__, ha->hw.mbox[0]);
        }
        /*
         * Get RCV Statistics
         */
        cmd = Q8_GET_STATS_CMD_RCV | Q8_GET_STATS_CMD_TYPE_CNTXT;
        cmd |= (ha->hw.rcv_cntxt_id << 16);

        if (qla_get_hw_stats(ha, cmd) == 0) {
                rstat = (q80_rcv_stats_t *)&stat_rsp->u.rcv;
                qla_rcv_stats(ha, rstat);
        } else {
                device_printf(ha->pci_dev, "%s: rcv failed [0x%08x]\n",
                        __func__, ha->hw.mbox[0]);
        }
        /*
         * Get XMT Statistics
         */
        cmd = Q8_GET_STATS_CMD_XMT | Q8_GET_STATS_CMD_TYPE_CNTXT;
        cmd |= (ha->hw.tx_cntxt[ha->txr_idx].tx_cntxt_id << 16);


        if (qla_get_hw_stats(ha, cmd) == 0) {
                xstat = (q80_xmt_stats_t *)&stat_rsp->u.xmt;
                qla_xmt_stats(ha, xstat);
        } else {
                device_printf(ha->pci_dev, "%s: xmt failed [0x%08x]\n",
                        __func__, ha->hw.mbox[0]);
        }
}

/*
 * Name: qla_tx_tso
 * Function: Checks if the packet to be transmitted is a candidate for
 *      Large TCP Segment Offload. If yes, the appropriate fields in the Tx
 *      Ring Structure are plugged in.
 */
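/*
 * Returns 0 when the frame is a TSO candidate and its headers are
 * contained in the first mbuf, 1 when the headers had to be assembled
 * into the caller-supplied "hdr" buffer (the caller must then use that
 * copy), and -1 when the frame does not qualify (non-TCP payload, or
 * IPv4 header options, which are not supported).
 */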
static int
qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr)
{
        struct ether_vlan_header *eh;
        struct ip *ip = NULL;
        struct ip6_hdr *ip6 = NULL;
        struct tcphdr *th = NULL;
        uint32_t ehdrlen, hdrlen, ip_hlen, tcp_hlen, tcp_opt_off;
        uint16_t etype, opcode, offload = 1;
        device_t dev;

        dev = ha->pci_dev;


        eh = mtod(mp, struct ether_vlan_header *);

        if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
                ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
                etype = ntohs(eh->evl_proto);
        } else {
                ehdrlen = ETHER_HDR_LEN;
                etype = ntohs(eh->evl_encap_proto);
        }

        hdrlen = 0;

        switch (etype) {
                case ETHERTYPE_IP:

                        tcp_opt_off = ehdrlen + sizeof(struct ip) +
                                        sizeof(struct tcphdr);

                        if (mp->m_len < tcp_opt_off) {
                                m_copydata(mp, 0, tcp_opt_off, hdr);
                                ip = (struct ip *)(hdr + ehdrlen);
                        } else {
                                ip = (struct ip *)(mp->m_data + ehdrlen);
                        }

                        ip_hlen = ip->ip_hl << 2;
                        opcode = Q8_TX_CMD_OP_XMT_TCP_LSO;


                        if ((ip->ip_p != IPPROTO_TCP) ||
                                (ip_hlen != sizeof (struct ip))) {
                                /* IP Options are not supported */

                                offload = 0;
                        } else
                                th = (struct tcphdr *)((caddr_t)ip + ip_hlen);

                break;

                case ETHERTYPE_IPV6:

                        tcp_opt_off = ehdrlen + sizeof(struct ip6_hdr) +
                                        sizeof (struct tcphdr);

                        if (mp->m_len < tcp_opt_off) {
                                m_copydata(mp, 0, tcp_opt_off, hdr);
                                ip6 = (struct ip6_hdr *)(hdr + ehdrlen);
                        } else {
                                ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
                        }

                        ip_hlen = sizeof(struct ip6_hdr);
                        opcode = Q8_TX_CMD_OP_XMT_TCP_LSO_IPV6;

                        if (ip6->ip6_nxt != IPPROTO_TCP) {
                                //device_printf(dev, "%s: ipv6\n", __func__);
                                offload = 0;
                        } else
                                th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
                break;

                default:
                        QL_DPRINT8(ha, (dev, "%s: type!=ip\n", __func__));
                        offload = 0;
                break;
        }

        if (!offload)
                return (-1);

        tcp_hlen = th->th_off << 2;
        hdrlen = ehdrlen + ip_hlen + tcp_hlen;

        if (mp->m_len < hdrlen) {
                if (mp->m_len < tcp_opt_off) {
                        if (tcp_hlen > sizeof(struct tcphdr)) {
                                m_copydata(mp, tcp_opt_off,
                                        (tcp_hlen - sizeof(struct tcphdr)),
                                        &hdr[tcp_opt_off]);
                        }
                } else {
                        m_copydata(mp, 0, hdrlen, hdr);
                }
        }

        tx_cmd->mss = mp->m_pkthdr.tso_segsz;

        tx_cmd->flags_opcode = opcode;
        tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;
        tx_cmd->total_hdr_len = hdrlen;

        /* Check for Multicast least significant bit of MSB == 1 */
        if (eh->evl_dhost[0] & 0x01) {
                tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST;
        }

        if (mp->m_len < hdrlen) {
                printf("%d\n", hdrlen);
                return (1);
        }

        return (0);
}

/*
 * Name: qla_tx_chksum
 * Function: Checks if the packet to be transmitted is a candidate for
 *      TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx
 *      Ring Structure are plugged in.
 */
static int
qla_tx_chksum(qla_host_t *ha, struct mbuf *mp, uint32_t *op_code,
        uint32_t *tcp_hdr_off)
{
        struct ether_vlan_header *eh;
        struct ip *ip;
        struct ip6_hdr *ip6;
        uint32_t ehdrlen, ip_hlen;
        uint16_t etype, opcode, offload = 1;
        device_t dev;
        uint8_t buf[sizeof(struct ip6_hdr)];

        dev = ha->pci_dev;

        *op_code = 0;

        if ((mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) == 0)
                return (-1);

        eh = mtod(mp, struct ether_vlan_header *);

        if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
                ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
                etype = ntohs(eh->evl_proto);
        } else {
                ehdrlen = ETHER_HDR_LEN;
                etype = ntohs(eh->evl_encap_proto);
        }


        switch (etype) {
                case ETHERTYPE_IP:
                        ip = (struct ip *)(mp->m_data + ehdrlen);

                        ip_hlen = sizeof (struct ip);

                        if (mp->m_len < (ehdrlen + ip_hlen)) {
                                m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
                                ip = (struct ip *)buf;
                        }

                        if (ip->ip_p == IPPROTO_TCP)
                                opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM;
                        else if (ip->ip_p == IPPROTO_UDP)
                                opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM;
                        else {
                                //device_printf(dev, "%s: ipv4\n", __func__);
                                offload = 0;
                        }
                break;

                case ETHERTYPE_IPV6:
                        ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);

                        ip_hlen = sizeof(struct ip6_hdr);

                        if (mp->m_len < (ehdrlen + ip_hlen)) {
                                m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
                                        buf);
                                ip6 = (struct ip6_hdr *)buf;
                        }

                        if (ip6->ip6_nxt == IPPROTO_TCP)
                                opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6;
                        else if (ip6->ip6_nxt == IPPROTO_UDP)
                                opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6;
                        else {
                                //device_printf(dev, "%s: ipv6\n", __func__);
                                offload = 0;
                        }
                break;

                default:
                        offload = 0;
                break;
        }
        if (!offload)
                return (-1);

        *op_code = opcode;
        *tcp_hdr_off = (ip_hlen + ehdrlen);

        return (0);
}

#define QLA_TX_MIN_FREE 2
/*
 * Name: ql_hw_send
 * Function: Transmits a packet. It first checks if the packet is a
 *      candidate for Large TCP Segment Offload and then for UDP/TCP checksum
1539  *      offload. If neither criterion is met, it is transmitted
1540  *      as a regular ethernet frame.
1541  */
1542 int
1543 ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
1544         uint32_t tx_idx,  struct mbuf *mp, uint32_t txr_idx)
1545 {
1546         struct ether_vlan_header *eh;
1547         qla_hw_t *hw = &ha->hw;
1548         q80_tx_cmd_t *tx_cmd, tso_cmd;
1549         bus_dma_segment_t *c_seg;
1550         uint32_t num_tx_cmds, hdr_len = 0;
1551         uint32_t total_length = 0, bytes, tx_cmd_count = 0, txr_next;
1552         device_t dev;
1553         int i, ret;
1554         uint8_t *src = NULL, *dst = NULL;
1555         uint8_t frame_hdr[QL_FRAME_HDR_SIZE];
1556         uint32_t op_code = 0;
1557         uint32_t tcp_hdr_off = 0;
1558
1559         dev = ha->pci_dev;
1560
1561         /*
1562          * Always make sure there is at least one empty slot in the tx_ring;
1563          * the tx_ring is considered full when only one entry is available.
1564          */
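        /*
         * Each tx command entry carries at most Q8_TX_CMD_MAX_SEGMENTS
         * (four, per the buf1..buf4 slots programmed below) DMA segments;
         * the shift by 2 is a ceiling divide by four.
         */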
1565         num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2;
1566
1567         total_length = mp->m_pkthdr.len;
1568         if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
1569                 device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
1570                         __func__, total_length);
1571                 return (-1);
1572         }
1573         eh = mtod(mp, struct ether_vlan_header *);
1574
1575         if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
1576
1577                 bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t));
1578
1579                 src = frame_hdr;
1580                 ret = qla_tx_tso(ha, mp, &tso_cmd, src);
1581
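                /*
                 * qla_tx_tso() returns 0 when the whole header sits in the
                 * first mbuf, 1 when it had to be staged into frame_hdr[],
                 * and -1 on failure; (ret & ~1) is non-zero only on failure.
                 */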
1582                 if (!(ret & ~1)) {
1583                         /* find the additional tx_cmd descriptors required */
1584
1585                         if (mp->m_flags & M_VLANTAG)
1586                                 tso_cmd.total_hdr_len += ETHER_VLAN_ENCAP_LEN;
1587
1588                         hdr_len = tso_cmd.total_hdr_len;
1589
1590                         bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
1591                         bytes = QL_MIN(bytes, hdr_len);
1592
1593                         num_tx_cmds++;
1594                         hdr_len -= bytes;
1595
1596                         while (hdr_len) {
1597                                 bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
1598                                 hdr_len -= bytes;
1599                                 num_tx_cmds++;
1600                         }
1601                         hdr_len = tso_cmd.total_hdr_len;
1602
1603                         if (ret == 0)
1604                                 src = (uint8_t *)eh;
1605                 } else 
1606                         return (EINVAL);
1607         } else {
1608                 (void)qla_tx_chksum(ha, mp, &op_code, &tcp_hdr_off);
1609         }
1610
1611         if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
1612                 qla_hw_tx_done_locked(ha, txr_idx);
1613                 if (hw->tx_cntxt[txr_idx].txr_free <=
1614                                 (num_tx_cmds + QLA_TX_MIN_FREE)) {
1615                         QL_DPRINT8(ha, (dev, "%s: (hw->txr_free <= "
1616                                 "(num_tx_cmds + QLA_TX_MIN_FREE))\n",
1617                                 __func__));
1618                         return (-1);
1619                 }
1620         }
1621
1622         tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[tx_idx];
1623
1624         if (!(mp->m_pkthdr.csum_flags & CSUM_TSO)) {
1625
1626                 if (nsegs > ha->hw.max_tx_segs)
1627                         ha->hw.max_tx_segs = nsegs;
1628
1629                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
1630
1631                 if (op_code) {
1632                         tx_cmd->flags_opcode = op_code;
1633                         tx_cmd->tcp_hdr_off = tcp_hdr_off;
1634
1635                 } else {
1636                         tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER;
1637                 }
1638         } else {
1639                 bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t));
1640                 ha->tx_tso_frames++;
1641         }
1642
1643         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1644                 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED;
1645         } else if (mp->m_flags & M_VLANTAG) {
1646
1647                 if (hdr_len) { /* TSO */
1648                         tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED |
1649                                                 Q8_TX_CMD_FLAGS_HW_VLAN_ID);
1650                         tx_cmd->tcp_hdr_off += ETHER_VLAN_ENCAP_LEN;
1651                 } else
1652                         tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_HW_VLAN_ID;
1653
1654                 ha->hw_vlan_tx_frames++;
1655                 tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag;
1656         }
1657
1658
1659         tx_cmd->n_bufs = (uint8_t)nsegs;
1660         tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF);
1661         tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8)));
1662         tx_cmd->cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func);
1663
1664         c_seg = segs;
1665
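        /*
         * Lay out up to four DMA segments per command entry, chaining
         * additional entries until all segments are consumed.
         */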
1666         while (1) {
1667                 for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) {
1668
1669                         switch (i) {
1670                         case 0:
1671                                 tx_cmd->buf1_addr = c_seg->ds_addr;
1672                                 tx_cmd->buf1_len = c_seg->ds_len;
1673                                 break;
1674
1675                         case 1:
1676                                 tx_cmd->buf2_addr = c_seg->ds_addr;
1677                                 tx_cmd->buf2_len = c_seg->ds_len;
1678                                 break;
1679
1680                         case 2:
1681                                 tx_cmd->buf3_addr = c_seg->ds_addr;
1682                                 tx_cmd->buf3_len = c_seg->ds_len;
1683                                 break;
1684
1685                         case 3:
1686                                 tx_cmd->buf4_addr = c_seg->ds_addr;
1687                                 tx_cmd->buf4_len = c_seg->ds_len;
1688                                 break;
1689                         }
1690
1691                         c_seg++;
1692                         nsegs--;
1693                 }
1694
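                /*
                 * Advance the producer index; the mask wrap assumes
                 * NUM_TX_DESCRIPTORS is a power of two.
                 */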
1695                 txr_next = hw->tx_cntxt[txr_idx].txr_next =
1696                         (hw->tx_cntxt[txr_idx].txr_next + 1) &
1697                                 (NUM_TX_DESCRIPTORS - 1);
1698                 tx_cmd_count++;
1699
1700                 if (!nsegs)
1701                         break;
1702                 
1703                 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
1704                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
1705         }
1706
1707         if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
1708
1709                 /* TSO: copy the header into the following tx cmd descriptors */
1710
1711                 txr_next = hw->tx_cntxt[txr_idx].txr_next;
1712
1713                 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
1714                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
1715
1716                 bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
1717                 bytes = QL_MIN(bytes, hdr_len);
1718
1719                 dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN;
1720
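                /*
                 * For M_VLANTAG frames the tag lives in the mbuf pkthdr,
                 * not in the frame data, so rebuild the 802.1Q header
                 * in-line: DA/SA first, then TPID and TCI, then the rest
                 * of the staged header.
                 */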
1721                 if (mp->m_flags & M_VLANTAG) {
1722                         /* first copy the src/dst MAC addresses */
1723                         bcopy(src, dst, (ETHER_ADDR_LEN * 2));
1724                         dst += (ETHER_ADDR_LEN * 2);
1725                         src += (ETHER_ADDR_LEN * 2);
1726                         
1727                         *((uint16_t *)dst) = htons(ETHERTYPE_VLAN);
1728                         dst += 2;
1729                         *((uint16_t *)dst) = htons(mp->m_pkthdr.ether_vtag);
1730                         dst += 2;
1731
1732                         /* bytes left in src header */
1733                         hdr_len -= ((ETHER_ADDR_LEN * 2) +
1734                                         ETHER_VLAN_ENCAP_LEN);
1735
1736                         /* bytes left in TxCmd Entry */
1737                         bytes -= ((ETHER_ADDR_LEN * 2) + ETHER_VLAN_ENCAP_LEN);
1738
1739
1740                         bcopy(src, dst, bytes);
1741                         src += bytes;
1742                         hdr_len -= bytes;
1743                 } else {
1744                         bcopy(src, dst, bytes);
1745                         src += bytes;
1746                         hdr_len -= bytes;
1747                 }
1748
1749                 txr_next = hw->tx_cntxt[txr_idx].txr_next =
1750                                 (hw->tx_cntxt[txr_idx].txr_next + 1) &
1751                                         (NUM_TX_DESCRIPTORS - 1);
1752                 tx_cmd_count++;
1753                 
1754                 while (hdr_len) {
1755                         tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
1756                         bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
1757
1758                         bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
1759
1760                         bcopy(src, tx_cmd, bytes);
1761                         src += bytes;
1762                         hdr_len -= bytes;
1763
1764                         txr_next = hw->tx_cntxt[txr_idx].txr_next =
1765                                 (hw->tx_cntxt[txr_idx].txr_next + 1) &
1766                                         (NUM_TX_DESCRIPTORS - 1);
1767                         tx_cmd_count++;
1768                 }
1769         }
1770
1771         hw->tx_cntxt[txr_idx].txr_free =
1772                 hw->tx_cntxt[txr_idx].txr_free - tx_cmd_count;
1773
1774         QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->tx_cntxt[txr_idx].txr_next,
1775                 txr_idx);
1776         QL_DPRINT8(ha, (dev, "%s: return\n", __func__));
1777
1778         return (0);
1779 }
1780
1781
1782 static int
1783 qla_config_rss_ind_table(qla_host_t *ha)
1784 {
1785         uint32_t i, count;
1786         uint8_t rss_ind_tbl[16];
1787
1788         bzero(rss_ind_tbl, sizeof(rss_ind_tbl));
1789
1790
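        /*
         * Distribute the table entries round-robin across the SDS rings;
         * with four rings this yields 0,1,2,3,0,1,2,3,...
         */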
1791         for (i = 0; i < 16; i++) {
1792                 rss_ind_tbl[i] = i % ha->hw.num_sds_rings;
1793         }
1794
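        /* the indirection table is programmed 16 entries per mailbox command */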
1795         for (i = 0; i <= Q8_RSS_IND_TBL_MAX_IDX ; i = i + 16) {
1796
1797                 if ((i + 16) > Q8_RSS_IND_TBL_MAX_IDX) {
1798                         count = Q8_RSS_IND_TBL_MAX_IDX - i + 1;
1799                 } else {
1800                         count = 16;
1801                 }
1802
1803                 if (qla_set_rss_ind_table(ha, i, count, ha->hw.rcv_cntxt_id,
1804                         rss_ind_tbl))
1805                         return (-1);
1806         }
1807
1808         return (0);
1809 }
1810
1811 /*
1812  * Name: ql_del_hw_if
1813  * Function: Destroys the hardware specific entities corresponding to an
1814  *      Ethernet Interface
1815  */
1816 void
1817 ql_del_hw_if(qla_host_t *ha)
1818 {
1819
1820         qla_del_rcv_cntxt(ha);
1821         qla_del_xmt_cntxt(ha);
1822
1823         if (ha->hw.flags.init_intr_cnxt) {
1824                 qla_config_intr_cntxt(ha, ha->hw.num_sds_rings, 0);
1825                 ha->hw.flags.init_intr_cnxt = 0;
1826         }
1827 }
1828
1829 /*
1830  * Name: ql_init_hw_if
1831  * Function: Creates the hardware specific entities corresponding to an
1832  *      Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
1833  *      corresponding to the interface. Enables LRO if allowed.
1834  */
1835 int
1836 ql_init_hw_if(qla_host_t *ha)
1837 {
1838         device_t        dev;
1839         uint32_t        i;
1840         uint8_t         bcast_mac[6];
1841         qla_rdesc_t     *rdesc;
1842
1843         dev = ha->pci_dev;
1844
1845         for (i = 0; i < ha->hw.num_sds_rings; i++) {
1846                 bzero(ha->hw.dma_buf.sds_ring[i].dma_b,
1847                         ha->hw.dma_buf.sds_ring[i].size);
1848         }
1849         ha->hw.mbx_intr_mask_offset = READ_REG32(ha, Q8_MBOX_INT_MASK_MSIX);
1850
1851         /* Use MSI-X vector 0; Enable Firmware Mailbox Interrupt */
1852         WRITE_REG32(ha, Q8_MBOX_INT_ENABLE, BIT_2);
1853         WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
1854
1855         qla_get_nic_partition(ha);
1856
1857         if (qla_config_intr_cntxt(ha, ha->hw.num_sds_rings, 1) == 0) {
1858                 ha->hw.flags.init_intr_cnxt = 1;
1859         } else 
1860                 return (-1);
1861
1862         if (ha->hw.mdump_init == 0) {
1863                 qla_minidump_init(ha);
1864         }
1865
1866         /*
1867          * Create Receive Context
1868          */
1869         if (qla_init_rcv_cntxt(ha)) {
1870                 return (-1);
1871         }
1872
1873         for (i = 0; i < ha->hw.num_rds_rings; i++) {
1874                 rdesc = &ha->hw.rds[i];
1875                 rdesc->rx_next = NUM_RX_DESCRIPTORS - 2;
1876                 rdesc->rx_in = 0;
1877                 /* Update the RDS Producer Indices */
1878                 QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,
1879                         rdesc->rx_next);
1880         }
1881
1882
1883         /*
1884          * Create Transmit Context
1885          */
1886         if (qla_init_xmt_cntxt(ha)) {
1887                 qla_del_rcv_cntxt(ha);
1888                 return (-1);
1889         }
1890         ha->hw.max_tx_segs = 0;
1891
1892         if (qla_config_mac_addr(ha, ha->hw.mac_addr, 1))
1893                 return(-1);
1894
1895         ha->hw.flags.unicast_mac = 1;
1896
1897         bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
1898         bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
1899
1900         if (qla_config_mac_addr(ha, bcast_mac, 1))
1901                 return (-1);
1902
1903         ha->hw.flags.bcast_mac = 1;
1904
1905         /*
1906          * program any cached multicast addresses
1907          */
1908         if (qla_hw_add_all_mcast(ha))
1909                 return (-1);
1910
1911         if (qla_config_rss(ha, ha->hw.rcv_cntxt_id))
1912                 return (-1);
1913
1914         if (qla_config_rss_ind_table(ha))
1915                 return (-1);
1916
1917         if (qla_config_intr_coalesce(ha, ha->hw.rcv_cntxt_id, 0))
1918                 return (-1);
1919
1920         if (qla_link_event_req(ha, ha->hw.rcv_cntxt_id))
1921                 return (-1);
1922
1923         if (qla_config_fw_lro(ha, ha->hw.rcv_cntxt_id))
1924                 return (-1);
1925
1926         for (i = 0; i < ha->hw.num_sds_rings; i++)
1927                 QL_ENABLE_INTERRUPTS(ha, i);
1928
1929         return (0);
1930 }
1931
1932 static int
1933 qla_map_sds_to_rds(qla_host_t *ha)
1934 {
1935         device_t                dev = ha->pci_dev;
1936         q80_rq_map_sds_to_rds_t *map_rings;
1937         q80_rsp_add_rcv_rings_t *map_rings_rsp;
1938         uint32_t                i, err;
1939         qla_hw_t                *hw = &ha->hw;
1940
1941         map_rings = (q80_rq_map_sds_to_rds_t *)ha->hw.mbox;
1942         bzero(map_rings, sizeof(q80_rq_map_sds_to_rds_t));
1943
1944         map_rings->opcode = Q8_MBX_MAP_SDS_TO_RDS;
1945         map_rings->count_version = (sizeof (q80_rq_map_sds_to_rds_t) >> 2);
1946         map_rings->count_version |= Q8_MBX_CMD_VERSION;
1947
1948         map_rings->cntxt_id = hw->rcv_cntxt_id;
1949         map_rings->num_rings = hw->num_sds_rings;
1950
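        /* identity mapping: SDS ring i delivers completions for RDS ring i */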
1951         for (i = 0; i < hw->num_sds_rings; i++) {
1952                 map_rings->sds_rds[i].sds_ring = i;
1953                 map_rings->sds_rds[i].rds_ring = i;
1954         }
1955
1956         if (qla_mbx_cmd(ha, (uint32_t *)map_rings,
1957                 (sizeof (q80_rq_map_sds_to_rds_t) >> 2),
1958                 ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
1959                 device_printf(dev, "%s: failed0\n", __func__);
1960                 return (-1);
1961         }
1962
1963         map_rings_rsp = (q80_rsp_add_rcv_rings_t *)ha->hw.mbox;
1964
1965         err = Q8_MBX_RSP_STATUS(map_rings_rsp->regcnt_status);
1966
1967         if (err) {
1968                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1969                 return (-1);
1970         }
1971
1972         return (0);
1973 }
1974
1975 /*
1976  * Name: qla_init_rcv_cntxt
1977  * Function: Creates the Receive Context.
1978  */
1979 static int
1980 qla_init_rcv_cntxt(qla_host_t *ha)
1981 {
1982         q80_rq_rcv_cntxt_t      *rcntxt;
1983         q80_rsp_rcv_cntxt_t     *rcntxt_rsp;
1984         q80_stat_desc_t         *sdesc;
1985         int                     i, j;
1986         qla_hw_t                *hw = &ha->hw;
1987         device_t                dev;
1988         uint32_t                err;
1989         uint32_t                rcntxt_sds_rings;
1990         uint32_t                rcntxt_rds_rings;
1991
1992         dev = ha->pci_dev;
1993
1994         /*
1995          * Create Receive Context
1996          */
1997
1998         for (i = 0; i < hw->num_sds_rings; i++) {
1999                 sdesc = (q80_stat_desc_t *)&hw->sds[i].sds_ring_base[0];
2000
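                /*
                 * Preset both words of every status descriptor so stale
                 * entries are never mistaken for valid completions.
                 */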
2001                 for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++, sdesc++) {
2002                         sdesc->data[0] = 1ULL;
2003                         sdesc->data[1] = 1ULL;
2004                 }
2005         }
2006
2007         rcntxt_sds_rings = hw->num_sds_rings;
2008         if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS)
2009                 rcntxt_sds_rings = MAX_RCNTXT_SDS_RINGS;
2010
2011         rcntxt_rds_rings = hw->num_rds_rings;
2012
2013         if (hw->num_rds_rings > MAX_RDS_RING_SETS)
2014                 rcntxt_rds_rings = MAX_RDS_RING_SETS;
2015
2016         rcntxt = (q80_rq_rcv_cntxt_t *)ha->hw.mbox;
2017         bzero(rcntxt, (sizeof (q80_rq_rcv_cntxt_t)));
2018
2019         rcntxt->opcode = Q8_MBX_CREATE_RX_CNTXT;
2020         rcntxt->count_version = (sizeof (q80_rq_rcv_cntxt_t) >> 2);
2021         rcntxt->count_version |= Q8_MBX_CMD_VERSION;
2022
2023         rcntxt->cap0 = Q8_RCV_CNTXT_CAP0_BASEFW |
2024                         Q8_RCV_CNTXT_CAP0_LRO |
2025                         Q8_RCV_CNTXT_CAP0_HW_LRO |
2026                         Q8_RCV_CNTXT_CAP0_RSS |
2027                         Q8_RCV_CNTXT_CAP0_SGL_JUMBO |
2028                         Q8_RCV_CNTXT_CAP0_SGL_LRO;
2029
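        /*
         * nrds_sets_rings: the low five bits carry the number of RDS sets;
         * the (1 << 5) presumably selects one ring per set (field layout
         * assumed from its usage here).
         */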
2030         if (ha->hw.num_rds_rings > 1) {
2031                 rcntxt->nrds_sets_rings = rcntxt_rds_rings | (1 << 5);
2032                 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_MULTI_RDS;
2033         } else
2034                 rcntxt->nrds_sets_rings = 0x1 | (1 << 5);
2035
2036         rcntxt->nsds_rings = rcntxt_sds_rings;
2037
2038         rcntxt->rds_producer_mode = Q8_RCV_CNTXT_RDS_PROD_MODE_UNIQUE;
2039
2040         rcntxt->rcv_vpid = 0;
2041
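        /*
         * With exactly two MSI-X vectors all SDS rings share vector 0 and
         * are distinguished by their interrupt source bit; otherwise each
         * ring gets a dedicated vector.
         */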
2042         for (i = 0; i <  rcntxt_sds_rings; i++) {
2043                 rcntxt->sds[i].paddr =
2044                         qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr);
2045                 rcntxt->sds[i].size =
2046                         qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
2047                 if (ha->msix_count == 2) {
2048                         rcntxt->sds[i].intr_id =
2049                                 qla_host_to_le16(hw->intr_id[0]);
2050                         rcntxt->sds[i].intr_src_bit = qla_host_to_le16((i));
2051                 } else {
2052                         rcntxt->sds[i].intr_id =
2053                                 qla_host_to_le16(hw->intr_id[i]);
2054                         rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0);
2055                 }
2056         }
2057
2058         for (i = 0; i <  rcntxt_rds_rings; i++) {
2059                 rcntxt->rds[i].paddr_std =
2060                         qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr);
2061                 rcntxt->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
2062                 rcntxt->rds[i].std_nentries =
2063                         qla_host_to_le32(NUM_RX_DESCRIPTORS);
2064         }
2065
2066         if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
2067                 (sizeof (q80_rq_rcv_cntxt_t) >> 2),
2068                 ha->hw.mbox, (sizeof(q80_rsp_rcv_cntxt_t) >> 2), 0)) {
2069                 device_printf(dev, "%s: failed0\n", __func__);
2070                 return (-1);
2071         }
2072
2073         rcntxt_rsp = (q80_rsp_rcv_cntxt_t *)ha->hw.mbox;
2074
2075         err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
2076
2077         if (err) {
2078                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2079                 return (-1);
2080         }
2081
2082         for (i = 0; i <  rcntxt_sds_rings; i++) {
2083                 hw->sds[i].sds_consumer = rcntxt_rsp->sds_cons[i];
2084         }
2085
2086         for (i = 0; i <  rcntxt_rds_rings; i++) {
2087                 hw->rds[i].prod_std = rcntxt_rsp->rds[i].prod_std;
2088         }
2089
2090         hw->rcv_cntxt_id = rcntxt_rsp->cntxt_id;
2091
2092         ha->hw.flags.init_rx_cnxt = 1;
2093
2094         if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) {
2095                 err = qla_add_rcv_rings(ha, MAX_RCNTXT_SDS_RINGS);
2096                 if (err)
2097                         return -1;
2098         }
2099
2100         if (hw->num_rds_rings > 1) {
2101                 err = qla_map_sds_to_rds(ha);
2102                 if (err)
2103                         return -1;
2104         }
2105
2106         return (0);
2107 }
2108
2109 static int
2110 qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx)
2111 {
2112         device_t                dev = ha->pci_dev;
2113         q80_rq_add_rcv_rings_t  *add_rcv;
2114         q80_rsp_add_rcv_rings_t *add_rcv_rsp;
2115         uint32_t                i,j, err;
2116         uint8_t                 nsds;
2117         qla_hw_t                *hw = &ha->hw;
2118
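        /*
         * Only the SDS rings beyond what the create-context request could
         * describe (MAX_RCNTXT_SDS_RINGS) are added here.
         */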
2119         nsds = hw->num_sds_rings - MAX_RCNTXT_SDS_RINGS;
2120
2121         add_rcv = (q80_rq_add_rcv_rings_t *)ha->hw.mbox;
2122         bzero(add_rcv, sizeof (q80_rq_add_rcv_rings_t));
2123
2124         add_rcv->opcode = Q8_MBX_ADD_RX_RINGS;
2125         add_rcv->count_version = (sizeof (q80_rq_add_rcv_rings_t) >> 2);
2126         add_rcv->count_version |= Q8_MBX_CMD_VERSION;
2127
2128         if (hw->num_rds_rings > 1)
2129                 add_rcv->nrds_sets_rings = nsds | (1 << 5);
2130         else
2131                 add_rcv->nrds_sets_rings = 0;
2132
2133         add_rcv->nsds_rings = nsds;
2134         add_rcv->cntxt_id = hw->rcv_cntxt_id;
2135
2136         for (i = 0; i <  nsds; i++) {
2137
2138                 j = i + sds_idx;
2139
2140                 add_rcv->sds[i].paddr =
2141                         qla_host_to_le64(hw->dma_buf.sds_ring[j].dma_addr);
2142
2143                 add_rcv->sds[i].size =
2144                         qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
2145
2146                 if (ha->msix_count == 2) {
2147                         add_rcv->sds[i].intr_id =
2148                                 qla_host_to_le16(hw->intr_id[0]);
2149                         add_rcv->sds[i].intr_src_bit = qla_host_to_le16(j);
2150                 } else {
2151                         add_rcv->sds[i].intr_id =
2152                                 qla_host_to_le16(hw->intr_id[j]);
2153                         add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0);
2154                 }
2155
2156         }
2157         for (i = 0; ((i <  nsds) && (hw->num_rds_rings > 1)); i++) {
2158                 j = i + sds_idx;
2159                 add_rcv->rds[i].paddr_std =
2160                         qla_host_to_le64(hw->dma_buf.rds_ring[j].dma_addr);
2161                 add_rcv->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
2162                 add_rcv->rds[i].std_nentries =
2163                         qla_host_to_le32(NUM_RX_DESCRIPTORS);
2164         }
2165
2166
2167         if (qla_mbx_cmd(ha, (uint32_t *)add_rcv,
2168                 (sizeof (q80_rq_add_rcv_rings_t) >> 2),
2169                 ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
2170                 device_printf(dev, "%s: failed0\n", __func__);
2171                 return (-1);
2172         }
2173
2174         add_rcv_rsp = (q80_rsp_add_rcv_rings_t *)ha->hw.mbox;
2175
2176         err = Q8_MBX_RSP_STATUS(add_rcv_rsp->regcnt_status);
2177
2178         if (err) {
2179                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2180                 return (-1);
2181         }
2182
2183         for (i = sds_idx; i < hw->num_sds_rings; i++) {
2184                 hw->sds[i].sds_consumer = add_rcv_rsp->sds_cons[(i - sds_idx)];
2185         }
2186         for (i = sds_idx; i < hw->num_rds_rings; i++) {
2187                 hw->rds[i].prod_std = add_rcv_rsp->rds[(i - sds_idx)].prod_std;
2188         }
2189         return (0);
2190 }
2191
2192 /*
2193  * Name: qla_del_rcv_cntxt
2194  * Function: Destroys the Receive Context.
2195  */
2196 static void
2197 qla_del_rcv_cntxt(qla_host_t *ha)
2198 {
2199         device_t                        dev = ha->pci_dev;
2200         q80_rcv_cntxt_destroy_t         *rcntxt;
2201         q80_rcv_cntxt_destroy_rsp_t     *rcntxt_rsp;
2202         uint32_t                        err;
2203         uint8_t                         bcast_mac[6];
2204
2205         if (!ha->hw.flags.init_rx_cnxt)
2206                 return;
2207
2208         if (qla_hw_del_all_mcast(ha))
2209                 return;
2210
2211         if (ha->hw.flags.bcast_mac) {
2212
2213                 bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
2214                 bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
2215
2216                 if (qla_config_mac_addr(ha, bcast_mac, 0))
2217                         return;
2218                 ha->hw.flags.bcast_mac = 0;
2219
2220         }
2221
2222         if (ha->hw.flags.unicast_mac) {
2223                 if (qla_config_mac_addr(ha, ha->hw.mac_addr, 0))
2224                         return;
2225                 ha->hw.flags.unicast_mac = 0;
2226         }
2227
2228         rcntxt = (q80_rcv_cntxt_destroy_t *)ha->hw.mbox;
2229         bzero(rcntxt, (sizeof (q80_rcv_cntxt_destroy_t)));
2230
2231         rcntxt->opcode = Q8_MBX_DESTROY_RX_CNTXT;
2232         rcntxt->count_version = (sizeof (q80_rcv_cntxt_destroy_t) >> 2);
2233         rcntxt->count_version |= Q8_MBX_CMD_VERSION;
2234
2235         rcntxt->cntxt_id = ha->hw.rcv_cntxt_id;
2236
2237         if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
2238                 (sizeof (q80_rcv_cntxt_destroy_t) >> 2),
2239                 ha->hw.mbox, (sizeof(q80_rcv_cntxt_destroy_rsp_t) >> 2), 0)) {
2240                 device_printf(dev, "%s: failed0\n", __func__);
2241                 return;
2242         }
2243         rcntxt_rsp = (q80_rcv_cntxt_destroy_rsp_t *)ha->hw.mbox;
2244
2245         err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
2246
2247         if (err) {
2248                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2249         }
2250
2251         ha->hw.flags.init_rx_cnxt = 0;
2252         return;
2253 }
2254
2255 /*
2256  * Name: qla_init_xmt_cntxt
2257  * Function: Creates the Transmit Context.
2258  */
2259 static int
2260 qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
2261 {
2262         device_t                dev;
2263         qla_hw_t                *hw = &ha->hw;
2264         q80_rq_tx_cntxt_t       *tcntxt;
2265         q80_rsp_tx_cntxt_t      *tcntxt_rsp;
2266         uint32_t                err;
2267         qla_hw_tx_cntxt_t       *hw_tx_cntxt;
2268
2269         hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
2270
2271         dev = ha->pci_dev;
2272
2273         /*
2274          * Create Transmit Context
2275          */
2276         tcntxt = (q80_rq_tx_cntxt_t *)ha->hw.mbox;
2277         bzero(tcntxt, (sizeof (q80_rq_tx_cntxt_t)));
2278
2279         tcntxt->opcode = Q8_MBX_CREATE_TX_CNTXT;
2280         tcntxt->count_version = (sizeof (q80_rq_tx_cntxt_t) >> 2);
2281         tcntxt->count_version |= Q8_MBX_CMD_VERSION;
2282
2283         tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO;
2284
2285         tcntxt->ntx_rings = 1;
2286
2287         tcntxt->tx_ring[0].paddr =
2288                 qla_host_to_le64(hw_tx_cntxt->tx_ring_paddr);
2289         tcntxt->tx_ring[0].tx_consumer =
2290                 qla_host_to_le64(hw_tx_cntxt->tx_cons_paddr);
2291         tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS);
2292
2293         tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[0]);
2294         tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0);
2295
2296
2297         hw_tx_cntxt->txr_free = NUM_TX_DESCRIPTORS;
2298         hw_tx_cntxt->txr_next = hw_tx_cntxt->txr_comp = 0;
2299
2300         if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
2301                 (sizeof (q80_rq_tx_cntxt_t) >> 2),
2302                 ha->hw.mbox,
2303                 (sizeof(q80_rsp_tx_cntxt_t) >> 2), 0)) {
2304                 device_printf(dev, "%s: failed0\n", __func__);
2305                 return (-1);
2306         }
2307         tcntxt_rsp = (q80_rsp_tx_cntxt_t *)ha->hw.mbox;
2308
2309         err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
2310
2311         if (err) {
2312                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2313                 return -1;
2314         }
2315
2316         hw_tx_cntxt->tx_prod_reg = tcntxt_rsp->tx_ring[0].prod_index;
2317         hw_tx_cntxt->tx_cntxt_id = tcntxt_rsp->tx_ring[0].cntxt_id;
2318
2319         return (0);
2320 }
2321
2322
2323 /*
2324  * Name: qla_del_xmt_cntxt
2325  * Function: Destroys the Transmit Context.
2326  */
2327 static int
2328 qla_del_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
2329 {
2330         device_t                        dev = ha->pci_dev;
2331         q80_tx_cntxt_destroy_t          *tcntxt;
2332         q80_tx_cntxt_destroy_rsp_t      *tcntxt_rsp;
2333         uint32_t                        err;
2334
2335         tcntxt = (q80_tx_cntxt_destroy_t *)ha->hw.mbox;
2336         bzero(tcntxt, (sizeof (q80_tx_cntxt_destroy_t)));
2337
2338         tcntxt->opcode = Q8_MBX_DESTROY_TX_CNTXT;
2339         tcntxt->count_version = (sizeof (q80_tx_cntxt_destroy_t) >> 2);
2340         tcntxt->count_version |= Q8_MBX_CMD_VERSION;
2341
2342         tcntxt->cntxt_id = ha->hw.tx_cntxt[txr_idx].tx_cntxt_id;
2343
2344         if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
2345                 (sizeof (q80_tx_cntxt_destroy_t) >> 2),
2346                 ha->hw.mbox, (sizeof (q80_tx_cntxt_destroy_rsp_t) >> 2), 0)) {
2347                 device_printf(dev, "%s: failed0\n", __func__);
2348                 return (-1);
2349         }
2350         tcntxt_rsp = (q80_tx_cntxt_destroy_rsp_t *)ha->hw.mbox;
2351
2352         err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
2353
2354         if (err) {
2355                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2356                 return (-1);
2357         }
2358
2359         return (0);
2360 }
2361 static void
2362 qla_del_xmt_cntxt(qla_host_t *ha)
2363 {
2364         uint32_t i;
2365
2366         if (!ha->hw.flags.init_tx_cnxt)
2367                 return;
2368
2369         for (i = 0; i < ha->hw.num_tx_rings; i++) {
2370                 if (qla_del_xmt_cntxt_i(ha, i))
2371                         break;
2372         }
2373         ha->hw.flags.init_tx_cnxt = 0;
2374 }
2375
2376 static int
2377 qla_init_xmt_cntxt(qla_host_t *ha)
2378 {
2379         uint32_t i, j;
2380
2381         for (i = 0; i < ha->hw.num_tx_rings; i++) {
2382                 if (qla_init_xmt_cntxt_i(ha, i) != 0) {
2383                         for (j = 0; j < i; j++)
2384                                 qla_del_xmt_cntxt_i(ha, j);
2385                         return (-1);
2386                 }
2387         }
2388         ha->hw.flags.init_tx_cnxt = 1;
2389         return (0);
2390 }
2391
2392 static int
2393 qla_hw_add_all_mcast(qla_host_t *ha)
2394 {
2395         int i, nmcast;
2396
2397         nmcast = ha->hw.nmcast;
2398
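        /*
         * An all-zero address marks a free slot in the cached table;
         * program every non-empty entry until the cached count is used up.
         */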
2399         for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
2400                 if ((ha->hw.mcast[i].addr[0] != 0) || 
2401                         (ha->hw.mcast[i].addr[1] != 0) ||
2402                         (ha->hw.mcast[i].addr[2] != 0) ||
2403                         (ha->hw.mcast[i].addr[3] != 0) ||
2404                         (ha->hw.mcast[i].addr[4] != 0) ||
2405                         (ha->hw.mcast[i].addr[5] != 0)) {
2406
2407                         if (qla_config_mac_addr(ha, ha->hw.mcast[i].addr, 1)) {
2408                                 device_printf(ha->pci_dev, "%s: failed\n",
2409                                         __func__);
2410                                 return (-1);
2411                         }
2412
2413                         nmcast--;
2414                 }
2415         }
2416         return 0;
2417 }
2418
2419 static int
2420 qla_hw_del_all_mcast(qla_host_t *ha)
2421 {
2422         int i, nmcast;
2423
2424         nmcast = ha->hw.nmcast;
2425
2426         for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
2427                 if ((ha->hw.mcast[i].addr[0] != 0) || 
2428                         (ha->hw.mcast[i].addr[1] != 0) ||
2429                         (ha->hw.mcast[i].addr[2] != 0) ||
2430                         (ha->hw.mcast[i].addr[3] != 0) ||
2431                         (ha->hw.mcast[i].addr[4] != 0) ||
2432                         (ha->hw.mcast[i].addr[5] != 0)) {
2433
2434                         if (qla_config_mac_addr(ha, ha->hw.mcast[i].addr, 0))
2435                                 return (-1);
2436
2437                         nmcast--;
2438                 }
2439         }
2440         return 0;
2441 }
2442
2443 static int
2444 qla_hw_add_mcast(qla_host_t *ha, uint8_t *mta)
2445 {
2446         int i;
2447
2448         for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
2449
2450                 if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0)
2451                         return 0; /* it has already been added */
2452         }
2453
2454         for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
2455
2456                 if ((ha->hw.mcast[i].addr[0] == 0) && 
2457                         (ha->hw.mcast[i].addr[1] == 0) &&
2458                         (ha->hw.mcast[i].addr[2] == 0) &&
2459                         (ha->hw.mcast[i].addr[3] == 0) &&
2460                         (ha->hw.mcast[i].addr[4] == 0) &&
2461                         (ha->hw.mcast[i].addr[5] == 0)) {
2462
2463                         if (qla_config_mac_addr(ha, mta, 1))
2464                                 return (-1);
2465
2466                         bcopy(mta, ha->hw.mcast[i].addr, Q8_MAC_ADDR_LEN);
2467                         ha->hw.nmcast++;        
2468
2469                         return 0;
2470                 }
2471         }
2472         return 0;
2473 }
2474
2475 static int
2476 qla_hw_del_mcast(qla_host_t *ha, uint8_t *mta)
2477 {
2478         int i;
2479
2480         for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
2481                 if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) {
2482
2483                         if (qla_config_mac_addr(ha, mta, 0))
2484                                 return (-1);
2485
2486                         ha->hw.mcast[i].addr[0] = 0;
2487                         ha->hw.mcast[i].addr[1] = 0;
2488                         ha->hw.mcast[i].addr[2] = 0;
2489                         ha->hw.mcast[i].addr[3] = 0;
2490                         ha->hw.mcast[i].addr[4] = 0;
2491                         ha->hw.mcast[i].addr[5] = 0;
2492
2493                         ha->hw.nmcast--;        
2494
2495                         return 0;
2496                 }
2497         }
2498         return 0;
2499 }
2500
2501 /*
2502  * Name: ql_hw_set_multi
2503  * Function: Sets the Multicast Addresses provided by the host O.S. into the
2504  *      hardware (for the given interface)
2505  */
2506 int
2507 ql_hw_set_multi(qla_host_t *ha, uint8_t *mcast, uint32_t mcnt,
2508         uint32_t add_mac)
2509 {
2510         int i;
2511         uint8_t *mta = mcast;
2512         int ret = 0;
2513
2514         for (i = 0; i < mcnt; i++) {
2515                 if (add_mac) {
2516                         ret = qla_hw_add_mcast(ha, mta);
2517                         if (ret)
2518                                 break;
2519                 } else {
2520                         ret = qla_hw_del_mcast(ha, mta);
2521                         if (ret)
2522                                 break;
2523                 }
2524                         
2525                 mta += Q8_MAC_ADDR_LEN;
2526         }
2527         return (ret);
2528 }
2529
2530 /*
2531  * Name: qla_hw_tx_done_locked
2532  * Function: Handle Transmit Completions
2533  */
2534 static void
2535 qla_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx)
2536 {
2537         qla_tx_buf_t *txb;
2538         qla_hw_t *hw = &ha->hw;
2539         uint32_t comp_idx, comp_count = 0;
2540         qla_hw_tx_cntxt_t *hw_tx_cntxt;
2541
2542         hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
2543
2544         /* retrieve index of last entry in tx ring completed */
2545         comp_idx = qla_le32_to_host(*(hw_tx_cntxt->tx_cons));
2546
2547         while (comp_idx != hw_tx_cntxt->txr_comp) {
2548
2549                 txb = &ha->tx_ring[txr_idx].tx_buf[hw_tx_cntxt->txr_comp];
2550
2551                 hw_tx_cntxt->txr_comp++;
2552                 if (hw_tx_cntxt->txr_comp == NUM_TX_DESCRIPTORS)
2553                         hw_tx_cntxt->txr_comp = 0;
2554
2555                 comp_count++;
2556
2557                 if (txb->m_head) {
2558                         ha->ifp->if_opackets++;
2559
2560                         bus_dmamap_sync(ha->tx_tag, txb->map,
2561                                 BUS_DMASYNC_POSTWRITE);
2562                         bus_dmamap_unload(ha->tx_tag, txb->map);
2563                         m_freem(txb->m_head);
2564
2565                         txb->m_head = NULL;
2566                 }
2567         }
2568
2569         hw_tx_cntxt->txr_free += comp_count;
2570         return;
2571 }
2572
2573 /*
2574  * Name: ql_hw_tx_done
2575  * Function: Handle Transmit Completions
2576  */
2577 void
2578 ql_hw_tx_done(qla_host_t *ha)
2579 {
2580         int i;
2581         uint32_t flag = 0;
2582
2583         if (!mtx_trylock(&ha->tx_lock)) {
2584                 QL_DPRINT8(ha, (ha->pci_dev,
2585                         "%s: !mtx_trylock(&ha->tx_lock)\n", __func__));
2586                 return;
2587         }
2588         for (i = 0; i < ha->hw.num_tx_rings; i++) {
2589                 qla_hw_tx_done_locked(ha, i);
2590                 if (ha->hw.tx_cntxt[i].txr_free <= (NUM_TX_DESCRIPTORS >> 1))
2591                         flag = 1;
2592         }
2593
2594         if (!flag)
2595                 ha->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2596
2597         QLA_TX_UNLOCK(ha);
2598         return;
2599 }
2600
2601 void
2602 ql_update_link_state(qla_host_t *ha)
2603 {
2604         uint32_t link_state;
2605         uint32_t prev_link_state;
2606
2607         if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2608                 ha->hw.link_up = 0;
2609                 return;
2610         }
2611         link_state = READ_REG32(ha, Q8_LINK_STATE);
2612
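        /*
         * Q8_LINK_STATE packs a 4-bit state per PCI function; a value of
         * 1 denotes link up (as decoded below).
         */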
2613         prev_link_state = ha->hw.link_up;
2614
2615         if (ha->pci_func == 0)
2616                 ha->hw.link_up = (((link_state & 0xF) == 1) ? 1 : 0);
2617         else
2618                 ha->hw.link_up = ((((link_state >> 4) & 0xF) == 1) ? 1 : 0);
2619
2620         if (prev_link_state !=  ha->hw.link_up) {
2621                 if (ha->hw.link_up) {
2622                         if_link_state_change(ha->ifp, LINK_STATE_UP);
2623                 } else {
2624                         if_link_state_change(ha->ifp, LINK_STATE_DOWN);
2625                 }
2626         }
2627         return;
2628 }
2629
2630 void
2631 ql_hw_stop_rcv(qla_host_t *ha)
2632 {
2633         int i, done, count = 100;
2634
2635         while (count--) {
2636                 done = 1;
2637                 for (i = 0; i < ha->hw.num_sds_rings; i++) {
2638                         if (ha->hw.sds[i].rcv_active)
2639                                 done = 0;
2640                 }
2641                 if (done)
2642                         break;
2643                 else 
2644                         qla_mdelay(__func__, 10);
2645         }
2646         if (!count)
2647                 device_printf(ha->pci_dev, "%s: Counter expired.\n", __func__);
2648
2649         return;
2650 }
2651
2652 int
2653 ql_hw_check_health(qla_host_t *ha)
2654 {
2655         uint32_t val;
2656
2657         ha->hw.health_count++;
2658
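        /* throttle: only probe the hardware on every 1000th invocation */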
2659         if (ha->hw.health_count < 1000)
2660                 return 0;
2661
2662         ha->hw.health_count = 0;
2663
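        /*
         * The low 16 bits of Q8_ASIC_TEMPERATURE are treated as an alert
         * when they read 2 or 3 (per the check below).
         */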
2664         val = READ_REG32(ha, Q8_ASIC_TEMPERATURE);
2665
2666         if (((val & 0xFFFF) == 2) || ((val & 0xFFFF) == 3) ||
2667                 (QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE))) {
2668                 device_printf(ha->pci_dev, "%s: Temperature Alert [0x%08x]\n",
2669                         __func__, val);
2670                 return -1;
2671         }
2672
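        /*
         * Healthy firmware keeps changing Q8_FIRMWARE_HEARTBEAT; a value
         * unchanged since the previous poll indicates a firmware hang.
         */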
2673         val = READ_REG32(ha, Q8_FIRMWARE_HEARTBEAT);
2674
2675         if ((val != ha->hw.hbeat_value) &&
2676                 (!(QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE)))) {
2677                 ha->hw.hbeat_value = val;
2678                 return 0;
2679         }
2680         device_printf(ha->pci_dev, "%s: Heartbeat Failure [0x%08x]\n",
2681                 __func__, val);
2682
2683         return -1;
2684 }
2685
2686 static int
2687 qla_get_minidump_tmplt_size(qla_host_t *ha, uint32_t *size)
2688 {
2689         uint32_t                        err;
2690         device_t                        dev = ha->pci_dev;
2691         q80_config_md_templ_size_t      *md_size;
2692         q80_config_md_templ_size_rsp_t  *md_size_rsp;
2693
2694         md_size = (q80_config_md_templ_size_t *) ha->hw.mbox;
2695         bzero(md_size, sizeof(q80_config_md_templ_size_t));
2696
2697         md_size->opcode = Q8_MBX_GET_MINIDUMP_TMPLT_SIZE;
2698         md_size->count_version = (sizeof (q80_config_md_templ_size_t) >> 2);
2699         md_size->count_version |= Q8_MBX_CMD_VERSION;
2700
2701         if (qla_mbx_cmd(ha, (uint32_t *) md_size,
2702                 (sizeof(q80_config_md_templ_size_t) >> 2), ha->hw.mbox,
2703                 (sizeof(q80_config_md_templ_size_rsp_t) >> 2), 0)) {
2704
2705                 device_printf(dev, "%s: failed\n", __func__);
2706
2707                 return (-1);
2708         }
2709
2710         md_size_rsp = (q80_config_md_templ_size_rsp_t *) ha->hw.mbox;
2711
2712         err = Q8_MBX_RSP_STATUS(md_size_rsp->regcnt_status);
2713
2714         if (err) {
2715                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
2716                 return(-1);
2717         }
2718
2719         *size = md_size_rsp->templ_size;
2720
2721         return (0);
2722 }
2723
2724 static int
2725 qla_get_minidump_template(qla_host_t *ha)
2726 {
2727         uint32_t                        err;
2728         device_t                        dev = ha->pci_dev;
2729         q80_config_md_templ_cmd_t       *md_templ;
2730         q80_config_md_templ_cmd_rsp_t   *md_templ_rsp;
2731
2732         md_templ = (q80_config_md_templ_cmd_t *) ha->hw.mbox;
2733         bzero(md_templ, (sizeof (q80_config_md_templ_cmd_t)));
2734
2735         md_templ->opcode = Q8_MBX_GET_MINIDUMP_TMPLT;
2736         md_templ->count_version = ( sizeof(q80_config_md_templ_cmd_t) >> 2);
2737         md_templ->count_version |= Q8_MBX_CMD_VERSION;
2738
2739         md_templ->buf_addr = ha->hw.dma_buf.minidump.dma_addr;
2740         md_templ->buff_size = ha->hw.dma_buf.minidump.size;
2741
2742         if (qla_mbx_cmd(ha, (uint32_t *) md_templ,
2743                 (sizeof(q80_config_md_templ_cmd_t) >> 2),
2744                  ha->hw.mbox,
2745                 (sizeof(q80_config_md_templ_cmd_rsp_t) >> 2), 0)) {
2746
2747                 device_printf(dev, "%s: failed\n", __func__);
2748
2749                 return (-1);
2750         }
2751
2752         md_templ_rsp = (q80_config_md_templ_cmd_rsp_t *) ha->hw.mbox;
2753
2754         err = Q8_MBX_RSP_STATUS(md_templ_rsp->regcnt_status);
2755
2756         if (err) {
2757                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
2758                 return (-1);
2759         }
2760
2761         return (0);
2762
2763 }
2764
2765 static int
2766 qla_minidump_init(qla_host_t *ha)
2767 {
2768         int             ret;
2769         uint32_t        template_size = 0;
2770         device_t        dev = ha->pci_dev;
2771
2772         /*
2773          * Get Minidump Template Size
2774          */
2775         ret = qla_get_minidump_tmplt_size(ha, &template_size);
2776
2777         if (ret || (template_size == 0)) {
2778                 device_printf(dev, "%s: failed [%d, %d]\n", __func__, ret,
2779                         template_size);
2780                 return (-1);
2781         }
2782
2783         /*
2784          * Allocate Memory for Minidump Template
2785          */
2786
2787         ha->hw.dma_buf.minidump.alignment = 8;
2788         ha->hw.dma_buf.minidump.size = template_size;
2789
2790         if (ql_alloc_dmabuf(ha, &ha->hw.dma_buf.minidump)) {
2791
2792                 device_printf(dev, "%s: minidump dma alloc failed\n", __func__);
2793
2794                 return (-1);
2795         }
2796         ha->hw.dma_buf.flags.minidump = 1;
2797
2798         /*
2799          * Retrieve Minidump Template
2800          */
2801         ret = qla_get_minidump_template(ha);
2802
2803         if (ret) {
2804                 qla_minidump_free(ha);
2805         } else {
2806                 ha->hw.mdump_init = 1;
2807         }
2808
2809         return (ret);
2810 }
2811
2812
2813 static void
2814 qla_minidump_free(qla_host_t *ha)
2815 {
2816         ha->hw.mdump_init = 0;
2817         if (ha->hw.dma_buf.flags.minidump) {
2818                 ha->hw.dma_buf.flags.minidump = 0;
2819                 ql_free_dmabuf(ha, &ha->hw.dma_buf.minidump);
2820         }
2821         return;
2822 }
2823
2824 void
2825 ql_minidump(qla_host_t *ha)
2826 {
2827         uint32_t delay = 6000;
2828
2829         if (!ha->hw.mdump_init)
2830                 return;
2831
2832         if (!ha->hw.mdump_active)
2833                 return;
2834
2835         if (ha->hw.mdump_active == 1) {
2836                 ha->hw.mdump_start_seq_index = ql_stop_sequence(ha);
2837                 ha->hw.mdump_start = 1;
2838         }
2839
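        /* poll up to 6000 * 100ms (ten minutes) for the dump to complete */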
2840         while (delay-- && ha->hw.mdump_active) {
2841                 qla_mdelay(__func__, 100);
2842         }
2843         ha->hw.mdump_start = 0;
2844         ql_start_sequence(ha, ha->hw.mdump_start_seq_index);
2845
2846         return;
2847 }