/*
 * Copyright (c) 2013-2016 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: ql_hw.c
 * Author: David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 * Content: Contains hardware-dependent functions
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "ql_os.h"
#include "ql_hw.h"
#include "ql_def.h"
#include "ql_inline.h"
#include "ql_ver.h"
#include "ql_glbl.h"
#include "ql_dbg.h"
#include "ql_minidump.h"

/*
 * Static Functions
 */

static void qla_del_rcv_cntxt(qla_host_t *ha);
static int qla_init_rcv_cntxt(qla_host_t *ha);
static void qla_del_xmt_cntxt(qla_host_t *ha);
static int qla_init_xmt_cntxt(qla_host_t *ha);
static void qla_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx);
static int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
        uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause);
static int qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx,
        uint32_t num_intrs, uint32_t create);
static int qla_config_rss(qla_host_t *ha, uint16_t cntxt_id);
static int qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id,
        int tenable, int rcv);
static int qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode);
static int qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id);

static int qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd,
                uint8_t *hdr);
static int qla_hw_add_all_mcast(qla_host_t *ha);
static int qla_hw_del_all_mcast(qla_host_t *ha);
static int qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds);

static int qla_init_nic_func(qla_host_t *ha);
static int qla_stop_nic_func(qla_host_t *ha);
static int qla_query_fw_dcbx_caps(qla_host_t *ha);
static int qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits);
static int qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits);
static void qla_get_quick_stats(qla_host_t *ha);

static void ql_minidump_free(qla_host_t *ha);

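/*
 * Name: qla_sysctl_get_drvr_stats
 * Function: sysctl handler; writing 1 dumps the driver-maintained per-ring
 *      counters. The counters are cast to (void *) so that the %p
 *      conversion prints their values in hex.
 */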
static int
qla_sysctl_get_drvr_stats(SYSCTL_HANDLER_ARGS)
{
        int err = 0, ret;
        qla_host_t *ha;
        uint32_t i;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        if (ret == 1) {

                ha = (qla_host_t *)arg1;

                for (i = 0; i < ha->hw.num_sds_rings; i++)
                        device_printf(ha->pci_dev,
                                "%s: sds_ring[%d] = %p\n", __func__, i,
                                (void *)ha->hw.sds[i].intr_count);

                for (i = 0; i < ha->hw.num_tx_rings; i++)
                        device_printf(ha->pci_dev,
                                "%s: tx[%d] = %p\n", __func__, i,
                                (void *)ha->tx_ring[i].count);

                for (i = 0; i < ha->hw.num_rds_rings; i++)
                        device_printf(ha->pci_dev,
                                "%s: rds_ring[%d] = %p\n", __func__, i,
                                (void *)ha->hw.rds[i].count);

                device_printf(ha->pci_dev, "%s: lro_pkt_count = %p\n", __func__,
                        (void *)ha->lro_pkt_count);

                device_printf(ha->pci_dev, "%s: lro_bytes = %p\n", __func__,
                        (void *)ha->lro_bytes);

#ifdef QL_ENABLE_ISCSI_TLV
                device_printf(ha->pci_dev, "%s: iscsi_pkts = %p\n", __func__,
                        (void *)ha->hw.iscsi_pkt_count);
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */

        }
        return (err);
}

static int
qla_sysctl_get_quick_stats(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        if (ret == 1) {
                ha = (qla_host_t *)arg1;
                qla_get_quick_stats(ha);
        }
        return (err);
}

#ifdef QL_DBG

static void
qla_stop_pegs(qla_host_t *ha)
{
        uint32_t val = 1;

        ql_rdwr_indreg32(ha, Q8_CRB_PEG_0, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_1, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_2, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_3, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_4, &val, 0);
        device_printf(ha->pci_dev, "%s PEGS HALTED!!!!!\n", __func__);
}

static int
qla_sysctl_stop_pegs(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        if (ret == 1) {
                ha = (qla_host_t *)arg1;
                (void)QLA_LOCK(ha, __func__, 0);
                qla_stop_pegs(ha);
                QLA_UNLOCK(ha, __func__);
        }

        return (err);
}
#endif /* #ifdef QL_DBG */

static int
qla_validate_set_port_cfg_bit(uint32_t bits)
{
        if ((bits & 0xF) > 1)
                return (-1);

        if (((bits >> 4) & 0xF) > 2)
                return (-1);

        if (((bits >> 8) & 0xF) > 2)
                return (-1);

        return (0);
}

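/*
 * Name: qla_sysctl_port_cfg
 * Function: sysctl handler for getting/setting the port configuration.
 *      The value written encodes three 4-bit fields (see the help text in
 *      ql_hw_add_sysctls below): bits 0-3 DCBX enable, bits 4-7 pause type,
 *      bits 8-11 standard-pause direction. For example, 0x211 enables DCBX
 *      with standard pause in the receive direction only. A value that
 *      fails qla_validate_set_port_cfg_bit() simply reads back the current
 *      port configuration.
 */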
static int
qla_sysctl_port_cfg(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;
        uint32_t cfg_bits;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        if (qla_validate_set_port_cfg_bit((uint32_t)ret) == 0) {

                ha = (qla_host_t *)arg1;

                err = qla_get_port_config(ha, &cfg_bits);

                if (err)
                        goto qla_sysctl_set_port_cfg_exit;

                if (ret & 0x1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_DCBX_ENABLE;
                } else {
                        cfg_bits &= ~Q8_PORT_CFG_BITS_DCBX_ENABLE;
                }

                ret = ret >> 4;
                cfg_bits &= ~Q8_PORT_CFG_BITS_PAUSE_CFG_MASK;

                if ((ret & 0xF) == 0) {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_DISABLED;
                } else if ((ret & 0xF) == 1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_STD;
                } else {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_PPM;
                }

                ret = ret >> 4;
                cfg_bits &= ~Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK;

                if (ret == 0) {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT_RCV;
                } else if (ret == 1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT;
                } else {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_RCV;
                }

                err = qla_set_port_config(ha, cfg_bits);
        } else {
                ha = (qla_host_t *)arg1;

                err = qla_get_port_config(ha, &cfg_bits);
        }

qla_sysctl_set_port_cfg_exit:
        return (err);
}

/*
 * Name: ql_hw_add_sysctls
 * Function: Add P3Plus specific sysctls
 */
void
ql_hw_add_sysctls(qla_host_t *ha)
{
        device_t        dev;

        dev = ha->pci_dev;

        ha->hw.num_sds_rings = MAX_SDS_RINGS;
        ha->hw.num_rds_rings = MAX_RDS_RINGS;
        ha->hw.num_tx_rings = NUM_TX_RINGS;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_rds_rings", CTLFLAG_RD, &ha->hw.num_rds_rings,
                ha->hw.num_rds_rings, "Number of Rcv Descriptor Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_sds_rings", CTLFLAG_RD, &ha->hw.num_sds_rings,
                ha->hw.num_sds_rings, "Number of Status Descriptor Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->hw.num_tx_rings,
                ha->hw.num_tx_rings, "Number of Transmit Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "tx_ring_index", CTLFLAG_RW, &ha->txr_idx,
                ha->txr_idx, "Tx Ring Used");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "drvr_stats", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_get_drvr_stats, "I", "Driver Maintained Statistics");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "quick_stats", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_get_quick_stats, "I", "Quick Statistics");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "max_tx_segs", CTLFLAG_RD, &ha->hw.max_tx_segs,
                ha->hw.max_tx_segs, "Max # of Segments in a non-TSO pkt");

        ha->hw.sds_cidx_thres = 32;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "sds_cidx_thres", CTLFLAG_RW, &ha->hw.sds_cidx_thres,
                ha->hw.sds_cidx_thres,
                "Number of SDS entries to process before updating"
                " SDS Ring Consumer Index");

        ha->hw.rds_pidx_thres = 32;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "rds_pidx_thres", CTLFLAG_RW, &ha->hw.rds_pidx_thres,
                ha->hw.rds_pidx_thres,
                "Number of Rcv Rings Entries to post before updating"
                " RDS Ring Producer Index");

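        /*
         * Default: interrupt after 256 packets or 3 micro-seconds,
         * whichever comes first (bits 15:0 = max packets,
         * bits 31:16 = max micro-seconds to wait).
         */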
        ha->hw.rcv_intr_coalesce = (3 << 16) | 256;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "rcv_intr_coalesce", CTLFLAG_RW,
                &ha->hw.rcv_intr_coalesce,
                ha->hw.rcv_intr_coalesce,
                "Rcv Intr Coalescing Parameters\n"
                "\tbits 15:0 max packets\n"
                "\tbits 31:16 max micro-seconds to wait\n"
                "\tplease run\n"
                "\tifconfig <if> down && ifconfig <if> up\n"
                "\tto take effect\n");

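        /*
         * Default: interrupt after 64 packets or 64 micro-seconds.
         */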
        ha->hw.xmt_intr_coalesce = (64 << 16) | 64;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "xmt_intr_coalesce", CTLFLAG_RW,
                &ha->hw.xmt_intr_coalesce,
                ha->hw.xmt_intr_coalesce,
                "Xmt Intr Coalescing Parameters\n"
                "\tbits 15:0 max packets\n"
                "\tbits 31:16 max micro-seconds to wait\n"
                "\tplease run\n"
                "\tifconfig <if> down && ifconfig <if> up\n"
                "\tto take effect\n");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "port_cfg", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_port_cfg, "I",
                        "Set Port Configuration if the value is valid per the"
                        " bit fields below; otherwise Get Port Configuration\n"
                        "\tBits 0-3 : 1 = DCBX Enable; 0 = DCBX Disable\n"
                        "\tBits 4-7 : 0 = no pause; 1 = std; 2 = ppm\n"
                        "\tBits 8-11: std pause cfg; 0 = xmt and rcv;"
                        " 1 = xmt only; 2 = rcv only\n"
                );

        ha->hw.enable_9kb = 1;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "enable_9kb", CTLFLAG_RW, &ha->hw.enable_9kb,
                ha->hw.enable_9kb, "Enable 9Kbyte Buffers when MTU = 9000");

        ha->hw.mdump_active = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "minidump_active", CTLFLAG_RW, &ha->hw.mdump_active,
                ha->hw.mdump_active,
                "Minidump retrieval is Active");

        ha->hw.mdump_done = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "mdump_done", CTLFLAG_RW,
                &ha->hw.mdump_done, ha->hw.mdump_done,
                "Minidump has been done and is available for retrieval");

        ha->hw.mdump_capture_mask = 0xF;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "minidump_capture_mask", CTLFLAG_RW,
                &ha->hw.mdump_capture_mask, ha->hw.mdump_capture_mask,
                "Minidump capture mask");
#ifdef QL_DBG

        ha->err_inject = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "err_inject",
                CTLFLAG_RW, &ha->err_inject, ha->err_inject,
                "Error to be injected\n"
                "\t\t\t 0: No Errors\n"
                "\t\t\t 1: rcv: rxb struct invalid\n"
                "\t\t\t 2: rcv: mp == NULL\n"
                "\t\t\t 3: lro: rxb struct invalid\n"
                "\t\t\t 4: lro: mp == NULL\n"
                "\t\t\t 5: rcv: num handles invalid\n"
                "\t\t\t 6: reg: indirect reg rd_wr failure\n"
                "\t\t\t 7: ocm: offchip memory rd_wr failure\n"
                "\t\t\t 8: mbx: mailbox command failure\n"
                "\t\t\t 9: heartbeat failure\n"
                "\t\t\t A: temperature failure\n");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "peg_stop", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_stop_pegs, "I", "Peg Stop");

#endif /* #ifdef QL_DBG */

        ha->hw.user_pri_nic = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "user_pri_nic", CTLFLAG_RW, &ha->hw.user_pri_nic,
                ha->hw.user_pri_nic,
                "VLAN Tag User Priority for Normal Ethernet Packets");

        ha->hw.user_pri_iscsi = 4;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "user_pri_iscsi", CTLFLAG_RW, &ha->hw.user_pri_iscsi,
                ha->hw.user_pri_iscsi,
                "VLAN Tag User Priority for iSCSI Packets");

}

void
ql_hw_link_status(qla_host_t *ha)
{
        device_printf(ha->pci_dev, "cable_oui\t\t 0x%08x\n", ha->hw.cable_oui);

        if (ha->hw.link_up) {
                device_printf(ha->pci_dev, "link Up\n");
        } else {
                device_printf(ha->pci_dev, "link Down\n");
        }

        if (ha->hw.flags.fduplex) {
                device_printf(ha->pci_dev, "Full Duplex\n");
        } else {
                device_printf(ha->pci_dev, "Half Duplex\n");
        }

        if (ha->hw.flags.autoneg) {
                device_printf(ha->pci_dev, "Auto Negotiation Enabled\n");
        } else {
                device_printf(ha->pci_dev, "Auto Negotiation Disabled\n");
        }

        switch (ha->hw.link_speed) {
        case 0x710:
                device_printf(ha->pci_dev, "link speed\t\t 10Gbps\n");
                break;

        case 0x3E8:
                device_printf(ha->pci_dev, "link speed\t\t 1Gbps\n");
                break;

        case 0x64:
                device_printf(ha->pci_dev, "link speed\t\t 100Mbps\n");
                break;

        default:
                device_printf(ha->pci_dev, "link speed\t\t Unknown\n");
                break;
        }

        switch (ha->hw.module_type) {

        case 0x01:
                device_printf(ha->pci_dev, "Module Type 10GBase-LRM\n");
                break;

        case 0x02:
                device_printf(ha->pci_dev, "Module Type 10GBase-LR\n");
                break;

        case 0x03:
                device_printf(ha->pci_dev, "Module Type 10GBase-SR\n");
                break;

        case 0x04:
                device_printf(ha->pci_dev,
                        "Module Type 10GE Passive Copper(Compliant)[%d m]\n",
                        ha->hw.cable_length);
                break;

        case 0x05:
                device_printf(ha->pci_dev, "Module Type 10GE Active"
                        " Limiting Copper(Compliant)[%d m]\n",
                        ha->hw.cable_length);
                break;

        case 0x06:
                device_printf(ha->pci_dev,
                        "Module Type 10GE Passive Copper"
                        " (Legacy, Best Effort)[%d m]\n",
                        ha->hw.cable_length);
                break;

        case 0x07:
                device_printf(ha->pci_dev, "Module Type 1000Base-SX\n");
                break;

        case 0x08:
                device_printf(ha->pci_dev, "Module Type 1000Base-LX\n");
                break;

        case 0x09:
                device_printf(ha->pci_dev, "Module Type 1000Base-CX\n");
                break;

        case 0x0A:
                device_printf(ha->pci_dev, "Module Type 1000Base-T\n");
                break;

        case 0x0B:
                device_printf(ha->pci_dev, "Module Type 1GE Passive Copper"
                        " (Legacy, Best Effort)\n");
                break;

        default:
                device_printf(ha->pci_dev, "Unknown Module Type 0x%x\n",
                        ha->hw.module_type);
                break;
        }

        if (ha->hw.link_faults == 1)
                device_printf(ha->pci_dev, "SFP Power Fault\n");
}

/*
 * Name: ql_free_dma
 * Function: Frees the DMA'able memory allocated in ql_alloc_dma()
 */
void
ql_free_dma(qla_host_t *ha)
{
        uint32_t i;

        if (ha->hw.dma_buf.flags.sds_ring) {
                for (i = 0; i < ha->hw.num_sds_rings; i++) {
                        ql_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]);
                }
                ha->hw.dma_buf.flags.sds_ring = 0;
        }

        if (ha->hw.dma_buf.flags.rds_ring) {
                for (i = 0; i < ha->hw.num_rds_rings; i++) {
                        ql_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]);
                }
                ha->hw.dma_buf.flags.rds_ring = 0;
        }

        if (ha->hw.dma_buf.flags.tx_ring) {
                ql_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring);
                ha->hw.dma_buf.flags.tx_ring = 0;
        }
        ql_minidump_free(ha);
}

/*
 * Name: ql_alloc_dma
 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
 */
int
ql_alloc_dma(qla_host_t *ha)
{
        device_t                dev;
        uint32_t                i, j, size, tx_ring_size;
        qla_hw_t                *hw;
        qla_hw_tx_cntxt_t       *tx_cntxt;
        uint8_t                 *vaddr;
        bus_addr_t              paddr;

        dev = ha->pci_dev;

        QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

        hw = &ha->hw;
        /*
         * Allocate Transmit Ring
         */
        tx_ring_size = (sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS);
        size = (tx_ring_size * ha->hw.num_tx_rings);

        hw->dma_buf.tx_ring.alignment = 8;
        hw->dma_buf.tx_ring.size = size + PAGE_SIZE;

        if (ql_alloc_dmabuf(ha, &hw->dma_buf.tx_ring)) {
                device_printf(dev, "%s: tx ring alloc failed\n", __func__);
                goto ql_alloc_dma_exit;
        }

        vaddr = (uint8_t *)hw->dma_buf.tx_ring.dma_b;
        paddr = hw->dma_buf.tx_ring.dma_addr;

        for (i = 0; i < ha->hw.num_tx_rings; i++) {
                tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

                tx_cntxt->tx_ring_base = (q80_tx_cmd_t *)vaddr;
                tx_cntxt->tx_ring_paddr = paddr;

                vaddr += tx_ring_size;
                paddr += tx_ring_size;
        }

        for (i = 0; i < ha->hw.num_tx_rings; i++) {
                tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

                tx_cntxt->tx_cons = (uint32_t *)vaddr;
                tx_cntxt->tx_cons_paddr = paddr;

                vaddr += sizeof (uint32_t);
                paddr += sizeof (uint32_t);
        }

        ha->hw.dma_buf.flags.tx_ring = 1;

        QL_DPRINT2(ha, (dev, "%s: tx_ring phys %p virt %p\n",
                __func__, (void *)(hw->dma_buf.tx_ring.dma_addr),
                hw->dma_buf.tx_ring.dma_b));
        /*
         * Allocate Receive Descriptor Rings
         */

        for (i = 0; i < hw->num_rds_rings; i++) {

                hw->dma_buf.rds_ring[i].alignment = 8;
                hw->dma_buf.rds_ring[i].size =
                        (sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS;

                if (ql_alloc_dmabuf(ha, &hw->dma_buf.rds_ring[i])) {
                        device_printf(dev, "%s: rds ring[%d] alloc failed\n",
                                __func__, i);

                        for (j = 0; j < i; j++)
                                ql_free_dmabuf(ha, &hw->dma_buf.rds_ring[j]);

                        goto ql_alloc_dma_exit;
                }
                QL_DPRINT4(ha, (dev, "%s: rx_ring[%d] phys %p virt %p\n",
                        __func__, i, (void *)(hw->dma_buf.rds_ring[i].dma_addr),
                        hw->dma_buf.rds_ring[i].dma_b));
        }

        hw->dma_buf.flags.rds_ring = 1;

        /*
         * Allocate Status Descriptor Rings
         */

        for (i = 0; i < hw->num_sds_rings; i++) {
                hw->dma_buf.sds_ring[i].alignment = 8;
                hw->dma_buf.sds_ring[i].size =
                        (sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS;

                if (ql_alloc_dmabuf(ha, &hw->dma_buf.sds_ring[i])) {
                        device_printf(dev, "%s: sds ring alloc failed\n",
                                __func__);

                        for (j = 0; j < i; j++)
                                ql_free_dmabuf(ha, &hw->dma_buf.sds_ring[j]);

                        goto ql_alloc_dma_exit;
                }
                QL_DPRINT4(ha, (dev, "%s: sds_ring[%d] phys %p virt %p\n",
                        __func__, i,
                        (void *)(hw->dma_buf.sds_ring[i].dma_addr),
                        hw->dma_buf.sds_ring[i].dma_b));
        }
        for (i = 0; i < hw->num_sds_rings; i++) {
                hw->sds[i].sds_ring_base =
                        (q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b;
        }

        hw->dma_buf.flags.sds_ring = 1;

        return 0;

ql_alloc_dma_exit:
        ql_free_dma(ha);
        return -1;
}

#define Q8_MBX_MSEC_DELAY       5000

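/*
 * Name: qla_mbx_cmd
 * Function: Issue a mailbox command and wait for its completion.
 *      Writes n_hmbox words into the host mailbox registers, rings the
 *      host mailbox doorbell, polls the firmware mailbox control register
 *      for completion and copies n_fwmbox response words back out.
 *      When no_pause is set, the waits use busy DELAY()s instead of
 *      sleeping for up to Q8_MBX_MSEC_DELAY milliseconds. Any failure
 *      flags the adapter for recovery via qla_initiate_recovery.
 */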
static int
qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
        uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause)
{
        uint32_t i;
        uint32_t data;
        int ret = 0;

        if (QL_ERR_INJECT(ha, INJCT_MBX_CMD_FAILURE)) {
                ret = -3;
                ha->qla_initiate_recovery = 1;
                goto exit_qla_mbx_cmd;
        }

        if (no_pause)
                i = 1000;
        else
                i = Q8_MBX_MSEC_DELAY;

        while (i) {
                data = READ_REG32(ha, Q8_HOST_MBOX_CNTRL);
                if (data == 0)
                        break;
                if (no_pause) {
                        DELAY(1000);
                } else {
                        qla_mdelay(__func__, 1);
                }
                i--;
        }

        if (i == 0) {
                device_printf(ha->pci_dev, "%s: host_mbx_cntrl 0x%08x\n",
                        __func__, data);
                ret = -1;
                ha->qla_initiate_recovery = 1;
                goto exit_qla_mbx_cmd;
        }

        for (i = 0; i < n_hmbox; i++) {
                WRITE_REG32(ha, (Q8_HOST_MBOX0 + (i << 2)), *h_mbox);
                h_mbox++;
        }

        WRITE_REG32(ha, Q8_HOST_MBOX_CNTRL, 0x1);

        i = Q8_MBX_MSEC_DELAY;
        while (i) {
                data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);

                if ((data & 0x3) == 1) {
                        data = READ_REG32(ha, Q8_FW_MBOX0);
                        if ((data & 0xF000) != 0x8000)
                                break;
                }
                if (no_pause) {
                        DELAY(1000);
                } else {
                        qla_mdelay(__func__, 1);
                }
                i--;
        }
        if (i == 0) {
                device_printf(ha->pci_dev, "%s: fw_mbx_cntrl 0x%08x\n",
                        __func__, data);
                ret = -2;
                ha->qla_initiate_recovery = 1;
                goto exit_qla_mbx_cmd;
        }

        for (i = 0; i < n_fwmbox; i++) {
                *fw_mbox++ = READ_REG32(ha, (Q8_FW_MBOX0 + (i << 2)));
        }

        WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
        WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);

exit_qla_mbx_cmd:
        return (ret);
}

int
qla_get_nic_partition(qla_host_t *ha, uint32_t *supports_9kb,
        uint32_t *num_rcvq)
{
        uint32_t *mbox, err;
        device_t dev = ha->pci_dev;

        bzero(ha->hw.mbox, (sizeof (uint32_t) * Q8_NUM_MBOX));

        mbox = ha->hw.mbox;

        mbox[0] = Q8_MBX_GET_NIC_PARTITION | (0x2 << 16) | (0x2 << 29);

        if (qla_mbx_cmd(ha, mbox, 2, mbox, 19, 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }
        err = mbox[0] >> 25;

        if (supports_9kb != NULL) {
                if (mbox[16] & 0x80) /* bit 7 of mbox 16 */
                        *supports_9kb = 1;
                else
                        *supports_9kb = 0;
        }

        if (num_rcvq != NULL)
                *num_rcvq = ((mbox[6] >> 16) & 0xFFFF);

        if ((err != 1) && (err != 0)) {
                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
                return (-1);
        }
        return 0;
}

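/*
 * Name: qla_config_intr_cntxt
 * Function: Creates or deletes num_intrs interrupt contexts, starting at
 *      start_idx, via the Q8_MBX_CONFIG_INTR mailbox command. On a
 *      successful create, the firmware-assigned interrupt ids and sources
 *      are recorded in ha->hw.intr_id[] and ha->hw.intr_src[].
 */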
static int
qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx, uint32_t num_intrs,
        uint32_t create)
{
        uint32_t i, err;
        device_t dev = ha->pci_dev;
        q80_config_intr_t *c_intr;
        q80_config_intr_rsp_t *c_intr_rsp;

        c_intr = (q80_config_intr_t *)ha->hw.mbox;
        bzero(c_intr, (sizeof (q80_config_intr_t)));

        c_intr->opcode = Q8_MBX_CONFIG_INTR;

        c_intr->count_version = (sizeof (q80_config_intr_t) >> 2);
        c_intr->count_version |= Q8_MBX_CMD_VERSION;

        c_intr->nentries = num_intrs;

        for (i = 0; i < num_intrs; i++) {
                if (create) {
                        c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_CREATE;
                        c_intr->intr[i].msix_index = start_idx + 1 + i;
                } else {
                        c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_DELETE;
                        c_intr->intr[i].msix_index =
                                ha->hw.intr_id[(start_idx + i)];
                }

                c_intr->intr[i].cmd_type |= Q8_MBX_CONFIG_INTR_TYPE_MSI_X;
        }

        if (qla_mbx_cmd(ha, (uint32_t *)c_intr,
                (sizeof (q80_config_intr_t) >> 2),
                ha->hw.mbox, (sizeof (q80_config_intr_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }

        c_intr_rsp = (q80_config_intr_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(c_intr_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed1 [0x%08x, %d]\n", __func__, err,
                        c_intr_rsp->nentries);

                for (i = 0; i < c_intr_rsp->nentries; i++) {
                        device_printf(dev, "%s: [%d]:[0x%x 0x%x 0x%x]\n",
                                __func__, i,
                                c_intr_rsp->intr[i].status,
                                c_intr_rsp->intr[i].intr_id,
                                c_intr_rsp->intr[i].intr_src);
                }

                return (-1);
        }

        for (i = 0; ((i < num_intrs) && create); i++) {
                if (!c_intr_rsp->intr[i].status) {
                        ha->hw.intr_id[(start_idx + i)] =
                                c_intr_rsp->intr[i].intr_id;
                        ha->hw.intr_src[(start_idx + i)] =
                                c_intr_rsp->intr[i].intr_src;
                }
        }

        return (0);
}

/*
 * Name: qla_config_rss
 * Function: Configure RSS for the context/interface.
 */
static const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL,
                        0x8030f20c77cb2da3ULL,
                        0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
                        0x255b0ec26d5a56daULL };

static int
qla_config_rss(qla_host_t *ha, uint16_t cntxt_id)
{
        q80_config_rss_t        *c_rss;
        q80_config_rss_rsp_t    *c_rss_rsp;
        uint32_t                err, i;
        device_t                dev = ha->pci_dev;

        c_rss = (q80_config_rss_t *)ha->hw.mbox;
        bzero(c_rss, (sizeof (q80_config_rss_t)));

        c_rss->opcode = Q8_MBX_CONFIG_RSS;

        c_rss->count_version = (sizeof (q80_config_rss_t) >> 2);
        c_rss->count_version |= Q8_MBX_CMD_VERSION;

        c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP_IP |
                                Q8_MBX_RSS_HASH_TYPE_IPV6_TCP_IP);
        //c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP |
        //                      Q8_MBX_RSS_HASH_TYPE_IPV6_TCP);

        c_rss->flags = Q8_MBX_RSS_FLAGS_ENABLE_RSS;
        c_rss->flags |= Q8_MBX_RSS_FLAGS_USE_IND_TABLE;

        c_rss->indtbl_mask = Q8_MBX_RSS_INDTBL_MASK;

        c_rss->indtbl_mask |= Q8_MBX_RSS_FLAGS_MULTI_RSS_VALID;
        c_rss->flags |= Q8_MBX_RSS_FLAGS_TYPE_CRSS;

        c_rss->cntxt_id = cntxt_id;

        for (i = 0; i < 5; i++) {
                c_rss->rss_key[i] = rss_key[i];
        }

        if (qla_mbx_cmd(ha, (uint32_t *)c_rss,
                (sizeof (q80_config_rss_t) >> 2),
                ha->hw.mbox, (sizeof(q80_config_rss_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }
        c_rss_rsp = (q80_config_rss_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(c_rss_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
                return (-1);
        }
        return 0;
}

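/*
 * Name: qla_set_rss_ind_table
 * Function: Programs entries [start_idx, start_idx + count - 1] of the
 *      RSS indirection table for the given context. count may not exceed
 *      Q8_RSS_IND_TBL_SIZE and the last index may not exceed
 *      Q8_RSS_IND_TBL_MAX_IDX.
 */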
static int
qla_set_rss_ind_table(qla_host_t *ha, uint32_t start_idx, uint32_t count,
        uint16_t cntxt_id, uint8_t *ind_table)
{
        q80_config_rss_ind_table_t      *c_rss_ind;
        q80_config_rss_ind_table_rsp_t  *c_rss_ind_rsp;
        uint32_t                        err;
        device_t                        dev = ha->pci_dev;

        if ((count > Q8_RSS_IND_TBL_SIZE) ||
                ((start_idx + count - 1) > Q8_RSS_IND_TBL_MAX_IDX)) {
                device_printf(dev, "%s: illegal count [%d, %d]\n", __func__,
                        start_idx, count);
                return (-1);
        }

        c_rss_ind = (q80_config_rss_ind_table_t *)ha->hw.mbox;
        bzero(c_rss_ind, sizeof (q80_config_rss_ind_table_t));

        c_rss_ind->opcode = Q8_MBX_CONFIG_RSS_TABLE;
        c_rss_ind->count_version = (sizeof (q80_config_rss_ind_table_t) >> 2);
        c_rss_ind->count_version |= Q8_MBX_CMD_VERSION;

        c_rss_ind->start_idx = start_idx;
        c_rss_ind->end_idx = start_idx + count - 1;
        c_rss_ind->cntxt_id = cntxt_id;
        bcopy(ind_table, c_rss_ind->ind_table, count);

        if (qla_mbx_cmd(ha, (uint32_t *)c_rss_ind,
                (sizeof (q80_config_rss_ind_table_t) >> 2), ha->hw.mbox,
                (sizeof(q80_config_rss_ind_table_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }

        c_rss_ind_rsp = (q80_config_rss_ind_table_rsp_t *)ha->hw.mbox;
        err = Q8_MBX_RSP_STATUS(c_rss_ind_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
                return (-1);
        }
        return 0;
}

/*
 * Name: qla_config_intr_coalesce
 * Function: Configure Interrupt Coalescing.
 */
static int
qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable,
        int rcv)
{
        q80_config_intr_coalesc_t       *intrc;
        q80_config_intr_coalesc_rsp_t   *intrc_rsp;
        uint32_t                        err, i;
        device_t                        dev = ha->pci_dev;

        intrc = (q80_config_intr_coalesc_t *)ha->hw.mbox;
        bzero(intrc, (sizeof (q80_config_intr_coalesc_t)));

        intrc->opcode = Q8_MBX_CONFIG_INTR_COALESCE;
        intrc->count_version = (sizeof (q80_config_intr_coalesc_t) >> 2);
        intrc->count_version |= Q8_MBX_CMD_VERSION;

        if (rcv) {
                intrc->flags = Q8_MBX_INTRC_FLAGS_RCV;
                intrc->max_pkts = ha->hw.rcv_intr_coalesce & 0xFFFF;
                intrc->max_mswait = (ha->hw.rcv_intr_coalesce >> 16) & 0xFFFF;
        } else {
                intrc->flags = Q8_MBX_INTRC_FLAGS_XMT;
                intrc->max_pkts = ha->hw.xmt_intr_coalesce & 0xFFFF;
                intrc->max_mswait = (ha->hw.xmt_intr_coalesce >> 16) & 0xFFFF;
        }

        intrc->cntxt_id = cntxt_id;

        if (tenable) {
                intrc->flags |= Q8_MBX_INTRC_FLAGS_PERIODIC;
                intrc->timer_type = Q8_MBX_INTRC_TIMER_PERIODIC;

                for (i = 0; i < ha->hw.num_sds_rings; i++) {
                        intrc->sds_ring_mask |= (1 << i);
                }
                intrc->ms_timeout = 1000;
        }

        if (qla_mbx_cmd(ha, (uint32_t *)intrc,
                (sizeof (q80_config_intr_coalesc_t) >> 2),
                ha->hw.mbox, (sizeof(q80_config_intr_coalesc_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }
        intrc_rsp = (q80_config_intr_coalesc_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(intrc_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
                return (-1);
        }

        return 0;
}

/*
 * Name: qla_config_mac_addr
 * Function: binds a MAC address to the context/interface.
 *      Can be unicast, multicast or broadcast.
 */
static int
qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac)
{
        q80_config_mac_addr_t           *cmac;
        q80_config_mac_addr_rsp_t       *cmac_rsp;
        uint32_t                        err;
        device_t                        dev = ha->pci_dev;

        cmac = (q80_config_mac_addr_t *)ha->hw.mbox;
        bzero(cmac, (sizeof (q80_config_mac_addr_t)));

        cmac->opcode = Q8_MBX_CONFIG_MAC_ADDR;
        cmac->count_version = sizeof (q80_config_mac_addr_t) >> 2;
        cmac->count_version |= Q8_MBX_CMD_VERSION;

        if (add_mac)
                cmac->cmd = Q8_MBX_CMAC_CMD_ADD_MAC_ADDR;
        else
                cmac->cmd = Q8_MBX_CMAC_CMD_DEL_MAC_ADDR;

        cmac->cmd |= Q8_MBX_CMAC_CMD_CAM_INGRESS;

        cmac->nmac_entries = 1;
        cmac->cntxt_id = ha->hw.rcv_cntxt_id;
        bcopy(mac_addr, cmac->mac_addr[0].addr, 6);

        if (qla_mbx_cmd(ha, (uint32_t *)cmac,
                (sizeof (q80_config_mac_addr_t) >> 2),
                ha->hw.mbox, (sizeof(q80_config_mac_addr_rsp_t) >> 2), 1)) {
                device_printf(dev, "%s: %s failed0\n", __func__,
                        (add_mac ? "Add" : "Del"));
                return (-1);
        }
        cmac_rsp = (q80_config_mac_addr_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(cmac_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: %s "
                        "%02x:%02x:%02x:%02x:%02x:%02x failed1 [0x%08x]\n",
                        __func__, (add_mac ? "Add" : "Del"),
                        mac_addr[0], mac_addr[1], mac_addr[2],
                        mac_addr[3], mac_addr[4], mac_addr[5], err);
                return (-1);
        }

        return 0;
}

/*
 * Name: qla_set_mac_rcv_mode
 * Function: Enable/Disable AllMulticast and Promiscuous Modes.
 */
static int
qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode)
{
        q80_config_mac_rcv_mode_t       *rcv_mode;
        uint32_t                        err;
        q80_config_mac_rcv_mode_rsp_t   *rcv_mode_rsp;
        device_t                        dev = ha->pci_dev;

        rcv_mode = (q80_config_mac_rcv_mode_t *)ha->hw.mbox;
        bzero(rcv_mode, (sizeof (q80_config_mac_rcv_mode_t)));

        rcv_mode->opcode = Q8_MBX_CONFIG_MAC_RX_MODE;
        rcv_mode->count_version = sizeof (q80_config_mac_rcv_mode_t) >> 2;
        rcv_mode->count_version |= Q8_MBX_CMD_VERSION;

        rcv_mode->mode = mode;

        rcv_mode->cntxt_id = ha->hw.rcv_cntxt_id;

        if (qla_mbx_cmd(ha, (uint32_t *)rcv_mode,
                (sizeof (q80_config_mac_rcv_mode_t) >> 2),
                ha->hw.mbox, (sizeof(q80_config_mac_rcv_mode_rsp_t) >> 2), 1)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }
        rcv_mode_rsp = (q80_config_mac_rcv_mode_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(rcv_mode_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
                return (-1);
        }

        return 0;
}

int
ql_set_promisc(qla_host_t *ha)
{
        int ret;

        ha->hw.mac_rcv_mode |= Q8_MBX_MAC_RCV_PROMISC_ENABLE;
        ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
        return (ret);
}

void
qla_reset_promisc(qla_host_t *ha)
{
        ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_RCV_PROMISC_ENABLE;
        (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
}

int
ql_set_allmulti(qla_host_t *ha)
{
        int ret;

        ha->hw.mac_rcv_mode |= Q8_MBX_MAC_ALL_MULTI_ENABLE;
        ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
        return (ret);
}

void
qla_reset_allmulti(qla_host_t *ha)
{
        ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_ALL_MULTI_ENABLE;
        (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
}

/*
 * Name: ql_set_max_mtu
 * Function:
 *      Sets the maximum transfer unit size for the specified rcv context.
 */
int
ql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id)
{
        device_t                dev;
        q80_set_max_mtu_t       *max_mtu;
        q80_set_max_mtu_rsp_t   *max_mtu_rsp;
        uint32_t                err;

        dev = ha->pci_dev;

        max_mtu = (q80_set_max_mtu_t *)ha->hw.mbox;
        bzero(max_mtu, (sizeof (q80_set_max_mtu_t)));

        max_mtu->opcode = Q8_MBX_SET_MAX_MTU;
        max_mtu->count_version = (sizeof (q80_set_max_mtu_t) >> 2);
        max_mtu->count_version |= Q8_MBX_CMD_VERSION;

        max_mtu->cntxt_id = cntxt_id;
        max_mtu->mtu = mtu;

        if (qla_mbx_cmd(ha, (uint32_t *)max_mtu,
                (sizeof (q80_set_max_mtu_t) >> 2),
                ha->hw.mbox, (sizeof (q80_set_max_mtu_rsp_t) >> 2), 1)) {
                device_printf(dev, "%s: failed\n", __func__);
                return -1;
        }

        max_mtu_rsp = (q80_set_max_mtu_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(max_mtu_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
        }

        return 0;
}

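/*
 * Name: qla_link_event_req
 * Function: Asks the firmware to deliver asynchronous link-event
 *      notifications for the given receive context.
 */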
static int
qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id)
{
        device_t                dev;
        q80_link_event_t        *lnk;
        q80_link_event_rsp_t    *lnk_rsp;
        uint32_t                err;

        dev = ha->pci_dev;

        lnk = (q80_link_event_t *)ha->hw.mbox;
        bzero(lnk, (sizeof (q80_link_event_t)));

        lnk->opcode = Q8_MBX_LINK_EVENT_REQ;
        lnk->count_version = (sizeof (q80_link_event_t) >> 2);
        lnk->count_version |= Q8_MBX_CMD_VERSION;

        lnk->cntxt_id = cntxt_id;
        lnk->cmd = Q8_LINK_EVENT_CMD_ENABLE_ASYNC;

        if (qla_mbx_cmd(ha, (uint32_t *)lnk, (sizeof (q80_link_event_t) >> 2),
                ha->hw.mbox, (sizeof (q80_link_event_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed\n", __func__);
                return -1;
        }

        lnk_rsp = (q80_link_event_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(lnk_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
        }

        return 0;
}

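/*
 * Name: qla_config_fw_lro
 * Function: Enables firmware LRO for IPv4 and IPv6 flows (without
 *      destination IP checks) on the given receive context.
 */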
static int
qla_config_fw_lro(qla_host_t *ha, uint16_t cntxt_id)
{
        device_t                dev;
        q80_config_fw_lro_t     *fw_lro;
        q80_config_fw_lro_rsp_t *fw_lro_rsp;
        uint32_t                err;

        dev = ha->pci_dev;

        fw_lro = (q80_config_fw_lro_t *)ha->hw.mbox;
        bzero(fw_lro, sizeof(q80_config_fw_lro_t));

        fw_lro->opcode = Q8_MBX_CONFIG_FW_LRO;
        fw_lro->count_version = (sizeof (q80_config_fw_lro_t) >> 2);
        fw_lro->count_version |= Q8_MBX_CMD_VERSION;

        fw_lro->flags |= Q8_MBX_FW_LRO_IPV4 | Q8_MBX_FW_LRO_IPV4_WO_DST_IP_CHK;
        fw_lro->flags |= Q8_MBX_FW_LRO_IPV6 | Q8_MBX_FW_LRO_IPV6_WO_DST_IP_CHK;

        fw_lro->cntxt_id = cntxt_id;

        if (qla_mbx_cmd(ha, (uint32_t *)fw_lro,
                (sizeof (q80_config_fw_lro_t) >> 2),
                ha->hw.mbox, (sizeof (q80_config_fw_lro_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed\n", __func__);
                return -1;
        }

        fw_lro_rsp = (q80_config_fw_lro_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(fw_lro_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
        }

        return 0;
}

static void
qla_xmt_stats(qla_host_t *ha, q80_xmt_stats_t *xstat, int i)
{
        device_t dev = ha->pci_dev;

        if (i < ha->hw.num_tx_rings) {
                device_printf(dev, "%s[%d]: total_bytes\t\t%" PRIu64 "\n",
                        __func__, i, xstat->total_bytes);
                device_printf(dev, "%s[%d]: total_pkts\t\t%" PRIu64 "\n",
                        __func__, i, xstat->total_pkts);
                device_printf(dev, "%s[%d]: errors\t\t%" PRIu64 "\n",
                        __func__, i, xstat->errors);
                device_printf(dev, "%s[%d]: pkts_dropped\t%" PRIu64 "\n",
                        __func__, i, xstat->pkts_dropped);
                device_printf(dev, "%s[%d]: switch_pkts\t\t%" PRIu64 "\n",
                        __func__, i, xstat->switch_pkts);
                device_printf(dev, "%s[%d]: num_buffers\t\t%" PRIu64 "\n",
                        __func__, i, xstat->num_buffers);
        } else {
                device_printf(dev, "%s: total_bytes\t\t\t%" PRIu64 "\n",
                        __func__, xstat->total_bytes);
                device_printf(dev, "%s: total_pkts\t\t\t%" PRIu64 "\n",
                        __func__, xstat->total_pkts);
                device_printf(dev, "%s: errors\t\t\t%" PRIu64 "\n",
                        __func__, xstat->errors);
                device_printf(dev, "%s: pkts_dropped\t\t\t%" PRIu64 "\n",
                        __func__, xstat->pkts_dropped);
                device_printf(dev, "%s: switch_pkts\t\t\t%" PRIu64 "\n",
                        __func__, xstat->switch_pkts);
                device_printf(dev, "%s: num_buffers\t\t\t%" PRIu64 "\n",
                        __func__, xstat->num_buffers);
        }
}

static void
qla_rcv_stats(qla_host_t *ha, q80_rcv_stats_t *rstat)
{
        device_t dev = ha->pci_dev;

        device_printf(dev, "%s: total_bytes\t\t\t%" PRIu64 "\n", __func__,
                rstat->total_bytes);
        device_printf(dev, "%s: total_pkts\t\t\t%" PRIu64 "\n", __func__,
                rstat->total_pkts);
        device_printf(dev, "%s: lro_pkt_count\t\t%" PRIu64 "\n", __func__,
                rstat->lro_pkt_count);
        device_printf(dev, "%s: sw_pkt_count\t\t\t%" PRIu64 "\n", __func__,
                rstat->sw_pkt_count);
        device_printf(dev, "%s: ip_chksum_err\t\t%" PRIu64 "\n", __func__,
                rstat->ip_chksum_err);
        device_printf(dev, "%s: pkts_wo_acntxts\t\t%" PRIu64 "\n", __func__,
                rstat->pkts_wo_acntxts);
        device_printf(dev, "%s: pkts_dropped_no_sds_card\t%" PRIu64 "\n",
                __func__, rstat->pkts_dropped_no_sds_card);
        device_printf(dev, "%s: pkts_dropped_no_sds_host\t%" PRIu64 "\n",
                __func__, rstat->pkts_dropped_no_sds_host);
        device_printf(dev, "%s: oversized_pkts\t\t%" PRIu64 "\n", __func__,
                rstat->oversized_pkts);
        device_printf(dev, "%s: pkts_dropped_no_rds\t\t%" PRIu64 "\n",
                __func__, rstat->pkts_dropped_no_rds);
        device_printf(dev, "%s: unxpctd_mcast_pkts\t\t%" PRIu64 "\n",
                __func__, rstat->unxpctd_mcast_pkts);
        device_printf(dev, "%s: re1_fbq_error\t\t%" PRIu64 "\n", __func__,
                rstat->re1_fbq_error);
        device_printf(dev, "%s: invalid_mac_addr\t\t%" PRIu64 "\n", __func__,
                rstat->invalid_mac_addr);
        device_printf(dev, "%s: rds_prime_trys\t\t%" PRIu64 "\n", __func__,
                rstat->rds_prime_trys);
        device_printf(dev, "%s: rds_prime_success\t\t%" PRIu64 "\n", __func__,
                rstat->rds_prime_success);
        device_printf(dev, "%s: lro_flows_added\t\t%" PRIu64 "\n", __func__,
                rstat->lro_flows_added);
        device_printf(dev, "%s: lro_flows_deleted\t\t%" PRIu64 "\n", __func__,
                rstat->lro_flows_deleted);
        device_printf(dev, "%s: lro_flows_active\t\t%" PRIu64 "\n", __func__,
                rstat->lro_flows_active);
        device_printf(dev, "%s: pkts_droped_unknown\t\t%" PRIu64 "\n",
                __func__, rstat->pkts_droped_unknown);
}

static void
qla_mac_stats(qla_host_t *ha, q80_mac_stats_t *mstat)
{
        device_t dev = ha->pci_dev;

        device_printf(dev, "%s: xmt_frames\t\t\t%" PRIu64 "\n", __func__,
                mstat->xmt_frames);
        device_printf(dev, "%s: xmt_bytes\t\t\t%" PRIu64 "\n", __func__,
                mstat->xmt_bytes);
        device_printf(dev, "%s: xmt_mcast_pkts\t\t%" PRIu64 "\n", __func__,
                mstat->xmt_mcast_pkts);
        device_printf(dev, "%s: xmt_bcast_pkts\t\t%" PRIu64 "\n", __func__,
                mstat->xmt_bcast_pkts);
        device_printf(dev, "%s: xmt_pause_frames\t\t%" PRIu64 "\n", __func__,
                mstat->xmt_pause_frames);
        device_printf(dev, "%s: xmt_cntrl_pkts\t\t%" PRIu64 "\n", __func__,
                mstat->xmt_cntrl_pkts);
        device_printf(dev, "%s: xmt_pkt_lt_64bytes\t\t%" PRIu64 "\n",
                __func__, mstat->xmt_pkt_lt_64bytes);
        device_printf(dev, "%s: xmt_pkt_lt_127bytes\t\t%" PRIu64 "\n",
                __func__, mstat->xmt_pkt_lt_127bytes);
        device_printf(dev, "%s: xmt_pkt_lt_255bytes\t\t%" PRIu64 "\n",
                __func__, mstat->xmt_pkt_lt_255bytes);
        device_printf(dev, "%s: xmt_pkt_lt_511bytes\t\t%" PRIu64 "\n",
                __func__, mstat->xmt_pkt_lt_511bytes);
        device_printf(dev, "%s: xmt_pkt_lt_1023bytes\t\t%" PRIu64 "\n",
                __func__, mstat->xmt_pkt_lt_1023bytes);
        device_printf(dev, "%s: xmt_pkt_lt_1518bytes\t\t%" PRIu64 "\n",
                __func__, mstat->xmt_pkt_lt_1518bytes);
        device_printf(dev, "%s: xmt_pkt_gt_1518bytes\t\t%" PRIu64 "\n",
                __func__, mstat->xmt_pkt_gt_1518bytes);

        device_printf(dev, "%s: rcv_frames\t\t\t%" PRIu64 "\n", __func__,
                mstat->rcv_frames);
        device_printf(dev, "%s: rcv_bytes\t\t\t%" PRIu64 "\n", __func__,
                mstat->rcv_bytes);
        device_printf(dev, "%s: rcv_mcast_pkts\t\t%" PRIu64 "\n", __func__,
                mstat->rcv_mcast_pkts);
        device_printf(dev, "%s: rcv_bcast_pkts\t\t%" PRIu64 "\n", __func__,
                mstat->rcv_bcast_pkts);
        device_printf(dev, "%s: rcv_pause_frames\t\t%" PRIu64 "\n", __func__,
                mstat->rcv_pause_frames);
        device_printf(dev, "%s: rcv_cntrl_pkts\t\t%" PRIu64 "\n", __func__,
                mstat->rcv_cntrl_pkts);
        device_printf(dev, "%s: rcv_pkt_lt_64bytes\t\t%" PRIu64 "\n",
                __func__, mstat->rcv_pkt_lt_64bytes);
        device_printf(dev, "%s: rcv_pkt_lt_127bytes\t\t%" PRIu64 "\n",
                __func__, mstat->rcv_pkt_lt_127bytes);
        device_printf(dev, "%s: rcv_pkt_lt_255bytes\t\t%" PRIu64 "\n",
                __func__, mstat->rcv_pkt_lt_255bytes);
        device_printf(dev, "%s: rcv_pkt_lt_511bytes\t\t%" PRIu64 "\n",
                __func__, mstat->rcv_pkt_lt_511bytes);
        device_printf(dev, "%s: rcv_pkt_lt_1023bytes\t\t%" PRIu64 "\n",
                __func__, mstat->rcv_pkt_lt_1023bytes);
        device_printf(dev, "%s: rcv_pkt_lt_1518bytes\t\t%" PRIu64 "\n",
                __func__, mstat->rcv_pkt_lt_1518bytes);
        device_printf(dev, "%s: rcv_pkt_gt_1518bytes\t\t%" PRIu64 "\n",
                __func__, mstat->rcv_pkt_gt_1518bytes);

        device_printf(dev, "%s: rcv_len_error\t\t%" PRIu64 "\n", __func__,
                mstat->rcv_len_error);
        device_printf(dev, "%s: rcv_len_small\t\t%" PRIu64 "\n", __func__,
                mstat->rcv_len_small);
        device_printf(dev, "%s: rcv_len_large\t\t%" PRIu64 "\n", __func__,
                mstat->rcv_len_large);
        device_printf(dev, "%s: rcv_jabber\t\t\t%" PRIu64 "\n", __func__,
                mstat->rcv_jabber);
        device_printf(dev, "%s: rcv_dropped\t\t\t%" PRIu64 "\n", __func__,
                mstat->rcv_dropped);
        device_printf(dev, "%s: fcs_error\t\t\t%" PRIu64 "\n", __func__,
                mstat->fcs_error);
        device_printf(dev, "%s: align_error\t\t\t%" PRIu64 "\n", __func__,
                mstat->align_error);
}

1457 static int
1458 qla_get_hw_stats(qla_host_t *ha, uint32_t cmd, uint32_t rsp_size)
1459 {
1460         device_t                dev;
1461         q80_get_stats_t         *stat;
1462         q80_get_stats_rsp_t     *stat_rsp;
1463         uint32_t                err;
1464
1465         dev = ha->pci_dev;
1466
1467         stat = (q80_get_stats_t *)ha->hw.mbox;
1468         bzero(stat, (sizeof (q80_get_stats_t)));
1469
1470         stat->opcode = Q8_MBX_GET_STATS;
1471         stat->count_version = 2;
1472         stat->count_version |= Q8_MBX_CMD_VERSION;
1473
1474         stat->cmd = cmd;
1475
1476         if (qla_mbx_cmd(ha, (uint32_t *)stat, 2,
1477                 ha->hw.mbox, (rsp_size >> 2), 0)) {
1478                 device_printf(dev, "%s: failed\n", __func__);
1479                 return -1;
1480         }
1481
1482         stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
1483
1484         err = Q8_MBX_RSP_STATUS(stat_rsp->regcnt_status);
1485
1486         if (err) {
1487                 return -1;
1488         }
1489
1490         return 0;
1491 }
1492
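/*
 * Usage sketch for qla_get_hw_stats() (illustrative only): the target of
 * the request is encoded in the upper 16 bits of the command word, e.g.
 *
 *      cmd = Q8_GET_STATS_CMD_TYPE_MAC | ((ha->pci_func & 0x1) << 16);
 *      if (qla_get_hw_stats(ha, cmd, sizeof(q80_get_stats_rsp_t)) == 0)
 *              qla_mac_stats(ha,
 *                  &((q80_get_stats_rsp_t *)ha->hw.mbox)->u.mac);
 *
 * The response is returned in the same mailbox area (ha->hw.mbox), so the
 * caller must consume it before issuing the next mailbox command.
 */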
1493 void
1494 ql_get_stats(qla_host_t *ha)
1495 {
1496         q80_get_stats_rsp_t     *stat_rsp;
1497         q80_mac_stats_t         *mstat;
1498         q80_xmt_stats_t         *xstat;
1499         q80_rcv_stats_t         *rstat;
1500         uint32_t                cmd;
1501         int                     i;
1502
1503         stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
1504         /*
1505          * Get MAC Statistics
1506          */
1507         cmd = Q8_GET_STATS_CMD_TYPE_MAC;
1508 //      cmd |= Q8_GET_STATS_CMD_CLEAR;
1509
1510         cmd |= ((ha->pci_func & 0x1) << 16);
1511
1512         if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
1513                 mstat = (q80_mac_stats_t *)&stat_rsp->u.mac;
1514                 qla_mac_stats(ha, mstat);
1515         } else {
1516                 device_printf(ha->pci_dev, "%s: mac failed [0x%08x]\n",
1517                         __func__, ha->hw.mbox[0]);
1518         }
1519         /*
1520          * Get RCV Statistics
1521          */
1522         cmd = Q8_GET_STATS_CMD_RCV | Q8_GET_STATS_CMD_TYPE_CNTXT;
1523 //      cmd |= Q8_GET_STATS_CMD_CLEAR;
1524         cmd |= (ha->hw.rcv_cntxt_id << 16);
1525
1526         if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
1527                 rstat = (q80_rcv_stats_t *)&stat_rsp->u.rcv;
1528                 qla_rcv_stats(ha, rstat);
1529         } else {
1530                 device_printf(ha->pci_dev, "%s: rcv failed [0x%08x]\n",
1531                         __func__, ha->hw.mbox[0]);
1532         }
1533         /*
1534          * Get XMT Statistics
1535          */
1536         for (i = 0 ; i < ha->hw.num_tx_rings; i++) {
1537                 cmd = Q8_GET_STATS_CMD_XMT | Q8_GET_STATS_CMD_TYPE_CNTXT;
1538 //              cmd |= Q8_GET_STATS_CMD_CLEAR;
1539                 cmd |= (ha->hw.tx_cntxt[i].tx_cntxt_id << 16);
1540
1541                 if (qla_get_hw_stats(ha, cmd, sizeof(q80_get_stats_rsp_t))
1542                         == 0) {
1543                         xstat = (q80_xmt_stats_t *)&stat_rsp->u.xmt;
1544                         qla_xmt_stats(ha, xstat, i);
1545                 } else {
1546                         device_printf(ha->pci_dev, "%s: xmt failed [0x%08x]\n",
1547                                 __func__, ha->hw.mbox[0]);
1548                 }
1549         }
1550         return;
1551 }
1552
1553 static void
1554 qla_get_quick_stats(qla_host_t *ha)
1555 {
1556         q80_get_mac_rcv_xmt_stats_rsp_t *stat_rsp;
1557         q80_mac_stats_t         *mstat;
1558         q80_xmt_stats_t         *xstat;
1559         q80_rcv_stats_t         *rstat;
1560         uint32_t                cmd;
1561
1562         stat_rsp = (q80_get_mac_rcv_xmt_stats_rsp_t *)ha->hw.mbox;
1563
1564         cmd = Q8_GET_STATS_CMD_TYPE_ALL;
1565 //      cmd |= Q8_GET_STATS_CMD_CLEAR;
1566
1567 //      cmd |= ((ha->pci_func & 0x3) << 16);
1568         cmd |= (0xFFFF << 16);
1569
1570         if (qla_get_hw_stats(ha, cmd,
1571                         sizeof (q80_get_mac_rcv_xmt_stats_rsp_t)) == 0) {
1572
1573                 mstat = (q80_mac_stats_t *)&stat_rsp->mac;
1574                 rstat = (q80_rcv_stats_t *)&stat_rsp->rcv;
1575                 xstat = (q80_xmt_stats_t *)&stat_rsp->xmt;
1576                 qla_mac_stats(ha, mstat);
1577                 qla_rcv_stats(ha, rstat);
1578                 qla_xmt_stats(ha, xstat, ha->hw.num_tx_rings);
1579         } else {
1580                 device_printf(ha->pci_dev, "%s: failed [0x%08x]\n",
1581                         __func__, ha->hw.mbox[0]);
1582         }
1583         return;
1584 }
1585
1586 /*
1587  * Name: qla_tx_tso
1588  * Function: Checks if the packet to be transmitted is a candidate for
1589  *      Large TCP Segment Offload. If yes, the appropriate fields in the Tx
1590  *      Ring Structure are plugged in.
1591  */
1592 static int
1593 qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr)
1594 {
1595         struct ether_vlan_header *eh;
1596         struct ip *ip = NULL;
1597         struct ip6_hdr *ip6 = NULL;
1598         struct tcphdr *th = NULL;
1599         uint32_t ehdrlen,  hdrlen, ip_hlen, tcp_hlen, tcp_opt_off;
1600         uint16_t etype, opcode, offload = 1;
1601         device_t dev;
1602
1603         dev = ha->pci_dev;
1604
1605
1606         eh = mtod(mp, struct ether_vlan_header *);
1607
1608         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1609                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1610                 etype = ntohs(eh->evl_proto);
1611         } else {
1612                 ehdrlen = ETHER_HDR_LEN;
1613                 etype = ntohs(eh->evl_encap_proto);
1614         }
1615
1616         hdrlen = 0;
1617
1618         switch (etype) {
1619                 case ETHERTYPE_IP:
1620
1621                         tcp_opt_off = ehdrlen + sizeof(struct ip) +
1622                                         sizeof(struct tcphdr);
1623
1624                         if (mp->m_len < tcp_opt_off) {
1625                                 m_copydata(mp, 0, tcp_opt_off, hdr);
1626                                 ip = (struct ip *)(hdr + ehdrlen);
1627                         } else {
1628                                 ip = (struct ip *)(mp->m_data + ehdrlen);
1629                         }
1630
1631                         ip_hlen = ip->ip_hl << 2;
1632                         opcode = Q8_TX_CMD_OP_XMT_TCP_LSO;
1633
1634
1635                         if ((ip->ip_p != IPPROTO_TCP) ||
1636                                 (ip_hlen != sizeof (struct ip))){
1637                                 /* IP Options are not supported */
1638
1639                                 offload = 0;
1640                         } else
1641                                 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
1642
1643                 break;
1644
1645                 case ETHERTYPE_IPV6:
1646
1647                         tcp_opt_off = ehdrlen + sizeof(struct ip6_hdr) +
1648                                         sizeof (struct tcphdr);
1649
1650                         if (mp->m_len < tcp_opt_off) {
1651                                 m_copydata(mp, 0, tcp_opt_off, hdr);
1652                                 ip6 = (struct ip6_hdr *)(hdr + ehdrlen);
1653                         } else {
1654                                 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
1655                         }
1656
1657                         ip_hlen = sizeof(struct ip6_hdr);
1658                         opcode = Q8_TX_CMD_OP_XMT_TCP_LSO_IPV6;
1659
1660                         if (ip6->ip6_nxt != IPPROTO_TCP) {
1661                                 //device_printf(dev, "%s: ipv6\n", __func__);
1662                                 offload = 0;
1663                         } else
1664                                 th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
1665                 break;
1666
1667                 default:
1668                         QL_DPRINT8(ha, (dev, "%s: type!=ip\n", __func__));
1669                         offload = 0;
1670                 break;
1671         }
1672
1673         if (!offload)
1674                 return (-1);
1675
1676         tcp_hlen = th->th_off << 2;
1677         hdrlen = ehdrlen + ip_hlen + tcp_hlen;
1678
1679         if (mp->m_len < hdrlen) {
1680                 if (mp->m_len < tcp_opt_off) {
1681                         if (tcp_hlen > sizeof(struct tcphdr)) {
1682                                 m_copydata(mp, tcp_opt_off,
1683                                         (tcp_hlen - sizeof(struct tcphdr)),
1684                                         &hdr[tcp_opt_off]);
1685                         }
1686                 } else {
1687                         m_copydata(mp, 0, hdrlen, hdr);
1688                 }
1689         }
1690
1691         tx_cmd->mss = mp->m_pkthdr.tso_segsz;
1692
1693         tx_cmd->flags_opcode = opcode ;
1694         tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;
1695         tx_cmd->total_hdr_len = hdrlen;
1696
1697         /* Multicast: least significant bit of the first address byte is set */
1698         if (eh->evl_dhost[0] & 0x01) {
1699                 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST;
1700         }
1701
1702         if (mp->m_len < hdrlen) {
1703                 QL_DPRINT8(ha, (dev, "%s: hdrlen %d\n", __func__, hdrlen));
1704                 return (1);
1705         }
1706
1707         return (0);
1708 }
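/*
 * Worked example for the header math above (untagged IPv4, no TCP
 * options): ehdrlen = ETHER_HDR_LEN (14), ip_hlen = 20, tcp_hlen = 20,
 * so hdrlen = 54 and tcp_hdr_off = 34. A return of 1 tells the caller
 * that the full header was assembled into 'hdr', because the first mbuf
 * was too short to hold it contiguously.
 */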
1709
1710 /*
1711  * Name: qla_tx_chksum
1712  * Function: Checks if the packet to be transmitted is a candidate for
1713  *      TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx
1714  *      Ring Structure are plugged in.
1715  */
1716 static int
1717 qla_tx_chksum(qla_host_t *ha, struct mbuf *mp, uint32_t *op_code,
1718         uint32_t *tcp_hdr_off)
1719 {
1720         struct ether_vlan_header *eh;
1721         struct ip *ip;
1722         struct ip6_hdr *ip6;
1723         uint32_t ehdrlen, ip_hlen;
1724         uint16_t etype, opcode, offload = 1;
1725         device_t dev;
1726         uint8_t buf[sizeof(struct ip6_hdr)];
1727
1728         dev = ha->pci_dev;
1729
1730         *op_code = 0;
1731
1732         if ((mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) == 0)
1733                 return (-1);
1734
1735         eh = mtod(mp, struct ether_vlan_header *);
1736
1737         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1738                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1739                 etype = ntohs(eh->evl_proto);
1740         } else {
1741                 ehdrlen = ETHER_HDR_LEN;
1742                 etype = ntohs(eh->evl_encap_proto);
1743         }
1744
1745
1746         switch (etype) {
1747                 case ETHERTYPE_IP:
1748                         ip = (struct ip *)(mp->m_data + ehdrlen);
1749
1750                         ip_hlen = sizeof (struct ip);
1751
1752                         if (mp->m_len < (ehdrlen + ip_hlen)) {
1753                                 m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
1754                                 ip = (struct ip *)buf;
1755                         }
1756
1757                         if (ip->ip_p == IPPROTO_TCP)
1758                                 opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM;
1759                         else if (ip->ip_p == IPPROTO_UDP)
1760                                 opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM;
1761                         else {
1762                                 //device_printf(dev, "%s: ipv4\n", __func__);
1763                                 offload = 0;
1764                         }
1765                 break;
1766
1767                 case ETHERTYPE_IPV6:
1768                         ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
1769
1770                         ip_hlen = sizeof(struct ip6_hdr);
1771
1772                         if (mp->m_len < (ehdrlen + ip_hlen)) {
1773                                 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
1774                                         buf);
1775                                 ip6 = (struct ip6_hdr *)buf;
1776                         }
1777
1778                         if (ip6->ip6_nxt == IPPROTO_TCP)
1779                                 opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6;
1780                         else if (ip6->ip6_nxt == IPPROTO_UDP)
1781                                 opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6;
1782                         else {
1783                                 //device_printf(dev, "%s: ipv6\n", __func__);
1784                                 offload = 0;
1785                         }
1786                 break;
1787
1788                 default:
1789                         offload = 0;
1790                 break;
1791         }
1792         if (!offload)
1793                 return (-1);
1794
1795         *op_code = opcode;
1796         *tcp_hdr_off = (ip_hlen + ehdrlen);
1797
1798         return (0);
1799 }
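/*
 * For example, a TCP segment over untagged IPv4 with CSUM_TCP set yields
 * *op_code = Q8_TX_CMD_OP_XMT_TCP_CHKSUM and *tcp_hdr_off =
 * ETHER_HDR_LEN + sizeof(struct ip) = 34; note that only the fixed IP
 * header is examined here, unlike qla_tx_tso() which rejects IP options.
 */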
1800
1801 #define QLA_TX_MIN_FREE 2
1802 /*
1803  * Name: ql_hw_send
1804  * Function: Transmits a packet. It first checks if the packet is a
1805  *      candidate for Large TCP Segment Offload and then for UDP/TCP checksum
1806  *      offload. If neither of these criteria is met, it is transmitted
1807  *      as a regular Ethernet frame.
1808  */
1809 int
1810 ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
1811         uint32_t tx_idx, struct mbuf *mp, uint32_t txr_idx, uint32_t iscsi_pdu)
1812 {
1813         struct ether_vlan_header *eh;
1814         qla_hw_t *hw = &ha->hw;
1815         q80_tx_cmd_t *tx_cmd, tso_cmd;
1816         bus_dma_segment_t *c_seg;
1817         uint32_t num_tx_cmds, hdr_len = 0;
1818         uint32_t total_length = 0, bytes, tx_cmd_count = 0, txr_next;
1819         device_t dev;
1820         int i, ret;
1821         uint8_t *src = NULL, *dst = NULL;
1822         uint8_t frame_hdr[QL_FRAME_HDR_SIZE];
1823         uint32_t op_code = 0;
1824         uint32_t tcp_hdr_off = 0;
1825
1826         dev = ha->pci_dev;
1827
1828         /*
1829          * Always make sure there is at least one empty slot in the tx_ring;
1830          * the tx_ring is considered full when only one entry is available.
1831          */
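        /*
         * Ceiling division: each tx descriptor carries up to
         * Q8_TX_CMD_MAX_SEGMENTS DMA segments, and the ">> 2" below
         * assumes that limit is 4; e.g. nsegs = 10 needs
         * (10 + 3) >> 2 = 3 descriptors.
         */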
1832         num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2;
1833
1834         total_length = mp->m_pkthdr.len;
1835         if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
1836                 device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
1837                         __func__, total_length);
1838                 return (-1);
1839         }
1840         eh = mtod(mp, struct ether_vlan_header *);
1841
1842         if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
1843
1844                 bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t));
1845
1846                 src = frame_hdr;
1847                 ret = qla_tx_tso(ha, mp, &tso_cmd, src);
1848
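                /*
                 * qla_tx_tso() returns 0 (header resident in the first
                 * mbuf), 1 (header assembled into frame_hdr above), or
                 * -1 (not a TSO candidate); "!(ret & ~1)" accepts 0 and 1.
                 */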
1849                 if (!(ret & ~1)) {
1850                         /* find the additional tx_cmd descriptors required */
1851
1852                         if (mp->m_flags & M_VLANTAG)
1853                                 tso_cmd.total_hdr_len += ETHER_VLAN_ENCAP_LEN;
1854
1855                         hdr_len = tso_cmd.total_hdr_len;
1856
1857                         bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
1858                         bytes = QL_MIN(bytes, hdr_len);
1859
1860                         num_tx_cmds++;
1861                         hdr_len -= bytes;
1862
1863                         while (hdr_len) {
1864                                 bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
1865                                 hdr_len -= bytes;
1866                                 num_tx_cmds++;
1867                         }
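                        /*
                         * Example, assuming sizeof(q80_tx_cmd_t) is 64 and
                         * Q8_TX_CMD_TSO_ALIGN is 2: a 54-byte header fits in
                         * the 62 usable bytes of the first spill descriptor
                         * (one extra tx_cmd); longer headers consume
                         * additional 64-byte descriptors.
                         */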
1868                         hdr_len = tso_cmd.total_hdr_len;
1869
1870                         if (ret == 0)
1871                                 src = (uint8_t *)eh;
1872                 } else 
1873                         return (EINVAL);
1874         } else {
1875                 (void)qla_tx_chksum(ha, mp, &op_code, &tcp_hdr_off);
1876         }
1877
1878         if (iscsi_pdu)
1879                 ha->hw.iscsi_pkt_count++;
1880
1881         if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
1882                 qla_hw_tx_done_locked(ha, txr_idx);
1883                 if (hw->tx_cntxt[txr_idx].txr_free <=
1884                                 (num_tx_cmds + QLA_TX_MIN_FREE)) {
1885                         QL_DPRINT8(ha, (dev, "%s: (hw->txr_free <= "
1886                                 "(num_tx_cmds + QLA_TX_MIN_FREE))\n",
1887                                 __func__));
1888                         return (-1);
1889                 }
1890         }
1891
1892         tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[tx_idx];
1893
1894         if (!(mp->m_pkthdr.csum_flags & CSUM_TSO)) {
1895
1896                 if (nsegs > ha->hw.max_tx_segs)
1897                         ha->hw.max_tx_segs = nsegs;
1898
1899                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
1900
1901                 if (op_code) {
1902                         tx_cmd->flags_opcode = op_code;
1903                         tx_cmd->tcp_hdr_off = tcp_hdr_off;
1904
1905                 } else {
1906                         tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER;
1907                 }
1908         } else {
1909                 bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t));
1910                 ha->tx_tso_frames++;
1911         }
1912
1913         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1914                 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED;
1915
1916                 if (iscsi_pdu)
1917                         eh->evl_tag |= ha->hw.user_pri_iscsi << 13;
1918
1919         } else if (mp->m_flags & M_VLANTAG) {
1920
1921                 if (hdr_len) { /* TSO */
1922                         tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED |
1923                                                 Q8_TX_CMD_FLAGS_HW_VLAN_ID);
1924                         tx_cmd->tcp_hdr_off += ETHER_VLAN_ENCAP_LEN;
1925                 } else
1926                         tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_HW_VLAN_ID;
1927
1928                 ha->hw_vlan_tx_frames++;
1929                 tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag;
1930
1931                 if (iscsi_pdu) {
1932                         tx_cmd->vlan_tci |= ha->hw.user_pri_iscsi << 13;
1933                         mp->m_pkthdr.ether_vtag = tx_cmd->vlan_tci;
1934                 }
1935         }
1936
1937
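        /*
         * Fill in the segment count and the 24-bit total frame length:
         * low 8 bits in data_len_lo, upper 16 bits little-endian in
         * data_len_hi.
         */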
1938         tx_cmd->n_bufs = (uint8_t)nsegs;
1939         tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF);
1940         tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8)));
1941         tx_cmd->cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func);
1942
1943         c_seg = segs;
1944
1945         while (1) {
1946                 for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) {
1947
1948                         switch (i) {
1949                         case 0:
1950                                 tx_cmd->buf1_addr = c_seg->ds_addr;
1951                                 tx_cmd->buf1_len = c_seg->ds_len;
1952                                 break;
1953
1954                         case 1:
1955                                 tx_cmd->buf2_addr = c_seg->ds_addr;
1956                                 tx_cmd->buf2_len = c_seg->ds_len;
1957                                 break;
1958
1959                         case 2:
1960                                 tx_cmd->buf3_addr = c_seg->ds_addr;
1961                                 tx_cmd->buf3_len = c_seg->ds_len;
1962                                 break;
1963
1964                         case 3:
1965                                 tx_cmd->buf4_addr = c_seg->ds_addr;
1966                                 tx_cmd->buf4_len = c_seg->ds_len;
1967                                 break;
1968                         }
1969
1970                         c_seg++;
1971                         nsegs--;
1972                 }
1973
1974                 txr_next = hw->tx_cntxt[txr_idx].txr_next =
1975                         (hw->tx_cntxt[txr_idx].txr_next + 1) &
1976                                 (NUM_TX_DESCRIPTORS - 1);
1977                 tx_cmd_count++;
1978
1979                 if (!nsegs)
1980                         break;
1981
1982                 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
1983                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
1984         }
1985
1986         if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
1987
1988                 /* TSO : Copy the header in the following tx cmd descriptors */
1989
1990                 txr_next = hw->tx_cntxt[txr_idx].txr_next;
1991
1992                 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
1993                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
1994
1995                 bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
1996                 bytes = QL_MIN(bytes, hdr_len);
1997
1998                 dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN;
1999
2000                 if (mp->m_flags & M_VLANTAG) {
2001                         /* first copy the src/dst MAC addresses */
2002                         bcopy(src, dst, (ETHER_ADDR_LEN * 2));
2003                         dst += (ETHER_ADDR_LEN * 2);
2004                         src += (ETHER_ADDR_LEN * 2);
2005
2006                         *((uint16_t *)dst) = htons(ETHERTYPE_VLAN);
2007                         dst += 2;
2008                         *((uint16_t *)dst) = htons(mp->m_pkthdr.ether_vtag);
2009                         dst += 2;
2010
2011                         /* bytes left in src header */
2012                         hdr_len -= ((ETHER_ADDR_LEN * 2) +
2013                                         ETHER_VLAN_ENCAP_LEN);
2014
2015                         /* bytes left in TxCmd Entry */
2016                         bytes -= ((ETHER_ADDR_LEN * 2) + ETHER_VLAN_ENCAP_LEN);
2017
2018
2019                         bcopy(src, dst, bytes);
2020                         src += bytes;
2021                         hdr_len -= bytes;
2022                 } else {
2023                         bcopy(src, dst, bytes);
2024                         src += bytes;
2025                         hdr_len -= bytes;
2026                 }
2027
2028                 txr_next = hw->tx_cntxt[txr_idx].txr_next =
2029                                 (hw->tx_cntxt[txr_idx].txr_next + 1) &
2030                                         (NUM_TX_DESCRIPTORS - 1);
2031                 tx_cmd_count++;
2032
2033                 while (hdr_len) {
2034                         tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2035                         bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2036
2037                         bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
2038
2039                         bcopy(src, tx_cmd, bytes);
2040                         src += bytes;
2041                         hdr_len -= bytes;
2042
2043                         txr_next = hw->tx_cntxt[txr_idx].txr_next =
2044                                 (hw->tx_cntxt[txr_idx].txr_next + 1) &
2045                                         (NUM_TX_DESCRIPTORS - 1);
2046                         tx_cmd_count++;
2047                 }
2048         }
2049
2050         hw->tx_cntxt[txr_idx].txr_free =
2051                 hw->tx_cntxt[txr_idx].txr_free - tx_cmd_count;
2052
2053         QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->tx_cntxt[txr_idx].txr_next,\
2054                 txr_idx);
2055         QL_DPRINT8(ha, (dev, "%s: return\n", __func__));
2056
2057         return (0);
2058 }
2059
2060
2061
2062 #define Q8_CONFIG_IND_TBL_SIZE  32 /* < Q8_RSS_IND_TBL_SIZE and power of 2 */
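/*
 * Name: qla_config_rss_ind_table
 * Function: Programs the RSS indirection table. Entries are assigned
 *      round-robin across the SDS rings (entry i maps to ring
 *      i % num_sds_rings) and written to firmware in
 *      Q8_CONFIG_IND_TBL_SIZE-entry chunks.
 */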
2063 static int
2064 qla_config_rss_ind_table(qla_host_t *ha)
2065 {
2066         uint32_t i, count;
2067         uint8_t rss_ind_tbl[Q8_CONFIG_IND_TBL_SIZE];
2068
2069
2070         for (i = 0; i < Q8_CONFIG_IND_TBL_SIZE; i++) {
2071                 rss_ind_tbl[i] = i % ha->hw.num_sds_rings;
2072         }
2073
2074         for (i = 0; i <= Q8_RSS_IND_TBL_MAX_IDX ;
2075                 i = i + Q8_CONFIG_IND_TBL_SIZE) {
2076
2077                 if ((i + Q8_CONFIG_IND_TBL_SIZE) > Q8_RSS_IND_TBL_MAX_IDX) {
2078                         count = Q8_RSS_IND_TBL_MAX_IDX - i + 1;
2079                 } else {
2080                         count = Q8_CONFIG_IND_TBL_SIZE;
2081                 }
2082
2083                 if (qla_set_rss_ind_table(ha, i, count, ha->hw.rcv_cntxt_id,
2084                         rss_ind_tbl))
2085                         return (-1);
2086         }
2087
2088         return (0);
2089 }
2090
2091 /*
2092  * Name: ql_del_hw_if
2093  * Function: Destroys the hardware-specific entities corresponding to an
2094  *      Ethernet Interface
2095  */
2096 void
2097 ql_del_hw_if(qla_host_t *ha)
2098 {
2099         uint32_t i;
2100         uint32_t num_msix;
2101
2102         (void)qla_stop_nic_func(ha);
2103
2104         qla_del_rcv_cntxt(ha);
2105         qla_del_xmt_cntxt(ha);
2106
2107         if (ha->hw.flags.init_intr_cnxt) {
2108                 for (i = 0; i < ha->hw.num_sds_rings; ) {
2109
2110                         if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2111                                 num_msix = Q8_MAX_INTR_VECTORS;
2112                         else
2113                                 num_msix = ha->hw.num_sds_rings - i;
2114                         qla_config_intr_cntxt(ha, i, num_msix, 0);
2115
2116                         i += num_msix;
2117                 }
2118
2119                 ha->hw.flags.init_intr_cnxt = 0;
2120         }
2121         return;
2122 }
2123
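/*
 * Name: qla_confirm_9kb_enable
 * Function: Enables the firmware mailbox interrupt on MSI-X vector 0 and
 *      clears hw.enable_9kb unless the NIC partition reports support for
 *      9KB receive buffers.
 */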
2124 void
2125 qla_confirm_9kb_enable(qla_host_t *ha)
2126 {
2127         uint32_t supports_9kb = 0;
2128
2129         ha->hw.mbx_intr_mask_offset = READ_REG32(ha, Q8_MBOX_INT_MASK_MSIX);
2130
2131         /* Use MSI-X vector 0; Enable Firmware Mailbox Interrupt */
2132         WRITE_REG32(ha, Q8_MBOX_INT_ENABLE, BIT_2);
2133         WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
2134
2135         qla_get_nic_partition(ha, &supports_9kb, NULL);
2136
2137         if (!supports_9kb)
2138                 ha->hw.enable_9kb = 0;
2139
2140         return;
2141 }
2142
2143
2144 /*
2145  * Name: ql_init_hw_if
2146  * Function: Creates the hardware-specific entities corresponding to an
2147  *      Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
2148  *      corresponding to the interface. Enables LRO if allowed.
2149  */
2150 int
2151 ql_init_hw_if(qla_host_t *ha)
2152 {
2153         device_t        dev;
2154         uint32_t        i;
2155         uint8_t         bcast_mac[6];
2156         qla_rdesc_t     *rdesc;
2157         uint32_t        num_msix;
2158
2159         dev = ha->pci_dev;
2160
2161         for (i = 0; i < ha->hw.num_sds_rings; i++) {
2162                 bzero(ha->hw.dma_buf.sds_ring[i].dma_b,
2163                         ha->hw.dma_buf.sds_ring[i].size);
2164         }
2165
2166         for (i = 0; i < ha->hw.num_sds_rings; ) {
2167
2168                 if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2169                         num_msix = Q8_MAX_INTR_VECTORS;
2170                 else
2171                         num_msix = ha->hw.num_sds_rings - i;
2172
2173                 if (qla_config_intr_cntxt(ha, i, num_msix, 1)) {
2174
2175                         if (i > 0) {
2176
2177                                 num_msix = i;
2178
2179                                 for (i = 0; i < num_msix; ) {
2180                                         qla_config_intr_cntxt(ha, i,
2181                                                 Q8_MAX_INTR_VECTORS, 0);
2182                                         i += Q8_MAX_INTR_VECTORS;
2183                                 }
2184                         }
2185                         return (-1);
2186                 }
2187
2188                 i = i + num_msix;
2189         }
2190
2191         ha->hw.flags.init_intr_cnxt = 1;
2192
2193         /*
2194          * Create Receive Context
2195          */
2196         if (qla_init_rcv_cntxt(ha)) {
2197                 return (-1);
2198         }
2199
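        /*
         * Publish the initial RDS producer indices; rx_next is seeded two
         * short of the ring size, presumably so the producer index handed
         * to the firmware never catches up with the consumer on a freshly
         * filled ring.
         */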
2200         for (i = 0; i < ha->hw.num_rds_rings; i++) {
2201                 rdesc = &ha->hw.rds[i];
2202                 rdesc->rx_next = NUM_RX_DESCRIPTORS - 2;
2203                 rdesc->rx_in = 0;
2204                 /* Update the RDS Producer Indices */
2205                 QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,\
2206                         rdesc->rx_next);
2207         }
2208
2209
2210         /*
2211          * Create Transmit Context
2212          */
2213         if (qla_init_xmt_cntxt(ha)) {
2214                 qla_del_rcv_cntxt(ha);
2215                 return (-1);
2216         }
2217         ha->hw.max_tx_segs = 0;
2218
2219         if (qla_config_mac_addr(ha, ha->hw.mac_addr, 1))
2220                 return(-1);
2221
2222         ha->hw.flags.unicast_mac = 1;
2223
2224         bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
2225         bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
2226
2227         if (qla_config_mac_addr(ha, bcast_mac, 1))
2228                 return (-1);
2229
2230         ha->hw.flags.bcast_mac = 1;
2231
2232         /*
2233          * program any cached multicast addresses
2234          */
2235         if (qla_hw_add_all_mcast(ha))
2236                 return (-1);
2237
2238         if (qla_config_rss(ha, ha->hw.rcv_cntxt_id))
2239                 return (-1);
2240
2241         if (qla_config_rss_ind_table(ha))
2242                 return (-1);
2243
2244         if (qla_config_intr_coalesce(ha, ha->hw.rcv_cntxt_id, 0, 1))
2245                 return (-1);
2246
2247         if (qla_link_event_req(ha, ha->hw.rcv_cntxt_id))
2248                 return (-1);
2249
2250         if (qla_config_fw_lro(ha, ha->hw.rcv_cntxt_id))
2251                 return (-1);
2252
2253         if (qla_init_nic_func(ha))
2254                 return (-1);
2255
2256         if (qla_query_fw_dcbx_caps(ha))
2257                 return (-1);
2258
2259         for (i = 0; i < ha->hw.num_sds_rings; i++)
2260                 QL_ENABLE_INTERRUPTS(ha, i);
2261
2262         return (0);
2263 }
2264
2265 static int
2266 qla_map_sds_to_rds(qla_host_t *ha, uint32_t start_idx, uint32_t num_idx)
2267 {
2268         device_t                dev = ha->pci_dev;
2269         q80_rq_map_sds_to_rds_t *map_rings;
2270         q80_rsp_map_sds_to_rds_t *map_rings_rsp;
2271         uint32_t                i, err;
2272         qla_hw_t                *hw = &ha->hw;
2273
2274         map_rings = (q80_rq_map_sds_to_rds_t *)ha->hw.mbox;
2275         bzero(map_rings, sizeof(q80_rq_map_sds_to_rds_t));
2276
2277         map_rings->opcode = Q8_MBX_MAP_SDS_TO_RDS;
2278         map_rings->count_version = (sizeof (q80_rq_map_sds_to_rds_t) >> 2);
2279         map_rings->count_version |= Q8_MBX_CMD_VERSION;
2280
2281         map_rings->cntxt_id = hw->rcv_cntxt_id;
2282         map_rings->num_rings = num_idx;
2283
2284         for (i = 0; i < num_idx; i++) {
2285                 map_rings->sds_rds[i].sds_ring = i + start_idx;
2286                 map_rings->sds_rds[i].rds_ring = i + start_idx;
2287         }
2288
2289         if (qla_mbx_cmd(ha, (uint32_t *)map_rings,
2290                 (sizeof (q80_rq_map_sds_to_rds_t) >> 2),
2291                 ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
2292                 device_printf(dev, "%s: failed0\n", __func__);
2293                 return (-1);
2294         }
2295
2296         map_rings_rsp = (q80_rsp_map_sds_to_rds_t *)ha->hw.mbox;
2297
2298         err = Q8_MBX_RSP_STATUS(map_rings_rsp->regcnt_status);
2299
2300         if (err) {
2301                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2302                 return (-1);
2303         }
2304
2305         return (0);
2306 }
2307
2308 /*
2309  * Name: qla_init_rcv_cntxt
2310  * Function: Creates the Receive Context.
2311  */
2312 static int
2313 qla_init_rcv_cntxt(qla_host_t *ha)
2314 {
2315         q80_rq_rcv_cntxt_t      *rcntxt;
2316         q80_rsp_rcv_cntxt_t     *rcntxt_rsp;
2317         q80_stat_desc_t         *sdesc;
2318         int                     i, j;
2319         qla_hw_t                *hw = &ha->hw;
2320         device_t                dev;
2321         uint32_t                err;
2322         uint32_t                rcntxt_sds_rings;
2323         uint32_t                rcntxt_rds_rings;
2324         uint32_t                max_idx;
2325
2326         dev = ha->pci_dev;
2327
2328         /*
2329          * Create Receive Context
2330          */
2331
2332         for (i = 0; i < hw->num_sds_rings; i++) {
2333                 for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++) {
2334                         sdesc = (q80_stat_desc_t *)
2335                                 &hw->sds[i].sds_ring_base[j];
2336                         sdesc->data[0] = 1ULL;
2337                         sdesc->data[1] = 1ULL;
2338                 }
2339         }
2340
2341         rcntxt_sds_rings = hw->num_sds_rings;
2342         if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS)
2343                 rcntxt_sds_rings = MAX_RCNTXT_SDS_RINGS;
2344
2345         rcntxt_rds_rings = hw->num_rds_rings;
2346
2347         if (hw->num_rds_rings > MAX_RDS_RING_SETS)
2348                 rcntxt_rds_rings = MAX_RDS_RING_SETS;
2349
2350         rcntxt = (q80_rq_rcv_cntxt_t *)ha->hw.mbox;
2351         bzero(rcntxt, (sizeof (q80_rq_rcv_cntxt_t)));
2352
2353         rcntxt->opcode = Q8_MBX_CREATE_RX_CNTXT;
2354         rcntxt->count_version = (sizeof (q80_rq_rcv_cntxt_t) >> 2);
2355         rcntxt->count_version |= Q8_MBX_CMD_VERSION;
2356
2357         rcntxt->cap0 = Q8_RCV_CNTXT_CAP0_BASEFW |
2358                         Q8_RCV_CNTXT_CAP0_LRO |
2359                         Q8_RCV_CNTXT_CAP0_HW_LRO |
2360                         Q8_RCV_CNTXT_CAP0_RSS |
2361                         Q8_RCV_CNTXT_CAP0_SGL_LRO;
2362
2363         if (ha->hw.enable_9kb)
2364                 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SINGLE_JUMBO;
2365         else
2366                 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SGL_JUMBO;
2367
2368         if (ha->hw.num_rds_rings > 1) {
2369                 rcntxt->nrds_sets_rings = rcntxt_rds_rings | (1 << 5);
2370                 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_MULTI_RDS;
2371         } else
2372                 rcntxt->nrds_sets_rings = 0x1 | (1 << 5);
2373
2374         rcntxt->nsds_rings = rcntxt_sds_rings;
2375
2376         rcntxt->rds_producer_mode = Q8_RCV_CNTXT_RDS_PROD_MODE_UNIQUE;
2377
2378         rcntxt->rcv_vpid = 0;
2379
2380         for (i = 0; i <  rcntxt_sds_rings; i++) {
2381                 rcntxt->sds[i].paddr =
2382                         qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr);
2383                 rcntxt->sds[i].size =
2384                         qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
2385                 if (ha->msix_count == 2) {
2386                         rcntxt->sds[i].intr_id =
2387                                 qla_host_to_le16(hw->intr_id[0]);
2388                         rcntxt->sds[i].intr_src_bit = qla_host_to_le16((i));
2389                 } else {
2390                         rcntxt->sds[i].intr_id =
2391                                 qla_host_to_le16(hw->intr_id[i]);
2392                         rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0);
2393                 }
2394         }
2395
2396         for (i = 0; i <  rcntxt_rds_rings; i++) {
2397                 rcntxt->rds[i].paddr_std =
2398                         qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr);
2399
2400                 if (ha->hw.enable_9kb)
2401                         rcntxt->rds[i].std_bsize =
2402                                 qla_host_to_le64(MJUM9BYTES);
2403                 else
2404                         rcntxt->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
2405
2406                 rcntxt->rds[i].std_nentries =
2407                         qla_host_to_le32(NUM_RX_DESCRIPTORS);
2408         }
2409
2410         if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
2411                 (sizeof (q80_rq_rcv_cntxt_t) >> 2),
2412                 ha->hw.mbox, (sizeof(q80_rsp_rcv_cntxt_t) >> 2), 0)) {
2413                 device_printf(dev, "%s: failed0\n", __func__);
2414                 return (-1);
2415         }
2416
2417         rcntxt_rsp = (q80_rsp_rcv_cntxt_t *)ha->hw.mbox;
2418
2419         err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
2420
2421         if (err) {
2422                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2423                 return (-1);
2424         }
2425
2426         for (i = 0; i <  rcntxt_sds_rings; i++) {
2427                 hw->sds[i].sds_consumer = rcntxt_rsp->sds_cons[i];
2428         }
2429
2430         for (i = 0; i <  rcntxt_rds_rings; i++) {
2431                 hw->rds[i].prod_std = rcntxt_rsp->rds[i].prod_std;
2432         }
2433
2434         hw->rcv_cntxt_id = rcntxt_rsp->cntxt_id;
2435
2436         ha->hw.flags.init_rx_cnxt = 1;
2437
2438         if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) {
2439
2440                 for (i = MAX_RCNTXT_SDS_RINGS; i < hw->num_sds_rings;) {
2441
2442                         if ((i + MAX_RCNTXT_SDS_RINGS) < hw->num_sds_rings)
2443                                 max_idx = MAX_RCNTXT_SDS_RINGS;
2444                         else
2445                                 max_idx = hw->num_sds_rings - i;
2446
2447                         err = qla_add_rcv_rings(ha, i, max_idx);
2448                         if (err)
2449                                 return -1;
2450
2451                         i += max_idx;
2452                 }
2453         }
2454
2455         if (hw->num_rds_rings > 1) {
2456
2457                 for (i = 0; i < hw->num_rds_rings; ) {
2458
2459                         if ((i + MAX_SDS_TO_RDS_MAP) < hw->num_rds_rings)
2460                                 max_idx = MAX_SDS_TO_RDS_MAP;
2461                         else
2462                                 max_idx = hw->num_rds_rings - i;
2463
2464                         err = qla_map_sds_to_rds(ha, i, max_idx);
2465                         if (err)
2466                                 return -1;
2467
2468                         i += max_idx;
2469                 }
2470         }
2471
2472         return (0);
2473 }
2474
2475 static int
2476 qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds)
2477 {
2478         device_t                dev = ha->pci_dev;
2479         q80_rq_add_rcv_rings_t  *add_rcv;
2480         q80_rsp_add_rcv_rings_t *add_rcv_rsp;
2481         uint32_t                i,j, err;
2482         qla_hw_t                *hw = &ha->hw;
2483
2484         add_rcv = (q80_rq_add_rcv_rings_t *)ha->hw.mbox;
2485         bzero(add_rcv, sizeof (q80_rq_add_rcv_rings_t));
2486
2487         add_rcv->opcode = Q8_MBX_ADD_RX_RINGS;
2488         add_rcv->count_version = (sizeof (q80_rq_add_rcv_rings_t) >> 2);
2489         add_rcv->count_version |= Q8_MBX_CMD_VERSION;
2490
2491         add_rcv->nrds_sets_rings = nsds | (1 << 5);
2492         add_rcv->nsds_rings = nsds;
2493         add_rcv->cntxt_id = hw->rcv_cntxt_id;
2494
2495         for (i = 0; i <  nsds; i++) {
2496
2497                 j = i + sds_idx;
2498
2499                 add_rcv->sds[i].paddr =
2500                         qla_host_to_le64(hw->dma_buf.sds_ring[j].dma_addr);
2501
2502                 add_rcv->sds[i].size =
2503                         qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
2504
2505                 if (ha->msix_count == 2) {
2506                         add_rcv->sds[i].intr_id =
2507                                 qla_host_to_le16(hw->intr_id[0]);
2508                         add_rcv->sds[i].intr_src_bit = qla_host_to_le16(j);
2509                 } else {
2510                         add_rcv->sds[i].intr_id =
2511                                 qla_host_to_le16(hw->intr_id[j]);
2512                         add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0);
2513                 }
2514
2515         }
2516         for (i = 0; (i <  nsds); i++) {
2517                 j = i + sds_idx;
2518
2519                 add_rcv->rds[i].paddr_std =
2520                         qla_host_to_le64(hw->dma_buf.rds_ring[j].dma_addr);
2521
2522                 if (ha->hw.enable_9kb)
2523                         add_rcv->rds[i].std_bsize =
2524                                 qla_host_to_le64(MJUM9BYTES);
2525                 else
2526                         add_rcv->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
2527
2528                 add_rcv->rds[i].std_nentries =
2529                         qla_host_to_le32(NUM_RX_DESCRIPTORS);
2530         }
2531
2532
2533         if (qla_mbx_cmd(ha, (uint32_t *)add_rcv,
2534                 (sizeof (q80_rq_add_rcv_rings_t) >> 2),
2535                 ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
2536                 device_printf(dev, "%s: failed0\n", __func__);
2537                 return (-1);
2538         }
2539
2540         add_rcv_rsp = (q80_rsp_add_rcv_rings_t *)ha->hw.mbox;
2541
2542         err = Q8_MBX_RSP_STATUS(add_rcv_rsp->regcnt_status);
2543
2544         if (err) {
2545                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2546                 return (-1);
2547         }
2548
2549         for (i = 0; i < nsds; i++) {
2550                 hw->sds[(i + sds_idx)].sds_consumer = add_rcv_rsp->sds_cons[i];
2551         }
2552
2553         for (i = 0; i < nsds; i++) {
2554                 hw->rds[(i + sds_idx)].prod_std = add_rcv_rsp->rds[i].prod_std;
2555         }
2556
2557         return (0);
2558 }
2559
2560 /*
2561  * Name: qla_del_rcv_cntxt
2562  * Function: Destroys the Receive Context.
2563  */
2564 static void
2565 qla_del_rcv_cntxt(qla_host_t *ha)
2566 {
2567         device_t                        dev = ha->pci_dev;
2568         q80_rcv_cntxt_destroy_t         *rcntxt;
2569         q80_rcv_cntxt_destroy_rsp_t     *rcntxt_rsp;
2570         uint32_t                        err;
2571         uint8_t                         bcast_mac[6];
2572
2573         if (!ha->hw.flags.init_rx_cnxt)
2574                 return;
2575
2576         if (qla_hw_del_all_mcast(ha))
2577                 return;
2578
2579         if (ha->hw.flags.bcast_mac) {
2580
2581                 bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
2582                 bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
2583
2584                 if (qla_config_mac_addr(ha, bcast_mac, 0))
2585                         return;
2586                 ha->hw.flags.bcast_mac = 0;
2587
2588         }
2589
2590         if (ha->hw.flags.unicast_mac) {
2591                 if (qla_config_mac_addr(ha, ha->hw.mac_addr, 0))
2592                         return;
2593                 ha->hw.flags.unicast_mac = 0;
2594         }
2595
2596         rcntxt = (q80_rcv_cntxt_destroy_t *)ha->hw.mbox;
2597         bzero(rcntxt, (sizeof (q80_rcv_cntxt_destroy_t)));
2598
2599         rcntxt->opcode = Q8_MBX_DESTROY_RX_CNTXT;
2600         rcntxt->count_version = (sizeof (q80_rcv_cntxt_destroy_t) >> 2);
2601         rcntxt->count_version |= Q8_MBX_CMD_VERSION;
2602
2603         rcntxt->cntxt_id = ha->hw.rcv_cntxt_id;
2604
2605         if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
2606                 (sizeof (q80_rcv_cntxt_destroy_t) >> 2),
2607                 ha->hw.mbox, (sizeof(q80_rcv_cntxt_destroy_rsp_t) >> 2), 0)) {
2608                 device_printf(dev, "%s: failed0\n", __func__);
2609                 return;
2610         }
2611         rcntxt_rsp = (q80_rcv_cntxt_destroy_rsp_t *)ha->hw.mbox;
2612
2613         err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
2614
2615         if (err) {
2616                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2617         }
2618
2619         ha->hw.flags.init_rx_cnxt = 0;
2620         return;
2621 }
2622
2623 /*
2624  * Name: qla_init_xmt_cntxt
2625  * Function: Creates the Transmit Context.
2626  */
2627 static int
2628 qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
2629 {
2630         device_t                dev;
2631         qla_hw_t                *hw = &ha->hw;
2632         q80_rq_tx_cntxt_t       *tcntxt;
2633         q80_rsp_tx_cntxt_t      *tcntxt_rsp;
2634         uint32_t                err;
2635         qla_hw_tx_cntxt_t       *hw_tx_cntxt;
2636
2637         hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
2638
2639         dev = ha->pci_dev;
2640
2641         /*
2642          * Create Transmit Context
2643          */
2644         tcntxt = (q80_rq_tx_cntxt_t *)ha->hw.mbox;
2645         bzero(tcntxt, (sizeof (q80_rq_tx_cntxt_t)));
2646
2647         tcntxt->opcode = Q8_MBX_CREATE_TX_CNTXT;
2648         tcntxt->count_version = (sizeof (q80_rq_tx_cntxt_t) >> 2);
2649         tcntxt->count_version |= Q8_MBX_CMD_VERSION;
2650
2651 #ifdef QL_ENABLE_ISCSI_TLV
2652
2653         tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO |
2654                                 Q8_TX_CNTXT_CAP0_TC;
2655
2656         if (txr_idx >= (ha->hw.num_tx_rings >> 1)) {
2657                 tcntxt->traffic_class = 1;
2658         }
2659
2660 #else
2661
2662         tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO;
2663
2664 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
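        /*
         * With QL_ENABLE_ISCSI_TLV, the upper half of the tx rings is
         * placed in traffic class 1, presumably so iSCSI PDUs can be
         * steered onto rings with their own class of service.
         */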
2665
2666         tcntxt->ntx_rings = 1;
2667
2668         tcntxt->tx_ring[0].paddr =
2669                 qla_host_to_le64(hw_tx_cntxt->tx_ring_paddr);
2670         tcntxt->tx_ring[0].tx_consumer =
2671                 qla_host_to_le64(hw_tx_cntxt->tx_cons_paddr);
2672         tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS);
2673
2674         tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[0]);
2675         tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0);
2676
2677
2678         hw_tx_cntxt->txr_free = NUM_TX_DESCRIPTORS;
2679         hw_tx_cntxt->txr_next = hw_tx_cntxt->txr_comp = 0;
2680
2681         if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
2682                 (sizeof (q80_rq_tx_cntxt_t) >> 2),
2683                 ha->hw.mbox,
2684                 (sizeof(q80_rsp_tx_cntxt_t) >> 2), 0)) {
2685                 device_printf(dev, "%s: failed0\n", __func__);
2686                 return (-1);
2687         }
2688         tcntxt_rsp = (q80_rsp_tx_cntxt_t *)ha->hw.mbox;
2689
2690         err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
2691
2692         if (err) {
2693                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2694                 return -1;
2695         }
2696
2697         hw_tx_cntxt->tx_prod_reg = tcntxt_rsp->tx_ring[0].prod_index;
2698         hw_tx_cntxt->tx_cntxt_id = tcntxt_rsp->tx_ring[0].cntxt_id;
2699
2700         if (qla_config_intr_coalesce(ha, hw_tx_cntxt->tx_cntxt_id, 0, 0))
2701                 return (-1);
2702
2703         return (0);
2704 }
2705
2706
2707 /*
2708  * Name: qla_del_xmt_cntxt
2709  * Function: Destroys the Transmit Context.
2710  */
2711 static int
2712 qla_del_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
2713 {
2714         device_t                        dev = ha->pci_dev;
2715         q80_tx_cntxt_destroy_t          *tcntxt;
2716         q80_tx_cntxt_destroy_rsp_t      *tcntxt_rsp;
2717         uint32_t                        err;
2718
2719         tcntxt = (q80_tx_cntxt_destroy_t *)ha->hw.mbox;
2720         bzero(tcntxt, (sizeof (q80_tx_cntxt_destroy_t)));
2721
2722         tcntxt->opcode = Q8_MBX_DESTROY_TX_CNTXT;
2723         tcntxt->count_version = (sizeof (q80_tx_cntxt_destroy_t) >> 2);
2724         tcntxt->count_version |= Q8_MBX_CMD_VERSION;
2725
2726         tcntxt->cntxt_id = ha->hw.tx_cntxt[txr_idx].tx_cntxt_id;
2727
2728         if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
2729                 (sizeof (q80_tx_cntxt_destroy_t) >> 2),
2730                 ha->hw.mbox, (sizeof (q80_tx_cntxt_destroy_rsp_t) >> 2), 0)) {
2731                 device_printf(dev, "%s: failed0\n", __func__);
2732                 return (-1);
2733         }
2734         tcntxt_rsp = (q80_tx_cntxt_destroy_rsp_t *)ha->hw.mbox;
2735
2736         err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
2737
2738         if (err) {
2739                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2740                 return (-1);
2741         }
2742
2743         return (0);
2744 }
2745 static void
2746 qla_del_xmt_cntxt(qla_host_t *ha)
2747 {
2748         uint32_t i;
2749
2750         if (!ha->hw.flags.init_tx_cnxt)
2751                 return;
2752
2753         for (i = 0; i < ha->hw.num_tx_rings; i++) {
2754                 if (qla_del_xmt_cntxt_i(ha, i))
2755                         break;
2756         }
2757         ha->hw.flags.init_tx_cnxt = 0;
2758 }
2759
2760 static int
2761 qla_init_xmt_cntxt(qla_host_t *ha)
2762 {
2763         uint32_t i, j;
2764
2765         for (i = 0; i < ha->hw.num_tx_rings; i++) {
2766                 if (qla_init_xmt_cntxt_i(ha, i) != 0) {
2767                         for (j = 0; j < i; j++)
2768                                 qla_del_xmt_cntxt_i(ha, j);
2769                         return (-1);
2770                 }
2771         }
2772         ha->hw.flags.init_tx_cnxt = 1;
2773         return (0);
2774 }
2775
2776 static int
2777 qla_hw_add_all_mcast(qla_host_t *ha)
2778 {
2779         int i, nmcast;
2780
2781         nmcast = ha->hw.nmcast;
2782
2783         for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
2784                 if ((ha->hw.mcast[i].addr[0] != 0) || 
2785                         (ha->hw.mcast[i].addr[1] != 0) ||
2786                         (ha->hw.mcast[i].addr[2] != 0) ||
2787                         (ha->hw.mcast[i].addr[3] != 0) ||
2788                         (ha->hw.mcast[i].addr[4] != 0) ||
2789                         (ha->hw.mcast[i].addr[5] != 0)) {
2790
2791                         if (qla_config_mac_addr(ha, ha->hw.mcast[i].addr, 1)) {
2792                                 device_printf(ha->pci_dev, "%s: failed\n",
2793                                         __func__);
2794                                 return (-1);
2795                         }
2796
2797                         nmcast--;
2798                 }
2799         }
2800         return 0;
2801 }
2802
2803 static int
2804 qla_hw_del_all_mcast(qla_host_t *ha)
2805 {
2806         int i, nmcast;
2807
2808         nmcast = ha->hw.nmcast;
2809
2810         for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
2811                 if ((ha->hw.mcast[i].addr[0] != 0) || 
2812                         (ha->hw.mcast[i].addr[1] != 0) ||
2813                         (ha->hw.mcast[i].addr[2] != 0) ||
2814                         (ha->hw.mcast[i].addr[3] != 0) ||
2815                         (ha->hw.mcast[i].addr[4] != 0) ||
2816                         (ha->hw.mcast[i].addr[5] != 0)) {
2817
2818                         if (qla_config_mac_addr(ha, ha->hw.mcast[i].addr, 0))
2819                                 return (-1);
2820
2821                         nmcast--;
2822                 }
2823         }
2824         return 0;
2825 }
2826
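/*
 * Name: qla_hw_add_mcast
 * Function: Adds a multicast address to the interface. An all-zero entry
 *      in the cached table marks a free slot; duplicates are ignored.
 */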
2827 static int
2828 qla_hw_add_mcast(qla_host_t *ha, uint8_t *mta)
2829 {
2830         int i;
2831
2832         for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
2833
2834                 if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0)
2835                         return 0; /* it's already been added */
2836         }
2837
2838         for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
2839
2840                 if ((ha->hw.mcast[i].addr[0] == 0) && 
2841                         (ha->hw.mcast[i].addr[1] == 0) &&
2842                         (ha->hw.mcast[i].addr[2] == 0) &&
2843                         (ha->hw.mcast[i].addr[3] == 0) &&
2844                         (ha->hw.mcast[i].addr[4] == 0) &&
2845                         (ha->hw.mcast[i].addr[5] == 0)) {
2846
2847                         if (qla_config_mac_addr(ha, mta, 1))
2848                                 return (-1);
2849
2850                         bcopy(mta, ha->hw.mcast[i].addr, Q8_MAC_ADDR_LEN);
2851                         ha->hw.nmcast++;        
2852
2853                         return 0;
2854                 }
2855         }
2856         return 0;
2857 }
2858
2859 static int
2860 qla_hw_del_mcast(qla_host_t *ha, uint8_t *mta)
2861 {
2862         int i;
2863
2864         for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
2865                 if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) {
2866
2867                         if (qla_config_mac_addr(ha, mta, 0))
2868                                 return (-1);
2869
2870                         ha->hw.mcast[i].addr[0] = 0;
2871                         ha->hw.mcast[i].addr[1] = 0;
2872                         ha->hw.mcast[i].addr[2] = 0;
2873                         ha->hw.mcast[i].addr[3] = 0;
2874                         ha->hw.mcast[i].addr[4] = 0;
2875                         ha->hw.mcast[i].addr[5] = 0;
2876
2877                         ha->hw.nmcast--;        
2878
2879                         return 0;
2880                 }
2881         }
2882         return 0;
2883 }
2884
2885 /*
2886  * Name: ql_hw_set_multi
2887  * Function: Sets the Multicast Addresses provided by the host O.S. into
2888  *      the hardware (for the given interface)
2889  */
2890 int
2891 ql_hw_set_multi(qla_host_t *ha, uint8_t *mcast, uint32_t mcnt,
2892         uint32_t add_mac)
2893 {
2894         int i;
2895         uint8_t *mta = mcast;
2896         int ret = 0;
2897
2898         for (i = 0; i < mcnt; i++) {
2899                 if (add_mac) {
2900                         ret = qla_hw_add_mcast(ha, mta);
2901                         if (ret)
2902                                 break;
2903                 } else {
2904                         ret = qla_hw_del_mcast(ha, mta);
2905                         if (ret)
2906                                 break;
2907                 }
2908                         
2909                 mta += Q8_MAC_ADDR_LEN;
2910         }
2911         return (ret);
2912 }
2913
2914 /*
2915  * Name: qla_hw_tx_done_locked
2916  * Function: Handle Transmit Completions
2917  */
2918 static void
2919 qla_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx)
2920 {
2921         qla_tx_buf_t *txb;
2922         qla_hw_t *hw = &ha->hw;
2923         uint32_t comp_idx, comp_count = 0;
2924         qla_hw_tx_cntxt_t *hw_tx_cntxt;
2925
2926         hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
2927
2928         /* retrieve index of last entry in tx ring completed */
2929         comp_idx = qla_le32_to_host(*(hw_tx_cntxt->tx_cons));
2930
2931         while (comp_idx != hw_tx_cntxt->txr_comp) {
2932
2933                 txb = &ha->tx_ring[txr_idx].tx_buf[hw_tx_cntxt->txr_comp];
2934
2935                 hw_tx_cntxt->txr_comp++;
2936                 if (hw_tx_cntxt->txr_comp == NUM_TX_DESCRIPTORS)
2937                         hw_tx_cntxt->txr_comp = 0;
2938
2939                 comp_count++;
2940
2941                 if (txb->m_head) {
2942                         ha->ifp->if_opackets++;
2943
2944                         bus_dmamap_sync(ha->tx_tag, txb->map,
2945                                 BUS_DMASYNC_POSTWRITE);
2946                         bus_dmamap_unload(ha->tx_tag, txb->map);
2947                         m_freem(txb->m_head);
2948
2949                         txb->m_head = NULL;
2950                 }
2951         }
2952
2953         hw_tx_cntxt->txr_free += comp_count;
2954         return;
2955 }
2956
2957 /*
2958  * Name: ql_hw_tx_done
2959  * Function: Handle Transmit Completions
2960  */
2961 void
2962 ql_hw_tx_done(qla_host_t *ha)
2963 {
2964         int i;
2965         uint32_t flag = 0;
2966
2967         if (!mtx_trylock(&ha->tx_lock)) {
2968                 QL_DPRINT8(ha, (ha->pci_dev,
2969                         "%s: !mtx_trylock(&ha->tx_lock)\n", __func__));
2970                 return;
2971         }
2972         for (i = 0; i < ha->hw.num_tx_rings; i++) {
2973                 qla_hw_tx_done_locked(ha, i);
2974                 if (ha->hw.tx_cntxt[i].txr_free <= (NUM_TX_DESCRIPTORS >> 1))
2975                         flag = 1;
2976         }
2977
2978         if (!flag)
2979                 ha->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2980
2981         QLA_TX_UNLOCK(ha);
2982         return;
2983 }
2984
2985 void
2986 ql_update_link_state(qla_host_t *ha)
2987 {
2988         uint32_t link_state;
2989         uint32_t prev_link_state;
2990
2991         if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2992                 ha->hw.link_up = 0;
2993                 return;
2994         }
2995         link_state = READ_REG32(ha, Q8_LINK_STATE);
2996
2997         prev_link_state =  ha->hw.link_up;
2998
2999         if (ha->pci_func == 0) 
3000                 ha->hw.link_up = (((link_state & 0xF) == 1)? 1 : 0);
3001         else
3002                 ha->hw.link_up = ((((link_state >> 4)& 0xF) == 1)? 1 : 0);
3003
3004         if (prev_link_state !=  ha->hw.link_up) {
3005                 if (ha->hw.link_up) {
3006                         if_link_state_change(ha->ifp, LINK_STATE_UP);
3007                 } else {
3008                         if_link_state_change(ha->ifp, LINK_STATE_DOWN);
3009                 }
3010         }
3011         return;
3012 }
3013
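/*
 * Worked example for the Q8_LINK_STATE decode above (illustrative):
 * PCI function 0 reads its link state from bits [3:0], every other
 * function from bits [7:4], and a field value of 1 means link up.
 * With link_state == 0x00000011, both fields report link up:
 *
 *      (link_state & 0xF) == 1            function 0: up
 *      ((link_state >> 4) & 0xF) == 1     other functions: up
 */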
3014 void
3015 ql_hw_stop_rcv(qla_host_t *ha)
3016 {
3017         int i, done, count = 100;
3018
3019         ha->flags.stop_rcv = 1;
3020
3021         while (count) {
3022                 done = 1;
3023                 for (i = 0; i < ha->hw.num_sds_rings; i++) {
3024                         if (ha->hw.sds[i].rcv_active)
3025                                 done = 0;
3026                 }
3027                 if (done)
3028                         break;
3029                 else 
3030                         qla_mdelay(__func__, 10);
3031                 count--;
3032         }
3033         if (!count)
3034                 device_printf(ha->pci_dev, "%s: Counter expired.\n", __func__);
3035
3036         return;
3037 }
3038
3039 int
3040 ql_hw_check_health(qla_host_t *ha)
3041 {
3042         uint32_t val;
3043
3044         ha->hw.health_count++;
3045
3046         if (ha->hw.health_count < 1000)
3047                 return 0;
3048
3049         ha->hw.health_count = 0;
3050
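        /*
         * The low 16 bits of Q8_ASIC_TEMPERATURE carry the temperature
         * state; states 2 and 3 are treated as alert conditions below
         * (the exact encoding is firmware-defined).
         */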
3051         val = READ_REG32(ha, Q8_ASIC_TEMPERATURE);
3052
3053         if (((val & 0xFFFF) == 2) || ((val & 0xFFFF) == 3) ||
3054                 (QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE))) {
3055                 device_printf(ha->pci_dev, "%s: Temperature Alert [0x%08x]\n",
3056                         __func__, val);
3057                 return -1;
3058         }
3059
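        /*
         * The firmware heartbeat counter must have advanced since the
         * last poll; an unchanged value means the firmware has stalled.
         */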
3060         val = READ_REG32(ha, Q8_FIRMWARE_HEARTBEAT);
3061
3062         if ((val != ha->hw.hbeat_value) &&
3063                 (!(QL_ERR_INJECT(ha, INJCT_HEARTBEAT_FAILURE)))) {
3064                 ha->hw.hbeat_value = val;
3065                 return 0;
3066         }
3067         device_printf(ha->pci_dev, "%s: Heartbeat Failure [0x%08x]\n",
3068                 __func__, val);
3069
3070         return -1;
3071 }
3072
3073 static int
3074 qla_init_nic_func(qla_host_t *ha)
3075 {
3076         device_t                dev;
3077         q80_init_nic_func_t     *init_nic;
3078         q80_init_nic_func_rsp_t *init_nic_rsp;
3079         uint32_t                err;
3080
3081         dev = ha->pci_dev;
3082
3083         init_nic = (q80_init_nic_func_t *)ha->hw.mbox;
3084         bzero(init_nic, sizeof(q80_init_nic_func_t));
3085
3086         init_nic->opcode = Q8_MBX_INIT_NIC_FUNC;
3087         init_nic->count_version = (sizeof (q80_init_nic_func_t) >> 2);
3088         init_nic->count_version |= Q8_MBX_CMD_VERSION;
3089
3090         init_nic->options = Q8_INIT_NIC_REG_DCBX_CHNG_AEN;
3091         init_nic->options |= Q8_INIT_NIC_REG_SFP_CHNG_AEN;
3092         init_nic->options |= Q8_INIT_NIC_REG_IDC_AEN;
3093
3094 //qla_dump_buf8(ha, __func__, init_nic, sizeof (q80_init_nic_func_t));
3095         if (qla_mbx_cmd(ha, (uint32_t *)init_nic,
3096                 (sizeof (q80_init_nic_func_t) >> 2),
3097                 ha->hw.mbox, (sizeof (q80_init_nic_func_rsp_t) >> 2), 0)) {
3098                 device_printf(dev, "%s: failed\n", __func__);
3099                 return -1;
3100         }
3101
3102         init_nic_rsp = (q80_init_nic_func_rsp_t *)ha->hw.mbox;
3103 // qla_dump_buf8(ha, __func__, init_nic_rsp, sizeof (q80_init_nic_func_rsp_t));
3104
3105         err = Q8_MBX_RSP_STATUS(init_nic_rsp->regcnt_status);
3106
3107         if (err) {
3108                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3109         }
3110
3111         return 0;
3112 }
3113
3114 static int
3115 qla_stop_nic_func(qla_host_t *ha)
3116 {
3117         device_t                dev;
3118         q80_stop_nic_func_t     *stop_nic;
3119         q80_stop_nic_func_rsp_t *stop_nic_rsp;
3120         uint32_t                err;
3121
3122         dev = ha->pci_dev;
3123
3124         stop_nic = (q80_stop_nic_func_t *)ha->hw.mbox;
3125         bzero(stop_nic, sizeof(q80_stop_nic_func_t));
3126
3127         stop_nic->opcode = Q8_MBX_STOP_NIC_FUNC;
3128         stop_nic->count_version = (sizeof (q80_stop_nic_func_t) >> 2);
3129         stop_nic->count_version |= Q8_MBX_CMD_VERSION;
3130
3131         stop_nic->options = Q8_STOP_NIC_DEREG_DCBX_CHNG_AEN;
3132         stop_nic->options |= Q8_STOP_NIC_DEREG_SFP_CHNG_AEN;
3133
3134 //qla_dump_buf8(ha, __func__, stop_nic, sizeof (q80_stop_nic_func_t));
3135         if (qla_mbx_cmd(ha, (uint32_t *)stop_nic,
3136                 (sizeof (q80_stop_nic_func_t) >> 2),
3137                 ha->hw.mbox, (sizeof (q80_stop_nic_func_rsp_t) >> 2), 0)) {
3138                 device_printf(dev, "%s: failed\n", __func__);
3139                 return -1;
3140         }
3141
3142         stop_nic_rsp = (q80_stop_nic_func_rsp_t *)ha->hw.mbox;
3143 //qla_dump_buf8(ha, __func__, stop_nic_rsp, sizeof (q80_stop_nic_func_rsp_t));
3144
3145         err = Q8_MBX_RSP_STATUS(stop_nic_rsp->regcnt_status);
3146
3147         if (err) {
3148                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3149         }
3150
3151         return 0;
3152 }
3153
3154 static int
3155 qla_query_fw_dcbx_caps(qla_host_t *ha)
3156 {
3157         device_t                        dev;
3158         q80_query_fw_dcbx_caps_t        *fw_dcbx;
3159         q80_query_fw_dcbx_caps_rsp_t    *fw_dcbx_rsp;
3160         uint32_t                        err;
3161
3162         dev = ha->pci_dev;
3163
3164         fw_dcbx = (q80_query_fw_dcbx_caps_t *)ha->hw.mbox;
3165         bzero(fw_dcbx, sizeof(q80_query_fw_dcbx_caps_t));
3166
3167         fw_dcbx->opcode = Q8_MBX_GET_FW_DCBX_CAPS;
3168         fw_dcbx->count_version = (sizeof (q80_query_fw_dcbx_caps_t) >> 2);
3169         fw_dcbx->count_version |= Q8_MBX_CMD_VERSION;
3170
3171         ql_dump_buf8(ha, __func__, fw_dcbx, sizeof (q80_query_fw_dcbx_caps_t));
3172         if (qla_mbx_cmd(ha, (uint32_t *)fw_dcbx,
3173                 (sizeof (q80_query_fw_dcbx_caps_t) >> 2),
3174                 ha->hw.mbox, (sizeof (q80_query_fw_dcbx_caps_rsp_t) >> 2), 0)) {
3175                 device_printf(dev, "%s: failed\n", __func__);
3176                 return -1;
3177         }
3178
3179         fw_dcbx_rsp = (q80_query_fw_dcbx_caps_rsp_t *)ha->hw.mbox;
3180         ql_dump_buf8(ha, __func__, fw_dcbx_rsp,
3181                 sizeof (q80_query_fw_dcbx_caps_rsp_t));
3182
3183         err = Q8_MBX_RSP_STATUS(fw_dcbx_rsp->regcnt_status);
3184
3185         if (err) {
3186                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3187         }
3188
3189         return 0;
3190 }
3191
3192 static int
3193 qla_idc_ack(qla_host_t *ha, uint32_t aen_mb1, uint32_t aen_mb2,
3194         uint32_t aen_mb3, uint32_t aen_mb4)
3195 {
3196         device_t                dev;
3197         q80_idc_ack_t           *idc_ack;
3198         q80_idc_ack_rsp_t       *idc_ack_rsp;
3199         uint32_t                err;
3200         int                     count = 300;
3201
3202         dev = ha->pci_dev;
3203
3204         idc_ack = (q80_idc_ack_t *)ha->hw.mbox;
3205         bzero(idc_ack, sizeof(q80_idc_ack_t));
3206
3207         idc_ack->opcode = Q8_MBX_IDC_ACK;
3208         idc_ack->count_version = (sizeof (q80_idc_ack_t) >> 2);
3209         idc_ack->count_version |= Q8_MBX_CMD_VERSION;
3210
3211         idc_ack->aen_mb1 = aen_mb1;
3212         idc_ack->aen_mb2 = aen_mb2;
3213         idc_ack->aen_mb3 = aen_mb3;
3214         idc_ack->aen_mb4 = aen_mb4;
3215
3216         ha->hw.imd_compl = 0;
3217
3218         if (qla_mbx_cmd(ha, (uint32_t *)idc_ack,
3219                 (sizeof (q80_idc_ack_t) >> 2),
3220                 ha->hw.mbox, (sizeof (q80_idc_ack_rsp_t) >> 2), 0)) {
3221                 device_printf(dev, "%s: failed\n", __func__);
3222                 return -1;
3223         }
3224
3225         idc_ack_rsp = (q80_idc_ack_rsp_t *)ha->hw.mbox;
3226
3227         err = Q8_MBX_RSP_STATUS(idc_ack_rsp->regcnt_status);
3228
3229         if (err) {
3230                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3231                 return(-1);
3232         }
3233
3234         while (count && !ha->hw.imd_compl) {
3235                 qla_mdelay(__func__, 100);
3236                 count--;
3237         }
3238
3239         if (!count)
3240                 return -1;
3241         else
3242                 device_printf(dev, "%s: count %d\n", __func__, count);
3243
3244         return (0);
3245 }
3246
3247 static int
3248 qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits)
3249 {
3250         device_t                dev;
3251         q80_set_port_cfg_t      *pcfg;
3252         q80_set_port_cfg_rsp_t  *pfg_rsp;
3253         uint32_t                err;
3254         int                     count = 300;
3255
3256         dev = ha->pci_dev;
3257
3258         pcfg = (q80_set_port_cfg_t *)ha->hw.mbox;
3259         bzero(pcfg, sizeof(q80_set_port_cfg_t));
3260
3261         pcfg->opcode = Q8_MBX_SET_PORT_CONFIG;
3262         pcfg->count_version = (sizeof (q80_set_port_cfg_t) >> 2);
3263         pcfg->count_version |= Q8_MBX_CMD_VERSION;
3264
3265         pcfg->cfg_bits = cfg_bits;
3266
3267         device_printf(dev, "%s: cfg_bits"
3268                 " [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
3269                 " [0x%x, 0x%x, 0x%x]\n", __func__,
3270                 ((cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
3271                 ((cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
3272                 ((cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0));
3273
3274         ha->hw.imd_compl = 0;
3275
3276         if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
3277                 (sizeof (q80_set_port_cfg_t) >> 2),
3278                 ha->hw.mbox, (sizeof (q80_set_port_cfg_rsp_t) >> 2), 0)) {
3279                 device_printf(dev, "%s: failed\n", __func__);
3280                 return -1;
3281         }
3282
3283         pfg_rsp = (q80_set_port_cfg_rsp_t *)ha->hw.mbox;
3284
3285         err = Q8_MBX_RSP_STATUS(pfg_rsp->regcnt_status);
3286
3287         if (err == Q8_MBX_RSP_IDC_INTRMD_RSP) {
3288                 while (count && !ha->hw.imd_compl) {
3289                         qla_mdelay(__func__, 100);
3290                         count--;
3291                 }
3292                 if (count) {
3293                         device_printf(dev, "%s: count %d\n", __func__, count);
3294
3295                         err = 0;
3296                 }
3297         }
3298
3299         if (err) {
3300                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3301                 return(-1);
3302         }
3303
3304         return (0);
3305 }
3306
3307
3308 static int
3309 qla_get_minidump_tmplt_size(qla_host_t *ha, uint32_t *size)
3310 {
3311         uint32_t                        err;
3312         device_t                        dev = ha->pci_dev;
3313         q80_config_md_templ_size_t      *md_size;
3314         q80_config_md_templ_size_rsp_t  *md_size_rsp;
3315
3316 #ifndef QL_LDFLASH_FW
3317
3318         ql_minidump_template_hdr_t *hdr;
3319
3320         hdr = (ql_minidump_template_hdr_t *)ql83xx_minidump;
3321         *size = hdr->size_of_template;
3322         return (0);
3323
3324 #endif /* #ifndef QL_LDFLASH_FW */
3325
3326         md_size = (q80_config_md_templ_size_t *) ha->hw.mbox;
3327         bzero(md_size, sizeof(q80_config_md_templ_size_t));
3328
3329         md_size->opcode = Q8_MBX_GET_MINIDUMP_TMPLT_SIZE;
3330         md_size->count_version = (sizeof (q80_config_md_templ_size_t) >> 2);
3331         md_size->count_version |= Q8_MBX_CMD_VERSION;
3332
3333         if (qla_mbx_cmd(ha, (uint32_t *) md_size,
3334                 (sizeof(q80_config_md_templ_size_t) >> 2), ha->hw.mbox,
3335                 (sizeof(q80_config_md_templ_size_rsp_t) >> 2), 0)) {
3336
3337                 device_printf(dev, "%s: failed\n", __func__);
3338
3339                 return (-1);
3340         }
3341
3342         md_size_rsp = (q80_config_md_templ_size_rsp_t *) ha->hw.mbox;
3343
3344         err = Q8_MBX_RSP_STATUS(md_size_rsp->regcnt_status);
3345
3346         if (err) {
3347                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3348                 return(-1);
3349         }
3350
3351         *size = md_size_rsp->templ_size;
3352
3353         return (0);
3354 }
3355
3356 static int
3357 qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits)
3358 {
3359         device_t                dev;
3360         q80_get_port_cfg_t      *pcfg;
3361         q80_get_port_cfg_rsp_t  *pcfg_rsp;
3362         uint32_t                err;
3363
3364         dev = ha->pci_dev;
3365
3366         pcfg = (q80_get_port_cfg_t *)ha->hw.mbox;
3367         bzero(pcfg, sizeof(q80_get_port_cfg_t));
3368
3369         pcfg->opcode = Q8_MBX_GET_PORT_CONFIG;
3370         pcfg->count_version = (sizeof (q80_get_port_cfg_t) >> 2);
3371         pcfg->count_version |= Q8_MBX_CMD_VERSION;
3372
3373         if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
3374                 (sizeof (q80_get_port_cfg_t) >> 2),
3375                 ha->hw.mbox, (sizeof (q80_get_port_cfg_rsp_t) >> 2), 0)) {
3376                 device_printf(dev, "%s: failed\n", __func__);
3377                 return -1;
3378         }
3379
3380         pcfg_rsp = (q80_get_port_cfg_rsp_t *)ha->hw.mbox;
3381
3382         err = Q8_MBX_RSP_STATUS(pcfg_rsp->regcnt_status);
3383
3384         if (err) {
3385                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3386                 return(-1);
3387         }
3388
3389         device_printf(dev, "%s: [cfg_bits, port type]"
3390                 " [0x%08x, 0x%02x] [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
3391                 " [0x%x, 0x%x, 0x%x]\n", __func__,
3392                 pcfg_rsp->cfg_bits, pcfg_rsp->phys_port_type,
3393                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
3394                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
3395                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0)
3396                 );
3397
3398         *cfg_bits = pcfg_rsp->cfg_bits;
3399
3400         return (0);
3401 }
3402
3403 int
3404 qla_iscsi_pdu(qla_host_t *ha, struct mbuf *mp)
3405 {
3406         struct ether_vlan_header        *eh;
3407         uint16_t                        etype;
3408         struct ip                       *ip = NULL;
3409         struct ip6_hdr                  *ip6 = NULL;
3410         struct tcphdr                   *th = NULL;
3411         uint32_t                        hdrlen;
3412         uint32_t                        offset;
3413         uint8_t                         buf[sizeof(struct ip6_hdr)];
3414
3415         eh = mtod(mp, struct ether_vlan_header *);
3416
3417         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3418                 hdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3419                 etype = ntohs(eh->evl_proto);
3420         } else {
3421                 hdrlen = ETHER_HDR_LEN;
3422                 etype = ntohs(eh->evl_encap_proto);
3423         }
3424
3425         if (etype == ETHERTYPE_IP) {
3426
3427                 offset = (hdrlen + sizeof (struct ip));
3428
3429                 if (mp->m_len >= offset) {
3430                         ip = (struct ip *)(mp->m_data + hdrlen);
3431                 } else {
3432                         m_copydata(mp, hdrlen, sizeof (struct ip), buf);
3433                         ip = (struct ip *)buf;
3434                 }
3435
3436                 if (ip->ip_p == IPPROTO_TCP) {
3437
3438                         hdrlen += ip->ip_hl << 2;
3439                         offset = hdrlen + 4;
3440         
3441                         if (mp->m_len >= offset) {
3442                                 th = (struct tcphdr *)(mp->m_data + hdrlen);
3443                         } else {
3444                                 m_copydata(mp, hdrlen, 4, buf);
3445                                 th = (struct tcphdr *)buf;
3446                         }
3447                 }
3448
3449         } else if (etype == ETHERTYPE_IPV6) {
3450
3451                 offset = (hdrlen + sizeof (struct ip6_hdr));
3452
3453                 if (mp->m_len >= offset) {
3454                         ip6 = (struct ip6_hdr *)(mp->m_data + hdrlen);
3455                 } else {
3456                         m_copydata(mp, hdrlen, sizeof (struct ip6_hdr), buf);
3457                         ip6 = (struct ip6_hdr *)buf;
3458                 }
3459
3460                 if (ip6->ip6_nxt == IPPROTO_TCP) {
3461
3462                         hdrlen += sizeof(struct ip6_hdr);
3463                         offset = hdrlen + 4;
3464
3465                         if (mp->m_len >= offset) {
3466                                 th = (struct tcphdr *)(mp->m_data + hdrlen);
3467                         } else {
3468                                 m_copydata(mp, hdrlen, 4, buf);
3469                                 th = (struct tcphdr *)buf;
3470                         }
3471                 }
3472         }
3473
3474         if (th != NULL) {
3475                 if ((th->th_sport == htons(3260)) ||
3476                         (th->th_dport == htons(3260)))
3477                         return 0;
3478         }
3479         return (-1);
3480 }
3481
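/*
 * Note on qla_iscsi_pdu() above (illustrative): a frame is classified as
 * iSCSI purely by TCP port 3260 (the IANA-assigned iSCSI port) on either
 * end of the connection. Only the first 4 bytes of the TCP header (the
 * source and destination ports) are ever examined, which is why the
 * m_copydata() fallbacks copy exactly 4 bytes:
 *
 *      m_copydata(mp, hdrlen, 4, buf);         th_sport + th_dport
 *      th = (struct tcphdr *)buf;
 *      if (th->th_sport == htons(3260) || th->th_dport == htons(3260))
 *              return 0;                       iSCSI PDU
 */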
3482 void
3483 qla_hw_async_event(qla_host_t *ha)
3484 {
3485         switch (ha->hw.aen_mb0) {
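        /*
         * 0x8101 appears to be the IDC (Inter-Driver Communication)
         * request AEN; it is the only event handled here, and it is
         * acknowledged back to the firmware via qla_idc_ack().
         */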
3486         case 0x8101:
3487                 (void)qla_idc_ack(ha, ha->hw.aen_mb1, ha->hw.aen_mb2,
3488                         ha->hw.aen_mb3, ha->hw.aen_mb4);
3489
3490                 break;
3491
3492         default:
3493                 break;
3494         }
3495
3496         return;
3497 }
3498
3499 #ifdef QL_LDFLASH_FW
3500 static int
3501 ql_get_minidump_template(qla_host_t *ha)
3502 {
3503         uint32_t                        err;
3504         device_t                        dev = ha->pci_dev;
3505         q80_config_md_templ_cmd_t       *md_templ;
3506         q80_config_md_templ_cmd_rsp_t   *md_templ_rsp;
3507
3508         md_templ = (q80_config_md_templ_cmd_t *) ha->hw.mbox;
3509         bzero(md_templ, (sizeof (q80_config_md_templ_cmd_t)));
3510
3511         md_templ->opcode = Q8_MBX_GET_MINIDUMP_TMPLT;
3512         md_templ->count_version = ( sizeof(q80_config_md_templ_cmd_t) >> 2);
3513         md_templ->count_version |= Q8_MBX_CMD_VERSION;
3514
3515         md_templ->buf_addr = ha->hw.dma_buf.minidump.dma_addr;
3516         md_templ->buff_size = ha->hw.dma_buf.minidump.size;
3517
3518         if (qla_mbx_cmd(ha, (uint32_t *) md_templ,
3519                 (sizeof(q80_config_md_templ_cmd_t) >> 2),
3520                  ha->hw.mbox,
3521                 (sizeof(q80_config_md_templ_cmd_rsp_t) >> 2), 0)) {
3522
3523                 device_printf(dev, "%s: failed\n", __func__);
3524
3525                 return (-1);
3526         }
3527
3528         md_templ_rsp = (q80_config_md_templ_cmd_rsp_t *) ha->hw.mbox;
3529
3530         err = Q8_MBX_RSP_STATUS(md_templ_rsp->regcnt_status);
3531
3532         if (err) {
3533                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3534                 return (-1);
3535         }
3536
3537         return (0);
3538
3539 }
3540 #endif /* #ifdef QL_LDFLASH_FW */
3541
3542 /*
3543  * Minidump related functionality 
3544  */
3545
3546 static int ql_parse_template(qla_host_t *ha);
3547
3548 static uint32_t ql_rdcrb(qla_host_t *ha,
3549                         ql_minidump_entry_rdcrb_t *crb_entry,
3550                         uint32_t * data_buff);
3551
3552 static uint32_t ql_pollrd(qla_host_t *ha,
3553                         ql_minidump_entry_pollrd_t *entry,
3554                         uint32_t * data_buff);
3555
3556 static uint32_t ql_pollrd_modify_write(qla_host_t *ha,
3557                         ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
3558                         uint32_t *data_buff);
3559
3560 static uint32_t ql_L2Cache(qla_host_t *ha,
3561                         ql_minidump_entry_cache_t *cacheEntry,
3562                         uint32_t * data_buff);
3563
3564 static uint32_t ql_L1Cache(qla_host_t *ha,
3565                         ql_minidump_entry_cache_t *cacheEntry,
3566                         uint32_t *data_buff);
3567
3568 static uint32_t ql_rdocm(qla_host_t *ha,
3569                         ql_minidump_entry_rdocm_t *ocmEntry,
3570                         uint32_t *data_buff);
3571
3572 static uint32_t ql_rdmem(qla_host_t *ha,
3573                         ql_minidump_entry_rdmem_t *mem_entry,
3574                         uint32_t *data_buff);
3575
3576 static uint32_t ql_rdrom(qla_host_t *ha,
3577                         ql_minidump_entry_rdrom_t *romEntry,
3578                         uint32_t *data_buff);
3579
3580 static uint32_t ql_rdmux(qla_host_t *ha,
3581                         ql_minidump_entry_mux_t *muxEntry,
3582                         uint32_t *data_buff);
3583
3584 static uint32_t ql_rdmux2(qla_host_t *ha,
3585                         ql_minidump_entry_mux2_t *muxEntry,
3586                         uint32_t *data_buff);
3587
3588 static uint32_t ql_rdqueue(qla_host_t *ha,
3589                         ql_minidump_entry_queue_t *queueEntry,
3590                         uint32_t *data_buff);
3591
3592 static uint32_t ql_cntrl(qla_host_t *ha,
3593                         ql_minidump_template_hdr_t *template_hdr,
3594                         ql_minidump_entry_cntrl_t *crbEntry);
3595
3596
3597 static uint32_t
3598 ql_minidump_size(qla_host_t *ha)
3599 {
3600         uint32_t i, k;
3601         uint32_t size = 0;
3602         ql_minidump_template_hdr_t *hdr;
3603
3604         hdr = (ql_minidump_template_hdr_t *)ha->hw.dma_buf.minidump.dma_b;
3605
3606         i = 0x2;
3607
3608         for (k = 1; k < QL_DBG_CAP_SIZE_ARRAY_LEN; k++) {
3609                 if (i & ha->hw.mdump_capture_mask)
3610                         size += hdr->capture_size_array[k];
3611                 i = i << 1;
3612         }
3613         return (size);
3614 }
3615
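/*
 * Worked example for ql_minidump_size() above (illustrative): capture
 * mask bit (1 << k) selects hdr->capture_size_array[k], starting at
 * k = 1 (mask bit 0x2). With mdump_capture_mask == 0x1E the total is:
 *
 *      size = hdr->capture_size_array[1]       mask bit 0x02
 *           + hdr->capture_size_array[2]       mask bit 0x04
 *           + hdr->capture_size_array[3]       mask bit 0x08
 *           + hdr->capture_size_array[4];      mask bit 0x10
 */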
3616 static void
3617 ql_free_minidump_buffer(qla_host_t *ha)
3618 {
3619         if (ha->hw.mdump_buffer != NULL) {
3620                 free(ha->hw.mdump_buffer, M_QLA83XXBUF);
3621                 ha->hw.mdump_buffer = NULL;
3622                 ha->hw.mdump_buffer_size = 0;
3623         }
3624         return;
3625 }
3626
3627 static int
3628 ql_alloc_minidump_buffer(qla_host_t *ha)
3629 {
3630         ha->hw.mdump_buffer_size = ql_minidump_size(ha);
3631
3632         if (!ha->hw.mdump_buffer_size)
3633                 return (-1);
3634
3635         ha->hw.mdump_buffer = malloc(ha->hw.mdump_buffer_size, M_QLA83XXBUF,
3636                                         M_NOWAIT);
3637
3638         if (ha->hw.mdump_buffer == NULL)
3639                 return (-1);
3640
3641         return (0);
3642 }
3643
3644 static void
3645 ql_free_minidump_template_buffer(qla_host_t *ha)
3646 {
3647         if (ha->hw.mdump_template != NULL) {
3648                 free(ha->hw.mdump_template, M_QLA83XXBUF);
3649                 ha->hw.mdump_template = NULL;
3650                 ha->hw.mdump_template_size = 0;
3651         }
3652         return;
3653 }
3654
3655 static int
3656 ql_alloc_minidump_template_buffer(qla_host_t *ha)
3657 {
3658         ha->hw.mdump_template_size = ha->hw.dma_buf.minidump.size;
3659
3660         ha->hw.mdump_template = malloc(ha->hw.mdump_template_size,
3661                                         M_QLA83XXBUF, M_NOWAIT);
3662
3663         if (ha->hw.mdump_template == NULL)
3664                 return (-1);
3665
3666         return (0);
3667 }
3668
3669 static int
3670 ql_alloc_minidump_buffers(qla_host_t *ha)
3671 {
3672         int ret;
3673
3674         ret = ql_alloc_minidump_template_buffer(ha);
3675
3676         if (ret)
3677                 return (ret);
3678
3679         ret = ql_alloc_minidump_buffer(ha);
3680
3681         if (ret)
3682                 ql_free_minidump_template_buffer(ha);
3683
3684         return (ret);
3685 }
3686
3687
3688 static uint32_t
3689 ql_validate_minidump_checksum(qla_host_t *ha)
3690 {
3691         uint64_t sum = 0;
3692         int count;
3693         uint32_t *template_buff;
3694
3695         count = ha->hw.dma_buf.minidump.size / sizeof (uint32_t);
3696         template_buff = ha->hw.dma_buf.minidump.dma_b;
3697
3698         while (count-- > 0) {
3699                 sum += *template_buff++;
3700         }
3701
3702         while (sum >> 32) {
3703                 sum = (sum & 0xFFFFFFFF) + (sum >> 32);
3704         }
3705
3706         return (~sum);
3707 }
3708
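/*
 * Worked example for ql_validate_minidump_checksum() above (illustrative):
 * the template is summed as 32-bit words into a 64-bit accumulator, the
 * carries are folded back in end-around fashion, and the one's complement
 * of the folded sum is returned. A template whose checksum field brings
 * the folded sum to 0xFFFFFFFF therefore validates to 0:
 *
 *      sum = 0x1FFFFFFFEULL;
 *      sum = (sum & 0xFFFFFFFF) + (sum >> 32);   now 0xFFFFFFFF
 *      return (~sum);                            0 as uint32_t: good
 */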
3709 int
3710 ql_minidump_init(qla_host_t *ha)
3711 {
3712         int             ret = 0;
3713         uint32_t        template_size = 0;
3714         device_t        dev = ha->pci_dev;
3715
3716         /*
3717          * Get Minidump Template Size
3718          */
3719         ret = qla_get_minidump_tmplt_size(ha, &template_size);
3720
3721         if (ret || (template_size == 0)) {
3722                 device_printf(dev, "%s: failed [%d, %d]\n", __func__, ret,
3723                         template_size);
3724                 return (-1);
3725         }
3726
3727         /*
3728          * Allocate Memory for Minidump Template
3729          */
3730
3731         ha->hw.dma_buf.minidump.alignment = 8;
3732         ha->hw.dma_buf.minidump.size = template_size;
3733
3734 #ifdef QL_LDFLASH_FW
3735         if (ql_alloc_dmabuf(ha, &ha->hw.dma_buf.minidump)) {
3736
3737                 device_printf(dev, "%s: minidump dma alloc failed\n", __func__);
3738
3739                 return (-1);
3740         }
3741         ha->hw.dma_buf.flags.minidump = 1;
3742
3743         /*
3744          * Retrieve Minidump Template
3745          */
3746         ret = ql_get_minidump_template(ha);
3747 #else
3748         ha->hw.dma_buf.minidump.dma_b = ql83xx_minidump;
3749
3750 #endif /* #ifdef QL_LDFLASH_FW */
3751
3752         if (ret == 0) {
3753
3754                 ret = ql_validate_minidump_checksum(ha);
3755
3756                 if (ret == 0) {
3757
3758                         ret = ql_alloc_minidump_buffers(ha);
3759
3760                         if (ret == 0)
3761                                 ha->hw.mdump_init = 1;
3762                         else
3763                                 device_printf(dev,
3764                                         "%s: ql_alloc_minidump_buffers"
3765                                         " failed\n", __func__);
3766                 } else {
3767                         device_printf(dev, "%s: ql_validate_minidump_checksum"
3768                                 " failed\n", __func__);
3769                 }
3770         } else {
3771                 device_printf(dev, "%s: ql_get_minidump_template failed\n",
3772                          __func__);
3773         }
3774
3775         if (ret)
3776                 ql_minidump_free(ha);
3777
3778         return (ret);
3779 }
3780
3781 static void
3782 ql_minidump_free(qla_host_t *ha)
3783 {
3784         ha->hw.mdump_init = 0;
3785         if (ha->hw.dma_buf.flags.minidump) {
3786                 ha->hw.dma_buf.flags.minidump = 0;
3787                 ql_free_dmabuf(ha, &ha->hw.dma_buf.minidump);
3788         }
3789
3790         ql_free_minidump_template_buffer(ha);
3791         ql_free_minidump_buffer(ha);
3792
3793         return;
3794 }
3795
3796 void
3797 ql_minidump(qla_host_t *ha)
3798 {
3799         if (!ha->hw.mdump_init)
3800                 return;
3801
3802         if (ha->hw.mdump_done)
3803                 return;
3804
3805         ha->hw.mdump_start_seq_index = ql_stop_sequence(ha);
3806
3807         bzero(ha->hw.mdump_buffer, ha->hw.mdump_buffer_size);
3808         bzero(ha->hw.mdump_template, ha->hw.mdump_template_size);
3809
3810         bcopy(ha->hw.dma_buf.minidump.dma_b, ha->hw.mdump_template,
3811                 ha->hw.mdump_template_size);
3812
3813         ql_parse_template(ha);
3814  
3815         ql_start_sequence(ha, ha->hw.mdump_start_seq_index);
3816
3817         ha->hw.mdump_done = 1;
3818
3819         return;
3820 }
3821
3822
3823 /*
3824  * helper routines
3825  */
3826 static void 
3827 ql_entry_err_chk(ql_minidump_entry_t *entry, uint32_t esize)
3828 {
3829         if (esize != entry->hdr.entry_capture_size) {
3830                 entry->hdr.entry_capture_size = esize;
3831                 entry->hdr.driver_flags |= QL_DBG_SIZE_ERR_FLAG;
3832         }
3833         return;
3834 }
3835
3836
3837 static int 
3838 ql_parse_template(qla_host_t *ha)
3839 {
3840         uint32_t num_of_entries, buff_level, e_cnt, esize;
3841         uint32_t end_cnt, rv = 0;
3842         char *dump_buff, *dbuff;
3843         int sane_start = 0, sane_end = 0;
3844         ql_minidump_template_hdr_t *template_hdr;
3845         ql_minidump_entry_t *entry;
3846         uint32_t capture_mask; 
3847         uint32_t dump_size; 
3848
3849         /* Setup parameters */
3850         template_hdr = (ql_minidump_template_hdr_t *)ha->hw.mdump_template;
3851
3852         if (template_hdr->entry_type == TLHDR)
3853                 sane_start = 1;
3854         
3855         dump_buff = (char *) ha->hw.mdump_buffer;
3856
3857         num_of_entries = template_hdr->num_of_entries;
3858
3859         entry = (ql_minidump_entry_t *) ((char *)template_hdr 
3860                         + template_hdr->first_entry_offset );
3861
3862         template_hdr->saved_state_array[QL_OCM0_ADDR_INDX] =
3863                 template_hdr->ocm_window_array[ha->pci_func];
3864         template_hdr->saved_state_array[QL_PCIE_FUNC_INDX] = ha->pci_func;
3865
3866         capture_mask = ha->hw.mdump_capture_mask;
3867         dump_size = ha->hw.mdump_buffer_size;
3868
3869         template_hdr->driver_capture_mask = capture_mask;
3870
3871         QL_DPRINT80(ha, (ha->pci_dev,
3872                 "%s: sane_start = %d num_of_entries = %d "
3873                 "capture_mask = 0x%x dump_size = %d \n", 
3874                 __func__, sane_start, num_of_entries, capture_mask, dump_size));
3875
3876         for (buff_level = 0, e_cnt = 0; e_cnt < num_of_entries; e_cnt++) {
3877
3878                 /*
3879                  * If the entry's capture mask does not intersect the requested
3880                  * capture mask, mark the entry skipped in driver_flags and move on.
3881                  */
3882                 
3883                 if (!(entry->hdr.entry_capture_mask & capture_mask)) {
3884
3885                         entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
3886                         entry = (ql_minidump_entry_t *) ((char *) entry
3887                                         + entry->hdr.entry_size);
3888                         continue;
3889                 }
3890
3891                 /*
3892                  * This is ONLY needed in implementations where
3893                  * the capture buffer allocated is too small to capture
3894                  * all of the required entries for a given capture mask.
3895                  * We need to empty the buffer contents to a file
3896                  * if possible, before processing the next entry.
3897                  * If the buff_full_flag is set, no further capture will happen
3898                  * and all remaining non-control entries will be skipped.
3899                  */
3900                 if (entry->hdr.entry_capture_size != 0) {
3901                         if ((buff_level + entry->hdr.entry_capture_size) >
3902                                 dump_size) {
3903                                 /*  Try to recover by emptying buffer to file */
3904                                 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
3905                                 entry = (ql_minidump_entry_t *) ((char *) entry
3906                                                 + entry->hdr.entry_size);
3907                                 continue;
3908                         }
3909                 }
3910
3911                 /*
3912                  * Decode the entry type and process it accordingly
3913                  */
3914
3915                 switch (entry->hdr.entry_type) {
3916                 case RDNOP:
3917                         break;
3918
3919                 case RDEND:
3920                         if (sane_end == 0) {
3921                                 end_cnt = e_cnt;
3922                         }
3923                         sane_end++;
3924                         break;
3925
3926                 case RDCRB:
3927                         dbuff = dump_buff + buff_level;
3928                         esize = ql_rdcrb(ha, (void *)entry, (void *)dbuff);
3929                         ql_entry_err_chk(entry, esize);
3930                         buff_level += esize;
3931                         break;
3932
3933                 case POLLRD:
3934                         dbuff = dump_buff + buff_level;
3935                         esize = ql_pollrd(ha, (void *)entry, (void *)dbuff);
3936                         ql_entry_err_chk(entry, esize);
3937                         buff_level += esize;
3938                         break;
3939
3940                 case POLLRDMWR:
3941                         dbuff = dump_buff + buff_level;
3942                         esize = ql_pollrd_modify_write(ha, (void *)entry,
3943                                         (void *)dbuff);
3944                         ql_entry_err_chk(entry, esize);
3945                         buff_level += esize;
3946                         break;
3947
3948                 case L2ITG:
3949                 case L2DTG:
3950                 case L2DAT:
3951                 case L2INS:
3952                         dbuff = dump_buff + buff_level;
3953                         esize = ql_L2Cache(ha, (void *)entry, (void *)dbuff);
3954                         if (esize == -1) {
3955                                 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
3956                         } else {
3957                                 ql_entry_err_chk(entry, esize);
3958                                 buff_level += esize;
3959                         }
3960                         break;
3961
3962                 case L1DAT:
3963                 case L1INS:
3964                         dbuff = dump_buff + buff_level;
3965                         esize = ql_L1Cache(ha, (void *)entry, (void *)dbuff);
3966                         ql_entry_err_chk(entry, esize);
3967                         buff_level += esize;
3968                         break;
3969
3970                 case RDOCM:
3971                         dbuff = dump_buff + buff_level;
3972                         esize = ql_rdocm(ha, (void *)entry, (void *)dbuff);
3973                         ql_entry_err_chk(entry, esize);
3974                         buff_level += esize;
3975                         break;
3976
3977                 case RDMEM:
3978                         dbuff = dump_buff + buff_level;
3979                         esize = ql_rdmem(ha, (void *)entry, (void *)dbuff);
3980                         ql_entry_err_chk(entry, esize);
3981                         buff_level += esize;
3982                         break;
3983
3984                 case BOARD:
3985                 case RDROM:
3986                         dbuff = dump_buff + buff_level;
3987                         esize = ql_rdrom(ha, (void *)entry, (void *)dbuff);
3988                         ql_entry_err_chk(entry, esize);
3989                         buff_level += esize;
3990                         break;
3991
3992                 case RDMUX:
3993                         dbuff = dump_buff + buff_level;
3994                         esize = ql_rdmux(ha, (void *)entry, (void *)dbuff);
3995                         ql_entry_err_chk(entry, esize);
3996                         buff_level += esize;
3997                         break;
3998
3999                 case RDMUX2:
4000                         dbuff = dump_buff + buff_level;
4001                         esize = ql_rdmux2(ha, (void *)entry, (void *)dbuff);
4002                         ql_entry_err_chk(entry, esize);
4003                         buff_level += esize;
4004                         break;
4005
4006                 case QUEUE:
4007                         dbuff = dump_buff + buff_level;
4008                         esize = ql_rdqueue(ha, (void *)entry, (void *)dbuff);
4009                         ql_entry_err_chk(entry, esize);
4010                         buff_level += esize;
4011                         break;
4012
4013                 case CNTRL:
4014                         if ((rv = ql_cntrl(ha, template_hdr, (void *)entry))) {
4015                                 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4016                         }
4017                         break;
4018                 default:
4019                         entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4020                         break;
4021                 }
4022                 /*  next entry in the template */
4023                 entry = (ql_minidump_entry_t *) ((char *) entry
4024                                                 + entry->hdr.entry_size);
4025         }
4026
4027         if (!sane_start || (sane_end > 1)) {
4028                 device_printf(ha->pci_dev,
4029                         "\n%s: Template configuration error. Check Template\n",
4030                         __func__);
4031         }
4032         
4033         QL_DPRINT80(ha, (ha->pci_dev, "%s: Minidump num of entries = %d\n",
4034                 __func__, template_hdr->num_of_entries));
4035
4036         return 0;
4037 }
4038
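/*
 * Structure of the walk in ql_parse_template() above (illustrative):
 * every entry begins with a common header carrying its type, total size
 * and capture flags, so the parser can step through entries of unknown
 * layout:
 *
 *      entry = (ql_minidump_entry_t *)((char *)entry +
 *                      entry->hdr.entry_size);
 *
 * A well-formed template identifies itself as TLHDR (sane_start) and
 * contains exactly one RDEND entry (sane_end); anything else triggers
 * the configuration-error message above.
 */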
4039 /*
4040  * Read CRB operation.
4041  */
4042 static uint32_t
4043 ql_rdcrb(qla_host_t *ha, ql_minidump_entry_rdcrb_t * crb_entry,
4044         uint32_t * data_buff)
4045 {
4046         int loop_cnt;
4047         int ret;
4048         uint32_t op_count, addr, stride, value = 0;
4049
4050         addr = crb_entry->addr;
4051         op_count = crb_entry->op_count;
4052         stride = crb_entry->addr_stride;
4053
4054         for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
4055
4056                 ret = ql_rdwr_indreg32(ha, addr, &value, 1);
4057
4058                 if (ret)
4059                         return (0);
4060
4061                 *data_buff++ = addr;
4062                 *data_buff++ = value;
4063                 addr = addr + stride;
4064         }
4065
4066         /*
4067          * Return the amount of data written to the capture buffer, in bytes.
4068          */
4069         return (op_count * (2 * sizeof(uint32_t)));
4070 }
4071
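/*
 * Output layout for ql_rdcrb() above (illustrative): each register read
 * emits an (address, value) pair of 32-bit words, so op_count reads
 * consume op_count * 8 bytes of the capture buffer:
 *
 *      data_buff[0] = addr;            data_buff[1] = value at addr
 *      data_buff[2] = addr + stride;   data_buff[3] = value at addr + stride
 *      ...
 */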
4072 /*
4073  * Handle L2 Cache.
4074  */
4075
4076 static uint32_t 
4077 ql_L2Cache(qla_host_t *ha, ql_minidump_entry_cache_t *cacheEntry,
4078         uint32_t * data_buff)
4079 {
4080         int i, k;
4081         int loop_cnt;
4082         int ret;
4083
4084         uint32_t read_value;
4085         uint32_t addr, read_addr, cntrl_addr, tag_reg_addr, cntl_value_w;
4086         uint32_t tag_value, read_cnt;
4087         volatile uint8_t cntl_value_r;
4088         long timeout;
4089         uint32_t data;
4090
4091         loop_cnt = cacheEntry->op_count;
4092
4093         read_addr = cacheEntry->read_addr;
4094         cntrl_addr = cacheEntry->control_addr;
4095         cntl_value_w = (uint32_t) cacheEntry->write_value;
4096
4097         tag_reg_addr = cacheEntry->tag_reg_addr;
4098
4099         tag_value = cacheEntry->init_tag_value;
4100         read_cnt = cacheEntry->read_addr_cnt;
4101
4102         for (i = 0; i < loop_cnt; i++) {
4103
4104                 ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
4105                 if (ret)
4106                         return (0);
4107
4108                 if (cacheEntry->write_value != 0) { 
4109
4110                         ret = ql_rdwr_indreg32(ha, cntrl_addr,
4111                                         &cntl_value_w, 0);
4112                         if (ret)
4113                                 return (0);
4114                 }
4115
4116                 if (cacheEntry->poll_mask != 0) { 
4117
4118                         timeout = cacheEntry->poll_wait;
4119
4120                         ret = ql_rdwr_indreg32(ha, cntrl_addr, &data, 1);
4121                         if (ret)
4122                                 return (0);
4123
4124                         cntl_value_r = (uint8_t)data;
4125
4126                         while ((cntl_value_r & cacheEntry->poll_mask) != 0) {
4127
4128                                 if (timeout) {
4129                                         qla_mdelay(__func__, 1);
4130                                         timeout--;
4131                                 } else
4132                                         break;
4133
4134                                 ret = ql_rdwr_indreg32(ha, cntrl_addr,
4135                                                 &data, 1);
4136                                 if (ret)
4137                                         return (0);
4138
4139                                 cntl_value_r = (uint8_t)data;
4140                         }
4141                         if (!timeout) {
4142                                 /* Report a timeout error: the core dump
4143                                  * capture failed. Skip the remaining
4144                                  * entries, write the buffer out to a
4145                                  * file, and use the driver-specific
4146                                  * fields in the template header to
4147                                  * report this error.
4148                                  */
4149                                 return (-1);
4150                         }
4151                 }
4152
4153                 addr = read_addr;
4154                 for (k = 0; k < read_cnt; k++) {
4155
4156                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4157                         if (ret)
4158                                 return (0);
4159
4160                         *data_buff++ = read_value;
4161                         addr += cacheEntry->read_addr_stride;
4162                 }
4163
4164                 tag_value += cacheEntry->tag_value_stride;
4165         }
4166
4167         return (read_cnt * loop_cnt * sizeof(uint32_t));
4168 }
4169
4170 /*
4171  * Handle L1 Cache.
4172  */
4173
4174 static uint32_t 
4175 ql_L1Cache(qla_host_t *ha,
4176         ql_minidump_entry_cache_t *cacheEntry,
4177         uint32_t *data_buff)
4178 {
4179         int ret;
4180         int i, k;
4181         int loop_cnt;
4182
4183         uint32_t read_value;
4184         uint32_t addr, read_addr, cntrl_addr, tag_reg_addr;
4185         uint32_t tag_value, read_cnt;
4186         uint32_t cntl_value_w;
4187
4188         loop_cnt = cacheEntry->op_count;
4189
4190         read_addr = cacheEntry->read_addr;
4191         cntrl_addr = cacheEntry->control_addr;
4192         cntl_value_w = (uint32_t) cacheEntry->write_value;
4193
4194         tag_reg_addr = cacheEntry->tag_reg_addr;
4195
4196         tag_value = cacheEntry->init_tag_value;
4197         read_cnt = cacheEntry->read_addr_cnt;
4198
4199         for (i = 0; i < loop_cnt; i++) {
4200
4201                 ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
4202                 if (ret)
4203                         return (0);
4204
4205                 ret = ql_rdwr_indreg32(ha, cntrl_addr, &cntl_value_w, 0);
4206                 if (ret)
4207                         return (0);
4208
4209                 addr = read_addr;
4210                 for (k = 0; k < read_cnt; k++) {
4211
4212                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4213                         if (ret)
4214                                 return (0);
4215
4216                         *data_buff++ = read_value;
4217                         addr += cacheEntry->read_addr_stride;
4218                 }
4219
4220                 tag_value += cacheEntry->tag_value_stride;
4221         }
4222
4223         return (read_cnt * loop_cnt * sizeof(uint32_t));
4224 }
4225
4226 /*
4227  * Reading OCM memory
4228  */
4229
4230 static uint32_t 
4231 ql_rdocm(qla_host_t *ha,
4232         ql_minidump_entry_rdocm_t *ocmEntry,
4233         uint32_t *data_buff)
4234 {
4235         int i, loop_cnt;
4236         volatile uint32_t addr;
4237         volatile uint32_t value;
4238
4239         addr = ocmEntry->read_addr;
4240         loop_cnt = ocmEntry->op_count;
4241
4242         for (i = 0; i < loop_cnt; i++) {
4243                 value = READ_REG32(ha, addr);
4244                 *data_buff++ = value;
4245                 addr += ocmEntry->read_addr_stride;
4246         }
4247         return (loop_cnt * sizeof(value));
4248 }
4249
4250 /*
4251  * Read memory
4252  */
4253
4254 static uint32_t 
4255 ql_rdmem(qla_host_t *ha,
4256         ql_minidump_entry_rdmem_t *mem_entry,
4257         uint32_t *data_buff)
4258 {
4259         int ret;
4260         int i, loop_cnt;
4261         volatile uint32_t addr;
4262         q80_offchip_mem_val_t val;
4263
4264         addr = mem_entry->read_addr;
4265
4266         /* size in bytes / 16 */
4267         loop_cnt = mem_entry->read_data_size / (sizeof(uint32_t) * 4);
4268
4269         for (i = 0; i < loop_cnt; i++) {
4270
4271                 ret = ql_rdwr_offchip_mem(ha, (addr & 0x0ffffffff), &val, 1);
4272                 if (ret)
4273                         return (0);
4274
4275                 *data_buff++ = val.data_lo;
4276                 *data_buff++ = val.data_hi;
4277                 *data_buff++ = val.data_ulo;
4278                 *data_buff++ = val.data_uhi;
4279
4280                 addr += (sizeof(uint32_t) * 4);
4281         }
4282
4283         return (loop_cnt * (sizeof(uint32_t) * 4));
4284 }
4285
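/*
 * Worked example for ql_rdmem() above (illustrative): off-chip memory is
 * read 16 bytes (four 32-bit words) per ql_rdwr_offchip_mem() call, so a
 * read_data_size of 0x100 bytes becomes
 *
 *      loop_cnt = 0x100 / (sizeof(uint32_t) * 4) = 16 iterations,
 *
 * each iteration storing data_lo, data_hi, data_ulo and data_uhi and
 * advancing the address by 16.
 */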
4286 /*
4287  * Read Rom
4288  */
4289
4290 static uint32_t 
4291 ql_rdrom(qla_host_t *ha,
4292         ql_minidump_entry_rdrom_t *romEntry,
4293         uint32_t *data_buff)
4294 {
4295         int ret;
4296         int i, loop_cnt;
4297         uint32_t addr;
4298         uint32_t value;
4299
4300         addr = romEntry->read_addr;
4301         loop_cnt = romEntry->read_data_size; /* This is size in bytes */
4302         loop_cnt /= sizeof(value);
4303
4304         for (i = 0; i < loop_cnt; i++) {
4305
4306                 ret = ql_rd_flash32(ha, addr, &value);
4307                 if (ret)
4308                         return (0);
4309
4310                 *data_buff++ = value;
4311                 addr += sizeof(value);
4312         }
4313
4314         return (loop_cnt * sizeof(value));
4315 }
4316
4317 /*
4318  * Read MUX data
4319  */
4320
4321 static uint32_t 
4322 ql_rdmux(qla_host_t *ha,
4323         ql_minidump_entry_mux_t *muxEntry,
4324         uint32_t *data_buff)
4325 {
4326         int ret;
4327         int loop_cnt;
4328         uint32_t read_value, sel_value;
4329         uint32_t read_addr, select_addr;
4330
4331         select_addr = muxEntry->select_addr;
4332         sel_value = muxEntry->select_value;
4333         read_addr = muxEntry->read_addr;
4334
4335         for (loop_cnt = 0; loop_cnt < muxEntry->op_count; loop_cnt++) {
4336
4337                 ret = ql_rdwr_indreg32(ha, select_addr, &sel_value, 0);
4338                 if (ret)
4339                         return (0);
4340
4341                 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
4342                 if (ret)
4343                         return (0);
4344
4345                 *data_buff++ = sel_value;
4346                 *data_buff++ = read_value;
4347
4348                 sel_value += muxEntry->select_value_stride;
4349         }
4350
4351         return (loop_cnt * (2 * sizeof(uint32_t)));
4352 }
4353
4354 static uint32_t
4355 ql_rdmux2(qla_host_t *ha,
4356         ql_minidump_entry_mux2_t *muxEntry,
4357         uint32_t *data_buff)
4358 {
4359         int ret;
4360         int loop_cnt;
4361
4362         uint32_t select_addr_1, select_addr_2;
4363         uint32_t select_value_1, select_value_2;
4364         uint32_t select_value_count, select_value_mask;
4365         uint32_t read_addr, read_value;
4366
4367         select_addr_1 = muxEntry->select_addr_1;
4368         select_addr_2 = muxEntry->select_addr_2;
4369         select_value_1 = muxEntry->select_value_1;
4370         select_value_2 = muxEntry->select_value_2;
4371         select_value_count = muxEntry->select_value_count;
4372         select_value_mask  = muxEntry->select_value_mask;
4373
4374         read_addr = muxEntry->read_addr;
4375
4376         for (loop_cnt = 0; loop_cnt < muxEntry->select_value_count;
4377                 loop_cnt++) {
4378
4379                 uint32_t temp_sel_val;
4380
4381                 ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_1, 0);
4382                 if (ret)
4383                         return (0);
4384
4385                 temp_sel_val = select_value_1 & select_value_mask;
4386
4387                 ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
4388                 if (ret)
4389                         return (0);
4390
4391                 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
4392                 if (ret)
4393                         return (0);
4394
4395                 *data_buff++ = temp_sel_val;
4396                 *data_buff++ = read_value;
4397
4398                 ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_2, 0);
4399                 if (ret)
4400                         return (0);
4401
4402                 temp_sel_val = select_value_2 & select_value_mask;
4403
4404                 ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
4405                 if (ret)
4406                         return (0);
4407
4408                 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
4409                 if (ret)
4410                         return (0);
4411
4412                 *data_buff++ = temp_sel_val;
4413                 *data_buff++ = read_value;
4414
4415                 select_value_1 += muxEntry->select_value_stride;
4416                 select_value_2 += muxEntry->select_value_stride;
4417         }
4418
4419         return (loop_cnt * (4 * sizeof(uint32_t)));
4420 }
4421
4422 /*
4423  * Handling Queue State Reads.
4424  */
4425
4426 static uint32_t 
4427 ql_rdqueue(qla_host_t *ha,
4428         ql_minidump_entry_queue_t *queueEntry,
4429         uint32_t *data_buff)
4430 {
4431         int ret;
4432         int loop_cnt, k;
4433         uint32_t read_value;
4434         uint32_t read_addr, read_stride, select_addr;
4435         uint32_t queue_id, read_cnt;
4436
4437         read_cnt = queueEntry->read_addr_cnt;
4438         read_stride = queueEntry->read_addr_stride;
4439         select_addr = queueEntry->select_addr;
4440
4441         for (loop_cnt = 0, queue_id = 0; loop_cnt < queueEntry->op_count;
4442                 loop_cnt++) {
4443
4444                 ret = ql_rdwr_indreg32(ha, select_addr, &queue_id, 0);
4445                 if (ret)
4446                         return (0);
4447
4448                 read_addr = queueEntry->read_addr;
4449
4450                 for (k = 0; k < read_cnt; k++) {
4451
4452                         ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
4453                         if (ret)
4454                                 return (0);
4455
4456                         *data_buff++ = read_value;
4457                         read_addr += read_stride;
4458                 }
4459
4460                 queue_id += queueEntry->queue_id_stride;
4461         }
4462
4463         return (loop_cnt * (read_cnt * sizeof(uint32_t)));
4464 }
4465
4466 /*
4467  * Handling control entries.
4468  */
4469
4470 static uint32_t 
4471 ql_cntrl(qla_host_t *ha,
4472         ql_minidump_template_hdr_t *template_hdr,
4473         ql_minidump_entry_cntrl_t *crbEntry)
4474 {
4475         int ret;
4476         int count;
4477         uint32_t opcode, read_value, addr, entry_addr;
4478         long timeout;
4479
4480         entry_addr = crbEntry->addr;
4481
4482         for (count = 0; count < crbEntry->op_count; count++) {
4483                 opcode = crbEntry->opcode;
4484
4485                 if (opcode & QL_DBG_OPCODE_WR) {
4486
4487                         ret = ql_rdwr_indreg32(ha, entry_addr,
4488                                         &crbEntry->value_1, 0);
4489                         if (ret)
4490                                 return (0);
4491
4492                         opcode &= ~QL_DBG_OPCODE_WR;
4493                 }
4494
4495                 if (opcode & QL_DBG_OPCODE_RW) {
4496
4497                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
4498                         if (ret)
4499                                 return (0);
4500
4501                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
4502                         if (ret)
4503                                 return (0);
4504
4505                         opcode &= ~QL_DBG_OPCODE_RW;
4506                 }
4507
4508                 if (opcode & QL_DBG_OPCODE_AND) {
4509
4510                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
4511                         if (ret)
4512                                 return (0);
4513
4514                         read_value &= crbEntry->value_2;
4515                         opcode &= ~QL_DBG_OPCODE_AND;
4516
4517                         if (opcode & QL_DBG_OPCODE_OR) {
4518                                 read_value |= crbEntry->value_3;
4519                                 opcode &= ~QL_DBG_OPCODE_OR;
4520                         }
4521
4522                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
4523                         if (ret)
4524                                 return (0);
4525                 }
4526
4527                 if (opcode & QL_DBG_OPCODE_OR) {
4528
4529                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
4530                         if (ret)
4531                                 return (0);
4532
4533                         read_value |= crbEntry->value_3;
4534
4535                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
4536                         if (ret)
4537                                 return (0);
4538
4539                         opcode &= ~QL_DBG_OPCODE_OR;
4540                 }
4541
4542                 if (opcode & QL_DBG_OPCODE_POLL) {
4543
4544                         opcode &= ~QL_DBG_OPCODE_POLL;
4545                         timeout = crbEntry->poll_timeout;
4546                         addr = entry_addr;
4547
4548                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4549                         if (ret)
4550                                 return (0);
4551
4552                         while ((read_value & crbEntry->value_2)
4553                                 != crbEntry->value_1) {
4554
4555                                 if (timeout) {
4556                                         qla_mdelay(__func__, 1);
4557                                         timeout--;
4558                                 } else
4559                                         break;
4560
4561                                 ret = ql_rdwr_indreg32(ha, addr,
4562                                                 &read_value, 1);
4563                                 if (ret)
4564                                         return (0);
4565                         }
4566
4567                         if (!timeout) {
4568                                 /*
4569                                  * Report a timeout error: the core dump
4570                                  * capture failed. Skip the remaining
4571                                  * entries, write the buffer out to a
4572                                  * file, and use the driver-specific
4573                                  * fields in the template header to
4574                                  * report this error.
4575                                  */
4576                                 return (-1);
4577                         }
                }

                if (opcode & QL_DBG_OPCODE_RDSTATE) {
                        /*
                         * Decide which address to use.
                         */
                        if (crbEntry->state_index_a) {
                                addr = template_hdr->saved_state_array[
                                                crbEntry->state_index_a];
                        } else {
                                addr = entry_addr;
                        }

                        ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
                        if (ret)
                                return (0);

                        template_hdr->saved_state_array[crbEntry->state_index_v]
                                        = read_value;
                        opcode &= ~QL_DBG_OPCODE_RDSTATE;
                }

                if (opcode & QL_DBG_OPCODE_WRSTATE) {
                        /*
                         * Decide which value to use.
                         */
                        if (crbEntry->state_index_v) {
                                read_value = template_hdr->saved_state_array[
                                                crbEntry->state_index_v];
                        } else {
                                read_value = crbEntry->value_1;
                        }
                        /*
                         * Decide which address to use.
                         */
                        if (crbEntry->state_index_a) {
                                addr = template_hdr->saved_state_array[
                                                crbEntry->state_index_a];
                        } else {
                                addr = entry_addr;
                        }

                        ret = ql_rdwr_indreg32(ha, addr, &read_value, 0);
                        if (ret)
                                return (0);

                        opcode &= ~QL_DBG_OPCODE_WRSTATE;
                }

                if (opcode & QL_DBG_OPCODE_MDSTATE) {
                        /* Read value from saved state using index. */
                        read_value = template_hdr->saved_state_array[
                                                crbEntry->state_index_v];

                        read_value <<= crbEntry->shl; /* shift left operation */
                        read_value >>= crbEntry->shr; /* shift right operation */

                        if (crbEntry->value_2) {
                                /* AND mask is provided; apply it. */
                                read_value &= crbEntry->value_2;
                        }

                        read_value |= crbEntry->value_3; /* OR operation */
                        read_value += crbEntry->value_1; /* increment operation */
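                        /*
                         * Worked example with hypothetical template values:
                         * saved state 0x000000F0, shl = 8, shr = 0,
                         * value_2 = 0xFF00, value_3 = 0x0001 and
                         * value_1 = 0x0010 give
                         * (((0xF0 << 8) & 0xFF00) | 0x0001) + 0x0010 = 0xF011.
                         */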

                        /* Write value back to state area. */
                        template_hdr->saved_state_array[crbEntry->state_index_v]
                                        = read_value;
                        opcode &= ~QL_DBG_OPCODE_MDSTATE;
                }

                entry_addr += crbEntry->addr_stride;
        }

        return (0);
}

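/*
 * Illustrative note (a sketch, not part of the capture path): a CRB control
 * entry encodes its operations as bits in a single opcode word, so one entry
 * can combine several of them; each handler above clears its bit once it has
 * been serviced. Assuming hypothetical read_reg()/write_reg() helpers, the
 * dispatch pattern is:
 *
 *      uint32_t op = crbEntry->opcode;
 *      if (op & QL_DBG_OPCODE_WR) {
 *              write_reg(entry_addr, crbEntry->value_1);
 *              op &= ~QL_DBG_OPCODE_WR;
 *      }
 *      if (op & QL_DBG_OPCODE_AND) {
 *              uint32_t v = read_reg(entry_addr) & crbEntry->value_2;
 *              if (op & QL_DBG_OPCODE_OR) {
 *                      v |= crbEntry->value_3;
 *                      op &= ~QL_DBG_OPCODE_OR;
 *              }
 *              write_reg(entry_addr, v);
 *              op &= ~QL_DBG_OPCODE_AND;
 *      }
 *      ... remaining opcode bits are handled the same way ...
 */
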
/*
 * Handle a read-poll entry: for each operation, write select_value to
 * select_addr, poll select_addr until a mask bit is set, then capture
 * the (select_value, data) pair read back via read_addr.
 */

static uint32_t
ql_pollrd(qla_host_t *ha, ql_minidump_entry_pollrd_t *entry,
        uint32_t *data_buff)
{
        int ret;
        uint32_t loop_cnt;
        uint32_t op_count, select_addr, select_value_stride, select_value;
        uint32_t read_addr, poll, mask, data_size, data;
        uint32_t wait_count = 0;

        select_addr            = entry->select_addr;
        read_addr              = entry->read_addr;
        select_value           = entry->select_value;
        select_value_stride    = entry->select_value_stride;
        op_count               = entry->op_count;
        poll                   = entry->poll;
        mask                   = entry->mask;
        data_size              = entry->data_size;

        for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {

                ret = ql_rdwr_indreg32(ha, select_addr, &select_value, 0);
                if (ret)
                        return (0);

                wait_count = 0;

                while (wait_count < poll) {

                        uint32_t temp;

                        ret = ql_rdwr_indreg32(ha, select_addr, &temp, 1);
                        if (ret)
                                return (0);

                        if ((temp & mask) != 0) {
                                break;
                        }
                        wait_count++;
                }

                if (wait_count == poll) {
                        device_printf(ha->pci_dev,
                                "%s: Error in processing entry\n", __func__);
                        device_printf(ha->pci_dev,
                                "%s: wait_count <0x%x> poll <0x%x>\n",
                                __func__, wait_count, poll);
                        return (0);
                }

                ret = ql_rdwr_indreg32(ha, read_addr, &data, 1);
                if (ret)
                        return (0);

                *data_buff++ = select_value;
                *data_buff++ = data;
                select_value += select_value_stride;
        }

        /*
         * Return the number of bytes written into the capture buffer.
         */
        return (loop_cnt * (2 * sizeof(uint32_t)));
}

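/*
 * Illustrative layout (hypothetical values): a poll-read entry with
 * op_count = 2, select_value = 0x10 and select_value_stride = 4 appends
 * four words to the capture buffer:
 *
 *      data_buff[0] = 0x10;     selector written for the first read
 *      data_buff[1] = <data>;   word read back via read_addr
 *      data_buff[2] = 0x14;     selector for the second read
 *      data_buff[3] = <data>;   word read back via read_addr
 *
 * matching the 2 * sizeof(uint32_t) bytes per operation that the function
 * returns.
 */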

/*
 * Handle a read-modify-write entry with polls: write value_1 to addr_1 and
 * poll addr_1 for a mask bit, mask the word read from addr_2 with modify_mask
 * and write it back, write value_2 to addr_1 and poll again, then capture
 * addr_2 and the modified word.
 */

static uint32_t
ql_pollrd_modify_write(qla_host_t *ha,
        ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
        uint32_t *data_buff)
{
        int ret;
        uint32_t addr_1, addr_2, value_1, value_2, data;
        uint32_t poll, mask, data_size, modify_mask;
        uint32_t wait_count = 0;

        addr_1          = entry->addr_1;
        addr_2          = entry->addr_2;
        value_1         = entry->value_1;
        value_2         = entry->value_2;

        poll            = entry->poll;
        mask            = entry->mask;
        modify_mask     = entry->modify_mask;
        data_size       = entry->data_size;

        ret = ql_rdwr_indreg32(ha, addr_1, &value_1, 0);
        if (ret)
                return (0);

        wait_count = 0;
        while (wait_count < poll) {

                uint32_t temp;

                ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
                if (ret)
                        return (0);

                if ((temp & mask) != 0) {
                        break;
                }
                wait_count++;
        }

        if (wait_count == poll) {
                device_printf(ha->pci_dev, "%s: Error in processing entry\n",
                        __func__);
        } else {

                ret = ql_rdwr_indreg32(ha, addr_2, &data, 1);
                if (ret)
                        return (0);

                data = (data & modify_mask);

                ret = ql_rdwr_indreg32(ha, addr_2, &data, 0);
                if (ret)
                        return (0);

                ret = ql_rdwr_indreg32(ha, addr_1, &value_2, 0);
                if (ret)
                        return (0);

                /* Poll again */
                wait_count = 0;
                while (wait_count < poll) {

                        uint32_t temp;

                        ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
                        if (ret)
                                return (0);

                        if ((temp & mask) != 0) {
                                break;
                        }
                        wait_count++;
                }
                *data_buff++ = addr_2;
                *data_buff++ = data;
        }

        /*
         * Return the number of bytes this entry contributes to the capture
         * buffer; note that two words are counted even when the first poll
         * times out and data_buff is left unwritten.
         */
        return (2 * sizeof(uint32_t));
}
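
/*
 * Illustrative sketch (hypothetical, not the driver's actual template walk):
 * handlers such as ql_pollrd() and ql_pollrd_modify_write() return the number
 * of bytes they appended, which lets a capture loop advance the buffer
 * uniformly. Assuming hypothetical entry-type tags ENTRY_RDPOLL and
 * ENTRY_POLLRDMWR:
 *
 *      uint32_t *buf = capture_buffer;
 *      switch (entry_type) {
 *      case ENTRY_RDPOLL:
 *              buf += ql_pollrd(ha, (ql_minidump_entry_pollrd_t *)entry,
 *                          buf) / sizeof(uint32_t);
 *              break;
 *      case ENTRY_POLLRDMWR:
 *              buf += ql_pollrd_modify_write(ha,
 *                          (ql_minidump_entry_rd_modify_wr_with_poll_t *)entry,
 *                          buf) / sizeof(uint32_t);
 *              break;
 *      }
 */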