/*
 * Copyright (c) 2013-2016 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: ql_hw.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 * Content: Contains hardware dependent functions
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "ql_os.h"
#include "ql_hw.h"
#include "ql_def.h"
#include "ql_inline.h"
#include "ql_ver.h"
#include "ql_glbl.h"
#include "ql_dbg.h"
#include "ql_minidump.h"

/*
 * Static Functions
 */

static void qla_del_rcv_cntxt(qla_host_t *ha);
static int qla_init_rcv_cntxt(qla_host_t *ha);
static void qla_del_xmt_cntxt(qla_host_t *ha);
static int qla_init_xmt_cntxt(qla_host_t *ha);
static void qla_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx);
static int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
        uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause);
static int qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx,
        uint32_t num_intrs, uint32_t create);
static int qla_config_rss(qla_host_t *ha, uint16_t cntxt_id);
static int qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id,
        int tenable, int rcv);
static int qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode);
static int qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id);

static int qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd,
                uint8_t *hdr);
static int qla_hw_add_all_mcast(qla_host_t *ha);
static int qla_hw_del_all_mcast(qla_host_t *ha);
static int qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds);

static int qla_init_nic_func(qla_host_t *ha);
static int qla_stop_nic_func(qla_host_t *ha);
static int qla_query_fw_dcbx_caps(qla_host_t *ha);
static int qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits);
static int qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits);
static void qla_get_quick_stats(qla_host_t *ha);
static int qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode);
static int qla_get_cam_search_mode(qla_host_t *ha);

static void ql_minidump_free(qla_host_t *ha);

static int
qla_sysctl_get_drvr_stats(SYSCTL_HANDLER_ARGS)
{
        int err = 0, ret;
        qla_host_t *ha;
        uint32_t i;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        if (ret == 1) {

                ha = (qla_host_t *)arg1;

                for (i = 0; i < ha->hw.num_sds_rings; i++) {

                        device_printf(ha->pci_dev,
                                "%s: sds_ring[%d] = %p\n", __func__, i,
                                (void *)ha->hw.sds[i].intr_count);

                        device_printf(ha->pci_dev,
                                "%s: sds_ring[%d].spurious_intr_count = %p\n",
                                __func__,
                                i, (void *)ha->hw.sds[i].spurious_intr_count);

                        device_printf(ha->pci_dev,
                                "%s: sds_ring[%d].rx_free = %d\n", __func__, i,
                                ha->hw.sds[i].rx_free);
                }

                for (i = 0; i < ha->hw.num_tx_rings; i++)
                        device_printf(ha->pci_dev,
                                "%s: tx[%d] = %p\n", __func__, i,
                                (void *)ha->tx_ring[i].count);

                for (i = 0; i < ha->hw.num_rds_rings; i++)
                        device_printf(ha->pci_dev,
                                "%s: rds_ring[%d] = %p\n", __func__, i,
                                (void *)ha->hw.rds[i].count);

                device_printf(ha->pci_dev, "%s: lro_pkt_count = %p\n", __func__,
                        (void *)ha->lro_pkt_count);

                device_printf(ha->pci_dev, "%s: lro_bytes = %p\n", __func__,
                        (void *)ha->lro_bytes);

#ifdef QL_ENABLE_ISCSI_TLV
                device_printf(ha->pci_dev, "%s: iscsi_pkts = %p\n", __func__,
                        (void *)ha->hw.iscsi_pkt_count);
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */

        }
        return (err);
}

static int
qla_sysctl_get_quick_stats(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        if (ret == 1) {
                ha = (qla_host_t *)arg1;
                qla_get_quick_stats(ha);
        }
        return (err);
}

#ifdef QL_DBG

static void
qla_stop_pegs(qla_host_t *ha)
{
        uint32_t val = 1;

        ql_rdwr_indreg32(ha, Q8_CRB_PEG_0, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_1, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_2, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_3, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_4, &val, 0);
        device_printf(ha->pci_dev, "%s PEGS HALTED!!!!!\n", __func__);
}

static int
qla_sysctl_stop_pegs(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        if (ret == 1) {
                ha = (qla_host_t *)arg1;
                (void)QLA_LOCK(ha, __func__, 0);
                qla_stop_pegs(ha);
                QLA_UNLOCK(ha, __func__);
        }

        return (err);
}
#endif /* #ifdef QL_DBG */

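/*
 * The port_cfg sysctl value validated below packs three 4-bit fields:
 * bits 0-3 DCBX enable (0 or 1), bits 4-7 pause mode (0 = none,
 * 1 = standard, 2 = ppm), and bits 8-11 standard-pause direction
 * (0 = xmt and rcv, 1 = xmt only, 2 = rcv only).
 */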
static int
qla_validate_set_port_cfg_bit(uint32_t bits)
{
        if ((bits & 0xF) > 1)
                return (-1);

        if (((bits >> 4) & 0xF) > 2)
                return (-1);

        if (((bits >> 8) & 0xF) > 2)
                return (-1);

        return (0);
}

static int
qla_sysctl_port_cfg(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;
        uint32_t cfg_bits;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        if (qla_validate_set_port_cfg_bit((uint32_t)ret) == 0) {

                ha = (qla_host_t *)arg1;

                err = qla_get_port_config(ha, &cfg_bits);

                if (err)
                        goto qla_sysctl_set_port_cfg_exit;

                if (ret & 0x1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_DCBX_ENABLE;
                } else {
                        cfg_bits &= ~Q8_PORT_CFG_BITS_DCBX_ENABLE;
                }

                ret = ret >> 4;
                cfg_bits &= ~Q8_PORT_CFG_BITS_PAUSE_CFG_MASK;

                if ((ret & 0xF) == 0) {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_DISABLED;
                } else if ((ret & 0xF) == 1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_STD;
                } else {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_PPM;
                }

                ret = ret >> 4;
                cfg_bits &= ~Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK;

                if (ret == 0) {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT_RCV;
                } else if (ret == 1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT;
                } else {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_RCV;
                }

                err = qla_set_port_config(ha, cfg_bits);
        } else {
                ha = (qla_host_t *)arg1;

                err = qla_get_port_config(ha, &cfg_bits);
        }

qla_sysctl_set_port_cfg_exit:
        return (err);
}

static int
qla_sysctl_set_cam_search_mode(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        ha = (qla_host_t *)arg1;

        if ((ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_INTERNAL) ||
                (ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_AUTO)) {
                err = qla_set_cam_search_mode(ha, (uint32_t)ret);
        } else {
                device_printf(ha->pci_dev, "%s: ret = %d\n", __func__, ret);
        }

        return (err);
}

static int
qla_sysctl_get_cam_search_mode(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        ha = (qla_host_t *)arg1;
        err = qla_get_cam_search_mode(ha);

        return (err);
}

/*
 * Name: ql_hw_add_sysctls
 * Function: Add P3Plus specific sysctls
 */
void
ql_hw_add_sysctls(qla_host_t *ha)
{
        device_t        dev;

        dev = ha->pci_dev;

        ha->hw.num_sds_rings = MAX_SDS_RINGS;
        ha->hw.num_rds_rings = MAX_RDS_RINGS;
        ha->hw.num_tx_rings = NUM_TX_RINGS;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_rds_rings", CTLFLAG_RD, &ha->hw.num_rds_rings,
                ha->hw.num_rds_rings, "Number of Rcv Descriptor Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_sds_rings", CTLFLAG_RD, &ha->hw.num_sds_rings,
                ha->hw.num_sds_rings, "Number of Status Descriptor Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->hw.num_tx_rings,
                ha->hw.num_tx_rings, "Number of Transmit Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "tx_ring_index", CTLFLAG_RW, &ha->txr_idx,
                ha->txr_idx, "Tx Ring Used");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "drvr_stats", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_get_drvr_stats, "I", "Driver Maintained Statistics");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "quick_stats", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_get_quick_stats, "I", "Quick Statistics");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "max_tx_segs", CTLFLAG_RD, &ha->hw.max_tx_segs,
                ha->hw.max_tx_segs, "Max # of Segments in a non-TSO pkt");

        ha->hw.sds_cidx_thres = 32;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "sds_cidx_thres", CTLFLAG_RW, &ha->hw.sds_cidx_thres,
                ha->hw.sds_cidx_thres,
                "Number of SDS entries to process before updating"
                " SDS Ring Consumer Index");

        ha->hw.rds_pidx_thres = 32;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "rds_pidx_thres", CTLFLAG_RW, &ha->hw.rds_pidx_thres,
                ha->hw.rds_pidx_thres,
                "Number of Rcv Rings Entries to post before updating"
                " RDS Ring Producer Index");
        ha->hw.rcv_intr_coalesce = (3 << 16) | 256;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "rcv_intr_coalesce", CTLFLAG_RW,
                &ha->hw.rcv_intr_coalesce,
                ha->hw.rcv_intr_coalesce,
                "Rcv Intr Coalescing Parameters\n"
                "\tbits 15:0 max packets\n"
                "\tbits 31:16 max micro-seconds to wait\n"
                "\tplease run\n"
                "\tifconfig <if> down && ifconfig <if> up\n"
                "\tfor the change to take effect\n");

        ha->hw.xmt_intr_coalesce = (64 << 16) | 64;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "xmt_intr_coalesce", CTLFLAG_RW,
                &ha->hw.xmt_intr_coalesce,
                ha->hw.xmt_intr_coalesce,
                "Xmt Intr Coalescing Parameters\n"
                "\tbits 15:0 max packets\n"
                "\tbits 31:16 max micro-seconds to wait\n"
                "\tplease run\n"
                "\tifconfig <if> down && ifconfig <if> up\n"
                "\tfor the change to take effect\n");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "port_cfg", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_port_cfg, "I",
                        "Set Port Configuration if the value is valid per the"
                        " bit fields below; otherwise Get Port Configuration\n"
                        "\tBits 0-3 : 1 = DCBX Enable; 0 = DCBX Disable\n"
                        "\tBits 4-7 : 0 = no pause; 1 = std; 2 = ppm\n"
                        "\tBits 8-11: std pause cfg; 0 = xmt and rcv;"
                        " 1 = xmt only; 2 = rcv only;\n"
                );

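        /*
         * Example usage (assuming the first port shows up as dev.ql.0):
         *     sysctl dev.ql.0.port_cfg=0x011
         * selects DCBX enabled, standard pause, xmt-and-rcv pause
         * direction; writing an out-of-range value instead reads back the
         * current configuration.
         */
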
        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "set_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_set_cam_search_mode, "I",
                        "Set CAM Search Mode\n"
                        "\t 1 = search mode internal\n"
                        "\t 2 = search mode auto\n");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "get_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_get_cam_search_mode, "I",
                        "Get CAM Search Mode\n"
                        "\t 1 = search mode internal\n"
                        "\t 2 = search mode auto\n");

        ha->hw.enable_9kb = 1;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "enable_9kb", CTLFLAG_RW, &ha->hw.enable_9kb,
                ha->hw.enable_9kb, "Enable 9Kbyte Buffers when MTU = 9000");

        ha->hw.mdump_active = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "minidump_active", CTLFLAG_RW, &ha->hw.mdump_active,
                ha->hw.mdump_active,
                "Minidump retrieval is Active");

        ha->hw.mdump_done = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "mdump_done", CTLFLAG_RW,
                &ha->hw.mdump_done, ha->hw.mdump_done,
                "Minidump has completed and is available for retrieval");

        ha->hw.mdump_capture_mask = 0xF;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "minidump_capture_mask", CTLFLAG_RW,
                &ha->hw.mdump_capture_mask, ha->hw.mdump_capture_mask,
                "Minidump capture mask");
#ifdef QL_DBG

        ha->err_inject = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "err_inject",
                CTLFLAG_RW, &ha->err_inject, ha->err_inject,
                "Error to be injected\n"
                "\t\t\t 0: No Errors\n"
                "\t\t\t 1: rcv: rxb struct invalid\n"
                "\t\t\t 2: rcv: mp == NULL\n"
                "\t\t\t 3: lro: rxb struct invalid\n"
                "\t\t\t 4: lro: mp == NULL\n"
                "\t\t\t 5: rcv: num handles invalid\n"
                "\t\t\t 6: reg: indirect reg rd_wr failure\n"
                "\t\t\t 7: ocm: offchip memory rd_wr failure\n"
                "\t\t\t 8: mbx: mailbox command failure\n"
                "\t\t\t 9: heartbeat failure\n"
                "\t\t\t A: temperature failure\n"
                "\t\t\t 11: m_getcl or m_getjcl failure\n");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "peg_stop", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_stop_pegs, "I", "Peg Stop");

#endif /* #ifdef QL_DBG */

        ha->hw.user_pri_nic = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "user_pri_nic", CTLFLAG_RW, &ha->hw.user_pri_nic,
                ha->hw.user_pri_nic,
                "VLAN Tag User Priority for Normal Ethernet Packets");

        ha->hw.user_pri_iscsi = 4;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "user_pri_iscsi", CTLFLAG_RW, &ha->hw.user_pri_iscsi,
                ha->hw.user_pri_iscsi,
                "VLAN Tag User Priority for iSCSI Packets");

}

void
ql_hw_link_status(qla_host_t *ha)
{
        device_printf(ha->pci_dev, "cable_oui\t\t 0x%08x\n", ha->hw.cable_oui);

        if (ha->hw.link_up) {
                device_printf(ha->pci_dev, "link Up\n");
        } else {
                device_printf(ha->pci_dev, "link Down\n");
        }

        if (ha->hw.flags.fduplex) {
                device_printf(ha->pci_dev, "Full Duplex\n");
        } else {
                device_printf(ha->pci_dev, "Half Duplex\n");
        }

        if (ha->hw.flags.autoneg) {
                device_printf(ha->pci_dev, "Auto Negotiation Enabled\n");
        } else {
                device_printf(ha->pci_dev, "Auto Negotiation Disabled\n");
        }

        switch (ha->hw.link_speed) {
        case 0x710:
                device_printf(ha->pci_dev, "link speed\t\t 10Gbps\n");
                break;

        case 0x3E8:
                device_printf(ha->pci_dev, "link speed\t\t 1Gbps\n");
                break;

        case 0x64:
                device_printf(ha->pci_dev, "link speed\t\t 100Mbps\n");
                break;

        default:
                device_printf(ha->pci_dev, "link speed\t\t Unknown\n");
                break;
        }

        switch (ha->hw.module_type) {

        case 0x01:
                device_printf(ha->pci_dev, "Module Type 10GBase-LRM\n");
                break;

        case 0x02:
                device_printf(ha->pci_dev, "Module Type 10GBase-LR\n");
                break;

        case 0x03:
                device_printf(ha->pci_dev, "Module Type 10GBase-SR\n");
                break;

        case 0x04:
                device_printf(ha->pci_dev,
                        "Module Type 10GE Passive Copper(Compliant)[%d m]\n",
                        ha->hw.cable_length);
                break;

        case 0x05:
                device_printf(ha->pci_dev, "Module Type 10GE Active"
                        " Limiting Copper(Compliant)[%d m]\n",
                        ha->hw.cable_length);
                break;

        case 0x06:
                device_printf(ha->pci_dev,
                        "Module Type 10GE Passive Copper"
                        " (Legacy, Best Effort)[%d m]\n",
                        ha->hw.cable_length);
                break;

        case 0x07:
                device_printf(ha->pci_dev, "Module Type 1000Base-SX\n");
                break;

        case 0x08:
                device_printf(ha->pci_dev, "Module Type 1000Base-LX\n");
                break;

        case 0x09:
                device_printf(ha->pci_dev, "Module Type 1000Base-CX\n");
                break;

        case 0x0A:
                device_printf(ha->pci_dev, "Module Type 1000Base-T\n");
                break;

        case 0x0B:
                device_printf(ha->pci_dev, "Module Type 1GE Passive Copper"
                        " (Legacy, Best Effort)\n");
                break;

        default:
                device_printf(ha->pci_dev, "Unknown Module Type 0x%x\n",
                        ha->hw.module_type);
                break;
        }

        if (ha->hw.link_faults == 1)
                device_printf(ha->pci_dev, "SFP Power Fault\n");
}

/*
 * Name: ql_free_dma
 * Function: Frees the DMA'able memory allocated in ql_alloc_dma()
 */
void
ql_free_dma(qla_host_t *ha)
{
        uint32_t i;

        if (ha->hw.dma_buf.flags.sds_ring) {
                for (i = 0; i < ha->hw.num_sds_rings; i++) {
                        ql_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]);
                }
                ha->hw.dma_buf.flags.sds_ring = 0;
        }

        if (ha->hw.dma_buf.flags.rds_ring) {
                for (i = 0; i < ha->hw.num_rds_rings; i++) {
                        ql_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]);
                }
                ha->hw.dma_buf.flags.rds_ring = 0;
        }

        if (ha->hw.dma_buf.flags.tx_ring) {
                ql_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring);
                ha->hw.dma_buf.flags.tx_ring = 0;
        }
        ql_minidump_free(ha);
}

/*
 * Name: ql_alloc_dma
 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
 */
int
ql_alloc_dma(qla_host_t *ha)
{
        device_t                dev;
        uint32_t                i, j, size, tx_ring_size;
        qla_hw_t                *hw;
        qla_hw_tx_cntxt_t       *tx_cntxt;
        uint8_t                 *vaddr;
        bus_addr_t              paddr;

        dev = ha->pci_dev;

        QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

        hw = &ha->hw;
        /*
         * Allocate Transmit Ring
         */
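        /*
         * All transmit rings share one DMA buffer: the rings are laid out
         * back to back, followed by one 32-bit consumer-index word per
         * ring.  The extra PAGE_SIZE added to the allocation below leaves
         * room for those consumer-index words.
         */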
        tx_ring_size = (sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS);
        size = (tx_ring_size * ha->hw.num_tx_rings);

        hw->dma_buf.tx_ring.alignment = 8;
        hw->dma_buf.tx_ring.size = size + PAGE_SIZE;

        if (ql_alloc_dmabuf(ha, &hw->dma_buf.tx_ring)) {
                device_printf(dev, "%s: tx ring alloc failed\n", __func__);
                goto ql_alloc_dma_exit;
        }

        vaddr = (uint8_t *)hw->dma_buf.tx_ring.dma_b;
        paddr = hw->dma_buf.tx_ring.dma_addr;

        for (i = 0; i < ha->hw.num_tx_rings; i++) {
                tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

                tx_cntxt->tx_ring_base = (q80_tx_cmd_t *)vaddr;
                tx_cntxt->tx_ring_paddr = paddr;

                vaddr += tx_ring_size;
                paddr += tx_ring_size;
        }

        for (i = 0; i < ha->hw.num_tx_rings; i++) {
                tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

                tx_cntxt->tx_cons = (uint32_t *)vaddr;
                tx_cntxt->tx_cons_paddr = paddr;

                vaddr += sizeof (uint32_t);
                paddr += sizeof (uint32_t);
        }

        ha->hw.dma_buf.flags.tx_ring = 1;

        QL_DPRINT2(ha, (dev, "%s: tx_ring phys %p virt %p\n",
                __func__, (void *)(hw->dma_buf.tx_ring.dma_addr),
                hw->dma_buf.tx_ring.dma_b));
        /*
         * Allocate Receive Descriptor Rings
         */

        for (i = 0; i < hw->num_rds_rings; i++) {

                hw->dma_buf.rds_ring[i].alignment = 8;
                hw->dma_buf.rds_ring[i].size =
                        (sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS;

                if (ql_alloc_dmabuf(ha, &hw->dma_buf.rds_ring[i])) {
                        device_printf(dev, "%s: rds ring[%d] alloc failed\n",
                                __func__, i);

                        for (j = 0; j < i; j++)
                                ql_free_dmabuf(ha, &hw->dma_buf.rds_ring[j]);

                        goto ql_alloc_dma_exit;
                }
                QL_DPRINT4(ha, (dev, "%s: rx_ring[%d] phys %p virt %p\n",
                        __func__, i, (void *)(hw->dma_buf.rds_ring[i].dma_addr),
                        hw->dma_buf.rds_ring[i].dma_b));
        }

        hw->dma_buf.flags.rds_ring = 1;

        /*
         * Allocate Status Descriptor Rings
         */

        for (i = 0; i < hw->num_sds_rings; i++) {
                hw->dma_buf.sds_ring[i].alignment = 8;
                hw->dma_buf.sds_ring[i].size =
                        (sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS;

                if (ql_alloc_dmabuf(ha, &hw->dma_buf.sds_ring[i])) {
                        device_printf(dev, "%s: sds ring alloc failed\n",
                                __func__);

                        for (j = 0; j < i; j++)
                                ql_free_dmabuf(ha, &hw->dma_buf.sds_ring[j]);

                        goto ql_alloc_dma_exit;
                }
                QL_DPRINT4(ha, (dev, "%s: sds_ring[%d] phys %p virt %p\n",
                        __func__, i,
                        (void *)(hw->dma_buf.sds_ring[i].dma_addr),
                        hw->dma_buf.sds_ring[i].dma_b));
        }
        for (i = 0; i < hw->num_sds_rings; i++) {
                hw->sds[i].sds_ring_base =
                        (q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b;
        }

        hw->dma_buf.flags.sds_ring = 1;

        return (0);

ql_alloc_dma_exit:
        ql_free_dma(ha);
        return (-1);
}

#define Q8_MBX_MSEC_DELAY       5000

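/*
 * Name: qla_mbx_cmd
 * Function: Issues a mailbox command to the firmware and collects the
 *      response.  The handshake implemented below is:
 *      1. wait for Q8_HOST_MBOX_CNTRL to read 0 (no command in flight);
 *      2. write the n_hmbox request words into the host mailbox registers
 *         and set Q8_HOST_MBOX_CNTRL to hand the command to the firmware;
 *      3. poll Q8_FW_MBOX_CNTRL until the firmware posts a completion,
 *         ignoring async (0x8000-series) status values in Q8_FW_MBOX0;
 *      4. copy out n_fwmbox response words, then clear Q8_FW_MBOX_CNTRL
 *         and the mailbox interrupt mask to re-arm the interface.
 *      When no_pause is set, the wait loops busy-spin via DELAY() instead
 *      of sleeping, so the command can be issued from contexts that must
 *      not block.  On timeout, ha->qla_initiate_recovery is flagged.
 */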
static int
qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
        uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause)
{
        uint32_t i;
        uint32_t data;
        int ret = 0;

        if (QL_ERR_INJECT(ha, INJCT_MBX_CMD_FAILURE)) {
                ret = -3;
                ha->qla_initiate_recovery = 1;
                goto exit_qla_mbx_cmd;
        }

        if (no_pause)
                i = 1000;
        else
                i = Q8_MBX_MSEC_DELAY;

        while (i) {
                data = READ_REG32(ha, Q8_HOST_MBOX_CNTRL);
                if (data == 0)
                        break;
                if (no_pause) {
                        DELAY(1000);
                } else {
                        qla_mdelay(__func__, 1);
                }
                i--;
        }

        if (i == 0) {
                device_printf(ha->pci_dev, "%s: host_mbx_cntrl 0x%08x\n",
                        __func__, data);
                ret = -1;
                ha->qla_initiate_recovery = 1;
                goto exit_qla_mbx_cmd;
        }

        for (i = 0; i < n_hmbox; i++) {
                WRITE_REG32(ha, (Q8_HOST_MBOX0 + (i << 2)), *h_mbox);
                h_mbox++;
        }

        WRITE_REG32(ha, Q8_HOST_MBOX_CNTRL, 0x1);

        i = Q8_MBX_MSEC_DELAY;
        while (i) {
                data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);

                if ((data & 0x3) == 1) {
                        data = READ_REG32(ha, Q8_FW_MBOX0);
                        if ((data & 0xF000) != 0x8000)
                                break;
                }
                if (no_pause) {
                        DELAY(1000);
                } else {
                        qla_mdelay(__func__, 1);
                }
                i--;
        }
        if (i == 0) {
                device_printf(ha->pci_dev, "%s: fw_mbx_cntrl 0x%08x\n",
                        __func__, data);
                ret = -2;
                ha->qla_initiate_recovery = 1;
                goto exit_qla_mbx_cmd;
        }

        for (i = 0; i < n_fwmbox; i++) {
                *fw_mbox++ = READ_REG32(ha, (Q8_FW_MBOX0 + (i << 2)));
        }

        WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
        WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);

exit_qla_mbx_cmd:
        return (ret);
}

int
qla_get_nic_partition(qla_host_t *ha, uint32_t *supports_9kb,
        uint32_t *num_rcvq)
{
        uint32_t *mbox, err;
        device_t dev = ha->pci_dev;

        bzero(ha->hw.mbox, (sizeof (uint32_t) * Q8_NUM_MBOX));

        mbox = ha->hw.mbox;

        mbox[0] = Q8_MBX_GET_NIC_PARTITION | (0x2 << 16) | (0x2 << 29);

        if (qla_mbx_cmd(ha, mbox, 2, mbox, 19, 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }
        err = mbox[0] >> 25;

        if (supports_9kb != NULL) {
                if (mbox[16] & 0x80) /* bit 7 of mbox 16 */
                        *supports_9kb = 1;
                else
                        *supports_9kb = 0;
        }

        if (num_rcvq != NULL)
                *num_rcvq = ((mbox[6] >> 16) & 0xFFFF);

        if ((err != 1) && (err != 0)) {
                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
                return (-1);
        }
        return (0);
}

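/*
 * Name: qla_config_intr_cntxt
 * Function: Creates or deletes num_intrs firmware interrupt contexts,
 *      starting at start_idx.  On create, the firmware-assigned interrupt
 *      id and source are cached in ha->hw.intr_id[]/intr_src[] so the
 *      contexts can be deleted later.  The "start_idx + 1 + i" MSI-X
 *      index on create skips vector 0, which appears to be reserved for
 *      the mailbox interrupt.
 */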
static int
qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx, uint32_t num_intrs,
        uint32_t create)
{
        uint32_t i, err;
        device_t dev = ha->pci_dev;
        q80_config_intr_t *c_intr;
        q80_config_intr_rsp_t *c_intr_rsp;

        c_intr = (q80_config_intr_t *)ha->hw.mbox;
        bzero(c_intr, (sizeof (q80_config_intr_t)));

        c_intr->opcode = Q8_MBX_CONFIG_INTR;

        c_intr->count_version = (sizeof (q80_config_intr_t) >> 2);
        c_intr->count_version |= Q8_MBX_CMD_VERSION;

        c_intr->nentries = num_intrs;

        for (i = 0; i < num_intrs; i++) {
                if (create) {
                        c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_CREATE;
                        c_intr->intr[i].msix_index = start_idx + 1 + i;
                } else {
                        c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_DELETE;
                        c_intr->intr[i].msix_index =
                                ha->hw.intr_id[(start_idx + i)];
                }

                c_intr->intr[i].cmd_type |= Q8_MBX_CONFIG_INTR_TYPE_MSI_X;
        }

        if (qla_mbx_cmd(ha, (uint32_t *)c_intr,
                (sizeof (q80_config_intr_t) >> 2),
                ha->hw.mbox, (sizeof (q80_config_intr_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }

        c_intr_rsp = (q80_config_intr_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(c_intr_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed1 [0x%08x, %d]\n", __func__, err,
                        c_intr_rsp->nentries);

                for (i = 0; i < c_intr_rsp->nentries; i++) {
                        device_printf(dev, "%s: [%d]:[0x%x 0x%x 0x%x]\n",
                                __func__, i,
                                c_intr_rsp->intr[i].status,
                                c_intr_rsp->intr[i].intr_id,
                                c_intr_rsp->intr[i].intr_src);
                }

                return (-1);
        }

        for (i = 0; ((i < num_intrs) && create); i++) {
                if (!c_intr_rsp->intr[i].status) {
                        ha->hw.intr_id[(start_idx + i)] =
                                c_intr_rsp->intr[i].intr_id;
                        ha->hw.intr_src[(start_idx + i)] =
                                c_intr_rsp->intr[i].intr_src;
                }
        }

        return (0);
}

/*
 * Name: qla_config_rss
 * Function: Configure RSS for the context/interface.
 */
static const uint64_t rss_key[] = {
        0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
        0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
        0x255b0ec26d5a56daULL
};

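/*
 * rss_key above is the 40-byte RSS hash key, handed to the firmware as
 * five 64-bit words.  The firmware hashes the TCP/IP header fields of
 * each received packet with this key and uses the RSS indirection table
 * (programmed in qla_set_rss_ind_table() below) to pick the SDS ring,
 * and hence the CPU, that services the packet.
 */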
static int
qla_config_rss(qla_host_t *ha, uint16_t cntxt_id)
{
        q80_config_rss_t        *c_rss;
        q80_config_rss_rsp_t    *c_rss_rsp;
        uint32_t                err, i;
        device_t                dev = ha->pci_dev;

        c_rss = (q80_config_rss_t *)ha->hw.mbox;
        bzero(c_rss, (sizeof (q80_config_rss_t)));

        c_rss->opcode = Q8_MBX_CONFIG_RSS;

        c_rss->count_version = (sizeof (q80_config_rss_t) >> 2);
        c_rss->count_version |= Q8_MBX_CMD_VERSION;

        c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP_IP |
                                Q8_MBX_RSS_HASH_TYPE_IPV6_TCP_IP);
        //c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP |
        //                      Q8_MBX_RSS_HASH_TYPE_IPV6_TCP);

        c_rss->flags = Q8_MBX_RSS_FLAGS_ENABLE_RSS;
        c_rss->flags |= Q8_MBX_RSS_FLAGS_USE_IND_TABLE;

        c_rss->indtbl_mask = Q8_MBX_RSS_INDTBL_MASK;

        c_rss->indtbl_mask |= Q8_MBX_RSS_FLAGS_MULTI_RSS_VALID;
        c_rss->flags |= Q8_MBX_RSS_FLAGS_TYPE_CRSS;

        c_rss->cntxt_id = cntxt_id;

        for (i = 0; i < 5; i++) {
                c_rss->rss_key[i] = rss_key[i];
        }

        if (qla_mbx_cmd(ha, (uint32_t *)c_rss,
                (sizeof (q80_config_rss_t) >> 2),
                ha->hw.mbox, (sizeof(q80_config_rss_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }
        c_rss_rsp = (q80_config_rss_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(c_rss_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
                return (-1);
        }
        return (0);
}

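/*
 * Name: qla_set_rss_ind_table
 * Function: Programs "count" entries of the RSS indirection table,
 *      beginning at start_idx.  Each byte of ind_table names the SDS
 *      ring that receives packets whose hash falls on that slot.
 */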
static int
qla_set_rss_ind_table(qla_host_t *ha, uint32_t start_idx, uint32_t count,
        uint16_t cntxt_id, uint8_t *ind_table)
{
        q80_config_rss_ind_table_t      *c_rss_ind;
        q80_config_rss_ind_table_rsp_t  *c_rss_ind_rsp;
        uint32_t                        err;
        device_t                        dev = ha->pci_dev;

        if ((count > Q8_RSS_IND_TBL_SIZE) ||
                ((start_idx + count - 1) > Q8_RSS_IND_TBL_MAX_IDX)) {
                device_printf(dev, "%s: illegal count [%d, %d]\n", __func__,
                        start_idx, count);
                return (-1);
        }

        c_rss_ind = (q80_config_rss_ind_table_t *)ha->hw.mbox;
        bzero(c_rss_ind, sizeof (q80_config_rss_ind_table_t));

        c_rss_ind->opcode = Q8_MBX_CONFIG_RSS_TABLE;
        c_rss_ind->count_version = (sizeof (q80_config_rss_ind_table_t) >> 2);
        c_rss_ind->count_version |= Q8_MBX_CMD_VERSION;

        c_rss_ind->start_idx = start_idx;
        c_rss_ind->end_idx = start_idx + count - 1;
        c_rss_ind->cntxt_id = cntxt_id;
        bcopy(ind_table, c_rss_ind->ind_table, count);

        if (qla_mbx_cmd(ha, (uint32_t *)c_rss_ind,
                (sizeof (q80_config_rss_ind_table_t) >> 2), ha->hw.mbox,
                (sizeof(q80_config_rss_ind_table_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }

        c_rss_ind_rsp = (q80_config_rss_ind_table_rsp_t *)ha->hw.mbox;
        err = Q8_MBX_RSP_STATUS(c_rss_ind_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
                return (-1);
        }
        return (0);
}

/*
 * Name: qla_config_intr_coalesce
 * Function: Configure Interrupt Coalescing.
 */
static int
qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable,
        int rcv)
{
        q80_config_intr_coalesc_t       *intrc;
        q80_config_intr_coalesc_rsp_t   *intrc_rsp;
        uint32_t                        err, i;
        device_t                        dev = ha->pci_dev;

        intrc = (q80_config_intr_coalesc_t *)ha->hw.mbox;
        bzero(intrc, (sizeof (q80_config_intr_coalesc_t)));

        intrc->opcode = Q8_MBX_CONFIG_INTR_COALESCE;
        intrc->count_version = (sizeof (q80_config_intr_coalesc_t) >> 2);
        intrc->count_version |= Q8_MBX_CMD_VERSION;

        if (rcv) {
                intrc->flags = Q8_MBX_INTRC_FLAGS_RCV;
                intrc->max_pkts = ha->hw.rcv_intr_coalesce & 0xFFFF;
                intrc->max_mswait = (ha->hw.rcv_intr_coalesce >> 16) & 0xFFFF;
        } else {
                intrc->flags = Q8_MBX_INTRC_FLAGS_XMT;
                intrc->max_pkts = ha->hw.xmt_intr_coalesce & 0xFFFF;
                intrc->max_mswait = (ha->hw.xmt_intr_coalesce >> 16) & 0xFFFF;
        }

        intrc->cntxt_id = cntxt_id;

        if (tenable) {
                intrc->flags |= Q8_MBX_INTRC_FLAGS_PERIODIC;
                intrc->timer_type = Q8_MBX_INTRC_TIMER_PERIODIC;

                for (i = 0; i < ha->hw.num_sds_rings; i++) {
                        intrc->sds_ring_mask |= (1 << i);
                }
                intrc->ms_timeout = 1000;
        }

        if (qla_mbx_cmd(ha, (uint32_t *)intrc,
                (sizeof (q80_config_intr_coalesc_t) >> 2),
                ha->hw.mbox, (sizeof(q80_config_intr_coalesc_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }
        intrc_rsp = (q80_config_intr_coalesc_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(intrc_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
                return (-1);
        }

        return (0);
}

/*
 * Name: qla_config_mac_addr
 * Function: binds a MAC address to the context/interface.
 *      Can be unicast, multicast or broadcast.
 */
static int
qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac)
{
        q80_config_mac_addr_t           *cmac;
        q80_config_mac_addr_rsp_t       *cmac_rsp;
        uint32_t                        err;
        device_t                        dev = ha->pci_dev;

        cmac = (q80_config_mac_addr_t *)ha->hw.mbox;
        bzero(cmac, (sizeof (q80_config_mac_addr_t)));

        cmac->opcode = Q8_MBX_CONFIG_MAC_ADDR;
        cmac->count_version = sizeof (q80_config_mac_addr_t) >> 2;
        cmac->count_version |= Q8_MBX_CMD_VERSION;

        if (add_mac)
                cmac->cmd = Q8_MBX_CMAC_CMD_ADD_MAC_ADDR;
        else
                cmac->cmd = Q8_MBX_CMAC_CMD_DEL_MAC_ADDR;

        cmac->cmd |= Q8_MBX_CMAC_CMD_CAM_INGRESS;

        cmac->nmac_entries = 1;
        cmac->cntxt_id = ha->hw.rcv_cntxt_id;
        bcopy(mac_addr, cmac->mac_addr[0].addr, 6);

        if (qla_mbx_cmd(ha, (uint32_t *)cmac,
                (sizeof (q80_config_mac_addr_t) >> 2),
                ha->hw.mbox, (sizeof(q80_config_mac_addr_rsp_t) >> 2), 1)) {
                device_printf(dev, "%s: %s failed0\n", __func__,
                        (add_mac ? "Add" : "Del"));
                return (-1);
        }
        cmac_rsp = (q80_config_mac_addr_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(cmac_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: %s "
                        "%02x:%02x:%02x:%02x:%02x:%02x failed1 [0x%08x]\n",
                        __func__, (add_mac ? "Add" : "Del"),
                        mac_addr[0], mac_addr[1], mac_addr[2],
                        mac_addr[3], mac_addr[4], mac_addr[5], err);
                return (-1);
        }

        return (0);
}

/*
 * Name: qla_set_mac_rcv_mode
 * Function: Enable/Disable AllMulticast and Promiscuous Modes.
 */
static int
qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode)
{
        q80_config_mac_rcv_mode_t       *rcv_mode;
        uint32_t                        err;
        q80_config_mac_rcv_mode_rsp_t   *rcv_mode_rsp;
        device_t                        dev = ha->pci_dev;

        rcv_mode = (q80_config_mac_rcv_mode_t *)ha->hw.mbox;
        bzero(rcv_mode, (sizeof (q80_config_mac_rcv_mode_t)));

        rcv_mode->opcode = Q8_MBX_CONFIG_MAC_RX_MODE;
        rcv_mode->count_version = sizeof (q80_config_mac_rcv_mode_t) >> 2;
        rcv_mode->count_version |= Q8_MBX_CMD_VERSION;

        rcv_mode->mode = mode;

        rcv_mode->cntxt_id = ha->hw.rcv_cntxt_id;

        if (qla_mbx_cmd(ha, (uint32_t *)rcv_mode,
                (sizeof (q80_config_mac_rcv_mode_t) >> 2),
                ha->hw.mbox, (sizeof(q80_config_mac_rcv_mode_rsp_t) >> 2), 1)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }
        rcv_mode_rsp = (q80_config_mac_rcv_mode_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(rcv_mode_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
                return (-1);
        }

        return (0);
}

int
ql_set_promisc(qla_host_t *ha)
{
        int ret;

        ha->hw.mac_rcv_mode |= Q8_MBX_MAC_RCV_PROMISC_ENABLE;
        ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
        return (ret);
}

void
qla_reset_promisc(qla_host_t *ha)
{
        ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_RCV_PROMISC_ENABLE;
        (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
}

int
ql_set_allmulti(qla_host_t *ha)
{
        int ret;

        ha->hw.mac_rcv_mode |= Q8_MBX_MAC_ALL_MULTI_ENABLE;
        ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
        return (ret);
}

void
qla_reset_allmulti(qla_host_t *ha)
{
        ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_ALL_MULTI_ENABLE;
        (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
}

/*
 * Name: ql_set_max_mtu
 * Function:
 *      Sets the maximum transfer unit size for the specified rcv context.
 */
int
ql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id)
{
        device_t                dev;
        q80_set_max_mtu_t       *max_mtu;
        q80_set_max_mtu_rsp_t   *max_mtu_rsp;
        uint32_t                err;

        dev = ha->pci_dev;

        max_mtu = (q80_set_max_mtu_t *)ha->hw.mbox;
        bzero(max_mtu, (sizeof (q80_set_max_mtu_t)));

        max_mtu->opcode = Q8_MBX_SET_MAX_MTU;
        max_mtu->count_version = (sizeof (q80_set_max_mtu_t) >> 2);
        max_mtu->count_version |= Q8_MBX_CMD_VERSION;

        max_mtu->cntxt_id = cntxt_id;
        max_mtu->mtu = mtu;

        if (qla_mbx_cmd(ha, (uint32_t *)max_mtu,
                (sizeof (q80_set_max_mtu_t) >> 2),
                ha->hw.mbox, (sizeof (q80_set_max_mtu_rsp_t) >> 2), 1)) {
                device_printf(dev, "%s: failed\n", __func__);
                return (-1);
        }

        max_mtu_rsp = (q80_set_max_mtu_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(max_mtu_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
        }

        return (0);
}

static int
qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id)
{
        device_t                dev;
        q80_link_event_t        *lnk;
        q80_link_event_rsp_t    *lnk_rsp;
        uint32_t                err;

        dev = ha->pci_dev;

        lnk = (q80_link_event_t *)ha->hw.mbox;
        bzero(lnk, (sizeof (q80_link_event_t)));

        lnk->opcode = Q8_MBX_LINK_EVENT_REQ;
        lnk->count_version = (sizeof (q80_link_event_t) >> 2);
        lnk->count_version |= Q8_MBX_CMD_VERSION;

        lnk->cntxt_id = cntxt_id;
        lnk->cmd = Q8_LINK_EVENT_CMD_ENABLE_ASYNC;

        if (qla_mbx_cmd(ha, (uint32_t *)lnk, (sizeof (q80_link_event_t) >> 2),
                ha->hw.mbox, (sizeof (q80_link_event_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed\n", __func__);
                return (-1);
        }

        lnk_rsp = (q80_link_event_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(lnk_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
        }

        return (0);
}

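/*
 * Name: qla_config_fw_lro
 * Function: Enables firmware-assisted LRO on the given rcv context for
 *      both IPv4 and IPv6 flows; the *_WO_DST_IP_CHK flags request
 *      coalescing without the destination-IP check.
 */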
static int
qla_config_fw_lro(qla_host_t *ha, uint16_t cntxt_id)
{
        device_t                dev;
        q80_config_fw_lro_t     *fw_lro;
        q80_config_fw_lro_rsp_t *fw_lro_rsp;
        uint32_t                err;

        dev = ha->pci_dev;

        fw_lro = (q80_config_fw_lro_t *)ha->hw.mbox;
        bzero(fw_lro, sizeof(q80_config_fw_lro_t));

        fw_lro->opcode = Q8_MBX_CONFIG_FW_LRO;
        fw_lro->count_version = (sizeof (q80_config_fw_lro_t) >> 2);
        fw_lro->count_version |= Q8_MBX_CMD_VERSION;

        fw_lro->flags |= Q8_MBX_FW_LRO_IPV4 | Q8_MBX_FW_LRO_IPV4_WO_DST_IP_CHK;
        fw_lro->flags |= Q8_MBX_FW_LRO_IPV6 | Q8_MBX_FW_LRO_IPV6_WO_DST_IP_CHK;

        fw_lro->cntxt_id = cntxt_id;

        if (qla_mbx_cmd(ha, (uint32_t *)fw_lro,
                (sizeof (q80_config_fw_lro_t) >> 2),
                ha->hw.mbox, (sizeof (q80_config_fw_lro_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed\n", __func__);
                return (-1);
        }

        fw_lro_rsp = (q80_config_fw_lro_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(fw_lro_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
        }

        return (0);
}

static int
qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode)
{
        device_t                dev;
        q80_hw_config_t         *hw_config;
        q80_hw_config_rsp_t     *hw_config_rsp;
        uint32_t                err;

        dev = ha->pci_dev;

        hw_config = (q80_hw_config_t *)ha->hw.mbox;
        bzero(hw_config, sizeof (q80_hw_config_t));

        hw_config->opcode = Q8_MBX_HW_CONFIG;
        hw_config->count_version = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE_COUNT;
        hw_config->count_version |= Q8_MBX_CMD_VERSION;

        hw_config->cmd = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE;

        hw_config->u.set_cam_search_mode.mode = search_mode;

        if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
                (sizeof (q80_hw_config_t) >> 2),
                ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed\n", __func__);
                return (-1);
        }
        hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
        }

        return (0);
}

static int
qla_get_cam_search_mode(qla_host_t *ha)
{
        device_t                dev;
        q80_hw_config_t         *hw_config;
        q80_hw_config_rsp_t     *hw_config_rsp;
        uint32_t                err;

        dev = ha->pci_dev;

        hw_config = (q80_hw_config_t *)ha->hw.mbox;
        bzero(hw_config, sizeof (q80_hw_config_t));

        hw_config->opcode = Q8_MBX_HW_CONFIG;
        hw_config->count_version = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE_COUNT;
        hw_config->count_version |= Q8_MBX_CMD_VERSION;

        hw_config->cmd = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE;

        if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
                (sizeof (q80_hw_config_t) >> 2),
                ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed\n", __func__);
                return (-1);
        }
        hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
        } else {
                device_printf(dev, "%s: cam search mode [0x%08x]\n", __func__,
                        hw_config_rsp->u.get_cam_search_mode.mode);
        }

        return (0);
}

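/*
 * The helpers below pretty-print the q80_*_stats_t blocks returned by
 * the firmware's get-statistics mailbox command; they are presumably
 * driven from qla_get_quick_stats(), declared above.
 */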
static void
qla_xmt_stats(qla_host_t *ha, q80_xmt_stats_t *xstat, int i)
{
        device_t dev = ha->pci_dev;

        if (i < ha->hw.num_tx_rings) {
                device_printf(dev, "%s[%d]: total_bytes\t\t%" PRIu64 "\n",
                        __func__, i, xstat->total_bytes);
                device_printf(dev, "%s[%d]: total_pkts\t\t%" PRIu64 "\n",
                        __func__, i, xstat->total_pkts);
                device_printf(dev, "%s[%d]: errors\t\t%" PRIu64 "\n",
                        __func__, i, xstat->errors);
                device_printf(dev, "%s[%d]: pkts_dropped\t%" PRIu64 "\n",
                        __func__, i, xstat->pkts_dropped);
                device_printf(dev, "%s[%d]: switch_pkts\t\t%" PRIu64 "\n",
                        __func__, i, xstat->switch_pkts);
                device_printf(dev, "%s[%d]: num_buffers\t\t%" PRIu64 "\n",
                        __func__, i, xstat->num_buffers);
        } else {
                device_printf(dev, "%s: total_bytes\t\t\t%" PRIu64 "\n",
                        __func__, xstat->total_bytes);
                device_printf(dev, "%s: total_pkts\t\t\t%" PRIu64 "\n",
                        __func__, xstat->total_pkts);
                device_printf(dev, "%s: errors\t\t\t%" PRIu64 "\n",
                        __func__, xstat->errors);
                device_printf(dev, "%s: pkts_dropped\t\t\t%" PRIu64 "\n",
                        __func__, xstat->pkts_dropped);
                device_printf(dev, "%s: switch_pkts\t\t\t%" PRIu64 "\n",
                        __func__, xstat->switch_pkts);
                device_printf(dev, "%s: num_buffers\t\t\t%" PRIu64 "\n",
                        __func__, xstat->num_buffers);
        }
}

static void
qla_rcv_stats(qla_host_t *ha, q80_rcv_stats_t *rstat)
{
        device_t dev = ha->pci_dev;

        device_printf(dev, "%s: total_bytes\t\t\t%" PRIu64 "\n", __func__,
                rstat->total_bytes);
        device_printf(dev, "%s: total_pkts\t\t\t%" PRIu64 "\n", __func__,
                rstat->total_pkts);
        device_printf(dev, "%s: lro_pkt_count\t\t%" PRIu64 "\n", __func__,
                rstat->lro_pkt_count);
        device_printf(dev, "%s: sw_pkt_count\t\t\t%" PRIu64 "\n", __func__,
                rstat->sw_pkt_count);
        device_printf(dev, "%s: ip_chksum_err\t\t%" PRIu64 "\n", __func__,
                rstat->ip_chksum_err);
        device_printf(dev, "%s: pkts_wo_acntxts\t\t%" PRIu64 "\n", __func__,
                rstat->pkts_wo_acntxts);
        device_printf(dev, "%s: pkts_dropped_no_sds_card\t%" PRIu64 "\n",
                __func__, rstat->pkts_dropped_no_sds_card);
        device_printf(dev, "%s: pkts_dropped_no_sds_host\t%" PRIu64 "\n",
                __func__, rstat->pkts_dropped_no_sds_host);
        device_printf(dev, "%s: oversized_pkts\t\t%" PRIu64 "\n", __func__,
                rstat->oversized_pkts);
        device_printf(dev, "%s: pkts_dropped_no_rds\t\t%" PRIu64 "\n",
                __func__, rstat->pkts_dropped_no_rds);
        device_printf(dev, "%s: unxpctd_mcast_pkts\t\t%" PRIu64 "\n",
                __func__, rstat->unxpctd_mcast_pkts);
        device_printf(dev, "%s: re1_fbq_error\t\t%" PRIu64 "\n", __func__,
                rstat->re1_fbq_error);
        device_printf(dev, "%s: invalid_mac_addr\t\t%" PRIu64 "\n", __func__,
                rstat->invalid_mac_addr);
        device_printf(dev, "%s: rds_prime_trys\t\t%" PRIu64 "\n", __func__,
                rstat->rds_prime_trys);
        device_printf(dev, "%s: rds_prime_success\t\t%" PRIu64 "\n", __func__,
                rstat->rds_prime_success);
        device_printf(dev, "%s: lro_flows_added\t\t%" PRIu64 "\n", __func__,
                rstat->lro_flows_added);
        device_printf(dev, "%s: lro_flows_deleted\t\t%" PRIu64 "\n", __func__,
                rstat->lro_flows_deleted);
        device_printf(dev, "%s: lro_flows_active\t\t%" PRIu64 "\n", __func__,
                rstat->lro_flows_active);
        device_printf(dev, "%s: pkts_droped_unknown\t\t%" PRIu64 "\n",
                __func__, rstat->pkts_droped_unknown);
}
1532
1533 static void
1534 qla_mac_stats(qla_host_t *ha, q80_mac_stats_t *mstat)
1535 {
1536         device_t dev = ha->pci_dev;
1537
1538         device_printf(dev, "%s: xmt_frames\t\t\t%" PRIu64 "\n", __func__,
1539                 mstat->xmt_frames);
1540         device_printf(dev, "%s: xmt_bytes\t\t\t%" PRIu64 "\n", __func__,
1541                 mstat->xmt_bytes);
1542         device_printf(dev, "%s: xmt_mcast_pkts\t\t%" PRIu64 "\n", __func__,
1543                 mstat->xmt_mcast_pkts);
1544         device_printf(dev, "%s: xmt_bcast_pkts\t\t%" PRIu64 "\n", __func__,
1545                 mstat->xmt_bcast_pkts);
1546         device_printf(dev, "%s: xmt_pause_frames\t\t%" PRIu64 "\n", __func__,
1547                 mstat->xmt_pause_frames);
1548         device_printf(dev, "%s: xmt_cntrl_pkts\t\t%" PRIu64 "\n", __func__,
1549                 mstat->xmt_cntrl_pkts);
1550         device_printf(dev, "%s: xmt_pkt_lt_64bytes\t\t%" PRIu64 "\n",
1551                 __func__, mstat->xmt_pkt_lt_64bytes);
1552         device_printf(dev, "%s: xmt_pkt_lt_127bytes\t\t%" PRIu64 "\n",
1553                 __func__, mstat->xmt_pkt_lt_127bytes);
1554         device_printf(dev, "%s: xmt_pkt_lt_255bytes\t\t%" PRIu64 "\n",
1555                 __func__, mstat->xmt_pkt_lt_255bytes);
1556         device_printf(dev, "%s: xmt_pkt_lt_511bytes\t\t%" PRIu64 "\n",
1557                 __func__, mstat->xmt_pkt_lt_511bytes);
1558         device_printf(dev, "%s: xmt_pkt_lt_1023bytes\t\t%" PRIu64 "\n",
1559                 __func__, mstat->xmt_pkt_lt_1023bytes);
1560         device_printf(dev, "%s: xmt_pkt_lt_1518bytes\t\t%" PRIu64 "\n",
1561                 __func__, mstat->xmt_pkt_lt_1518bytes);
1562         device_printf(dev, "%s: xmt_pkt_gt_1518bytes\t\t%" PRIu64 "\n",
1563                 __func__, mstat->xmt_pkt_gt_1518bytes);
1564
1565         device_printf(dev, "%s: rcv_frames\t\t\t%" PRIu64 "\n", __func__,
1566                 mstat->rcv_frames);
1567         device_printf(dev, "%s: rcv_bytes\t\t\t%" PRIu64 "\n", __func__,
1568                 mstat->rcv_bytes);
1569         device_printf(dev, "%s: rcv_mcast_pkts\t\t%" PRIu64 "\n", __func__,
1570                 mstat->rcv_mcast_pkts);
1571         device_printf(dev, "%s: rcv_bcast_pkts\t\t%" PRIu64 "\n", __func__,
1572                 mstat->rcv_bcast_pkts);
1573         device_printf(dev, "%s: rcv_pause_frames\t\t%" PRIu64 "\n", __func__,
1574                 mstat->rcv_pause_frames);
1575         device_printf(dev, "%s: rcv_cntrl_pkts\t\t%" PRIu64 "\n", __func__,
1576                 mstat->rcv_cntrl_pkts);
1577         device_printf(dev, "%s: rcv_pkt_lt_64bytes\t\t%" PRIu64 "\n",
1578                 __func__, mstat->rcv_pkt_lt_64bytes);
1579         device_printf(dev, "%s: rcv_pkt_lt_127bytes\t\t%" PRIu64 "\n",
1580                 __func__, mstat->rcv_pkt_lt_127bytes);
1581         device_printf(dev, "%s: rcv_pkt_lt_255bytes\t\t%" PRIu64 "\n",
1582                 __func__, mstat->rcv_pkt_lt_255bytes);
1583         device_printf(dev, "%s: rcv_pkt_lt_511bytes\t\t%" PRIu64 "\n",
1584                 __func__, mstat->rcv_pkt_lt_511bytes);
1585         device_printf(dev, "%s: rcv_pkt_lt_1023bytes\t\t%" PRIu64 "\n",
1586                 __func__, mstat->rcv_pkt_lt_1023bytes);
1587         device_printf(dev, "%s: rcv_pkt_lt_1518bytes\t\t%" PRIu64 "\n",
1588                 __func__, mstat->rcv_pkt_lt_1518bytes);
1589         device_printf(dev, "%s: rcv_pkt_gt_1518bytes\t\t%" PRIu64 "\n",
1590                 __func__, mstat->rcv_pkt_gt_1518bytes);
1591
1592         device_printf(dev, "%s: rcv_len_error\t\t%" PRIu64 "\n", __func__,
1593                 mstat->rcv_len_error);
1594         device_printf(dev, "%s: rcv_len_small\t\t%" PRIu64 "\n", __func__,
1595                 mstat->rcv_len_small);
1596         device_printf(dev, "%s: rcv_len_large\t\t%" PRIu64 "\n", __func__,
1597                 mstat->rcv_len_large);
1598         device_printf(dev, "%s: rcv_jabber\t\t\t%" PRIu64 "\n", __func__,
1599                 mstat->rcv_jabber);
1600         device_printf(dev, "%s: rcv_dropped\t\t\t%" PRIu64 "\n", __func__,
1601                 mstat->rcv_dropped);
1602         device_printf(dev, "%s: fcs_error\t\t\t%" PRIu64 "\n", __func__,
1603                 mstat->fcs_error);
1604         device_printf(dev, "%s: align_error\t\t\t%" PRIu64 "\n", __func__,
1605                 mstat->align_error);
1606 }
1607
1608
1609 static int
1610 qla_get_hw_stats(qla_host_t *ha, uint32_t cmd, uint32_t rsp_size)
1611 {
1612         device_t                dev;
1613         q80_get_stats_t         *stat;
1614         q80_get_stats_rsp_t     *stat_rsp;
1615         uint32_t                err;
1616
1617         dev = ha->pci_dev;
1618
1619         stat = (q80_get_stats_t *)ha->hw.mbox;
1620         bzero(stat, (sizeof (q80_get_stats_t)));
1621
1622         stat->opcode = Q8_MBX_GET_STATS;
1623         stat->count_version = 2;
1624         stat->count_version |= Q8_MBX_CMD_VERSION;
1625
1626         stat->cmd = cmd;
1627
1628         if (qla_mbx_cmd(ha, (uint32_t *)stat, 2,
1629                 ha->hw.mbox, (rsp_size >> 2), 0)) {
1630                 device_printf(dev, "%s: failed\n", __func__);
1631                 return -1;
1632         }
1633
1634         stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
1635
1636         err = Q8_MBX_RSP_STATUS(stat_rsp->regcnt_status);
1637
1638         if (err) {
1639                 return -1;
1640         }
1641
1642         return 0;
1643 }
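
/*
 * Note: qla_mbx_cmd() counts its payloads in 32-bit mailbox words,
 * which is why the byte-sized rsp_size is shifted right by 2 above.
 */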
1644
1645 void
1646 ql_get_stats(qla_host_t *ha)
1647 {
1648         q80_get_stats_rsp_t     *stat_rsp;
1649         q80_mac_stats_t         *mstat;
1650         q80_xmt_stats_t         *xstat;
1651         q80_rcv_stats_t         *rstat;
1652         uint32_t                cmd;
1653         int                     i;
1654
1655         stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
1656         /*
1657          * Get MAC Statistics
1658          */
1659         cmd = Q8_GET_STATS_CMD_TYPE_MAC;
1660 //      cmd |= Q8_GET_STATS_CMD_CLEAR;
1661
1662         cmd |= ((ha->pci_func & 0x1) << 16);
1663
1664         if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
1665                 mstat = (q80_mac_stats_t *)&stat_rsp->u.mac;
1666                 qla_mac_stats(ha, mstat);
1667         } else {
1668                 device_printf(ha->pci_dev, "%s: mac failed [0x%08x]\n",
1669                         __func__, ha->hw.mbox[0]);
1670         }
1671         /*
1672          * Get RCV Statistics
1673          */
1674         cmd = Q8_GET_STATS_CMD_RCV | Q8_GET_STATS_CMD_TYPE_CNTXT;
1675 //      cmd |= Q8_GET_STATS_CMD_CLEAR;
1676         cmd |= (ha->hw.rcv_cntxt_id << 16);
1677
1678         if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
1679                 rstat = (q80_rcv_stats_t *)&stat_rsp->u.rcv;
1680                 qla_rcv_stats(ha, rstat);
1681         } else {
1682                 device_printf(ha->pci_dev, "%s: rcv failed [0x%08x]\n",
1683                         __func__, ha->hw.mbox[0]);
1684         }
1685         /*
1686          * Get XMT Statistics
1687          */
1688         for (i = 0 ; i < ha->hw.num_tx_rings; i++) {
1689                 cmd = Q8_GET_STATS_CMD_XMT | Q8_GET_STATS_CMD_TYPE_CNTXT;
1690 //              cmd |= Q8_GET_STATS_CMD_CLEAR;
1691                 cmd |= (ha->hw.tx_cntxt[i].tx_cntxt_id << 16);
1692
1693                 if (qla_get_hw_stats(ha, cmd, sizeof(q80_get_stats_rsp_t))
1694                         == 0) {
1695                         xstat = (q80_xmt_stats_t *)&stat_rsp->u.xmt;
1696                         qla_xmt_stats(ha, xstat, i);
1697                 } else {
1698                         device_printf(ha->pci_dev, "%s: xmt failed [0x%08x]\n",
1699                                 __func__, ha->hw.mbox[0]);
1700                 }
1701         }
1702         return;
1703 }
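
/*
 * A minimal sketch, not compiled into the driver, of how the
 * statistics command words above are packed: the low 16 bits carry
 * the command/type bits, the upper 16 bits carry the PCI function
 * or hardware context id. The helper name is hypothetical.
 */
#if 0
static inline uint32_t
qla_stats_cmd_word(uint32_t cmd_bits, uint16_t id)
{
	return (cmd_bits | ((uint32_t)id << 16));
}
/*
 * e.g. qla_stats_cmd_word(Q8_GET_STATS_CMD_RCV |
 *		Q8_GET_STATS_CMD_TYPE_CNTXT, ha->hw.rcv_cntxt_id)
 */
#endif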
1704
1705 static void
1706 qla_get_quick_stats(qla_host_t *ha)
1707 {
1708         q80_get_mac_rcv_xmt_stats_rsp_t *stat_rsp;
1709         q80_mac_stats_t         *mstat;
1710         q80_xmt_stats_t         *xstat;
1711         q80_rcv_stats_t         *rstat;
1712         uint32_t                cmd;
1713
1714         stat_rsp = (q80_get_mac_rcv_xmt_stats_rsp_t *)ha->hw.mbox;
1715
1716         cmd = Q8_GET_STATS_CMD_TYPE_ALL;
1717 //      cmd |= Q8_GET_STATS_CMD_CLEAR;
1718
1719 //      cmd |= ((ha->pci_func & 0x3) << 16);
1720         cmd |= (0xFFFF << 16);
1721
1722         if (qla_get_hw_stats(ha, cmd,
1723                         sizeof (q80_get_mac_rcv_xmt_stats_rsp_t)) == 0) {
1724
1725                 mstat = (q80_mac_stats_t *)&stat_rsp->mac;
1726                 rstat = (q80_rcv_stats_t *)&stat_rsp->rcv;
1727                 xstat = (q80_xmt_stats_t *)&stat_rsp->xmt;
1728                 qla_mac_stats(ha, mstat);
1729                 qla_rcv_stats(ha, rstat);
1730                 qla_xmt_stats(ha, xstat, ha->hw.num_tx_rings);
1731         } else {
1732                 device_printf(ha->pci_dev, "%s: failed [0x%08x]\n",
1733                         __func__, ha->hw.mbox[0]);
1734         }
1735         return;
1736 }
1737
1738 /*
1739  * Name: qla_tx_tso
1740  * Function: Checks if the packet to be transmitted is a candidate for
1741  *      Large TCP Segment Offload. If yes, the appropriate fields in the Tx
1742  *      Ring Structure are plugged in.
1743  */
1744 static int
1745 qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr)
1746 {
1747         struct ether_vlan_header *eh;
1748         struct ip *ip = NULL;
1749         struct ip6_hdr *ip6 = NULL;
1750         struct tcphdr *th = NULL;
1751         uint32_t ehdrlen,  hdrlen, ip_hlen, tcp_hlen, tcp_opt_off;
1752         uint16_t etype, opcode, offload = 1;
1753         device_t dev;
1754
1755         dev = ha->pci_dev;
1756
1757
1758         eh = mtod(mp, struct ether_vlan_header *);
1759
1760         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1761                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1762                 etype = ntohs(eh->evl_proto);
1763         } else {
1764                 ehdrlen = ETHER_HDR_LEN;
1765                 etype = ntohs(eh->evl_encap_proto);
1766         }
1767
1768         hdrlen = 0;
1769
1770         switch (etype) {
1771                 case ETHERTYPE_IP:
1772
1773                         tcp_opt_off = ehdrlen + sizeof(struct ip) +
1774                                         sizeof(struct tcphdr);
1775
1776                         if (mp->m_len < tcp_opt_off) {
1777                                 m_copydata(mp, 0, tcp_opt_off, hdr);
1778                                 ip = (struct ip *)(hdr + ehdrlen);
1779                         } else {
1780                                 ip = (struct ip *)(mp->m_data + ehdrlen);
1781                         }
1782
1783                         ip_hlen = ip->ip_hl << 2;
1784                         opcode = Q8_TX_CMD_OP_XMT_TCP_LSO;
1785
1786                                 
1787                         if ((ip->ip_p != IPPROTO_TCP) ||
1788                                 (ip_hlen != sizeof (struct ip))){
1789                                 /* IP Options are not supported */
1790
1791                                 offload = 0;
1792                         } else
1793                                 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
1794
1795                 break;
1796
1797                 case ETHERTYPE_IPV6:
1798
1799                         tcp_opt_off = ehdrlen + sizeof(struct ip6_hdr) +
1800                                         sizeof (struct tcphdr);
1801
1802                         if (mp->m_len < tcp_opt_off) {
1803                                 m_copydata(mp, 0, tcp_opt_off, hdr);
1804                                 ip6 = (struct ip6_hdr *)(hdr + ehdrlen);
1805                         } else {
1806                                 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
1807                         }
1808
1809                         ip_hlen = sizeof(struct ip6_hdr);
1810                         opcode = Q8_TX_CMD_OP_XMT_TCP_LSO_IPV6;
1811
1812                         if (ip6->ip6_nxt != IPPROTO_TCP) {
1813                                 //device_printf(dev, "%s: ipv6\n", __func__);
1814                                 offload = 0;
1815                         } else
1816                                 th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
1817                 break;
1818
1819                 default:
1820                         QL_DPRINT8(ha, (dev, "%s: type!=ip\n", __func__));
1821                         offload = 0;
1822                 break;
1823         }
1824
1825         if (!offload)
1826                 return (-1);
1827
1828         tcp_hlen = th->th_off << 2;
1829         hdrlen = ehdrlen + ip_hlen + tcp_hlen;
1830
1831         if (mp->m_len < hdrlen) {
1832                 if (mp->m_len < tcp_opt_off) {
1833                         if (tcp_hlen > sizeof(struct tcphdr)) {
1834                                 m_copydata(mp, tcp_opt_off,
1835                                         (tcp_hlen - sizeof(struct tcphdr)),
1836                                         &hdr[tcp_opt_off]);
1837                         }
1838                 } else {
1839                         m_copydata(mp, 0, hdrlen, hdr);
1840                 }
1841         }
1842
1843         tx_cmd->mss = mp->m_pkthdr.tso_segsz;
1844
1845         tx_cmd->flags_opcode = opcode;
1846         tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;
1847         tx_cmd->total_hdr_len = hdrlen;
1848
1849         /* Multicast: least significant bit of first dest MAC byte is set */
1850         if (eh->evl_dhost[0] & 0x01) {
1851                 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST;
1852         }
1853
1854         if (mp->m_len < hdrlen) {
1855                 QL_DPRINT8(ha, (dev, "%s: hdrlen %d\n", __func__, hdrlen));
1856                 return (1);
1857         }
1858
1859         return (0);
1860 }
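
/*
 * Worked example for the hdrlen math above (assuming no TCP options,
 * and recalling that qla_tx_tso() rejects IPv4 options): a plain
 * IPv4/TCP frame yields hdrlen = 14 (ether) + 20 (ip) + 20 (tcp) =
 * 54 bytes; a VLAN-tagged IPv6/TCP frame yields 18 + 40 + 20 = 78.
 * When the first mbuf is shorter than this, the header is staged
 * into 'hdr' via m_copydata() for the caller to copy into the tx
 * command descriptors.
 */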
1861
1862 /*
1863  * Name: qla_tx_chksum
1864  * Function: Checks if the packet to be transmitted is a candidate for
1865  *      TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx
1866  *      Ring Structure are plugged in.
1867  */
1868 static int
1869 qla_tx_chksum(qla_host_t *ha, struct mbuf *mp, uint32_t *op_code,
1870         uint32_t *tcp_hdr_off)
1871 {
1872         struct ether_vlan_header *eh;
1873         struct ip *ip;
1874         struct ip6_hdr *ip6;
1875         uint32_t ehdrlen, ip_hlen;
1876         uint16_t etype, opcode, offload = 1;
1877         device_t dev;
1878         uint8_t buf[sizeof(struct ip6_hdr)];
1879
1880         dev = ha->pci_dev;
1881
1882         *op_code = 0;
1883
1884         if ((mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) == 0)
1885                 return (-1);
1886
1887         eh = mtod(mp, struct ether_vlan_header *);
1888
1889         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1890                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1891                 etype = ntohs(eh->evl_proto);
1892         } else {
1893                 ehdrlen = ETHER_HDR_LEN;
1894                 etype = ntohs(eh->evl_encap_proto);
1895         }
1896
1897                 
1898         switch (etype) {
1899                 case ETHERTYPE_IP:
1900                         ip = (struct ip *)(mp->m_data + ehdrlen);
1901
1902                         ip_hlen = sizeof (struct ip);
1903
1904                         if (mp->m_len < (ehdrlen + ip_hlen)) {
1905                                 m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
1906                                 ip = (struct ip *)buf;
1907                         }
1908
1909                         if (ip->ip_p == IPPROTO_TCP)
1910                                 opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM;
1911                         else if (ip->ip_p == IPPROTO_UDP)
1912                                 opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM;
1913                         else {
1914                                 //device_printf(dev, "%s: ipv4\n", __func__);
1915                                 offload = 0;
1916                         }
1917                 break;
1918
1919                 case ETHERTYPE_IPV6:
1920                         ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
1921
1922                         ip_hlen = sizeof(struct ip6_hdr);
1923
1924                         if (mp->m_len < (ehdrlen + ip_hlen)) {
1925                                 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
1926                                         buf);
1927                                 ip6 = (struct ip6_hdr *)buf;
1928                         }
1929
1930                         if (ip6->ip6_nxt == IPPROTO_TCP)
1931                                 opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6;
1932                         else if (ip6->ip6_nxt == IPPROTO_UDP)
1933                                 opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6;
1934                         else {
1935                                 //device_printf(dev, "%s: ipv6\n", __func__);
1936                                 offload = 0;
1937                         }
1938                 break;
1939
1940                 default:
1941                         offload = 0;
1942                 break;
1943         }
1944         if (!offload)
1945                 return (-1);
1946
1947         *op_code = opcode;
1948         *tcp_hdr_off = (ip_hlen + ehdrlen);
1949
1950         return (0);
1951 }
1952
1953 #define QLA_TX_MIN_FREE 2
1954 /*
1955  * Name: ql_hw_send
1956  * Function: Transmits a packet. It first checks if the packet is a
1957  *      candidate for Large TCP Segment Offload and then for UDP/TCP checksum
1958  *      offload. If neither criterion is met, it is transmitted
1959  *      as a regular Ethernet frame.
1960  */
1961 int
1962 ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
1963         uint32_t tx_idx, struct mbuf *mp, uint32_t txr_idx, uint32_t iscsi_pdu)
1964 {
1965         struct ether_vlan_header *eh;
1966         qla_hw_t *hw = &ha->hw;
1967         q80_tx_cmd_t *tx_cmd, tso_cmd;
1968         bus_dma_segment_t *c_seg;
1969         uint32_t num_tx_cmds, hdr_len = 0;
1970         uint32_t total_length = 0, bytes, tx_cmd_count = 0, txr_next;
1971         device_t dev;
1972         int i, ret;
1973         uint8_t *src = NULL, *dst = NULL;
1974         uint8_t frame_hdr[QL_FRAME_HDR_SIZE];
1975         uint32_t op_code = 0;
1976         uint32_t tcp_hdr_off = 0;
1977
1978         dev = ha->pci_dev;
1979
1980         /*
1981          * Always make sure there is at least one empty slot in the tx_ring;
1982          * the tx_ring is considered full when only one entry is available.
1983          */
1984         num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2; /* ceil(nsegs/4) */
1985
1986         total_length = mp->m_pkthdr.len;
1987         if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
1988                 device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
1989                         __func__, total_length);
1990                 return (-1);
1991         }
1992         eh = mtod(mp, struct ether_vlan_header *);
1993
1994         if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
1995
1996                 bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t));
1997
1998                 src = frame_hdr;
1999                 ret = qla_tx_tso(ha, mp, &tso_cmd, src);
2000
2001                 if (!(ret & ~1)) { /* 0: hdr in mbuf, 1: hdr in frame_hdr */
2002                         /* find the additional tx_cmd descriptors required */
2003
2004                         if (mp->m_flags & M_VLANTAG)
2005                                 tso_cmd.total_hdr_len += ETHER_VLAN_ENCAP_LEN;
2006
2007                         hdr_len = tso_cmd.total_hdr_len;
2008
2009                         bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
2010                         bytes = QL_MIN(bytes, hdr_len);
2011
2012                         num_tx_cmds++;
2013                         hdr_len -= bytes;
2014
2015                         while (hdr_len) {
2016                                 bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
2017                                 hdr_len -= bytes;
2018                                 num_tx_cmds++;
2019                         }
2020                         hdr_len = tso_cmd.total_hdr_len;
2021
2022                         if (ret == 0)
2023                                 src = (uint8_t *)eh;
2024                 } else 
2025                         return (EINVAL);
2026         } else {
2027                 (void)qla_tx_chksum(ha, mp, &op_code, &tcp_hdr_off);
2028         }
2029
2030         if (iscsi_pdu)
2031                 ha->hw.iscsi_pkt_count++;
2032
2033         if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
2034                 qla_hw_tx_done_locked(ha, txr_idx);
2035                 if (hw->tx_cntxt[txr_idx].txr_free <=
2036                                 (num_tx_cmds + QLA_TX_MIN_FREE)) {
2037                         QL_DPRINT8(ha, (dev, "%s: (hw->txr_free <= "
2038                                 "(num_tx_cmds + QLA_TX_MIN_FREE))\n",
2039                                 __func__));
2040                         return (-1);
2041                 }
2042         }
2043
2044         tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[tx_idx];
2045
2046         if (!(mp->m_pkthdr.csum_flags & CSUM_TSO)) {
2047
2048                 if (nsegs > ha->hw.max_tx_segs)
2049                         ha->hw.max_tx_segs = nsegs;
2050
2051                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2052
2053                 if (op_code) {
2054                         tx_cmd->flags_opcode = op_code;
2055                         tx_cmd->tcp_hdr_off = tcp_hdr_off;
2056
2057                 } else {
2058                         tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER;
2059                 }
2060         } else {
2061                 bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t));
2062                 ha->tx_tso_frames++;
2063         }
2064
2065         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2066                 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED;
2067
2068                 if (iscsi_pdu)
2069                         eh->evl_tag |= ha->hw.user_pri_iscsi << 13;
2070
2071         } else if (mp->m_flags & M_VLANTAG) {
2072
2073                 if (hdr_len) { /* TSO */
2074                         tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED |
2075                                                 Q8_TX_CMD_FLAGS_HW_VLAN_ID);
2076                         tx_cmd->tcp_hdr_off += ETHER_VLAN_ENCAP_LEN;
2077                 } else
2078                         tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_HW_VLAN_ID;
2079
2080                 ha->hw_vlan_tx_frames++;
2081                 tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag;
2082
2083                 if (iscsi_pdu) {
2084                         tx_cmd->vlan_tci |= ha->hw.user_pri_iscsi << 13;
2085                         mp->m_pkthdr.ether_vtag = tx_cmd->vlan_tci;
2086                 }
2087         }
2088
2089
2090         tx_cmd->n_bufs = (uint8_t)nsegs;
2091         tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF);
2092         tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8)));
2093         tx_cmd->cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func);
2094
2095         c_seg = segs;
2096
2097         while (1) {
2098                 for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) {
2099
2100                         switch (i) {
2101                         case 0:
2102                                 tx_cmd->buf1_addr = c_seg->ds_addr;
2103                                 tx_cmd->buf1_len = c_seg->ds_len;
2104                                 break;
2105
2106                         case 1:
2107                                 tx_cmd->buf2_addr = c_seg->ds_addr;
2108                                 tx_cmd->buf2_len = c_seg->ds_len;
2109                                 break;
2110
2111                         case 2:
2112                                 tx_cmd->buf3_addr = c_seg->ds_addr;
2113                                 tx_cmd->buf3_len = c_seg->ds_len;
2114                                 break;
2115
2116                         case 3:
2117                                 tx_cmd->buf4_addr = c_seg->ds_addr;
2118                                 tx_cmd->buf4_len = c_seg->ds_len;
2119                                 break;
2120                         }
2121
2122                         c_seg++;
2123                         nsegs--;
2124                 }
2125
2126                 txr_next = hw->tx_cntxt[txr_idx].txr_next =
2127                         (hw->tx_cntxt[txr_idx].txr_next + 1) &
2128                                 (NUM_TX_DESCRIPTORS - 1);
2129                 tx_cmd_count++;
2130
2131                 if (!nsegs)
2132                         break;
2133                 
2134                 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2135                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2136         }
2137
2138         if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
2139
2140                 /* TSO : Copy the header in the following tx cmd descriptors */
2141
2142                 txr_next = hw->tx_cntxt[txr_idx].txr_next;
2143
2144                 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2145                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2146
2147                 bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
2148                 bytes = QL_MIN(bytes, hdr_len);
2149
2150                 dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN;
2151
2152                 if (mp->m_flags & M_VLANTAG) {
2153                         /* first copy the src/dst MAC addresses */
2154                         bcopy(src, dst, (ETHER_ADDR_LEN * 2));
2155                         dst += (ETHER_ADDR_LEN * 2);
2156                         src += (ETHER_ADDR_LEN * 2);
2157                         
2158                         *((uint16_t *)dst) = htons(ETHERTYPE_VLAN);
2159                         dst += 2;
2160                         *((uint16_t *)dst) = htons(mp->m_pkthdr.ether_vtag);
2161                         dst += 2;
2162
2163                         /* bytes left in src header */
2164                         hdr_len -= ((ETHER_ADDR_LEN * 2) +
2165                                         ETHER_VLAN_ENCAP_LEN);
2166
2167                         /* bytes left in TxCmd Entry */
2168                         bytes -= ((ETHER_ADDR_LEN * 2) + ETHER_VLAN_ENCAP_LEN);
2169
2170
2171                         bcopy(src, dst, bytes);
2172                         src += bytes;
2173                         hdr_len -= bytes;
2174                 } else {
2175                         bcopy(src, dst, bytes);
2176                         src += bytes;
2177                         hdr_len -= bytes;
2178                 }
2179
2180                 txr_next = hw->tx_cntxt[txr_idx].txr_next =
2181                                 (hw->tx_cntxt[txr_idx].txr_next + 1) &
2182                                         (NUM_TX_DESCRIPTORS - 1);
2183                 tx_cmd_count++;
2184                 
2185                 while (hdr_len) {
2186                         tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2187                         bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2188
2189                         bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
2190
2191                         bcopy(src, tx_cmd, bytes);
2192                         src += bytes;
2193                         hdr_len -= bytes;
2194
2195                         txr_next = hw->tx_cntxt[txr_idx].txr_next =
2196                                 (hw->tx_cntxt[txr_idx].txr_next + 1) &
2197                                         (NUM_TX_DESCRIPTORS - 1);
2198                         tx_cmd_count++;
2199                 }
2200         }
2201
2202         hw->tx_cntxt[txr_idx].txr_free =
2203                 hw->tx_cntxt[txr_idx].txr_free - tx_cmd_count;
2204
2205         QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->tx_cntxt[txr_idx].txr_next,\
2206                 txr_idx);
2207         QL_DPRINT8(ha, (dev, "%s: return\n", __func__));
2208
2209         return (0);
2210 }
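
/*
 * Worked example for the TSO header spill-over above, assuming a
 * 64-byte q80_tx_cmd_t and a Q8_TX_CMD_TSO_ALIGN of 16 (illustrative
 * values only): for hdr_len = 78 the first extra descriptor carries
 * 64 - 16 = 48 header bytes and a second carries the remaining 30,
 * so the sizing loop near the top of the function reserves two
 * additional tx cmd entries.
 */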
2211
2212
2213
2214 #define Q8_CONFIG_IND_TBL_SIZE  32 /* < Q8_RSS_IND_TBL_SIZE and power of 2 */
2215 static int
2216 qla_config_rss_ind_table(qla_host_t *ha)
2217 {
2218         uint32_t i, count;
2219         uint8_t rss_ind_tbl[Q8_CONFIG_IND_TBL_SIZE];
2220
2221
2222         for (i = 0; i < Q8_CONFIG_IND_TBL_SIZE; i++) {
2223                 rss_ind_tbl[i] = i % ha->hw.num_sds_rings;
2224         }
2225
2226         for (i = 0; i <= Q8_RSS_IND_TBL_MAX_IDX ;
2227                 i = i + Q8_CONFIG_IND_TBL_SIZE) {
2228
2229                 if ((i + Q8_CONFIG_IND_TBL_SIZE) > Q8_RSS_IND_TBL_MAX_IDX) {
2230                         count = Q8_RSS_IND_TBL_MAX_IDX - i + 1;
2231                 } else {
2232                         count = Q8_CONFIG_IND_TBL_SIZE;
2233                 }
2234
2235                 if (qla_set_rss_ind_table(ha, i, count, ha->hw.rcv_cntxt_id,
2236                         rss_ind_tbl))
2237                         return (-1);
2238         }
2239
2240         return (0);
2241 }
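
/*
 * Example of the table built above: with four SDS rings the entries
 * are filled round-robin as 0,1,2,3,0,1,2,3,... so RSS hash buckets
 * spread evenly across the rings; the table is then programmed in
 * Q8_CONFIG_IND_TBL_SIZE-entry chunks until Q8_RSS_IND_TBL_MAX_IDX
 * is covered.
 */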
2242
2243 /*
2244  * Name: ql_del_hw_if
2245  * Function: Destroys the hardware-specific entities corresponding to an
2246  *      Ethernet Interface
2247  */
2248 void
2249 ql_del_hw_if(qla_host_t *ha)
2250 {
2251         uint32_t i;
2252         uint32_t num_msix;
2253
2254         (void)qla_stop_nic_func(ha);
2255
2256         qla_del_rcv_cntxt(ha);
2257         qla_del_xmt_cntxt(ha);
2258
2259         if (ha->hw.flags.init_intr_cnxt) {
2260                 for (i = 0; i < ha->hw.num_sds_rings; ) {
2261
2262                         if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2263                                 num_msix = Q8_MAX_INTR_VECTORS;
2264                         else
2265                                 num_msix = ha->hw.num_sds_rings - i;
2266                         qla_config_intr_cntxt(ha, i, num_msix, 0);
2267
2268                         i += num_msix;
2269                 }
2270
2271                 ha->hw.flags.init_intr_cnxt = 0;
2272         }
2273         return;
2274 }
2275
2276 void
2277 qla_confirm_9kb_enable(qla_host_t *ha)
2278 {
2279         uint32_t supports_9kb = 0;
2280
2281         ha->hw.mbx_intr_mask_offset = READ_REG32(ha, Q8_MBOX_INT_MASK_MSIX);
2282
2283         /* Use MSI-X vector 0; Enable Firmware Mailbox Interrupt */
2284         WRITE_REG32(ha, Q8_MBOX_INT_ENABLE, BIT_2);
2285         WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
2286
2287         qla_get_nic_partition(ha, &supports_9kb, NULL);
2288
2289         if (!supports_9kb)
2290                 ha->hw.enable_9kb = 0;
2291
2292         return;
2293 }
2294
2295
2296 /*
2297  * Name: ql_init_hw_if
2298  * Function: Creates the hardware-specific entities corresponding to an
2299  *      Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
2300  *      corresponding to the interface. Enables LRO if allowed.
2301  */
2302 int
2303 ql_init_hw_if(qla_host_t *ha)
2304 {
2305         device_t        dev;
2306         uint32_t        i;
2307         uint8_t         bcast_mac[6];
2308         qla_rdesc_t     *rdesc;
2309         uint32_t        num_msix;
2310
2311         dev = ha->pci_dev;
2312
2313         for (i = 0; i < ha->hw.num_sds_rings; i++) {
2314                 bzero(ha->hw.dma_buf.sds_ring[i].dma_b,
2315                         ha->hw.dma_buf.sds_ring[i].size);
2316         }
2317
2318         for (i = 0; i < ha->hw.num_sds_rings; ) {
2319
2320                 if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2321                         num_msix = Q8_MAX_INTR_VECTORS;
2322                 else
2323                         num_msix = ha->hw.num_sds_rings - i;
2324
2325                 if (qla_config_intr_cntxt(ha, i, num_msix, 1)) {
2326
2327                         if (i > 0) {
2328
2329                                 num_msix = i;
2330
2331                                 for (i = 0; i < num_msix; ) {
2332                                         qla_config_intr_cntxt(ha, i,
2333                                                 Q8_MAX_INTR_VECTORS, 0);
2334                                         i += Q8_MAX_INTR_VECTORS;
2335                                 }
2336                         }
2337                         return (-1);
2338                 }
2339
2340                 i = i + num_msix;
2341         }
2342
2343         ha->hw.flags.init_intr_cnxt = 1;
2344
2345         /*
2346          * Create Receive Context
2347          */
2348         if (qla_init_rcv_cntxt(ha)) {
2349                 return (-1);
2350         }
2351
2352         for (i = 0; i < ha->hw.num_rds_rings; i++) {
2353                 rdesc = &ha->hw.rds[i];
2354                 rdesc->rx_next = NUM_RX_DESCRIPTORS - 2;
2355                 rdesc->rx_in = 0;
2356                 /* Update the RDS Producer Indices */
2357                 QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,\
2358                         rdesc->rx_next);
2359         }
2360
2361
2362         /*
2363          * Create Transmit Context
2364          */
2365         if (qla_init_xmt_cntxt(ha)) {
2366                 qla_del_rcv_cntxt(ha);
2367                 return (-1);
2368         }
2369         ha->hw.max_tx_segs = 0;
2370
2371         if (qla_config_mac_addr(ha, ha->hw.mac_addr, 1))
2372                 return(-1);
2373
2374         ha->hw.flags.unicast_mac = 1;
2375
2376         bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
2377         bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
2378
2379         if (qla_config_mac_addr(ha, bcast_mac, 1))
2380                 return (-1);
2381
2382         ha->hw.flags.bcast_mac = 1;
2383
2384         /*
2385          * program any cached multicast addresses
2386          */
2387         if (qla_hw_add_all_mcast(ha))
2388                 return (-1);
2389
2390         if (qla_config_rss(ha, ha->hw.rcv_cntxt_id))
2391                 return (-1);
2392
2393         if (qla_config_rss_ind_table(ha))
2394                 return (-1);
2395
2396         if (qla_config_intr_coalesce(ha, ha->hw.rcv_cntxt_id, 0, 1))
2397                 return (-1);
2398
2399         if (qla_link_event_req(ha, ha->hw.rcv_cntxt_id))
2400                 return (-1);
2401
2402         if (qla_config_fw_lro(ha, ha->hw.rcv_cntxt_id))
2403                 return (-1);
2404
2405         if (qla_init_nic_func(ha))
2406                 return (-1);
2407
2408         if (qla_query_fw_dcbx_caps(ha))
2409                 return (-1);
2410
2411         for (i = 0; i < ha->hw.num_sds_rings; i++)
2412                 QL_ENABLE_INTERRUPTS(ha, i);
2413
2414         return (0);
2415 }
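
/*
 * Bring-up order implemented above: interrupt contexts, receive
 * context (and RDS producer indices), transmit contexts, unicast/
 * broadcast/multicast MAC filters, RSS and its indirection table,
 * interrupt coalescing, link event registration, firmware LRO, NIC
 * function start, DCBX capability query, and finally per-ring
 * interrupt enable. ql_del_hw_if() stops the NIC function and
 * releases the contexts and interrupt resources.
 */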
2416
2417 static int
2418 qla_map_sds_to_rds(qla_host_t *ha, uint32_t start_idx, uint32_t num_idx)
2419 {
2420         device_t                dev = ha->pci_dev;
2421         q80_rq_map_sds_to_rds_t *map_rings;
2422         q80_rsp_map_sds_to_rds_t *map_rings_rsp;
2423         uint32_t                i, err;
2424         qla_hw_t                *hw = &ha->hw;
2425
2426         map_rings = (q80_rq_map_sds_to_rds_t *)ha->hw.mbox;
2427         bzero(map_rings, sizeof(q80_rq_map_sds_to_rds_t));
2428
2429         map_rings->opcode = Q8_MBX_MAP_SDS_TO_RDS;
2430         map_rings->count_version = (sizeof (q80_rq_map_sds_to_rds_t) >> 2);
2431         map_rings->count_version |= Q8_MBX_CMD_VERSION;
2432
2433         map_rings->cntxt_id = hw->rcv_cntxt_id;
2434         map_rings->num_rings = num_idx;
2435
2436         for (i = 0; i < num_idx; i++) {
2437                 map_rings->sds_rds[i].sds_ring = i + start_idx;
2438                 map_rings->sds_rds[i].rds_ring = i + start_idx;
2439         }
2440
2441         if (qla_mbx_cmd(ha, (uint32_t *)map_rings,
2442                 (sizeof (q80_rq_map_sds_to_rds_t) >> 2),
2443                 ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
2444                 device_printf(dev, "%s: failed0\n", __func__);
2445                 return (-1);
2446         }
2447
2448         map_rings_rsp = (q80_rsp_map_sds_to_rds_t *)ha->hw.mbox;
2449
2450         err = Q8_MBX_RSP_STATUS(map_rings_rsp->regcnt_status);
2451
2452         if (err) {
2453                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2454                 return (-1);
2455         }
2456
2457         return (0);
2458 }
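
/*
 * Example of the chunked 1:1 SDS-to-RDS mapping above: with 8 RDS
 * rings and a MAX_SDS_TO_RDS_MAP of, say, 4 (illustrative value),
 * qla_init_rcv_cntxt() issues two mailbox commands, mapping SDS
 * rings 0-3 to RDS rings 0-3 and SDS rings 4-7 to RDS rings 4-7.
 */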
2459
2460 /*
2461  * Name: qla_init_rcv_cntxt
2462  * Function: Creates the Receive Context.
2463  */
2464 static int
2465 qla_init_rcv_cntxt(qla_host_t *ha)
2466 {
2467         q80_rq_rcv_cntxt_t      *rcntxt;
2468         q80_rsp_rcv_cntxt_t     *rcntxt_rsp;
2469         q80_stat_desc_t         *sdesc;
2470         int                     i, j;
2471         qla_hw_t                *hw = &ha->hw;
2472         device_t                dev;
2473         uint32_t                err;
2474         uint32_t                rcntxt_sds_rings;
2475         uint32_t                rcntxt_rds_rings;
2476         uint32_t                max_idx;
2477
2478         dev = ha->pci_dev;
2479
2480         /*
2481          * Create Receive Context
2482          */
2483
2484         for (i = 0; i < hw->num_sds_rings; i++) {
2485                 for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++) {
2486                         sdesc = (q80_stat_desc_t *)
2487                                         &hw->sds[i].sds_ring_base[j];
2488                         sdesc->data[0] = 1ULL;
2489                         sdesc->data[1] = 1ULL;
2490                 }
2491         }
2492
2493         rcntxt_sds_rings = hw->num_sds_rings;
2494         if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS)
2495                 rcntxt_sds_rings = MAX_RCNTXT_SDS_RINGS;
2496
2497         rcntxt_rds_rings = hw->num_rds_rings;
2498
2499         if (hw->num_rds_rings > MAX_RDS_RING_SETS)
2500                 rcntxt_rds_rings = MAX_RDS_RING_SETS;
2501
2502         rcntxt = (q80_rq_rcv_cntxt_t *)ha->hw.mbox;
2503         bzero(rcntxt, (sizeof (q80_rq_rcv_cntxt_t)));
2504
2505         rcntxt->opcode = Q8_MBX_CREATE_RX_CNTXT;
2506         rcntxt->count_version = (sizeof (q80_rq_rcv_cntxt_t) >> 2);
2507         rcntxt->count_version |= Q8_MBX_CMD_VERSION;
2508
2509         rcntxt->cap0 = Q8_RCV_CNTXT_CAP0_BASEFW |
2510                         Q8_RCV_CNTXT_CAP0_LRO |
2511                         Q8_RCV_CNTXT_CAP0_HW_LRO |
2512                         Q8_RCV_CNTXT_CAP0_RSS |
2513                         Q8_RCV_CNTXT_CAP0_SGL_LRO;
2514
2515         if (ha->hw.enable_9kb)
2516                 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SINGLE_JUMBO;
2517         else
2518                 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SGL_JUMBO;
2519
2520         if (ha->hw.num_rds_rings > 1) {
2521                 rcntxt->nrds_sets_rings = rcntxt_rds_rings | (1 << 5);
2522                 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_MULTI_RDS;
2523         } else
2524                 rcntxt->nrds_sets_rings = 0x1 | (1 << 5);
2525
2526         rcntxt->nsds_rings = rcntxt_sds_rings;
2527
2528         rcntxt->rds_producer_mode = Q8_RCV_CNTXT_RDS_PROD_MODE_UNIQUE;
2529
2530         rcntxt->rcv_vpid = 0;
2531
2532         for (i = 0; i <  rcntxt_sds_rings; i++) {
2533                 rcntxt->sds[i].paddr =
2534                         qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr);
2535                 rcntxt->sds[i].size =
2536                         qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
2537                 if (ha->msix_count == 2) {
2538                         rcntxt->sds[i].intr_id =
2539                                 qla_host_to_le16(hw->intr_id[0]);
2540                         rcntxt->sds[i].intr_src_bit = qla_host_to_le16((i));
2541                 } else {
2542                         rcntxt->sds[i].intr_id =
2543                                 qla_host_to_le16(hw->intr_id[i]);
2544                         rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0);
2545                 }
2546         }
2547
2548         for (i = 0; i <  rcntxt_rds_rings; i++) {
2549                 rcntxt->rds[i].paddr_std =
2550                         qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr);
2551
2552                 if (ha->hw.enable_9kb)
2553                         rcntxt->rds[i].std_bsize =
2554                                 qla_host_to_le64(MJUM9BYTES);
2555                 else
2556                         rcntxt->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
2557
2558                 rcntxt->rds[i].std_nentries =
2559                         qla_host_to_le32(NUM_RX_DESCRIPTORS);
2560         }
2561
2562         if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
2563                 (sizeof (q80_rq_rcv_cntxt_t) >> 2),
2564                 ha->hw.mbox, (sizeof(q80_rsp_rcv_cntxt_t) >> 2), 0)) {
2565                 device_printf(dev, "%s: failed0\n", __func__);
2566                 return (-1);
2567         }
2568
2569         rcntxt_rsp = (q80_rsp_rcv_cntxt_t *)ha->hw.mbox;
2570
2571         err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
2572
2573         if (err) {
2574                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2575                 return (-1);
2576         }
2577
2578         for (i = 0; i <  rcntxt_sds_rings; i++) {
2579                 hw->sds[i].sds_consumer = rcntxt_rsp->sds_cons[i];
2580         }
2581
2582         for (i = 0; i <  rcntxt_rds_rings; i++) {
2583                 hw->rds[i].prod_std = rcntxt_rsp->rds[i].prod_std;
2584         }
2585
2586         hw->rcv_cntxt_id = rcntxt_rsp->cntxt_id;
2587
2588         ha->hw.flags.init_rx_cnxt = 1;
2589
2590         if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) {
2591
2592                 for (i = MAX_RCNTXT_SDS_RINGS; i < hw->num_sds_rings;) {
2593
2594                         if ((i + MAX_RCNTXT_SDS_RINGS) < hw->num_sds_rings)
2595                                 max_idx = MAX_RCNTXT_SDS_RINGS;
2596                         else
2597                                 max_idx = hw->num_sds_rings - i;
2598
2599                         err = qla_add_rcv_rings(ha, i, max_idx);
2600                         if (err)
2601                                 return -1;
2602
2603                         i += max_idx;
2604                 }
2605         }
2606
2607         if (hw->num_rds_rings > 1) {
2608
2609                 for (i = 0; i < hw->num_rds_rings; ) {
2610
2611                         if ((i + MAX_SDS_TO_RDS_MAP) < hw->num_rds_rings)
2612                                 max_idx = MAX_SDS_TO_RDS_MAP;
2613                         else
2614                                 max_idx = hw->num_rds_rings - i;
2615
2616                         err = qla_map_sds_to_rds(ha, i, max_idx);
2617                         if (err)
2618                                 return -1;
2619
2620                         i += max_idx;
2621                 }
2622         }
2623
2624         return (0);
2625 }
2626
2627 static int
2628 qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds)
2629 {
2630         device_t                dev = ha->pci_dev;
2631         q80_rq_add_rcv_rings_t  *add_rcv;
2632         q80_rsp_add_rcv_rings_t *add_rcv_rsp;
2633         uint32_t                i,j, err;
2634         qla_hw_t                *hw = &ha->hw;
2635
2636         add_rcv = (q80_rq_add_rcv_rings_t *)ha->hw.mbox;
2637         bzero(add_rcv, sizeof (q80_rq_add_rcv_rings_t));
2638
2639         add_rcv->opcode = Q8_MBX_ADD_RX_RINGS;
2640         add_rcv->count_version = (sizeof (q80_rq_add_rcv_rings_t) >> 2);
2641         add_rcv->count_version |= Q8_MBX_CMD_VERSION;
2642
2643         add_rcv->nrds_sets_rings = nsds | (1 << 5);
2644         add_rcv->nsds_rings = nsds;
2645         add_rcv->cntxt_id = hw->rcv_cntxt_id;
2646
2647         for (i = 0; i <  nsds; i++) {
2648
2649                 j = i + sds_idx;
2650
2651                 add_rcv->sds[i].paddr =
2652                         qla_host_to_le64(hw->dma_buf.sds_ring[j].dma_addr);
2653
2654                 add_rcv->sds[i].size =
2655                         qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
2656
2657                 if (ha->msix_count == 2) {
2658                         add_rcv->sds[i].intr_id =
2659                                 qla_host_to_le16(hw->intr_id[0]);
2660                         add_rcv->sds[i].intr_src_bit = qla_host_to_le16(j);
2661                 } else {
2662                         add_rcv->sds[i].intr_id =
2663                                 qla_host_to_le16(hw->intr_id[j]);
2664                         add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0);
2665                 }
2666
2667         }
2668         for (i = 0; (i <  nsds); i++) {
2669                 j = i + sds_idx;
2670
2671                 add_rcv->rds[i].paddr_std =
2672                         qla_host_to_le64(hw->dma_buf.rds_ring[j].dma_addr);
2673
2674                 if (ha->hw.enable_9kb)
2675                         add_rcv->rds[i].std_bsize =
2676                                 qla_host_to_le64(MJUM9BYTES);
2677                 else
2678                         add_rcv->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
2679
2680                 add_rcv->rds[i].std_nentries =
2681                         qla_host_to_le32(NUM_RX_DESCRIPTORS);
2682         }
2683
2684
2685         if (qla_mbx_cmd(ha, (uint32_t *)add_rcv,
2686                 (sizeof (q80_rq_add_rcv_rings_t) >> 2),
2687                 ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
2688                 device_printf(dev, "%s: failed0\n", __func__);
2689                 return (-1);
2690         }
2691
2692         add_rcv_rsp = (q80_rsp_add_rcv_rings_t *)ha->hw.mbox;
2693
2694         err = Q8_MBX_RSP_STATUS(add_rcv_rsp->regcnt_status);
2695
2696         if (err) {
2697                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2698                 return (-1);
2699         }
2700
2701         for (i = 0; i < nsds; i++) {
2702                 hw->sds[(i + sds_idx)].sds_consumer = add_rcv_rsp->sds_cons[i];
2703         }
2704
2705         for (i = 0; i < nsds; i++) {
2706                 hw->rds[(i + sds_idx)].prod_std = add_rcv_rsp->rds[i].prod_std;
2707         }
2708
2709         return (0);
2710 }
2711
2712 /*
2713  * Name: qla_del_rcv_cntxt
2714  * Function: Destroys the Receive Context.
2715  */
2716 static void
2717 qla_del_rcv_cntxt(qla_host_t *ha)
2718 {
2719         device_t                        dev = ha->pci_dev;
2720         q80_rcv_cntxt_destroy_t         *rcntxt;
2721         q80_rcv_cntxt_destroy_rsp_t     *rcntxt_rsp;
2722         uint32_t                        err;
2723         uint8_t                         bcast_mac[6];
2724
2725         if (!ha->hw.flags.init_rx_cnxt)
2726                 return;
2727
2728         if (qla_hw_del_all_mcast(ha))
2729                 return;
2730
2731         if (ha->hw.flags.bcast_mac) {
2732
2733                 bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
2734                 bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
2735
2736                 if (qla_config_mac_addr(ha, bcast_mac, 0))
2737                         return;
2738                 ha->hw.flags.bcast_mac = 0;
2739
2740         }
2741
2742         if (ha->hw.flags.unicast_mac) {
2743                 if (qla_config_mac_addr(ha, ha->hw.mac_addr, 0))
2744                         return;
2745                 ha->hw.flags.unicast_mac = 0;
2746         }
2747
2748         rcntxt = (q80_rcv_cntxt_destroy_t *)ha->hw.mbox;
2749         bzero(rcntxt, (sizeof (q80_rcv_cntxt_destroy_t)));
2750
2751         rcntxt->opcode = Q8_MBX_DESTROY_RX_CNTXT;
2752         rcntxt->count_version = (sizeof (q80_rcv_cntxt_destroy_t) >> 2);
2753         rcntxt->count_version |= Q8_MBX_CMD_VERSION;
2754
2755         rcntxt->cntxt_id = ha->hw.rcv_cntxt_id;
2756
2757         if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
2758                 (sizeof (q80_rcv_cntxt_destroy_t) >> 2),
2759                 ha->hw.mbox, (sizeof(q80_rcv_cntxt_destroy_rsp_t) >> 2), 0)) {
2760                 device_printf(dev, "%s: failed0\n", __func__);
2761                 return;
2762         }
2763         rcntxt_rsp = (q80_rcv_cntxt_destroy_rsp_t *)ha->hw.mbox;
2764
2765         err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
2766
2767         if (err) {
2768                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2769         }
2770
2771         ha->hw.flags.init_rx_cnxt = 0;
2772         return;
2773 }
2774
2775 /*
2776  * Name: qla_init_xmt_cntxt
2777  * Function: Creates the Transmit Context.
2778  */
2779 static int
2780 qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
2781 {
2782         device_t                dev;
2783         qla_hw_t                *hw = &ha->hw;
2784         q80_rq_tx_cntxt_t       *tcntxt;
2785         q80_rsp_tx_cntxt_t      *tcntxt_rsp;
2786         uint32_t                err;
2787         qla_hw_tx_cntxt_t       *hw_tx_cntxt;
2788
2789         hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
2790
2791         dev = ha->pci_dev;
2792
2793         /*
2794          * Create Transmit Context
2795          */
2796         tcntxt = (q80_rq_tx_cntxt_t *)ha->hw.mbox;
2797         bzero(tcntxt, (sizeof (q80_rq_tx_cntxt_t)));
2798
2799         tcntxt->opcode = Q8_MBX_CREATE_TX_CNTXT;
2800         tcntxt->count_version = (sizeof (q80_rq_tx_cntxt_t) >> 2);
2801         tcntxt->count_version |= Q8_MBX_CMD_VERSION;
2802
2803 #ifdef QL_ENABLE_ISCSI_TLV
2804
2805         tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO |
2806                                 Q8_TX_CNTXT_CAP0_TC;
2807
2808         if (txr_idx >= (ha->hw.num_tx_rings >> 1)) {
2809                 tcntxt->traffic_class = 1;
2810         }
2811
2812 #else
2813
2814         tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO;
2815
2816 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
2817
2818         tcntxt->ntx_rings = 1;
2819
2820         tcntxt->tx_ring[0].paddr =
2821                 qla_host_to_le64(hw_tx_cntxt->tx_ring_paddr);
2822         tcntxt->tx_ring[0].tx_consumer =
2823                 qla_host_to_le64(hw_tx_cntxt->tx_cons_paddr);
2824         tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS);
2825
2826         tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[0]);
2827         tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0);
2828
2829
2830         hw_tx_cntxt->txr_free = NUM_TX_DESCRIPTORS;
2831         hw_tx_cntxt->txr_next = hw_tx_cntxt->txr_comp = 0;
2832
2833         if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
2834                 (sizeof (q80_rq_tx_cntxt_t) >> 2),
2835                 ha->hw.mbox,
2836                 (sizeof(q80_rsp_tx_cntxt_t) >> 2), 0)) {
2837                 device_printf(dev, "%s: failed0\n", __func__);
2838                 return (-1);
2839         }
2840         tcntxt_rsp = (q80_rsp_tx_cntxt_t *)ha->hw.mbox;
2841
2842         err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
2843
2844         if (err) {
2845                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2846                 return -1;
2847         }
2848
2849         hw_tx_cntxt->tx_prod_reg = tcntxt_rsp->tx_ring[0].prod_index;
2850         hw_tx_cntxt->tx_cntxt_id = tcntxt_rsp->tx_ring[0].cntxt_id;
2851
2852         if (qla_config_intr_coalesce(ha, hw_tx_cntxt->tx_cntxt_id, 0, 0))
2853                 return (-1);
2854
2855         return (0);
2856 }
2857
2858
2859 /*
2860  * Name: qla_del_xmt_cntxt
2861  * Function: Destroys the Transmit Context.
2862  */
2863 static int
2864 qla_del_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
2865 {
2866         device_t                        dev = ha->pci_dev;
2867         q80_tx_cntxt_destroy_t          *tcntxt;
2868         q80_tx_cntxt_destroy_rsp_t      *tcntxt_rsp;
2869         uint32_t                        err;
2870
2871         tcntxt = (q80_tx_cntxt_destroy_t *)ha->hw.mbox;
2872         bzero(tcntxt, (sizeof (q80_tx_cntxt_destroy_t)));
2873
2874         tcntxt->opcode = Q8_MBX_DESTROY_TX_CNTXT;
2875         tcntxt->count_version = (sizeof (q80_tx_cntxt_destroy_t) >> 2);
2876         tcntxt->count_version |= Q8_MBX_CMD_VERSION;
2877
2878         tcntxt->cntxt_id = ha->hw.tx_cntxt[txr_idx].tx_cntxt_id;
2879
2880         if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
2881                 (sizeof (q80_tx_cntxt_destroy_t) >> 2),
2882                 ha->hw.mbox, (sizeof (q80_tx_cntxt_destroy_rsp_t) >> 2), 0)) {
2883                 device_printf(dev, "%s: failed0\n", __func__);
2884                 return (-1);
2885         }
2886         tcntxt_rsp = (q80_tx_cntxt_destroy_rsp_t *)ha->hw.mbox;
2887
2888         err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
2889
2890         if (err) {
2891                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2892                 return (-1);
2893         }
2894
2895         return (0);
2896 }
2897 static void
2898 qla_del_xmt_cntxt(qla_host_t *ha)
2899 {
2900         uint32_t i;
2901
2902         if (!ha->hw.flags.init_tx_cnxt)
2903                 return;
2904
2905         for (i = 0; i < ha->hw.num_tx_rings; i++) {
2906                 if (qla_del_xmt_cntxt_i(ha, i))
2907                         break;
2908         }
2909         ha->hw.flags.init_tx_cnxt = 0;
2910 }
2911
2912 static int
2913 qla_init_xmt_cntxt(qla_host_t *ha)
2914 {
2915         uint32_t i, j;
2916
2917         for (i = 0; i < ha->hw.num_tx_rings; i++) {
2918                 if (qla_init_xmt_cntxt_i(ha, i) != 0) {
2919                         for (j = 0; j < i; j++)
2920                                 qla_del_xmt_cntxt_i(ha, j);
2921                         return (-1);
2922                 }
2923         }
2924         ha->hw.flags.init_tx_cnxt = 1;
2925         return (0);
2926 }
2927
2928 static int
2929 qla_hw_add_all_mcast(qla_host_t *ha)
2930 {
2931         int i, nmcast;
2932
2933         nmcast = ha->hw.nmcast;
2934
2935         for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
2936                 if ((ha->hw.mcast[i].addr[0] != 0) || 
2937                         (ha->hw.mcast[i].addr[1] != 0) ||
2938                         (ha->hw.mcast[i].addr[2] != 0) ||
2939                         (ha->hw.mcast[i].addr[3] != 0) ||
2940                         (ha->hw.mcast[i].addr[4] != 0) ||
2941                         (ha->hw.mcast[i].addr[5] != 0)) {
2942
2943                         if (qla_config_mac_addr(ha, ha->hw.mcast[i].addr, 1)) {
2944                                 device_printf(ha->pci_dev, "%s: failed\n",
2945                                         __func__);
2946                                 return (-1);
2947                         }
2948
2949                         nmcast--;
2950                 }
2951         }
2952         return 0;
2953 }
2954
2955 static int
2956 qla_hw_del_all_mcast(qla_host_t *ha)
2957 {
2958         int i, nmcast;
2959
2960         nmcast = ha->hw.nmcast;
2961
2962         for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
2963                 if ((ha->hw.mcast[i].addr[0] != 0) || 
2964                         (ha->hw.mcast[i].addr[1] != 0) ||
2965                         (ha->hw.mcast[i].addr[2] != 0) ||
2966                         (ha->hw.mcast[i].addr[3] != 0) ||
2967                         (ha->hw.mcast[i].addr[4] != 0) ||
2968                         (ha->hw.mcast[i].addr[5] != 0)) {
2969
2970                         if (qla_config_mac_addr(ha, ha->hw.mcast[i].addr, 0))
2971                                 return (-1);
2972
2973                         nmcast--;
2974                 }
2975         }
2976         return 0;
2977 }
2978
2979 static int
2980 qla_hw_add_mcast(qla_host_t *ha, uint8_t *mta)
2981 {
2982         int i;
2983
2984         for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
2985
2986                 if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0)
2987                         return 0; /* it has already been added */
2988         }
2989
2990         for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
2991
2992                 if ((ha->hw.mcast[i].addr[0] == 0) && 
2993                         (ha->hw.mcast[i].addr[1] == 0) &&
2994                         (ha->hw.mcast[i].addr[2] == 0) &&
2995                         (ha->hw.mcast[i].addr[3] == 0) &&
2996                         (ha->hw.mcast[i].addr[4] == 0) &&
2997                         (ha->hw.mcast[i].addr[5] == 0)) {
2998
2999                         if (qla_config_mac_addr(ha, mta, 1))
3000                                 return (-1);
3001
3002                         bcopy(mta, ha->hw.mcast[i].addr, Q8_MAC_ADDR_LEN);
3003                         ha->hw.nmcast++;        
3004
3005                         return 0;
3006                 }
3007         }
3008         return 0;
3009 }
3010
3011 static int
3012 qla_hw_del_mcast(qla_host_t *ha, uint8_t *mta)
3013 {
3014         int i;
3015
3016         for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3017                 if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) {
3018
3019                         if (qla_config_mac_addr(ha, mta, 0))
3020                                 return (-1);
3021
3022                         ha->hw.mcast[i].addr[0] = 0;
3023                         ha->hw.mcast[i].addr[1] = 0;
3024                         ha->hw.mcast[i].addr[2] = 0;
3025                         ha->hw.mcast[i].addr[3] = 0;
3026                         ha->hw.mcast[i].addr[4] = 0;
3027                         ha->hw.mcast[i].addr[5] = 0;
3028
3029                         ha->hw.nmcast--;        
3030
3031                         return 0;
3032                 }
3033         }
3034         return 0;
3035 }
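/*
 * The mcast[] table above uses the all-zero MAC address as a free-slot
 * sentinel: adds take the first all-zero slot, deletes zero the slot
 * back out, and nmcast tracks the number of occupied slots.  For
 * example, adding to a table whose slot 0 holds an address and whose
 * slot 1 is all zeros programs the new address into slot 1.
 */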
3036
3037 /*
3038  * Name: ql_hw_set_multi
3039  * Function: Sets the Multicast Addresses provided by the host O.S. into
3040  *      the hardware (for the given interface)
3041  */
3042 int
3043 ql_hw_set_multi(qla_host_t *ha, uint8_t *mcast, uint32_t mcnt,
3044         uint32_t add_mac)
3045 {
3046         int i;
3047         uint8_t *mta = mcast;
3048         int ret = 0;
3049
3050         for (i = 0; i < mcnt; i++) {
3051                 if (add_mac) {
3052                         ret = qla_hw_add_mcast(ha, mta);
3053                         if (ret)
3054                                 break;
3055                 } else {
3056                         ret = qla_hw_del_mcast(ha, mta);
3057                         if (ret)
3058                                 break;
3059                 }
3060
3061                 mta += Q8_MAC_ADDR_LEN;
3062         }
3063         return (ret);
3064 }
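/*
 * Usage sketch (illustrative only; the caller and the addresses below
 * are assumptions, not driver code): mcast is a flat array of mcnt
 * 6-byte MAC addresses, and add_mac selects add (1) or delete (0).
 */
#if 0
        uint8_t mta[2 * Q8_MAC_ADDR_LEN] = {
                0x01, 0x00, 0x5E, 0x00, 0x00, 0x01,    /* 224.0.0.1 */
                0x01, 0x00, 0x5E, 0x00, 0x00, 0xFB     /* 224.0.0.251 */
        };

        if (ql_hw_set_multi(ha, mta, 2, 1))
                device_printf(ha->pci_dev, "%s: multicast add failed\n",
                        __func__);
#endif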
3065
3066 /*
3067  * Name: qla_hw_tx_done_locked
3068  * Function: Handle Transmit Completions
3069  */
3070 static void
3071 qla_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx)
3072 {
3073         qla_tx_buf_t *txb;
3074         qla_hw_t *hw = &ha->hw;
3075         uint32_t comp_idx, comp_count = 0;
3076         qla_hw_tx_cntxt_t *hw_tx_cntxt;
3077
3078         hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
3079
3080         /* retrieve index of last entry in tx ring completed */
3081         comp_idx = qla_le32_to_host(*(hw_tx_cntxt->tx_cons));
3082
3083         while (comp_idx != hw_tx_cntxt->txr_comp) {
3084
3085                 txb = &ha->tx_ring[txr_idx].tx_buf[hw_tx_cntxt->txr_comp];
3086
3087                 hw_tx_cntxt->txr_comp++;
3088                 if (hw_tx_cntxt->txr_comp == NUM_TX_DESCRIPTORS)
3089                         hw_tx_cntxt->txr_comp = 0;
3090
3091                 comp_count++;
3092
3093                 if (txb->m_head) {
3094                         ha->ifp->if_opackets++;
3095
3096                         bus_dmamap_sync(ha->tx_tag, txb->map,
3097                                 BUS_DMASYNC_POSTWRITE);
3098                         bus_dmamap_unload(ha->tx_tag, txb->map);
3099                         m_freem(txb->m_head);
3100
3101                         txb->m_head = NULL;
3102                 }
3103         }
3104
3105         hw_tx_cntxt->txr_free += comp_count;
3106         return;
3107 }
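/*
 * Worked example (values assumed for illustration): if
 * NUM_TX_DESCRIPTORS were 1024, txr_comp were 1022 and the hardware
 * consumer index read 2, the loop above would reclaim entries 1022,
 * 1023, 0 and 1, wrapping txr_comp at the ring boundary and adding
 * comp_count == 4 back to txr_free.
 */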
3108
3109 /*
3110  * Name: ql_hw_tx_done
3111  * Function: Handle Transmit Completions
3112  */
3113 void
3114 ql_hw_tx_done(qla_host_t *ha)
3115 {
3116         int i;
3117         uint32_t flag = 0;
3118
3119         if (!mtx_trylock(&ha->tx_lock)) {
3120                 QL_DPRINT8(ha, (ha->pci_dev,
3121                         "%s: !mtx_trylock(&ha->tx_lock)\n", __func__));
3122                 return;
3123         }
3124         for (i = 0; i < ha->hw.num_tx_rings; i++) {
3125                 qla_hw_tx_done_locked(ha, i);
3126                 if (ha->hw.tx_cntxt[i].txr_free <= (NUM_TX_DESCRIPTORS >> 1))
3127                         flag = 1;
3128         }
3129
3130         if (!flag)
3131                 ha->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3132
3133         QLA_TX_UNLOCK(ha);
3134         return;
3135 }
3136
3137 void
3138 ql_update_link_state(qla_host_t *ha)
3139 {
3140         uint32_t link_state;
3141         uint32_t prev_link_state;
3142
3143         if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3144                 ha->hw.link_up = 0;
3145                 return;
3146         }
3147         link_state = READ_REG32(ha, Q8_LINK_STATE);
3148
3149         prev_link_state = ha->hw.link_up;
3150
3151         if (ha->pci_func == 0)
3152                 ha->hw.link_up = (((link_state & 0xF) == 1) ? 1 : 0);
3153         else
3154                 ha->hw.link_up = ((((link_state >> 4) & 0xF) == 1) ? 1 : 0);
3155
3156         if (prev_link_state !=  ha->hw.link_up) {
3157                 if (ha->hw.link_up) {
3158                         if_link_state_change(ha->ifp, LINK_STATE_UP);
3159                 } else {
3160                         if_link_state_change(ha->ifp, LINK_STATE_DOWN);
3161                 }
3162         }
3163         return;
3164 }
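/*
 * Q8_LINK_STATE packs one 4-bit link state per PCI function: bits 3:0
 * for function 0 and bits 7:4 for function 1, with the value 1 meaning
 * link up.  For example, a raw register value of 0x11 reports both
 * functions up, while 0x10 reports only function 1 up.
 */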
3165
3166 void
3167 ql_hw_stop_rcv(qla_host_t *ha)
3168 {
3169         int i, done, count = 100;
3170
3171         ha->flags.stop_rcv = 1;
3172
3173         while (count) {
3174                 done = 1;
3175                 for (i = 0; i < ha->hw.num_sds_rings; i++) {
3176                         if (ha->hw.sds[i].rcv_active)
3177                                 done = 0;
3178                 }
3179                 if (done)
3180                         break;
3181                 else 
3182                         qla_mdelay(__func__, 10);
3183                 count--;
3184         }
3185         if (!count)
3186                 device_printf(ha->pci_dev, "%s: Counter expired.\n", __func__);
3187
3188         return;
3189 }
3190
3191 int
3192 ql_hw_check_health(qla_host_t *ha)
3193 {
3194         uint32_t val;
3195
3196         ha->hw.health_count++;
3197
3198         if (ha->hw.health_count < 1000)
3199                 return 0;
3200
3201         ha->hw.health_count = 0;
3202
3203         val = READ_REG32(ha, Q8_ASIC_TEMPERATURE);
3204
3205         if (((val & 0xFFFF) == 2) || ((val & 0xFFFF) == 3) ||
3206                 (QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE))) {
3207                 device_printf(ha->pci_dev, "%s: Temperature Alert [0x%08x]\n",
3208                         __func__, val);
3209                 return -1;
3210         }
3211
3212         val = READ_REG32(ha, Q8_FIRMWARE_HEARTBEAT);
3213
3214         if ((val != ha->hw.hbeat_value) &&
3215                 (!(QL_ERR_INJECT(ha, INJCT_HEARTBEAT_FAILURE)))) {
3216                 ha->hw.hbeat_value = val;
3217                 return 0;
3218         }
3219         device_printf(ha->pci_dev, "%s: Heartbeat Failure [0x%08x]\n",
3220                 __func__, val);
3221
3222         return -1;
3223 }
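/*
 * The health_count gate means the temperature and heartbeat registers
 * are only read once every 1000 invocations.  A minimal sketch of a
 * periodic caller (the recovery hook below is hypothetical, not part
 * of this driver):
 */
#if 0
        if (ql_hw_check_health(ha) != 0) {
                /* temperature alert or stale firmware heartbeat */
                qla_error_recovery(ha); /* hypothetical recovery hook */
        }
#endif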
3224
3225 static int
3226 qla_init_nic_func(qla_host_t *ha)
3227 {
3228         device_t                dev;
3229         q80_init_nic_func_t     *init_nic;
3230         q80_init_nic_func_rsp_t *init_nic_rsp;
3231         uint32_t                err;
3232
3233         dev = ha->pci_dev;
3234
3235         init_nic = (q80_init_nic_func_t *)ha->hw.mbox;
3236         bzero(init_nic, sizeof(q80_init_nic_func_t));
3237
3238         init_nic->opcode = Q8_MBX_INIT_NIC_FUNC;
3239         init_nic->count_version = (sizeof (q80_init_nic_func_t) >> 2);
3240         init_nic->count_version |= Q8_MBX_CMD_VERSION;
3241
3242         init_nic->options = Q8_INIT_NIC_REG_DCBX_CHNG_AEN;
3243         init_nic->options |= Q8_INIT_NIC_REG_SFP_CHNG_AEN;
3244         init_nic->options |= Q8_INIT_NIC_REG_IDC_AEN;
3245
3246 //qla_dump_buf8(ha, __func__, init_nic, sizeof (q80_init_nic_func_t));
3247         if (qla_mbx_cmd(ha, (uint32_t *)init_nic,
3248                 (sizeof (q80_init_nic_func_t) >> 2),
3249                 ha->hw.mbox, (sizeof (q80_init_nic_func_rsp_t) >> 2), 0)) {
3250                 device_printf(dev, "%s: failed\n", __func__);
3251                 return -1;
3252         }
3253
3254         init_nic_rsp = (q80_init_nic_func_rsp_t *)ha->hw.mbox;
3255 // qla_dump_buf8(ha, __func__, init_nic_rsp, sizeof (q80_init_nic_func_rsp_t));
3256
3257         err = Q8_MBX_RSP_STATUS(init_nic_rsp->regcnt_status);
3258
3259         if (err) {
3260                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3261         }
3262
3263         return 0;
3264 }
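/*
 * The mailbox commands in this file share one shape: the request is
 * staged in ha->hw.mbox, count_version carries the request length in
 * 32-bit words OR'ed with Q8_MBX_CMD_VERSION, and the response status
 * is extracted with Q8_MBX_RSP_STATUS().  The (sizeof(...) >> 2)
 * expressions are that word count; e.g. a 16-byte request is sent as
 * 4 mailbox words.
 */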
3265
3266 static int
3267 qla_stop_nic_func(qla_host_t *ha)
3268 {
3269         device_t                dev;
3270         q80_stop_nic_func_t     *stop_nic;
3271         q80_stop_nic_func_rsp_t *stop_nic_rsp;
3272         uint32_t                err;
3273
3274         dev = ha->pci_dev;
3275
3276         stop_nic = (q80_stop_nic_func_t *)ha->hw.mbox;
3277         bzero(stop_nic, sizeof(q80_stop_nic_func_t));
3278
3279         stop_nic->opcode = Q8_MBX_STOP_NIC_FUNC;
3280         stop_nic->count_version = (sizeof (q80_stop_nic_func_t) >> 2);
3281         stop_nic->count_version |= Q8_MBX_CMD_VERSION;
3282
3283         stop_nic->options = Q8_STOP_NIC_DEREG_DCBX_CHNG_AEN;
3284         stop_nic->options |= Q8_STOP_NIC_DEREG_SFP_CHNG_AEN;
3285
3286 //qla_dump_buf8(ha, __func__, stop_nic, sizeof (q80_stop_nic_func_t));
3287         if (qla_mbx_cmd(ha, (uint32_t *)stop_nic,
3288                 (sizeof (q80_stop_nic_func_t) >> 2),
3289                 ha->hw.mbox, (sizeof (q80_stop_nic_func_rsp_t) >> 2), 0)) {
3290                 device_printf(dev, "%s: failed\n", __func__);
3291                 return -1;
3292         }
3293
3294         stop_nic_rsp = (q80_stop_nic_func_rsp_t *)ha->hw.mbox;
3295 //qla_dump_buf8(ha, __func__, stop_nic_rsp, sizeof (q80_stop_nic_func_rsp_t));
3296
3297         err = Q8_MBX_RSP_STATUS(stop_nic_rsp->regcnt_status);
3298
3299         if (err) {
3300                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3301         }
3302
3303         return 0;
3304 }
3305
3306 static int
3307 qla_query_fw_dcbx_caps(qla_host_t *ha)
3308 {
3309         device_t                        dev;
3310         q80_query_fw_dcbx_caps_t        *fw_dcbx;
3311         q80_query_fw_dcbx_caps_rsp_t    *fw_dcbx_rsp;
3312         uint32_t                        err;
3313
3314         dev = ha->pci_dev;
3315
3316         fw_dcbx = (q80_query_fw_dcbx_caps_t *)ha->hw.mbox;
3317         bzero(fw_dcbx, sizeof(q80_query_fw_dcbx_caps_t));
3318
3319         fw_dcbx->opcode = Q8_MBX_GET_FW_DCBX_CAPS;
3320         fw_dcbx->count_version = (sizeof (q80_query_fw_dcbx_caps_t) >> 2);
3321         fw_dcbx->count_version |= Q8_MBX_CMD_VERSION;
3322
3323         ql_dump_buf8(ha, __func__, fw_dcbx, sizeof (q80_query_fw_dcbx_caps_t));
3324         if (qla_mbx_cmd(ha, (uint32_t *)fw_dcbx,
3325                 (sizeof (q80_query_fw_dcbx_caps_t) >> 2),
3326                 ha->hw.mbox, (sizeof (q80_query_fw_dcbx_caps_rsp_t) >> 2), 0)) {
3327                 device_printf(dev, "%s: failed\n", __func__);
3328                 return -1;
3329         }
3330
3331         fw_dcbx_rsp = (q80_query_fw_dcbx_caps_rsp_t *)ha->hw.mbox;
3332         ql_dump_buf8(ha, __func__, fw_dcbx_rsp,
3333                 sizeof (q80_query_fw_dcbx_caps_rsp_t));
3334
3335         err = Q8_MBX_RSP_STATUS(fw_dcbx_rsp->regcnt_status);
3336
3337         if (err) {
3338                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3339         }
3340
3341         return 0;
3342 }
3343
3344 static int
3345 qla_idc_ack(qla_host_t *ha, uint32_t aen_mb1, uint32_t aen_mb2,
3346         uint32_t aen_mb3, uint32_t aen_mb4)
3347 {
3348         device_t                dev;
3349         q80_idc_ack_t           *idc_ack;
3350         q80_idc_ack_rsp_t       *idc_ack_rsp;
3351         uint32_t                err;
3352         int                     count = 300;
3353
3354         dev = ha->pci_dev;
3355
3356         idc_ack = (q80_idc_ack_t *)ha->hw.mbox;
3357         bzero(idc_ack, sizeof(q80_idc_ack_t));
3358
3359         idc_ack->opcode = Q8_MBX_IDC_ACK;
3360         idc_ack->count_version = (sizeof (q80_idc_ack_t) >> 2);
3361         idc_ack->count_version |= Q8_MBX_CMD_VERSION;
3362
3363         idc_ack->aen_mb1 = aen_mb1;
3364         idc_ack->aen_mb2 = aen_mb2;
3365         idc_ack->aen_mb3 = aen_mb3;
3366         idc_ack->aen_mb4 = aen_mb4;
3367
3368         ha->hw.imd_compl = 0;
3369
3370         if (qla_mbx_cmd(ha, (uint32_t *)idc_ack,
3371                 (sizeof (q80_idc_ack_t) >> 2),
3372                 ha->hw.mbox, (sizeof (q80_idc_ack_rsp_t) >> 2), 0)) {
3373                 device_printf(dev, "%s: failed\n", __func__);
3374                 return -1;
3375         }
3376
3377         idc_ack_rsp = (q80_idc_ack_rsp_t *)ha->hw.mbox;
3378
3379         err = Q8_MBX_RSP_STATUS(idc_ack_rsp->regcnt_status);
3380
3381         if (err) {
3382                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3383                 return(-1);
3384         }
3385
3386         while (count && !ha->hw.imd_compl) {
3387                 qla_mdelay(__func__, 100);
3388                 count--;
3389         }
3390
3391         if (!count)
3392                 return -1;
3393         else
3394                 device_printf(dev, "%s: count %d\n", __func__, count);
3395
3396         return (0);
3397 }
3398
3399 static int
3400 qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits)
3401 {
3402         device_t                dev;
3403         q80_set_port_cfg_t      *pcfg;
3404         q80_set_port_cfg_rsp_t  *pcfg_rsp;
3405         uint32_t                err;
3406         int                     count = 300;
3407
3408         dev = ha->pci_dev;
3409
3410         pcfg = (q80_set_port_cfg_t *)ha->hw.mbox;
3411         bzero(pcfg, sizeof(q80_set_port_cfg_t));
3412
3413         pcfg->opcode = Q8_MBX_SET_PORT_CONFIG;
3414         pcfg->count_version = (sizeof (q80_set_port_cfg_t) >> 2);
3415         pcfg->count_version |= Q8_MBX_CMD_VERSION;
3416
3417         pcfg->cfg_bits = cfg_bits;
3418
3419         device_printf(dev, "%s: cfg_bits"
3420                 " [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
3421                 " [0x%x, 0x%x, 0x%x]\n", __func__,
3422                 ((cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
3423                 ((cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
3424                 ((cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0));
3425
3426         ha->hw.imd_compl = 0;
3427
3428         if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
3429                 (sizeof (q80_set_port_cfg_t) >> 2),
3430                 ha->hw.mbox, (sizeof (q80_set_port_cfg_rsp_t) >> 2), 0)) {
3431                 device_printf(dev, "%s: failed\n", __func__);
3432                 return -1;
3433         }
3434
3435         pcfg_rsp = (q80_set_port_cfg_rsp_t *)ha->hw.mbox;
3436
3437         err = Q8_MBX_RSP_STATUS(pcfg_rsp->regcnt_status);
3438
3439         if (err == Q8_MBX_RSP_IDC_INTRMD_RSP) {
3440                 while (count && !ha->hw.imd_compl) {
3441                         qla_mdelay(__func__, 100);
3442                         count--;
3443                 }
3444                 if (count) {
3445                         device_printf(dev, "%s: count %d\n", __func__, count);
3446
3447                         err = 0;
3448                 }
3449         }
3450
3451         if (err) {
3452                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3453                 return(-1);
3454         }
3455
3456         return (0);
3457 }
3458
3459
3460 static int
3461 qla_get_minidump_tmplt_size(qla_host_t *ha, uint32_t *size)
3462 {
3463         uint32_t                        err;
3464         device_t                        dev = ha->pci_dev;
3465         q80_config_md_templ_size_t      *md_size;
3466         q80_config_md_templ_size_rsp_t  *md_size_rsp;
3467
3468 #ifndef QL_LDFLASH_FW
3469
3470         ql_minidump_template_hdr_t *hdr;
3471
3472         hdr = (ql_minidump_template_hdr_t *)ql83xx_minidump;
3473         *size = hdr->size_of_template;
3474         return (0);
3475
3476 #endif /* #ifndef QL_LDFLASH_FW */
3477
3478         md_size = (q80_config_md_templ_size_t *) ha->hw.mbox;
3479         bzero(md_size, sizeof(q80_config_md_templ_size_t));
3480
3481         md_size->opcode = Q8_MBX_GET_MINIDUMP_TMPLT_SIZE;
3482         md_size->count_version = (sizeof (q80_config_md_templ_size_t) >> 2);
3483         md_size->count_version |= Q8_MBX_CMD_VERSION;
3484
3485         if (qla_mbx_cmd(ha, (uint32_t *) md_size,
3486                 (sizeof(q80_config_md_templ_size_t) >> 2), ha->hw.mbox,
3487                 (sizeof(q80_config_md_templ_size_rsp_t) >> 2), 0)) {
3488
3489                 device_printf(dev, "%s: failed\n", __func__);
3490
3491                 return (-1);
3492         }
3493
3494         md_size_rsp = (q80_config_md_templ_size_rsp_t *) ha->hw.mbox;
3495
3496         err = Q8_MBX_RSP_STATUS(md_size_rsp->regcnt_status);
3497
3498         if (err) {
3499                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3500                 return(-1);
3501         }
3502
3503         *size = md_size_rsp->templ_size;
3504
3505         return (0);
3506 }
3507
3508 static int
3509 qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits)
3510 {
3511         device_t                dev;
3512         q80_get_port_cfg_t      *pcfg;
3513         q80_get_port_cfg_rsp_t  *pcfg_rsp;
3514         uint32_t                err;
3515
3516         dev = ha->pci_dev;
3517
3518         pcfg = (q80_get_port_cfg_t *)ha->hw.mbox;
3519         bzero(pcfg, sizeof(q80_get_port_cfg_t));
3520
3521         pcfg->opcode = Q8_MBX_GET_PORT_CONFIG;
3522         pcfg->count_version = (sizeof (q80_get_port_cfg_t) >> 2);
3523         pcfg->count_version |= Q8_MBX_CMD_VERSION;
3524
3525         if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
3526                 (sizeof (q80_get_port_cfg_t) >> 2),
3527                 ha->hw.mbox, (sizeof (q80_get_port_cfg_rsp_t) >> 2), 0)) {
3528                 device_printf(dev, "%s: failed\n", __func__);
3529                 return -1;
3530         }
3531
3532         pcfg_rsp = (q80_get_port_cfg_rsp_t *)ha->hw.mbox;
3533
3534         err = Q8_MBX_RSP_STATUS(pcfg_rsp->regcnt_status);
3535
3536         if (err) {
3537                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3538                 return(-1);
3539         }
3540
3541         device_printf(dev, "%s: [cfg_bits, port type]"
3542                 " [0x%08x, 0x%02x] [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
3543                 " [0x%x, 0x%x, 0x%x]\n", __func__,
3544                 pcfg_rsp->cfg_bits, pcfg_rsp->phys_port_type,
3545                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
3546                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
3547                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0)
3548                 );
3549
3550         *cfg_bits = pcfg_rsp->cfg_bits;
3551
3552         return (0);
3553 }
3554
3555 int
3556 qla_iscsi_pdu(qla_host_t *ha, struct mbuf *mp)
3557 {
3558         struct ether_vlan_header        *eh;
3559         uint16_t                        etype;
3560         struct ip                       *ip = NULL;
3561         struct ip6_hdr                  *ip6 = NULL;
3562         struct tcphdr                   *th = NULL;
3563         uint32_t                        hdrlen;
3564         uint32_t                        offset;
3565         uint8_t                         buf[sizeof(struct ip6_hdr)];
3566
3567         eh = mtod(mp, struct ether_vlan_header *);
3568
3569         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3570                 hdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3571                 etype = ntohs(eh->evl_proto);
3572         } else {
3573                 hdrlen = ETHER_HDR_LEN;
3574                 etype = ntohs(eh->evl_encap_proto);
3575         }
3576
3577         if (etype == ETHERTYPE_IP) {
3578
3579                 offset = (hdrlen + sizeof (struct ip));
3580
3581                 if (mp->m_len >= offset) {
3582                         ip = (struct ip *)(mp->m_data + hdrlen);
3583                 } else {
3584                         m_copydata(mp, hdrlen, sizeof (struct ip), buf);
3585                         ip = (struct ip *)buf;
3586                 }
3587
3588                 if (ip->ip_p == IPPROTO_TCP) {
3589
3590                         hdrlen += ip->ip_hl << 2;
3591                         offset = hdrlen + 4;
3592
3593                         if (mp->m_len >= offset) {
3594                                 th = (struct tcphdr *)(mp->m_data + hdrlen);
3595                         } else {
3596                                 m_copydata(mp, hdrlen, 4, buf);
3597                                 th = (struct tcphdr *)buf;
3598                         }
3599                 }
3600
3601         } else if (etype == ETHERTYPE_IPV6) {
3602
3603                 offset = (hdrlen + sizeof (struct ip6_hdr));
3604
3605                 if (mp->m_len >= offset) {
3606                         ip6 = (struct ip6_hdr *)(mp->m_data + hdrlen);
3607                 } else {
3608                         m_copydata(mp, hdrlen, sizeof (struct ip6_hdr), buf);
3609                         ip6 = (struct ip6_hdr *)buf;
3610                 }
3611
3612                 if (ip6->ip6_nxt == IPPROTO_TCP) {
3613
3614                         hdrlen += sizeof(struct ip6_hdr);
3615                         offset = hdrlen + 4;
3616
3617                         if (mp->m_len >= offset) {
3618                                 th = (struct tcphdr *)(mp->m_data + hdrlen);
3619                         } else {
3620                                 m_copydata(mp, hdrlen, 4, buf);
3621                                 th = (struct tcphdr *)buf;
3622                         }
3623                 }
3624         }
3625
3626         if (th != NULL) {
3627                 if ((th->th_sport == htons(3260)) ||
3628                         (th->th_dport == htons(3260)))
3629                         return 0;
3630         }
3631         return (-1);
3632 }
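/*
 * Port 3260 is the IANA-registered iSCSI port.  The routine returns 0
 * when either TCP port of the frame matches (treat as an iSCSI PDU)
 * and -1 otherwise; only the first 4 bytes of the TCP header (the
 * port numbers) are ever copied out of the mbuf chain.
 */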
3633
3634 void
3635 qla_hw_async_event(qla_host_t *ha)
3636 {
3637         switch (ha->hw.aen_mb0) {
3638         case 0x8101:
3639                 (void)qla_idc_ack(ha, ha->hw.aen_mb1, ha->hw.aen_mb2,
3640                         ha->hw.aen_mb3, ha->hw.aen_mb4);
3641
3642                 break;
3643
3644         default:
3645                 break;
3646         }
3647
3648         return;
3649 }
3650
3651 #ifdef QL_LDFLASH_FW
3652 static int
3653 ql_get_minidump_template(qla_host_t *ha)
3654 {
3655         uint32_t                        err;
3656         device_t                        dev = ha->pci_dev;
3657         q80_config_md_templ_cmd_t       *md_templ;
3658         q80_config_md_templ_cmd_rsp_t   *md_templ_rsp;
3659
3660         md_templ = (q80_config_md_templ_cmd_t *) ha->hw.mbox;
3661         bzero(md_templ, (sizeof (q80_config_md_templ_cmd_t)));
3662
3663         md_templ->opcode = Q8_MBX_GET_MINIDUMP_TMPLT;
3664         md_templ->count_version = ( sizeof(q80_config_md_templ_cmd_t) >> 2);
3665         md_templ->count_version |= Q8_MBX_CMD_VERSION;
3666
3667         md_templ->buf_addr = ha->hw.dma_buf.minidump.dma_addr;
3668         md_templ->buff_size = ha->hw.dma_buf.minidump.size;
3669
3670         if (qla_mbx_cmd(ha, (uint32_t *) md_templ,
3671                 (sizeof(q80_config_md_templ_cmd_t) >> 2),
3672                  ha->hw.mbox,
3673                 (sizeof(q80_config_md_templ_cmd_rsp_t) >> 2), 0)) {
3674
3675                 device_printf(dev, "%s: failed\n", __func__);
3676
3677                 return (-1);
3678         }
3679
3680         md_templ_rsp = (q80_config_md_templ_cmd_rsp_t *) ha->hw.mbox;
3681
3682         err = Q8_MBX_RSP_STATUS(md_templ_rsp->regcnt_status);
3683
3684         if (err) {
3685                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3686                 return (-1);
3687         }
3688
3689         return (0);
3690
3691 }
3692 #endif /* #ifdef QL_LDFLASH_FW */
3693
3694 /*
3695  * Minidump related functionality 
3696  */
3697
3698 static int ql_parse_template(qla_host_t *ha);
3699
3700 static uint32_t ql_rdcrb(qla_host_t *ha,
3701                         ql_minidump_entry_rdcrb_t *crb_entry,
3702                         uint32_t * data_buff);
3703
3704 static uint32_t ql_pollrd(qla_host_t *ha,
3705                         ql_minidump_entry_pollrd_t *entry,
3706                         uint32_t * data_buff);
3707
3708 static uint32_t ql_pollrd_modify_write(qla_host_t *ha,
3709                         ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
3710                         uint32_t *data_buff);
3711
3712 static uint32_t ql_L2Cache(qla_host_t *ha,
3713                         ql_minidump_entry_cache_t *cacheEntry,
3714                         uint32_t * data_buff);
3715
3716 static uint32_t ql_L1Cache(qla_host_t *ha,
3717                         ql_minidump_entry_cache_t *cacheEntry,
3718                         uint32_t *data_buff);
3719
3720 static uint32_t ql_rdocm(qla_host_t *ha,
3721                         ql_minidump_entry_rdocm_t *ocmEntry,
3722                         uint32_t *data_buff);
3723
3724 static uint32_t ql_rdmem(qla_host_t *ha,
3725                         ql_minidump_entry_rdmem_t *mem_entry,
3726                         uint32_t *data_buff);
3727
3728 static uint32_t ql_rdrom(qla_host_t *ha,
3729                         ql_minidump_entry_rdrom_t *romEntry,
3730                         uint32_t *data_buff);
3731
3732 static uint32_t ql_rdmux(qla_host_t *ha,
3733                         ql_minidump_entry_mux_t *muxEntry,
3734                         uint32_t *data_buff);
3735
3736 static uint32_t ql_rdmux2(qla_host_t *ha,
3737                         ql_minidump_entry_mux2_t *muxEntry,
3738                         uint32_t *data_buff);
3739
3740 static uint32_t ql_rdqueue(qla_host_t *ha,
3741                         ql_minidump_entry_queue_t *queueEntry,
3742                         uint32_t *data_buff);
3743
3744 static uint32_t ql_cntrl(qla_host_t *ha,
3745                         ql_minidump_template_hdr_t *template_hdr,
3746                         ql_minidump_entry_cntrl_t *crbEntry);
3747
3748
3749 static uint32_t
3750 ql_minidump_size(qla_host_t *ha)
3751 {
3752         uint32_t i, k;
3753         uint32_t size = 0;
3754         ql_minidump_template_hdr_t *hdr;
3755
3756         hdr = (ql_minidump_template_hdr_t *)ha->hw.dma_buf.minidump.dma_b;
3757
3758         i = 0x2;
3759
3760         for (k = 1; k < QL_DBG_CAP_SIZE_ARRAY_LEN; k++) {
3761                 if (i & ha->hw.mdump_capture_mask)
3762                         size += hdr->capture_size_array[k];
3763                 i = i << 1;
3764         }
3765         return (size);
3766 }
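/*
 * Worked example (mask value assumed): capture_size_array[] is indexed
 * by capture-mask bit, so with mdump_capture_mask == 0x6 (bits 1 and 2
 * set) the size returned is capture_size_array[1] +
 * capture_size_array[2].
 */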
3767
3768 static void
3769 ql_free_minidump_buffer(qla_host_t *ha)
3770 {
3771         if (ha->hw.mdump_buffer != NULL) {
3772                 free(ha->hw.mdump_buffer, M_QLA83XXBUF);
3773                 ha->hw.mdump_buffer = NULL;
3774                 ha->hw.mdump_buffer_size = 0;
3775         }
3776         return;
3777 }
3778
3779 static int
3780 ql_alloc_minidump_buffer(qla_host_t *ha)
3781 {
3782         ha->hw.mdump_buffer_size = ql_minidump_size(ha);
3783
3784         if (!ha->hw.mdump_buffer_size)
3785                 return (-1);
3786
3787         ha->hw.mdump_buffer = malloc(ha->hw.mdump_buffer_size, M_QLA83XXBUF,
3788                                         M_NOWAIT);
3789
3790         if (ha->hw.mdump_buffer == NULL)
3791                 return (-1);
3792
3793         return (0);
3794 }
3795
3796 static void
3797 ql_free_minidump_template_buffer(qla_host_t *ha)
3798 {
3799         if (ha->hw.mdump_template != NULL) {
3800                 free(ha->hw.mdump_template, M_QLA83XXBUF);
3801                 ha->hw.mdump_template = NULL;
3802                 ha->hw.mdump_template_size = 0;
3803         }
3804         return;
3805 }
3806
3807 static int
3808 ql_alloc_minidump_template_buffer(qla_host_t *ha)
3809 {
3810         ha->hw.mdump_template_size = ha->hw.dma_buf.minidump.size;
3811
3812         ha->hw.mdump_template = malloc(ha->hw.mdump_template_size,
3813                                         M_QLA83XXBUF, M_NOWAIT);
3814
3815         if (ha->hw.mdump_template == NULL)
3816                 return (-1);
3817
3818         return (0);
3819 }
3820
3821 static int
3822 ql_alloc_minidump_buffers(qla_host_t *ha)
3823 {
3824         int ret;
3825
3826         ret = ql_alloc_minidump_template_buffer(ha);
3827
3828         if (ret)
3829                 return (ret);
3830
3831         ret = ql_alloc_minidump_buffer(ha);
3832
3833         if (ret)
3834                 ql_free_minidump_template_buffer(ha);
3835
3836         return (ret);
3837 }
3838
3839
3840 static uint32_t
3841 ql_validate_minidump_checksum(qla_host_t *ha)
3842 {
3843         uint64_t sum = 0;
3844         int count;
3845         uint32_t *template_buff;
3846
3847         count = ha->hw.dma_buf.minidump.size / sizeof (uint32_t);
3848         template_buff = ha->hw.dma_buf.minidump.dma_b;
3849
3850         while (count-- > 0) {
3851                 sum += *template_buff++;
3852         }
3853
3854         while (sum >> 32) {
3855                 sum = (sum & 0xFFFFFFFF) + (sum >> 32);
3856         }
3857
3858         return (~sum);
3859 }
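/*
 * The template is considered valid when this routine returns 0, i.e.
 * when the end-around-carry sum of all 32-bit words folds to
 * 0xFFFFFFFF.  Worked example with two words: 0xFFFFFFFF + 0x00000001
 * sums to 0x100000000, folds to 0x00000001, and ~0x00000001 != 0, so
 * validation fails.
 */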
3860
3861 int
3862 ql_minidump_init(qla_host_t *ha)
3863 {
3864         int             ret = 0;
3865         uint32_t        template_size = 0;
3866         device_t        dev = ha->pci_dev;
3867
3868         /*
3869          * Get Minidump Template Size
3870          */
3871         ret = qla_get_minidump_tmplt_size(ha, &template_size);
3872
3873         if (ret || (template_size == 0)) {
3874                 device_printf(dev, "%s: failed [%d, %d]\n", __func__, ret,
3875                         template_size);
3876                 return (-1);
3877         }
3878
3879         /*
3880          * Allocate Memory for Minidump Template
3881          */
3882
3883         ha->hw.dma_buf.minidump.alignment = 8;
3884         ha->hw.dma_buf.minidump.size = template_size;
3885
3886 #ifdef QL_LDFLASH_FW
3887         if (ql_alloc_dmabuf(ha, &ha->hw.dma_buf.minidump)) {
3888
3889                 device_printf(dev, "%s: minidump dma alloc failed\n", __func__);
3890
3891                 return (-1);
3892         }
3893         ha->hw.dma_buf.flags.minidump = 1;
3894
3895         /*
3896          * Retrieve Minidump Template
3897          */
3898         ret = ql_get_minidump_template(ha);
3899 #else
3900         ha->hw.dma_buf.minidump.dma_b = ql83xx_minidump;
3901
3902 #endif /* #ifdef QL_LDFLASH_FW */
3903
3904         if (ret == 0) {
3905
3906                 ret = ql_validate_minidump_checksum(ha);
3907
3908                 if (ret == 0) {
3909
3910                         ret = ql_alloc_minidump_buffers(ha);
3911
3912                         if (ret == 0)
3913                                 ha->hw.mdump_init = 1;
3914                         else
3915                                 device_printf(dev,
3916                                         "%s: ql_alloc_minidump_buffers"
3917                                         " failed\n", __func__);
3918                 } else {
3919                         device_printf(dev, "%s: ql_validate_minidump_checksum"
3920                                 " failed\n", __func__);
3921                 }
3922         } else {
3923                 device_printf(dev, "%s: ql_get_minidump_template failed\n",
3924                          __func__);
3925         }
3926
3927         if (ret)
3928                 ql_minidump_free(ha);
3929
3930         return (ret);
3931 }
3932
3933 static void
3934 ql_minidump_free(qla_host_t *ha)
3935 {
3936         ha->hw.mdump_init = 0;
3937         if (ha->hw.dma_buf.flags.minidump) {
3938                 ha->hw.dma_buf.flags.minidump = 0;
3939                 ql_free_dmabuf(ha, &ha->hw.dma_buf.minidump);
3940         }
3941
3942         ql_free_minidump_template_buffer(ha);
3943         ql_free_minidump_buffer(ha);
3944
3945         return;
3946 }
3947
3948 void
3949 ql_minidump(qla_host_t *ha)
3950 {
3951         if (!ha->hw.mdump_init)
3952                 return;
3953
3954         if (ha->hw.mdump_done)
3955                 return;
3956
3957         ha->hw.mdump_start_seq_index = ql_stop_sequence(ha);
3958
3959         bzero(ha->hw.mdump_buffer, ha->hw.mdump_buffer_size);
3960         bzero(ha->hw.mdump_template, ha->hw.mdump_template_size);
3961
3962         bcopy(ha->hw.dma_buf.minidump.dma_b, ha->hw.mdump_template,
3963                 ha->hw.mdump_template_size);
3964
3965         ql_parse_template(ha);
3966
3967         ql_start_sequence(ha, ha->hw.mdump_start_seq_index);
3968
3969         ha->hw.mdump_done = 1;
3970
3971         return;
3972 }
3973
3974
3975 /*
3976  * helper routines
3977  */
3978 static void 
3979 ql_entry_err_chk(ql_minidump_entry_t *entry, uint32_t esize)
3980 {
3981         if (esize != entry->hdr.entry_capture_size) {
3982                 entry->hdr.entry_capture_size = esize;
3983                 entry->hdr.driver_flags |= QL_DBG_SIZE_ERR_FLAG;
3984         }
3985         return;
3986 }
3987
3988
3989 static int 
3990 ql_parse_template(qla_host_t *ha)
3991 {
3992         uint32_t num_of_entries, buff_level, e_cnt, esize;
3993         uint32_t end_cnt, rv = 0;
3994         char *dump_buff, *dbuff;
3995         int sane_start = 0, sane_end = 0;
3996         ql_minidump_template_hdr_t *template_hdr;
3997         ql_minidump_entry_t *entry;
3998         uint32_t capture_mask; 
3999         uint32_t dump_size; 
4000
4001         /* Setup parameters */
4002         template_hdr = (ql_minidump_template_hdr_t *)ha->hw.mdump_template;
4003
4004         if (template_hdr->entry_type == TLHDR)
4005                 sane_start = 1;
4006         
4007         dump_buff = (char *) ha->hw.mdump_buffer;
4008
4009         num_of_entries = template_hdr->num_of_entries;
4010
4011         entry = (ql_minidump_entry_t *) ((char *)template_hdr +
4012                         template_hdr->first_entry_offset);
4013
4014         template_hdr->saved_state_array[QL_OCM0_ADDR_INDX] =
4015                 template_hdr->ocm_window_array[ha->pci_func];
4016         template_hdr->saved_state_array[QL_PCIE_FUNC_INDX] = ha->pci_func;
4017
4018         capture_mask = ha->hw.mdump_capture_mask;
4019         dump_size = ha->hw.mdump_buffer_size;
4020
4021         template_hdr->driver_capture_mask = capture_mask;
4022
4023         QL_DPRINT80(ha, (ha->pci_dev,
4024                 "%s: sane_start = %d num_of_entries = %d "
4025                 "capture_mask = 0x%x dump_size = %d \n", 
4026                 __func__, sane_start, num_of_entries, capture_mask, dump_size));
4027
4028         for (buff_level = 0, e_cnt = 0; e_cnt < num_of_entries; e_cnt++) {
4029
4030                 /*
4031                  * If the capture_mask of the entry does not match capture mask
4032                  * skip the entry after marking the driver_flags indicator.
4033                  */
4034                 
4035                 if (!(entry->hdr.entry_capture_mask & capture_mask)) {
4036
4037                         entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4038                         entry = (ql_minidump_entry_t *) ((char *) entry
4039                                         + entry->hdr.entry_size);
4040                         continue;
4041                 }
4042
4043                 /*
4044                  * This is ONLY needed in implementations where
4045                  * the capture buffer allocated is too small to capture
4046                  * all of the required entries for a given capture mask.
4047                  * We need to empty the buffer contents to a file
4048                  * if possible, before processing the next entry
4049                  * If the buff_full_flag is set, no further capture will happen
4050                  * and all remaining non-control entries will be skipped.
4051                  */
4052                 if (entry->hdr.entry_capture_size != 0) {
4053                         if ((buff_level + entry->hdr.entry_capture_size) >
4054                                 dump_size) {
4055                                 /*  Try to recover by emptying buffer to file */
4056                                 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4057                                 entry = (ql_minidump_entry_t *) ((char *) entry
4058                                                 + entry->hdr.entry_size);
4059                                 continue;
4060                         }
4061                 }
4062
4063                 /*
4064                  * Decode the entry type and process it accordingly
4065                  */
4066
4067                 switch (entry->hdr.entry_type) {
4068                 case RDNOP:
4069                         break;
4070
4071                 case RDEND:
4072                         if (sane_end == 0) {
4073                                 end_cnt = e_cnt;
4074                         }
4075                         sane_end++;
4076                         break;
4077
4078                 case RDCRB:
4079                         dbuff = dump_buff + buff_level;
4080                         esize = ql_rdcrb(ha, (void *)entry, (void *)dbuff);
4081                         ql_entry_err_chk(entry, esize);
4082                         buff_level += esize;
4083                         break;
4084
4085                 case POLLRD:
4086                         dbuff = dump_buff + buff_level;
4087                         esize = ql_pollrd(ha, (void *)entry, (void *)dbuff);
4088                         ql_entry_err_chk(entry, esize);
4089                         buff_level += esize;
4090                         break;
4091
4092                 case POLLRDMWR:
4093                         dbuff = dump_buff + buff_level;
4094                         esize = ql_pollrd_modify_write(ha, (void *)entry,
4095                                         (void *)dbuff);
4096                         ql_entry_err_chk(entry, esize);
4097                         buff_level += esize;
4098                         break;
4099
4100                 case L2ITG:
4101                 case L2DTG:
4102                 case L2DAT:
4103                 case L2INS:
4104                         dbuff = dump_buff + buff_level;
4105                         esize = ql_L2Cache(ha, (void *)entry, (void *)dbuff);
4106                         if (esize == -1) {
4107                                 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4108                         } else {
4109                                 ql_entry_err_chk(entry, esize);
4110                                 buff_level += esize;
4111                         }
4112                         break;
4113
4114                 case L1DAT:
4115                 case L1INS:
4116                         dbuff = dump_buff + buff_level;
4117                         esize = ql_L1Cache(ha, (void *)entry, (void *)dbuff);
4118                         ql_entry_err_chk(entry, esize);
4119                         buff_level += esize;
4120                         break;
4121
4122                 case RDOCM:
4123                         dbuff = dump_buff + buff_level;
4124                         esize = ql_rdocm(ha, (void *)entry, (void *)dbuff);
4125                         ql_entry_err_chk(entry, esize);
4126                         buff_level += esize;
4127                         break;
4128
4129                 case RDMEM:
4130                         dbuff = dump_buff + buff_level;
4131                         esize = ql_rdmem(ha, (void *)entry, (void *)dbuff);
4132                         ql_entry_err_chk(entry, esize);
4133                         buff_level += esize;
4134                         break;
4135
4136                 case BOARD:
4137                 case RDROM:
4138                         dbuff = dump_buff + buff_level;
4139                         esize = ql_rdrom(ha, (void *)entry, (void *)dbuff);
4140                         ql_entry_err_chk(entry, esize);
4141                         buff_level += esize;
4142                         break;
4143
4144                 case RDMUX:
4145                         dbuff = dump_buff + buff_level;
4146                         esize = ql_rdmux(ha, (void *)entry, (void *)dbuff);
4147                         ql_entry_err_chk(entry, esize);
4148                         buff_level += esize;
4149                         break;
4150
4151                 case RDMUX2:
4152                         dbuff = dump_buff + buff_level;
4153                         esize = ql_rdmux2(ha, (void *)entry, (void *)dbuff);
4154                         ql_entry_err_chk(entry, esize);
4155                         buff_level += esize;
4156                         break;
4157
4158                 case QUEUE:
4159                         dbuff = dump_buff + buff_level;
4160                         esize = ql_rdqueue(ha, (void *)entry, (void *)dbuff);
4161                         ql_entry_err_chk(entry, esize);
4162                         buff_level += esize;
4163                         break;
4164
4165                 case CNTRL:
4166                         if ((rv = ql_cntrl(ha, template_hdr, (void *)entry))) {
4167                                 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4168                         }
4169                         break;
4170                 default:
4171                         entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4172                         break;
4173                 }
4174                 /*  next entry in the template */
4175                 entry = (ql_minidump_entry_t *) ((char *) entry
4176                                                 + entry->hdr.entry_size);
4177         }
4178
4179         if (!sane_start || (sane_end > 1)) {
4180                 device_printf(ha->pci_dev,
4181                         "\n%s: Template configuration error. Check Template\n",
4182                         __func__);
4183         }
4184         
4185         QL_DPRINT80(ha, (ha->pci_dev, "%s: Minidump num of entries = %d\n",
4186                 __func__, template_hdr->num_of_entries));
4187
4188         return 0;
4189 }
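/*
 * ql_parse_template() walks the template as a byte stream: each
 * iteration advances the template cursor by entry->hdr.entry_size, and
 * every captured entry appends its data at dump_buff + buff_level.
 * E.g. an entry with entry_size == 32 and entry_capture_size == 64
 * moves the template cursor 32 bytes and the dump cursor 64 bytes.
 */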
4190
4191 /*
4192  * Read CRB operation.
4193  */
4194 static uint32_t
4195 ql_rdcrb(qla_host_t *ha, ql_minidump_entry_rdcrb_t * crb_entry,
4196         uint32_t * data_buff)
4197 {
4198         int loop_cnt;
4199         int ret;
4200         uint32_t op_count, addr, stride, value = 0;
4201
4202         addr = crb_entry->addr;
4203         op_count = crb_entry->op_count;
4204         stride = crb_entry->addr_stride;
4205
4206         for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
4207
4208                 ret = ql_rdwr_indreg32(ha, addr, &value, 1);
4209
4210                 if (ret)
4211                         return (0);
4212
4213                 *data_buff++ = addr;
4214                 *data_buff++ = value;
4215                 addr = addr + stride;
4216         }
4217
4218         /*
4219          * Return the number of bytes captured for this entry.
4220          */
4221         return (op_count * (2 * sizeof(uint32_t)));
4222 }
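/*
 * Output layout example (values assumed): with op_count == 2,
 * addr == 0x3000 and stride == 4, the capture buffer receives
 * { 0x3000, <value at 0x3000>, 0x3004, <value at 0x3004> } and the
 * routine reports 16 bytes written.
 */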
4223
4224 /*
4225  * Handle L2 Cache.
4226  */
4227
4228 static uint32_t 
4229 ql_L2Cache(qla_host_t *ha, ql_minidump_entry_cache_t *cacheEntry,
4230         uint32_t * data_buff)
4231 {
4232         int i, k;
4233         int loop_cnt;
4234         int ret;
4235
4236         uint32_t read_value;
4237         uint32_t addr, read_addr, cntrl_addr, tag_reg_addr, cntl_value_w;
4238         uint32_t tag_value, read_cnt;
4239         volatile uint8_t cntl_value_r;
4240         long timeout;
4241         uint32_t data;
4242
4243         loop_cnt = cacheEntry->op_count;
4244
4245         read_addr = cacheEntry->read_addr;
4246         cntrl_addr = cacheEntry->control_addr;
4247         cntl_value_w = (uint32_t) cacheEntry->write_value;
4248
4249         tag_reg_addr = cacheEntry->tag_reg_addr;
4250
4251         tag_value = cacheEntry->init_tag_value;
4252         read_cnt = cacheEntry->read_addr_cnt;
4253
4254         for (i = 0; i < loop_cnt; i++) {
4255
4256                 ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
4257                 if (ret)
4258                         return (0);
4259
4260                 if (cacheEntry->write_value != 0) { 
4261
4262                         ret = ql_rdwr_indreg32(ha, cntrl_addr,
4263                                         &cntl_value_w, 0);
4264                         if (ret)
4265                                 return (0);
4266                 }
4267
4268                 if (cacheEntry->poll_mask != 0) { 
4269
4270                         timeout = cacheEntry->poll_wait;
4271
4272                         ret = ql_rdwr_indreg32(ha, cntrl_addr, &data, 1);
4273                         if (ret)
4274                                 return (0);
4275
4276                         cntl_value_r = (uint8_t)data;
4277
4278                         while ((cntl_value_r & cacheEntry->poll_mask) != 0) {
4279
4280                                 if (timeout) {
4281                                         qla_mdelay(__func__, 1);
4282                                         timeout--;
4283                                 } else
4284                                         break;
4285
4286                                 ret = ql_rdwr_indreg32(ha, cntrl_addr,
4287                                                 &data, 1);
4288                                 if (ret)
4289                                         return (0);
4290
4291                                 cntl_value_r = (uint8_t)data;
4292                         }
4293                         if (!timeout) {
4294                                 /* Report timeout error. 
4295                                  * core dump capture failed
4296                                  * Skip remaining entries.
4297                                  * Write buffer out to file
4298                                  * Use driver specific fields in template header
4299                                  * to report this error.
4300                                  */
4301                                 return (-1);
4302                         }
4303                 }
4304
4305                 addr = read_addr;
4306                 for (k = 0; k < read_cnt; k++) {
4307
4308                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4309                         if (ret)
4310                                 return (0);
4311
4312                         *data_buff++ = read_value;
4313                         addr += cacheEntry->read_addr_stride;
4314                 }
4315
4316                 tag_value += cacheEntry->tag_value_stride;
4317         }
4318
4319         return (read_cnt * loop_cnt * sizeof(uint32_t));
4320 }
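/*
 * Note the two failure conventions above: a register access error
 * returns 0 (nothing captured), while a poll timeout returns
 * (uint32_t)-1 so that ql_parse_template() can mark the entry
 * QL_DBG_SKIPPED_FLAG instead of treating it as an empty capture.
 */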
4321
4322 /*
4323  * Handle L1 Cache.
4324  */
4325
4326 static uint32_t 
4327 ql_L1Cache(qla_host_t *ha,
4328         ql_minidump_entry_cache_t *cacheEntry,
4329         uint32_t *data_buff)
4330 {
4331         int ret;
4332         int i, k;
4333         int loop_cnt;
4334
4335         uint32_t read_value;
4336         uint32_t addr, read_addr, cntrl_addr, tag_reg_addr;
4337         uint32_t tag_value, read_cnt;
4338         uint32_t cntl_value_w;
4339
4340         loop_cnt = cacheEntry->op_count;
4341
4342         read_addr = cacheEntry->read_addr;
4343         cntrl_addr = cacheEntry->control_addr;
4344         cntl_value_w = (uint32_t) cacheEntry->write_value;
4345
4346         tag_reg_addr = cacheEntry->tag_reg_addr;
4347
4348         tag_value = cacheEntry->init_tag_value;
4349         read_cnt = cacheEntry->read_addr_cnt;
4350
4351         for (i = 0; i < loop_cnt; i++) {
4352
4353                 ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
4354                 if (ret)
4355                         return (0);
4356
4357                 ret = ql_rdwr_indreg32(ha, cntrl_addr, &cntl_value_w, 0);
4358                 if (ret)
4359                         return (0);
4360
4361                 addr = read_addr;
4362                 for (k = 0; k < read_cnt; k++) {
4363
4364                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4365                         if (ret)
4366                                 return (0);
4367
4368                         *data_buff++ = read_value;
4369                         addr += cacheEntry->read_addr_stride;
4370                 }
4371
4372                 tag_value += cacheEntry->tag_value_stride;
4373         }
4374
4375         return (read_cnt * loop_cnt * sizeof(uint32_t));
4376 }
4377
4378 /*
4379  * Reading OCM memory
4380  */
4381
4382 static uint32_t 
4383 ql_rdocm(qla_host_t *ha,
4384         ql_minidump_entry_rdocm_t *ocmEntry,
4385         uint32_t *data_buff)
4386 {
4387         int i, loop_cnt;
4388         volatile uint32_t addr;
4389         volatile uint32_t value;
4390
4391         addr = ocmEntry->read_addr;
4392         loop_cnt = ocmEntry->op_count;
4393
4394         for (i = 0; i < loop_cnt; i++) {
4395                 value = READ_REG32(ha, addr);
4396                 *data_buff++ = value;
4397                 addr += ocmEntry->read_addr_stride;
4398         }
4399         return (loop_cnt * sizeof(value));
4400 }
4401
4402 /*
4403  * Read memory
4404  */
4405
4406 static uint32_t 
4407 ql_rdmem(qla_host_t *ha,
4408         ql_minidump_entry_rdmem_t *mem_entry,
4409         uint32_t *data_buff)
4410 {
4411         int ret;
4412         int i, loop_cnt;
4413         volatile uint32_t addr;
4414         q80_offchip_mem_val_t val;
4415
4416         addr = mem_entry->read_addr;
4417
4418         /* size in bytes / 16 */
4419         loop_cnt = mem_entry->read_data_size / (sizeof(uint32_t) * 4);
4420
4421         for (i = 0; i < loop_cnt; i++) {
4422
4423                 ret = ql_rdwr_offchip_mem(ha, (addr & 0x0ffffffff), &val, 1);
4424                 if (ret)
4425                         return (0);
4426
4427                 *data_buff++ = val.data_lo;
4428                 *data_buff++ = val.data_hi;
4429                 *data_buff++ = val.data_ulo;
4430                 *data_buff++ = val.data_uhi;
4431
4432                 addr += (sizeof(uint32_t) * 4);
4433         }
4434
4435         return (loop_cnt * (sizeof(uint32_t) * 4));
4436 }
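/*
 * Sizing example (value assumed): each iteration transfers one 16-byte
 * q80_offchip_mem_val_t, so a read_data_size of 64 bytes yields
 * loop_cnt == 4 and 16 dwords written to the capture buffer.
 */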
4437
4438 /*
4439  * Read Rom
4440  */
4441
4442 static uint32_t 
4443 ql_rdrom(qla_host_t *ha,
4444         ql_minidump_entry_rdrom_t *romEntry,
4445         uint32_t *data_buff)
4446 {
4447         int ret;
4448         int i, loop_cnt;
4449         uint32_t addr;
4450         uint32_t value;
4451
4452         addr = romEntry->read_addr;
4453         loop_cnt = romEntry->read_data_size; /* This is size in bytes */
4454         loop_cnt /= sizeof(value);
4455
4456         for (i = 0; i < loop_cnt; i++) {
4457
4458                 ret = ql_rd_flash32(ha, addr, &value);
4459                 if (ret)
4460                         return (0);
4461
4462                 *data_buff++ = value;
4463                 addr += sizeof(value);
4464         }
4465
4466         return (loop_cnt * sizeof(value));
4467 }
4468
4469 /*
4470  * Read MUX data
4471  */
4472
4473 static uint32_t 
4474 ql_rdmux(qla_host_t *ha,
4475         ql_minidump_entry_mux_t *muxEntry,
4476         uint32_t *data_buff)
4477 {
4478         int ret;
4479         int loop_cnt;
4480         uint32_t read_value, sel_value;
4481         uint32_t read_addr, select_addr;
4482
4483         select_addr = muxEntry->select_addr;
4484         sel_value = muxEntry->select_value;
4485         read_addr = muxEntry->read_addr;
4486
4487         for (loop_cnt = 0; loop_cnt < muxEntry->op_count; loop_cnt++) {
4488
4489                 ret = ql_rdwr_indreg32(ha, select_addr, &sel_value, 0);
4490                 if (ret)
4491                         return (0);
4492
4493                 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
4494                 if (ret)
4495                         return (0);
4496
4497                 *data_buff++ = sel_value;
4498                 *data_buff++ = read_value;
4499
4500                 sel_value += muxEntry->select_value_stride;
4501         }
4502
4503         return (loop_cnt * (2 * sizeof(uint32_t)));
4504 }
4505
4506 static uint32_t
4507 ql_rdmux2(qla_host_t *ha,
4508         ql_minidump_entry_mux2_t *muxEntry,
4509         uint32_t *data_buff)
4510 {
4511         int ret;
4512         int loop_cnt;
4513
4514         uint32_t select_addr_1, select_addr_2;
4515         uint32_t select_value_1, select_value_2;
4516         uint32_t select_value_count, select_value_mask;
4517         uint32_t read_addr, read_value;
4518
4519         select_addr_1 = muxEntry->select_addr_1;
4520         select_addr_2 = muxEntry->select_addr_2;
4521         select_value_1 = muxEntry->select_value_1;
4522         select_value_2 = muxEntry->select_value_2;
4523         select_value_count = muxEntry->select_value_count;
4524         select_value_mask  = muxEntry->select_value_mask;
4525
4526         read_addr = muxEntry->read_addr;
4527
4528         for (loop_cnt = 0; loop_cnt < muxEntry->select_value_count;
4529                 loop_cnt++) {
4530
4531                 uint32_t temp_sel_val;
4532
4533                 ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_1, 0);
4534                 if (ret)
4535                         return (0);
4536
4537                 temp_sel_val = select_value_1 & select_value_mask;
4538
4539                 ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
4540                 if (ret)
4541                         return (0);
4542
4543                 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
4544                 if (ret)
4545                         return (0);
4546
4547                 *data_buff++ = temp_sel_val;
4548                 *data_buff++ = read_value;
4549
4550                 ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_2, 0);
4551                 if (ret)
4552                         return (0);
4553
4554                 temp_sel_val = select_value_2 & select_value_mask;
4555
4556                 ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
4557                 if (ret)
4558                         return (0);
4559
4560                 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
4561                 if (ret)
4562                         return (0);
4563
4564                 *data_buff++ = temp_sel_val;
4565                 *data_buff++ = read_value;
4566
4567                 select_value_1 += muxEntry->select_value_stride;
4568                 select_value_2 += muxEntry->select_value_stride;
4569         }
4570
4571         return (loop_cnt * (4 * sizeof(uint32_t)));
4572 }
4573
4574 /*
4575  * Handling Queue State Reads.
4576  */
4577
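/*
 * A queue entry walks op_count queues, advancing the queue ID by
 * queue_id_stride each iteration. The queue ID is written to
 * select_addr, and read_addr_cnt words are then read starting at
 * read_addr, stepping by read_addr_stride. Only the register words are
 * stored; the queue ID itself is not written to the buffer.
 */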
4578 static uint32_t 
4579 ql_rdqueue(qla_host_t *ha,
4580         ql_minidump_entry_queue_t *queueEntry,
4581         uint32_t *data_buff)
4582 {
4583         int ret;
4584         int loop_cnt, k;
4585         uint32_t read_value;
4586         uint32_t read_addr, read_stride, select_addr;
4587         uint32_t queue_id, read_cnt;
4588
4589         read_cnt = queueEntry->read_addr_cnt;
4590         read_stride = queueEntry->read_addr_stride;
4591         select_addr = queueEntry->select_addr;
4592
4593         for (loop_cnt = 0, queue_id = 0; loop_cnt < queueEntry->op_count;
4594                 loop_cnt++) {
4595
4596                 ret = ql_rdwr_indreg32(ha, select_addr, &queue_id, 0);
4597                 if (ret)
4598                         return (0);
4599
4600                 read_addr = queueEntry->read_addr;
4601
4602                 for (k = 0; k < read_cnt; k++) {
4603
4604                         ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
4605                         if (ret)
4606                                 return (0);
4607
4608                         *data_buff++ = read_value;
4609                         read_addr += read_stride;
4610                 }
4611
4612                 queue_id += queueEntry->queue_id_stride;
4613         }
4614
4615         return (loop_cnt * (read_cnt * sizeof(uint32_t)));
4616 }
4617
4618 /*
4619  * Handling control entries.
4620  */
4621
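/*
 * A control (CRB) entry carries an opcode bitmask rather than data to
 * capture, so nothing is written to the dump buffer. The opcodes
 * handled below, in the order they are tested, are:
 *
 *      QL_DBG_OPCODE_WR      - write value_1 to the entry address
 *      QL_DBG_OPCODE_RW      - read the entry address, write it back
 *      QL_DBG_OPCODE_AND/OR  - read-modify-write using value_2/value_3
 *      QL_DBG_OPCODE_POLL    - poll until (reg & value_2) == value_1,
 *                              waiting up to poll_timeout milliseconds
 *      QL_DBG_OPCODE_RDSTATE - save a register in saved_state_array[]
 *      QL_DBG_OPCODE_WRSTATE - write a saved (or literal) value out
 *      QL_DBG_OPCODE_MDSTATE - shift/mask/or/add a saved state value
 *
 * Returns 0 on success; a poll timeout returns -1 so the caller can
 * abort the capture.
 */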
4622 static uint32_t 
4623 ql_cntrl(qla_host_t *ha,
4624         ql_minidump_template_hdr_t *template_hdr,
4625         ql_minidump_entry_cntrl_t *crbEntry)
4626 {
4627         int ret;
4628         int count;
4629         uint32_t opcode, read_value, addr, entry_addr;
4630         long timeout;
4631
4632         entry_addr = crbEntry->addr;
4633
4634         for (count = 0; count < crbEntry->op_count; count++) {
4635                 opcode = crbEntry->opcode;
4636
4637                 if (opcode & QL_DBG_OPCODE_WR) {
4638
4639                         ret = ql_rdwr_indreg32(ha, entry_addr,
4640                                         &crbEntry->value_1, 0);
4641                         if (ret)
4642                                 return (0);
4643
4644                         opcode &= ~QL_DBG_OPCODE_WR;
4645                 }
4646
4647                 if (opcode & QL_DBG_OPCODE_RW) {
4648
4649                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
4650                         if (ret)
4651                                 return (0);
4652
4653                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
4654                         if (ret)
4655                                 return (0);
4656
4657                         opcode &= ~QL_DBG_OPCODE_RW;
4658                 }
4659
4660                 if (opcode & QL_DBG_OPCODE_AND) {
4661
4662                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
4663                         if (ret)
4664                                 return (0);
4665
4666                         read_value &= crbEntry->value_2;
4667                         opcode &= ~QL_DBG_OPCODE_AND;
4668
4669                         if (opcode & QL_DBG_OPCODE_OR) {
4670                                 read_value |= crbEntry->value_3;
4671                                 opcode &= ~QL_DBG_OPCODE_OR;
4672                         }
4673
4674                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
4675                         if (ret)
4676                                 return (0);
4677                 }
4678
4679                 if (opcode & QL_DBG_OPCODE_OR) {
4680
4681                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
4682                         if (ret)
4683                                 return (0);
4684
4685                         read_value |= crbEntry->value_3;
4686
4687                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
4688                         if (ret)
4689                                 return (0);
4690
4691                         opcode &= ~QL_DBG_OPCODE_OR;
4692                 }
4693
4694                 if (opcode & QL_DBG_OPCODE_POLL) {
4695
4696                         opcode &= ~QL_DBG_OPCODE_POLL;
4697                         timeout = crbEntry->poll_timeout;
4698                         addr = entry_addr;
4699
4700                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4701                         if (ret)
4702                                 return (0);
4703
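                        /*
                         * Poll, delaying 1 ms per pass, until the masked
                         * register value matches value_1 or the timeout
                         * expires.
                         */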
4704                         while ((read_value & crbEntry->value_2)
4705                                 != crbEntry->value_1) {
4706
4707                                 if (timeout) {
4708                                         qla_mdelay(__func__, 1);
4709                                         timeout--;
4710                                 } else
4711                                         break;
4712
4713                                 ret = ql_rdwr_indreg32(ha, addr,
4714                                                 &read_value, 1);
4715                                 if (ret)
4716                                         return (0);
4717                         }
4718
4719                         if (!timeout) {
4720                                 /*
4721                                  * Poll timed out: the core dump capture
4722                                  * failed. Skip the remaining entries and
4723                                  * write the buffer out to the file, using
4724                                  * driver-specific fields in the template
4725                                  * header to report the error. (-1 reads
4726                                  * as all-ones: the return is unsigned.)
4727                                  */
4728                                 return (-1);
4729                         }
4730                 }
4731
4732                 if (opcode & QL_DBG_OPCODE_RDSTATE) {
4733                         /*
4734                          * decide which address to use.
4735                          */
4736                         if (crbEntry->state_index_a) {
4737                                 addr = template_hdr->saved_state_array[
4738                                                 crbEntry->state_index_a];
4739                         } else {
4740                                 addr = entry_addr;
4741                         }
4742
4743                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4744                         if (ret)
4745                                 return (0);
4746
4747                         template_hdr->saved_state_array[crbEntry->state_index_v]
4748                                         = read_value;
4749                         opcode &= ~QL_DBG_OPCODE_RDSTATE;
4750                 }
4751
4752                 if (opcode & QL_DBG_OPCODE_WRSTATE) {
4753                         /*
4754                          * decide which value to use.
4755                          */
4756                         if (crbEntry->state_index_v) {
4757                                 read_value = template_hdr->saved_state_array[
4758                                                 crbEntry->state_index_v];
4759                         } else {
4760                                 read_value = crbEntry->value_1;
4761                         }
4762                         /*
4763                          * decide which address to use.
4764                          */
4765                         if (crbEntry->state_index_a) {
4766                                 addr = template_hdr->saved_state_array[
4767                                                 crbEntry->state_index_a];
4768                         } else {
4769                                 addr = entry_addr;
4770                         }
4771
4772                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 0);
4773                         if (ret)
4774                                 return (0);
4775
4776                         opcode &= ~QL_DBG_OPCODE_WRSTATE;
4777                 }
4778
4779                 if (opcode & QL_DBG_OPCODE_MDSTATE) {
4780                         /*  Read value from saved state using index */
4781                         read_value = template_hdr->saved_state_array[
4782                                                 crbEntry->state_index_v];
4783
4784                         read_value <<= crbEntry->shl; /* Shift left operation */
4785                         read_value >>= crbEntry->shr; /* Shift right operation */
4786
4787                         if (crbEntry->value_2) {
4788                                 /* check if AND mask is provided */
4789                                 read_value &= crbEntry->value_2;
4790                         }
4791
4792                         read_value |= crbEntry->value_3; /* OR operation */
4793                         read_value += crbEntry->value_1; /* increment op */
4794
4795                         /* Write value back to state area. */
4796
4797                         template_hdr->saved_state_array[crbEntry->state_index_v]
4798                                         = read_value;
4799                         opcode &= ~QL_DBG_OPCODE_MDSTATE;
4800                 }
4801
4802                 entry_addr += crbEntry->addr_stride;
4803         }
4804
4805         return (0);
4806 }
4807
4808 /*
4809  * Handling rd poll entry.
4810  */
4811
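/*
 * For each of op_count iterations the current select value is written
 * to select_addr, and select_addr is then polled (up to 'poll' reads)
 * until a bit in 'mask' is set. One word is read from read_addr and
 * two words are stored per iteration:
 *
 *      data_buff[0] = select value
 *      data_buff[1] = word read from read_addr
 */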
4812 static uint32_t 
4813 ql_pollrd(qla_host_t *ha, ql_minidump_entry_pollrd_t *entry,
4814         uint32_t *data_buff)
4815 {
4816         int ret;
4817         int loop_cnt;
4818         uint32_t op_count, select_addr, select_value_stride, select_value;
4819         uint32_t read_addr, poll, mask, data_size, data;
4820         uint32_t wait_count = 0;
4821
4822         select_addr            = entry->select_addr;
4823         read_addr              = entry->read_addr;
4824         select_value           = entry->select_value;
4825         select_value_stride    = entry->select_value_stride;
4826         op_count               = entry->op_count;
4827         poll                   = entry->poll;
4828         mask                   = entry->mask;
4829         data_size              = entry->data_size;
4830
4831         for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
4832
4833                 ret = ql_rdwr_indreg32(ha, select_addr, &select_value, 0);
4834                 if (ret)
4835                         return (0);
4836
4837                 wait_count = 0;
4838
4839                 while (wait_count < poll) {
4840
4841                         uint32_t temp;
4842
4843                         ret = ql_rdwr_indreg32(ha, select_addr, &temp, 1);
4844                         if (ret)
4845                                 return (0);
4846
4847                         if ((temp & mask) != 0) {
4848                                 break;
4849                         }
4850                         wait_count++;
4851                 }
4852
4853                 if (wait_count == poll) {
4854                         device_printf(ha->pci_dev,
4855                                 "%s: Error in processing entry\n", __func__);
4856                         device_printf(ha->pci_dev,
4857                                 "%s: wait_count <0x%x> poll <0x%x>\n",
4858                                 __func__, wait_count, poll);
4859                         return (0);
4860                 }
4861
4862                 ret = ql_rdwr_indreg32(ha, read_addr, &data, 1);
4863                 if (ret)
4864                         return (0);
4865
4866                 *data_buff++ = select_value;
4867                 *data_buff++ = data;
4868                 select_value = select_value + select_value_stride;
4869         }
4870
4871         /*
4872          * Return the number of bytes written to the data buffer.
4873          */
4874         return (loop_cnt * (2 * sizeof(uint32_t)));
4875 }
4876
4878 /*
4879  * Handling rd modify write poll entry.
4880  */
4881
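/*
 * A single-shot entry (no op_count loop): value_1 is written to addr_1,
 * which is polled until a bit in 'mask' is set. On success a word is
 * read from addr_2, ANDed with modify_mask, and written back; value_2
 * is then written to addr_1 and the same poll is repeated. Two words
 * are stored on the success path:
 *
 *      data_buff[0] = addr_2
 *      data_buff[1] = modified word written back to addr_2
 */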
4882 static uint32_t 
4883 ql_pollrd_modify_write(qla_host_t *ha,
4884         ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
4885         uint32_t *data_buff)
4886 {
4887         int ret;
4888         uint32_t addr_1, addr_2, value_1, value_2, data;
4889         uint32_t poll, mask, data_size, modify_mask;
4890         uint32_t wait_count = 0;
4891
4892         addr_1          = entry->addr_1;
4893         addr_2          = entry->addr_2;
4894         value_1         = entry->value_1;
4895         value_2         = entry->value_2;
4896
4897         poll            = entry->poll;
4898         mask            = entry->mask;
4899         modify_mask     = entry->modify_mask;
4900         data_size       = entry->data_size;
4901
4903         ret = ql_rdwr_indreg32(ha, addr_1, &value_1, 0);
4904         if (ret)
4905                 return (0);
4906
4907         wait_count = 0;
4908         while (wait_count < poll) {
4909
4910                 uint32_t temp;
4911
4912                 ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
4913                 if (ret)
4914                         return (0);
4915
4916                 if ((temp & mask) != 0) {
4917                         break;
4918                 }
4919                 wait_count++;
4920         }
4921
4922         if (wait_count == poll) {
4923                 device_printf(ha->pci_dev, "%s Error in processing entry\n",
4924                         __func__);
4925         } else {
4926
4927                 ret = ql_rdwr_indreg32(ha, addr_2, &data, 1);
4928                 if (ret)
4929                         return (0);
4930
4931                 data = (data & modify_mask);
4932
4933                 ret = ql_rdwr_indreg32(ha, addr_2, &data, 0);
4934                 if (ret)
4935                         return (0);
4936
4937                 ret = ql_rdwr_indreg32(ha, addr_1, &value_2, 0);
4938                 if (ret)
4939                         return (0);
4940
4941                 /* Poll again */
4942                 wait_count = 0;
4943                 while (wait_count < poll) {
4944
4945                         uint32_t temp;
4946
4947                         ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
4948                         if (ret)
4949                                 return (0);
4950
4951                         if ((temp & mask) != 0) {
4952                                 break;
4953                         }
4954                         wait_count++;
4955                 }
4956                 *data_buff++ = addr_2;
4957                 *data_buff++ = data;
4958         }
4959
4960         /*
4961          * Return the byte count for this entry, even on poll timeout.
4962          */
4963         return (2 * sizeof(uint32_t));
4964 }
4965