1 /*
2  * Copyright (c) 2013-2016 Qlogic Corporation
3  * All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions
7  *  are met:
8  *
9  *  1. Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  *  2. Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  *
15  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  *  POSSIBILITY OF SUCH DAMAGE.
26  */
27
28 /*
29  * File: ql_hw.c
30  * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
31  * Content: Contains Hardware dependent functions
32  */
33
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36
37 #include "ql_os.h"
38 #include "ql_hw.h"
39 #include "ql_def.h"
40 #include "ql_inline.h"
41 #include "ql_ver.h"
42 #include "ql_glbl.h"
43 #include "ql_dbg.h"
44 #include "ql_minidump.h"
45
46 /*
47  * Static Functions
48  */
49
50 static void qla_del_rcv_cntxt(qla_host_t *ha);
51 static int qla_init_rcv_cntxt(qla_host_t *ha);
52 static void qla_del_xmt_cntxt(qla_host_t *ha);
53 static int qla_init_xmt_cntxt(qla_host_t *ha);
54 static int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
55         uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause);
56 static int qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx,
57         uint32_t num_intrs, uint32_t create);
58 static int qla_config_rss(qla_host_t *ha, uint16_t cntxt_id);
59 static int qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id,
60         int tenable, int rcv);
61 static int qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode);
62 static int qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id);
63
64 static int qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd,
65                 uint8_t *hdr);
66 static int qla_hw_add_all_mcast(qla_host_t *ha);
67 static int qla_hw_del_all_mcast(qla_host_t *ha);
68 static int qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds);
69
70 static int qla_init_nic_func(qla_host_t *ha);
71 static int qla_stop_nic_func(qla_host_t *ha);
72 static int qla_query_fw_dcbx_caps(qla_host_t *ha);
73 static int qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits);
74 static int qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits);
75 static void qla_get_quick_stats(qla_host_t *ha);
76 static int qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode);
77 static int qla_get_cam_search_mode(qla_host_t *ha);
78
79 static void ql_minidump_free(qla_host_t *ha);
80
81
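/*
 * Name: qla_sysctl_get_drvr_stats
 * Function: Sysctl handler; writing 1 dumps the driver maintained per-ring
 *      interrupt, receive and transmit counters and the LRO statistics to
 *      the console.
 */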
82 static int
83 qla_sysctl_get_drvr_stats(SYSCTL_HANDLER_ARGS)
84 {
85         int err = 0, ret;
86         qla_host_t *ha;
87         uint32_t i;
88
89         err = sysctl_handle_int(oidp, &ret, 0, req);
90
91         if (err || !req->newptr)
92                 return (err);
93
94         if (ret == 1) {
95
96                 ha = (qla_host_t *)arg1;
97
98                 for (i = 0; i < ha->hw.num_sds_rings; i++) {
99
100                         device_printf(ha->pci_dev,
101                                 "%s: sds_ring[%d] = %p\n", __func__,i,
102                                 (void *)ha->hw.sds[i].intr_count);
103
104                         device_printf(ha->pci_dev,
105                                 "%s: sds_ring[%d].spurious_intr_count = %p\n",
106                                 __func__,
107                                 i, (void *)ha->hw.sds[i].spurious_intr_count);
108
109                         device_printf(ha->pci_dev,
110                                 "%s: sds_ring[%d].rx_free = %d\n", __func__,i,
111                                 ha->hw.sds[i].rx_free);
112                 }
113
114                 for (i = 0; i < ha->hw.num_tx_rings; i++) 
115                         device_printf(ha->pci_dev,
116                                 "%s: tx[%d] = %p\n", __func__,i,
117                                 (void *)ha->tx_ring[i].count);
118
119                 for (i = 0; i < ha->hw.num_rds_rings; i++)
120                         device_printf(ha->pci_dev,
121                                 "%s: rds_ring[%d] = %p\n", __func__,i,
122                                 (void *)ha->hw.rds[i].count);
123
124                 device_printf(ha->pci_dev, "%s: lro_pkt_count = %p\n", __func__,
125                         (void *)ha->lro_pkt_count);
126
127                 device_printf(ha->pci_dev, "%s: lro_bytes = %p\n", __func__,
128                         (void *)ha->lro_bytes);
129
130 #ifdef QL_ENABLE_ISCSI_TLV
131                 device_printf(ha->pci_dev, "%s: iscsi_pkts = %p\n", __func__,
132                         (void *)ha->hw.iscsi_pkt_count);
133 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
134
135         }
136         return (err);
137 }
138
139 static int
140 qla_sysctl_get_quick_stats(SYSCTL_HANDLER_ARGS)
141 {
142         int err, ret = 0;
143         qla_host_t *ha;
144
145         err = sysctl_handle_int(oidp, &ret, 0, req);
146
147         if (err || !req->newptr)
148                 return (err);
149
150         if (ret == 1) {
151                 ha = (qla_host_t *)arg1;
152                 qla_get_quick_stats(ha);
153         }
154         return (err);
155 }
156
157 #ifdef QL_DBG
158
159 static void
160 qla_stop_pegs(qla_host_t *ha)
161 {
162         uint32_t val = 1;
163
164         ql_rdwr_indreg32(ha, Q8_CRB_PEG_0, &val, 0);
165         ql_rdwr_indreg32(ha, Q8_CRB_PEG_1, &val, 0);
166         ql_rdwr_indreg32(ha, Q8_CRB_PEG_2, &val, 0);
167         ql_rdwr_indreg32(ha, Q8_CRB_PEG_3, &val, 0);
168         ql_rdwr_indreg32(ha, Q8_CRB_PEG_4, &val, 0);
169         device_printf(ha->pci_dev, "%s PEGS HALTED!!!!!\n", __func__);
170 }
171
172 static int
173 qla_sysctl_stop_pegs(SYSCTL_HANDLER_ARGS)
174 {
175         int err, ret = 0;
176         qla_host_t *ha;
177         
178         err = sysctl_handle_int(oidp, &ret, 0, req);
179
180
181         if (err || !req->newptr)
182                 return (err);
183
184         if (ret == 1) {
185                 ha = (qla_host_t *)arg1;
186                 QLA_LOCK(ha);
187                 qla_stop_pegs(ha);      
188                 QLA_UNLOCK(ha);
189         }
190
191         return err;
192 }
193 #endif /* #ifdef QL_DBG */
194
195 static int
196 qla_validate_set_port_cfg_bit(uint32_t bits)
197 {
198         if ((bits & 0xF) > 1)
199                 return (-1);
200
201         if (((bits >> 4) & 0xF) > 2)
202                 return (-1);
203
204         if (((bits >> 8) & 0xF) > 2)
205                 return (-1);
206
207         return (0);
208 }
209
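/*
 * Example of a value accepted by qla_validate_set_port_cfg_bit() and by the
 * "port_cfg" sysctl handler below: bits 0-3 select DCBX (0/1), bits 4-7 the
 * pause type (0 = none, 1 = std, 2 = ppm) and bits 8-11 the std pause
 * direction (0 = xmt and rcv, 1 = xmt only, 2 = rcv only), e.g.
 *
 *      cfg = 0x1 | (0x1 << 4) | (0x0 << 8);    // DCBX on, std pause, xmt+rcv
 *
 * A value outside these ranges causes the handler to only read back the
 * current port configuration.
 */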
210 static int
211 qla_sysctl_port_cfg(SYSCTL_HANDLER_ARGS)
212 {
213         int err, ret = 0;
214         qla_host_t *ha;
215         uint32_t cfg_bits;
216
217         err = sysctl_handle_int(oidp, &ret, 0, req);
218
219         if (err || !req->newptr)
220                 return (err);
221
222         if ((qla_validate_set_port_cfg_bit((uint32_t)ret) == 0)) {
223
224                 ha = (qla_host_t *)arg1;
225
226                 err = qla_get_port_config(ha, &cfg_bits);
227
228                 if (err)
229                         goto qla_sysctl_set_port_cfg_exit;
230
231                 if (ret & 0x1) {
232                         cfg_bits |= Q8_PORT_CFG_BITS_DCBX_ENABLE;
233                 } else {
234                         cfg_bits &= ~Q8_PORT_CFG_BITS_DCBX_ENABLE;
235                 }
236
237                 ret = ret >> 4;
238                 cfg_bits &= ~Q8_PORT_CFG_BITS_PAUSE_CFG_MASK;
239
240                 if ((ret & 0xF) == 0) {
241                         cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_DISABLED;
242                 } else if ((ret & 0xF) == 1){
243                         cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_STD;
244                 } else {
245                         cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_PPM;
246                 }
247
248                 ret = ret >> 4;
249                 cfg_bits &= ~Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK;
250
251                 if (ret == 0) {
252                         cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT_RCV;
253                 } else if (ret == 1){
254                         cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT;
255                 } else {
256                         cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_RCV;
257                 }
258
259                 err = qla_set_port_config(ha, cfg_bits);
260         } else {
261                 ha = (qla_host_t *)arg1;
262
263                 err = qla_get_port_config(ha, &cfg_bits);
264         }
265
266 qla_sysctl_set_port_cfg_exit:
267         return err;
268 }
269
270 static int
271 qla_sysctl_set_cam_search_mode(SYSCTL_HANDLER_ARGS)
272 {
273         int err, ret = 0;
274         qla_host_t *ha;
275
276         err = sysctl_handle_int(oidp, &ret, 0, req);
277
278         if (err || !req->newptr)
279                 return (err);
280
281         ha = (qla_host_t *)arg1;
282
283         if ((ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_INTERNAL) ||
284                 (ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_AUTO)) {
285                 err = qla_set_cam_search_mode(ha, (uint32_t)ret);
286         } else {
287                 device_printf(ha->pci_dev, "%s: ret = %d\n", __func__, ret);
288         }
289
290         return (err);
291 }
292
293 static int
294 qla_sysctl_get_cam_search_mode(SYSCTL_HANDLER_ARGS)
295 {
296         int err, ret = 0;
297         qla_host_t *ha;
298
299         err = sysctl_handle_int(oidp, &ret, 0, req);
300
301         if (err || !req->newptr)
302                 return (err);
303
304         ha = (qla_host_t *)arg1;
305         err = qla_get_cam_search_mode(ha);
306
307         return (err);
308 }
309
310
311 /*
312  * Name: ql_hw_add_sysctls
313  * Function: Add P3Plus specific sysctls
314  */
315 void
316 ql_hw_add_sysctls(qla_host_t *ha)
317 {
318         device_t        dev;
319
320         dev = ha->pci_dev;
321
322         ha->hw.num_sds_rings = MAX_SDS_RINGS;
323         ha->hw.num_rds_rings = MAX_RDS_RINGS;
324         ha->hw.num_tx_rings = NUM_TX_RINGS;
325
326         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
327                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
328                 OID_AUTO, "num_rds_rings", CTLFLAG_RD, &ha->hw.num_rds_rings,
329                 ha->hw.num_rds_rings, "Number of Rcv Descriptor Rings");
330
331         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
332                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
333                 OID_AUTO, "num_sds_rings", CTLFLAG_RD, &ha->hw.num_sds_rings,
334                 ha->hw.num_sds_rings, "Number of Status Descriptor Rings");
335
336         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
337                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
338                 OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->hw.num_tx_rings,
339                 ha->hw.num_tx_rings, "Number of Transmit Rings");
340
341         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
342                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
343                 OID_AUTO, "tx_ring_index", CTLFLAG_RW, &ha->txr_idx,
344                 ha->txr_idx, "Tx Ring Used");
345
346         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
347                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
348                 OID_AUTO, "drvr_stats", CTLTYPE_INT | CTLFLAG_RW,
349                 (void *)ha, 0,
350                 qla_sysctl_get_drvr_stats, "I", "Driver Maintained Statistics");
351
352         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
353                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
354                 OID_AUTO, "quick_stats", CTLTYPE_INT | CTLFLAG_RW,
355                 (void *)ha, 0,
356                 qla_sysctl_get_quick_stats, "I", "Quick Statistics");
357
358         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
359                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
360                 OID_AUTO, "max_tx_segs", CTLFLAG_RD, &ha->hw.max_tx_segs,
361                 ha->hw.max_tx_segs, "Max # of Segments in a non-TSO pkt");
362
363         ha->hw.sds_cidx_thres = 32;
364         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
365                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
366                 OID_AUTO, "sds_cidx_thres", CTLFLAG_RW, &ha->hw.sds_cidx_thres,
367                 ha->hw.sds_cidx_thres,
368                 "Number of SDS entries to process before updating"
369                 " SDS Ring Consumer Index");
370
371         ha->hw.rds_pidx_thres = 32;
372         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
373                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
374                 OID_AUTO, "rds_pidx_thres", CTLFLAG_RW, &ha->hw.rds_pidx_thres,
375                 ha->hw.rds_pidx_thres,
376                 "Number of Rcv Rings Entries to post before updating"
377                 " RDS Ring Producer Index");
378
379         ha->hw.rcv_intr_coalesce = (3 << 16) | 256;
380         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
381                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
382                 OID_AUTO, "rcv_intr_coalesce", CTLFLAG_RW,
383                 &ha->hw.rcv_intr_coalesce,
384                 ha->hw.rcv_intr_coalesce,
385                 "Rcv Intr Coalescing Parameters\n"
386                 "\tbits 15:0 max packets\n"
387                 "\tbits 31:16 max micro-seconds to wait\n"
388                 "\tplease run\n"
389                 "\tifconfig <if> down && ifconfig <if> up\n"
390                 "\tto take effect \n");
391
392         ha->hw.xmt_intr_coalesce = (64 << 16) | 64;
393         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
394                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
395                 OID_AUTO, "xmt_intr_coalesce", CTLFLAG_RW,
396                 &ha->hw.xmt_intr_coalesce,
397                 ha->hw.xmt_intr_coalesce,
398                 "Xmt Intr Coalescing Parameters\n"
399                 "\tbits 15:0 max packets\n"
400                 "\tbits 31:16 max micro-seconds to wait\n"
401                 "\tplease run\n"
402                 "\tifconfig <if> down && ifconfig <if> up\n"
403                 "\tto take effect \n");
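        /*
         * Worked example of the encoding described above: the receive
         * default (3 << 16) | 256 means interrupt after at most 256
         * packets with a maximum wait of 3 micro-seconds; the transmit
         * default (64 << 16) | 64 means at most 64 packets / 64
         * micro-seconds. Changes take effect only after the interface
         * is brought down and up again.
         */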
404
405         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
406                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
407                 OID_AUTO, "port_cfg", CTLTYPE_INT | CTLFLAG_RW,
408                 (void *)ha, 0,
409                 qla_sysctl_port_cfg, "I",
410                         "Set Port Configuration if the value is valid per "
411                         "the bits below, otherwise Get Port Configuration\n"
412                         "\tBits 0-3 : 1 = DCBX Enable; 0 = DCBX Disable\n"
413                         "\tBits 4-7 : 0 = no pause; 1 = std; 2 = ppm\n"
414                         "\tBits 8-11: std pause cfg; 0 = xmt and rcv;"
415                         " 1 = xmt only; 2 = rcv only;\n"
416                 );
417
418         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
419                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
420                 OID_AUTO, "set_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW,
421                 (void *)ha, 0,
422                 qla_sysctl_set_cam_search_mode, "I",
423                         "Set CAM Search Mode\n"
424                         "\t 1 = search mode internal\n"
425                         "\t 2 = search mode auto\n");
426
427         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
428                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
429                 OID_AUTO, "get_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW,
430                 (void *)ha, 0,
431                 qla_sysctl_get_cam_search_mode, "I",
432                         "Get CAM Search Mode\n"
433                         "\t 1 = search mode internal\n"
434                         "\t 2 = search mode auto\n");
435
436         ha->hw.enable_9kb = 1;
437
438         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
439                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
440                 OID_AUTO, "enable_9kb", CTLFLAG_RW, &ha->hw.enable_9kb,
441                 ha->hw.enable_9kb, "Enable 9Kbyte Buffers when MTU = 9000");
442
443         ha->hw.enable_hw_lro = 1;
444
445         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
446                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
447                 OID_AUTO, "enable_hw_lro", CTLFLAG_RW, &ha->hw.enable_hw_lro,
448                 ha->hw.enable_hw_lro, "Enable Hardware LRO; Default is true \n"
449                 "\t 1 : Hardware LRO if LRO is enabled\n"
450                 "\t 0 : Software LRO if LRO is enabled\n"
451                 "\t Any change requires ifconfig down/up to take effect\n"
452                 "\t Note that LRO may be turned off/on via ifconfig\n");
453
454         ha->hw.mdump_active = 0;
455         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
456                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
457                 OID_AUTO, "minidump_active", CTLFLAG_RW, &ha->hw.mdump_active,
458                 ha->hw.mdump_active,
459                 "Minidump retrieval is Active");
460
461         ha->hw.mdump_done = 0;
462         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
463                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
464                 OID_AUTO, "mdump_done", CTLFLAG_RW,
465                 &ha->hw.mdump_done, ha->hw.mdump_done,
466                 "Minidump has completed and is available for retrieval");
467
468         ha->hw.mdump_capture_mask = 0xF;
469         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
470                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
471                 OID_AUTO, "minidump_capture_mask", CTLFLAG_RW,
472                 &ha->hw.mdump_capture_mask, ha->hw.mdump_capture_mask,
473                 "Minidump capture mask");
474 #ifdef QL_DBG
475
476         ha->err_inject = 0;
477         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
478                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
479                 OID_AUTO, "err_inject",
480                 CTLFLAG_RW, &ha->err_inject, ha->err_inject,
481                 "Error to be injected\n"
482                 "\t\t\t 0: No Errors\n"
483                 "\t\t\t 1: rcv: rxb struct invalid\n"
484                 "\t\t\t 2: rcv: mp == NULL\n"
485                 "\t\t\t 3: lro: rxb struct invalid\n"
486                 "\t\t\t 4: lro: mp == NULL\n"
487                 "\t\t\t 5: rcv: num handles invalid\n"
488                 "\t\t\t 6: reg: indirect reg rd_wr failure\n"
489                 "\t\t\t 7: ocm: offchip memory rd_wr failure\n"
490                 "\t\t\t 8: mbx: mailbox command failure\n"
491                 "\t\t\t 9: heartbeat failure\n"
492                 "\t\t\t 10: temperature failure\n"
493                 "\t\t\t 11: m_getcl or m_getjcl failure\n" );
494
495         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
496                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
497                 OID_AUTO, "peg_stop", CTLTYPE_INT | CTLFLAG_RW,
498                 (void *)ha, 0,
499                 qla_sysctl_stop_pegs, "I", "Peg Stop");
500
501 #endif /* #ifdef QL_DBG */
502
503         ha->hw.user_pri_nic = 0;
504         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
505                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
506                 OID_AUTO, "user_pri_nic", CTLFLAG_RW, &ha->hw.user_pri_nic,
507                 ha->hw.user_pri_nic,
508                 "VLAN Tag User Priority for Normal Ethernet Packets");
509
510         ha->hw.user_pri_iscsi = 4;
511         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
512                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
513                 OID_AUTO, "user_pri_iscsi", CTLFLAG_RW, &ha->hw.user_pri_iscsi,
514                 ha->hw.user_pri_iscsi,
515                 "VLAN Tag User Priority for iSCSI Packets");
516
517 }
518
519 void
520 ql_hw_link_status(qla_host_t *ha)
521 {
522         device_printf(ha->pci_dev, "cable_oui\t\t 0x%08x\n", ha->hw.cable_oui);
523
524         if (ha->hw.link_up) {
525                 device_printf(ha->pci_dev, "link Up\n");
526         } else {
527                 device_printf(ha->pci_dev, "link Down\n");
528         }
529
530         if (ha->hw.flags.fduplex) {
531                 device_printf(ha->pci_dev, "Full Duplex\n");
532         } else {
533                 device_printf(ha->pci_dev, "Half Duplex\n");
534         }
535
536         if (ha->hw.flags.autoneg) {
537                 device_printf(ha->pci_dev, "Auto Negotiation Enabled\n");
538         } else {
539                 device_printf(ha->pci_dev, "Auto Negotiation Disabled\n");
540         }
541
542         switch (ha->hw.link_speed) {
543         case 0x710:
544                 device_printf(ha->pci_dev, "link speed\t\t 10Gbps\n");
545                 break;
546
547         case 0x3E8:
548                 device_printf(ha->pci_dev, "link speed\t\t 1Gbps\n");
549                 break;
550
551         case 0x64:
552                 device_printf(ha->pci_dev, "link speed\t\t 100Mbps\n");
553                 break;
554
555         default:
556                 device_printf(ha->pci_dev, "link speed\t\t Unknown\n");
557                 break;
558         }
559
560         switch (ha->hw.module_type) {
561
562         case 0x01:
563                 device_printf(ha->pci_dev, "Module Type 10GBase-LRM\n");
564                 break;
565
566         case 0x02:
567                 device_printf(ha->pci_dev, "Module Type 10GBase-LR\n");
568                 break;
569
570         case 0x03:
571                 device_printf(ha->pci_dev, "Module Type 10GBase-SR\n");
572                 break;
573
574         case 0x04:
575                 device_printf(ha->pci_dev,
576                         "Module Type 10GE Passive Copper(Compliant)[%d m]\n",
577                         ha->hw.cable_length);
578                 break;
579
580         case 0x05:
581                 device_printf(ha->pci_dev, "Module Type 10GE Active"
582                         " Limiting Copper(Compliant)[%d m]\n",
583                         ha->hw.cable_length);
584                 break;
585
586         case 0x06:
587                 device_printf(ha->pci_dev,
588                         "Module Type 10GE Passive Copper"
589                         " (Legacy, Best Effort)[%d m]\n",
590                         ha->hw.cable_length);
591                 break;
592
593         case 0x07:
594                 device_printf(ha->pci_dev, "Module Type 1000Base-SX\n");
595                 break;
596
597         case 0x08:
598                 device_printf(ha->pci_dev, "Module Type 1000Base-LX\n");
599                 break;
600
601         case 0x09:
602                 device_printf(ha->pci_dev, "Module Type 1000Base-CX\n");
603                 break;
604
605         case 0x0A:
606                 device_printf(ha->pci_dev, "Module Type 1000Base-T\n");
607                 break;
608
609         case 0x0B:
610                 device_printf(ha->pci_dev, "Module Type 1GE Passive Copper"
611                         "(Legacy, Best Effort)\n");
612                 break;
613
614         default:
615                 device_printf(ha->pci_dev, "Unknown Module Type 0x%x\n",
616                         ha->hw.module_type);
617                 break;
618         }
619
620         if (ha->hw.link_faults == 1)
621                 device_printf(ha->pci_dev, "SFP Power Fault\n");
622 }
623
624 /*
625  * Name: ql_free_dma
626  * Function: Frees the DMA'able memory allocated in ql_alloc_dma()
627  */
628 void
629 ql_free_dma(qla_host_t *ha)
630 {
631         uint32_t i;
632
633         if (ha->hw.dma_buf.flags.sds_ring) {
634                 for (i = 0; i < ha->hw.num_sds_rings; i++) {
635                         ql_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]);
636                 }
637                 ha->hw.dma_buf.flags.sds_ring = 0;
638         }
639
640         if (ha->hw.dma_buf.flags.rds_ring) {
641                 for (i = 0; i < ha->hw.num_rds_rings; i++) {
642                         ql_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]);
643                 }
644                 ha->hw.dma_buf.flags.rds_ring = 0;
645         }
646
647         if (ha->hw.dma_buf.flags.tx_ring) {
648                 ql_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring);
649                 ha->hw.dma_buf.flags.tx_ring = 0;
650         }
651         ql_minidump_free(ha);
652 }
653
654 /*
655  * Name: ql_alloc_dma
656  * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
657  */
658 int
659 ql_alloc_dma(qla_host_t *ha)
660 {
661         device_t                dev;
662         uint32_t                i, j, size, tx_ring_size;
663         qla_hw_t                *hw;
664         qla_hw_tx_cntxt_t       *tx_cntxt;
665         uint8_t                 *vaddr;
666         bus_addr_t              paddr;
667
668         dev = ha->pci_dev;
669
670         QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));
671
672         hw = &ha->hw;
673         /*
674          * Allocate Transmit Ring
675          */
676         tx_ring_size = (sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS);
677         size = (tx_ring_size * ha->hw.num_tx_rings);
678
679         hw->dma_buf.tx_ring.alignment = 8;
680         hw->dma_buf.tx_ring.size = size + PAGE_SIZE;
681         
682         if (ql_alloc_dmabuf(ha, &hw->dma_buf.tx_ring)) {
683                 device_printf(dev, "%s: tx ring alloc failed\n", __func__);
684                 goto ql_alloc_dma_exit;
685         }
686
687         vaddr = (uint8_t *)hw->dma_buf.tx_ring.dma_b;
688         paddr = hw->dma_buf.tx_ring.dma_addr;
689         
690         for (i = 0; i < ha->hw.num_tx_rings; i++) {
691                 tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];
692
693                 tx_cntxt->tx_ring_base = (q80_tx_cmd_t *)vaddr;
694                 tx_cntxt->tx_ring_paddr = paddr;
695
696                 vaddr += tx_ring_size;
697                 paddr += tx_ring_size;
698         }
699
700         for (i = 0; i < ha->hw.num_tx_rings; i++) {
701                 tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];
702
703                 tx_cntxt->tx_cons = (uint32_t *)vaddr;
704                 tx_cntxt->tx_cons_paddr = paddr;
705
706                 vaddr += sizeof (uint32_t);
707                 paddr += sizeof (uint32_t);
708         }
709
710         ha->hw.dma_buf.flags.tx_ring = 1;
711
712         QL_DPRINT2(ha, (dev, "%s: tx_ring phys %p virt %p\n",
713                 __func__, (void *)(hw->dma_buf.tx_ring.dma_addr),
714                 hw->dma_buf.tx_ring.dma_b));
715         /*
716          * Allocate Receive Descriptor Rings
717          */
718
719         for (i = 0; i < hw->num_rds_rings; i++) {
720
721                 hw->dma_buf.rds_ring[i].alignment = 8;
722                 hw->dma_buf.rds_ring[i].size =
723                         (sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS;
724
725                 if (ql_alloc_dmabuf(ha, &hw->dma_buf.rds_ring[i])) {
726                         device_printf(dev, "%s: rds ring[%d] alloc failed\n",
727                                 __func__, i);
728
729                         for (j = 0; j < i; j++)
730                                 ql_free_dmabuf(ha, &hw->dma_buf.rds_ring[j]);
731
732                         goto ql_alloc_dma_exit;
733                 }
734                 QL_DPRINT4(ha, (dev, "%s: rx_ring[%d] phys %p virt %p\n",
735                         __func__, i, (void *)(hw->dma_buf.rds_ring[i].dma_addr),
736                         hw->dma_buf.rds_ring[i].dma_b));
737         }
738
739         hw->dma_buf.flags.rds_ring = 1;
740
741         /*
742          * Allocate Status Descriptor Rings
743          */
744
745         for (i = 0; i < hw->num_sds_rings; i++) {
746                 hw->dma_buf.sds_ring[i].alignment = 8;
747                 hw->dma_buf.sds_ring[i].size =
748                         (sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS;
749
750                 if (ql_alloc_dmabuf(ha, &hw->dma_buf.sds_ring[i])) {
751                         device_printf(dev, "%s: sds ring alloc failed\n",
752                                 __func__);
753
754                         for (j = 0; j < i; j++)
755                                 ql_free_dmabuf(ha, &hw->dma_buf.sds_ring[j]);
756
757                         goto ql_alloc_dma_exit;
758                 }
759                 QL_DPRINT4(ha, (dev, "%s: sds_ring[%d] phys %p virt %p\n",
760                         __func__, i,
761                         (void *)(hw->dma_buf.sds_ring[i].dma_addr),
762                         hw->dma_buf.sds_ring[i].dma_b));
763         }
764         for (i = 0; i < hw->num_sds_rings; i++) {
765                 hw->sds[i].sds_ring_base =
766                         (q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b;
767         }
768
769         hw->dma_buf.flags.sds_ring = 1;
770
771         return 0;
772
773 ql_alloc_dma_exit:
774         ql_free_dma(ha);
775         return -1;
776 }
777
778 #define Q8_MBX_MSEC_DELAY       5000
779
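/*
 * Name: qla_mbx_cmd
 * Function: Issues a mailbox command to the firmware.
 *      Waits for the host mailbox to become free, writes n_hmbox words
 *      starting at Q8_HOST_MBOX0, rings the host mailbox doorbell, then
 *      polls for the firmware completion and copies n_fwmbox response
 *      words from Q8_FW_MBOX0. no_pause selects busy-waiting instead of
 *      sleeping while polling. Returns non-zero on timeout or failure and
 *      requests driver recovery.
 */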
780 static int
781 qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
782         uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause)
783 {
784         uint32_t i;
785         uint32_t data;
786         int ret = 0;
787
788         if (QL_ERR_INJECT(ha, INJCT_MBX_CMD_FAILURE)) {
789                 ret = -3;
790                 ha->qla_initiate_recovery = 1;
791                 goto exit_qla_mbx_cmd;
792         }
793
794         if (no_pause)
795                 i = 1000;
796         else
797                 i = Q8_MBX_MSEC_DELAY;
798
799         while (i) {
800                 data = READ_REG32(ha, Q8_HOST_MBOX_CNTRL);
801                 if (data == 0)
802                         break;
803                 if (no_pause) {
804                         DELAY(1000);
805                 } else {
806                         qla_mdelay(__func__, 1);
807                 }
808                 i--;
809         }
810
811         if (i == 0) {
812                 device_printf(ha->pci_dev, "%s: host_mbx_cntrl 0x%08x\n",
813                         __func__, data);
814                 ret = -1;
815                 ha->qla_initiate_recovery = 1;
816                 goto exit_qla_mbx_cmd;
817         }
818
819         for (i = 0; i < n_hmbox; i++) {
820                 WRITE_REG32(ha, (Q8_HOST_MBOX0 + (i << 2)), *h_mbox);
821                 h_mbox++;
822         }
823
824         WRITE_REG32(ha, Q8_HOST_MBOX_CNTRL, 0x1);
825
826
827         i = Q8_MBX_MSEC_DELAY;
828         while (i) {
829                 data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);
830
831                 if ((data & 0x3) == 1) {
832                         data = READ_REG32(ha, Q8_FW_MBOX0);
833                         if ((data & 0xF000) != 0x8000)
834                                 break;
835                 }
836                 if (no_pause) {
837                         DELAY(1000);
838                 } else {
839                         qla_mdelay(__func__, 1);
840                 }
841                 i--;
842         }
843         if (i == 0) {
844                 device_printf(ha->pci_dev, "%s: fw_mbx_cntrl 0x%08x\n",
845                         __func__, data);
846                 ret = -2;
847                 ha->qla_initiate_recovery = 1;
848                 goto exit_qla_mbx_cmd;
849         }
850
851         for (i = 0; i < n_fwmbox; i++) {
852                 *fw_mbox++ = READ_REG32(ha, (Q8_FW_MBOX0 + (i << 2)));
853         }
854
855         WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
856         WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
857
858 exit_qla_mbx_cmd:
859         return (ret);
860 }
861
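/*
 * Name: qla_get_nic_partition
 * Function: Queries the NIC partition attributes from the firmware and
 *      optionally returns whether 9KB receive buffers are supported and
 *      the number of receive queues for this partition.
 */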
862 int
863 qla_get_nic_partition(qla_host_t *ha, uint32_t *supports_9kb,
864         uint32_t *num_rcvq)
865 {
866         uint32_t *mbox, err;
867         device_t dev = ha->pci_dev;
868
869         bzero(ha->hw.mbox, (sizeof (uint32_t) * Q8_NUM_MBOX));
870
871         mbox = ha->hw.mbox;
872
873         mbox[0] = Q8_MBX_GET_NIC_PARTITION | (0x2 << 16) | (0x2 << 29); 
874
875         if (qla_mbx_cmd(ha, mbox, 2, mbox, 19, 0)) {
876                 device_printf(dev, "%s: failed0\n", __func__);
877                 return (-1);
878         }
879         err = mbox[0] >> 25; 
880
881         if (supports_9kb != NULL) {
882                 if (mbox[16] & 0x80) /* bit 7 of mbox 16 */
883                         *supports_9kb = 1;
884                 else
885                         *supports_9kb = 0;
886         }
887
888         if (num_rcvq != NULL)
889                 *num_rcvq =  ((mbox[6] >> 16) & 0xFFFF);
890
891         if ((err != 1) && (err != 0)) {
892                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
893                 return (-1);
894         }
895         return 0;
896 }
897
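/*
 * Name: qla_config_intr_cntxt
 * Function: Creates or deletes num_intrs MSI-X interrupt contexts in the
 *      firmware starting at start_idx. On create, the interrupt ids and
 *      sources returned by the firmware are saved in ha->hw.intr_id[] and
 *      ha->hw.intr_src[].
 */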
898 static int
899 qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx, uint32_t num_intrs,
900         uint32_t create)
901 {
902         uint32_t i, err;
903         device_t dev = ha->pci_dev;
904         q80_config_intr_t *c_intr;
905         q80_config_intr_rsp_t *c_intr_rsp;
906
907         c_intr = (q80_config_intr_t *)ha->hw.mbox;
908         bzero(c_intr, (sizeof (q80_config_intr_t)));
909
910         c_intr->opcode = Q8_MBX_CONFIG_INTR;
911
912         c_intr->count_version = (sizeof (q80_config_intr_t) >> 2);
913         c_intr->count_version |= Q8_MBX_CMD_VERSION;
914
915         c_intr->nentries = num_intrs;
916
917         for (i = 0; i < num_intrs; i++) {
918                 if (create) {
919                         c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_CREATE;
920                         c_intr->intr[i].msix_index = start_idx + 1 + i;
921                 } else {
922                         c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_DELETE;
923                         c_intr->intr[i].msix_index =
924                                 ha->hw.intr_id[(start_idx + i)];
925                 }
926
927                 c_intr->intr[i].cmd_type |= Q8_MBX_CONFIG_INTR_TYPE_MSI_X;
928         }
929
930         if (qla_mbx_cmd(ha, (uint32_t *)c_intr,
931                 (sizeof (q80_config_intr_t) >> 2),
932                 ha->hw.mbox, (sizeof (q80_config_intr_rsp_t) >> 2), 0)) {
933                 device_printf(dev, "%s: failed0\n", __func__);
934                 return (-1);
935         }
936
937         c_intr_rsp = (q80_config_intr_rsp_t *)ha->hw.mbox;
938
939         err = Q8_MBX_RSP_STATUS(c_intr_rsp->regcnt_status);
940
941         if (err) {
942                 device_printf(dev, "%s: failed1 [0x%08x, %d]\n", __func__, err,
943                         c_intr_rsp->nentries);
944
945                 for (i = 0; i < c_intr_rsp->nentries; i++) {
946                         device_printf(dev, "%s: [%d]:[0x%x 0x%x 0x%x]\n",
947                                 __func__, i, 
948                                 c_intr_rsp->intr[i].status,
949                                 c_intr_rsp->intr[i].intr_id,
950                                 c_intr_rsp->intr[i].intr_src);
951                 }
952
953                 return (-1);
954         }
955
956         for (i = 0; ((i < num_intrs) && create); i++) {
957                 if (!c_intr_rsp->intr[i].status) {
958                         ha->hw.intr_id[(start_idx + i)] =
959                                 c_intr_rsp->intr[i].intr_id;
960                         ha->hw.intr_src[(start_idx + i)] =
961                                 c_intr_rsp->intr[i].intr_src;
962                 }
963         }
964
965         return (0);
966 }
967
968 /*
969  * Name: qla_config_rss
970  * Function: Configure RSS for the context/interface.
971  */
972 static const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL,
973                         0x8030f20c77cb2da3ULL,
974                         0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
975                         0x255b0ec26d5a56daULL };
976
977 static int
978 qla_config_rss(qla_host_t *ha, uint16_t cntxt_id)
979 {
980         q80_config_rss_t        *c_rss;
981         q80_config_rss_rsp_t    *c_rss_rsp;
982         uint32_t                err, i;
983         device_t                dev = ha->pci_dev;
984
985         c_rss = (q80_config_rss_t *)ha->hw.mbox;
986         bzero(c_rss, (sizeof (q80_config_rss_t)));
987
988         c_rss->opcode = Q8_MBX_CONFIG_RSS;
989
990         c_rss->count_version = (sizeof (q80_config_rss_t) >> 2);
991         c_rss->count_version |= Q8_MBX_CMD_VERSION;
992
993         c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP_IP |
994                                 Q8_MBX_RSS_HASH_TYPE_IPV6_TCP_IP);
995         //c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP |
996         //                      Q8_MBX_RSS_HASH_TYPE_IPV6_TCP);
997
998         c_rss->flags = Q8_MBX_RSS_FLAGS_ENABLE_RSS;
999         c_rss->flags |= Q8_MBX_RSS_FLAGS_USE_IND_TABLE;
1000
1001         c_rss->indtbl_mask = Q8_MBX_RSS_INDTBL_MASK;
1002
1003         c_rss->indtbl_mask |= Q8_MBX_RSS_FLAGS_MULTI_RSS_VALID;
1004         c_rss->flags |= Q8_MBX_RSS_FLAGS_TYPE_CRSS;
1005
1006         c_rss->cntxt_id = cntxt_id;
1007
1008         for (i = 0; i < 5; i++) {
1009                 c_rss->rss_key[i] = rss_key[i];
1010         }
1011
1012         if (qla_mbx_cmd(ha, (uint32_t *)c_rss,
1013                 (sizeof (q80_config_rss_t) >> 2),
1014                 ha->hw.mbox, (sizeof(q80_config_rss_rsp_t) >> 2), 0)) {
1015                 device_printf(dev, "%s: failed0\n", __func__);
1016                 return (-1);
1017         }
1018         c_rss_rsp = (q80_config_rss_rsp_t *)ha->hw.mbox;
1019
1020         err = Q8_MBX_RSP_STATUS(c_rss_rsp->regcnt_status);
1021
1022         if (err) {
1023                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1024                 return (-1);
1025         }
1026         return 0;
1027 }
1028
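/*
 * Name: qla_set_rss_ind_table
 * Function: Programs 'count' entries of the RSS indirection table for the
 *      given receive context, starting at index start_idx.
 */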
1029 static int
1030 qla_set_rss_ind_table(qla_host_t *ha, uint32_t start_idx, uint32_t count,
1031         uint16_t cntxt_id, uint8_t *ind_table)
1032 {
1033         q80_config_rss_ind_table_t      *c_rss_ind;
1034         q80_config_rss_ind_table_rsp_t  *c_rss_ind_rsp;
1035         uint32_t                        err;
1036         device_t                        dev = ha->pci_dev;
1037
1038         if ((count > Q8_RSS_IND_TBL_SIZE) ||
1039                 ((start_idx + count - 1) > Q8_RSS_IND_TBL_MAX_IDX)) {
1040                 device_printf(dev, "%s: illegal count [%d, %d]\n", __func__,
1041                         start_idx, count);
1042                 return (-1);
1043         }
1044
1045         c_rss_ind = (q80_config_rss_ind_table_t *)ha->hw.mbox;
1046         bzero(c_rss_ind, sizeof (q80_config_rss_ind_table_t));
1047
1048         c_rss_ind->opcode = Q8_MBX_CONFIG_RSS_TABLE;
1049         c_rss_ind->count_version = (sizeof (q80_config_rss_ind_table_t) >> 2);
1050         c_rss_ind->count_version |= Q8_MBX_CMD_VERSION;
1051
1052         c_rss_ind->start_idx = start_idx;
1053         c_rss_ind->end_idx = start_idx + count - 1;
1054         c_rss_ind->cntxt_id = cntxt_id;
1055         bcopy(ind_table, c_rss_ind->ind_table, count);
1056
1057         if (qla_mbx_cmd(ha, (uint32_t *)c_rss_ind,
1058                 (sizeof (q80_config_rss_ind_table_t) >> 2), ha->hw.mbox,
1059                 (sizeof(q80_config_rss_ind_table_rsp_t) >> 2), 0)) {
1060                 device_printf(dev, "%s: failed0\n", __func__);
1061                 return (-1);
1062         }
1063
1064         c_rss_ind_rsp = (q80_config_rss_ind_table_rsp_t *)ha->hw.mbox;
1065         err = Q8_MBX_RSP_STATUS(c_rss_ind_rsp->regcnt_status);
1066
1067         if (err) {
1068                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1069                 return (-1);
1070         }
1071         return 0;
1072 }
1073
1074 /*
1075  * Name: qla_config_intr_coalesce
1076  * Function: Configure Interrupt Coalescing.
1077  */
1078 static int
1079 qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable,
1080         int rcv)
1081 {
1082         q80_config_intr_coalesc_t       *intrc;
1083         q80_config_intr_coalesc_rsp_t   *intrc_rsp;
1084         uint32_t                        err, i;
1085         device_t                        dev = ha->pci_dev;
1086         
1087         intrc = (q80_config_intr_coalesc_t *)ha->hw.mbox;
1088         bzero(intrc, (sizeof (q80_config_intr_coalesc_t)));
1089
1090         intrc->opcode = Q8_MBX_CONFIG_INTR_COALESCE;
1091         intrc->count_version = (sizeof (q80_config_intr_coalesc_t) >> 2);
1092         intrc->count_version |= Q8_MBX_CMD_VERSION;
1093
1094         if (rcv) {
1095                 intrc->flags = Q8_MBX_INTRC_FLAGS_RCV;
1096                 intrc->max_pkts = ha->hw.rcv_intr_coalesce & 0xFFFF;
1097                 intrc->max_mswait = (ha->hw.rcv_intr_coalesce >> 16) & 0xFFFF;
1098         } else {
1099                 intrc->flags = Q8_MBX_INTRC_FLAGS_XMT;
1100                 intrc->max_pkts = ha->hw.xmt_intr_coalesce & 0xFFFF;
1101                 intrc->max_mswait = (ha->hw.xmt_intr_coalesce >> 16) & 0xFFFF;
1102         }
1103
1104         intrc->cntxt_id = cntxt_id;
1105
1106         if (tenable) {
1107                 intrc->flags |= Q8_MBX_INTRC_FLAGS_PERIODIC;
1108                 intrc->timer_type = Q8_MBX_INTRC_TIMER_PERIODIC;
1109
1110                 for (i = 0; i < ha->hw.num_sds_rings; i++) {
1111                         intrc->sds_ring_mask |= (1 << i);
1112                 }
1113                 intrc->ms_timeout = 1000;
1114         }
1115
1116         if (qla_mbx_cmd(ha, (uint32_t *)intrc,
1117                 (sizeof (q80_config_intr_coalesc_t) >> 2),
1118                 ha->hw.mbox, (sizeof(q80_config_intr_coalesc_rsp_t) >> 2), 0)) {
1119                 device_printf(dev, "%s: failed0\n", __func__);
1120                 return (-1);
1121         }
1122         intrc_rsp = (q80_config_intr_coalesc_rsp_t *)ha->hw.mbox;
1123
1124         err = Q8_MBX_RSP_STATUS(intrc_rsp->regcnt_status);
1125
1126         if (err) {
1127                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1128                 return (-1);
1129         }
1130         
1131         return 0;
1132 }
1133
1134
1135 /*
1136  * Name: qla_config_mac_addr
1137  * Function: binds a MAC address to the context/interface.
1138  *      Can be unicast, multicast or broadcast.
1139  */
1140 static int
1141 qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac,
1142         uint32_t num_mac)
1143 {
1144         q80_config_mac_addr_t           *cmac;
1145         q80_config_mac_addr_rsp_t       *cmac_rsp;
1146         uint32_t                        err;
1147         device_t                        dev = ha->pci_dev;
1148         int                             i;
1149         uint8_t                         *mac_cpy = mac_addr;
1150
1151         if (num_mac > Q8_MAX_MAC_ADDRS) {
1152                 device_printf(dev, "%s: %s num_mac [0x%x] > Q8_MAX_MAC_ADDRS\n",
1153                         __func__, (add_mac ? "Add" : "Del"), num_mac);
1154                 return (-1);
1155         }
1156
1157         cmac = (q80_config_mac_addr_t *)ha->hw.mbox;
1158         bzero(cmac, (sizeof (q80_config_mac_addr_t)));
1159
1160         cmac->opcode = Q8_MBX_CONFIG_MAC_ADDR;
1161         cmac->count_version = sizeof (q80_config_mac_addr_t) >> 2;
1162         cmac->count_version |= Q8_MBX_CMD_VERSION;
1163
1164         if (add_mac) 
1165                 cmac->cmd = Q8_MBX_CMAC_CMD_ADD_MAC_ADDR;
1166         else
1167                 cmac->cmd = Q8_MBX_CMAC_CMD_DEL_MAC_ADDR;
1168                 
1169         cmac->cmd |= Q8_MBX_CMAC_CMD_CAM_INGRESS;
1170
1171         cmac->nmac_entries = num_mac;
1172         cmac->cntxt_id = ha->hw.rcv_cntxt_id;
1173
1174         for (i = 0; i < num_mac; i++) {
1175                 bcopy(mac_addr, cmac->mac_addr[i].addr, Q8_ETHER_ADDR_LEN); 
1176                 mac_addr = mac_addr + ETHER_ADDR_LEN;
1177         }
1178
1179         if (qla_mbx_cmd(ha, (uint32_t *)cmac,
1180                 (sizeof (q80_config_mac_addr_t) >> 2),
1181                 ha->hw.mbox, (sizeof(q80_config_mac_addr_rsp_t) >> 2), 1)) {
1182                 device_printf(dev, "%s: %s failed0\n", __func__,
1183                         (add_mac ? "Add" : "Del"));
1184                 return (-1);
1185         }
1186         cmac_rsp = (q80_config_mac_addr_rsp_t *)ha->hw.mbox;
1187
1188         err = Q8_MBX_RSP_STATUS(cmac_rsp->regcnt_status);
1189
1190         if (err) {
1191                 device_printf(dev, "%s: %s failed1 [0x%08x]\n", __func__,
1192                         (add_mac ? "Add" : "Del"), err);
1193                 for (i = 0; i < num_mac; i++) {
1194                         device_printf(dev, "%s: %02x:%02x:%02x:%02x:%02x:%02x\n",
1195                                 __func__, mac_cpy[0], mac_cpy[1], mac_cpy[2],
1196                                 mac_cpy[3], mac_cpy[4], mac_cpy[5]);
1197                         mac_cpy += ETHER_ADDR_LEN;
1198                 }
1199                 return (-1);
1200         }
1201         
1202         return 0;
1203 }
1204
1205
1206 /*
1207  * Name: qla_set_mac_rcv_mode
1208  * Function: Enable/Disable AllMulticast and Promiscuous Modes.
1209  */
1210 static int
1211 qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode)
1212 {
1213         q80_config_mac_rcv_mode_t       *rcv_mode;
1214         uint32_t                        err;
1215         q80_config_mac_rcv_mode_rsp_t   *rcv_mode_rsp;
1216         device_t                        dev = ha->pci_dev;
1217
1218         rcv_mode = (q80_config_mac_rcv_mode_t *)ha->hw.mbox;
1219         bzero(rcv_mode, (sizeof (q80_config_mac_rcv_mode_t)));
1220
1221         rcv_mode->opcode = Q8_MBX_CONFIG_MAC_RX_MODE;
1222         rcv_mode->count_version = sizeof (q80_config_mac_rcv_mode_t) >> 2;
1223         rcv_mode->count_version |= Q8_MBX_CMD_VERSION;
1224
1225         rcv_mode->mode = mode;
1226
1227         rcv_mode->cntxt_id = ha->hw.rcv_cntxt_id;
1228
1229         if (qla_mbx_cmd(ha, (uint32_t *)rcv_mode,
1230                 (sizeof (q80_config_mac_rcv_mode_t) >> 2),
1231                 ha->hw.mbox, (sizeof(q80_config_mac_rcv_mode_rsp_t) >> 2), 1)) {
1232                 device_printf(dev, "%s: failed0\n", __func__);
1233                 return (-1);
1234         }
1235         rcv_mode_rsp = (q80_config_mac_rcv_mode_rsp_t *)ha->hw.mbox;
1236
1237         err = Q8_MBX_RSP_STATUS(rcv_mode_rsp->regcnt_status);
1238
1239         if (err) {
1240                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1241                 return (-1);
1242         }
1243         
1244         return 0;
1245 }
1246
1247 int
1248 ql_set_promisc(qla_host_t *ha)
1249 {
1250         int ret;
1251
1252         ha->hw.mac_rcv_mode |= Q8_MBX_MAC_RCV_PROMISC_ENABLE;
1253         ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1254         return (ret);
1255 }
1256
1257 void
1258 qla_reset_promisc(qla_host_t *ha)
1259 {
1260         ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_RCV_PROMISC_ENABLE;
1261         (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1262 }
1263
1264 int
1265 ql_set_allmulti(qla_host_t *ha)
1266 {
1267         int ret;
1268
1269         ha->hw.mac_rcv_mode |= Q8_MBX_MAC_ALL_MULTI_ENABLE;
1270         ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1271         return (ret);
1272 }
1273
1274 void
1275 qla_reset_allmulti(qla_host_t *ha)
1276 {
1277         ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_ALL_MULTI_ENABLE;
1278         (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1279 }
1280
1281 /*
1282  * Name: ql_set_max_mtu
1283  * Function:
1284  *      Sets the maximum transmission unit (MTU) for the specified rcv context.
1285  */
1286 int
1287 ql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id)
1288 {
1289         device_t                dev;
1290         q80_set_max_mtu_t       *max_mtu;
1291         q80_set_max_mtu_rsp_t   *max_mtu_rsp;
1292         uint32_t                err;
1293
1294         dev = ha->pci_dev;
1295
1296         max_mtu = (q80_set_max_mtu_t *)ha->hw.mbox;
1297         bzero(max_mtu, (sizeof (q80_set_max_mtu_t)));
1298
1299         max_mtu->opcode = Q8_MBX_SET_MAX_MTU;
1300         max_mtu->count_version = (sizeof (q80_set_max_mtu_t) >> 2);
1301         max_mtu->count_version |= Q8_MBX_CMD_VERSION;
1302
1303         max_mtu->cntxt_id = cntxt_id;
1304         max_mtu->mtu = mtu;
1305
1306         if (qla_mbx_cmd(ha, (uint32_t *)max_mtu,
1307                 (sizeof (q80_set_max_mtu_t) >> 2),
1308                 ha->hw.mbox, (sizeof (q80_set_max_mtu_rsp_t) >> 2), 1)) {
1309                 device_printf(dev, "%s: failed\n", __func__);
1310                 return -1;
1311         }
1312
1313         max_mtu_rsp = (q80_set_max_mtu_rsp_t *)ha->hw.mbox;
1314
1315         err = Q8_MBX_RSP_STATUS(max_mtu_rsp->regcnt_status);
1316
1317         if (err) {
1318                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1319         }
1320
1321         return 0;
1322 }
1323
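/*
 * Name: qla_link_event_req
 * Function: Enables asynchronous link event notifications from the
 *      firmware for the given receive context.
 */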
1324 static int
1325 qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id)
1326 {
1327         device_t                dev;
1328         q80_link_event_t        *lnk;
1329         q80_link_event_rsp_t    *lnk_rsp;
1330         uint32_t                err;
1331
1332         dev = ha->pci_dev;
1333
1334         lnk = (q80_link_event_t *)ha->hw.mbox;
1335         bzero(lnk, (sizeof (q80_link_event_t)));
1336
1337         lnk->opcode = Q8_MBX_LINK_EVENT_REQ;
1338         lnk->count_version = (sizeof (q80_link_event_t) >> 2);
1339         lnk->count_version |= Q8_MBX_CMD_VERSION;
1340
1341         lnk->cntxt_id = cntxt_id;
1342         lnk->cmd = Q8_LINK_EVENT_CMD_ENABLE_ASYNC;
1343
1344         if (qla_mbx_cmd(ha, (uint32_t *)lnk, (sizeof (q80_link_event_t) >> 2),
1345                 ha->hw.mbox, (sizeof (q80_link_event_rsp_t) >> 2), 0)) {
1346                 device_printf(dev, "%s: failed\n", __func__);
1347                 return -1;
1348         }
1349
1350         lnk_rsp = (q80_link_event_rsp_t *)ha->hw.mbox;
1351
1352         err = Q8_MBX_RSP_STATUS(lnk_rsp->regcnt_status);
1353
1354         if (err) {
1355                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1356         }
1357
1358         return 0;
1359 }
1360
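/*
 * Name: qla_config_fw_lro
 * Function: Enables firmware (hardware) LRO for IPv4 and IPv6 traffic on
 *      the given receive context.
 */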
1361 static int
1362 qla_config_fw_lro(qla_host_t *ha, uint16_t cntxt_id)
1363 {
1364         device_t                dev;
1365         q80_config_fw_lro_t     *fw_lro;
1366         q80_config_fw_lro_rsp_t *fw_lro_rsp;
1367         uint32_t                err;
1368
1369         dev = ha->pci_dev;
1370
1371         fw_lro = (q80_config_fw_lro_t *)ha->hw.mbox;
1372         bzero(fw_lro, sizeof(q80_config_fw_lro_t));
1373
1374         fw_lro->opcode = Q8_MBX_CONFIG_FW_LRO;
1375         fw_lro->count_version = (sizeof (q80_config_fw_lro_t) >> 2);
1376         fw_lro->count_version |= Q8_MBX_CMD_VERSION;
1377
1378         fw_lro->flags |= Q8_MBX_FW_LRO_IPV4 | Q8_MBX_FW_LRO_IPV4_WO_DST_IP_CHK;
1379         fw_lro->flags |= Q8_MBX_FW_LRO_IPV6 | Q8_MBX_FW_LRO_IPV6_WO_DST_IP_CHK;
1380
1381         fw_lro->cntxt_id = cntxt_id;
1382
1383         if (qla_mbx_cmd(ha, (uint32_t *)fw_lro,
1384                 (sizeof (q80_config_fw_lro_t) >> 2),
1385                 ha->hw.mbox, (sizeof (q80_config_fw_lro_rsp_t) >> 2), 0)) {
1386                 device_printf(dev, "%s: failed\n", __func__);
1387                 return -1;
1388         }
1389
1390         fw_lro_rsp = (q80_config_fw_lro_rsp_t *)ha->hw.mbox;
1391
1392         err = Q8_MBX_RSP_STATUS(fw_lro_rsp->regcnt_status);
1393
1394         if (err) {
1395                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1396         }
1397
1398         return 0;
1399 }
1400
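/*
 * Name: qla_set_cam_search_mode
 * Function: Sets the CAM search mode (internal or auto) via the
 *      Q8_MBX_HW_CONFIG mailbox command.
 */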
1401 static int
1402 qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode)
1403 {
1404         device_t                dev;
1405         q80_hw_config_t         *hw_config;
1406         q80_hw_config_rsp_t     *hw_config_rsp;
1407         uint32_t                err;
1408
1409         dev = ha->pci_dev;
1410
1411         hw_config = (q80_hw_config_t *)ha->hw.mbox;
1412         bzero(hw_config, sizeof (q80_hw_config_t));
1413
1414         hw_config->opcode = Q8_MBX_HW_CONFIG;
1415         hw_config->count_version = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE_COUNT;
1416         hw_config->count_version |= Q8_MBX_CMD_VERSION;
1417
1418         hw_config->cmd = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE;
1419
1420         hw_config->u.set_cam_search_mode.mode = search_mode;
1421
1422         if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
1423                 (sizeof (q80_hw_config_t) >> 2),
1424                 ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
1425                 device_printf(dev, "%s: failed\n", __func__);
1426                 return -1;
1427         }
1428         hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;
1429
1430         err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);
1431
1432         if (err) {
1433                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1434         }
1435
1436         return 0;
1437 }
1438
1439 static int
1440 qla_get_cam_search_mode(qla_host_t *ha)
1441 {
1442         device_t                dev;
1443         q80_hw_config_t         *hw_config;
1444         q80_hw_config_rsp_t     *hw_config_rsp;
1445         uint32_t                err;
1446
1447         dev = ha->pci_dev;
1448
1449         hw_config = (q80_hw_config_t *)ha->hw.mbox;
1450         bzero(hw_config, sizeof (q80_hw_config_t));
1451
1452         hw_config->opcode = Q8_MBX_HW_CONFIG;
1453         hw_config->count_version = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE_COUNT;
1454         hw_config->count_version |= Q8_MBX_CMD_VERSION;
1455
1456         hw_config->cmd = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE;
1457
1458         if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
1459                 (sizeof (q80_hw_config_t) >> 2),
1460                 ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
1461                 device_printf(dev, "%s: failed\n", __func__);
1462                 return -1;
1463         }
1464         hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;
1465
1466         err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);
1467
1468         if (err) {
1469                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1470         } else {
1471                 device_printf(dev, "%s: cam search mode [0x%08x]\n", __func__,
1472                         hw_config_rsp->u.get_cam_search_mode.mode);
1473         }
1474
1475         return 0;
1476 }
1477
1478
1479
1480 static void
1481 qla_xmt_stats(qla_host_t *ha, q80_xmt_stats_t *xstat, int i)
1482 {
1483         device_t dev = ha->pci_dev;
1484
1485         if (i < ha->hw.num_tx_rings) {
1486                 device_printf(dev, "%s[%d]: total_bytes\t\t%" PRIu64 "\n",
1487                         __func__, i, xstat->total_bytes);
1488                 device_printf(dev, "%s[%d]: total_pkts\t\t%" PRIu64 "\n",
1489                         __func__, i, xstat->total_pkts);
1490                 device_printf(dev, "%s[%d]: errors\t\t%" PRIu64 "\n",
1491                         __func__, i, xstat->errors);
1492                 device_printf(dev, "%s[%d]: pkts_dropped\t%" PRIu64 "\n",
1493                         __func__, i, xstat->pkts_dropped);
1494                 device_printf(dev, "%s[%d]: switch_pkts\t\t%" PRIu64 "\n",
1495                         __func__, i, xstat->switch_pkts);
1496                 device_printf(dev, "%s[%d]: num_buffers\t\t%" PRIu64 "\n",
1497                         __func__, i, xstat->num_buffers);
1498         } else {
1499                 device_printf(dev, "%s: total_bytes\t\t\t%" PRIu64 "\n",
1500                         __func__, xstat->total_bytes);
1501                 device_printf(dev, "%s: total_pkts\t\t\t%" PRIu64 "\n",
1502                         __func__, xstat->total_pkts);
1503                 device_printf(dev, "%s: errors\t\t\t%" PRIu64 "\n",
1504                         __func__, xstat->errors);
1505                 device_printf(dev, "%s: pkts_dropped\t\t\t%" PRIu64 "\n",
1506                         __func__, xstat->pkts_dropped);
1507                 device_printf(dev, "%s: switch_pkts\t\t\t%" PRIu64 "\n",
1508                         __func__, xstat->switch_pkts);
1509                 device_printf(dev, "%s: num_buffers\t\t\t%" PRIu64 "\n",
1510                         __func__, xstat->num_buffers);
1511         }
1512 }
1513
1514 static void
1515 qla_rcv_stats(qla_host_t *ha, q80_rcv_stats_t *rstat)
1516 {
1517         device_t dev = ha->pci_dev;
1518
1519         device_printf(dev, "%s: total_bytes\t\t\t%" PRIu64 "\n", __func__,
1520                 rstat->total_bytes);
1521         device_printf(dev, "%s: total_pkts\t\t\t%" PRIu64 "\n", __func__,
1522                 rstat->total_pkts);
1523         device_printf(dev, "%s: lro_pkt_count\t\t%" PRIu64 "\n", __func__,
1524                 rstat->lro_pkt_count);
1525         device_printf(dev, "%s: sw_pkt_count\t\t\t%" PRIu64 "\n", __func__,
1526                 rstat->sw_pkt_count);
1527         device_printf(dev, "%s: ip_chksum_err\t\t%" PRIu64 "\n", __func__,
1528                 rstat->ip_chksum_err);
1529         device_printf(dev, "%s: pkts_wo_acntxts\t\t%" PRIu64 "\n", __func__,
1530                 rstat->pkts_wo_acntxts);
1531         device_printf(dev, "%s: pkts_dropped_no_sds_card\t%" PRIu64 "\n",
1532                 __func__, rstat->pkts_dropped_no_sds_card);
1533         device_printf(dev, "%s: pkts_dropped_no_sds_host\t%" PRIu64 "\n",
1534                 __func__, rstat->pkts_dropped_no_sds_host);
1535         device_printf(dev, "%s: oversized_pkts\t\t%" PRIu64 "\n", __func__,
1536                 rstat->oversized_pkts);
1537         device_printf(dev, "%s: pkts_dropped_no_rds\t\t%" PRIu64 "\n",
1538                 __func__, rstat->pkts_dropped_no_rds);
1539         device_printf(dev, "%s: unxpctd_mcast_pkts\t\t%" PRIu64 "\n",
1540                 __func__, rstat->unxpctd_mcast_pkts);
1541         device_printf(dev, "%s: re1_fbq_error\t\t%" PRIu64 "\n", __func__,
1542                 rstat->re1_fbq_error);
1543         device_printf(dev, "%s: invalid_mac_addr\t\t%" PRIu64 "\n", __func__,
1544                 rstat->invalid_mac_addr);
1545         device_printf(dev, "%s: rds_prime_trys\t\t%" PRIu64 "\n", __func__,
1546                 rstat->rds_prime_trys);
1547         device_printf(dev, "%s: rds_prime_success\t\t%" PRIu64 "\n", __func__,
1548                 rstat->rds_prime_success);
1549         device_printf(dev, "%s: lro_flows_added\t\t%" PRIu64 "\n", __func__,
1550                 rstat->lro_flows_added);
1551         device_printf(dev, "%s: lro_flows_deleted\t\t%" PRIu64 "\n", __func__,
1552                 rstat->lro_flows_deleted);
1553         device_printf(dev, "%s: lro_flows_active\t\t%" PRIu64 "\n", __func__,
1554                 rstat->lro_flows_active);
1555         device_printf(dev, "%s: pkts_droped_unknown\t\t%" PRIu64 "\n",
1556                 __func__, rstat->pkts_droped_unknown);
1557         device_printf(dev, "%s: pkts_cnt_oversized\t\t%" PRIu64 "\n",
1558                 __func__, rstat->pkts_cnt_oversized);
1559 }
1560
1561 static void
1562 qla_mac_stats(qla_host_t *ha, q80_mac_stats_t *mstat)
1563 {
1564         device_t dev = ha->pci_dev;
1565
1566         device_printf(dev, "%s: xmt_frames\t\t\t%" PRIu64 "\n", __func__,
1567                 mstat->xmt_frames);
1568         device_printf(dev, "%s: xmt_bytes\t\t\t%" PRIu64 "\n", __func__,
1569                 mstat->xmt_bytes);
1570         device_printf(dev, "%s: xmt_mcast_pkts\t\t%" PRIu64 "\n", __func__,
1571                 mstat->xmt_mcast_pkts);
1572         device_printf(dev, "%s: xmt_bcast_pkts\t\t%" PRIu64 "\n", __func__,
1573                 mstat->xmt_bcast_pkts);
1574         device_printf(dev, "%s: xmt_pause_frames\t\t%" PRIu64 "\n", __func__,
1575                 mstat->xmt_pause_frames);
1576         device_printf(dev, "%s: xmt_cntrl_pkts\t\t%" PRIu64 "\n", __func__,
1577                 mstat->xmt_cntrl_pkts);
1578         device_printf(dev, "%s: xmt_pkt_lt_64bytes\t\t%" PRIu64 "\n",
1579                 __func__, mstat->xmt_pkt_lt_64bytes);
1580         device_printf(dev, "%s: xmt_pkt_lt_127bytes\t\t%" PRIu64 "\n",
1581                 __func__, mstat->xmt_pkt_lt_127bytes);
1582         device_printf(dev, "%s: xmt_pkt_lt_255bytes\t\t%" PRIu64 "\n",
1583                 __func__, mstat->xmt_pkt_lt_255bytes);
1584         device_printf(dev, "%s: xmt_pkt_lt_511bytes\t\t%" PRIu64 "\n",
1585                 __func__, mstat->xmt_pkt_lt_511bytes);
1586         device_printf(dev, "%s: xmt_pkt_lt_1023bytes\t\t%" PRIu64 "\n",
1587                 __func__, mstat->xmt_pkt_lt_1023bytes);
1588         device_printf(dev, "%s: xmt_pkt_lt_1518bytes\t\t%" PRIu64 "\n",
1589                 __func__, mstat->xmt_pkt_lt_1518bytes);
1590         device_printf(dev, "%s: xmt_pkt_gt_1518bytes\t\t%" PRIu64 "\n",
1591                 __func__, mstat->xmt_pkt_gt_1518bytes);
1592
1593         device_printf(dev, "%s: rcv_frames\t\t\t%" PRIu64 "\n", __func__,
1594                 mstat->rcv_frames);
1595         device_printf(dev, "%s: rcv_bytes\t\t\t%" PRIu64 "\n", __func__,
1596                 mstat->rcv_bytes);
1597         device_printf(dev, "%s: rcv_mcast_pkts\t\t%" PRIu64 "\n", __func__,
1598                 mstat->rcv_mcast_pkts);
1599         device_printf(dev, "%s: rcv_bcast_pkts\t\t%" PRIu64 "\n", __func__,
1600                 mstat->rcv_bcast_pkts);
1601         device_printf(dev, "%s: rcv_pause_frames\t\t%" PRIu64 "\n", __func__,
1602                 mstat->rcv_pause_frames);
1603         device_printf(dev, "%s: rcv_cntrl_pkts\t\t%" PRIu64 "\n", __func__,
1604                 mstat->rcv_cntrl_pkts);
1605         device_printf(dev, "%s: rcv_pkt_lt_64bytes\t\t%" PRIu64 "\n",
1606                 __func__, mstat->rcv_pkt_lt_64bytes);
1607         device_printf(dev, "%s: rcv_pkt_lt_127bytes\t\t%" PRIu64 "\n",
1608                 __func__, mstat->rcv_pkt_lt_127bytes);
1609         device_printf(dev, "%s: rcv_pkt_lt_255bytes\t\t%" PRIu64 "\n",
1610                 __func__, mstat->rcv_pkt_lt_255bytes);
1611         device_printf(dev, "%s: rcv_pkt_lt_511bytes\t\t%" PRIu64 "\n",
1612                 __func__, mstat->rcv_pkt_lt_511bytes);
1613         device_printf(dev, "%s: rcv_pkt_lt_1023bytes\t\t%" PRIu64 "\n",
1614                 __func__, mstat->rcv_pkt_lt_1023bytes);
1615         device_printf(dev, "%s: rcv_pkt_lt_1518bytes\t\t%" PRIu64 "\n",
1616                 __func__, mstat->rcv_pkt_lt_1518bytes);
1617         device_printf(dev, "%s: rcv_pkt_gt_1518bytes\t\t%" PRIu64 "\n",
1618                 __func__, mstat->rcv_pkt_gt_1518bytes);
1619
1620         device_printf(dev, "%s: rcv_len_error\t\t%" PRIu64 "\n", __func__,
1621                 mstat->rcv_len_error);
1622         device_printf(dev, "%s: rcv_len_small\t\t%" PRIu64 "\n", __func__,
1623                 mstat->rcv_len_small);
1624         device_printf(dev, "%s: rcv_len_large\t\t%" PRIu64 "\n", __func__,
1625                 mstat->rcv_len_large);
1626         device_printf(dev, "%s: rcv_jabber\t\t\t%" PRIu64 "\n", __func__,
1627                 mstat->rcv_jabber);
1628         device_printf(dev, "%s: rcv_dropped\t\t\t%" PRIu64 "\n", __func__,
1629                 mstat->rcv_dropped);
1630         device_printf(dev, "%s: fcs_error\t\t\t%" PRIu64 "\n", __func__,
1631                 mstat->fcs_error);
1632         device_printf(dev, "%s: align_error\t\t\t%" PRIu64 "\n", __func__,
1633                 mstat->align_error);
1634 }
1635
1636
1637 static int
1638 qla_get_hw_stats(qla_host_t *ha, uint32_t cmd, uint32_t rsp_size)
1639 {
1640         device_t                dev;
1641         q80_get_stats_t         *stat;
1642         q80_get_stats_rsp_t     *stat_rsp;
1643         uint32_t                err;
1644
1645         dev = ha->pci_dev;
1646
1647         stat = (q80_get_stats_t *)ha->hw.mbox;
1648         bzero(stat, (sizeof (q80_get_stats_t)));
1649
1650         stat->opcode = Q8_MBX_GET_STATS;
1651         stat->count_version = 2;
1652         stat->count_version |= Q8_MBX_CMD_VERSION;
1653
1654         stat->cmd = cmd;
1655
1656         if (qla_mbx_cmd(ha, (uint32_t *)stat, 2,
1657                 ha->hw.mbox, (rsp_size >> 2), 0)) {
1658                 device_printf(dev, "%s: failed\n", __func__);
1659                 return -1;
1660         }
1661
1662         stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
1663
1664         err = Q8_MBX_RSP_STATUS(stat_rsp->regcnt_status);
1665
1666         if (err) {
1667                 return -1;
1668         }
1669
1670         return 0;
1671 }
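/*
 * Illustrative sketch (not part of the driver build): the mailbox
 * request/response convention shared by qla_get_hw_stats() and the other
 * Q8_MBX_* helpers above.  A request structure is built in ha->hw.mbox,
 * qla_mbx_cmd() is issued with the request/response lengths expressed in
 * 32-bit words (hence the ">> 2"), and the same mailbox area is then
 * re-read as the response, whose regcnt_status field carries the
 * completion status.  Excluded from the build; illustration only.
 */
#if 0
static int
example_mbx_pattern(qla_host_t *ha)
{
	q80_get_stats_t		*req;
	q80_get_stats_rsp_t	*rsp;

	req = (q80_get_stats_t *)ha->hw.mbox;
	bzero(req, sizeof (q80_get_stats_t));

	req->opcode = Q8_MBX_GET_STATS;			/* command id */
	req->count_version = 2 | Q8_MBX_CMD_VERSION;	/* length in words + version */
	/* select MAC statistics for this PCI function, as ql_get_stats() does below */
	req->cmd = Q8_GET_STATS_CMD_TYPE_MAC | ((ha->pci_func & 0x1) << 16);

	if (qla_mbx_cmd(ha, (uint32_t *)req, 2,
		ha->hw.mbox, (sizeof (q80_get_stats_rsp_t) >> 2), 0))
		return (-1);				/* mailbox transport failed */

	rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
	return (Q8_MBX_RSP_STATUS(rsp->regcnt_status) ? -1 : 0);
}
#endif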
1672
1673 void
1674 ql_get_stats(qla_host_t *ha)
1675 {
1676         q80_get_stats_rsp_t     *stat_rsp;
1677         q80_mac_stats_t         *mstat;
1678         q80_xmt_stats_t         *xstat;
1679         q80_rcv_stats_t         *rstat;
1680         uint32_t                cmd;
1681         int                     i;
1682
1683         stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
1684         /*
1685          * Get MAC Statistics
1686          */
1687         cmd = Q8_GET_STATS_CMD_TYPE_MAC;
1688 //      cmd |= Q8_GET_STATS_CMD_CLEAR;
1689
1690         cmd |= ((ha->pci_func & 0x1) << 16);
1691
1692         if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
1693                 mstat = (q80_mac_stats_t *)&stat_rsp->u.mac;
1694                 qla_mac_stats(ha, mstat);
1695         } else {
1696                 device_printf(ha->pci_dev, "%s: mac failed [0x%08x]\n",
1697                         __func__, ha->hw.mbox[0]);
1698         }
1699         /*
1700          * Get RCV Statistics
1701          */
1702         cmd = Q8_GET_STATS_CMD_RCV | Q8_GET_STATS_CMD_TYPE_CNTXT;
1703 //      cmd |= Q8_GET_STATS_CMD_CLEAR;
1704         cmd |= (ha->hw.rcv_cntxt_id << 16);
1705
1706         if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
1707                 rstat = (q80_rcv_stats_t *)&stat_rsp->u.rcv;
1708                 qla_rcv_stats(ha, rstat);
1709         } else {
1710                 device_printf(ha->pci_dev, "%s: rcv failed [0x%08x]\n",
1711                         __func__, ha->hw.mbox[0]);
1712         }
1713         /*
1714          * Get XMT Statistics
1715          */
1716         for (i = 0 ; i < ha->hw.num_tx_rings; i++) {
1717                 cmd = Q8_GET_STATS_CMD_XMT | Q8_GET_STATS_CMD_TYPE_CNTXT;
1718 //              cmd |= Q8_GET_STATS_CMD_CLEAR;
1719                 cmd |= (ha->hw.tx_cntxt[i].tx_cntxt_id << 16);
1720
1721                 if (qla_get_hw_stats(ha, cmd, sizeof(q80_get_stats_rsp_t))
1722                         == 0) {
1723                         xstat = (q80_xmt_stats_t *)&stat_rsp->u.xmt;
1724                         qla_xmt_stats(ha, xstat, i);
1725                 } else {
1726                         device_printf(ha->pci_dev, "%s: xmt failed [0x%08x]\n",
1727                                 __func__, ha->hw.mbox[0]);
1728                 }
1729         }
1730         return;
1731 }
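/*
 * Worked example (illustration only): in ql_get_stats() above, the low
 * 16 bits of "cmd" select the statistics type and the high 16 bits
 * identify the owner -- the PCI function for MAC statistics, or the
 * receive/transmit context id for the per-context statistics.  For a
 * hypothetical receive context id of 5 this would be
 * cmd = Q8_GET_STATS_CMD_RCV | Q8_GET_STATS_CMD_TYPE_CNTXT | (5 << 16).
 */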
1732
1733 static void
1734 qla_get_quick_stats(qla_host_t *ha)
1735 {
1736         q80_get_mac_rcv_xmt_stats_rsp_t *stat_rsp;
1737         q80_mac_stats_t         *mstat;
1738         q80_xmt_stats_t         *xstat;
1739         q80_rcv_stats_t         *rstat;
1740         uint32_t                cmd;
1741
1742         stat_rsp = (q80_get_mac_rcv_xmt_stats_rsp_t *)ha->hw.mbox;
1743
1744         cmd = Q8_GET_STATS_CMD_TYPE_ALL;
1745 //      cmd |= Q8_GET_STATS_CMD_CLEAR;
1746
1747 //      cmd |= ((ha->pci_func & 0x3) << 16);
1748         cmd |= (0xFFFF << 16);
1749
1750         if (qla_get_hw_stats(ha, cmd,
1751                         sizeof (q80_get_mac_rcv_xmt_stats_rsp_t)) == 0) {
1752
1753                 mstat = (q80_mac_stats_t *)&stat_rsp->mac;
1754                 rstat = (q80_rcv_stats_t *)&stat_rsp->rcv;
1755                 xstat = (q80_xmt_stats_t *)&stat_rsp->xmt;
1756                 qla_mac_stats(ha, mstat);
1757                 qla_rcv_stats(ha, rstat);
1758                 qla_xmt_stats(ha, xstat, ha->hw.num_tx_rings);
1759         } else {
1760                 device_printf(ha->pci_dev, "%s: failed [0x%08x]\n",
1761                         __func__, ha->hw.mbox[0]);
1762         }
1763         return;
1764 }
1765
1766 /*
1767  * Name: qla_tx_tso
1768  * Function: Checks if the packet to be transmitted is a candidate for
1769  *      Large TCP Segment Offload. If yes, the appropriate fields in the Tx
1770  *      Ring Structure are plugged in.
1771  */
1772 static int
1773 qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr)
1774 {
1775         struct ether_vlan_header *eh;
1776         struct ip *ip = NULL;
1777         struct ip6_hdr *ip6 = NULL;
1778         struct tcphdr *th = NULL;
1779         uint32_t ehdrlen,  hdrlen, ip_hlen, tcp_hlen, tcp_opt_off;
1780         uint16_t etype, opcode, offload = 1;
1781         device_t dev;
1782
1783         dev = ha->pci_dev;
1784
1785
1786         eh = mtod(mp, struct ether_vlan_header *);
1787
1788         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1789                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1790                 etype = ntohs(eh->evl_proto);
1791         } else {
1792                 ehdrlen = ETHER_HDR_LEN;
1793                 etype = ntohs(eh->evl_encap_proto);
1794         }
1795
1796         hdrlen = 0;
1797
1798         switch (etype) {
1799                 case ETHERTYPE_IP:
1800
1801                         tcp_opt_off = ehdrlen + sizeof(struct ip) +
1802                                         sizeof(struct tcphdr);
1803
1804                         if (mp->m_len < tcp_opt_off) {
1805                                 m_copydata(mp, 0, tcp_opt_off, hdr);
1806                                 ip = (struct ip *)(hdr + ehdrlen);
1807                         } else {
1808                                 ip = (struct ip *)(mp->m_data + ehdrlen);
1809                         }
1810
1811                         ip_hlen = ip->ip_hl << 2;
1812                         opcode = Q8_TX_CMD_OP_XMT_TCP_LSO;
1813
1814                                 
1815                         if ((ip->ip_p != IPPROTO_TCP) ||
1816                                 (ip_hlen != sizeof (struct ip))){
1817                                 /* IP Options are not supported */
1818
1819                                 offload = 0;
1820                         } else
1821                                 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
1822
1823                 break;
1824
1825                 case ETHERTYPE_IPV6:
1826
1827                         tcp_opt_off = ehdrlen + sizeof(struct ip6_hdr) +
1828                                         sizeof (struct tcphdr);
1829
1830                         if (mp->m_len < tcp_opt_off) {
1831                                 m_copydata(mp, 0, tcp_opt_off, hdr);
1832                                 ip6 = (struct ip6_hdr *)(hdr + ehdrlen);
1833                         } else {
1834                                 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
1835                         }
1836
1837                         ip_hlen = sizeof(struct ip6_hdr);
1838                         opcode = Q8_TX_CMD_OP_XMT_TCP_LSO_IPV6;
1839
1840                         if (ip6->ip6_nxt != IPPROTO_TCP) {
1841                                 //device_printf(dev, "%s: ipv6\n", __func__);
1842                                 offload = 0;
1843                         } else
1844                                 th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
1845                 break;
1846
1847                 default:
1848                         QL_DPRINT8(ha, (dev, "%s: type!=ip\n", __func__));
1849                         offload = 0;
1850                 break;
1851         }
1852
1853         if (!offload)
1854                 return (-1);
1855
1856         tcp_hlen = th->th_off << 2;
1857         hdrlen = ehdrlen + ip_hlen + tcp_hlen;
1858
1859         if (mp->m_len < hdrlen) {
1860                 if (mp->m_len < tcp_opt_off) {
1861                         if (tcp_hlen > sizeof(struct tcphdr)) {
1862                                 m_copydata(mp, tcp_opt_off,
1863                                         (tcp_hlen - sizeof(struct tcphdr)),
1864                                         &hdr[tcp_opt_off]);
1865                         }
1866                 } else {
1867                         m_copydata(mp, 0, hdrlen, hdr);
1868                 }
1869         }
1870
1871         tx_cmd->mss = mp->m_pkthdr.tso_segsz;
1872
1873         tx_cmd->flags_opcode = opcode ;
1874         tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;
1875         tx_cmd->total_hdr_len = hdrlen;
1876
1877         /* Multicast check: LSB of the most significant address byte == 1 */
1878         if (eh->evl_dhost[0] & 0x01) {
1879                 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST;
1880         }
1881
1882         if (mp->m_len < hdrlen) {
1883                 printf("%d\n", hdrlen);
1884                 return (1);
1885         }
1886
1887         return (0);
1888 }
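/*
 * Worked example (illustration only): the total header length copied into
 * the Tx command descriptors for a TSO frame is ehdrlen + ip_hlen +
 * tcp_hlen, as computed above.  With no IP or TCP options that is
 * 14 + 20 + 20 = 54 bytes for IPv4 (58 with an 802.1Q tag, since ehdrlen
 * grows by ETHER_VLAN_ENCAP_LEN) and 14 + 40 + 20 = 74 bytes for IPv6.
 * The sketch below is excluded from the build.
 */
#if 0
static __inline uint32_t
example_tso_hdrlen(int vlan, int ipv6)
{
	uint32_t ehdrlen = ETHER_HDR_LEN + (vlan ? ETHER_VLAN_ENCAP_LEN : 0);
	uint32_t ip_hlen = ipv6 ? sizeof(struct ip6_hdr) : sizeof(struct ip);

	/* assumes no TCP options, i.e. tcp_hlen == sizeof(struct tcphdr) */
	return (ehdrlen + ip_hlen + sizeof(struct tcphdr));
}
#endif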
1889
1890 /*
1891  * Name: qla_tx_chksum
1892  * Function: Checks if the packet to be transmitted is a candidate for
1893  *      TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx
1894  *      Ring Structure are plugged in.
1895  */
1896 static int
1897 qla_tx_chksum(qla_host_t *ha, struct mbuf *mp, uint32_t *op_code,
1898         uint32_t *tcp_hdr_off)
1899 {
1900         struct ether_vlan_header *eh;
1901         struct ip *ip;
1902         struct ip6_hdr *ip6;
1903         uint32_t ehdrlen, ip_hlen;
1904         uint16_t etype, opcode, offload = 1;
1905         device_t dev;
1906         uint8_t buf[sizeof(struct ip6_hdr)];
1907
1908         dev = ha->pci_dev;
1909
1910         *op_code = 0;
1911
1912         if ((mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) == 0)
1913                 return (-1);
1914
1915         eh = mtod(mp, struct ether_vlan_header *);
1916
1917         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1918                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1919                 etype = ntohs(eh->evl_proto);
1920         } else {
1921                 ehdrlen = ETHER_HDR_LEN;
1922                 etype = ntohs(eh->evl_encap_proto);
1923         }
1924
1925                 
1926         switch (etype) {
1927                 case ETHERTYPE_IP:
1928                         ip = (struct ip *)(mp->m_data + ehdrlen);
1929
1930                         ip_hlen = sizeof (struct ip);
1931
1932                         if (mp->m_len < (ehdrlen + ip_hlen)) {
1933                                 m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
1934                                 ip = (struct ip *)buf;
1935                         }
1936
1937                         if (ip->ip_p == IPPROTO_TCP)
1938                                 opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM;
1939                         else if (ip->ip_p == IPPROTO_UDP)
1940                                 opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM;
1941                         else {
1942                                 //device_printf(dev, "%s: ipv4\n", __func__);
1943                                 offload = 0;
1944                         }
1945                 break;
1946
1947                 case ETHERTYPE_IPV6:
1948                         ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
1949
1950                         ip_hlen = sizeof(struct ip6_hdr);
1951
1952                         if (mp->m_len < (ehdrlen + ip_hlen)) {
1953                                 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
1954                                         buf);
1955                                 ip6 = (struct ip6_hdr *)buf;
1956                         }
1957
1958                         if (ip6->ip6_nxt == IPPROTO_TCP)
1959                                 opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6;
1960                         else if (ip6->ip6_nxt == IPPROTO_UDP)
1961                                 opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6;
1962                         else {
1963                                 //device_printf(dev, "%s: ipv6\n", __func__);
1964                                 offload = 0;
1965                         }
1966                 break;
1967
1968                 default:
1969                         offload = 0;
1970                 break;
1971         }
1972         if (!offload)
1973                 return (-1);
1974
1975         *op_code = opcode;
1976         *tcp_hdr_off = (ip_hlen + ehdrlen);
1977
1978         return (0);
1979 }
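/*
 * Worked example (illustration only): an untagged IPv4/UDP packet with
 * CSUM_UDP set in csum_flags resolves to opcode Q8_TX_CMD_OP_XMT_UDP_CHKSUM
 * and tcp_hdr_off = ip_hlen + ehdrlen = 20 + 14 = 34, the offset of the
 * UDP header from the start of the frame.
 */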
1980
1981 #define QLA_TX_MIN_FREE 2
1982 /*
1983  * Name: ql_hw_send
1984  * Function: Transmits a packet. It first checks if the packet is a
1985  *      candidate for Large TCP Segment Offload and then for UDP/TCP checksum
1986  *      offload. If neither of these criteria is met, the packet is transmitted
1987  *      as a regular Ethernet frame.
1988  */
1989 int
1990 ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
1991         uint32_t tx_idx, struct mbuf *mp, uint32_t txr_idx, uint32_t iscsi_pdu)
1992 {
1993         struct ether_vlan_header *eh;
1994         qla_hw_t *hw = &ha->hw;
1995         q80_tx_cmd_t *tx_cmd, tso_cmd;
1996         bus_dma_segment_t *c_seg;
1997         uint32_t num_tx_cmds, hdr_len = 0;
1998         uint32_t total_length = 0, bytes, tx_cmd_count = 0, txr_next;
1999         device_t dev;
2000         int i, ret;
2001         uint8_t *src = NULL, *dst = NULL;
2002         uint8_t frame_hdr[QL_FRAME_HDR_SIZE];
2003         uint32_t op_code = 0;
2004         uint32_t tcp_hdr_off = 0;
2005
2006         dev = ha->pci_dev;
2007
2008         /*
2009          * Always make sure there is at least one empty slot in the tx_ring;
2010          * the tx_ring is considered full when only one entry is available.
2011          */
2012         num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2;
2013
2014         total_length = mp->m_pkthdr.len;
2015         if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
2016                 device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
2017                         __func__, total_length);
2018                 return (-1);
2019         }
2020         eh = mtod(mp, struct ether_vlan_header *);
2021
2022         if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
2023
2024                 bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t));
2025
2026                 src = frame_hdr;
2027                 ret = qla_tx_tso(ha, mp, &tso_cmd, src);
2028
2029                 if (!(ret & ~1)) {
2030                         /* find the additional tx_cmd descriptors required */
2031
2032                         if (mp->m_flags & M_VLANTAG)
2033                                 tso_cmd.total_hdr_len += ETHER_VLAN_ENCAP_LEN;
2034
2035                         hdr_len = tso_cmd.total_hdr_len;
2036
2037                         bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
2038                         bytes = QL_MIN(bytes, hdr_len);
2039
2040                         num_tx_cmds++;
2041                         hdr_len -= bytes;
2042
2043                         while (hdr_len) {
2044                                 bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
2045                                 hdr_len -= bytes;
2046                                 num_tx_cmds++;
2047                         }
2048                         hdr_len = tso_cmd.total_hdr_len;
2049
2050                         if (ret == 0)
2051                                 src = (uint8_t *)eh;
2052                 } else 
2053                         return (EINVAL);
2054         } else {
2055                 (void)qla_tx_chksum(ha, mp, &op_code, &tcp_hdr_off);
2056         }
2057
2058         if (iscsi_pdu)
2059                 ha->hw.iscsi_pkt_count++;
2060
2061         if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
2062                 ql_hw_tx_done_locked(ha, txr_idx);
2063                 if (hw->tx_cntxt[txr_idx].txr_free <=
2064                                 (num_tx_cmds + QLA_TX_MIN_FREE)) {
2065                         QL_DPRINT8(ha, (dev, "%s: (hw->txr_free <= "
2066                                 "(num_tx_cmds + QLA_TX_MIN_FREE))\n",
2067                                 __func__));
2068                         return (-1);
2069                 }
2070         }
2071
2072         tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[tx_idx];
2073
2074         if (!(mp->m_pkthdr.csum_flags & CSUM_TSO)) {
2075
2076                 if (nsegs > ha->hw.max_tx_segs)
2077                         ha->hw.max_tx_segs = nsegs;
2078
2079                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2080
2081                 if (op_code) {
2082                         tx_cmd->flags_opcode = op_code;
2083                         tx_cmd->tcp_hdr_off = tcp_hdr_off;
2084
2085                 } else {
2086                         tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER;
2087                 }
2088         } else {
2089                 bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t));
2090                 ha->tx_tso_frames++;
2091         }
2092
2093         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2094                 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED;
2095
2096                 if (iscsi_pdu)
2097                         eh->evl_tag |= ha->hw.user_pri_iscsi << 13;
2098
2099         } else if (mp->m_flags & M_VLANTAG) {
2100
2101                 if (hdr_len) { /* TSO */
2102                         tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED |
2103                                                 Q8_TX_CMD_FLAGS_HW_VLAN_ID);
2104                         tx_cmd->tcp_hdr_off += ETHER_VLAN_ENCAP_LEN;
2105                 } else
2106                         tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_HW_VLAN_ID;
2107
2108                 ha->hw_vlan_tx_frames++;
2109                 tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag;
2110
2111                 if (iscsi_pdu) {
2112                         tx_cmd->vlan_tci |= ha->hw.user_pri_iscsi << 13;
2113                         mp->m_pkthdr.ether_vtag = tx_cmd->vlan_tci;
2114                 }
2115         }
2116
2117
2118         tx_cmd->n_bufs = (uint8_t)nsegs;
2119         tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF);
2120         tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8)));
2121         tx_cmd->cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func);
2122
2123         c_seg = segs;
2124
2125         while (1) {
2126                 for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) {
2127
2128                         switch (i) {
2129                         case 0:
2130                                 tx_cmd->buf1_addr = c_seg->ds_addr;
2131                                 tx_cmd->buf1_len = c_seg->ds_len;
2132                                 break;
2133
2134                         case 1:
2135                                 tx_cmd->buf2_addr = c_seg->ds_addr;
2136                                 tx_cmd->buf2_len = c_seg->ds_len;
2137                                 break;
2138
2139                         case 2:
2140                                 tx_cmd->buf3_addr = c_seg->ds_addr;
2141                                 tx_cmd->buf3_len = c_seg->ds_len;
2142                                 break;
2143
2144                         case 3:
2145                                 tx_cmd->buf4_addr = c_seg->ds_addr;
2146                                 tx_cmd->buf4_len = c_seg->ds_len;
2147                                 break;
2148                         }
2149
2150                         c_seg++;
2151                         nsegs--;
2152                 }
2153
2154                 txr_next = hw->tx_cntxt[txr_idx].txr_next =
2155                         (hw->tx_cntxt[txr_idx].txr_next + 1) &
2156                                 (NUM_TX_DESCRIPTORS - 1);
2157                 tx_cmd_count++;
2158
2159                 if (!nsegs)
2160                         break;
2161                 
2162                 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2163                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2164         }
2165
2166         if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
2167
2168                 /* TSO : Copy the header in the following tx cmd descriptors */
2169
2170                 txr_next = hw->tx_cntxt[txr_idx].txr_next;
2171
2172                 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2173                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2174
2175                 bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
2176                 bytes = QL_MIN(bytes, hdr_len);
2177
2178                 dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN;
2179
2180                 if (mp->m_flags & M_VLANTAG) {
2181                         /* first copy the src/dst MAC addresses */
2182                         bcopy(src, dst, (ETHER_ADDR_LEN * 2));
2183                         dst += (ETHER_ADDR_LEN * 2);
2184                         src += (ETHER_ADDR_LEN * 2);
2185                         
2186                         *((uint16_t *)dst) = htons(ETHERTYPE_VLAN);
2187                         dst += 2;
2188                         *((uint16_t *)dst) = htons(mp->m_pkthdr.ether_vtag);
2189                         dst += 2;
2190
2191                         /* bytes left in src header */
2192                         hdr_len -= ((ETHER_ADDR_LEN * 2) +
2193                                         ETHER_VLAN_ENCAP_LEN);
2194
2195                         /* bytes left in TxCmd Entry */
2196                         bytes -= ((ETHER_ADDR_LEN * 2) + ETHER_VLAN_ENCAP_LEN);
2197
2198
2199                         bcopy(src, dst, bytes);
2200                         src += bytes;
2201                         hdr_len -= bytes;
2202                 } else {
2203                         bcopy(src, dst, bytes);
2204                         src += bytes;
2205                         hdr_len -= bytes;
2206                 }
2207
2208                 txr_next = hw->tx_cntxt[txr_idx].txr_next =
2209                                 (hw->tx_cntxt[txr_idx].txr_next + 1) &
2210                                         (NUM_TX_DESCRIPTORS - 1);
2211                 tx_cmd_count++;
2212                 
2213                 while (hdr_len) {
2214                         tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2215                         bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2216
2217                         bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
2218
2219                         bcopy(src, tx_cmd, bytes);
2220                         src += bytes;
2221                         hdr_len -= bytes;
2222
2223                         txr_next = hw->tx_cntxt[txr_idx].txr_next =
2224                                 (hw->tx_cntxt[txr_idx].txr_next + 1) &
2225                                         (NUM_TX_DESCRIPTORS - 1);
2226                         tx_cmd_count++;
2227                 }
2228         }
2229
2230         hw->tx_cntxt[txr_idx].txr_free =
2231                 hw->tx_cntxt[txr_idx].txr_free - tx_cmd_count;
2232
2233         QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->tx_cntxt[txr_idx].txr_next,\
2234                 txr_idx);
2235         QL_DPRINT8(ha, (dev, "%s: return\n", __func__));
2236
2237         return (0);
2238 }
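/*
 * Illustrative sketch (not part of the driver build): how the descriptor
 * budget computed at the top of ql_hw_send() works out.  Each Tx command
 * descriptor carries up to Q8_TX_CMD_MAX_SEGMENTS (4) DMA segments, so the
 * data portion needs ceil(nsegs / 4) descriptors; a TSO frame additionally
 * spills its copied header into further descriptors, a first partial chunk
 * of (sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN) bytes and then whole
 * descriptors.  The 64-byte descriptor size and 2-byte alignment used below
 * are assumptions made only for illustration.
 */
#if 0
static uint32_t
example_tx_cmds_needed(int nsegs, uint32_t tso_hdr_len)
{
	/* up to 4 DMA segments fit in one tx command descriptor */
	uint32_t ncmds = (nsegs + 3) >> 2;
	/* assumed sizes, for illustration only */
	uint32_t desc_size = 64, tso_align = 2;
	uint32_t bytes;

	if (tso_hdr_len) {
		/* first header chunk shares a descriptor with the alignment pad */
		bytes = desc_size - tso_align;
		if (bytes > tso_hdr_len)
			bytes = tso_hdr_len;
		ncmds++;
		tso_hdr_len -= bytes;

		/* remaining header bytes consume whole descriptors */
		while (tso_hdr_len) {
			bytes = (tso_hdr_len < desc_size) ? tso_hdr_len : desc_size;
			tso_hdr_len -= bytes;
			ncmds++;
		}
	}
	return (ncmds);
}
#endif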
2239
2240
2241
2242 #define Q8_CONFIG_IND_TBL_SIZE  32 /* < Q8_RSS_IND_TBL_SIZE and power of 2 */
2243 static int
2244 qla_config_rss_ind_table(qla_host_t *ha)
2245 {
2246         uint32_t i, count;
2247         uint8_t rss_ind_tbl[Q8_CONFIG_IND_TBL_SIZE];
2248
2249
2250         for (i = 0; i < Q8_CONFIG_IND_TBL_SIZE; i++) {
2251                 rss_ind_tbl[i] = i % ha->hw.num_sds_rings;
2252         }
2253
2254         for (i = 0; i <= Q8_RSS_IND_TBL_MAX_IDX ;
2255                 i = i + Q8_CONFIG_IND_TBL_SIZE) {
2256
2257                 if ((i + Q8_CONFIG_IND_TBL_SIZE) > Q8_RSS_IND_TBL_MAX_IDX) {
2258                         count = Q8_RSS_IND_TBL_MAX_IDX - i + 1;
2259                 } else {
2260                         count = Q8_CONFIG_IND_TBL_SIZE;
2261                 }
2262
2263                 if (qla_set_rss_ind_table(ha, i, count, ha->hw.rcv_cntxt_id,
2264                         rss_ind_tbl))
2265                         return (-1);
2266         }
2267
2268         return (0);
2269 }
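/*
 * Worked example (illustration only): with num_sds_rings == 4 the loop
 * above fills the indirection table round-robin as 0,1,2,3,0,1,2,3,...
 * so RSS hash buckets are spread evenly across the status rings, and the
 * table is programmed in Q8_CONFIG_IND_TBL_SIZE (32) entry chunks until
 * Q8_RSS_IND_TBL_MAX_IDX is covered.
 */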
2270
2271 static int
2272 qla_config_soft_lro(qla_host_t *ha)
2273 {
2274         int i;
2275         qla_hw_t *hw = &ha->hw;
2276         struct lro_ctrl *lro;
2277
2278         for (i = 0; i < hw->num_sds_rings; i++) {
2279                 lro = &hw->sds[i].lro;
2280
2281                 bzero(lro, sizeof(struct lro_ctrl));
2282
2283 #if (__FreeBSD_version >= 1100101)
2284                 if (tcp_lro_init_args(lro, ha->ifp, 0, NUM_RX_DESCRIPTORS)) {
2285                         device_printf(ha->pci_dev,
2286                                 "%s: tcp_lro_init_args [%d] failed\n",
2287                                 __func__, i);
2288                         return (-1);
2289                 }
2290 #else
2291                 if (tcp_lro_init(lro)) {
2292                         device_printf(ha->pci_dev,
2293                                 "%s: tcp_lro_init [%d] failed\n",
2294                                 __func__, i);
2295                         return (-1);
2296                 }
2297 #endif /* #if (__FreeBSD_version >= 1100101) */
2298
2299                 lro->ifp = ha->ifp;
2300         }
2301
2302         QL_DPRINT2(ha, (ha->pci_dev, "%s: LRO initialized\n", __func__));
2303         return (0);
2304 }
2305
2306 static void
2307 qla_drain_soft_lro(qla_host_t *ha)
2308 {
2309         int i;
2310         qla_hw_t *hw = &ha->hw;
2311         struct lro_ctrl *lro;
2312
2313         for (i = 0; i < hw->num_sds_rings; i++) {
2314                 lro = &hw->sds[i].lro;
2315
2316 #if (__FreeBSD_version >= 1100101)
2317                 tcp_lro_flush_all(lro);
2318 #else
2319                 struct lro_entry *queued;
2320
2321                 while ((!SLIST_EMPTY(&lro->lro_active))) {
2322                         queued = SLIST_FIRST(&lro->lro_active);
2323                         SLIST_REMOVE_HEAD(&lro->lro_active, next);
2324                         tcp_lro_flush(lro, queued);
2325                 }
2326 #endif /* #if (__FreeBSD_version >= 1100101) */
2327         }
2328
2329         return;
2330 }
2331
2332 static void
2333 qla_free_soft_lro(qla_host_t *ha)
2334 {
2335         int i;
2336         qla_hw_t *hw = &ha->hw;
2337         struct lro_ctrl *lro;
2338
2339         for (i = 0; i < hw->num_sds_rings; i++) {
2340                 lro = &hw->sds[i].lro;
2341                 tcp_lro_free(lro);
2342         }
2343
2344         return;
2345 }
2346
2347
2348 /*
2349  * Name: ql_del_hw_if
2350  * Function: Destroys the hardware-specific entities corresponding to an
2351  *      Ethernet Interface
2352  */
2353 void
2354 ql_del_hw_if(qla_host_t *ha)
2355 {
2356         uint32_t i;
2357         uint32_t num_msix;
2358
2359         (void)qla_stop_nic_func(ha);
2360
2361         qla_del_rcv_cntxt(ha);
2362
2363         qla_del_xmt_cntxt(ha);
2364
2365         if (ha->hw.flags.init_intr_cnxt) {
2366                 for (i = 0; i < ha->hw.num_sds_rings; ) {
2367
2368                         if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2369                                 num_msix = Q8_MAX_INTR_VECTORS;
2370                         else
2371                                 num_msix = ha->hw.num_sds_rings - i;
2372                         qla_config_intr_cntxt(ha, i, num_msix, 0);
2373
2374                         i += num_msix;
2375                 }
2376
2377                 ha->hw.flags.init_intr_cnxt = 0;
2378         }
2379
2380         if (ha->hw.enable_soft_lro) {
2381                 qla_drain_soft_lro(ha);
2382                 qla_free_soft_lro(ha);
2383         }
2384
2385         return;
2386 }
2387
2388 void
2389 qla_confirm_9kb_enable(qla_host_t *ha)
2390 {
2391         uint32_t supports_9kb = 0;
2392
2393         ha->hw.mbx_intr_mask_offset = READ_REG32(ha, Q8_MBOX_INT_MASK_MSIX);
2394
2395         /* Use MSI-X vector 0; Enable Firmware Mailbox Interrupt */
2396         WRITE_REG32(ha, Q8_MBOX_INT_ENABLE, BIT_2);
2397         WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
2398
2399         qla_get_nic_partition(ha, &supports_9kb, NULL);
2400
2401         if (!supports_9kb)
2402                 ha->hw.enable_9kb = 0;
2403
2404         return;
2405 }
2406
2407 /*
2408  * Name: ql_init_hw_if
2409  * Function: Creates the hardware-specific entities corresponding to an
2410  *      Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
2411  *      corresponding to the interface. Enables LRO if allowed.
2412  */
2413 int
2414 ql_init_hw_if(qla_host_t *ha)
2415 {
2416         device_t        dev;
2417         uint32_t        i;
2418         uint8_t         bcast_mac[6];
2419         qla_rdesc_t     *rdesc;
2420         uint32_t        num_msix;
2421
2422         dev = ha->pci_dev;
2423
2424         for (i = 0; i < ha->hw.num_sds_rings; i++) {
2425                 bzero(ha->hw.dma_buf.sds_ring[i].dma_b,
2426                         ha->hw.dma_buf.sds_ring[i].size);
2427         }
2428
2429         for (i = 0; i < ha->hw.num_sds_rings; ) {
2430
2431                 if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2432                         num_msix = Q8_MAX_INTR_VECTORS;
2433                 else
2434                         num_msix = ha->hw.num_sds_rings - i;
2435
2436                 if (qla_config_intr_cntxt(ha, i, num_msix, 1)) {
2437
2438                         if (i > 0) {
2439
2440                                 num_msix = i;
2441
2442                                 for (i = 0; i < num_msix; ) {
2443                                         qla_config_intr_cntxt(ha, i,
2444                                                 Q8_MAX_INTR_VECTORS, 0);
2445                                         i += Q8_MAX_INTR_VECTORS;
2446                                 }
2447                         }
2448                         return (-1);
2449                 }
2450
2451                 i = i + num_msix;
2452         }
2453
2454         ha->hw.flags.init_intr_cnxt = 1;
2455
2456         /*
2457          * Create Receive Context
2458          */
2459         if (qla_init_rcv_cntxt(ha)) {
2460                 return (-1);
2461         }
2462
2463         for (i = 0; i < ha->hw.num_rds_rings; i++) {
2464                 rdesc = &ha->hw.rds[i];
2465                 rdesc->rx_next = NUM_RX_DESCRIPTORS - 2;
2466                 rdesc->rx_in = 0;
2467                 /* Update the RDS Producer Indices */
2468                 QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,\
2469                         rdesc->rx_next);
2470         }
2471
2472
2473         /*
2474          * Create Transmit Context
2475          */
2476         if (qla_init_xmt_cntxt(ha)) {
2477                 qla_del_rcv_cntxt(ha);
2478                 return (-1);
2479         }
2480         ha->hw.max_tx_segs = 0;
2481
2482         if (qla_config_mac_addr(ha, ha->hw.mac_addr, 1, 1))
2483                 return(-1);
2484
2485         ha->hw.flags.unicast_mac = 1;
2486
2487         bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
2488         bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
2489
2490         if (qla_config_mac_addr(ha, bcast_mac, 1, 1))
2491                 return (-1);
2492
2493         ha->hw.flags.bcast_mac = 1;
2494
2495         /*
2496          * program any cached multicast addresses
2497          */
2498         if (qla_hw_add_all_mcast(ha))
2499                 return (-1);
2500
2501         if (qla_config_rss(ha, ha->hw.rcv_cntxt_id))
2502                 return (-1);
2503
2504         if (qla_config_rss_ind_table(ha))
2505                 return (-1);
2506
2507         if (qla_config_intr_coalesce(ha, ha->hw.rcv_cntxt_id, 0, 1))
2508                 return (-1);
2509
2510         if (qla_link_event_req(ha, ha->hw.rcv_cntxt_id))
2511                 return (-1);
2512
2513         if (ha->ifp->if_capenable & IFCAP_LRO) {
2514                 if (ha->hw.enable_hw_lro) {
2515                         ha->hw.enable_soft_lro = 0;
2516
2517                         if (qla_config_fw_lro(ha, ha->hw.rcv_cntxt_id))
2518                                 return (-1);
2519                 } else {
2520                         ha->hw.enable_soft_lro = 1;
2521
2522                         if (qla_config_soft_lro(ha))
2523                                 return (-1);
2524                 }
2525         }
2526
2527         if (qla_init_nic_func(ha))
2528                 return (-1);
2529
2530         if (qla_query_fw_dcbx_caps(ha))
2531                 return (-1);
2532
2533         for (i = 0; i < ha->hw.num_sds_rings; i++)
2534                 QL_ENABLE_INTERRUPTS(ha, i);
2535
2536         return (0);
2537 }
2538
2539 static int
2540 qla_map_sds_to_rds(qla_host_t *ha, uint32_t start_idx, uint32_t num_idx)
2541 {
2542         device_t                dev = ha->pci_dev;
2543         q80_rq_map_sds_to_rds_t *map_rings;
2544         q80_rsp_map_sds_to_rds_t *map_rings_rsp;
2545         uint32_t                i, err;
2546         qla_hw_t                *hw = &ha->hw;
2547
2548         map_rings = (q80_rq_map_sds_to_rds_t *)ha->hw.mbox;
2549         bzero(map_rings, sizeof(q80_rq_map_sds_to_rds_t));
2550
2551         map_rings->opcode = Q8_MBX_MAP_SDS_TO_RDS;
2552         map_rings->count_version = (sizeof (q80_rq_map_sds_to_rds_t) >> 2);
2553         map_rings->count_version |= Q8_MBX_CMD_VERSION;
2554
2555         map_rings->cntxt_id = hw->rcv_cntxt_id;
2556         map_rings->num_rings = num_idx;
2557
2558         for (i = 0; i < num_idx; i++) {
2559                 map_rings->sds_rds[i].sds_ring = i + start_idx;
2560                 map_rings->sds_rds[i].rds_ring = i + start_idx;
2561         }
2562
2563         if (qla_mbx_cmd(ha, (uint32_t *)map_rings,
2564                 (sizeof (q80_rq_map_sds_to_rds_t) >> 2),
2565                 ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
2566                 device_printf(dev, "%s: failed0\n", __func__);
2567                 return (-1);
2568         }
2569
2570         map_rings_rsp = (q80_rsp_map_sds_to_rds_t *)ha->hw.mbox;
2571
2572         err = Q8_MBX_RSP_STATUS(map_rings_rsp->regcnt_status);
2573
2574         if (err) {
2575                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2576                 return (-1);
2577         }
2578
2579         return (0);
2580 }
2581
2582 /*
2583  * Name: qla_init_rcv_cntxt
2584  * Function: Creates the Receive Context.
2585  */
2586 static int
2587 qla_init_rcv_cntxt(qla_host_t *ha)
2588 {
2589         q80_rq_rcv_cntxt_t      *rcntxt;
2590         q80_rsp_rcv_cntxt_t     *rcntxt_rsp;
2591         q80_stat_desc_t         *sdesc;
2592         int                     i, j;
2593         qla_hw_t                *hw = &ha->hw;
2594         device_t                dev;
2595         uint32_t                err;
2596         uint32_t                rcntxt_sds_rings;
2597         uint32_t                rcntxt_rds_rings;
2598         uint32_t                max_idx;
2599
2600         dev = ha->pci_dev;
2601
2602         /*
2603          * Create Receive Context
2604          */
2605
2606         for (i = 0; i < hw->num_sds_rings; i++) {
2607                 sdesc = (q80_stat_desc_t *)&hw->sds[i].sds_ring_base[0];
2608
2609                 for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++) {
2610                         sdesc->data[0] = 1ULL;
2611                         sdesc->data[1] = 1ULL;
2612                 }
2613         }
2614
2615         rcntxt_sds_rings = hw->num_sds_rings;
2616         if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS)
2617                 rcntxt_sds_rings = MAX_RCNTXT_SDS_RINGS;
2618
2619         rcntxt_rds_rings = hw->num_rds_rings;
2620
2621         if (hw->num_rds_rings > MAX_RDS_RING_SETS)
2622                 rcntxt_rds_rings = MAX_RDS_RING_SETS;
2623
2624         rcntxt = (q80_rq_rcv_cntxt_t *)ha->hw.mbox;
2625         bzero(rcntxt, (sizeof (q80_rq_rcv_cntxt_t)));
2626
2627         rcntxt->opcode = Q8_MBX_CREATE_RX_CNTXT;
2628         rcntxt->count_version = (sizeof (q80_rq_rcv_cntxt_t) >> 2);
2629         rcntxt->count_version |= Q8_MBX_CMD_VERSION;
2630
2631         rcntxt->cap0 = Q8_RCV_CNTXT_CAP0_BASEFW |
2632                         Q8_RCV_CNTXT_CAP0_LRO |
2633                         Q8_RCV_CNTXT_CAP0_HW_LRO |
2634                         Q8_RCV_CNTXT_CAP0_RSS |
2635                         Q8_RCV_CNTXT_CAP0_SGL_LRO;
2636
2637         if (ha->hw.enable_9kb)
2638                 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SINGLE_JUMBO;
2639         else
2640                 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SGL_JUMBO;
2641
2642         if (ha->hw.num_rds_rings > 1) {
2643                 rcntxt->nrds_sets_rings = rcntxt_rds_rings | (1 << 5);
2644                 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_MULTI_RDS;
2645         } else
2646                 rcntxt->nrds_sets_rings = 0x1 | (1 << 5);
2647
2648         rcntxt->nsds_rings = rcntxt_sds_rings;
2649
2650         rcntxt->rds_producer_mode = Q8_RCV_CNTXT_RDS_PROD_MODE_UNIQUE;
2651
2652         rcntxt->rcv_vpid = 0;
2653
2654         for (i = 0; i <  rcntxt_sds_rings; i++) {
2655                 rcntxt->sds[i].paddr =
2656                         qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr);
2657                 rcntxt->sds[i].size =
2658                         qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
2659                 rcntxt->sds[i].intr_id = qla_host_to_le16(hw->intr_id[i]);
2660                 rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0);
2661         }
2662
2663         for (i = 0; i <  rcntxt_rds_rings; i++) {
2664                 rcntxt->rds[i].paddr_std =
2665                         qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr);
2666
2667                 if (ha->hw.enable_9kb)
2668                         rcntxt->rds[i].std_bsize =
2669                                 qla_host_to_le64(MJUM9BYTES);
2670                 else
2671                         rcntxt->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
2672
2673                 rcntxt->rds[i].std_nentries =
2674                         qla_host_to_le32(NUM_RX_DESCRIPTORS);
2675         }
2676
2677         if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
2678                 (sizeof (q80_rq_rcv_cntxt_t) >> 2),
2679                 ha->hw.mbox, (sizeof(q80_rsp_rcv_cntxt_t) >> 2), 0)) {
2680                 device_printf(dev, "%s: failed0\n", __func__);
2681                 return (-1);
2682         }
2683
2684         rcntxt_rsp = (q80_rsp_rcv_cntxt_t *)ha->hw.mbox;
2685
2686         err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
2687
2688         if (err) {
2689                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2690                 return (-1);
2691         }
2692
2693         for (i = 0; i <  rcntxt_sds_rings; i++) {
2694                 hw->sds[i].sds_consumer = rcntxt_rsp->sds_cons[i];
2695         }
2696
2697         for (i = 0; i <  rcntxt_rds_rings; i++) {
2698                 hw->rds[i].prod_std = rcntxt_rsp->rds[i].prod_std;
2699         }
2700
2701         hw->rcv_cntxt_id = rcntxt_rsp->cntxt_id;
2702
2703         ha->hw.flags.init_rx_cnxt = 1;
2704
2705         if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) {
2706
2707                 for (i = MAX_RCNTXT_SDS_RINGS; i < hw->num_sds_rings;) {
2708
2709                         if ((i + MAX_RCNTXT_SDS_RINGS) < hw->num_sds_rings)
2710                                 max_idx = MAX_RCNTXT_SDS_RINGS;
2711                         else
2712                                 max_idx = hw->num_sds_rings - i;
2713
2714                         err = qla_add_rcv_rings(ha, i, max_idx);
2715                         if (err)
2716                                 return -1;
2717
2718                         i += max_idx;
2719                 }
2720         }
2721
2722         if (hw->num_rds_rings > 1) {
2723
2724                 for (i = 0; i < hw->num_rds_rings; ) {
2725
2726                         if ((i + MAX_SDS_TO_RDS_MAP) < hw->num_rds_rings)
2727                                 max_idx = MAX_SDS_TO_RDS_MAP;
2728                         else
2729                                 max_idx = hw->num_rds_rings - i;
2730
2731                         err = qla_map_sds_to_rds(ha, i, max_idx);
2732                         if (err)
2733                                 return -1;
2734
2735                         i += max_idx;
2736                 }
2737         }
2738
2739         return (0);
2740 }
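/*
 * Worked example (illustration only, with made-up ring counts): if
 * hw->num_sds_rings were 20 and MAX_RCNTXT_SDS_RINGS were 8, the create
 * command above would carry the first 8 rings and qla_add_rcv_rings()
 * would then be issued twice more, for rings 8-15 and 16-19.  The 1:1
 * SDS-to-RDS mapping is chunked the same way, MAX_SDS_TO_RDS_MAP entries
 * per qla_map_sds_to_rds() mailbox command.
 */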
2741
2742 static int
2743 qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds)
2744 {
2745         device_t                dev = ha->pci_dev;
2746         q80_rq_add_rcv_rings_t  *add_rcv;
2747         q80_rsp_add_rcv_rings_t *add_rcv_rsp;
2748         uint32_t                i,j, err;
2749         qla_hw_t                *hw = &ha->hw;
2750
2751         add_rcv = (q80_rq_add_rcv_rings_t *)ha->hw.mbox;
2752         bzero(add_rcv, sizeof (q80_rq_add_rcv_rings_t));
2753
2754         add_rcv->opcode = Q8_MBX_ADD_RX_RINGS;
2755         add_rcv->count_version = (sizeof (q80_rq_add_rcv_rings_t) >> 2);
2756         add_rcv->count_version |= Q8_MBX_CMD_VERSION;
2757
2758         add_rcv->nrds_sets_rings = nsds | (1 << 5);
2759         add_rcv->nsds_rings = nsds;
2760         add_rcv->cntxt_id = hw->rcv_cntxt_id;
2761
2762         for (i = 0; i <  nsds; i++) {
2763
2764                 j = i + sds_idx;
2765
2766                 add_rcv->sds[i].paddr =
2767                         qla_host_to_le64(hw->dma_buf.sds_ring[j].dma_addr);
2768
2769                 add_rcv->sds[i].size =
2770                         qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
2771
2772                 add_rcv->sds[i].intr_id = qla_host_to_le16(hw->intr_id[j]);
2773                 add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0);
2774
2775         }
2776
2777         for (i = 0; (i <  nsds); i++) {
2778                 j = i + sds_idx;
2779
2780                 add_rcv->rds[i].paddr_std =
2781                         qla_host_to_le64(hw->dma_buf.rds_ring[j].dma_addr);
2782
2783                 if (ha->hw.enable_9kb)
2784                         add_rcv->rds[i].std_bsize =
2785                                 qla_host_to_le64(MJUM9BYTES);
2786                 else
2787                         add_rcv->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
2788
2789                 add_rcv->rds[i].std_nentries =
2790                         qla_host_to_le32(NUM_RX_DESCRIPTORS);
2791         }
2792
2793
2794         if (qla_mbx_cmd(ha, (uint32_t *)add_rcv,
2795                 (sizeof (q80_rq_add_rcv_rings_t) >> 2),
2796                 ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
2797                 device_printf(dev, "%s: failed0\n", __func__);
2798                 return (-1);
2799         }
2800
2801         add_rcv_rsp = (q80_rsp_add_rcv_rings_t *)ha->hw.mbox;
2802
2803         err = Q8_MBX_RSP_STATUS(add_rcv_rsp->regcnt_status);
2804
2805         if (err) {
2806                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2807                 return (-1);
2808         }
2809
2810         for (i = 0; i < nsds; i++) {
2811                 hw->sds[(i + sds_idx)].sds_consumer = add_rcv_rsp->sds_cons[i];
2812         }
2813
2814         for (i = 0; i < nsds; i++) {
2815                 hw->rds[(i + sds_idx)].prod_std = add_rcv_rsp->rds[i].prod_std;
2816         }
2817
2818         return (0);
2819 }
2820
2821 /*
2822  * Name: qla_del_rcv_cntxt
2823  * Function: Destroys the Receive Context.
2824  */
2825 static void
2826 qla_del_rcv_cntxt(qla_host_t *ha)
2827 {
2828         device_t                        dev = ha->pci_dev;
2829         q80_rcv_cntxt_destroy_t         *rcntxt;
2830         q80_rcv_cntxt_destroy_rsp_t     *rcntxt_rsp;
2831         uint32_t                        err;
2832         uint8_t                         bcast_mac[6];
2833
2834         if (!ha->hw.flags.init_rx_cnxt)
2835                 return;
2836
2837         if (qla_hw_del_all_mcast(ha))
2838                 return;
2839
2840         if (ha->hw.flags.bcast_mac) {
2841
2842                 bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
2843                 bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
2844
2845                 if (qla_config_mac_addr(ha, bcast_mac, 0, 1))
2846                         return;
2847                 ha->hw.flags.bcast_mac = 0;
2848
2849         }
2850
2851         if (ha->hw.flags.unicast_mac) {
2852                 if (qla_config_mac_addr(ha, ha->hw.mac_addr, 0, 1))
2853                         return;
2854                 ha->hw.flags.unicast_mac = 0;
2855         }
2856
2857         rcntxt = (q80_rcv_cntxt_destroy_t *)ha->hw.mbox;
2858         bzero(rcntxt, (sizeof (q80_rcv_cntxt_destroy_t)));
2859
2860         rcntxt->opcode = Q8_MBX_DESTROY_RX_CNTXT;
2861         rcntxt->count_version = (sizeof (q80_rcv_cntxt_destroy_t) >> 2);
2862         rcntxt->count_version |= Q8_MBX_CMD_VERSION;
2863
2864         rcntxt->cntxt_id = ha->hw.rcv_cntxt_id;
2865
2866         if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
2867                 (sizeof (q80_rcv_cntxt_destroy_t) >> 2),
2868                 ha->hw.mbox, (sizeof(q80_rcv_cntxt_destroy_rsp_t) >> 2), 0)) {
2869                 device_printf(dev, "%s: failed0\n", __func__);
2870                 return;
2871         }
2872         rcntxt_rsp = (q80_rcv_cntxt_destroy_rsp_t *)ha->hw.mbox;
2873
2874         err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
2875
2876         if (err) {
2877                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2878         }
2879
2880         ha->hw.flags.init_rx_cnxt = 0;
2881         return;
2882 }
2883
2884 /*
2885  * Name: qla_init_xmt_cntxt
2886  * Function: Creates the Transmit Context.
2887  */
2888 static int
2889 qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
2890 {
2891         device_t                dev;
2892         qla_hw_t                *hw = &ha->hw;
2893         q80_rq_tx_cntxt_t       *tcntxt;
2894         q80_rsp_tx_cntxt_t      *tcntxt_rsp;
2895         uint32_t                err;
2896         qla_hw_tx_cntxt_t       *hw_tx_cntxt;
2897         uint32_t                intr_idx;
2898
2899         hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
2900
2901         dev = ha->pci_dev;
2902
2903         /*
2904          * Create Transmit Context
2905          */
2906         tcntxt = (q80_rq_tx_cntxt_t *)ha->hw.mbox;
2907         bzero(tcntxt, (sizeof (q80_rq_tx_cntxt_t)));
2908
2909         tcntxt->opcode = Q8_MBX_CREATE_TX_CNTXT;
2910         tcntxt->count_version = (sizeof (q80_rq_tx_cntxt_t) >> 2);
2911         tcntxt->count_version |= Q8_MBX_CMD_VERSION;
2912
2913         intr_idx = txr_idx;
2914
2915 #ifdef QL_ENABLE_ISCSI_TLV
2916
2917         tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO |
2918                                 Q8_TX_CNTXT_CAP0_TC;
2919
2920         if (txr_idx >= (ha->hw.num_tx_rings >> 1)) {
2921                 tcntxt->traffic_class = 1;
2922         }
2923
2924         intr_idx = txr_idx % (ha->hw.num_tx_rings >> 1);
2925
2926 #else
2927         tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO;
2928
2929 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
2930
2931         tcntxt->ntx_rings = 1;
2932
2933         tcntxt->tx_ring[0].paddr =
2934                 qla_host_to_le64(hw_tx_cntxt->tx_ring_paddr);
2935         tcntxt->tx_ring[0].tx_consumer =
2936                 qla_host_to_le64(hw_tx_cntxt->tx_cons_paddr);
2937         tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS);
2938
2939         tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[intr_idx]);
2940         tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0);
2941
2942         hw_tx_cntxt->txr_free = NUM_TX_DESCRIPTORS;
2943         hw_tx_cntxt->txr_next = hw_tx_cntxt->txr_comp = 0;
2944
2945         if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
2946                 (sizeof (q80_rq_tx_cntxt_t) >> 2),
2947                 ha->hw.mbox,
2948                 (sizeof(q80_rsp_tx_cntxt_t) >> 2), 0)) {
2949                 device_printf(dev, "%s: failed0\n", __func__);
2950                 return (-1);
2951         }
2952         tcntxt_rsp = (q80_rsp_tx_cntxt_t *)ha->hw.mbox;
2953
2954         err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
2955
2956         if (err) {
2957                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2958                 return -1;
2959         }
2960
2961         hw_tx_cntxt->tx_prod_reg = tcntxt_rsp->tx_ring[0].prod_index;
2962         hw_tx_cntxt->tx_cntxt_id = tcntxt_rsp->tx_ring[0].cntxt_id;
2963
2964         if (qla_config_intr_coalesce(ha, hw_tx_cntxt->tx_cntxt_id, 0, 0))
2965                 return (-1);
2966
2967         return (0);
2968 }
2969
2970
2971 /*
2972  * Name: qla_del_xmt_cntxt_i
2973  * Function: Destroys the Transmit Context for the given transmit ring.
2974  */
2975 static int
2976 qla_del_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
2977 {
2978         device_t                        dev = ha->pci_dev;
2979         q80_tx_cntxt_destroy_t          *tcntxt;
2980         q80_tx_cntxt_destroy_rsp_t      *tcntxt_rsp;
2981         uint32_t                        err;
2982
2983         tcntxt = (q80_tx_cntxt_destroy_t *)ha->hw.mbox;
2984         bzero(tcntxt, (sizeof (q80_tx_cntxt_destroy_t)));
2985
2986         tcntxt->opcode = Q8_MBX_DESTROY_TX_CNTXT;
2987         tcntxt->count_version = (sizeof (q80_tx_cntxt_destroy_t) >> 2);
2988         tcntxt->count_version |= Q8_MBX_CMD_VERSION;
2989
2990         tcntxt->cntxt_id = ha->hw.tx_cntxt[txr_idx].tx_cntxt_id;
2991
2992         if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
2993                 (sizeof (q80_tx_cntxt_destroy_t) >> 2),
2994                 ha->hw.mbox, (sizeof (q80_tx_cntxt_destroy_rsp_t) >> 2), 0)) {
2995                 device_printf(dev, "%s: failed0\n", __func__);
2996                 return (-1);
2997         }
2998         tcntxt_rsp = (q80_tx_cntxt_destroy_rsp_t *)ha->hw.mbox;
2999
3000         err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
3001
3002         if (err) {
3003                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3004                 return (-1);
3005         }
3006
3007         return (0);
3008 }
3009 static void
3010 qla_del_xmt_cntxt(qla_host_t *ha)
3011 {
3012         uint32_t i;
3013
3014         if (!ha->hw.flags.init_tx_cnxt)
3015                 return;
3016
3017         for (i = 0; i < ha->hw.num_tx_rings; i++) {
3018                 if (qla_del_xmt_cntxt_i(ha, i))
3019                         break;
3020         }
3021         ha->hw.flags.init_tx_cnxt = 0;
3022 }
3023
3024 static int
3025 qla_init_xmt_cntxt(qla_host_t *ha)
3026 {
3027         uint32_t i, j;
3028
3029         for (i = 0; i < ha->hw.num_tx_rings; i++) {
3030                 if (qla_init_xmt_cntxt_i(ha, i) != 0) {
3031                         for (j = 0; j < i; j++)
3032                                 qla_del_xmt_cntxt_i(ha, j);
3033                         return (-1);
3034                 }
3035         }
3036         ha->hw.flags.init_tx_cnxt = 1;
3037         return (0);
3038 }
3039
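     /*
      * Name: qla_hw_all_mcast
      * Function: Adds (add_mcast != 0) or deletes (add_mcast == 0) all
      *      currently cached multicast addresses into the hardware,
      *      programming them in batches of up to Q8_MAX_MAC_ADDRS per
      *      mailbox command.
      */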
3040 static int
3041 qla_hw_all_mcast(qla_host_t *ha, uint32_t add_mcast)
3042 {
3043         int i, nmcast;
3044         uint32_t count = 0;
3045         uint8_t *mcast;
3046
3047         nmcast = ha->hw.nmcast;
3048
3049         QL_DPRINT2(ha, (ha->pci_dev,
3050                 "%s:[0x%x] enter nmcast = %d \n", __func__, add_mcast, nmcast));
3051
3052         mcast = ha->hw.mac_addr_arr;
3053         memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3054
3055         for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
3056                 if ((ha->hw.mcast[i].addr[0] != 0) || 
3057                         (ha->hw.mcast[i].addr[1] != 0) ||
3058                         (ha->hw.mcast[i].addr[2] != 0) ||
3059                         (ha->hw.mcast[i].addr[3] != 0) ||
3060                         (ha->hw.mcast[i].addr[4] != 0) ||
3061                         (ha->hw.mcast[i].addr[5] != 0)) {
3062
3063                         bcopy(ha->hw.mcast[i].addr, mcast, ETHER_ADDR_LEN);
3064                         mcast = mcast + ETHER_ADDR_LEN;
3065                         count++;
3066                         
3067                         if (count == Q8_MAX_MAC_ADDRS) {
3068                                 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr,
3069                                         add_mcast, count)) {
3070                                         device_printf(ha->pci_dev,
3071                                                 "%s: failed\n", __func__);
3072                                         return (-1);
3073                                 }
3074
3075                                 count = 0;
3076                                 mcast = ha->hw.mac_addr_arr;
3077                                 memset(mcast, 0,
3078                                         (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3079                         }
3080
3081                         nmcast--;
3082                 }
3083         }
3084
3085         if (count) {
3086                 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mcast,
3087                         count)) {
3088                         device_printf(ha->pci_dev, "%s: failed\n", __func__);
3089                         return (-1);
3090                 }
3091         }
3092         QL_DPRINT2(ha, (ha->pci_dev,
3093                 "%s:[0x%x] exit nmcast = %d \n", __func__, add_mcast, nmcast));
3094
3095         return 0;
3096 }
3097
3098 static int
3099 qla_hw_add_all_mcast(qla_host_t *ha)
3100 {
3101         int ret;
3102
3103         ret = qla_hw_all_mcast(ha, 1);
3104
3105         return (ret);
3106 }
3107
3108 static int
3109 qla_hw_del_all_mcast(qla_host_t *ha)
3110 {
3111         int ret;
3112
3113         ret = qla_hw_all_mcast(ha, 0);
3114
3115         bzero(ha->hw.mcast, (sizeof (qla_mcast_t) * Q8_MAX_NUM_MULTICAST_ADDRS));
3116         ha->hw.nmcast = 0;
3117
3118         return (ret);
3119 }
3120
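     /*
      * Name: qla_hw_mac_addr_present
      * Function: Returns 0 if the given MAC address is already present in the
      *      multicast table, -1 otherwise.
      */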
3121 static int
3122 qla_hw_mac_addr_present(qla_host_t *ha, uint8_t *mta)
3123 {
3124         int i;
3125
3126         for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3127                 if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0)
3128                         return (0); /* it's already been added */
3129         }
3130         return (-1);
3131 }
3132
3133 static int
3134 qla_hw_add_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
3135 {
3136         int i;
3137
3138         for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3139
3140                 if ((ha->hw.mcast[i].addr[0] == 0) && 
3141                         (ha->hw.mcast[i].addr[1] == 0) &&
3142                         (ha->hw.mcast[i].addr[2] == 0) &&
3143                         (ha->hw.mcast[i].addr[3] == 0) &&
3144                         (ha->hw.mcast[i].addr[4] == 0) &&
3145                         (ha->hw.mcast[i].addr[5] == 0)) {
3146
3147                         bcopy(mta, ha->hw.mcast[i].addr, Q8_MAC_ADDR_LEN);
3148                         ha->hw.nmcast++;        
3149
3150                         mta = mta + ETHER_ADDR_LEN;
3151                         nmcast--;
3152
3153                         if (nmcast == 0)
3154                                 break;
3155                 }
3156
3157         }
3158         return 0;
3159 }
3160
3161 static int
3162 qla_hw_del_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
3163 {
3164         int i;
3165
3166         for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3167                 if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) {
3168
3169                         ha->hw.mcast[i].addr[0] = 0;
3170                         ha->hw.mcast[i].addr[1] = 0;
3171                         ha->hw.mcast[i].addr[2] = 0;
3172                         ha->hw.mcast[i].addr[3] = 0;
3173                         ha->hw.mcast[i].addr[4] = 0;
3174                         ha->hw.mcast[i].addr[5] = 0;
3175
3176                         ha->hw.nmcast--;        
3177
3178                         mta = mta + ETHER_ADDR_LEN;
3179                         nmcast--;
3180
3181                         if (nmcast == 0)
3182                                 break;
3183                 }
3184         }
3185         return 0;
3186 }
3187
3188 /*
3189  * Name: ql_hw_set_multi
3190  * Function: Sets the Multicast Addresses provided by the host O.S into the
3191  *      hardware (for the given interface)
3192  */
3193 int
3194 ql_hw_set_multi(qla_host_t *ha, uint8_t *mcast_addr, uint32_t mcnt,
3195         uint32_t add_mac)
3196 {
3197         uint8_t *mta = mcast_addr;
3198         int i;
3199         int ret = 0;
3200         uint32_t count = 0;
3201         uint8_t *mcast;
3202
3203         mcast = ha->hw.mac_addr_arr;
3204         memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3205
3206         for (i = 0; i < mcnt; i++) {
3207                 if (mta[0] || mta[1] || mta[2] || mta[3] || mta[4] || mta[5]) {
3208                         if (add_mac) {
3209                                 if (qla_hw_mac_addr_present(ha, mta) != 0) {
3210                                         bcopy(mta, mcast, ETHER_ADDR_LEN);
3211                                         mcast = mcast + ETHER_ADDR_LEN;
3212                                         count++;
3213                                 }
3214                         } else {
3215                                 if (qla_hw_mac_addr_present(ha, mta) == 0) {
3216                                         bcopy(mta, mcast, ETHER_ADDR_LEN);
3217                                         mcast = mcast + ETHER_ADDR_LEN;
3218                                         count++;
3219                                 }
3220                         }
3221                 }
3222                 if (count == Q8_MAX_MAC_ADDRS) {
3223                         if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr,
3224                                 add_mac, count)) {
3225                                 device_printf(ha->pci_dev, "%s: failed\n",
3226                                         __func__);
3227                                 return (-1);
3228                         }
3229
3230                         if (add_mac) {
3231                                 qla_hw_add_mcast(ha, ha->hw.mac_addr_arr,
3232                                         count);
3233                         } else {
3234                                 qla_hw_del_mcast(ha, ha->hw.mac_addr_arr,
3235                                         count);
3236                         }
3237
3238                         count = 0;
3239                         mcast = ha->hw.mac_addr_arr;
3240                         memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3241                 }
3242                         
3243                 mta += Q8_MAC_ADDR_LEN;
3244         }
3245
3246         if (count) {
3247                 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mac,
3248                         count)) {
3249                         device_printf(ha->pci_dev, "%s: failed\n", __func__);
3250                         return (-1);
3251                 }
3252                 if (add_mac) {
3253                         qla_hw_add_mcast(ha, ha->hw.mac_addr_arr, count);
3254                 } else {
3255                         qla_hw_del_mcast(ha, ha->hw.mac_addr_arr, count);
3256                 }
3257         }
3258
3259         return (ret);
3260 }
3261
3262 /*
3263  * Name: ql_hw_tx_done_locked
3264  * Function: Handle Transmit Completions
3265  */
3266 void
3267 ql_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx)
3268 {
3269         qla_tx_buf_t *txb;
3270         qla_hw_t *hw = &ha->hw;
3271         uint32_t comp_idx, comp_count = 0;
3272         qla_hw_tx_cntxt_t *hw_tx_cntxt;
3273
3274         hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
3275
3276         /* retrieve index of last entry in tx ring completed */
3277         comp_idx = qla_le32_to_host(*(hw_tx_cntxt->tx_cons));
3278
3279         while (comp_idx != hw_tx_cntxt->txr_comp) {
3280
3281                 txb = &ha->tx_ring[txr_idx].tx_buf[hw_tx_cntxt->txr_comp];
3282
3283                 hw_tx_cntxt->txr_comp++;
3284                 if (hw_tx_cntxt->txr_comp == NUM_TX_DESCRIPTORS)
3285                         hw_tx_cntxt->txr_comp = 0;
3286
3287                 comp_count++;
3288
3289                 if (txb->m_head) {
3290                         ha->ifp->if_opackets++;
3291
3292                         bus_dmamap_sync(ha->tx_tag, txb->map,
3293                                 BUS_DMASYNC_POSTWRITE);
3294                         bus_dmamap_unload(ha->tx_tag, txb->map);
3295                         m_freem(txb->m_head);
3296
3297                         txb->m_head = NULL;
3298                 }
3299         }
3300
3301         hw_tx_cntxt->txr_free += comp_count;
3302         return;
3303 }
3304
3305 void
3306 ql_update_link_state(qla_host_t *ha)
3307 {
3308         uint32_t link_state;
3309         uint32_t prev_link_state;
3310
3311         if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3312                 ha->hw.link_up = 0;
3313                 return;
3314         }
3315         link_state = READ_REG32(ha, Q8_LINK_STATE);
3316
3317         prev_link_state =  ha->hw.link_up;
3318
3319         if (ha->pci_func == 0) 
3320                 ha->hw.link_up = (((link_state & 0xF) == 1)? 1 : 0);
3321         else
3322                 ha->hw.link_up = ((((link_state >> 4)& 0xF) == 1)? 1 : 0);
3323
3324         if (prev_link_state !=  ha->hw.link_up) {
3325                 if (ha->hw.link_up) {
3326                         if_link_state_change(ha->ifp, LINK_STATE_UP);
3327                 } else {
3328                         if_link_state_change(ha->ifp, LINK_STATE_DOWN);
3329                 }
3330         }
3331         return;
3332 }
3333
3334 void
3335 ql_hw_stop_rcv(qla_host_t *ha)
3336 {
3337         int i, done, count = 100;
3338
3339         ha->flags.stop_rcv = 1;
3340
3341         while (count) {
3342                 done = 1;
3343                 for (i = 0; i < ha->hw.num_sds_rings; i++) {
3344                         if (ha->hw.sds[i].rcv_active)
3345                                 done = 0;
3346                 }
3347                 if (done)
3348                         break;
3349                 else 
3350                         qla_mdelay(__func__, 10);
3351                 count--;
3352         }
3353         if (!count)
3354                 device_printf(ha->pci_dev, "%s: Counter expired.\n", __func__);
3355
3356         return;
3357 }
3358
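     /*
      * Name: ql_hw_check_health
      * Function: Intended to be called repeatedly; on every 1000th invocation
      *      it samples the ASIC temperature and firmware heartbeat registers
      *      and returns -1 if either indicates a failure, 0 otherwise.
      */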
3359 int
3360 ql_hw_check_health(qla_host_t *ha)
3361 {
3362         uint32_t val;
3363
3364         ha->hw.health_count++;
3365
3366         if (ha->hw.health_count < 1000)
3367                 return 0;
3368
3369         ha->hw.health_count = 0;
3370
3371         val = READ_REG32(ha, Q8_ASIC_TEMPERATURE);
3372
3373         if (((val & 0xFFFF) == 2) || ((val & 0xFFFF) == 3) ||
3374                 (QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE))) {
3375                 device_printf(ha->pci_dev, "%s: Temperature Alert [0x%08x]\n",
3376                         __func__, val);
3377                 return -1;
3378         }
3379
3380         val = READ_REG32(ha, Q8_FIRMWARE_HEARTBEAT);
3381
3382         if ((val != ha->hw.hbeat_value) &&
3383                 (!(QL_ERR_INJECT(ha, INJCT_HEARTBEAT_FAILURE)))) {
3384                 ha->hw.hbeat_value = val;
3385                 return 0;
3386         }
3387         device_printf(ha->pci_dev, "%s: Heartbeat Failure [0x%08x]\n",
3388                 __func__, val);
3389
3390         return -1;
3391 }
3392
3393 static int
3394 qla_init_nic_func(qla_host_t *ha)
3395 {
3396         device_t                dev;
3397         q80_init_nic_func_t     *init_nic;
3398         q80_init_nic_func_rsp_t *init_nic_rsp;
3399         uint32_t                err;
3400
3401         dev = ha->pci_dev;
3402
3403         init_nic = (q80_init_nic_func_t *)ha->hw.mbox;
3404         bzero(init_nic, sizeof(q80_init_nic_func_t));
3405
3406         init_nic->opcode = Q8_MBX_INIT_NIC_FUNC;
3407         init_nic->count_version = (sizeof (q80_init_nic_func_t) >> 2);
3408         init_nic->count_version |= Q8_MBX_CMD_VERSION;
3409
3410         init_nic->options = Q8_INIT_NIC_REG_DCBX_CHNG_AEN;
3411         init_nic->options |= Q8_INIT_NIC_REG_SFP_CHNG_AEN;
3412         init_nic->options |= Q8_INIT_NIC_REG_IDC_AEN;
3413
3414 //qla_dump_buf8(ha, __func__, init_nic, sizeof (q80_init_nic_func_t));
3415         if (qla_mbx_cmd(ha, (uint32_t *)init_nic,
3416                 (sizeof (q80_init_nic_func_t) >> 2),
3417                 ha->hw.mbox, (sizeof (q80_init_nic_func_rsp_t) >> 2), 0)) {
3418                 device_printf(dev, "%s: failed\n", __func__);
3419                 return -1;
3420         }
3421
3422         init_nic_rsp = (q80_init_nic_func_rsp_t *)ha->hw.mbox;
3423 // qla_dump_buf8(ha, __func__, init_nic_rsp, sizeof (q80_init_nic_func_rsp_t));
3424
3425         err = Q8_MBX_RSP_STATUS(init_nic_rsp->regcnt_status);
3426
3427         if (err) {
3428                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3429         }
3430
3431         return 0;
3432 }
3433
3434 static int
3435 qla_stop_nic_func(qla_host_t *ha)
3436 {
3437         device_t                dev;
3438         q80_stop_nic_func_t     *stop_nic;
3439         q80_stop_nic_func_rsp_t *stop_nic_rsp;
3440         uint32_t                err;
3441
3442         dev = ha->pci_dev;
3443
3444         stop_nic = (q80_stop_nic_func_t *)ha->hw.mbox;
3445         bzero(stop_nic, sizeof(q80_stop_nic_func_t));
3446
3447         stop_nic->opcode = Q8_MBX_STOP_NIC_FUNC;
3448         stop_nic->count_version = (sizeof (q80_stop_nic_func_t) >> 2);
3449         stop_nic->count_version |= Q8_MBX_CMD_VERSION;
3450
3451         stop_nic->options = Q8_STOP_NIC_DEREG_DCBX_CHNG_AEN;
3452         stop_nic->options |= Q8_STOP_NIC_DEREG_SFP_CHNG_AEN;
3453
3454 //qla_dump_buf8(ha, __func__, stop_nic, sizeof (q80_stop_nic_func_t));
3455         if (qla_mbx_cmd(ha, (uint32_t *)stop_nic,
3456                 (sizeof (q80_stop_nic_func_t) >> 2),
3457                 ha->hw.mbox, (sizeof (q80_stop_nic_func_rsp_t) >> 2), 0)) {
3458                 device_printf(dev, "%s: failed\n", __func__);
3459                 return -1;
3460         }
3461
3462         stop_nic_rsp = (q80_stop_nic_func_rsp_t *)ha->hw.mbox;
3463 //qla_dump_buf8(ha, __func__, stop_nic_rsp, sizeof (q80_stop_nic_func_rsp_t));
3464
3465         err = Q8_MBX_RSP_STATUS(stop_nic_rsp->regcnt_status);
3466
3467         if (err) {
3468                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3469         }
3470
3471         return 0;
3472 }
3473
3474 static int
3475 qla_query_fw_dcbx_caps(qla_host_t *ha)
3476 {
3477         device_t                        dev;
3478         q80_query_fw_dcbx_caps_t        *fw_dcbx;
3479         q80_query_fw_dcbx_caps_rsp_t    *fw_dcbx_rsp;
3480         uint32_t                        err;
3481
3482         dev = ha->pci_dev;
3483
3484         fw_dcbx = (q80_query_fw_dcbx_caps_t *)ha->hw.mbox;
3485         bzero(fw_dcbx, sizeof(q80_query_fw_dcbx_caps_t));
3486
3487         fw_dcbx->opcode = Q8_MBX_GET_FW_DCBX_CAPS;
3488         fw_dcbx->count_version = (sizeof (q80_query_fw_dcbx_caps_t) >> 2);
3489         fw_dcbx->count_version |= Q8_MBX_CMD_VERSION;
3490
3491         ql_dump_buf8(ha, __func__, fw_dcbx, sizeof (q80_query_fw_dcbx_caps_t));
3492         if (qla_mbx_cmd(ha, (uint32_t *)fw_dcbx,
3493                 (sizeof (q80_query_fw_dcbx_caps_t) >> 2),
3494                 ha->hw.mbox, (sizeof (q80_query_fw_dcbx_caps_rsp_t) >> 2), 0)) {
3495                 device_printf(dev, "%s: failed\n", __func__);
3496                 return -1;
3497         }
3498
3499         fw_dcbx_rsp = (q80_query_fw_dcbx_caps_rsp_t *)ha->hw.mbox;
3500         ql_dump_buf8(ha, __func__, fw_dcbx_rsp,
3501                 sizeof (q80_query_fw_dcbx_caps_rsp_t));
3502
3503         err = Q8_MBX_RSP_STATUS(fw_dcbx_rsp->regcnt_status);
3504
3505         if (err) {
3506                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3507         }
3508
3509         return 0;
3510 }
3511
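     /*
      * Name: qla_idc_ack
      * Function: Acknowledges an IDC (Inter Driver Communication) AEN and then
      *      waits up to 30 seconds (300 x 100ms) for the intermediate
      *      completion (ha->hw.imd_compl) to be signalled.
      */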
3512 static int
3513 qla_idc_ack(qla_host_t *ha, uint32_t aen_mb1, uint32_t aen_mb2,
3514         uint32_t aen_mb3, uint32_t aen_mb4)
3515 {
3516         device_t                dev;
3517         q80_idc_ack_t           *idc_ack;
3518         q80_idc_ack_rsp_t       *idc_ack_rsp;
3519         uint32_t                err;
3520         int                     count = 300;
3521
3522         dev = ha->pci_dev;
3523
3524         idc_ack = (q80_idc_ack_t *)ha->hw.mbox;
3525         bzero(idc_ack, sizeof(q80_idc_ack_t));
3526
3527         idc_ack->opcode = Q8_MBX_IDC_ACK;
3528         idc_ack->count_version = (sizeof (q80_idc_ack_t) >> 2);
3529         idc_ack->count_version |= Q8_MBX_CMD_VERSION;
3530
3531         idc_ack->aen_mb1 = aen_mb1;
3532         idc_ack->aen_mb2 = aen_mb2;
3533         idc_ack->aen_mb3 = aen_mb3;
3534         idc_ack->aen_mb4 = aen_mb4;
3535
3536         ha->hw.imd_compl = 0;
3537
3538         if (qla_mbx_cmd(ha, (uint32_t *)idc_ack,
3539                 (sizeof (q80_idc_ack_t) >> 2),
3540                 ha->hw.mbox, (sizeof (q80_idc_ack_rsp_t) >> 2), 0)) {
3541                 device_printf(dev, "%s: failed\n", __func__);
3542                 return -1;
3543         }
3544
3545         idc_ack_rsp = (q80_idc_ack_rsp_t *)ha->hw.mbox;
3546
3547         err = Q8_MBX_RSP_STATUS(idc_ack_rsp->regcnt_status);
3548
3549         if (err) {
3550                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3551                 return(-1);
3552         }
3553
3554         while (count && !ha->hw.imd_compl) {
3555                 qla_mdelay(__func__, 100);
3556                 count--;
3557         }
3558
3559         if (!count)
3560                 return -1;
3561         else
3562                 device_printf(dev, "%s: count %d\n", __func__, count);
3563
3564         return (0);
3565 }
3566
3567 static int
3568 qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits)
3569 {
3570         device_t                dev;
3571         q80_set_port_cfg_t      *pcfg;
3572         q80_set_port_cfg_rsp_t  *pfg_rsp;
3573         uint32_t                err;
3574         int                     count = 300;
3575
3576         dev = ha->pci_dev;
3577
3578         pcfg = (q80_set_port_cfg_t *)ha->hw.mbox;
3579         bzero(pcfg, sizeof(q80_set_port_cfg_t));
3580
3581         pcfg->opcode = Q8_MBX_SET_PORT_CONFIG;
3582         pcfg->count_version = (sizeof (q80_set_port_cfg_t) >> 2);
3583         pcfg->count_version |= Q8_MBX_CMD_VERSION;
3584
3585         pcfg->cfg_bits = cfg_bits;
3586
3587         device_printf(dev, "%s: cfg_bits"
3588                 " [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
3589                 " [0x%x, 0x%x, 0x%x]\n", __func__,
3590                 ((cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
3591                 ((cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
3592                 ((cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0));
3593
3594         ha->hw.imd_compl = 0;
3595
3596         if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
3597                 (sizeof (q80_set_port_cfg_t) >> 2),
3598                 ha->hw.mbox, (sizeof (q80_set_port_cfg_rsp_t) >> 2), 0)) {
3599                 device_printf(dev, "%s: failed\n", __func__);
3600                 return -1;
3601         }
3602
3603         pfg_rsp = (q80_set_port_cfg_rsp_t *)ha->hw.mbox;
3604
3605         err = Q8_MBX_RSP_STATUS(pfg_rsp->regcnt_status);
3606
3607         if (err == Q8_MBX_RSP_IDC_INTRMD_RSP) {
3608                 while (count && !ha->hw.imd_compl) {
3609                         qla_mdelay(__func__, 100);
3610                         count--;
3611                 }
3612                 if (count) {
3613                         device_printf(dev, "%s: count %d\n", __func__, count);
3614
3615                         err = 0;
3616                 }
3617         }
3618
3619         if (err) {
3620                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3621                 return(-1);
3622         }
3623
3624         return (0);
3625 }
3626
3627
3628 static int
3629 qla_get_minidump_tmplt_size(qla_host_t *ha, uint32_t *size)
3630 {
3631         uint32_t                        err;
3632         device_t                        dev = ha->pci_dev;
3633         q80_config_md_templ_size_t      *md_size;
3634         q80_config_md_templ_size_rsp_t  *md_size_rsp;
3635
3636 #ifndef QL_LDFLASH_FW
3637
3638         ql_minidump_template_hdr_t *hdr;
3639
3640         hdr = (ql_minidump_template_hdr_t *)ql83xx_minidump;
3641         *size = hdr->size_of_template;
3642         return (0);
3643
3644 #endif /* #ifndef QL_LDFLASH_FW */
3645
3646         md_size = (q80_config_md_templ_size_t *) ha->hw.mbox;
3647         bzero(md_size, sizeof(q80_config_md_templ_size_t));
3648
3649         md_size->opcode = Q8_MBX_GET_MINIDUMP_TMPLT_SIZE;
3650         md_size->count_version = (sizeof (q80_config_md_templ_size_t) >> 2);
3651         md_size->count_version |= Q8_MBX_CMD_VERSION;
3652
3653         if (qla_mbx_cmd(ha, (uint32_t *) md_size,
3654                 (sizeof(q80_config_md_templ_size_t) >> 2), ha->hw.mbox,
3655                 (sizeof(q80_config_md_templ_size_rsp_t) >> 2), 0)) {
3656
3657                 device_printf(dev, "%s: failed\n", __func__);
3658
3659                 return (-1);
3660         }
3661
3662         md_size_rsp = (q80_config_md_templ_size_rsp_t *) ha->hw.mbox;
3663
3664         err = Q8_MBX_RSP_STATUS(md_size_rsp->regcnt_status);
3665
3666         if (err) {
3667                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3668                 return(-1);
3669         }
3670
3671         *size = md_size_rsp->templ_size;
3672
3673         return (0);
3674 }
3675
3676 static int
3677 qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits)
3678 {
3679         device_t                dev;
3680         q80_get_port_cfg_t      *pcfg;
3681         q80_get_port_cfg_rsp_t  *pcfg_rsp;
3682         uint32_t                err;
3683
3684         dev = ha->pci_dev;
3685
3686         pcfg = (q80_get_port_cfg_t *)ha->hw.mbox;
3687         bzero(pcfg, sizeof(q80_get_port_cfg_t));
3688
3689         pcfg->opcode = Q8_MBX_GET_PORT_CONFIG;
3690         pcfg->count_version = (sizeof (q80_get_port_cfg_t) >> 2);
3691         pcfg->count_version |= Q8_MBX_CMD_VERSION;
3692
3693         if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
3694                 (sizeof (q80_get_port_cfg_t) >> 2),
3695                 ha->hw.mbox, (sizeof (q80_get_port_cfg_rsp_t) >> 2), 0)) {
3696                 device_printf(dev, "%s: failed\n", __func__);
3697                 return -1;
3698         }
3699
3700         pcfg_rsp = (q80_get_port_cfg_rsp_t *)ha->hw.mbox;
3701
3702         err = Q8_MBX_RSP_STATUS(pcfg_rsp->regcnt_status);
3703
3704         if (err) {
3705                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3706                 return(-1);
3707         }
3708
3709         device_printf(dev, "%s: [cfg_bits, port type]"
3710                 " [0x%08x, 0x%02x] [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
3711                 " [0x%x, 0x%x, 0x%x]\n", __func__,
3712                 pcfg_rsp->cfg_bits, pcfg_rsp->phys_port_type,
3713                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
3714                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
3715                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0)
3716                 );
3717
3718         *cfg_bits = pcfg_rsp->cfg_bits;
3719
3720         return (0);
3721 }
3722
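     /*
      * Name: ql_iscsi_pdu
      * Function: Returns 0 if the mbuf carries a TCP segment to or from the
      *      well known iSCSI port (3260), -1 otherwise.
      */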
3723 int
3724 ql_iscsi_pdu(qla_host_t *ha, struct mbuf *mp)
3725 {
3726         struct ether_vlan_header        *eh;
3727         uint16_t                        etype;
3728         struct ip                       *ip = NULL;
3729         struct ip6_hdr                  *ip6 = NULL;
3730         struct tcphdr                   *th = NULL;
3731         uint32_t                        hdrlen;
3732         uint32_t                        offset;
3733         uint8_t                         buf[sizeof(struct ip6_hdr)];
3734
3735         eh = mtod(mp, struct ether_vlan_header *);
3736
3737         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3738                 hdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3739                 etype = ntohs(eh->evl_proto);
3740         } else {
3741                 hdrlen = ETHER_HDR_LEN;
3742                 etype = ntohs(eh->evl_encap_proto);
3743         }
3744
3745         if (etype == ETHERTYPE_IP) {
3746
3747                 offset = (hdrlen + sizeof (struct ip));
3748
3749                 if (mp->m_len >= offset) {
3750                         ip = (struct ip *)(mp->m_data + hdrlen);
3751                 } else {
3752                         m_copydata(mp, hdrlen, sizeof (struct ip), buf);
3753                         ip = (struct ip *)buf;
3754                 }
3755
3756                 if (ip->ip_p == IPPROTO_TCP) {
3757
3758                         hdrlen += ip->ip_hl << 2;
3759                         offset = hdrlen + 4;
3760         
3761                         if (mp->m_len >= offset) {
3762                                 th = (struct tcphdr *)(mp->m_data + hdrlen);
3763                         } else {
3764                                 m_copydata(mp, hdrlen, 4, buf);
3765                                 th = (struct tcphdr *)buf;
3766                         }
3767                 }
3768
3769         } else if (etype == ETHERTYPE_IPV6) {
3770
3771                 offset = (hdrlen + sizeof (struct ip6_hdr));
3772
3773                 if (mp->m_len >= offset) {
3774                         ip6 = (struct ip6_hdr *)(mp->m_data + hdrlen);
3775                 } else {
3776                         m_copydata(mp, hdrlen, sizeof (struct ip6_hdr), buf);
3777                         ip6 = (struct ip6_hdr *)buf;
3778                 }
3779
3780                 if (ip6->ip6_nxt == IPPROTO_TCP) {
3781
3782                         hdrlen += sizeof(struct ip6_hdr);
3783                         offset = hdrlen + 4;
3784
3785                         if (mp->m_len >= offset) {
3786                                 th = (struct tcphdr *)(mp->m_data + hdrlen);
3787                         } else {
3788                                 m_copydata(mp, hdrlen, 4, buf);
3789                                 th = (struct tcphdr *)buf;
3790                         }
3791                 }
3792         }
3793
3794         if (th != NULL) {
3795                 if ((th->th_sport == htons(3260)) ||
3796                         (th->th_dport == htons(3260)))
3797                         return 0;
3798         }
3799         return (-1);
3800 }
3801
3802 void
3803 qla_hw_async_event(qla_host_t *ha)
3804 {
3805         switch (ha->hw.aen_mb0) {
3806         case 0x8101:
3807                 (void)qla_idc_ack(ha, ha->hw.aen_mb1, ha->hw.aen_mb2,
3808                         ha->hw.aen_mb3, ha->hw.aen_mb4);
3809
3810                 break;
3811
3812         default:
3813                 break;
3814         }
3815
3816         return;
3817 }
3818
3819 #ifdef QL_LDFLASH_FW
3820 static int
3821 ql_get_minidump_template(qla_host_t *ha)
3822 {
3823         uint32_t                        err;
3824         device_t                        dev = ha->pci_dev;
3825         q80_config_md_templ_cmd_t       *md_templ;
3826         q80_config_md_templ_cmd_rsp_t   *md_templ_rsp;
3827
3828         md_templ = (q80_config_md_templ_cmd_t *) ha->hw.mbox;
3829         bzero(md_templ, (sizeof (q80_config_md_templ_cmd_t)));
3830
3831         md_templ->opcode = Q8_MBX_GET_MINIDUMP_TMPLT;
3832         md_templ->count_version = ( sizeof(q80_config_md_templ_cmd_t) >> 2);
3833         md_templ->count_version |= Q8_MBX_CMD_VERSION;
3834
3835         md_templ->buf_addr = ha->hw.dma_buf.minidump.dma_addr;
3836         md_templ->buff_size = ha->hw.dma_buf.minidump.size;
3837
3838         if (qla_mbx_cmd(ha, (uint32_t *) md_templ,
3839                 (sizeof(q80_config_md_templ_cmd_t) >> 2),
3840                  ha->hw.mbox,
3841                 (sizeof(q80_config_md_templ_cmd_rsp_t) >> 2), 0)) {
3842
3843                 device_printf(dev, "%s: failed\n", __func__);
3844
3845                 return (-1);
3846         }
3847
3848         md_templ_rsp = (q80_config_md_templ_cmd_rsp_t *) ha->hw.mbox;
3849
3850         err = Q8_MBX_RSP_STATUS(md_templ_rsp->regcnt_status);
3851
3852         if (err) {
3853                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3854                 return (-1);
3855         }
3856
3857         return (0);
3858
3859 }
3860 #endif /* #ifdef QL_LDFLASH_FW */
3861
3862 /*
3863  * Minidump related functionality 
3864  */
3865
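     /*
      * Capture flow: ql_minidump_init() obtains and validates the firmware
      * supplied template, ql_minidump() copies it into mdump_template and
      * calls ql_parse_template(), which walks the template entries and
      * dispatches to the ql_rd*()/ql_L*Cache() helpers below to fill
      * ha->hw.mdump_buffer.
      *
      * A minimal usage sketch (not a literal call site in this file):
      *
      *      if (ql_minidump_init(ha) == 0)
      *              ql_minidump(ha);        /* fills ha->hw.mdump_buffer */
      */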
3866 static int ql_parse_template(qla_host_t *ha);
3867
3868 static uint32_t ql_rdcrb(qla_host_t *ha,
3869                         ql_minidump_entry_rdcrb_t *crb_entry,
3870                         uint32_t * data_buff);
3871
3872 static uint32_t ql_pollrd(qla_host_t *ha,
3873                         ql_minidump_entry_pollrd_t *entry,
3874                         uint32_t * data_buff);
3875
3876 static uint32_t ql_pollrd_modify_write(qla_host_t *ha,
3877                         ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
3878                         uint32_t *data_buff);
3879
3880 static uint32_t ql_L2Cache(qla_host_t *ha,
3881                         ql_minidump_entry_cache_t *cacheEntry,
3882                         uint32_t * data_buff);
3883
3884 static uint32_t ql_L1Cache(qla_host_t *ha,
3885                         ql_minidump_entry_cache_t *cacheEntry,
3886                         uint32_t *data_buff);
3887
3888 static uint32_t ql_rdocm(qla_host_t *ha,
3889                         ql_minidump_entry_rdocm_t *ocmEntry,
3890                         uint32_t *data_buff);
3891
3892 static uint32_t ql_rdmem(qla_host_t *ha,
3893                         ql_minidump_entry_rdmem_t *mem_entry,
3894                         uint32_t *data_buff);
3895
3896 static uint32_t ql_rdrom(qla_host_t *ha,
3897                         ql_minidump_entry_rdrom_t *romEntry,
3898                         uint32_t *data_buff);
3899
3900 static uint32_t ql_rdmux(qla_host_t *ha,
3901                         ql_minidump_entry_mux_t *muxEntry,
3902                         uint32_t *data_buff);
3903
3904 static uint32_t ql_rdmux2(qla_host_t *ha,
3905                         ql_minidump_entry_mux2_t *muxEntry,
3906                         uint32_t *data_buff);
3907
3908 static uint32_t ql_rdqueue(qla_host_t *ha,
3909                         ql_minidump_entry_queue_t *queueEntry,
3910                         uint32_t *data_buff);
3911
3912 static uint32_t ql_cntrl(qla_host_t *ha,
3913                         ql_minidump_template_hdr_t *template_hdr,
3914                         ql_minidump_entry_cntrl_t *crbEntry);
3915
3916
3917 static uint32_t
3918 ql_minidump_size(qla_host_t *ha)
3919 {
3920         uint32_t i, k;
3921         uint32_t size = 0;
3922         ql_minidump_template_hdr_t *hdr;
3923
3924         hdr = (ql_minidump_template_hdr_t *)ha->hw.dma_buf.minidump.dma_b;
3925
3926         i = 0x2;
3927
3928         for (k = 1; k < QL_DBG_CAP_SIZE_ARRAY_LEN; k++) {
3929                 if (i & ha->hw.mdump_capture_mask)
3930                         size += hdr->capture_size_array[k];
3931                 i = i << 1;
3932         }
3933         return (size);
3934 }
3935
3936 static void
3937 ql_free_minidump_buffer(qla_host_t *ha)
3938 {
3939         if (ha->hw.mdump_buffer != NULL) {
3940                 free(ha->hw.mdump_buffer, M_QLA83XXBUF);
3941                 ha->hw.mdump_buffer = NULL;
3942                 ha->hw.mdump_buffer_size = 0;
3943         }
3944         return;
3945 }
3946
3947 static int
3948 ql_alloc_minidump_buffer(qla_host_t *ha)
3949 {
3950         ha->hw.mdump_buffer_size = ql_minidump_size(ha);
3951
3952         if (!ha->hw.mdump_buffer_size)
3953                 return (-1);
3954
3955         ha->hw.mdump_buffer = malloc(ha->hw.mdump_buffer_size, M_QLA83XXBUF,
3956                                         M_NOWAIT);
3957
3958         if (ha->hw.mdump_buffer == NULL)
3959                 return (-1);
3960
3961         return (0);
3962 }
3963
3964 static void
3965 ql_free_minidump_template_buffer(qla_host_t *ha)
3966 {
3967         if (ha->hw.mdump_template != NULL) {
3968                 free(ha->hw.mdump_template, M_QLA83XXBUF);
3969                 ha->hw.mdump_template = NULL;
3970                 ha->hw.mdump_template_size = 0;
3971         }
3972         return;
3973 }
3974
3975 static int
3976 ql_alloc_minidump_template_buffer(qla_host_t *ha)
3977 {
3978         ha->hw.mdump_template_size = ha->hw.dma_buf.minidump.size;
3979
3980         ha->hw.mdump_template = malloc(ha->hw.mdump_template_size,
3981                                         M_QLA83XXBUF, M_NOWAIT);
3982
3983         if (ha->hw.mdump_template == NULL)
3984                 return (-1);
3985
3986         return (0);
3987 }
3988
3989 static int
3990 ql_alloc_minidump_buffers(qla_host_t *ha)
3991 {
3992         int ret;
3993
3994         ret = ql_alloc_minidump_template_buffer(ha);
3995
3996         if (ret)
3997                 return (ret);
3998
3999         ret = ql_alloc_minidump_buffer(ha);
4000
4001         if (ret)
4002                 ql_free_minidump_template_buffer(ha);
4003
4004         return (ret);
4005 }
4006
4007
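     /*
      * Name: ql_validate_minidump_checksum
      * Function: Sums the template as 32-bit words, folds the carry bits back
      *      into the low 32 bits and returns the one's complement; a return
      *      value of 0 indicates a valid template.
      *      e.g. sum = 0x123456789 folds to 0x2345678a.
      */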
4008 static uint32_t
4009 ql_validate_minidump_checksum(qla_host_t *ha)
4010 {
4011         uint64_t sum = 0;
4012         int count;
4013         uint32_t *template_buff;
4014
4015         count = ha->hw.dma_buf.minidump.size / sizeof (uint32_t);
4016         template_buff = ha->hw.dma_buf.minidump.dma_b;
4017
4018         while (count-- > 0) {
4019                 sum += *template_buff++;
4020         }
4021
4022         while (sum >> 32) {
4023                 sum = (sum & 0xFFFFFFFF) + (sum >> 32);
4024         }
4025
4026         return (~sum);
4027 }
4028
4029 int
4030 ql_minidump_init(qla_host_t *ha)
4031 {
4032         int             ret = 0;
4033         uint32_t        template_size = 0;
4034         device_t        dev = ha->pci_dev;
4035
4036         /*
4037          * Get Minidump Template Size
4038          */
4039         ret = qla_get_minidump_tmplt_size(ha, &template_size);
4040
4041         if (ret || (template_size == 0)) {
4042                 device_printf(dev, "%s: failed [%d, %d]\n", __func__, ret,
4043                         template_size);
4044                 return (-1);
4045         }
4046
4047         /*
4048          * Allocate Memory for Minidump Template
4049          */
4050
4051         ha->hw.dma_buf.minidump.alignment = 8;
4052         ha->hw.dma_buf.minidump.size = template_size;
4053
4054 #ifdef QL_LDFLASH_FW
4055         if (ql_alloc_dmabuf(ha, &ha->hw.dma_buf.minidump)) {
4056
4057                 device_printf(dev, "%s: minidump dma alloc failed\n", __func__);
4058
4059                 return (-1);
4060         }
4061         ha->hw.dma_buf.flags.minidump = 1;
4062
4063         /*
4064          * Retrieve Minidump Template
4065          */
4066         ret = ql_get_minidump_template(ha);
4067 #else
4068         ha->hw.dma_buf.minidump.dma_b = ql83xx_minidump;
4069
4070 #endif /* #ifdef QL_LDFLASH_FW */
4071
4072         if (ret == 0) {
4073
4074                 ret = ql_validate_minidump_checksum(ha);
4075
4076                 if (ret == 0) {
4077
4078                         ret = ql_alloc_minidump_buffers(ha);
4079
4080                         if (ret == 0)
4081                                 ha->hw.mdump_init = 1;
4082                         else
4083                                 device_printf(dev,
4084                                         "%s: ql_alloc_minidump_buffers"
4085                                         " failed\n", __func__);
4086                 } else {
4087                         device_printf(dev, "%s: ql_validate_minidump_checksum"
4088                                 " failed\n", __func__);
4089                 }
4090         } else {
4091                 device_printf(dev, "%s: ql_get_minidump_template failed\n",
4092                          __func__);
4093         }
4094
4095         if (ret)
4096                 ql_minidump_free(ha);
4097
4098         return (ret);
4099 }
4100
4101 static void
4102 ql_minidump_free(qla_host_t *ha)
4103 {
4104         ha->hw.mdump_init = 0;
4105         if (ha->hw.dma_buf.flags.minidump) {
4106                 ha->hw.dma_buf.flags.minidump = 0;
4107                 ql_free_dmabuf(ha, &ha->hw.dma_buf.minidump);
4108         }
4109
4110         ql_free_minidump_template_buffer(ha);
4111         ql_free_minidump_buffer(ha);
4112
4113         return;
4114 }
4115
4116 void
4117 ql_minidump(qla_host_t *ha)
4118 {
4119         if (!ha->hw.mdump_init)
4120                 return;
4121
4122         if (ha->hw.mdump_done)
4123                 return;
4124
4125         ha->hw.mdump_start_seq_index = ql_stop_sequence(ha);
4126
4127         bzero(ha->hw.mdump_buffer, ha->hw.mdump_buffer_size);
4128         bzero(ha->hw.mdump_template, ha->hw.mdump_template_size);
4129
4130         bcopy(ha->hw.dma_buf.minidump.dma_b, ha->hw.mdump_template,
4131                 ha->hw.mdump_template_size);
4132
4133         ql_parse_template(ha);
4134  
4135         ql_start_sequence(ha, ha->hw.mdump_start_seq_index);
4136
4137         ha->hw.mdump_done = 1;
4138
4139         return;
4140 }
4141
4142
4143 /*
4144  * helper routines
4145  */
4146 static void 
4147 ql_entry_err_chk(ql_minidump_entry_t *entry, uint32_t esize)
4148 {
4149         if (esize != entry->hdr.entry_capture_size) {
4150                 entry->hdr.entry_capture_size = esize;
4151                 entry->hdr.driver_flags |= QL_DBG_SIZE_ERR_FLAG;
4152         }
4153         return;
4154 }
4155
4156
4157 static int 
4158 ql_parse_template(qla_host_t *ha)
4159 {
4160         uint32_t num_of_entries, buff_level, e_cnt, esize;
4161         uint32_t end_cnt, rv = 0;
4162         char *dump_buff, *dbuff;
4163         int sane_start = 0, sane_end = 0;
4164         ql_minidump_template_hdr_t *template_hdr;
4165         ql_minidump_entry_t *entry;
4166         uint32_t capture_mask; 
4167         uint32_t dump_size; 
4168
4169         /* Setup parameters */
4170         template_hdr = (ql_minidump_template_hdr_t *)ha->hw.mdump_template;
4171
4172         if (template_hdr->entry_type == TLHDR)
4173                 sane_start = 1;
4174         
4175         dump_buff = (char *) ha->hw.mdump_buffer;
4176
4177         num_of_entries = template_hdr->num_of_entries;
4178
4179         entry = (ql_minidump_entry_t *) ((char *)template_hdr 
4180                         + template_hdr->first_entry_offset );
4181
4182         template_hdr->saved_state_array[QL_OCM0_ADDR_INDX] =
4183                 template_hdr->ocm_window_array[ha->pci_func];
4184         template_hdr->saved_state_array[QL_PCIE_FUNC_INDX] = ha->pci_func;
4185
4186         capture_mask = ha->hw.mdump_capture_mask;
4187         dump_size = ha->hw.mdump_buffer_size;
4188
4189         template_hdr->driver_capture_mask = capture_mask;
4190
4191         QL_DPRINT80(ha, (ha->pci_dev,
4192                 "%s: sane_start = %d num_of_entries = %d "
4193                 "capture_mask = 0x%x dump_size = %d \n", 
4194                 __func__, sane_start, num_of_entries, capture_mask, dump_size));
4195
4196         for (buff_level = 0, e_cnt = 0; e_cnt < num_of_entries; e_cnt++) {
4197
4198                 /*
4199                  * If the capture_mask of the entry does not match the capture mask,
4200                  * skip the entry after marking the driver_flags indicator.
4201                  */
4202                 
4203                 if (!(entry->hdr.entry_capture_mask & capture_mask)) {
4204
4205                         entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4206                         entry = (ql_minidump_entry_t *) ((char *) entry
4207                                         + entry->hdr.entry_size);
4208                         continue;
4209                 }
4210
4211                 /*
4212                  * This is ONLY needed in implementations where
4213                  * the capture buffer allocated is too small to capture
4214                  * all of the required entries for a given capture mask.
4215                  * We need to empty the buffer contents to a file
4216                  * if possible, before processing the next entry.
4217                  * If the buff_full_flag is set, no further capture will happen
4218                  * and all remaining non-control entries will be skipped.
4219                  */
4220                 if (entry->hdr.entry_capture_size != 0) {
4221                         if ((buff_level + entry->hdr.entry_capture_size) >
4222                                 dump_size) {
4223                                 /*  Try to recover by emptying buffer to file */
4224                                 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4225                                 entry = (ql_minidump_entry_t *) ((char *) entry
4226                                                 + entry->hdr.entry_size);
4227                                 continue;
4228                         }
4229                 }
4230
4231                 /*
4232                  * Decode the entry type and process it accordingly
4233                  */
4234
4235                 switch (entry->hdr.entry_type) {
4236                 case RDNOP:
4237                         break;
4238
4239                 case RDEND:
4240                         if (sane_end == 0) {
4241                                 end_cnt = e_cnt;
4242                         }
4243                         sane_end++;
4244                         break;
4245
4246                 case RDCRB:
4247                         dbuff = dump_buff + buff_level;
4248                         esize = ql_rdcrb(ha, (void *)entry, (void *)dbuff);
4249                         ql_entry_err_chk(entry, esize);
4250                         buff_level += esize;
4251                         break;
4252
4253                 case POLLRD:
4254                         dbuff = dump_buff + buff_level;
4255                         esize = ql_pollrd(ha, (void *)entry, (void *)dbuff);
4256                         ql_entry_err_chk(entry, esize);
4257                         buff_level += esize;
4258                         break;
4259
4260                 case POLLRDMWR:
4261                         dbuff = dump_buff + buff_level;
4262                         esize = ql_pollrd_modify_write(ha, (void *)entry,
4263                                         (void *)dbuff);
4264                         ql_entry_err_chk(entry, esize);
4265                         buff_level += esize;
4266                         break;
4267
4268                 case L2ITG:
4269                 case L2DTG:
4270                 case L2DAT:
4271                 case L2INS:
4272                         dbuff = dump_buff + buff_level;
4273                         esize = ql_L2Cache(ha, (void *)entry, (void *)dbuff);
4274                         if (esize == -1) {
4275                                 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4276                         } else {
4277                                 ql_entry_err_chk(entry, esize);
4278                                 buff_level += esize;
4279                         }
4280                         break;
4281
4282                 case L1DAT:
4283                 case L1INS:
4284                         dbuff = dump_buff + buff_level;
4285                         esize = ql_L1Cache(ha, (void *)entry, (void *)dbuff);
4286                         ql_entry_err_chk(entry, esize);
4287                         buff_level += esize;
4288                         break;
4289
4290                 case RDOCM:
4291                         dbuff = dump_buff + buff_level;
4292                         esize = ql_rdocm(ha, (void *)entry, (void *)dbuff);
4293                         ql_entry_err_chk(entry, esize);
4294                         buff_level += esize;
4295                         break;
4296
4297                 case RDMEM:
4298                         dbuff = dump_buff + buff_level;
4299                         esize = ql_rdmem(ha, (void *)entry, (void *)dbuff);
4300                         ql_entry_err_chk(entry, esize);
4301                         buff_level += esize;
4302                         break;
4303
4304                 case BOARD:
4305                 case RDROM:
4306                         dbuff = dump_buff + buff_level;
4307                         esize = ql_rdrom(ha, (void *)entry, (void *)dbuff);
4308                         ql_entry_err_chk(entry, esize);
4309                         buff_level += esize;
4310                         break;
4311
4312                 case RDMUX:
4313                         dbuff = dump_buff + buff_level;
4314                         esize = ql_rdmux(ha, (void *)entry, (void *)dbuff);
4315                         ql_entry_err_chk(entry, esize);
4316                         buff_level += esize;
4317                         break;
4318
4319                 case RDMUX2:
4320                         dbuff = dump_buff + buff_level;
4321                         esize = ql_rdmux2(ha, (void *)entry, (void *)dbuff);
4322                         ql_entry_err_chk(entry, esize);
4323                         buff_level += esize;
4324                         break;
4325
4326                 case QUEUE:
4327                         dbuff = dump_buff + buff_level;
4328                         esize = ql_rdqueue(ha, (void *)entry, (void *)dbuff);
4329                         ql_entry_err_chk(entry, esize);
4330                         buff_level += esize;
4331                         break;
4332
4333                 case CNTRL:
4334                         if ((rv = ql_cntrl(ha, template_hdr, (void *)entry))) {
4335                                 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4336                         }
4337                         break;
4338                 default:
4339                         entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4340                         break;
4341                 }
4342                 /*  next entry in the template */
4343                 entry = (ql_minidump_entry_t *) ((char *) entry
4344                                                 + entry->hdr.entry_size);
4345         }
4346
4347         if (!sane_start || (sane_end > 1)) {
4348                 device_printf(ha->pci_dev,
4349                         "\n%s: Template configuration error. Check Template\n",
4350                         __func__);
4351         }
4352         
4353         QL_DPRINT80(ha, (ha->pci_dev, "%s: Minidump num of entries = %d\n",
4354                 __func__, template_hdr->num_of_entries));
4355
4356         return 0;
4357 }
4358
4359 /*
4360  * Read CRB operation.
4361  */
4362 static uint32_t
4363 ql_rdcrb(qla_host_t *ha, ql_minidump_entry_rdcrb_t * crb_entry,
4364         uint32_t * data_buff)
4365 {
4366         int loop_cnt;
4367         int ret;
4368         uint32_t op_count, addr, stride, value = 0;
4369
4370         addr = crb_entry->addr;
4371         op_count = crb_entry->op_count;
4372         stride = crb_entry->addr_stride;
4373
4374         for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
4375
4376                 ret = ql_rdwr_indreg32(ha, addr, &value, 1);
4377
4378                 if (ret)
4379                         return (0);
4380
4381                 *data_buff++ = addr;
4382                 *data_buff++ = value;
4383                 addr = addr + stride;
4384         }
4385
4386         /*
4387          * For testing purposes we return the amount of data written.
4388          */
4389         return (op_count * (2 * sizeof(uint32_t)));
4390 }
4391
4392 /*
4393  * Handle L2 Cache.
4394  */
4395
4396 static uint32_t 
4397 ql_L2Cache(qla_host_t *ha, ql_minidump_entry_cache_t *cacheEntry,
4398         uint32_t * data_buff)
4399 {
4400         int i, k;
4401         int loop_cnt;
4402         int ret;
4403
4404         uint32_t read_value;
4405         uint32_t addr, read_addr, cntrl_addr, tag_reg_addr, cntl_value_w;
4406         uint32_t tag_value, read_cnt;
4407         volatile uint8_t cntl_value_r;
4408         long timeout;
4409         uint32_t data;
4410
4411         loop_cnt = cacheEntry->op_count;
4412
4413         read_addr = cacheEntry->read_addr;
4414         cntrl_addr = cacheEntry->control_addr;
4415         cntl_value_w = (uint32_t) cacheEntry->write_value;
4416
4417         tag_reg_addr = cacheEntry->tag_reg_addr;
4418
4419         tag_value = cacheEntry->init_tag_value;
4420         read_cnt = cacheEntry->read_addr_cnt;
4421
4422         for (i = 0; i < loop_cnt; i++) {
4423
4424                 ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
4425                 if (ret)
4426                         return (0);
4427
4428                 if (cacheEntry->write_value != 0) { 
4429
4430                         ret = ql_rdwr_indreg32(ha, cntrl_addr,
4431                                         &cntl_value_w, 0);
4432                         if (ret)
4433                                 return (0);
4434                 }
4435
4436                 if (cacheEntry->poll_mask != 0) { 
4437
4438                         timeout = cacheEntry->poll_wait;
4439
4440                         ret = ql_rdwr_indreg32(ha, cntrl_addr, &data, 1);
4441                         if (ret)
4442                                 return (0);
4443
4444                         cntl_value_r = (uint8_t)data;
4445
4446                         while ((cntl_value_r & cacheEntry->poll_mask) != 0) {
4447
4448                                 if (timeout) {
4449                                         qla_mdelay(__func__, 1);
4450                                         timeout--;
4451                                 } else
4452                                         break;
4453
4454                                 ret = ql_rdwr_indreg32(ha, cntrl_addr,
4455                                                 &data, 1);
4456                                 if (ret)
4457                                         return (0);
4458
4459                                 cntl_value_r = (uint8_t)data;
4460                         }
4461                         if (!timeout) {
4462                                 /*
4463                                  * Report a timeout error: the core dump
4464                                  * capture failed. Skip the remaining
4465                                  * entries, write the buffer out to a file
4466                                  * and use the driver specific fields in
4467                                  * the template header to report this error.
4468                                  */
4469                                 return (-1);
4470                         }
4471                 }
4472
4473                 addr = read_addr;
4474                 for (k = 0; k < read_cnt; k++) {
4475
4476                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4477                         if (ret)
4478                                 return (0);
4479
4480                         *data_buff++ = read_value;
4481                         addr += cacheEntry->read_addr_stride;
4482                 }
4483
4484                 tag_value += cacheEntry->tag_value_stride;
4485         }
4486
4487         return (read_cnt * loop_cnt * sizeof(uint32_t));
4488 }
4489
4490 /*
4491  * Handle L1 Cache.
4492  */
4493
4494 static uint32_t 
4495 ql_L1Cache(qla_host_t *ha,
4496         ql_minidump_entry_cache_t *cacheEntry,
4497         uint32_t *data_buff)
4498 {
4499         int ret;
4500         int i, k;
4501         int loop_cnt;
4502
4503         uint32_t read_value;
4504         uint32_t addr, read_addr, cntrl_addr, tag_reg_addr;
4505         uint32_t tag_value, read_cnt;
4506         uint32_t cntl_value_w;
4507
4508         loop_cnt = cacheEntry->op_count;
4509
4510         read_addr = cacheEntry->read_addr;
4511         cntrl_addr = cacheEntry->control_addr;
4512         cntl_value_w = (uint32_t) cacheEntry->write_value;
4513
4514         tag_reg_addr = cacheEntry->tag_reg_addr;
4515
4516         tag_value = cacheEntry->init_tag_value;
4517         read_cnt = cacheEntry->read_addr_cnt;
4518
4519         for (i = 0; i < loop_cnt; i++) {
4520
4521                 ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
4522                 if (ret)
4523                         return (0);
4524
4525                 ret = ql_rdwr_indreg32(ha, cntrl_addr, &cntl_value_w, 0);
4526                 if (ret)
4527                         return (0);
4528
4529                 addr = read_addr;
4530                 for (k = 0; k < read_cnt; k++) {
4531
4532                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4533                         if (ret)
4534                                 return (0);
4535
4536                         *data_buff++ = read_value;
4537                         addr += cacheEntry->read_addr_stride;
4538                 }
4539
4540                 tag_value += cacheEntry->tag_value_stride;
4541         }
4542
4543         return (read_cnt * loop_cnt * sizeof(uint32_t));
4544 }
4545
4546 /*
4547  * Reading OCM memory
4548  */
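     /*
      * On-chip memory is read directly with READ_REG32() rather than
      * through ql_rdwr_indreg32(); op_count words are copied starting at
      * read_addr with a stride of read_addr_stride.  Returns the number
      * of bytes captured.
      */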
4549
4550 static uint32_t 
4551 ql_rdocm(qla_host_t *ha,
4552         ql_minidump_entry_rdocm_t *ocmEntry,
4553         uint32_t *data_buff)
4554 {
4555         int i, loop_cnt;
4556         volatile uint32_t addr;
4557         volatile uint32_t value;
4558
4559         addr = ocmEntry->read_addr;
4560         loop_cnt = ocmEntry->op_count;
4561
4562         for (i = 0; i < loop_cnt; i++) {
4563                 value = READ_REG32(ha, addr);
4564                 *data_buff++ = value;
4565                 addr += ocmEntry->read_addr_stride;
4566         }
4567         return (loop_cnt * sizeof(value));
4568 }
4569
4570 /*
4571  * Read memory
4572  */
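     /*
      * Off-chip memory is captured 16 bytes (four 32-bit words) per
      * ql_rdwr_offchip_mem() call, so read_data_size is expected to be a
      * multiple of 16.  Returns the number of bytes captured, or 0 if a
      * memory access fails.
      */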
4573
4574 static uint32_t 
4575 ql_rdmem(qla_host_t *ha,
4576         ql_minidump_entry_rdmem_t *mem_entry,
4577         uint32_t *data_buff)
4578 {
4579         int ret;
4580         int i, loop_cnt;
4581         volatile uint32_t addr;
4582         q80_offchip_mem_val_t val;
4583
4584         addr = mem_entry->read_addr;
4585
4586         /* size in bytes / 16 */
4587         loop_cnt = mem_entry->read_data_size / (sizeof(uint32_t) * 4);
4588
4589         for (i = 0; i < loop_cnt; i++) {
4590
4591                 ret = ql_rdwr_offchip_mem(ha, (addr & 0x0ffffffff), &val, 1);
4592                 if (ret)
4593                         return (0);
4594
4595                 *data_buff++ = val.data_lo;
4596                 *data_buff++ = val.data_hi;
4597                 *data_buff++ = val.data_ulo;
4598                 *data_buff++ = val.data_uhi;
4599
4600                 addr += (sizeof(uint32_t) * 4);
4601         }
4602
4603         return (loop_cnt * (sizeof(uint32_t) * 4));
4604 }
4605
4606 /*
4607  * Read Rom
4608  */
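     /*
      * Flash/ROM contents are captured one 32-bit word at a time with
      * ql_rd_flash32(), read_data_size bytes in total starting at
      * read_addr.  Returns the number of bytes captured, or 0 on a flash
      * read failure.
      */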
4609
4610 static uint32_t 
4611 ql_rdrom(qla_host_t *ha,
4612         ql_minidump_entry_rdrom_t *romEntry,
4613         uint32_t *data_buff)
4614 {
4615         int ret;
4616         int i, loop_cnt;
4617         uint32_t addr;
4618         uint32_t value;
4619
4620         addr = romEntry->read_addr;
4621         loop_cnt = romEntry->read_data_size; /* This is size in bytes */
4622         loop_cnt /= sizeof(value);
4623
4624         for (i = 0; i < loop_cnt; i++) {
4625
4626                 ret = ql_rd_flash32(ha, addr, &value);
4627                 if (ret)
4628                         return (0);
4629
4630                 *data_buff++ = value;
4631                 addr += sizeof(value);
4632         }
4633
4634         return (loop_cnt * sizeof(value));
4635 }
4636
4637 /*
4638  * Read MUX data
4639  */
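     /*
      * For each of op_count iterations the current select value is
      * written to select_addr, a word is read from read_addr, and the
      * (select value, data) pair is stored in the capture buffer; the
      * select value then advances by select_value_stride.
      */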
4640
4641 static uint32_t 
4642 ql_rdmux(qla_host_t *ha,
4643         ql_minidump_entry_mux_t *muxEntry,
4644         uint32_t *data_buff)
4645 {
4646         int ret;
4647         int loop_cnt;
4648         uint32_t read_value, sel_value;
4649         uint32_t read_addr, select_addr;
4650
4651         select_addr = muxEntry->select_addr;
4652         sel_value = muxEntry->select_value;
4653         read_addr = muxEntry->read_addr;
4654
4655         for (loop_cnt = 0; loop_cnt < muxEntry->op_count; loop_cnt++) {
4656
4657                 ret = ql_rdwr_indreg32(ha, select_addr, &sel_value, 0);
4658                 if (ret)
4659                         return (0);
4660
4661                 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
4662                 if (ret)
4663                         return (0);
4664
4665                 *data_buff++ = sel_value;
4666                 *data_buff++ = read_value;
4667
4668                 sel_value += muxEntry->select_value_stride;
4669         }
4670
4671         return (loop_cnt * (2 * sizeof(uint32_t)));
4672 }
4673
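     /*
      * MUX2 entries perform two selects per iteration: select_value_1 and
      * select_value_2 are each written to select_addr_1, the value masked
      * with select_value_mask is written to select_addr_2, and a word is
      * read from read_addr after each select.  Both select values advance
      * by select_value_stride, and each iteration stores two
      * (masked select, data) pairs.
      */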
4674 static uint32_t
4675 ql_rdmux2(qla_host_t *ha,
4676         ql_minidump_entry_mux2_t *muxEntry,
4677         uint32_t *data_buff)
4678 {
4679         int ret;
4680         int loop_cnt;
4681
4682         uint32_t select_addr_1, select_addr_2;
4683         uint32_t select_value_1, select_value_2;
4684         uint32_t select_value_count, select_value_mask;
4685         uint32_t read_addr, read_value;
4686
4687         select_addr_1 = muxEntry->select_addr_1;
4688         select_addr_2 = muxEntry->select_addr_2;
4689         select_value_1 = muxEntry->select_value_1;
4690         select_value_2 = muxEntry->select_value_2;
4691         select_value_count = muxEntry->select_value_count;
4692         select_value_mask  = muxEntry->select_value_mask;
4693
4694         read_addr = muxEntry->read_addr;
4695
4696         for (loop_cnt = 0; loop_cnt < muxEntry->select_value_count;
4697                 loop_cnt++) {
4698
4699                 uint32_t temp_sel_val;
4700
4701                 ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_1, 0);
4702                 if (ret)
4703                         return (0);
4704
4705                 temp_sel_val = select_value_1 & select_value_mask;
4706
4707                 ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
4708                 if (ret)
4709                         return (0);
4710
4711                 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
4712                 if (ret)
4713                         return (0);
4714
4715                 *data_buff++ = temp_sel_val;
4716                 *data_buff++ = read_value;
4717
4718                 ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_2, 0);
4719                 if (ret)
4720                         return (0);
4721
4722                 temp_sel_val = select_value_2 & select_value_mask;
4723
4724                 ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
4725                 if (ret)
4726                         return (0);
4727
4728                 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
4729                 if (ret)
4730                         return (0);
4731
4732                 *data_buff++ = temp_sel_val;
4733                 *data_buff++ = read_value;
4734
4735                 select_value_1 += muxEntry->select_value_stride;
4736                 select_value_2 += muxEntry->select_value_stride;
4737         }
4738
4739         return (loop_cnt * (4 * sizeof(uint32_t)));
4740 }
4741
4742 /*
4743  * Handling Queue State Reads.
4744  */
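     /*
      * For each queue (op_count of them, with queue ids advancing by
      * queue_id_stride) the queue id is written to select_addr and
      * read_addr_cnt words of queue state are read starting at read_addr
      * with a stride of read_addr_stride.
      */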
4745
4746 static uint32_t 
4747 ql_rdqueue(qla_host_t *ha,
4748         ql_minidump_entry_queue_t *queueEntry,
4749         uint32_t *data_buff)
4750 {
4751         int ret;
4752         int loop_cnt, k;
4753         uint32_t read_value;
4754         uint32_t read_addr, read_stride, select_addr;
4755         uint32_t queue_id, read_cnt;
4756
4757         read_cnt = queueEntry->read_addr_cnt;
4758         read_stride = queueEntry->read_addr_stride;
4759         select_addr = queueEntry->select_addr;
4760
4761         for (loop_cnt = 0, queue_id = 0; loop_cnt < queueEntry->op_count;
4762                 loop_cnt++) {
4763
4764                 ret = ql_rdwr_indreg32(ha, select_addr, &queue_id, 0);
4765                 if (ret)
4766                         return (0);
4767
4768                 read_addr = queueEntry->read_addr;
4769
4770                 for (k = 0; k < read_cnt; k++) {
4771
4772                         ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
4773                         if (ret)
4774                                 return (0);
4775
4776                         *data_buff++ = read_value;
4777                         read_addr += read_stride;
4778                 }
4779
4780                 queue_id += queueEntry->queue_id_stride;
4781         }
4782
4783         return (loop_cnt * (read_cnt * sizeof(uint32_t)));
4784 }
4785
4786 /*
4787  * Handling control entries.
4788  */
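     /*
      * A control entry is a small per-address opcode program: the opcode
      * bits select writes (WR), read/write-back (RW), AND/OR modification
      * of the register, a poll for (value & value_2) == value_1 with a
      * millisecond timeout, and reads/writes/modifies of the
      * saved_state_array kept in the template header (RDSTATE, WRSTATE,
      * MDSTATE).  The program is applied to op_count addresses starting
      * at crbEntry->addr with a stride of addr_stride.  No capture data
      * is produced; the return value is 0, or -1 if a poll times out.
      */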
4789
4790 static uint32_t 
4791 ql_cntrl(qla_host_t *ha,
4792         ql_minidump_template_hdr_t *template_hdr,
4793         ql_minidump_entry_cntrl_t *crbEntry)
4794 {
4795         int ret;
4796         int count;
4797         uint32_t opcode, read_value, addr, entry_addr;
4798         long timeout;
4799
4800         entry_addr = crbEntry->addr;
4801
4802         for (count = 0; count < crbEntry->op_count; count++) {
4803                 opcode = crbEntry->opcode;
4804
4805                 if (opcode & QL_DBG_OPCODE_WR) {
4806
4807                         ret = ql_rdwr_indreg32(ha, entry_addr,
4808                                         &crbEntry->value_1, 0);
4809                         if (ret)
4810                                 return (0);
4811
4812                         opcode &= ~QL_DBG_OPCODE_WR;
4813                 }
4814
4815                 if (opcode & QL_DBG_OPCODE_RW) {
4816
4817                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
4818                         if (ret)
4819                                 return (0);
4820
4821                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
4822                         if (ret)
4823                                 return (0);
4824
4825                         opcode &= ~QL_DBG_OPCODE_RW;
4826                 }
4827
4828                 if (opcode & QL_DBG_OPCODE_AND) {
4829
4830                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
4831                         if (ret)
4832                                 return (0);
4833
4834                         read_value &= crbEntry->value_2;
4835                         opcode &= ~QL_DBG_OPCODE_AND;
4836
4837                         if (opcode & QL_DBG_OPCODE_OR) {
4838                                 read_value |= crbEntry->value_3;
4839                                 opcode &= ~QL_DBG_OPCODE_OR;
4840                         }
4841
4842                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
4843                         if (ret)
4844                                 return (0);
4845                 }
4846
4847                 if (opcode & QL_DBG_OPCODE_OR) {
4848
4849                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
4850                         if (ret)
4851                                 return (0);
4852
4853                         read_value |= crbEntry->value_3;
4854
4855                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
4856                         if (ret)
4857                                 return (0);
4858
4859                         opcode &= ~QL_DBG_OPCODE_OR;
4860                 }
4861
4862                 if (opcode & QL_DBG_OPCODE_POLL) {
4863
4864                         opcode &= ~QL_DBG_OPCODE_POLL;
4865                         timeout = crbEntry->poll_timeout;
4866                         addr = entry_addr;
4867
4868                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4869                         if (ret)
4870                                 return (0);
4871
4872                         while ((read_value & crbEntry->value_2)
4873                                 != crbEntry->value_1) {
4874
4875                                 if (timeout) {
4876                                         qla_mdelay(__func__, 1);
4877                                         timeout--;
4878                                 } else
4879                                         break;
4880
4881                                 ret = ql_rdwr_indreg32(ha, addr,
4882                                                 &read_value, 1);
4883                                 if (ret)
4884                                         return (0);
4885                         }
4886
4887                         if (!timeout) {
4888                                 /*
4889                                  * Report a timeout error:
4890                                  * the core dump capture failed.
4891                                  * Skip the remaining entries and write
4892                                  * the buffer out to the file.  Use the
4893                                  * driver specific fields in the template
4894                                  * header to report this error.
4895                                  */
4896                                 return (-1);
4897                         }
4898                 }
4899
4900                 if (opcode & QL_DBG_OPCODE_RDSTATE) {
4901                         /*
4902                          * decide which address to use.
4903                          */
4904                         if (crbEntry->state_index_a) {
4905                                 addr = template_hdr->saved_state_array[
4906                                                 crbEntry->state_index_a];
4907                         } else {
4908                                 addr = entry_addr;
4909                         }
4910
4911                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4912                         if (ret)
4913                                 return (0);
4914
4915                         template_hdr->saved_state_array[crbEntry->state_index_v]
4916                                         = read_value;
4917                         opcode &= ~QL_DBG_OPCODE_RDSTATE;
4918                 }
4919
4920                 if (opcode & QL_DBG_OPCODE_WRSTATE) {
4921                         /*
4922                          * decide which value to use.
4923                          */
4924                         if (crbEntry->state_index_v) {
4925                                 read_value = template_hdr->saved_state_array[
4926                                                 crbEntry->state_index_v];
4927                         } else {
4928                                 read_value = crbEntry->value_1;
4929                         }
4930                         /*
4931                          * decide which address to use.
4932                          */
4933                         if (crbEntry->state_index_a) {
4934                                 addr = template_hdr->saved_state_array[
4935                                                 crbEntry->state_index_a];
4936                         } else {
4937                                 addr = entry_addr;
4938                         }
4939
4940                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 0);
4941                         if (ret)
4942                                 return (0);
4943
4944                         opcode &= ~QL_DBG_OPCODE_WRSTATE;
4945                 }
4946
4947                 if (opcode & QL_DBG_OPCODE_MDSTATE) {
4948                         /*  Read value from saved state using index */
4949                         read_value = template_hdr->saved_state_array[
4950                                                 crbEntry->state_index_v];
4951
4952                         read_value <<= crbEntry->shl; /* Shift left operation */
4953                         read_value >>= crbEntry->shr; /* Shift right operation */
4954
4955                         if (crbEntry->value_2) {
4956                                 /* check if AND mask is provided */
4957                                 read_value &= crbEntry->value_2;
4958                         }
4959
4960                         read_value |= crbEntry->value_3; /* OR operation */
4961                         read_value += crbEntry->value_1; /* increment op */
4962
4963                         /* Write value back to state area. */
4964
4965                         template_hdr->saved_state_array[crbEntry->state_index_v]
4966                                         = read_value;
4967                         opcode &= ~QL_DBG_OPCODE_MDSTATE;
4968                 }
4969
4970                 entry_addr += crbEntry->addr_stride;
4971         }
4972
4973         return (0);
4974 }
4975
4976 /*
4977  * Handling rd poll entry.
4978  */
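     /*
      * For each of op_count iterations the select value is written to
      * select_addr, and select_addr is then polled (up to 'poll' reads)
      * until a bit in 'mask' is set; once it is, a word is read from
      * read_addr and the (select value, data) pair is stored.  If the
      * poll count is exhausted the entry is treated as failed and 0 is
      * returned.
      */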
4979
4980 static uint32_t 
4981 ql_pollrd(qla_host_t *ha, ql_minidump_entry_pollrd_t *entry,
4982         uint32_t *data_buff)
4983 {
4984         int ret;
4985         int loop_cnt;
4986         uint32_t op_count, select_addr, select_value_stride, select_value;
4987         uint32_t read_addr, poll, mask, data_size, data;
4988         uint32_t wait_count = 0;
4989
4990         select_addr            = entry->select_addr;
4991         read_addr              = entry->read_addr;
4992         select_value           = entry->select_value;
4993         select_value_stride    = entry->select_value_stride;
4994         op_count               = entry->op_count;
4995         poll                   = entry->poll;
4996         mask                   = entry->mask;
4997         data_size              = entry->data_size;
4998
4999         for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
5000
5001                 ret = ql_rdwr_indreg32(ha, select_addr, &select_value, 0);
5002                 if (ret)
5003                         return (0);
5004
5005                 wait_count = 0;
5006
5007                 while (wait_count < poll) {
5008
5009                         uint32_t temp;
5010
5011                         ret = ql_rdwr_indreg32(ha, select_addr, &temp, 1);
5012                         if (ret)
5013                                 return (0);
5014
5015                         if ( (temp & mask) != 0 ) {
5016                                 break;
5017                         }
5018                         wait_count++;
5019                 }
5020
5021                 if (wait_count == poll) {
5022                         device_printf(ha->pci_dev,
5023                                 "%s: Error in processing entry\n", __func__);
5024                         device_printf(ha->pci_dev,
5025                                 "%s: wait_count <0x%x> poll <0x%x>\n",
5026                                 __func__, wait_count, poll);
5027                         return (0);
5028                 }
5029
5030                 ret = ql_rdwr_indreg32(ha, read_addr, &data, 1);
5031                 if (ret)
5032                         return (0);
5033
5034                 *data_buff++ = select_value;
5035                 *data_buff++ = data;
5036                 select_value = select_value + select_value_stride;
5037         }
5038
5039         /*
5040          * For testing purposes we return the amount of data written.
5041          */
5042         return (loop_cnt * (2 * sizeof(uint32_t)));
5043 }
5044
5045
5046 /*
5047  * Handling rd modify write poll entry.
5048  */
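     /*
      * Sequence: value_1 is written to addr_1 and addr_1 is polled until
      * a bit in 'mask' is set; addr_2 is then read, ANDed with
      * modify_mask and written back; value_2 is written to addr_1 and
      * addr_1 is polled again.  The captured output is the addr_2
      * register address followed by the modified data value.
      */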
5049
5050 static uint32_t 
5051 ql_pollrd_modify_write(qla_host_t *ha,
5052         ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
5053         uint32_t *data_buff)
5054 {
5055         int ret;
5056         uint32_t addr_1, addr_2, value_1, value_2, data;
5057         uint32_t poll, mask, data_size, modify_mask;
5058         uint32_t wait_count = 0;
5059
5060         addr_1          = entry->addr_1;
5061         addr_2          = entry->addr_2;
5062         value_1         = entry->value_1;
5063         value_2         = entry->value_2;
5064
5065         poll            = entry->poll;
5066         mask            = entry->mask;
5067         modify_mask     = entry->modify_mask;
5068         data_size       = entry->data_size;
5069
5070
5071         ret = ql_rdwr_indreg32(ha, addr_1, &value_1, 0);
5072         if (ret)
5073                 return (0);
5074
5075         wait_count = 0;
5076         while (wait_count < poll) {
5077
5078                 uint32_t temp;
5079
5080                 ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
5081                 if (ret)
5082                         return (0);
5083
5084                 if ( (temp & mask) != 0 ) {
5085                         break;
5086                 }
5087                 wait_count++;
5088         }
5089
5090         if (wait_count == poll) {
5091                 device_printf(ha->pci_dev, "%s: Error in processing entry\n",
5092                         __func__);
5093         } else {
5094
5095                 ret = ql_rdwr_indreg32(ha, addr_2, &data, 1);
5096                 if (ret)
5097                         return (0);
5098
5099                 data = (data & modify_mask);
5100
5101                 ret = ql_rdwr_indreg32(ha, addr_2, &data, 0);
5102                 if (ret)
5103                         return (0);
5104
5105                 ret = ql_rdwr_indreg32(ha, addr_1, &value_2, 0);
5106                 if (ret)
5107                         return (0);
5108
5109                 /* Poll again */
5110                 wait_count = 0;
5111                 while (wait_count < poll) {
5112
5113                         uint32_t temp;
5114
5115                         ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
5116                         if (ret)
5117                                 return (0);
5118
5119                         if ( (temp & mask) != 0 ) {
5120                                 break;
5121                         }
5122                         wait_count++;
5123                 }
5124                 *data_buff++ = addr_2;
5125                 *data_buff++ = data;
5126         }
5127
5128         /*
5129          * For testing purposes we return the amount of data written.
5130          */
5131         return (2 * sizeof(uint32_t));
5132 }
5133
5134