/*
 * Copyright (c) 2013-2016 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: ql_hw.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 * Content: Contains Hardware dependent functions
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "ql_os.h"
#include "ql_hw.h"
#include "ql_def.h"
#include "ql_inline.h"
#include "ql_ver.h"
#include "ql_glbl.h"
#include "ql_dbg.h"
#include "ql_minidump.h"

/*
 * Static Functions
 */

static void qla_del_rcv_cntxt(qla_host_t *ha);
static int qla_init_rcv_cntxt(qla_host_t *ha);
static void qla_del_xmt_cntxt(qla_host_t *ha);
static int qla_init_xmt_cntxt(qla_host_t *ha);
static void qla_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx);
static int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
        uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause);
static int qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx,
        uint32_t num_intrs, uint32_t create);
static int qla_config_rss(qla_host_t *ha, uint16_t cntxt_id);
static int qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id,
        int tenable, int rcv);
static int qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode);
static int qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id);

static int qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd,
                uint8_t *hdr);
static int qla_hw_add_all_mcast(qla_host_t *ha);
static int qla_hw_del_all_mcast(qla_host_t *ha);
static int qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds);

static int qla_init_nic_func(qla_host_t *ha);
static int qla_stop_nic_func(qla_host_t *ha);
static int qla_query_fw_dcbx_caps(qla_host_t *ha);
static int qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits);
static int qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits);
static void qla_get_quick_stats(qla_host_t *ha);
static int qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode);
static int qla_get_cam_search_mode(qla_host_t *ha);

static void ql_minidump_free(qla_host_t *ha);


static int
qla_sysctl_get_drvr_stats(SYSCTL_HANDLER_ARGS)
{
        int err = 0, ret;
        qla_host_t *ha;
        uint32_t i;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        if (ret == 1) {

                ha = (qla_host_t *)arg1;

                for (i = 0; i < ha->hw.num_sds_rings; i++) {

                        device_printf(ha->pci_dev,
                                "%s: sds_ring[%d] = %p\n", __func__, i,
                                (void *)ha->hw.sds[i].intr_count);

                        device_printf(ha->pci_dev,
                                "%s: sds_ring[%d].spurious_intr_count = %p\n",
                                __func__,
                                i, (void *)ha->hw.sds[i].spurious_intr_count);

                        device_printf(ha->pci_dev,
                                "%s: sds_ring[%d].rx_free = %d\n", __func__, i,
                                ha->hw.sds[i].rx_free);
                }

                for (i = 0; i < ha->hw.num_tx_rings; i++)
                        device_printf(ha->pci_dev,
                                "%s: tx[%d] = %p\n", __func__, i,
                                (void *)ha->tx_ring[i].count);

                for (i = 0; i < ha->hw.num_rds_rings; i++)
                        device_printf(ha->pci_dev,
                                "%s: rds_ring[%d] = %p\n", __func__, i,
                                (void *)ha->hw.rds[i].count);

                device_printf(ha->pci_dev, "%s: lro_pkt_count = %p\n", __func__,
                        (void *)ha->lro_pkt_count);

                device_printf(ha->pci_dev, "%s: lro_bytes = %p\n", __func__,
                        (void *)ha->lro_bytes);

#ifdef QL_ENABLE_ISCSI_TLV
                device_printf(ha->pci_dev, "%s: iscsi_pkts = %p\n", __func__,
                        (void *)ha->hw.iscsi_pkt_count);
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */

        }
        return (err);
}
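
/*
 * Example usage (unit number hypothetical; the first adapter typically
 * attaches as ql0):
 *
 *      sysctl dev.ql.0.drvr_stats=1
 *
 * Writing 1 makes the handler above dump the per-SDS/TX/RDS ring counters
 * and the LRO totals to the console via device_printf().
 */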

static int
qla_sysctl_get_quick_stats(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        if (ret == 1) {
                ha = (qla_host_t *)arg1;
                qla_get_quick_stats(ha);
        }
        return (err);
}

#ifdef QL_DBG

static void
qla_stop_pegs(qla_host_t *ha)
{
        uint32_t val = 1;

        ql_rdwr_indreg32(ha, Q8_CRB_PEG_0, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_1, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_2, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_3, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_4, &val, 0);
        device_printf(ha->pci_dev, "%s PEGS HALTED!!!!!\n", __func__);
}

static int
qla_sysctl_stop_pegs(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        if (ret == 1) {
                ha = (qla_host_t *)arg1;
                (void)QLA_LOCK(ha, __func__, 0);
                qla_stop_pegs(ha);
                QLA_UNLOCK(ha, __func__);
        }

        return err;
}
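
/*
 * Debug aid (QL_DBG builds only): writing 1 to the "peg_stop" sysctl
 * registered in ql_hw_add_sysctls() halts the firmware PEG processors via
 * the CRB writes above, e.g. (unit number hypothetical):
 *
 *      sysctl dev.ql.0.peg_stop=1
 *
 * which gives one way to simulate a firmware hang so the driver's error
 * recovery path can be exercised.
 */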
#endif /* #ifdef QL_DBG */

static int
qla_validate_set_port_cfg_bit(uint32_t bits)
{
        if ((bits & 0xF) > 1)
                return (-1);

        if (((bits >> 4) & 0xF) > 2)
                return (-1);

        if (((bits >> 8) & 0xF) > 2)
                return (-1);

        return (0);
}
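
/*
 * Illustrative encodings accepted by the check above (the field layout is
 * documented with the "port_cfg" sysctl in ql_hw_add_sysctls()):
 *
 *      0x011 - DCBX enabled (bits 0-3 = 1), standard pause (bits 4-7 = 1),
 *              applied to both transmit and receive (bits 8-11 = 0)
 *      0x020 - DCBX disabled (bits 0-3 = 0), per-priority pause
 *              (bits 4-7 = 2)
 */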

static int
qla_sysctl_port_cfg(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;
        uint32_t cfg_bits;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        if (qla_validate_set_port_cfg_bit((uint32_t)ret) == 0) {

                ha = (qla_host_t *)arg1;

                err = qla_get_port_config(ha, &cfg_bits);

                if (err)
                        goto qla_sysctl_set_port_cfg_exit;

                if (ret & 0x1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_DCBX_ENABLE;
                } else {
                        cfg_bits &= ~Q8_PORT_CFG_BITS_DCBX_ENABLE;
                }

                ret = ret >> 4;
                cfg_bits &= ~Q8_PORT_CFG_BITS_PAUSE_CFG_MASK;

                if ((ret & 0xF) == 0) {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_DISABLED;
                } else if ((ret & 0xF) == 1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_STD;
                } else {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_PPM;
                }

                ret = ret >> 4;
                cfg_bits &= ~Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK;

                if (ret == 0) {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT_RCV;
                } else if (ret == 1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT;
                } else {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_RCV;
                }

                err = qla_set_port_config(ha, cfg_bits);
        } else {
                ha = (qla_host_t *)arg1;

                err = qla_get_port_config(ha, &cfg_bits);
        }

qla_sysctl_set_port_cfg_exit:
        return err;
}

static int
qla_sysctl_set_cam_search_mode(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        ha = (qla_host_t *)arg1;

        if ((ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_INTERNAL) ||
                (ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_AUTO)) {
                err = qla_set_cam_search_mode(ha, (uint32_t)ret);
        } else {
                device_printf(ha->pci_dev, "%s: ret = %d\n", __func__, ret);
        }

        return (err);
}

static int
qla_sysctl_get_cam_search_mode(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        ha = (qla_host_t *)arg1;
        err = qla_get_cam_search_mode(ha);

        return (err);
}


/*
 * Name: ql_hw_add_sysctls
 * Function: Add P3Plus specific sysctls
 */
void
ql_hw_add_sysctls(qla_host_t *ha)
{
        device_t        dev;

        dev = ha->pci_dev;

        ha->hw.num_sds_rings = MAX_SDS_RINGS;
        ha->hw.num_rds_rings = MAX_RDS_RINGS;
        ha->hw.num_tx_rings = NUM_TX_RINGS;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_rds_rings", CTLFLAG_RD, &ha->hw.num_rds_rings,
                ha->hw.num_rds_rings, "Number of Rcv Descriptor Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_sds_rings", CTLFLAG_RD, &ha->hw.num_sds_rings,
                ha->hw.num_sds_rings, "Number of Status Descriptor Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->hw.num_tx_rings,
                ha->hw.num_tx_rings, "Number of Transmit Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "tx_ring_index", CTLFLAG_RW, &ha->txr_idx,
                ha->txr_idx, "Tx Ring Used");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "drvr_stats", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_get_drvr_stats, "I", "Driver Maintained Statistics");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "quick_stats", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_get_quick_stats, "I", "Quick Statistics");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "max_tx_segs", CTLFLAG_RD, &ha->hw.max_tx_segs,
                ha->hw.max_tx_segs, "Max # of Segments in a non-TSO pkt");

        ha->hw.sds_cidx_thres = 32;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "sds_cidx_thres", CTLFLAG_RW, &ha->hw.sds_cidx_thres,
                ha->hw.sds_cidx_thres,
                "Number of SDS entries to process before updating"
                " SDS Ring Consumer Index");

        ha->hw.rds_pidx_thres = 32;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "rds_pidx_thres", CTLFLAG_RW, &ha->hw.rds_pidx_thres,
                ha->hw.rds_pidx_thres,
                "Number of Rcv Ring Entries to post before updating"
                " RDS Ring Producer Index");

        ha->hw.rcv_intr_coalesce = (3 << 16) | 256;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "rcv_intr_coalesce", CTLFLAG_RW,
                &ha->hw.rcv_intr_coalesce,
                ha->hw.rcv_intr_coalesce,
                "Rcv Intr Coalescing Parameters\n"
                "\tbits 15:0 max packets\n"
                "\tbits 31:16 max micro-seconds to wait\n"
                "\tplease run\n"
                "\tifconfig <if> down && ifconfig <if> up\n"
                "\tto take effect\n");
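
        /*
         * Default above: (3 << 16) | 256 encodes, per the bit layout in
         * the description string, an interrupt after at most 256 received
         * packets or 3 micro-seconds, whichever limit is hit first.
         */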

        ha->hw.xmt_intr_coalesce = (64 << 16) | 64;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "xmt_intr_coalesce", CTLFLAG_RW,
                &ha->hw.xmt_intr_coalesce,
                ha->hw.xmt_intr_coalesce,
                "Xmt Intr Coalescing Parameters\n"
                "\tbits 15:0 max packets\n"
                "\tbits 31:16 max micro-seconds to wait\n"
                "\tplease run\n"
                "\tifconfig <if> down && ifconfig <if> up\n"
                "\tto take effect\n");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "port_cfg", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_port_cfg, "I",
                        "Set Port Configuration if the value is valid, "
                        "otherwise Get Port Configuration\n"
                        "\tBits 0-3 : 1 = DCBX Enable; 0 = DCBX Disable\n"
                        "\tBits 4-7 : 0 = no pause; 1 = std; 2 = ppm\n"
                        "\tBits 8-11: std pause cfg; 0 = xmt and rcv;"
                        " 1 = xmt only; 2 = rcv only;\n"
                );

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "set_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_set_cam_search_mode, "I",
                        "Set CAM Search Mode\n"
                        "\t 1 = search mode internal\n"
                        "\t 2 = search mode auto\n");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "get_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_get_cam_search_mode, "I",
                        "Get CAM Search Mode\n"
                        "\t 1 = search mode internal\n"
                        "\t 2 = search mode auto\n");
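
        /*
         * Example (unit number hypothetical):
         * "sysctl dev.ql.0.set_cam_search_mode=1" selects internal CAM
         * search; writing any value to "dev.ql.0.get_cam_search_mode"
         * logs the currently configured mode to the console.
         */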

        ha->hw.enable_9kb = 1;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "enable_9kb", CTLFLAG_RW, &ha->hw.enable_9kb,
                ha->hw.enable_9kb, "Enable 9Kbyte Buffers when MTU = 9000");

        ha->hw.mdump_active = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "minidump_active", CTLFLAG_RW, &ha->hw.mdump_active,
                ha->hw.mdump_active,
                "Minidump retrieval is Active");

        ha->hw.mdump_done = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "mdump_done", CTLFLAG_RW,
                &ha->hw.mdump_done, ha->hw.mdump_done,
                "Minidump has been taken and is available for retrieval");

        ha->hw.mdump_capture_mask = 0xF;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "minidump_capture_mask", CTLFLAG_RW,
                &ha->hw.mdump_capture_mask, ha->hw.mdump_capture_mask,
                "Minidump capture mask");
#ifdef QL_DBG

        ha->err_inject = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "err_inject",
                CTLFLAG_RW, &ha->err_inject, ha->err_inject,
                "Error to be injected\n"
                "\t\t\t 0: No Errors\n"
                "\t\t\t 1: rcv: rxb struct invalid\n"
                "\t\t\t 2: rcv: mp == NULL\n"
                "\t\t\t 3: lro: rxb struct invalid\n"
                "\t\t\t 4: lro: mp == NULL\n"
                "\t\t\t 5: rcv: num handles invalid\n"
                "\t\t\t 6: reg: indirect reg rd_wr failure\n"
                "\t\t\t 7: ocm: offchip memory rd_wr failure\n"
                "\t\t\t 8: mbx: mailbox command failure\n"
                "\t\t\t 9: heartbeat failure\n"
                "\t\t\t A: temperature failure\n"
                "\t\t\t 11: m_getcl or m_getjcl failure\n");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "peg_stop", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_stop_pegs, "I", "Peg Stop");

#endif /* #ifdef QL_DBG */

        ha->hw.user_pri_nic = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "user_pri_nic", CTLFLAG_RW, &ha->hw.user_pri_nic,
                ha->hw.user_pri_nic,
                "VLAN Tag User Priority for Normal Ethernet Packets");

        ha->hw.user_pri_iscsi = 4;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "user_pri_iscsi", CTLFLAG_RW, &ha->hw.user_pri_iscsi,
                ha->hw.user_pri_iscsi,
                "VLAN Tag User Priority for iSCSI Packets");

}

void
ql_hw_link_status(qla_host_t *ha)
{
        device_printf(ha->pci_dev, "cable_oui\t\t 0x%08x\n", ha->hw.cable_oui);

        if (ha->hw.link_up) {
                device_printf(ha->pci_dev, "link Up\n");
        } else {
                device_printf(ha->pci_dev, "link Down\n");
        }

        if (ha->hw.flags.fduplex) {
                device_printf(ha->pci_dev, "Full Duplex\n");
        } else {
                device_printf(ha->pci_dev, "Half Duplex\n");
        }

        if (ha->hw.flags.autoneg) {
                device_printf(ha->pci_dev, "Auto Negotiation Enabled\n");
        } else {
                device_printf(ha->pci_dev, "Auto Negotiation Disabled\n");
        }

        switch (ha->hw.link_speed) {
        case 0x710:
                device_printf(ha->pci_dev, "link speed\t\t 10Gbps\n");
                break;

        case 0x3E8:
                device_printf(ha->pci_dev, "link speed\t\t 1Gbps\n");
                break;

        case 0x64:
                device_printf(ha->pci_dev, "link speed\t\t 100Mbps\n");
                break;

        default:
                device_printf(ha->pci_dev, "link speed\t\t Unknown\n");
                break;
        }

        switch (ha->hw.module_type) {

        case 0x01:
                device_printf(ha->pci_dev, "Module Type 10GBase-LRM\n");
                break;

        case 0x02:
                device_printf(ha->pci_dev, "Module Type 10GBase-LR\n");
                break;

        case 0x03:
                device_printf(ha->pci_dev, "Module Type 10GBase-SR\n");
                break;

        case 0x04:
                device_printf(ha->pci_dev,
                        "Module Type 10GE Passive Copper(Compliant)[%d m]\n",
                        ha->hw.cable_length);
                break;

        case 0x05:
                device_printf(ha->pci_dev, "Module Type 10GE Active"
                        " Limiting Copper(Compliant)[%d m]\n",
                        ha->hw.cable_length);
                break;

        case 0x06:
                device_printf(ha->pci_dev,
                        "Module Type 10GE Passive Copper"
                        " (Legacy, Best Effort)[%d m]\n",
                        ha->hw.cable_length);
                break;

        case 0x07:
                device_printf(ha->pci_dev, "Module Type 1000Base-SX\n");
                break;

        case 0x08:
                device_printf(ha->pci_dev, "Module Type 1000Base-LX\n");
                break;

        case 0x09:
                device_printf(ha->pci_dev, "Module Type 1000Base-CX\n");
                break;

        case 0x0A:
                device_printf(ha->pci_dev, "Module Type 1000Base-T\n");
                break;

        case 0x0B:
                device_printf(ha->pci_dev, "Module Type 1GE Passive Copper"
                        " (Legacy, Best Effort)\n");
                break;

        default:
                device_printf(ha->pci_dev, "Unknown Module Type 0x%x\n",
                        ha->hw.module_type);
                break;
        }

        if (ha->hw.link_faults == 1)
                device_printf(ha->pci_dev, "SFP Power Fault\n");
}

/*
 * Name: ql_free_dma
 * Function: Frees the DMA'able memory allocated in ql_alloc_dma()
 */
void
ql_free_dma(qla_host_t *ha)
{
        uint32_t i;

        if (ha->hw.dma_buf.flags.sds_ring) {
                for (i = 0; i < ha->hw.num_sds_rings; i++) {
                        ql_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]);
                }
                ha->hw.dma_buf.flags.sds_ring = 0;
        }

        if (ha->hw.dma_buf.flags.rds_ring) {
                for (i = 0; i < ha->hw.num_rds_rings; i++) {
                        ql_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]);
                }
                ha->hw.dma_buf.flags.rds_ring = 0;
        }

        if (ha->hw.dma_buf.flags.tx_ring) {
                ql_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring);
                ha->hw.dma_buf.flags.tx_ring = 0;
        }
        ql_minidump_free(ha);
}

/*
 * Name: ql_alloc_dma
 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
 */
int
ql_alloc_dma(qla_host_t *ha)
{
        device_t                dev;
        uint32_t                i, j, size, tx_ring_size;
        qla_hw_t                *hw;
        qla_hw_tx_cntxt_t       *tx_cntxt;
        uint8_t                 *vaddr;
        bus_addr_t              paddr;

        dev = ha->pci_dev;

        QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

        hw = &ha->hw;
        /*
         * Allocate Transmit Ring
         */
        tx_ring_size = (sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS);
        size = (tx_ring_size * ha->hw.num_tx_rings);

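        /*
         * The extra PAGE_SIZE added below leaves room, past the Tx rings
         * themselves, for one 32-bit Tx consumer index per ring; the
         * second loop below carves those out of the same DMA buffer.
         */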
        hw->dma_buf.tx_ring.alignment = 8;
        hw->dma_buf.tx_ring.size = size + PAGE_SIZE;

        if (ql_alloc_dmabuf(ha, &hw->dma_buf.tx_ring)) {
                device_printf(dev, "%s: tx ring alloc failed\n", __func__);
                goto ql_alloc_dma_exit;
        }

        vaddr = (uint8_t *)hw->dma_buf.tx_ring.dma_b;
        paddr = hw->dma_buf.tx_ring.dma_addr;

        for (i = 0; i < ha->hw.num_tx_rings; i++) {
                tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

                tx_cntxt->tx_ring_base = (q80_tx_cmd_t *)vaddr;
                tx_cntxt->tx_ring_paddr = paddr;

                vaddr += tx_ring_size;
                paddr += tx_ring_size;
        }

        for (i = 0; i < ha->hw.num_tx_rings; i++) {
                tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

                tx_cntxt->tx_cons = (uint32_t *)vaddr;
                tx_cntxt->tx_cons_paddr = paddr;

                vaddr += sizeof (uint32_t);
                paddr += sizeof (uint32_t);
        }

        ha->hw.dma_buf.flags.tx_ring = 1;

        QL_DPRINT2(ha, (dev, "%s: tx_ring phys %p virt %p\n",
                __func__, (void *)(hw->dma_buf.tx_ring.dma_addr),
                hw->dma_buf.tx_ring.dma_b));
        /*
         * Allocate Receive Descriptor Rings
         */

        for (i = 0; i < hw->num_rds_rings; i++) {

                hw->dma_buf.rds_ring[i].alignment = 8;
                hw->dma_buf.rds_ring[i].size =
                        (sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS;

                if (ql_alloc_dmabuf(ha, &hw->dma_buf.rds_ring[i])) {
                        device_printf(dev, "%s: rds ring[%d] alloc failed\n",
                                __func__, i);

                        for (j = 0; j < i; j++)
                                ql_free_dmabuf(ha, &hw->dma_buf.rds_ring[j]);

                        goto ql_alloc_dma_exit;
                }
                QL_DPRINT4(ha, (dev, "%s: rx_ring[%d] phys %p virt %p\n",
                        __func__, i, (void *)(hw->dma_buf.rds_ring[i].dma_addr),
                        hw->dma_buf.rds_ring[i].dma_b));
        }

        hw->dma_buf.flags.rds_ring = 1;

        /*
         * Allocate Status Descriptor Rings
         */

        for (i = 0; i < hw->num_sds_rings; i++) {
                hw->dma_buf.sds_ring[i].alignment = 8;
                hw->dma_buf.sds_ring[i].size =
                        (sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS;

                if (ql_alloc_dmabuf(ha, &hw->dma_buf.sds_ring[i])) {
                        device_printf(dev, "%s: sds ring alloc failed\n",
                                __func__);

                        for (j = 0; j < i; j++)
                                ql_free_dmabuf(ha, &hw->dma_buf.sds_ring[j]);

                        goto ql_alloc_dma_exit;
                }
                QL_DPRINT4(ha, (dev, "%s: sds_ring[%d] phys %p virt %p\n",
                        __func__, i,
                        (void *)(hw->dma_buf.sds_ring[i].dma_addr),
                        hw->dma_buf.sds_ring[i].dma_b));
        }
        for (i = 0; i < hw->num_sds_rings; i++) {
                hw->sds[i].sds_ring_base =
                        (q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b;
        }

        hw->dma_buf.flags.sds_ring = 1;

        return 0;

ql_alloc_dma_exit:
        ql_free_dma(ha);
        return -1;
}

#define Q8_MBX_MSEC_DELAY       5000

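/*
 * Mailbox handshake implemented by qla_mbx_cmd() below (descriptive
 * summary): wait for Q8_HOST_MBOX_CNTRL to read 0 (mailbox free), write
 * the command words to Q8_HOST_MBOX0.., then set Q8_HOST_MBOX_CNTRL to 1
 * to signal the firmware.  Completion is polled via Q8_FW_MBOX_CNTRL;
 * replies of the form 0x8xxx in Q8_FW_MBOX0 are skipped (presumably async
 * events) and polling continues.  Returns 0 on success; -1/-2 on host/fw
 * mailbox timeout (both also set qla_initiate_recovery); -3 under error
 * injection.
 */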
static int
qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
        uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause)
{
        uint32_t i;
        uint32_t data;
        int ret = 0;

        if (QL_ERR_INJECT(ha, INJCT_MBX_CMD_FAILURE)) {
                ret = -3;
                ha->qla_initiate_recovery = 1;
                goto exit_qla_mbx_cmd;
        }

        if (no_pause)
                i = 1000;
        else
                i = Q8_MBX_MSEC_DELAY;

        while (i) {
                data = READ_REG32(ha, Q8_HOST_MBOX_CNTRL);
                if (data == 0)
                        break;
                if (no_pause) {
                        DELAY(1000);
                } else {
                        qla_mdelay(__func__, 1);
                }
                i--;
        }

        if (i == 0) {
                device_printf(ha->pci_dev, "%s: host_mbx_cntrl 0x%08x\n",
                        __func__, data);
                ret = -1;
                ha->qla_initiate_recovery = 1;
                goto exit_qla_mbx_cmd;
        }

        for (i = 0; i < n_hmbox; i++) {
                WRITE_REG32(ha, (Q8_HOST_MBOX0 + (i << 2)), *h_mbox);
                h_mbox++;
        }

        WRITE_REG32(ha, Q8_HOST_MBOX_CNTRL, 0x1);

        i = Q8_MBX_MSEC_DELAY;
        while (i) {
                data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);

                if ((data & 0x3) == 1) {
                        data = READ_REG32(ha, Q8_FW_MBOX0);
                        if ((data & 0xF000) != 0x8000)
                                break;
                }
                if (no_pause) {
                        DELAY(1000);
                } else {
                        qla_mdelay(__func__, 1);
                }
                i--;
        }
        if (i == 0) {
                device_printf(ha->pci_dev, "%s: fw_mbx_cntrl 0x%08x\n",
                        __func__, data);
                ret = -2;
                ha->qla_initiate_recovery = 1;
                goto exit_qla_mbx_cmd;
        }

        for (i = 0; i < n_fwmbox; i++) {
                *fw_mbox++ = READ_REG32(ha, (Q8_FW_MBOX0 + (i << 2)));
        }

        WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
        WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);

exit_qla_mbx_cmd:
        return (ret);
}

int
qla_get_nic_partition(qla_host_t *ha, uint32_t *supports_9kb,
        uint32_t *num_rcvq)
{
        uint32_t *mbox, err;
        device_t dev = ha->pci_dev;

        bzero(ha->hw.mbox, (sizeof (uint32_t) * Q8_NUM_MBOX));

        mbox = ha->hw.mbox;

        mbox[0] = Q8_MBX_GET_NIC_PARTITION | (0x2 << 16) | (0x2 << 29);

        if (qla_mbx_cmd(ha, mbox, 2, mbox, 19, 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }
        err = mbox[0] >> 25;

        if (supports_9kb != NULL) {
                if (mbox[16] & 0x80) /* bit 7 of mbox 16 */
                        *supports_9kb = 1;
                else
                        *supports_9kb = 0;
        }

        if (num_rcvq != NULL)
                *num_rcvq = ((mbox[6] >> 16) & 0xFFFF);

        if ((err != 1) && (err != 0)) {
                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
                return (-1);
        }
        return 0;
}

static int
qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx, uint32_t num_intrs,
        uint32_t create)
{
        uint32_t i, err;
        device_t dev = ha->pci_dev;
        q80_config_intr_t *c_intr;
        q80_config_intr_rsp_t *c_intr_rsp;

        c_intr = (q80_config_intr_t *)ha->hw.mbox;
        bzero(c_intr, (sizeof (q80_config_intr_t)));

        c_intr->opcode = Q8_MBX_CONFIG_INTR;

        c_intr->count_version = (sizeof (q80_config_intr_t) >> 2);
        c_intr->count_version |= Q8_MBX_CMD_VERSION;

        c_intr->nentries = num_intrs;

        for (i = 0; i < num_intrs; i++) {
                if (create) {
                        c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_CREATE;
                        c_intr->intr[i].msix_index = start_idx + 1 + i;
                } else {
                        c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_DELETE;
                        c_intr->intr[i].msix_index =
                                ha->hw.intr_id[(start_idx + i)];
                }

                c_intr->intr[i].cmd_type |= Q8_MBX_CONFIG_INTR_TYPE_MSI_X;
        }

        if (qla_mbx_cmd(ha, (uint32_t *)c_intr,
                (sizeof (q80_config_intr_t) >> 2),
                ha->hw.mbox, (sizeof (q80_config_intr_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }

        c_intr_rsp = (q80_config_intr_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(c_intr_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed1 [0x%08x, %d]\n", __func__, err,
                        c_intr_rsp->nentries);

                for (i = 0; i < c_intr_rsp->nentries; i++) {
                        device_printf(dev, "%s: [%d]:[0x%x 0x%x 0x%x]\n",
                                __func__, i,
                                c_intr_rsp->intr[i].status,
                                c_intr_rsp->intr[i].intr_id,
                                c_intr_rsp->intr[i].intr_src);
                }

                return (-1);
        }

        for (i = 0; ((i < num_intrs) && create); i++) {
                if (!c_intr_rsp->intr[i].status) {
                        ha->hw.intr_id[(start_idx + i)] =
                                c_intr_rsp->intr[i].intr_id;
                        ha->hw.intr_src[(start_idx + i)] =
                                c_intr_rsp->intr[i].intr_src;
                }
        }

        return (0);
}

/*
 * Name: qla_config_rss
 * Function: Configure RSS for the context/interface.
 */
static const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL,
                        0x8030f20c77cb2da3ULL,
                        0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
                        0x255b0ec26d5a56daULL };
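
/*
 * These 40 bytes appear to be the well-known default Toeplitz hash key
 * (the one used in Microsoft's RSS verification examples), packed into
 * 64-bit words; qla_config_rss() below hands it to the firmware verbatim.
 */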

static int
qla_config_rss(qla_host_t *ha, uint16_t cntxt_id)
{
        q80_config_rss_t        *c_rss;
        q80_config_rss_rsp_t    *c_rss_rsp;
        uint32_t                err, i;
        device_t                dev = ha->pci_dev;

        c_rss = (q80_config_rss_t *)ha->hw.mbox;
        bzero(c_rss, (sizeof (q80_config_rss_t)));

        c_rss->opcode = Q8_MBX_CONFIG_RSS;

        c_rss->count_version = (sizeof (q80_config_rss_t) >> 2);
        c_rss->count_version |= Q8_MBX_CMD_VERSION;

        c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP_IP |
                                Q8_MBX_RSS_HASH_TYPE_IPV6_TCP_IP);
        //c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP |
        //                      Q8_MBX_RSS_HASH_TYPE_IPV6_TCP);

        c_rss->flags = Q8_MBX_RSS_FLAGS_ENABLE_RSS;
        c_rss->flags |= Q8_MBX_RSS_FLAGS_USE_IND_TABLE;

        c_rss->indtbl_mask = Q8_MBX_RSS_INDTBL_MASK;

        c_rss->indtbl_mask |= Q8_MBX_RSS_FLAGS_MULTI_RSS_VALID;
        c_rss->flags |= Q8_MBX_RSS_FLAGS_TYPE_CRSS;

        c_rss->cntxt_id = cntxt_id;

        for (i = 0; i < 5; i++) {
                c_rss->rss_key[i] = rss_key[i];
        }

        if (qla_mbx_cmd(ha, (uint32_t *)c_rss,
                (sizeof (q80_config_rss_t) >> 2),
                ha->hw.mbox, (sizeof(q80_config_rss_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }
        c_rss_rsp = (q80_config_rss_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(c_rss_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
                return (-1);
        }
        return 0;
}

static int
qla_set_rss_ind_table(qla_host_t *ha, uint32_t start_idx, uint32_t count,
        uint16_t cntxt_id, uint8_t *ind_table)
{
        q80_config_rss_ind_table_t      *c_rss_ind;
        q80_config_rss_ind_table_rsp_t  *c_rss_ind_rsp;
        uint32_t                        err;
        device_t                        dev = ha->pci_dev;

        if ((count > Q8_RSS_IND_TBL_SIZE) ||
                ((start_idx + count - 1) > Q8_RSS_IND_TBL_MAX_IDX)) {
                device_printf(dev, "%s: illegal count [%d, %d]\n", __func__,
                        start_idx, count);
                return (-1);
        }

        c_rss_ind = (q80_config_rss_ind_table_t *)ha->hw.mbox;
        bzero(c_rss_ind, sizeof (q80_config_rss_ind_table_t));

        c_rss_ind->opcode = Q8_MBX_CONFIG_RSS_TABLE;
        c_rss_ind->count_version = (sizeof (q80_config_rss_ind_table_t) >> 2);
        c_rss_ind->count_version |= Q8_MBX_CMD_VERSION;

        c_rss_ind->start_idx = start_idx;
        c_rss_ind->end_idx = start_idx + count - 1;
        c_rss_ind->cntxt_id = cntxt_id;
        bcopy(ind_table, c_rss_ind->ind_table, count);

        if (qla_mbx_cmd(ha, (uint32_t *)c_rss_ind,
                (sizeof (q80_config_rss_ind_table_t) >> 2), ha->hw.mbox,
                (sizeof(q80_config_rss_ind_table_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }

        c_rss_ind_rsp = (q80_config_rss_ind_table_rsp_t *)ha->hw.mbox;
        err = Q8_MBX_RSP_STATUS(c_rss_ind_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
                return (-1);
        }
        return 0;
}
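
/*
 * A caller would typically spread receive flows across the SDS rings by
 * filling the indirection table round-robin, e.g. (illustrative sketch
 * only, not a call made at this point in the driver):
 *
 *      uint8_t ind_table[Q8_RSS_IND_TBL_SIZE];
 *      uint32_t i;
 *
 *      for (i = 0; i < Q8_RSS_IND_TBL_SIZE; i++)
 *              ind_table[i] = i % ha->hw.num_sds_rings;
 *      (void)qla_set_rss_ind_table(ha, 0, Q8_RSS_IND_TBL_SIZE,
 *          ha->hw.rcv_cntxt_id, ind_table);
 */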

/*
 * Name: qla_config_intr_coalesce
 * Function: Configure Interrupt Coalescing.
 */
static int
qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable,
        int rcv)
{
        q80_config_intr_coalesc_t       *intrc;
        q80_config_intr_coalesc_rsp_t   *intrc_rsp;
        uint32_t                        err, i;
        device_t                        dev = ha->pci_dev;

        intrc = (q80_config_intr_coalesc_t *)ha->hw.mbox;
        bzero(intrc, (sizeof (q80_config_intr_coalesc_t)));

        intrc->opcode = Q8_MBX_CONFIG_INTR_COALESCE;
        intrc->count_version = (sizeof (q80_config_intr_coalesc_t) >> 2);
        intrc->count_version |= Q8_MBX_CMD_VERSION;

        if (rcv) {
                intrc->flags = Q8_MBX_INTRC_FLAGS_RCV;
                intrc->max_pkts = ha->hw.rcv_intr_coalesce & 0xFFFF;
                intrc->max_mswait = (ha->hw.rcv_intr_coalesce >> 16) & 0xFFFF;
        } else {
                intrc->flags = Q8_MBX_INTRC_FLAGS_XMT;
                intrc->max_pkts = ha->hw.xmt_intr_coalesce & 0xFFFF;
                intrc->max_mswait = (ha->hw.xmt_intr_coalesce >> 16) & 0xFFFF;
        }

        intrc->cntxt_id = cntxt_id;

        if (tenable) {
                intrc->flags |= Q8_MBX_INTRC_FLAGS_PERIODIC;
                intrc->timer_type = Q8_MBX_INTRC_TIMER_PERIODIC;

                for (i = 0; i < ha->hw.num_sds_rings; i++) {
                        intrc->sds_ring_mask |= (1 << i);
                }
                intrc->ms_timeout = 1000;
        }

        if (qla_mbx_cmd(ha, (uint32_t *)intrc,
                (sizeof (q80_config_intr_coalesc_t) >> 2),
                ha->hw.mbox, (sizeof(q80_config_intr_coalesc_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }
        intrc_rsp = (q80_config_intr_coalesc_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(intrc_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
                return (-1);
        }

        return 0;
}


/*
 * Name: qla_config_mac_addr
 * Function: binds a MAC address to the context/interface.
 *      Can be unicast, multicast or broadcast.
 */
static int
qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac,
        uint32_t num_mac)
{
        q80_config_mac_addr_t           *cmac;
        q80_config_mac_addr_rsp_t       *cmac_rsp;
        uint32_t                        err;
        device_t                        dev = ha->pci_dev;
        int                             i;
        uint8_t                         *mac_cpy = mac_addr;

        if (num_mac > Q8_MAX_MAC_ADDRS) {
                device_printf(dev, "%s: %s num_mac [0x%x] > Q8_MAX_MAC_ADDRS\n",
                        __func__, (add_mac ? "Add" : "Del"), num_mac);
                return (-1);
        }

        cmac = (q80_config_mac_addr_t *)ha->hw.mbox;
        bzero(cmac, (sizeof (q80_config_mac_addr_t)));

        cmac->opcode = Q8_MBX_CONFIG_MAC_ADDR;
        cmac->count_version = sizeof (q80_config_mac_addr_t) >> 2;
        cmac->count_version |= Q8_MBX_CMD_VERSION;

        if (add_mac)
                cmac->cmd = Q8_MBX_CMAC_CMD_ADD_MAC_ADDR;
        else
                cmac->cmd = Q8_MBX_CMAC_CMD_DEL_MAC_ADDR;

        cmac->cmd |= Q8_MBX_CMAC_CMD_CAM_INGRESS;

        cmac->nmac_entries = num_mac;
        cmac->cntxt_id = ha->hw.rcv_cntxt_id;

        for (i = 0; i < num_mac; i++) {
                bcopy(mac_addr, cmac->mac_addr[i].addr, Q8_ETHER_ADDR_LEN);
                mac_addr = mac_addr + ETHER_ADDR_LEN;
        }

        if (qla_mbx_cmd(ha, (uint32_t *)cmac,
                (sizeof (q80_config_mac_addr_t) >> 2),
                ha->hw.mbox, (sizeof(q80_config_mac_addr_rsp_t) >> 2), 1)) {
                device_printf(dev, "%s: %s failed0\n", __func__,
                        (add_mac ? "Add" : "Del"));
                return (-1);
        }
        cmac_rsp = (q80_config_mac_addr_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(cmac_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: %s failed1 [0x%08x]\n", __func__,
                        (add_mac ? "Add" : "Del"), err);
                for (i = 0; i < num_mac; i++) {
                        device_printf(dev, "%s: %02x:%02x:%02x:%02x:%02x:%02x\n",
                                __func__, mac_cpy[0], mac_cpy[1], mac_cpy[2],
                                mac_cpy[3], mac_cpy[4], mac_cpy[5]);
                        mac_cpy += ETHER_ADDR_LEN;
                }
                return (-1);
        }

        return 0;
}


/*
 * Name: qla_set_mac_rcv_mode
 * Function: Enable/Disable AllMulticast and Promiscuous Modes.
 */
static int
qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode)
{
        q80_config_mac_rcv_mode_t       *rcv_mode;
        uint32_t                        err;
        q80_config_mac_rcv_mode_rsp_t   *rcv_mode_rsp;
        device_t                        dev = ha->pci_dev;

        rcv_mode = (q80_config_mac_rcv_mode_t *)ha->hw.mbox;
        bzero(rcv_mode, (sizeof (q80_config_mac_rcv_mode_t)));

        rcv_mode->opcode = Q8_MBX_CONFIG_MAC_RX_MODE;
        rcv_mode->count_version = sizeof (q80_config_mac_rcv_mode_t) >> 2;
        rcv_mode->count_version |= Q8_MBX_CMD_VERSION;

        rcv_mode->mode = mode;

        rcv_mode->cntxt_id = ha->hw.rcv_cntxt_id;

        if (qla_mbx_cmd(ha, (uint32_t *)rcv_mode,
                (sizeof (q80_config_mac_rcv_mode_t) >> 2),
                ha->hw.mbox, (sizeof(q80_config_mac_rcv_mode_rsp_t) >> 2), 1)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }
        rcv_mode_rsp = (q80_config_mac_rcv_mode_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(rcv_mode_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
                return (-1);
        }

        return 0;
}

int
ql_set_promisc(qla_host_t *ha)
{
        int ret;

        ha->hw.mac_rcv_mode |= Q8_MBX_MAC_RCV_PROMISC_ENABLE;
        ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
        return (ret);
}

void
qla_reset_promisc(qla_host_t *ha)
{
        ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_RCV_PROMISC_ENABLE;
        (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
}

int
ql_set_allmulti(qla_host_t *ha)
{
        int ret;

        ha->hw.mac_rcv_mode |= Q8_MBX_MAC_ALL_MULTI_ENABLE;
        ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
        return (ret);
}

void
qla_reset_allmulti(qla_host_t *ha)
{
        ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_ALL_MULTI_ENABLE;
        (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
}

/*
 * Name: ql_set_max_mtu
 * Function:
 *      Sets the maximum transfer unit size for the specified rcv context.
 */
int
ql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id)
{
        device_t                dev;
        q80_set_max_mtu_t       *max_mtu;
        q80_set_max_mtu_rsp_t   *max_mtu_rsp;
        uint32_t                err;

        dev = ha->pci_dev;

        max_mtu = (q80_set_max_mtu_t *)ha->hw.mbox;
        bzero(max_mtu, (sizeof (q80_set_max_mtu_t)));

        max_mtu->opcode = Q8_MBX_SET_MAX_MTU;
        max_mtu->count_version = (sizeof (q80_set_max_mtu_t) >> 2);
        max_mtu->count_version |= Q8_MBX_CMD_VERSION;

        max_mtu->cntxt_id = cntxt_id;
        max_mtu->mtu = mtu;

        if (qla_mbx_cmd(ha, (uint32_t *)max_mtu,
                (sizeof (q80_set_max_mtu_t) >> 2),
                ha->hw.mbox, (sizeof (q80_set_max_mtu_rsp_t) >> 2), 1)) {
                device_printf(dev, "%s: failed\n", __func__);
                return -1;
        }

        max_mtu_rsp = (q80_set_max_mtu_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(max_mtu_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
        }
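        /*
         * Note: a non-zero completion status is only logged above; the
         * function still reports success to the caller.
         */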

        return 0;
}

static int
qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id)
{
        device_t                dev;
        q80_link_event_t        *lnk;
        q80_link_event_rsp_t    *lnk_rsp;
        uint32_t                err;

        dev = ha->pci_dev;

        lnk = (q80_link_event_t *)ha->hw.mbox;
        bzero(lnk, (sizeof (q80_link_event_t)));

        lnk->opcode = Q8_MBX_LINK_EVENT_REQ;
        lnk->count_version = (sizeof (q80_link_event_t) >> 2);
        lnk->count_version |= Q8_MBX_CMD_VERSION;

        lnk->cntxt_id = cntxt_id;
        lnk->cmd = Q8_LINK_EVENT_CMD_ENABLE_ASYNC;

        if (qla_mbx_cmd(ha, (uint32_t *)lnk, (sizeof (q80_link_event_t) >> 2),
                ha->hw.mbox, (sizeof (q80_link_event_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed\n", __func__);
                return -1;
        }

        lnk_rsp = (q80_link_event_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(lnk_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
        }

        return 0;
}

static int
qla_config_fw_lro(qla_host_t *ha, uint16_t cntxt_id)
{
        device_t                dev;
        q80_config_fw_lro_t     *fw_lro;
        q80_config_fw_lro_rsp_t *fw_lro_rsp;
        uint32_t                err;

        dev = ha->pci_dev;

        fw_lro = (q80_config_fw_lro_t *)ha->hw.mbox;
        bzero(fw_lro, sizeof(q80_config_fw_lro_t));

        fw_lro->opcode = Q8_MBX_CONFIG_FW_LRO;
        fw_lro->count_version = (sizeof (q80_config_fw_lro_t) >> 2);
        fw_lro->count_version |= Q8_MBX_CMD_VERSION;

        fw_lro->flags |= Q8_MBX_FW_LRO_IPV4 | Q8_MBX_FW_LRO_IPV4_WO_DST_IP_CHK;
        fw_lro->flags |= Q8_MBX_FW_LRO_IPV6 | Q8_MBX_FW_LRO_IPV6_WO_DST_IP_CHK;

        fw_lro->cntxt_id = cntxt_id;

        if (qla_mbx_cmd(ha, (uint32_t *)fw_lro,
                (sizeof (q80_config_fw_lro_t) >> 2),
                ha->hw.mbox, (sizeof (q80_config_fw_lro_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed\n", __func__);
                return -1;
        }

        fw_lro_rsp = (q80_config_fw_lro_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(fw_lro_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
        }

        return 0;
}

static int
qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode)
{
        device_t                dev;
        q80_hw_config_t         *hw_config;
        q80_hw_config_rsp_t     *hw_config_rsp;
        uint32_t                err;

        dev = ha->pci_dev;

        hw_config = (q80_hw_config_t *)ha->hw.mbox;
        bzero(hw_config, sizeof (q80_hw_config_t));

        hw_config->opcode = Q8_MBX_HW_CONFIG;
        hw_config->count_version = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE_COUNT;
        hw_config->count_version |= Q8_MBX_CMD_VERSION;

        hw_config->cmd = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE;

        hw_config->u.set_cam_search_mode.mode = search_mode;

        if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
                (sizeof (q80_hw_config_t) >> 2),
                ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed\n", __func__);
                return -1;
        }
        hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
        }

        return 0;
}

static int
qla_get_cam_search_mode(qla_host_t *ha)
{
        device_t                dev;
        q80_hw_config_t         *hw_config;
        q80_hw_config_rsp_t     *hw_config_rsp;
        uint32_t                err;

        dev = ha->pci_dev;

        hw_config = (q80_hw_config_t *)ha->hw.mbox;
        bzero(hw_config, sizeof (q80_hw_config_t));

        hw_config->opcode = Q8_MBX_HW_CONFIG;
        hw_config->count_version = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE_COUNT;
        hw_config->count_version |= Q8_MBX_CMD_VERSION;

        hw_config->cmd = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE;

        if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
                (sizeof (q80_hw_config_t) >> 2),
                ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed\n", __func__);
                return -1;
        }
        hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
        } else {
                device_printf(dev, "%s: cam search mode [0x%08x]\n", __func__,
                        hw_config_rsp->u.get_cam_search_mode.mode);
        }

        return 0;
}


static void
qla_xmt_stats(qla_host_t *ha, q80_xmt_stats_t *xstat, int i)
{
        device_t dev = ha->pci_dev;

        if (i < ha->hw.num_tx_rings) {
                device_printf(dev, "%s[%d]: total_bytes\t\t%" PRIu64 "\n",
                        __func__, i, xstat->total_bytes);
                device_printf(dev, "%s[%d]: total_pkts\t\t%" PRIu64 "\n",
                        __func__, i, xstat->total_pkts);
                device_printf(dev, "%s[%d]: errors\t\t%" PRIu64 "\n",
                        __func__, i, xstat->errors);
                device_printf(dev, "%s[%d]: pkts_dropped\t%" PRIu64 "\n",
                        __func__, i, xstat->pkts_dropped);
                device_printf(dev, "%s[%d]: switch_pkts\t\t%" PRIu64 "\n",
                        __func__, i, xstat->switch_pkts);
                device_printf(dev, "%s[%d]: num_buffers\t\t%" PRIu64 "\n",
                        __func__, i, xstat->num_buffers);
        } else {
                device_printf(dev, "%s: total_bytes\t\t\t%" PRIu64 "\n",
                        __func__, xstat->total_bytes);
                device_printf(dev, "%s: total_pkts\t\t\t%" PRIu64 "\n",
                        __func__, xstat->total_pkts);
                device_printf(dev, "%s: errors\t\t\t%" PRIu64 "\n",
                        __func__, xstat->errors);
                device_printf(dev, "%s: pkts_dropped\t\t\t%" PRIu64 "\n",
                        __func__, xstat->pkts_dropped);
                device_printf(dev, "%s: switch_pkts\t\t\t%" PRIu64 "\n",
                        __func__, xstat->switch_pkts);
                device_printf(dev, "%s: num_buffers\t\t\t%" PRIu64 "\n",
                        __func__, xstat->num_buffers);
        }
}

static void
qla_rcv_stats(qla_host_t *ha, q80_rcv_stats_t *rstat)
{
        device_t dev = ha->pci_dev;

        device_printf(dev, "%s: total_bytes\t\t\t%" PRIu64 "\n", __func__,
                rstat->total_bytes);
        device_printf(dev, "%s: total_pkts\t\t\t%" PRIu64 "\n", __func__,
                rstat->total_pkts);
        device_printf(dev, "%s: lro_pkt_count\t\t%" PRIu64 "\n", __func__,
                rstat->lro_pkt_count);
        device_printf(dev, "%s: sw_pkt_count\t\t\t%" PRIu64 "\n", __func__,
                rstat->sw_pkt_count);
        device_printf(dev, "%s: ip_chksum_err\t\t%" PRIu64 "\n", __func__,
                rstat->ip_chksum_err);
        device_printf(dev, "%s: pkts_wo_acntxts\t\t%" PRIu64 "\n", __func__,
                rstat->pkts_wo_acntxts);
        device_printf(dev, "%s: pkts_dropped_no_sds_card\t%" PRIu64 "\n",
                __func__, rstat->pkts_dropped_no_sds_card);
        device_printf(dev, "%s: pkts_dropped_no_sds_host\t%" PRIu64 "\n",
                __func__, rstat->pkts_dropped_no_sds_host);
        device_printf(dev, "%s: oversized_pkts\t\t%" PRIu64 "\n", __func__,
                rstat->oversized_pkts);
        device_printf(dev, "%s: pkts_dropped_no_rds\t\t%" PRIu64 "\n",
                __func__, rstat->pkts_dropped_no_rds);
        device_printf(dev, "%s: unxpctd_mcast_pkts\t\t%" PRIu64 "\n",
                __func__, rstat->unxpctd_mcast_pkts);
        device_printf(dev, "%s: re1_fbq_error\t\t%" PRIu64 "\n", __func__,
                rstat->re1_fbq_error);
        device_printf(dev, "%s: invalid_mac_addr\t\t%" PRIu64 "\n", __func__,
                rstat->invalid_mac_addr);
        device_printf(dev, "%s: rds_prime_trys\t\t%" PRIu64 "\n", __func__,
                rstat->rds_prime_trys);
        device_printf(dev, "%s: rds_prime_success\t\t%" PRIu64 "\n", __func__,
                rstat->rds_prime_success);
        device_printf(dev, "%s: lro_flows_added\t\t%" PRIu64 "\n", __func__,
                rstat->lro_flows_added);
        device_printf(dev, "%s: lro_flows_deleted\t\t%" PRIu64 "\n", __func__,
                rstat->lro_flows_deleted);
        device_printf(dev, "%s: lro_flows_active\t\t%" PRIu64 "\n", __func__,
                rstat->lro_flows_active);
        device_printf(dev, "%s: pkts_droped_unknown\t\t%" PRIu64 "\n",
                __func__, rstat->pkts_droped_unknown);
}

static void
qla_mac_stats(qla_host_t *ha, q80_mac_stats_t *mstat)
{
        device_t dev = ha->pci_dev;

        device_printf(dev, "%s: xmt_frames\t\t\t%" PRIu64 "\n", __func__,
                mstat->xmt_frames);
        device_printf(dev, "%s: xmt_bytes\t\t\t%" PRIu64 "\n", __func__,
                mstat->xmt_bytes);
        device_printf(dev, "%s: xmt_mcast_pkts\t\t%" PRIu64 "\n", __func__,
                mstat->xmt_mcast_pkts);
        device_printf(dev, "%s: xmt_bcast_pkts\t\t%" PRIu64 "\n", __func__,
                mstat->xmt_bcast_pkts);
        device_printf(dev, "%s: xmt_pause_frames\t\t%" PRIu64 "\n", __func__,
                mstat->xmt_pause_frames);
        device_printf(dev, "%s: xmt_cntrl_pkts\t\t%" PRIu64 "\n", __func__,
1565                 mstat->xmt_cntrl_pkts);
1566         device_printf(dev, "%s: xmt_pkt_lt_64bytes\t\t%" PRIu64 "\n",
1567                 __func__, mstat->xmt_pkt_lt_64bytes);
1568         device_printf(dev, "%s: xmt_pkt_lt_127bytes\t\t%" PRIu64 "\n",
1569                 __func__, mstat->xmt_pkt_lt_127bytes);
1570         device_printf(dev, "%s: xmt_pkt_lt_255bytes\t\t%" PRIu64 "\n",
1571                 __func__, mstat->xmt_pkt_lt_255bytes);
1572         device_printf(dev, "%s: xmt_pkt_lt_511bytes\t\t%" PRIu64 "\n",
1573                 __func__, mstat->xmt_pkt_lt_511bytes);
1574         device_printf(dev, "%s: xmt_pkt_lt_1023bytes\t\t%" PRIu64 "\n",
1575                 __func__, mstat->xmt_pkt_lt_1023bytes);
1576         device_printf(dev, "%s: xmt_pkt_lt_1518bytes\t\t%" PRIu64 "\n",
1577                 __func__, mstat->xmt_pkt_lt_1518bytes);
1578         device_printf(dev, "%s: xmt_pkt_gt_1518bytes\t\t%" PRIu64 "\n",
1579                 __func__, mstat->xmt_pkt_gt_1518bytes);
1580
1581         device_printf(dev, "%s: rcv_frames\t\t\t%" PRIu64 "\n", __func__,
1582                 mstat->rcv_frames);
1583         device_printf(dev, "%s: rcv_bytes\t\t\t%" PRIu64 "\n", __func__,
1584                 mstat->rcv_bytes);
1585         device_printf(dev, "%s: rcv_mcast_pkts\t\t%" PRIu64 "\n", __func__,
1586                 mstat->rcv_mcast_pkts);
1587         device_printf(dev, "%s: rcv_bcast_pkts\t\t%" PRIu64 "\n", __func__,
1588                 mstat->rcv_bcast_pkts);
1589         device_printf(dev, "%s: rcv_pause_frames\t\t%" PRIu64 "\n", __func__,
1590                 mstat->rcv_pause_frames);
1591         device_printf(dev, "%s: rcv_cntrl_pkts\t\t%" PRIu64 "\n", __func__,
1592                 mstat->rcv_cntrl_pkts);
1593         device_printf(dev, "%s: rcv_pkt_lt_64bytes\t\t%" PRIu64 "\n",
1594                 __func__, mstat->rcv_pkt_lt_64bytes);
1595         device_printf(dev, "%s: rcv_pkt_lt_127bytes\t\t%" PRIu64 "\n",
1596                 __func__, mstat->rcv_pkt_lt_127bytes);
1597         device_printf(dev, "%s: rcv_pkt_lt_255bytes\t\t%" PRIu64 "\n",
1598                 __func__, mstat->rcv_pkt_lt_255bytes);
1599         device_printf(dev, "%s: rcv_pkt_lt_511bytes\t\t%" PRIu64 "\n",
1600                 __func__, mstat->rcv_pkt_lt_511bytes);
1601         device_printf(dev, "%s: rcv_pkt_lt_1023bytes\t\t%" PRIu64 "\n",
1602                 __func__, mstat->rcv_pkt_lt_1023bytes);
1603         device_printf(dev, "%s: rcv_pkt_lt_1518bytes\t\t%" PRIu64 "\n",
1604                 __func__, mstat->rcv_pkt_lt_1518bytes);
1605         device_printf(dev, "%s: rcv_pkt_gt_1518bytes\t\t%" PRIu64 "\n",
1606                 __func__, mstat->rcv_pkt_gt_1518bytes);
1607
1608         device_printf(dev, "%s: rcv_len_error\t\t%" PRIu64 "\n", __func__,
1609                 mstat->rcv_len_error);
1610         device_printf(dev, "%s: rcv_len_small\t\t%" PRIu64 "\n", __func__,
1611                 mstat->rcv_len_small);
1612         device_printf(dev, "%s: rcv_len_large\t\t%" PRIu64 "\n", __func__,
1613                 mstat->rcv_len_large);
1614         device_printf(dev, "%s: rcv_jabber\t\t\t%" PRIu64 "\n", __func__,
1615                 mstat->rcv_jabber);
1616         device_printf(dev, "%s: rcv_dropped\t\t\t%" PRIu64 "\n", __func__,
1617                 mstat->rcv_dropped);
1618         device_printf(dev, "%s: fcs_error\t\t\t%" PRIu64 "\n", __func__,
1619                 mstat->fcs_error);
1620         device_printf(dev, "%s: align_error\t\t\t%" PRIu64 "\n", __func__,
1621                 mstat->align_error);
1622 }
1623
1624
1625 static int
1626 qla_get_hw_stats(qla_host_t *ha, uint32_t cmd, uint32_t rsp_size)
1627 {
1628         device_t                dev;
1629         q80_get_stats_t         *stat;
1630         q80_get_stats_rsp_t     *stat_rsp;
1631         uint32_t                err;
1632
1633         dev = ha->pci_dev;
1634
1635         stat = (q80_get_stats_t *)ha->hw.mbox;
1636         bzero(stat, (sizeof (q80_get_stats_t)));
1637
1638         stat->opcode = Q8_MBX_GET_STATS;
1639         stat->count_version = 2;
1640         stat->count_version |= Q8_MBX_CMD_VERSION;
1641
1642         stat->cmd = cmd;
1643
1644         if (qla_mbx_cmd(ha, (uint32_t *)stat, 2,
1645                 ha->hw.mbox, (rsp_size >> 2), 0)) {
1646                 device_printf(dev, "%s: failed\n", __func__);
1647                 return -1;
1648         }
1649
1650         stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
1651
1652         err = Q8_MBX_RSP_STATUS(stat_rsp->regcnt_status);
1653
1654         if (err) {
1655                 return -1;
1656         }
1657
1658         return 0;
1659 }
1660
1661 void
1662 ql_get_stats(qla_host_t *ha)
1663 {
1664         q80_get_stats_rsp_t     *stat_rsp;
1665         q80_mac_stats_t         *mstat;
1666         q80_xmt_stats_t         *xstat;
1667         q80_rcv_stats_t         *rstat;
1668         uint32_t                cmd;
1669         int                     i;
1670
1671         stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
1672         /*
1673          * Get MAC Statistics
1674          */
1675         cmd = Q8_GET_STATS_CMD_TYPE_MAC;
1676 //      cmd |= Q8_GET_STATS_CMD_CLEAR;
1677
1678         cmd |= ((ha->pci_func & 0x1) << 16);
1679
1680         if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
1681                 mstat = (q80_mac_stats_t *)&stat_rsp->u.mac;
1682                 qla_mac_stats(ha, mstat);
1683         } else {
1684                 device_printf(ha->pci_dev, "%s: mac failed [0x%08x]\n",
1685                         __func__, ha->hw.mbox[0]);
1686         }
1687         /*
1688          * Get RCV Statistics
1689          */
1690         cmd = Q8_GET_STATS_CMD_RCV | Q8_GET_STATS_CMD_TYPE_CNTXT;
1691 //      cmd |= Q8_GET_STATS_CMD_CLEAR;
1692         cmd |= (ha->hw.rcv_cntxt_id << 16);
1693
1694         if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
1695                 rstat = (q80_rcv_stats_t *)&stat_rsp->u.rcv;
1696                 qla_rcv_stats(ha, rstat);
1697         } else {
1698                 device_printf(ha->pci_dev, "%s: rcv failed [0x%08x]\n",
1699                         __func__, ha->hw.mbox[0]);
1700         }
1701         /*
1702          * Get XMT Statistics
1703          */
1704         for (i = 0 ; i < ha->hw.num_tx_rings; i++) {
1705                 cmd = Q8_GET_STATS_CMD_XMT | Q8_GET_STATS_CMD_TYPE_CNTXT;
1706 //              cmd |= Q8_GET_STATS_CMD_CLEAR;
1707                 cmd |= (ha->hw.tx_cntxt[i].tx_cntxt_id << 16);
1708
1709                 if (qla_get_hw_stats(ha, cmd, sizeof(q80_get_stats_rsp_t))
1710                         == 0) {
1711                         xstat = (q80_xmt_stats_t *)&stat_rsp->u.xmt;
1712                         qla_xmt_stats(ha, xstat, i);
1713                 } else {
1714                         device_printf(ha->pci_dev, "%s: xmt failed [0x%08x]\n",
1715                                 __func__, ha->hw.mbox[0]);
1716                 }
1717         }
1718         return;
1719 }
1720
1721 static void
1722 qla_get_quick_stats(qla_host_t *ha)
1723 {
1724         q80_get_mac_rcv_xmt_stats_rsp_t *stat_rsp;
1725         q80_mac_stats_t         *mstat;
1726         q80_xmt_stats_t         *xstat;
1727         q80_rcv_stats_t         *rstat;
1728         uint32_t                cmd;
1729
1730         stat_rsp = (q80_get_mac_rcv_xmt_stats_rsp_t *)ha->hw.mbox;
1731
1732         cmd = Q8_GET_STATS_CMD_TYPE_ALL;
1733 //      cmd |= Q8_GET_STATS_CMD_CLEAR;
1734
1735 //      cmd |= ((ha->pci_func & 0x3) << 16);
1736         cmd |= (0xFFFF << 16);
1737
1738         if (qla_get_hw_stats(ha, cmd,
1739                         sizeof (q80_get_mac_rcv_xmt_stats_rsp_t)) == 0) {
1740
1741                 mstat = (q80_mac_stats_t *)&stat_rsp->mac;
1742                 rstat = (q80_rcv_stats_t *)&stat_rsp->rcv;
1743                 xstat = (q80_xmt_stats_t *)&stat_rsp->xmt;
1744                 qla_mac_stats(ha, mstat);
1745                 qla_rcv_stats(ha, rstat);
1746                 qla_xmt_stats(ha, xstat, ha->hw.num_tx_rings);
1747         } else {
1748                 device_printf(ha->pci_dev, "%s: failed [0x%08x]\n",
1749                         __func__, ha->hw.mbox[0]);
1750         }
1751         return;
1752 }
1753
1754 /*
1755  * Name: qla_tx_tso
1756  * Function: Checks if the packet to be transmitted is a candidate for
1757  *      Large TCP Segment Offload. If yes, the appropriate fields in the Tx
1758  *      Ring Structure are plugged in.
1759  */
1760 static int
1761 qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr)
1762 {
1763         struct ether_vlan_header *eh;
1764         struct ip *ip = NULL;
1765         struct ip6_hdr *ip6 = NULL;
1766         struct tcphdr *th = NULL;
1767         uint32_t ehdrlen,  hdrlen, ip_hlen, tcp_hlen, tcp_opt_off;
1768         uint16_t etype, opcode, offload = 1;
1769         device_t dev;
1770
1771         dev = ha->pci_dev;
1772
1773
1774         eh = mtod(mp, struct ether_vlan_header *);
1775
1776         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1777                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1778                 etype = ntohs(eh->evl_proto);
1779         } else {
1780                 ehdrlen = ETHER_HDR_LEN;
1781                 etype = ntohs(eh->evl_encap_proto);
1782         }
1783
1784         hdrlen = 0;
1785
1786         switch (etype) {
1787                 case ETHERTYPE_IP:
1788
1789                         tcp_opt_off = ehdrlen + sizeof(struct ip) +
1790                                         sizeof(struct tcphdr);
1791
1792                         if (mp->m_len < tcp_opt_off) {
1793                                 m_copydata(mp, 0, tcp_opt_off, hdr);
1794                                 ip = (struct ip *)(hdr + ehdrlen);
1795                         } else {
1796                                 ip = (struct ip *)(mp->m_data + ehdrlen);
1797                         }
1798
1799                         ip_hlen = ip->ip_hl << 2;
1800                         opcode = Q8_TX_CMD_OP_XMT_TCP_LSO;
1801
1802                                 
1803                         if ((ip->ip_p != IPPROTO_TCP) ||
1804                                 (ip_hlen != sizeof (struct ip))){
1805                                 /* IP Options are not supported */
1806
1807                                 offload = 0;
1808                         } else
1809                                 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
1810
1811                 break;
1812
1813                 case ETHERTYPE_IPV6:
1814
1815                         tcp_opt_off = ehdrlen + sizeof(struct ip6_hdr) +
1816                                         sizeof (struct tcphdr);
1817
1818                         if (mp->m_len < tcp_opt_off) {
1819                                 m_copydata(mp, 0, tcp_opt_off, hdr);
1820                                 ip6 = (struct ip6_hdr *)(hdr + ehdrlen);
1821                         } else {
1822                                 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
1823                         }
1824
1825                         ip_hlen = sizeof(struct ip6_hdr);
1826                         opcode = Q8_TX_CMD_OP_XMT_TCP_LSO_IPV6;
1827
1828                         if (ip6->ip6_nxt != IPPROTO_TCP) {
1829                                 //device_printf(dev, "%s: ipv6\n", __func__);
1830                                 offload = 0;
1831                         } else
1832                                 th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
1833                 break;
1834
1835                 default:
1836                         QL_DPRINT8(ha, (dev, "%s: type!=ip\n", __func__));
1837                         offload = 0;
1838                 break;
1839         }
1840
1841         if (!offload)
1842                 return (-1);
1843
1844         tcp_hlen = th->th_off << 2;
1845         hdrlen = ehdrlen + ip_hlen + tcp_hlen;
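                /*
                 * Worked example: an untagged IPv4/TCP frame with no IP or
                 * TCP options gives hdrlen = 14 (ether) + 20 (ip) + 20 (tcp)
                 * = 54 bytes of headers, which ql_hw_send() replicates into
                 * the tx command descriptors for LSO.
                 */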
1846
1847         if (mp->m_len < hdrlen) {
1848                 if (mp->m_len < tcp_opt_off) {
1849                         if (tcp_hlen > sizeof(struct tcphdr)) {
1850                                 m_copydata(mp, tcp_opt_off,
1851                                         (tcp_hlen - sizeof(struct tcphdr)),
1852                                         &hdr[tcp_opt_off]);
1853                         }
1854                 } else {
1855                         m_copydata(mp, 0, hdrlen, hdr);
1856                 }
1857         }
1858
1859         tx_cmd->mss = mp->m_pkthdr.tso_segsz;
1860
1861         tx_cmd->flags_opcode = opcode;
1862         tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;
1863         tx_cmd->total_hdr_len = hdrlen;
1864
1865         /* Multicast: least significant bit of the first address byte is 1 */
1866         if (eh->evl_dhost[0] & 0x01) {
1867                 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST;
1868         }
1869
1870         if (mp->m_len < hdrlen) {
1871                 QL_DPRINT8(ha, (dev, "%s: hdrlen = %d\n", __func__, hdrlen));
1872                 return (1);
1873         }
1874
1875         return (0);
1876 }
1877
1878 /*
1879  * Name: qla_tx_chksum
1880  * Function: Checks if the packet to be transmitted is a candidate for
1881  *      TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx
1882  *      Ring Structure are plugged in.
1883  */
1884 static int
1885 qla_tx_chksum(qla_host_t *ha, struct mbuf *mp, uint32_t *op_code,
1886         uint32_t *tcp_hdr_off)
1887 {
1888         struct ether_vlan_header *eh;
1889         struct ip *ip;
1890         struct ip6_hdr *ip6;
1891         uint32_t ehdrlen, ip_hlen;
1892         uint16_t etype, opcode, offload = 1;
1893         device_t dev;
1894         uint8_t buf[sizeof(struct ip6_hdr)];
1895
1896         dev = ha->pci_dev;
1897
1898         *op_code = 0;
1899
1900         if ((mp->m_pkthdr.csum_flags &
                 (CSUM_TCP | CSUM_UDP | CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) == 0)
1901                 return (-1);
1902
1903         eh = mtod(mp, struct ether_vlan_header *);
1904
1905         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1906                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1907                 etype = ntohs(eh->evl_proto);
1908         } else {
1909                 ehdrlen = ETHER_HDR_LEN;
1910                 etype = ntohs(eh->evl_encap_proto);
1911         }
1912
1913                 
1914         switch (etype) {
1915                 case ETHERTYPE_IP:
1916                         ip = (struct ip *)(mp->m_data + ehdrlen);
1917
1918                         ip_hlen = sizeof (struct ip);
1919
1920                         if (mp->m_len < (ehdrlen + ip_hlen)) {
1921                                 m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
1922                                 ip = (struct ip *)buf;
1923                         }
1924
1925                         if (ip->ip_p == IPPROTO_TCP)
1926                                 opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM;
1927                         else if (ip->ip_p == IPPROTO_UDP)
1928                                 opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM;
1929                         else {
1930                                 //device_printf(dev, "%s: ipv4\n", __func__);
1931                                 offload = 0;
1932                         }
1933                 break;
1934
1935                 case ETHERTYPE_IPV6:
1936                         ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
1937
1938                         ip_hlen = sizeof(struct ip6_hdr);
1939
1940                         if (mp->m_len < (ehdrlen + ip_hlen)) {
1941                                 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
1942                                         buf);
1943                                 ip6 = (struct ip6_hdr *)buf;
1944                         }
1945
1946                         if (ip6->ip6_nxt == IPPROTO_TCP)
1947                                 opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6;
1948                         else if (ip6->ip6_nxt == IPPROTO_UDP)
1949                                 opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6;
1950                         else {
1951                                 //device_printf(dev, "%s: ipv6\n", __func__);
1952                                 offload = 0;
1953                         }
1954                 break;
1955
1956                 default:
1957                         offload = 0;
1958                 break;
1959         }
1960         if (!offload)
1961                 return (-1);
1962
1963         *op_code = opcode;
1964         *tcp_hdr_off = (ip_hlen + ehdrlen);
1965
1966         return (0);
1967 }
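
/*
 * Worked example: for an untagged IPv4/TCP frame qla_tx_chksum() sets
 * *op_code = Q8_TX_CMD_OP_XMT_TCP_CHKSUM and *tcp_hdr_off = 14 + 20 = 34,
 * the offset of the TCP header from the start of the frame.
 */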
1968
1969 #define QLA_TX_MIN_FREE 2
1970 /*
1971  * Name: ql_hw_send
1972  * Function: Transmits a packet. It first checks if the packet is a
1973  *      candidate for Large TCP Segment Offload and then for UDP/TCP checksum
1974  *      offload. If either of these creteria are not met, it is transmitted
1975  *      as a regular ethernet frame.
1976  */
1977 int
1978 ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
1979         uint32_t tx_idx, struct mbuf *mp, uint32_t txr_idx, uint32_t iscsi_pdu)
1980 {
1981         struct ether_vlan_header *eh;
1982         qla_hw_t *hw = &ha->hw;
1983         q80_tx_cmd_t *tx_cmd, tso_cmd;
1984         bus_dma_segment_t *c_seg;
1985         uint32_t num_tx_cmds, hdr_len = 0;
1986         uint32_t total_length = 0, bytes, tx_cmd_count = 0, txr_next;
1987         device_t dev;
1988         int i, ret;
1989         uint8_t *src = NULL, *dst = NULL;
1990         uint8_t frame_hdr[QL_FRAME_HDR_SIZE];
1991         uint32_t op_code = 0;
1992         uint32_t tcp_hdr_off = 0;
1993
1994         dev = ha->pci_dev;
1995
1996         /*
1997          * Always make sure there is at least one empty slot in the tx_ring;
1998          * the tx_ring is considered full when only one entry is available.
1999          */
2000         num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2;
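        /*
         * Each tx command holds up to Q8_TX_CMD_MAX_SEGMENTS (4, one per
         * bufN_addr/bufN_len pair) DMA segments, hence the shift by 2:
         * e.g. nsegs = 7 needs (7 + 3) >> 2 = 2 commands.
         */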
2001
2002         total_length = mp->m_pkthdr.len;
2003         if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
2004                 device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
2005                         __func__, total_length);
2006                 return (-1);
2007         }
2008         eh = mtod(mp, struct ether_vlan_header *);
2009
2010         if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
2011
2012                 bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t));
2013
2014                 src = frame_hdr;
2015                 ret = qla_tx_tso(ha, mp, &tso_cmd, src);
2016
2017                 if (!(ret & ~1)) {
2018                         /* find the additional tx_cmd descriptors required */
2019
2020                         if (mp->m_flags & M_VLANTAG)
2021                                 tso_cmd.total_hdr_len += ETHER_VLAN_ENCAP_LEN;
2022
2023                         hdr_len = tso_cmd.total_hdr_len;
2024
2025                         bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
2026                         bytes = QL_MIN(bytes, hdr_len);
2027
2028                         num_tx_cmds++;
2029                         hdr_len -= bytes;
2030
2031                         while (hdr_len) {
2032                                 bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
2033                                 hdr_len -= bytes;
2034                                 num_tx_cmds++;
2035                         }
2036                         hdr_len = tso_cmd.total_hdr_len;
2037
2038                         if (ret == 0)
2039                                 src = (uint8_t *)eh;
2040                 } else 
2041                         return (EINVAL);
2042         } else {
2043                 (void)qla_tx_chksum(ha, mp, &op_code, &tcp_hdr_off);
2044         }
2045
2046         if (iscsi_pdu)
2047                 ha->hw.iscsi_pkt_count++;
2048
2049         if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
2050                 qla_hw_tx_done_locked(ha, txr_idx);
2051                 if (hw->tx_cntxt[txr_idx].txr_free <=
2052                                 (num_tx_cmds + QLA_TX_MIN_FREE)) {
2053                         QL_DPRINT8(ha, (dev, "%s: (hw->txr_free <= "
2054                                 "(num_tx_cmds + QLA_TX_MIN_FREE))\n",
2055                                 __func__));
2056                         return (-1);
2057                 }
2058         }
2059
2060         tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[tx_idx];
2061
2062         if (!(mp->m_pkthdr.csum_flags & CSUM_TSO)) {
2063
2064                 if (nsegs > ha->hw.max_tx_segs)
2065                         ha->hw.max_tx_segs = nsegs;
2066
2067                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2068
2069                 if (op_code) {
2070                         tx_cmd->flags_opcode = op_code;
2071                         tx_cmd->tcp_hdr_off = tcp_hdr_off;
2072
2073                 } else {
2074                         tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER;
2075                 }
2076         } else {
2077                 bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t));
2078                 ha->tx_tso_frames++;
2079         }
2080
2081         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2082                 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED;
2083
2084                 if (iscsi_pdu)
2085                         eh->evl_tag |= ha->hw.user_pri_iscsi << 13;
2086
2087         } else if (mp->m_flags & M_VLANTAG) {
2088
2089                 if (hdr_len) { /* TSO */
2090                         tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED |
2091                                                 Q8_TX_CMD_FLAGS_HW_VLAN_ID);
2092                         tx_cmd->tcp_hdr_off += ETHER_VLAN_ENCAP_LEN;
2093                 } else
2094                         tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_HW_VLAN_ID;
2095
2096                 ha->hw_vlan_tx_frames++;
2097                 tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag;
2098
2099                 if (iscsi_pdu) {
2100                         tx_cmd->vlan_tci |= ha->hw.user_pri_iscsi << 13;
2101                         mp->m_pkthdr.ether_vtag = tx_cmd->vlan_tci;
2102                 }
2103         }
2104
2105
2106         tx_cmd->n_bufs = (uint8_t)nsegs;
2107         tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF);
2108         tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8)));
2109         tx_cmd->cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func);
2110
2111         c_seg = segs;
2112
2113         while (1) {
2114                 for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) {
2115
2116                         switch (i) {
2117                         case 0:
2118                                 tx_cmd->buf1_addr = c_seg->ds_addr;
2119                                 tx_cmd->buf1_len = c_seg->ds_len;
2120                                 break;
2121
2122                         case 1:
2123                                 tx_cmd->buf2_addr = c_seg->ds_addr;
2124                                 tx_cmd->buf2_len = c_seg->ds_len;
2125                                 break;
2126
2127                         case 2:
2128                                 tx_cmd->buf3_addr = c_seg->ds_addr;
2129                                 tx_cmd->buf3_len = c_seg->ds_len;
2130                                 break;
2131
2132                         case 3:
2133                                 tx_cmd->buf4_addr = c_seg->ds_addr;
2134                                 tx_cmd->buf4_len = c_seg->ds_len;
2135                                 break;
2136                         }
2137
2138                         c_seg++;
2139                         nsegs--;
2140                 }
2141
2142                 txr_next = hw->tx_cntxt[txr_idx].txr_next =
2143                         (hw->tx_cntxt[txr_idx].txr_next + 1) &
2144                                 (NUM_TX_DESCRIPTORS - 1);
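                /*
                 * The mask assumes NUM_TX_DESCRIPTORS is a power of two, so
                 * the AND wraps the producer index: e.g. with 1024
                 * descriptors, (1023 + 1) & 1023 == 0.
                 */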
2145                 tx_cmd_count++;
2146
2147                 if (!nsegs)
2148                         break;
2149                 
2150                 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2151                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2152         }
2153
2154         if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
2155
2156                 /* TSO : Copy the header in the following tx cmd descriptors */
2157
2158                 txr_next = hw->tx_cntxt[txr_idx].txr_next;
2159
2160                 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2161                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2162
2163                 bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
2164                 bytes = QL_MIN(bytes, hdr_len);
2165
2166                 dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN;
2167
2168                 if (mp->m_flags & M_VLANTAG) {
2169                         /* first copy the src/dst MAC addresses */
2170                         bcopy(src, dst, (ETHER_ADDR_LEN * 2));
2171                         dst += (ETHER_ADDR_LEN * 2);
2172                         src += (ETHER_ADDR_LEN * 2);
2173                         
2174                         *((uint16_t *)dst) = htons(ETHERTYPE_VLAN);
2175                         dst += 2;
2176                         *((uint16_t *)dst) = htons(mp->m_pkthdr.ether_vtag);
2177                         dst += 2;
2178
2179                         /* bytes left in src header */
2180                         hdr_len -= ((ETHER_ADDR_LEN * 2) +
2181                                         ETHER_VLAN_ENCAP_LEN);
2182
2183                         /* bytes left in TxCmd Entry */
2184                         bytes -= ((ETHER_ADDR_LEN * 2) + ETHER_VLAN_ENCAP_LEN);
2185
2186
2187                         bcopy(src, dst, bytes);
2188                         src += bytes;
2189                         hdr_len -= bytes;
2190                 } else {
2191                         bcopy(src, dst, bytes);
2192                         src += bytes;
2193                         hdr_len -= bytes;
2194                 }
2195
2196                 txr_next = hw->tx_cntxt[txr_idx].txr_next =
2197                                 (hw->tx_cntxt[txr_idx].txr_next + 1) &
2198                                         (NUM_TX_DESCRIPTORS - 1);
2199                 tx_cmd_count++;
2200                 
2201                 while (hdr_len) {
2202                         tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2203                         bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2204
2205                         bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
2206
2207                         bcopy(src, tx_cmd, bytes);
2208                         src += bytes;
2209                         hdr_len -= bytes;
2210
2211                         txr_next = hw->tx_cntxt[txr_idx].txr_next =
2212                                 (hw->tx_cntxt[txr_idx].txr_next + 1) &
2213                                         (NUM_TX_DESCRIPTORS - 1);
2214                         tx_cmd_count++;
2215                 }
2216         }
2217
2218         hw->tx_cntxt[txr_idx].txr_free =
2219                 hw->tx_cntxt[txr_idx].txr_free - tx_cmd_count;
2220
2221         QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->tx_cntxt[txr_idx].txr_next,\
2222                 txr_idx);
2223         QL_DPRINT8(ha, (dev, "%s: return\n", __func__));
2224
2225         return (0);
2226 }
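
/*
 * Worked example with illustrative values: a TSO mbuf chain mapped to 7
 * DMA segments needs (7 + 3) >> 2 = 2 data commands; assuming its 54-byte
 * header fits in the first header chunk of
 * (sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN) bytes, one more command
 * carries the header copy, so the producer index advances by 3 entries.
 */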
2227
2228
2229
2230 #define Q8_CONFIG_IND_TBL_SIZE  32 /* < Q8_RSS_IND_TBL_SIZE and power of 2 */
2231 static int
2232 qla_config_rss_ind_table(qla_host_t *ha)
2233 {
2234         uint32_t i, count;
2235         uint8_t rss_ind_tbl[Q8_CONFIG_IND_TBL_SIZE];
2236
2237
2238         for (i = 0; i < Q8_CONFIG_IND_TBL_SIZE; i++) {
2239                 rss_ind_tbl[i] = i % ha->hw.num_sds_rings;
2240         }
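        /*
         * e.g. with num_sds_rings == 4 the table above is filled as
         * { 0, 1, 2, 3, 0, 1, 2, 3, ... }, spreading the RSS hash buckets
         * round-robin across the SDS rings.
         */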
2241
2242         for (i = 0; i <= Q8_RSS_IND_TBL_MAX_IDX ;
2243                 i = i + Q8_CONFIG_IND_TBL_SIZE) {
2244
2245                 if ((i + Q8_CONFIG_IND_TBL_SIZE) > Q8_RSS_IND_TBL_MAX_IDX) {
2246                         count = Q8_RSS_IND_TBL_MAX_IDX - i + 1;
2247                 } else {
2248                         count = Q8_CONFIG_IND_TBL_SIZE;
2249                 }
2250
2251                 if (qla_set_rss_ind_table(ha, i, count, ha->hw.rcv_cntxt_id,
2252                         rss_ind_tbl))
2253                         return (-1);
2254         }
2255
2256         return (0);
2257 }
2258
2259 /*
2260  * Name: ql_del_hw_if
2261  * Function: Destroys the hardware specific entities corresponding to an
2262  *      Ethernet Interface
2263  */
2264 void
2265 ql_del_hw_if(qla_host_t *ha)
2266 {
2267         uint32_t i;
2268         uint32_t num_msix;
2269
2270         (void)qla_stop_nic_func(ha);
2271
2272         qla_del_rcv_cntxt(ha);
2273
2274         qla_del_xmt_cntxt(ha);
2275
2276         if (ha->hw.flags.init_intr_cnxt) {
2277                 for (i = 0; i < ha->hw.num_sds_rings; ) {
2278
2279                         if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2280                                 num_msix = Q8_MAX_INTR_VECTORS;
2281                         else
2282                                 num_msix = ha->hw.num_sds_rings - i;
2283                         qla_config_intr_cntxt(ha, i, num_msix, 0);
2284
2285                         i += num_msix;
2286                 }
2287
2288                 ha->hw.flags.init_intr_cnxt = 0;
2289         }
2290
2291         return;
2292 }
2293
2294 void
2295 qla_confirm_9kb_enable(qla_host_t *ha)
2296 {
2297         uint32_t supports_9kb = 0;
2298
2299         ha->hw.mbx_intr_mask_offset = READ_REG32(ha, Q8_MBOX_INT_MASK_MSIX);
2300
2301         /* Use MSI-X vector 0; Enable Firmware Mailbox Interrupt */
2302         WRITE_REG32(ha, Q8_MBOX_INT_ENABLE, BIT_2);
2303         WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
2304
2305         qla_get_nic_partition(ha, &supports_9kb, NULL);
2306
2307         if (!supports_9kb)
2308                 ha->hw.enable_9kb = 0;
2309
2310         return;
2311 }
2312
2313
2314 /*
2315  * Name: ql_init_hw_if
2316  * Function: Creates the hardware specific entities corresponding to an
2317  *      Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
2318  *      corresponding to the interface. Enables LRO if allowed.
2319  */
2320 int
2321 ql_init_hw_if(qla_host_t *ha)
2322 {
2323         device_t        dev;
2324         uint32_t        i;
2325         uint8_t         bcast_mac[6];
2326         qla_rdesc_t     *rdesc;
2327         uint32_t        num_msix;
2328
2329         dev = ha->pci_dev;
2330
2331         for (i = 0; i < ha->hw.num_sds_rings; i++) {
2332                 bzero(ha->hw.dma_buf.sds_ring[i].dma_b,
2333                         ha->hw.dma_buf.sds_ring[i].size);
2334         }
2335
2336         for (i = 0; i < ha->hw.num_sds_rings; ) {
2337
2338                 if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2339                         num_msix = Q8_MAX_INTR_VECTORS;
2340                 else
2341                         num_msix = ha->hw.num_sds_rings - i;
2342
2343                 if (qla_config_intr_cntxt(ha, i, num_msix, 1)) {
2344
2345                         if (i > 0) {
2346
2347                                 num_msix = i;
2348
2349                                 for (i = 0; i < num_msix; ) {
2350                                         qla_config_intr_cntxt(ha, i,
2351                                                 Q8_MAX_INTR_VECTORS, 0);
2352                                         i += Q8_MAX_INTR_VECTORS;
2353                                 }
2354                         }
2355                         return (-1);
2356                 }
2357
2358                 i = i + num_msix;
2359         }
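        /*
         * Illustrative example: if Q8_MAX_INTR_VECTORS were 8 and 12 SDS
         * rings were configured, the loop above would create the interrupt
         * contexts in two batches: (start 0, count 8) then (start 8,
         * count 4).
         */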
2360
2361         ha->hw.flags.init_intr_cnxt = 1;
2362
2363         /*
2364          * Create Receive Context
2365          */
2366         if (qla_init_rcv_cntxt(ha)) {
2367                 return (-1);
2368         }
2369
2370         for (i = 0; i < ha->hw.num_rds_rings; i++) {
2371                 rdesc = &ha->hw.rds[i];
2372                 rdesc->rx_next = NUM_RX_DESCRIPTORS - 2;
2373                 rdesc->rx_in = 0;
2374                 /* Update the RDS Producer Indices */
2375                 QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,\
2376                         rdesc->rx_next);
2377         }
2378
2379
2380         /*
2381          * Create Transmit Context
2382          */
2383         if (qla_init_xmt_cntxt(ha)) {
2384                 qla_del_rcv_cntxt(ha);
2385                 return (-1);
2386         }
2387         ha->hw.max_tx_segs = 0;
2388
2389         if (qla_config_mac_addr(ha, ha->hw.mac_addr, 1, 1))
2390                 return(-1);
2391
2392         ha->hw.flags.unicast_mac = 1;
2393
2394         bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
2395         bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
2396
2397         if (qla_config_mac_addr(ha, bcast_mac, 1, 1))
2398                 return (-1);
2399
2400         ha->hw.flags.bcast_mac = 1;
2401
2402         /*
2403          * program any cached multicast addresses
2404          */
2405         if (qla_hw_add_all_mcast(ha))
2406                 return (-1);
2407
2408         if (qla_config_rss(ha, ha->hw.rcv_cntxt_id))
2409                 return (-1);
2410
2411         if (qla_config_rss_ind_table(ha))
2412                 return (-1);
2413
2414         if (qla_config_intr_coalesce(ha, ha->hw.rcv_cntxt_id, 0, 1))
2415                 return (-1);
2416
2417         if (qla_link_event_req(ha, ha->hw.rcv_cntxt_id))
2418                 return (-1);
2419
2420         if (qla_config_fw_lro(ha, ha->hw.rcv_cntxt_id))
2421                 return (-1);
2422
2423         if (qla_init_nic_func(ha))
2424                 return (-1);
2425
2426         if (qla_query_fw_dcbx_caps(ha))
2427                 return (-1);
2428
2429         for (i = 0; i < ha->hw.num_sds_rings; i++)
2430                 QL_ENABLE_INTERRUPTS(ha, i);
2431
2432         return (0);
2433 }
2434
2435 static int
2436 qla_map_sds_to_rds(qla_host_t *ha, uint32_t start_idx, uint32_t num_idx)
2437 {
2438         device_t                dev = ha->pci_dev;
2439         q80_rq_map_sds_to_rds_t *map_rings;
2440         q80_rsp_map_sds_to_rds_t *map_rings_rsp;
2441         uint32_t                i, err;
2442         qla_hw_t                *hw = &ha->hw;
2443
2444         map_rings = (q80_rq_map_sds_to_rds_t *)ha->hw.mbox;
2445         bzero(map_rings, sizeof(q80_rq_map_sds_to_rds_t));
2446
2447         map_rings->opcode = Q8_MBX_MAP_SDS_TO_RDS;
2448         map_rings->count_version = (sizeof (q80_rq_map_sds_to_rds_t) >> 2);
2449         map_rings->count_version |= Q8_MBX_CMD_VERSION;
2450
2451         map_rings->cntxt_id = hw->rcv_cntxt_id;
2452         map_rings->num_rings = num_idx;
2453
2454         for (i = 0; i < num_idx; i++) {
2455                 map_rings->sds_rds[i].sds_ring = i + start_idx;
2456                 map_rings->sds_rds[i].rds_ring = i + start_idx;
2457         }
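        /*
         * The mapping is the identity over the requested window: e.g.
         * start_idx = 4, num_idx = 4 yields the SDS-to-RDS pairs
         * (4,4), (5,5), (6,6) and (7,7).
         */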
2458
2459         if (qla_mbx_cmd(ha, (uint32_t *)map_rings,
2460                 (sizeof (q80_rq_map_sds_to_rds_t) >> 2),
2461                 ha->hw.mbox, (sizeof(q80_rsp_map_sds_to_rds_t) >> 2), 0)) {
2462                 device_printf(dev, "%s: failed0\n", __func__);
2463                 return (-1);
2464         }
2465
2466         map_rings_rsp = (q80_rsp_map_sds_to_rds_t *)ha->hw.mbox;
2467
2468         err = Q8_MBX_RSP_STATUS(map_rings_rsp->regcnt_status);
2469
2470         if (err) {
2471                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2472                 return (-1);
2473         }
2474
2475         return (0);
2476 }
2477
2478 /*
2479  * Name: qla_init_rcv_cntxt
2480  * Function: Creates the Receive Context.
2481  */
2482 static int
2483 qla_init_rcv_cntxt(qla_host_t *ha)
2484 {
2485         q80_rq_rcv_cntxt_t      *rcntxt;
2486         q80_rsp_rcv_cntxt_t     *rcntxt_rsp;
2487         q80_stat_desc_t         *sdesc;
2488         int                     i, j;
2489         qla_hw_t                *hw = &ha->hw;
2490         device_t                dev;
2491         uint32_t                err;
2492         uint32_t                rcntxt_sds_rings;
2493         uint32_t                rcntxt_rds_rings;
2494         uint32_t                max_idx;
2495
2496         dev = ha->pci_dev;
2497
2498         /*
2499          * Create Receive Context
2500          */
2501
2502         for (i = 0; i < hw->num_sds_rings; i++) {
2503                 for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++) {
2504                         sdesc = (q80_stat_desc_t *)
2505                                         &hw->sds[i].sds_ring_base[j];
2506                         sdesc->data[0] = 1ULL;
2507                         sdesc->data[1] = 1ULL;
2508                 }
2509         }
2510
2511         rcntxt_sds_rings = hw->num_sds_rings;
2512         if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS)
2513                 rcntxt_sds_rings = MAX_RCNTXT_SDS_RINGS;
2514
2515         rcntxt_rds_rings = hw->num_rds_rings;
2516
2517         if (hw->num_rds_rings > MAX_RDS_RING_SETS)
2518                 rcntxt_rds_rings = MAX_RDS_RING_SETS;
2519
2520         rcntxt = (q80_rq_rcv_cntxt_t *)ha->hw.mbox;
2521         bzero(rcntxt, (sizeof (q80_rq_rcv_cntxt_t)));
2522
2523         rcntxt->opcode = Q8_MBX_CREATE_RX_CNTXT;
2524         rcntxt->count_version = (sizeof (q80_rq_rcv_cntxt_t) >> 2);
2525         rcntxt->count_version |= Q8_MBX_CMD_VERSION;
2526
2527         rcntxt->cap0 = Q8_RCV_CNTXT_CAP0_BASEFW |
2528                         Q8_RCV_CNTXT_CAP0_LRO |
2529                         Q8_RCV_CNTXT_CAP0_HW_LRO |
2530                         Q8_RCV_CNTXT_CAP0_RSS |
2531                         Q8_RCV_CNTXT_CAP0_SGL_LRO;
2532
2533         if (ha->hw.enable_9kb)
2534                 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SINGLE_JUMBO;
2535         else
2536                 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SGL_JUMBO;
2537
2538         if (ha->hw.num_rds_rings > 1) {
2539                 rcntxt->nrds_sets_rings = rcntxt_rds_rings | (1 << 5);
2540                 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_MULTI_RDS;
2541         } else
2542                 rcntxt->nrds_sets_rings = 0x1 | (1 << 5);
2543
2544         rcntxt->nsds_rings = rcntxt_sds_rings;
2545
2546         rcntxt->rds_producer_mode = Q8_RCV_CNTXT_RDS_PROD_MODE_UNIQUE;
2547
2548         rcntxt->rcv_vpid = 0;
2549
2550         for (i = 0; i <  rcntxt_sds_rings; i++) {
2551                 rcntxt->sds[i].paddr =
2552                         qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr);
2553                 rcntxt->sds[i].size =
2554                         qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
2555                 if (ha->msix_count == 2) {
2556                         rcntxt->sds[i].intr_id =
2557                                 qla_host_to_le16(hw->intr_id[0]);
2558                         rcntxt->sds[i].intr_src_bit = qla_host_to_le16((i));
2559                 } else {
2560                         rcntxt->sds[i].intr_id =
2561                                 qla_host_to_le16(hw->intr_id[i]);
2562                         rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0);
2563                 }
2564         }
2565
2566         for (i = 0; i <  rcntxt_rds_rings; i++) {
2567                 rcntxt->rds[i].paddr_std =
2568                         qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr);
2569
2570                 if (ha->hw.enable_9kb)
2571                         rcntxt->rds[i].std_bsize =
2572                                 qla_host_to_le64(MJUM9BYTES);
2573                 else
2574                         rcntxt->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
2575
2576                 rcntxt->rds[i].std_nentries =
2577                         qla_host_to_le32(NUM_RX_DESCRIPTORS);
2578         }
2579
2580         if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
2581                 (sizeof (q80_rq_rcv_cntxt_t) >> 2),
2582                 ha->hw.mbox, (sizeof(q80_rsp_rcv_cntxt_t) >> 2), 0)) {
2583                 device_printf(dev, "%s: failed0\n", __func__);
2584                 return (-1);
2585         }
2586
2587         rcntxt_rsp = (q80_rsp_rcv_cntxt_t *)ha->hw.mbox;
2588
2589         err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
2590
2591         if (err) {
2592                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2593                 return (-1);
2594         }
2595
2596         for (i = 0; i <  rcntxt_sds_rings; i++) {
2597                 hw->sds[i].sds_consumer = rcntxt_rsp->sds_cons[i];
2598         }
2599
2600         for (i = 0; i <  rcntxt_rds_rings; i++) {
2601                 hw->rds[i].prod_std = rcntxt_rsp->rds[i].prod_std;
2602         }
2603
2604         hw->rcv_cntxt_id = rcntxt_rsp->cntxt_id;
2605
2606         ha->hw.flags.init_rx_cnxt = 1;
2607
2608         if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) {
2609
2610                 for (i = MAX_RCNTXT_SDS_RINGS; i < hw->num_sds_rings;) {
2611
2612                         if ((i + MAX_RCNTXT_SDS_RINGS) < hw->num_sds_rings)
2613                                 max_idx = MAX_RCNTXT_SDS_RINGS;
2614                         else
2615                                 max_idx = hw->num_sds_rings - i;
2616
2617                         err = qla_add_rcv_rings(ha, i, max_idx);
2618                         if (err)
2619                                 return -1;
2620
2621                         i += max_idx;
2622                 }
2623         }
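        /*
         * Illustrative example: if MAX_RCNTXT_SDS_RINGS were 8 and 12 SDS
         * rings were configured, rings 0-7 are created with the receive
         * context above and qla_add_rcv_rings(ha, 8, 4) attaches the
         * remaining 4.
         */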
2624
2625         if (hw->num_rds_rings > 1) {
2626
2627                 for (i = 0; i < hw->num_rds_rings; ) {
2628
2629                         if ((i + MAX_SDS_TO_RDS_MAP) < hw->num_rds_rings)
2630                                 max_idx = MAX_SDS_TO_RDS_MAP;
2631                         else
2632                                 max_idx = hw->num_rds_rings - i;
2633
2634                         err = qla_map_sds_to_rds(ha, i, max_idx);
2635                         if (err)
2636                                 return -1;
2637
2638                         i += max_idx;
2639                 }
2640         }
2641
2642         return (0);
2643 }
2644
2645 static int
2646 qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds)
2647 {
2648         device_t                dev = ha->pci_dev;
2649         q80_rq_add_rcv_rings_t  *add_rcv;
2650         q80_rsp_add_rcv_rings_t *add_rcv_rsp;
2651         uint32_t                i,j, err;
2652         qla_hw_t                *hw = &ha->hw;
2653
2654         add_rcv = (q80_rq_add_rcv_rings_t *)ha->hw.mbox;
2655         bzero(add_rcv, sizeof (q80_rq_add_rcv_rings_t));
2656
2657         add_rcv->opcode = Q8_MBX_ADD_RX_RINGS;
2658         add_rcv->count_version = (sizeof (q80_rq_add_rcv_rings_t) >> 2);
2659         add_rcv->count_version |= Q8_MBX_CMD_VERSION;
2660
2661         add_rcv->nrds_sets_rings = nsds | (1 << 5);
2662         add_rcv->nsds_rings = nsds;
2663         add_rcv->cntxt_id = hw->rcv_cntxt_id;
2664
2665         for (i = 0; i <  nsds; i++) {
2666
2667                 j = i + sds_idx;
2668
2669                 add_rcv->sds[i].paddr =
2670                         qla_host_to_le64(hw->dma_buf.sds_ring[j].dma_addr);
2671
2672                 add_rcv->sds[i].size =
2673                         qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
2674
2675                 if (ha->msix_count == 2) {
2676                         add_rcv->sds[i].intr_id =
2677                                 qla_host_to_le16(hw->intr_id[0]);
2678                         add_rcv->sds[i].intr_src_bit = qla_host_to_le16(j);
2679                 } else {
2680                         add_rcv->sds[i].intr_id =
2681                                 qla_host_to_le16(hw->intr_id[j]);
2682                         add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0);
2683                 }
2684
2685         }
2686         for (i = 0; (i <  nsds); i++) {
2687                 j = i + sds_idx;
2688
2689                 add_rcv->rds[i].paddr_std =
2690                         qla_host_to_le64(hw->dma_buf.rds_ring[j].dma_addr);
2691
2692                 if (ha->hw.enable_9kb)
2693                         add_rcv->rds[i].std_bsize =
2694                                 qla_host_to_le64(MJUM9BYTES);
2695                 else
2696                         add_rcv->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
2697
2698                 add_rcv->rds[i].std_nentries =
2699                         qla_host_to_le32(NUM_RX_DESCRIPTORS);
2700         }
2701
2702
2703         if (qla_mbx_cmd(ha, (uint32_t *)add_rcv,
2704                 (sizeof (q80_rq_add_rcv_rings_t) >> 2),
2705                 ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
2706                 device_printf(dev, "%s: failed0\n", __func__);
2707                 return (-1);
2708         }
2709
2710         add_rcv_rsp = (q80_rsp_add_rcv_rings_t *)ha->hw.mbox;
2711
2712         err = Q8_MBX_RSP_STATUS(add_rcv_rsp->regcnt_status);
2713
2714         if (err) {
2715                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2716                 return (-1);
2717         }
2718
2719         for (i = 0; i < nsds; i++) {
2720                 hw->sds[(i + sds_idx)].sds_consumer = add_rcv_rsp->sds_cons[i];
2721         }
2722
2723         for (i = 0; i < nsds; i++) {
2724                 hw->rds[(i + sds_idx)].prod_std = add_rcv_rsp->rds[i].prod_std;
2725         }
2726
2727         return (0);
2728 }
2729
2730 /*
2731  * Name: qla_del_rcv_cntxt
2732  * Function: Destroys the Receive Context.
2733  */
2734 static void
2735 qla_del_rcv_cntxt(qla_host_t *ha)
2736 {
2737         device_t                        dev = ha->pci_dev;
2738         q80_rcv_cntxt_destroy_t         *rcntxt;
2739         q80_rcv_cntxt_destroy_rsp_t     *rcntxt_rsp;
2740         uint32_t                        err;
2741         uint8_t                         bcast_mac[6];
2742
2743         if (!ha->hw.flags.init_rx_cnxt)
2744                 return;
2745
2746         if (qla_hw_del_all_mcast(ha))
2747                 return;
2748
2749         if (ha->hw.flags.bcast_mac) {
2750
2751                 bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
2752                 bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
2753
2754                 if (qla_config_mac_addr(ha, bcast_mac, 0, 1))
2755                         return;
2756                 ha->hw.flags.bcast_mac = 0;
2757
2758         }
2759
2760         if (ha->hw.flags.unicast_mac) {
2761                 if (qla_config_mac_addr(ha, ha->hw.mac_addr, 0, 1))
2762                         return;
2763                 ha->hw.flags.unicast_mac = 0;
2764         }
2765
2766         rcntxt = (q80_rcv_cntxt_destroy_t *)ha->hw.mbox;
2767         bzero(rcntxt, (sizeof (q80_rcv_cntxt_destroy_t)));
2768
2769         rcntxt->opcode = Q8_MBX_DESTROY_RX_CNTXT;
2770         rcntxt->count_version = (sizeof (q80_rcv_cntxt_destroy_t) >> 2);
2771         rcntxt->count_version |= Q8_MBX_CMD_VERSION;
2772
2773         rcntxt->cntxt_id = ha->hw.rcv_cntxt_id;
2774
2775         if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
2776                 (sizeof (q80_rcv_cntxt_destroy_t) >> 2),
2777                 ha->hw.mbox, (sizeof(q80_rcv_cntxt_destroy_rsp_t) >> 2), 0)) {
2778                 device_printf(dev, "%s: failed0\n", __func__);
2779                 return;
2780         }
2781         rcntxt_rsp = (q80_rcv_cntxt_destroy_rsp_t *)ha->hw.mbox;
2782
2783         err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
2784
2785         if (err) {
2786                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2787         }
2788
2789         ha->hw.flags.init_rx_cnxt = 0;
2790         return;
2791 }
2792
2793 /*
2794  * Name: qla_init_xmt_cntxt
2795  * Function: Creates the Transmit Context.
2796  */
2797 static int
2798 qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
2799 {
2800         device_t                dev;
2801         qla_hw_t                *hw = &ha->hw;
2802         q80_rq_tx_cntxt_t       *tcntxt;
2803         q80_rsp_tx_cntxt_t      *tcntxt_rsp;
2804         uint32_t                err;
2805         qla_hw_tx_cntxt_t       *hw_tx_cntxt;
2806
2807         hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
2808
2809         dev = ha->pci_dev;
2810
2811         /*
2812          * Create Transmit Context
2813          */
2814         tcntxt = (q80_rq_tx_cntxt_t *)ha->hw.mbox;
2815         bzero(tcntxt, (sizeof (q80_rq_tx_cntxt_t)));
2816
2817         tcntxt->opcode = Q8_MBX_CREATE_TX_CNTXT;
2818         tcntxt->count_version = (sizeof (q80_rq_tx_cntxt_t) >> 2);
2819         tcntxt->count_version |= Q8_MBX_CMD_VERSION;
2820
2821 #ifdef QL_ENABLE_ISCSI_TLV
2822
2823         tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO |
2824                                 Q8_TX_CNTXT_CAP0_TC;
2825
2826         if (txr_idx >= (ha->hw.num_tx_rings >> 1)) {
2827                 tcntxt->traffic_class = 1;
2828         }
2829
2830 #else
2831
2832         tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO;
2833
2834 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
2835
2836         tcntxt->ntx_rings = 1;
2837
2838         tcntxt->tx_ring[0].paddr =
2839                 qla_host_to_le64(hw_tx_cntxt->tx_ring_paddr);
2840         tcntxt->tx_ring[0].tx_consumer =
2841                 qla_host_to_le64(hw_tx_cntxt->tx_cons_paddr);
2842         tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS);
2843
2844         tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[0]);
2845         tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0);
2846
2847
2848         hw_tx_cntxt->txr_free = NUM_TX_DESCRIPTORS;
2849         hw_tx_cntxt->txr_next = hw_tx_cntxt->txr_comp = 0;
2850
2851         if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
2852                 (sizeof (q80_rq_tx_cntxt_t) >> 2),
2853                 ha->hw.mbox,
2854                 (sizeof(q80_rsp_tx_cntxt_t) >> 2), 0)) {
2855                 device_printf(dev, "%s: failed0\n", __func__);
2856                 return (-1);
2857         }
2858         tcntxt_rsp = (q80_rsp_tx_cntxt_t *)ha->hw.mbox;
2859
2860         err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
2861
2862         if (err) {
2863                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2864                 return -1;
2865         }
2866
2867         hw_tx_cntxt->tx_prod_reg = tcntxt_rsp->tx_ring[0].prod_index;
2868         hw_tx_cntxt->tx_cntxt_id = tcntxt_rsp->tx_ring[0].cntxt_id;
2869
2870         if (qla_config_intr_coalesce(ha, hw_tx_cntxt->tx_cntxt_id, 0, 0))
2871                 return (-1);
2872
2873         return (0);
2874 }
2875
2876
2877 /*
2878  * Name: qla_del_xmt_cntxt
2879  * Function: Destroys the Transmit Context.
2880  */
2881 static int
2882 qla_del_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
2883 {
2884         device_t                        dev = ha->pci_dev;
2885         q80_tx_cntxt_destroy_t          *tcntxt;
2886         q80_tx_cntxt_destroy_rsp_t      *tcntxt_rsp;
2887         uint32_t                        err;
2888
2889         tcntxt = (q80_tx_cntxt_destroy_t *)ha->hw.mbox;
2890         bzero(tcntxt, (sizeof (q80_tx_cntxt_destroy_t)));
2891
2892         tcntxt->opcode = Q8_MBX_DESTROY_TX_CNTXT;
2893         tcntxt->count_version = (sizeof (q80_tx_cntxt_destroy_t) >> 2);
2894         tcntxt->count_version |= Q8_MBX_CMD_VERSION;
2895
2896         tcntxt->cntxt_id = ha->hw.tx_cntxt[txr_idx].tx_cntxt_id;
2897
2898         if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
2899                 (sizeof (q80_tx_cntxt_destroy_t) >> 2),
2900                 ha->hw.mbox, (sizeof (q80_tx_cntxt_destroy_rsp_t) >> 2), 0)) {
2901                 device_printf(dev, "%s: failed0\n", __func__);
2902                 return (-1);
2903         }
2904         tcntxt_rsp = (q80_tx_cntxt_destroy_rsp_t *)ha->hw.mbox;
2905
2906         err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
2907
2908         if (err) {
2909                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2910                 return (-1);
2911         }
2912
2913         return (0);
2914 }
2915 static void
2916 qla_del_xmt_cntxt(qla_host_t *ha)
2917 {
2918         uint32_t i;
2919
2920         if (!ha->hw.flags.init_tx_cnxt)
2921                 return;
2922
2923         for (i = 0; i < ha->hw.num_tx_rings; i++) {
2924                 if (qla_del_xmt_cntxt_i(ha, i))
2925                         break;
2926         }
2927         ha->hw.flags.init_tx_cnxt = 0;
2928 }
2929
2930 static int
2931 qla_init_xmt_cntxt(qla_host_t *ha)
2932 {
2933         uint32_t i, j;
2934
2935         for (i = 0; i < ha->hw.num_tx_rings; i++) {
2936                 if (qla_init_xmt_cntxt_i(ha, i) != 0) {
2937                         for (j = 0; j < i; j++)
2938                                 qla_del_xmt_cntxt_i(ha, j);
2939                         return (-1);
2940                 }
2941         }
2942         ha->hw.flags.init_tx_cnxt = 1;
2943         return (0);
2944 }
2945
2946 static int
2947 qla_hw_all_mcast(qla_host_t *ha, uint32_t add_mcast)
2948 {
2949         int i, nmcast;
2950         uint32_t count = 0;
2951         uint8_t *mcast;
2952
2953         nmcast = ha->hw.nmcast;
2954
2955         QL_DPRINT2(ha, (ha->pci_dev,
2956                 "%s:[0x%x] enter nmcast = %d \n", __func__, add_mcast, nmcast));
2957
2958         mcast = ha->hw.mac_addr_arr;
2959         memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
2960
2961         for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
2962                 if ((ha->hw.mcast[i].addr[0] != 0) || 
2963                         (ha->hw.mcast[i].addr[1] != 0) ||
2964                         (ha->hw.mcast[i].addr[2] != 0) ||
2965                         (ha->hw.mcast[i].addr[3] != 0) ||
2966                         (ha->hw.mcast[i].addr[4] != 0) ||
2967                         (ha->hw.mcast[i].addr[5] != 0)) {
2968
2969                         bcopy(ha->hw.mcast[i].addr, mcast, ETHER_ADDR_LEN);
2970                         mcast = mcast + ETHER_ADDR_LEN;
2971                         count++;
2972                         
2973                         if (count == Q8_MAX_MAC_ADDRS) {
2974                                 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr,
2975                                         add_mcast, count)) {
2976                                         device_printf(ha->pci_dev,
2977                                                 "%s: failed\n", __func__);
2978                                         return (-1);
2979                                 }
2980
2981                                 count = 0;
2982                                 mcast = ha->hw.mac_addr_arr;
2983                                 memset(mcast, 0,
2984                                         (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
2985                         }
2986
2987                         nmcast--;
2988                 }
2989         }
2990
2991         if (count) {
2992                 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mcast,
2993                         count)) {
2994                         device_printf(ha->pci_dev, "%s: failed\n", __func__);
2995                         return (-1);
2996                 }
2997         }
2998         QL_DPRINT2(ha, (ha->pci_dev,
2999                 "%s:[0x%x] exit nmcast = %d \n", __func__, add_mcast, nmcast));
3000
3001         return 0;
3002 }
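
/*
 * Illustrative sketch only: the batching pattern qla_hw_all_mcast() uses,
 * isolated. Addresses are packed into a scratch array and flushed to the
 * firmware every Q8_MAX_MAC_ADDRS entries, with one final flush for any
 * remainder; flush() below is a stand-in for qla_config_mac_addr().
 */
#if 0
static int
qla_mac_batch_example(uint8_t *addrs, uint32_t naddrs,
        int (*flush)(uint8_t *batch, uint32_t count))
{
        uint8_t batch[Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN];
        uint32_t i, count = 0;

        for (i = 0; i < naddrs; i++) {
                bcopy(addrs + (i * ETHER_ADDR_LEN),
                        batch + (count * ETHER_ADDR_LEN), ETHER_ADDR_LEN);

                if (++count == Q8_MAX_MAC_ADDRS) {
                        if (flush(batch, count))
                                return (-1);
                        count = 0;
                }
        }
        return (count ? flush(batch, count) : 0);
}
#endif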
3003
3004 static int
3005 qla_hw_add_all_mcast(qla_host_t *ha)
3006 {
3007         int ret;
3008
3009         ret = qla_hw_all_mcast(ha, 1);
3010
3011         return (ret);
3012 }
3013
3014 static int
3015 qla_hw_del_all_mcast(qla_host_t *ha)
3016 {
3017         int ret;
3018
3019         ret = qla_hw_all_mcast(ha, 0);
3020
3021         bzero(ha->hw.mcast, (sizeof (qla_mcast_t) * Q8_MAX_NUM_MULTICAST_ADDRS));
3022         ha->hw.nmcast = 0;
3023
3024         return (ret);
3025 }
3026
3027 static int
3028 qla_hw_mac_addr_present(qla_host_t *ha, uint8_t *mta)
3029 {
3030         int i;
3031
3032         for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3033                 if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0)
3034                         return (0); /* it has already been added */
3035         }
3036         return (-1);
3037 }
3038
3039 static int
3040 qla_hw_add_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
3041 {
3042         int i;
3043
3044         for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3045
3046                 if ((ha->hw.mcast[i].addr[0] == 0) && 
3047                         (ha->hw.mcast[i].addr[1] == 0) &&
3048                         (ha->hw.mcast[i].addr[2] == 0) &&
3049                         (ha->hw.mcast[i].addr[3] == 0) &&
3050                         (ha->hw.mcast[i].addr[4] == 0) &&
3051                         (ha->hw.mcast[i].addr[5] == 0)) {
3052
3053                         bcopy(mta, ha->hw.mcast[i].addr, Q8_MAC_ADDR_LEN);
3054                         ha->hw.nmcast++;        
3055
3056                         mta = mta + ETHER_ADDR_LEN;
3057                         nmcast--;
3058
3059                         if (nmcast == 0)
3060                                 break;
3061                 }
3062
3063         }
3064         return 0;
3065 }
3066
3067 static int
3068 qla_hw_del_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
3069 {
3070         int i;
3071
3072         for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3073                 if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) {
3074
3075                         ha->hw.mcast[i].addr[0] = 0;
3076                         ha->hw.mcast[i].addr[1] = 0;
3077                         ha->hw.mcast[i].addr[2] = 0;
3078                         ha->hw.mcast[i].addr[3] = 0;
3079                         ha->hw.mcast[i].addr[4] = 0;
3080                         ha->hw.mcast[i].addr[5] = 0;
3081
3082                         ha->hw.nmcast--;        
3083
3084                         mta = mta + ETHER_ADDR_LEN;
3085                         nmcast--;
3086
3087                         if (nmcast == 0)
3088                                 break;
3089                 }
3090         }
3091         return 0;
3092 }
3093
3094 /*
3095  * Name: ql_hw_set_multi
3096  * Function: Sets the Multicast Addresses provided by the host O.S into the
3097  *      hardware (for the given interface)
3098  */
3099 int
3100 ql_hw_set_multi(qla_host_t *ha, uint8_t *mcast_addr, uint32_t mcnt,
3101         uint32_t add_mac)
3102 {
3103         uint8_t *mta = mcast_addr;
3104         int i;
3105         int ret = 0;
3106         uint32_t count = 0;
3107         uint8_t *mcast;
3108
3109         mcast = ha->hw.mac_addr_arr;
3110         memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3111
3112         for (i = 0; i < mcnt; i++) {
3113                 if (mta[0] || mta[1] || mta[2] || mta[3] || mta[4] || mta[5]) {
3114                         if (add_mac) {
3115                                 if (qla_hw_mac_addr_present(ha, mta) != 0) {
3116                                         bcopy(mta, mcast, ETHER_ADDR_LEN);
3117                                         mcast = mcast + ETHER_ADDR_LEN;
3118                                         count++;
3119                                 }
3120                         } else {
3121                                 if (qla_hw_mac_addr_present(ha, mta) == 0) {
3122                                         bcopy(mta, mcast, ETHER_ADDR_LEN);
3123                                         mcast = mcast + ETHER_ADDR_LEN;
3124                                         count++;
3125                                 }
3126                         }
3127                 }
3128                 if (count == Q8_MAX_MAC_ADDRS) {
3129                         if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr,
3130                                 add_mac, count)) {
3131                                 device_printf(ha->pci_dev, "%s: failed\n",
3132                                         __func__);
3133                                 return (-1);
3134                         }
3135
3136                         if (add_mac) {
3137                                 qla_hw_add_mcast(ha, ha->hw.mac_addr_arr,
3138                                         count);
3139                         } else {
3140                                 qla_hw_del_mcast(ha, ha->hw.mac_addr_arr,
3141                                         count);
3142                         }
3143
3144                         count = 0;
3145                         mcast = ha->hw.mac_addr_arr;
3146                         memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3147                 }
3148                         
3149                 mta += Q8_MAC_ADDR_LEN;
3150         }
3151
3152         if (count) {
3153                 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mac,
3154                         count)) {
3155                         device_printf(ha->pci_dev, "%s: failed\n", __func__);
3156                         return (-1);
3157                 }
3158                 if (add_mac) {
3159                         qla_hw_add_mcast(ha, ha->hw.mac_addr_arr, count);
3160                 } else {
3161                         qla_hw_del_mcast(ha, ha->hw.mac_addr_arr, count);
3162                 }
3163         }
3164
3165         return (ret);
3166 }
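
/*
 * Illustrative sketch only -- a hypothetical caller of ql_hw_set_multi().
 * It assumes the OS multicast list has already been flattened into a
 * contiguous array of 6-byte addresses (mc_list and mc_count are
 * placeholders, not driver fields).
 */
#if 0
static void
qla_set_multi_example(qla_host_t *ha, uint8_t *mc_list, uint32_t mc_count)
{
        /* add_mac = 1 programs the list; 0 would remove it */
        if (ql_hw_set_multi(ha, mc_list, mc_count, 1))
                device_printf(ha->pci_dev,
                        "%s: multicast programming failed\n", __func__);
}
#endif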
3167
3168 /*
3169  * Name: qla_hw_tx_done_locked
3170  * Function: Handle Transmit Completions
3171  */
3172 static void
3173 qla_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx)
3174 {
3175         qla_tx_buf_t *txb;
3176         qla_hw_t *hw = &ha->hw;
3177         uint32_t comp_idx, comp_count = 0;
3178         qla_hw_tx_cntxt_t *hw_tx_cntxt;
3179
3180         hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
3181
3182         /* retrieve index of last entry in tx ring completed */
3183         comp_idx = qla_le32_to_host(*(hw_tx_cntxt->tx_cons));
3184
3185         while (comp_idx != hw_tx_cntxt->txr_comp) {
3186
3187                 txb = &ha->tx_ring[txr_idx].tx_buf[hw_tx_cntxt->txr_comp];
3188
3189                 hw_tx_cntxt->txr_comp++;
3190                 if (hw_tx_cntxt->txr_comp == NUM_TX_DESCRIPTORS)
3191                         hw_tx_cntxt->txr_comp = 0;
3192
3193                 comp_count++;
3194
3195                 if (txb->m_head) {
3196                         ha->ifp->if_opackets++;
3197
3198                         bus_dmamap_sync(ha->tx_tag, txb->map,
3199                                 BUS_DMASYNC_POSTWRITE);
3200                         bus_dmamap_unload(ha->tx_tag, txb->map);
3201                         m_freem(txb->m_head);
3202
3203                         txb->m_head = NULL;
3204                 }
3205         }
3206
3207         hw_tx_cntxt->txr_free += comp_count;
3208         return;
3209 }
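
/*
 * Illustrative sketch only: the loop above advances txr_comp until it
 * matches the consumer index the firmware wrote back, wrapping modulo
 * NUM_TX_DESCRIPTORS. The helper below shows the same wraparound distance
 * computation in isolation, assuming NUM_TX_DESCRIPTORS is a power of two.
 */
#if 0
static uint32_t
qla_tx_pending_example(uint32_t cons_idx, uint32_t comp_idx)
{
        /* descriptors completed by firmware but not yet reaped */
        return ((cons_idx - comp_idx) & (NUM_TX_DESCRIPTORS - 1));
}
#endif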
3210
3211 /*
3212  * Name: ql_hw_tx_done
3213  * Function: Handle Transmit Completions
3214  */
3215 void
3216 ql_hw_tx_done(qla_host_t *ha)
3217 {
3218         int i;
3219         uint32_t flag = 0;
3220
3221         if (!mtx_trylock(&ha->tx_lock)) {
3222                 QL_DPRINT8(ha, (ha->pci_dev,
3223                         "%s: !mtx_trylock(&ha->tx_lock)\n", __func__));
3224                 return;
3225         }
3226         for (i = 0; i < ha->hw.num_tx_rings; i++) {
3227                 qla_hw_tx_done_locked(ha, i);
3228                 if (ha->hw.tx_cntxt[i].txr_free <= (NUM_TX_DESCRIPTORS >> 1))
3229                         flag = 1;
3230         }
3231
3232         if (!flag)
3233                 ha->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3234
3235         QLA_TX_UNLOCK(ha);
3236         return;
3237 }
3238
3239 void
3240 ql_update_link_state(qla_host_t *ha)
3241 {
3242         uint32_t link_state;
3243         uint32_t prev_link_state;
3244
3245         if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3246                 ha->hw.link_up = 0;
3247                 return;
3248         }
3249         link_state = READ_REG32(ha, Q8_LINK_STATE);
3250
3251         prev_link_state =  ha->hw.link_up;
3252
3253         if (ha->pci_func == 0) 
3254                 ha->hw.link_up = (((link_state & 0xF) == 1)? 1 : 0);
3255         else
3256                 ha->hw.link_up = ((((link_state >> 4)& 0xF) == 1)? 1 : 0);
3257
3258         if (prev_link_state !=  ha->hw.link_up) {
3259                 if (ha->hw.link_up) {
3260                         if_link_state_change(ha->ifp, LINK_STATE_UP);
3261                 } else {
3262                         if_link_state_change(ha->ifp, LINK_STATE_DOWN);
3263                 }
3264         }
3265         return;
3266 }
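
/*
 * Illustrative sketch only: Q8_LINK_STATE appears to pack one 4-bit state
 * per port, with PCI function 0 in bits 3:0 and the other function in bits
 * 7:4, and state 1 meaning link up. The decode in isolation, under that
 * assumption:
 */
#if 0
static int
qla_link_up_example(uint32_t link_state, int pci_func)
{
        uint32_t state;

        state = (pci_func == 0) ? (link_state & 0xF) :
                        ((link_state >> 4) & 0xF);

        return (state == 1);
}
#endif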
3267
3268 void
3269 ql_hw_stop_rcv(qla_host_t *ha)
3270 {
3271         int i, done, count = 100;
3272
3273         ha->flags.stop_rcv = 1;
3274
3275         while (count) {
3276                 done = 1;
3277                 for (i = 0; i < ha->hw.num_sds_rings; i++) {
3278                         if (ha->hw.sds[i].rcv_active)
3279                                 done = 0;
3280                 }
3281                 if (done)
3282                         break;
3283                 else 
3284                         qla_mdelay(__func__, 10);
3285                 count--;
3286         }
3287         if (!count)
3288                 device_printf(ha->pci_dev, "%s: Counter expired.\n", __func__);
3289
3290         return;
3291 }
3292
3293 int
3294 ql_hw_check_health(qla_host_t *ha)
3295 {
3296         uint32_t val;
3297
3298         ha->hw.health_count++;
3299
3300         if (ha->hw.health_count < 1000)
3301                 return 0;
3302
3303         ha->hw.health_count = 0;
3304
3305         val = READ_REG32(ha, Q8_ASIC_TEMPERATURE); /* temp state in low 16 bits */
3306
3307         if (((val & 0xFFFF) == 2) || ((val & 0xFFFF) == 3) ||
3308                 (QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE))) {
3309                 device_printf(ha->pci_dev, "%s: Temperature Alert [0x%08x]\n",
3310                         __func__, val);
3311                 return -1;
3312         }
3313
3314         val = READ_REG32(ha, Q8_FIRMWARE_HEARTBEAT);
3315
3316         if ((val != ha->hw.hbeat_value) &&
3317                 (!(QL_ERR_INJECT(ha, INJCT_HEARTBEAT_FAILURE)))) {
3318                 ha->hw.hbeat_value = val;
3319                 return 0;
3320         }
3321         device_printf(ha->pci_dev, "%s: Heartbeat Failure [0x%08x]\n",
3322                 __func__, val);
3323
3324         return -1;
3325 }
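
/*
 * Illustrative sketch only: ql_hw_check_health() is meant to be called
 * frequently and cheaply -- it touches hardware registers only every
 * 1000th invocation. A hypothetical watchdog caller (neither the function
 * below nor its reinit hook exist in the driver):
 */
#if 0
static void
qla_watchdog_example(qla_host_t *ha, void (*reinit)(qla_host_t *))
{
        /* nonzero means a temperature alert or a stalled fw heartbeat */
        if (ql_hw_check_health(ha) != 0)
                reinit(ha);
}
#endif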
3326
3327 static int
3328 qla_init_nic_func(qla_host_t *ha)
3329 {
3330         device_t                dev;
3331         q80_init_nic_func_t     *init_nic;
3332         q80_init_nic_func_rsp_t *init_nic_rsp;
3333         uint32_t                err;
3334
3335         dev = ha->pci_dev;
3336
3337         init_nic = (q80_init_nic_func_t *)ha->hw.mbox;
3338         bzero(init_nic, sizeof(q80_init_nic_func_t));
3339
3340         init_nic->opcode = Q8_MBX_INIT_NIC_FUNC;
3341         init_nic->count_version = (sizeof (q80_init_nic_func_t) >> 2);
3342         init_nic->count_version |= Q8_MBX_CMD_VERSION;
3343
3344         init_nic->options = Q8_INIT_NIC_REG_DCBX_CHNG_AEN;
3345         init_nic->options |= Q8_INIT_NIC_REG_SFP_CHNG_AEN;
3346         init_nic->options |= Q8_INIT_NIC_REG_IDC_AEN;
3347
3348 //qla_dump_buf8(ha, __func__, init_nic, sizeof (q80_init_nic_func_t));
3349         if (qla_mbx_cmd(ha, (uint32_t *)init_nic,
3350                 (sizeof (q80_init_nic_func_t) >> 2),
3351                 ha->hw.mbox, (sizeof (q80_init_nic_func_rsp_t) >> 2), 0)) {
3352                 device_printf(dev, "%s: failed\n", __func__);
3353                 return -1;
3354         }
3355
3356         init_nic_rsp = (q80_init_nic_func_rsp_t *)ha->hw.mbox;
3357 // qla_dump_buf8(ha, __func__, init_nic_rsp, sizeof (q80_init_nic_func_rsp_t));
3358
3359         err = Q8_MBX_RSP_STATUS(init_nic_rsp->regcnt_status);
3360
3361         if (err) {
3362                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3363         }
3364
3365         return 0;
3366 }
3367
3368 static int
3369 qla_stop_nic_func(qla_host_t *ha)
3370 {
3371         device_t                dev;
3372         q80_stop_nic_func_t     *stop_nic;
3373         q80_stop_nic_func_rsp_t *stop_nic_rsp;
3374         uint32_t                err;
3375
3376         dev = ha->pci_dev;
3377
3378         stop_nic = (q80_stop_nic_func_t *)ha->hw.mbox;
3379         bzero(stop_nic, sizeof(q80_stop_nic_func_t));
3380
3381         stop_nic->opcode = Q8_MBX_STOP_NIC_FUNC;
3382         stop_nic->count_version = (sizeof (q80_stop_nic_func_t) >> 2);
3383         stop_nic->count_version |= Q8_MBX_CMD_VERSION;
3384
3385         stop_nic->options = Q8_STOP_NIC_DEREG_DCBX_CHNG_AEN;
3386         stop_nic->options |= Q8_STOP_NIC_DEREG_SFP_CHNG_AEN;
3387
3388 //qla_dump_buf8(ha, __func__, stop_nic, sizeof (q80_stop_nic_func_t));
3389         if (qla_mbx_cmd(ha, (uint32_t *)stop_nic,
3390                 (sizeof (q80_stop_nic_func_t) >> 2),
3391                 ha->hw.mbox, (sizeof (q80_stop_nic_func_rsp_t) >> 2), 0)) {
3392                 device_printf(dev, "%s: failed\n", __func__);
3393                 return -1;
3394         }
3395
3396         stop_nic_rsp = (q80_stop_nic_func_rsp_t *)ha->hw.mbox;
3397 //qla_dump_buf8(ha, __func__, stop_nic_rsp, sizeof (q80_stop_nic_func_rsp_t));
3398
3399         err = Q8_MBX_RSP_STATUS(stop_nic_rsp->regcnt_status);
3400
3401         if (err) {
3402                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3403         }
3404
3405         return 0;
3406 }
3407
3408 static int
3409 qla_query_fw_dcbx_caps(qla_host_t *ha)
3410 {
3411         device_t                        dev;
3412         q80_query_fw_dcbx_caps_t        *fw_dcbx;
3413         q80_query_fw_dcbx_caps_rsp_t    *fw_dcbx_rsp;
3414         uint32_t                        err;
3415
3416         dev = ha->pci_dev;
3417
3418         fw_dcbx = (q80_query_fw_dcbx_caps_t *)ha->hw.mbox;
3419         bzero(fw_dcbx, sizeof(q80_query_fw_dcbx_caps_t));
3420
3421         fw_dcbx->opcode = Q8_MBX_GET_FW_DCBX_CAPS;
3422         fw_dcbx->count_version = (sizeof (q80_query_fw_dcbx_caps_t) >> 2);
3423         fw_dcbx->count_version |= Q8_MBX_CMD_VERSION;
3424
3425         ql_dump_buf8(ha, __func__, fw_dcbx, sizeof (q80_query_fw_dcbx_caps_t));
3426         if (qla_mbx_cmd(ha, (uint32_t *)fw_dcbx,
3427                 (sizeof (q80_query_fw_dcbx_caps_t) >> 2),
3428                 ha->hw.mbox, (sizeof (q80_query_fw_dcbx_caps_rsp_t) >> 2), 0)) {
3429                 device_printf(dev, "%s: failed\n", __func__);
3430                 return -1;
3431         }
3432
3433         fw_dcbx_rsp = (q80_query_fw_dcbx_caps_rsp_t *)ha->hw.mbox;
3434         ql_dump_buf8(ha, __func__, fw_dcbx_rsp,
3435                 sizeof (q80_query_fw_dcbx_caps_rsp_t));
3436
3437         err = Q8_MBX_RSP_STATUS(fw_dcbx_rsp->regcnt_status);
3438
3439         if (err) {
3440                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3441         }
3442
3443         return 0;
3444 }
3445
3446 static int
3447 qla_idc_ack(qla_host_t *ha, uint32_t aen_mb1, uint32_t aen_mb2,
3448         uint32_t aen_mb3, uint32_t aen_mb4)
3449 {
3450         device_t                dev;
3451         q80_idc_ack_t           *idc_ack;
3452         q80_idc_ack_rsp_t       *idc_ack_rsp;
3453         uint32_t                err;
3454         int                     count = 300;
3455
3456         dev = ha->pci_dev;
3457
3458         idc_ack = (q80_idc_ack_t *)ha->hw.mbox;
3459         bzero(idc_ack, sizeof(q80_idc_ack_t));
3460
3461         idc_ack->opcode = Q8_MBX_IDC_ACK;
3462         idc_ack->count_version = (sizeof (q80_idc_ack_t) >> 2);
3463         idc_ack->count_version |= Q8_MBX_CMD_VERSION;
3464
3465         idc_ack->aen_mb1 = aen_mb1;
3466         idc_ack->aen_mb2 = aen_mb2;
3467         idc_ack->aen_mb3 = aen_mb3;
3468         idc_ack->aen_mb4 = aen_mb4;
3469
3470         ha->hw.imd_compl= 0;
3471
3472         if (qla_mbx_cmd(ha, (uint32_t *)idc_ack,
3473                 (sizeof (q80_idc_ack_t) >> 2),
3474                 ha->hw.mbox, (sizeof (q80_idc_ack_rsp_t) >> 2), 0)) {
3475                 device_printf(dev, "%s: failed\n", __func__);
3476                 return -1;
3477         }
3478
3479         idc_ack_rsp = (q80_idc_ack_rsp_t *)ha->hw.mbox;
3480
3481         err = Q8_MBX_RSP_STATUS(idc_ack_rsp->regcnt_status);
3482
3483         if (err) {
3484                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3485                 return(-1);
3486         }
3487
3488         while (count && !ha->hw.imd_compl) {
3489                 qla_mdelay(__func__, 100);
3490                 count--;
3491         }
3492
3493         if (!count)
3494                 return -1;
3495         else
3496                 device_printf(dev, "%s: count %d\n", __func__, count);
3497
3498         return (0);
3499 }
3500
3501 static int
3502 qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits)
3503 {
3504         device_t                dev;
3505         q80_set_port_cfg_t      *pcfg;
3506         q80_set_port_cfg_rsp_t  *pfg_rsp;
3507         uint32_t                err;
3508         int                     count = 300;
3509
3510         dev = ha->pci_dev;
3511
3512         pcfg = (q80_set_port_cfg_t *)ha->hw.mbox;
3513         bzero(pcfg, sizeof(q80_set_port_cfg_t));
3514
3515         pcfg->opcode = Q8_MBX_SET_PORT_CONFIG;
3516         pcfg->count_version = (sizeof (q80_set_port_cfg_t) >> 2);
3517         pcfg->count_version |= Q8_MBX_CMD_VERSION;
3518
3519         pcfg->cfg_bits = cfg_bits;
3520
3521         device_printf(dev, "%s: cfg_bits"
3522                 " [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
3523                 " [0x%x, 0x%x, 0x%x]\n", __func__,
3524                 ((cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
3525                 ((cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
3526                 ((cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0));
3527
3528         ha->hw.imd_compl= 0;
3529
3530         if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
3531                 (sizeof (q80_set_port_cfg_t) >> 2),
3532                 ha->hw.mbox, (sizeof (q80_set_port_cfg_rsp_t) >> 2), 0)) {
3533                 device_printf(dev, "%s: failed\n", __func__);
3534                 return -1;
3535         }
3536
3537         pfg_rsp = (q80_set_port_cfg_rsp_t *)ha->hw.mbox;
3538
3539         err = Q8_MBX_RSP_STATUS(pfg_rsp->regcnt_status);
3540
3541         if (err == Q8_MBX_RSP_IDC_INTRMD_RSP) {
3542                 while (count && !ha->hw.imd_compl) {
3543                         qla_mdelay(__func__, 100);
3544                         count--;
3545                 }
3546                 if (count) {
3547                         device_printf(dev, "%s: count %d\n", __func__, count);
3548
3549                         err = 0;
3550                 }
3551         }
3552
3553         if (err) {
3554                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3555                 return(-1);
3556         }
3557
3558         return (0);
3559 }
3560
3561
3562 static int
3563 qla_get_minidump_tmplt_size(qla_host_t *ha, uint32_t *size)
3564 {
3565         uint32_t                        err;
3566         device_t                        dev = ha->pci_dev;
3567         q80_config_md_templ_size_t      *md_size;
3568         q80_config_md_templ_size_rsp_t  *md_size_rsp;
3569
3570 #ifndef QL_LDFLASH_FW
3571
3572         ql_minidump_template_hdr_t *hdr;
3573
3574         hdr = (ql_minidump_template_hdr_t *)ql83xx_minidump;
3575         *size = hdr->size_of_template;
3576         return (0);
3577
3578 #endif /* #ifndef QL_LDFLASH_FW */
3579
3580         md_size = (q80_config_md_templ_size_t *) ha->hw.mbox;
3581         bzero(md_size, sizeof(q80_config_md_templ_size_t));
3582
3583         md_size->opcode = Q8_MBX_GET_MINIDUMP_TMPLT_SIZE;
3584         md_size->count_version = (sizeof (q80_config_md_templ_size_t) >> 2);
3585         md_size->count_version |= Q8_MBX_CMD_VERSION;
3586
3587         if (qla_mbx_cmd(ha, (uint32_t *) md_size,
3588                 (sizeof(q80_config_md_templ_size_t) >> 2), ha->hw.mbox,
3589                 (sizeof(q80_config_md_templ_size_rsp_t) >> 2), 0)) {
3590
3591                 device_printf(dev, "%s: failed\n", __func__);
3592
3593                 return (-1);
3594         }
3595
3596         md_size_rsp = (q80_config_md_templ_size_rsp_t *) ha->hw.mbox;
3597
3598         err = Q8_MBX_RSP_STATUS(md_size_rsp->regcnt_status);
3599
3600         if (err) {
3601                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3602                 return(-1);
3603         }
3604
3605         *size = md_size_rsp->templ_size;
3606
3607         return (0);
3608 }
3609
3610 static int
3611 qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits)
3612 {
3613         device_t                dev;
3614         q80_get_port_cfg_t      *pcfg;
3615         q80_get_port_cfg_rsp_t  *pcfg_rsp;
3616         uint32_t                err;
3617
3618         dev = ha->pci_dev;
3619
3620         pcfg = (q80_get_port_cfg_t *)ha->hw.mbox;
3621         bzero(pcfg, sizeof(q80_get_port_cfg_t));
3622
3623         pcfg->opcode = Q8_MBX_GET_PORT_CONFIG;
3624         pcfg->count_version = (sizeof (q80_get_port_cfg_t) >> 2);
3625         pcfg->count_version |= Q8_MBX_CMD_VERSION;
3626
3627         if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
3628                 (sizeof (q80_get_port_cfg_t) >> 2),
3629                 ha->hw.mbox, (sizeof (q80_get_port_cfg_rsp_t) >> 2), 0)) {
3630                 device_printf(dev, "%s: failed\n", __func__);
3631                 return -1;
3632         }
3633
3634         pcfg_rsp = (q80_get_port_cfg_rsp_t *)ha->hw.mbox;
3635
3636         err = Q8_MBX_RSP_STATUS(pcfg_rsp->regcnt_status);
3637
3638         if (err) {
3639                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3640                 return(-1);
3641         }
3642
3643         device_printf(dev, "%s: [cfg_bits, port type]"
3644                 " [0x%08x, 0x%02x] [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
3645                 " [0x%x, 0x%x, 0x%x]\n", __func__,
3646                 pcfg_rsp->cfg_bits, pcfg_rsp->phys_port_type,
3647                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
3648                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
3649                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0)
3650                 );
3651
3652         *cfg_bits = pcfg_rsp->cfg_bits;
3653
3654         return (0);
3655 }
3656
3657 int
3658 qla_iscsi_pdu(qla_host_t *ha, struct mbuf *mp)
3659 {
3660         struct ether_vlan_header        *eh;
3661         uint16_t                        etype;
3662         struct ip                       *ip = NULL;
3663         struct ip6_hdr                  *ip6 = NULL;
3664         struct tcphdr                   *th = NULL;
3665         uint32_t                        hdrlen;
3666         uint32_t                        offset;
3667         uint8_t                         buf[sizeof(struct ip6_hdr)];
3668
3669         eh = mtod(mp, struct ether_vlan_header *);
3670
3671         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3672                 hdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3673                 etype = ntohs(eh->evl_proto);
3674         } else {
3675                 hdrlen = ETHER_HDR_LEN;
3676                 etype = ntohs(eh->evl_encap_proto);
3677         }
3678
3679         if (etype == ETHERTYPE_IP) {
3680
3681                 offset = (hdrlen + sizeof (struct ip));
3682
3683                 if (mp->m_len >= offset) {
3684                         ip = (struct ip *)(mp->m_data + hdrlen);
3685                 } else {
3686                         m_copydata(mp, hdrlen, sizeof (struct ip), buf);
3687                         ip = (struct ip *)buf;
3688                 }
3689
3690                 if (ip->ip_p == IPPROTO_TCP) {
3691
3692                         hdrlen += ip->ip_hl << 2;
3693                         offset = hdrlen + 4;
3694         
3695                         if (mp->m_len >= offset) {
3696                                 th = (struct tcphdr *)(mp->m_data + hdrlen);
3697                         } else {
3698                                 m_copydata(mp, hdrlen, 4, buf);
3699                                 th = (struct tcphdr *)buf;
3700                         }
3701                 }
3702
3703         } else if (etype == ETHERTYPE_IPV6) {
3704
3705                 offset = (hdrlen + sizeof (struct ip6_hdr));
3706
3707                 if (mp->m_len >= offset) {
3708                         ip6 = (struct ip6_hdr *)(mp->m_data + hdrlen);
3709                 } else {
3710                         m_copydata(mp, hdrlen, sizeof (struct ip6_hdr), buf);
3711                         ip6 = (struct ip6_hdr *)buf;
3712                 }
3713
3714                 if (ip6->ip6_nxt == IPPROTO_TCP) {
3715
3716                         hdrlen += sizeof(struct ip6_hdr);
3717                         offset = hdrlen + 4;
3718
3719                         if (mp->m_len >= offset) {
3720                                 th = (struct tcphdr *)(mp->m_data + hdrlen);
3721                         } else {
3722                                 m_copydata(mp, hdrlen, 4, buf);
3723                                 th = (struct tcphdr *)buf;
3724                         }
3725                 }
3726         }
3727
3728         if (th != NULL) {
3729                 if ((th->th_sport == htons(3260)) ||
3730                         (th->th_dport == htons(3260)))
3731                         return 0;
3732         }
3733         return (-1);
3734 }
3735
3736 void
3737 qla_hw_async_event(qla_host_t *ha)
3738 {
3739         switch (ha->hw.aen_mb0) {
3740         case 0x8101:
3741                 (void)qla_idc_ack(ha, ha->hw.aen_mb1, ha->hw.aen_mb2,
3742                         ha->hw.aen_mb3, ha->hw.aen_mb4);
3743
3744                 break;
3745
3746         default:
3747                 break;
3748         }
3749
3750         return;
3751 }
3752
3753 #ifdef QL_LDFLASH_FW
3754 static int
3755 ql_get_minidump_template(qla_host_t *ha)
3756 {
3757         uint32_t                        err;
3758         device_t                        dev = ha->pci_dev;
3759         q80_config_md_templ_cmd_t       *md_templ;
3760         q80_config_md_templ_cmd_rsp_t   *md_templ_rsp;
3761
3762         md_templ = (q80_config_md_templ_cmd_t *) ha->hw.mbox;
3763         bzero(md_templ, (sizeof (q80_config_md_templ_cmd_t)));
3764
3765         md_templ->opcode = Q8_MBX_GET_MINIDUMP_TMPLT;
3766         md_templ->count_version = ( sizeof(q80_config_md_templ_cmd_t) >> 2);
3767         md_templ->count_version |= Q8_MBX_CMD_VERSION;
3768
3769         md_templ->buf_addr = ha->hw.dma_buf.minidump.dma_addr;
3770         md_templ->buff_size = ha->hw.dma_buf.minidump.size;
3771
3772         if (qla_mbx_cmd(ha, (uint32_t *) md_templ,
3773                 (sizeof(q80_config_md_templ_cmd_t) >> 2),
3774                  ha->hw.mbox,
3775                 (sizeof(q80_config_md_templ_cmd_rsp_t) >> 2), 0)) {
3776
3777                 device_printf(dev, "%s: failed\n", __func__);
3778
3779                 return (-1);
3780         }
3781
3782         md_templ_rsp = (q80_config_md_templ_cmd_rsp_t *) ha->hw.mbox;
3783
3784         err = Q8_MBX_RSP_STATUS(md_templ_rsp->regcnt_status);
3785
3786         if (err) {
3787                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3788                 return (-1);
3789         }
3790
3791         return (0);
3792
3793 }
3794 #endif /* #ifdef QL_LDFLASH_FW */
3795
3796 /*
3797  * Minidump related functionality 
3798  */
3799
3800 static int ql_parse_template(qla_host_t *ha);
3801
3802 static uint32_t ql_rdcrb(qla_host_t *ha,
3803                         ql_minidump_entry_rdcrb_t *crb_entry,
3804                         uint32_t * data_buff);
3805
3806 static uint32_t ql_pollrd(qla_host_t *ha,
3807                         ql_minidump_entry_pollrd_t *entry,
3808                         uint32_t * data_buff);
3809
3810 static uint32_t ql_pollrd_modify_write(qla_host_t *ha,
3811                         ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
3812                         uint32_t *data_buff);
3813
3814 static uint32_t ql_L2Cache(qla_host_t *ha,
3815                         ql_minidump_entry_cache_t *cacheEntry,
3816                         uint32_t * data_buff);
3817
3818 static uint32_t ql_L1Cache(qla_host_t *ha,
3819                         ql_minidump_entry_cache_t *cacheEntry,
3820                         uint32_t *data_buff);
3821
3822 static uint32_t ql_rdocm(qla_host_t *ha,
3823                         ql_minidump_entry_rdocm_t *ocmEntry,
3824                         uint32_t *data_buff);
3825
3826 static uint32_t ql_rdmem(qla_host_t *ha,
3827                         ql_minidump_entry_rdmem_t *mem_entry,
3828                         uint32_t *data_buff);
3829
3830 static uint32_t ql_rdrom(qla_host_t *ha,
3831                         ql_minidump_entry_rdrom_t *romEntry,
3832                         uint32_t *data_buff);
3833
3834 static uint32_t ql_rdmux(qla_host_t *ha,
3835                         ql_minidump_entry_mux_t *muxEntry,
3836                         uint32_t *data_buff);
3837
3838 static uint32_t ql_rdmux2(qla_host_t *ha,
3839                         ql_minidump_entry_mux2_t *muxEntry,
3840                         uint32_t *data_buff);
3841
3842 static uint32_t ql_rdqueue(qla_host_t *ha,
3843                         ql_minidump_entry_queue_t *queueEntry,
3844                         uint32_t *data_buff);
3845
3846 static uint32_t ql_cntrl(qla_host_t *ha,
3847                         ql_minidump_template_hdr_t *template_hdr,
3848                         ql_minidump_entry_cntrl_t *crbEntry);
3849
3850
3851 static uint32_t
3852 ql_minidump_size(qla_host_t *ha)
3853 {
3854         uint32_t i, k;
3855         uint32_t size = 0;
3856         ql_minidump_template_hdr_t *hdr;
3857
3858         hdr = (ql_minidump_template_hdr_t *)ha->hw.dma_buf.minidump.dma_b;
3859
3860         i = 0x2;        /* capture-mask bits start at bit 1 */
3861
3862         for (k = 1; k < QL_DBG_CAP_SIZE_ARRAY_LEN; k++) {
3863                 if (i & ha->hw.mdump_capture_mask)
3864                         size += hdr->capture_size_array[k];
3865                 i = i << 1;
3866         }
3867         return (size);
3868 }
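
/*
 * Illustrative sketch only: mdump_capture_mask selects capture levels by
 * bit position -- bit k (k >= 1) adds capture_size_array[k] to the buffer
 * size, which is what the shift loop above computes. The same mapping with
 * an explicit bit test:
 */
#if 0
static uint32_t
qla_capture_size_example(ql_minidump_template_hdr_t *hdr, uint32_t mask)
{
        uint32_t k, size = 0;

        for (k = 1; k < QL_DBG_CAP_SIZE_ARRAY_LEN; k++) {
                if (mask & (1U << k))
                        size += hdr->capture_size_array[k];
        }
        return (size);
}
#endif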
3869
3870 static void
3871 ql_free_minidump_buffer(qla_host_t *ha)
3872 {
3873         if (ha->hw.mdump_buffer != NULL) {
3874                 free(ha->hw.mdump_buffer, M_QLA83XXBUF);
3875                 ha->hw.mdump_buffer = NULL;
3876                 ha->hw.mdump_buffer_size = 0;
3877         }
3878         return;
3879 }
3880
3881 static int
3882 ql_alloc_minidump_buffer(qla_host_t *ha)
3883 {
3884         ha->hw.mdump_buffer_size = ql_minidump_size(ha);
3885
3886         if (!ha->hw.mdump_buffer_size)
3887                 return (-1);
3888
3889         ha->hw.mdump_buffer = malloc(ha->hw.mdump_buffer_size, M_QLA83XXBUF,
3890                                         M_NOWAIT);
3891
3892         if (ha->hw.mdump_buffer == NULL)
3893                 return (-1);
3894
3895         return (0);
3896 }
3897
3898 static void
3899 ql_free_minidump_template_buffer(qla_host_t *ha)
3900 {
3901         if (ha->hw.mdump_template != NULL) {
3902                 free(ha->hw.mdump_template, M_QLA83XXBUF);
3903                 ha->hw.mdump_template = NULL;
3904                 ha->hw.mdump_template_size = 0;
3905         }
3906         return;
3907 }
3908
3909 static int
3910 ql_alloc_minidump_template_buffer(qla_host_t *ha)
3911 {
3912         ha->hw.mdump_template_size = ha->hw.dma_buf.minidump.size;
3913
3914         ha->hw.mdump_template = malloc(ha->hw.mdump_template_size,
3915                                         M_QLA83XXBUF, M_NOWAIT);
3916
3917         if (ha->hw.mdump_template == NULL)
3918                 return (-1);
3919
3920         return (0);
3921 }
3922
3923 static int
3924 ql_alloc_minidump_buffers(qla_host_t *ha)
3925 {
3926         int ret;
3927
3928         ret = ql_alloc_minidump_template_buffer(ha);
3929
3930         if (ret)
3931                 return (ret);
3932
3933         ret = ql_alloc_minidump_buffer(ha);
3934
3935         if (ret)
3936                 ql_free_minidump_template_buffer(ha);
3937
3938         return (ret);
3939 }
3940
3941
3942 static uint32_t
3943 ql_validate_minidump_checksum(qla_host_t *ha)
3944 {
3945         uint64_t sum = 0;
3946         int count;
3947         uint32_t *template_buff;
3948
3949         count = ha->hw.dma_buf.minidump.size / sizeof (uint32_t);
3950         template_buff = ha->hw.dma_buf.minidump.dma_b;
3951
3952         while (count-- > 0) {
3953                 sum += *template_buff++;
3954         }
3955
3956         while (sum >> 32) {
3957                 sum = (sum & 0xFFFFFFFF) + (sum >> 32);
3958         }
3959
3960         return (~sum);
3961 }
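
/*
 * Illustrative sketch only: the routine above computes a 32-bit one's-
 * complement sum (end-around carry). A template whose embedded checksum
 * word is correct folds to 0xFFFFFFFF, so the complement -- and hence the
 * return value checked by ql_minidump_init() -- is 0 on success.
 */
#if 0
static uint32_t
qla_ones_complement_example(uint32_t *words, int count)
{
        uint64_t sum = 0;

        while (count-- > 0)
                sum += *words++;

        while (sum >> 32)       /* fold carries back into the low 32 bits */
                sum = (sum & 0xFFFFFFFF) + (sum >> 32);

        return (~(uint32_t)sum);        /* 0 => checksum valid */
}
#endif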
3962
3963 int
3964 ql_minidump_init(qla_host_t *ha)
3965 {
3966         int             ret = 0;
3967         uint32_t        template_size = 0;
3968         device_t        dev = ha->pci_dev;
3969
3970         /*
3971          * Get Minidump Template Size
3972          */
3973         ret = qla_get_minidump_tmplt_size(ha, &template_size);
3974
3975         if (ret || (template_size == 0)) {
3976                 device_printf(dev, "%s: failed [%d, %d]\n", __func__, ret,
3977                         template_size);
3978                 return (-1);
3979         }
3980
3981         /*
3982          * Allocate Memory for Minidump Template
3983          */
3984
3985         ha->hw.dma_buf.minidump.alignment = 8;
3986         ha->hw.dma_buf.minidump.size = template_size;
3987
3988 #ifdef QL_LDFLASH_FW
3989         if (ql_alloc_dmabuf(ha, &ha->hw.dma_buf.minidump)) {
3990
3991                 device_printf(dev, "%s: minidump dma alloc failed\n", __func__);
3992
3993                 return (-1);
3994         }
3995         ha->hw.dma_buf.flags.minidump = 1;
3996
3997         /*
3998          * Retrieve Minidump Template
3999          */
4000         ret = ql_get_minidump_template(ha);
4001 #else
4002         ha->hw.dma_buf.minidump.dma_b = ql83xx_minidump;
4003
4004 #endif /* #ifdef QL_LDFLASH_FW */
4005
4006         if (ret == 0) {
4007
4008                 ret = ql_validate_minidump_checksum(ha);
4009
4010                 if (ret == 0) {
4011
4012                         ret = ql_alloc_minidump_buffers(ha);
4013
4014                         if (ret == 0)
4015                                 ha->hw.mdump_init = 1;
4016                         else
4017                                 device_printf(dev,
4018                                         "%s: ql_alloc_minidump_buffers"
4019                                         " failed\n", __func__);
4020                 } else {
4021                         device_printf(dev, "%s: ql_validate_minidump_checksum"
4022                                 " failed\n", __func__);
4023                 }
4024         } else {
4025                 device_printf(dev, "%s: ql_get_minidump_template failed\n",
4026                          __func__);
4027         }
4028
4029         if (ret)
4030                 ql_minidump_free(ha);
4031
4032         return (ret);
4033 }
4034
4035 static void
4036 ql_minidump_free(qla_host_t *ha)
4037 {
4038         ha->hw.mdump_init = 0;
4039         if (ha->hw.dma_buf.flags.minidump) {
4040                 ha->hw.dma_buf.flags.minidump = 0;
4041                 ql_free_dmabuf(ha, &ha->hw.dma_buf.minidump);
4042         }
4043
4044         ql_free_minidump_template_buffer(ha);
4045         ql_free_minidump_buffer(ha);
4046
4047         return;
4048 }
4049
4050 void
4051 ql_minidump(qla_host_t *ha)
4052 {
4053         if (!ha->hw.mdump_init)
4054                 return;
4055
4056         if (ha->hw.mdump_done)
4057                 return;
4058
4059         ha->hw.mdump_start_seq_index = ql_stop_sequence(ha);
4060
4061         bzero(ha->hw.mdump_buffer, ha->hw.mdump_buffer_size);
4062         bzero(ha->hw.mdump_template, ha->hw.mdump_template_size);
4063
4064         bcopy(ha->hw.dma_buf.minidump.dma_b, ha->hw.mdump_template,
4065                 ha->hw.mdump_template_size);
4066
4067         ql_parse_template(ha);
4068  
4069         ql_start_sequence(ha, ha->hw.mdump_start_seq_index);
4070
4071         ha->hw.mdump_done = 1;
4072
4073         return;
4074 }
4075
4076
4077 /*
4078  * helper routines
4079  */
4080 static void 
4081 ql_entry_err_chk(ql_minidump_entry_t *entry, uint32_t esize)
4082 {
4083         if (esize != entry->hdr.entry_capture_size) {
4084                 entry->hdr.entry_capture_size = esize;
4085                 entry->hdr.driver_flags |= QL_DBG_SIZE_ERR_FLAG;
4086         }
4087         return;
4088 }
4089
4090
4091 static int 
4092 ql_parse_template(qla_host_t *ha)
4093 {
4094         uint32_t num_of_entries, buff_level, e_cnt, esize;
4095         uint32_t end_cnt, rv = 0;
4096         char *dump_buff, *dbuff;
4097         int sane_start = 0, sane_end = 0;
4098         ql_minidump_template_hdr_t *template_hdr;
4099         ql_minidump_entry_t *entry;
4100         uint32_t capture_mask; 
4101         uint32_t dump_size; 
4102
4103         /* Setup parameters */
4104         template_hdr = (ql_minidump_template_hdr_t *)ha->hw.mdump_template;
4105
4106         if (template_hdr->entry_type == TLHDR)
4107                 sane_start = 1;
4108         
4109         dump_buff = (char *) ha->hw.mdump_buffer;
4110
4111         num_of_entries = template_hdr->num_of_entries;
4112
4113         entry = (ql_minidump_entry_t *) ((char *)template_hdr 
4114                         + template_hdr->first_entry_offset );
4115
4116         template_hdr->saved_state_array[QL_OCM0_ADDR_INDX] =
4117                 template_hdr->ocm_window_array[ha->pci_func];
4118         template_hdr->saved_state_array[QL_PCIE_FUNC_INDX] = ha->pci_func;
4119
4120         capture_mask = ha->hw.mdump_capture_mask;
4121         dump_size = ha->hw.mdump_buffer_size;
4122
4123         template_hdr->driver_capture_mask = capture_mask;
4124
4125         QL_DPRINT80(ha, (ha->pci_dev,
4126                 "%s: sane_start = %d num_of_entries = %d "
4127                 "capture_mask = 0x%x dump_size = %d \n", 
4128                 __func__, sane_start, num_of_entries, capture_mask, dump_size));
4129
4130         for (buff_level = 0, e_cnt = 0; e_cnt < num_of_entries; e_cnt++) {
4131
4132                 /*
4133                  * If the capture_mask of the entry does not match capture mask
4134                  * skip the entry after marking the driver_flags indicator.
4135                  */
4136                 
4137                 if (!(entry->hdr.entry_capture_mask & capture_mask)) {
4138
4139                         entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4140                         entry = (ql_minidump_entry_t *) ((char *) entry
4141                                         + entry->hdr.entry_size);
4142                         continue;
4143                 }
4144
4145                 /*
4146                  * This is ONLY needed in implementations where
4147                  * the capture buffer allocated is too small to capture
4148                  * all of the required entries for a given capture mask.
4149                  * We need to empty the buffer contents to a file
4150                  * if possible, before processing the next entry
4151                  * If the buff_full_flag is set, no further capture will happen
4152                  * and all remaining non-control entries will be skipped.
4153                  */
4154                 if (entry->hdr.entry_capture_size != 0) {
4155                         if ((buff_level + entry->hdr.entry_capture_size) >
4156                                 dump_size) {
4157                                 /*  Try to recover by emptying buffer to file */
4158                                 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4159                                 entry = (ql_minidump_entry_t *) ((char *) entry
4160                                                 + entry->hdr.entry_size);
4161                                 continue;
4162                         }
4163                 }
4164
4165                 /*
4166                  * Decode the entry type and process it accordingly
4167                  */
4168
4169                 switch (entry->hdr.entry_type) {
4170                 case RDNOP:
4171                         break;
4172
4173                 case RDEND:
4174                         if (sane_end == 0) {
4175                                 end_cnt = e_cnt;
4176                         }
4177                         sane_end++;
4178                         break;
4179
4180                 case RDCRB:
4181                         dbuff = dump_buff + buff_level;
4182                         esize = ql_rdcrb(ha, (void *)entry, (void *)dbuff);
4183                         ql_entry_err_chk(entry, esize);
4184                         buff_level += esize;
4185                         break;
4186
4187                 case POLLRD:
4188                         dbuff = dump_buff + buff_level;
4189                         esize = ql_pollrd(ha, (void *)entry, (void *)dbuff);
4190                         ql_entry_err_chk(entry, esize);
4191                         buff_level += esize;
4192                         break;
4193
4194                 case POLLRDMWR:
4195                         dbuff = dump_buff + buff_level;
4196                         esize = ql_pollrd_modify_write(ha, (void *)entry,
4197                                         (void *)dbuff);
4198                         ql_entry_err_chk(entry, esize);
4199                         buff_level += esize;
4200                         break;
4201
4202                 case L2ITG:
4203                 case L2DTG:
4204                 case L2DAT:
4205                 case L2INS:
4206                         dbuff = dump_buff + buff_level;
4207                         esize = ql_L2Cache(ha, (void *)entry, (void *)dbuff);
4208                         if (esize == -1) {
4209                                 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4210                         } else {
4211                                 ql_entry_err_chk(entry, esize);
4212                                 buff_level += esize;
4213                         }
4214                         break;
4215
4216                 case L1DAT:
4217                 case L1INS:
4218                         dbuff = dump_buff + buff_level;
4219                         esize = ql_L1Cache(ha, (void *)entry, (void *)dbuff);
4220                         ql_entry_err_chk(entry, esize);
4221                         buff_level += esize;
4222                         break;
4223
4224                 case RDOCM:
4225                         dbuff = dump_buff + buff_level;
4226                         esize = ql_rdocm(ha, (void *)entry, (void *)dbuff);
4227                         ql_entry_err_chk(entry, esize);
4228                         buff_level += esize;
4229                         break;
4230
4231                 case RDMEM:
4232                         dbuff = dump_buff + buff_level;
4233                         esize = ql_rdmem(ha, (void *)entry, (void *)dbuff);
4234                         ql_entry_err_chk(entry, esize);
4235                         buff_level += esize;
4236                         break;
4237
4238                 case BOARD:
4239                 case RDROM:
4240                         dbuff = dump_buff + buff_level;
4241                         esize = ql_rdrom(ha, (void *)entry, (void *)dbuff);
4242                         ql_entry_err_chk(entry, esize);
4243                         buff_level += esize;
4244                         break;
4245
4246                 case RDMUX:
4247                         dbuff = dump_buff + buff_level;
4248                         esize = ql_rdmux(ha, (void *)entry, (void *)dbuff);
4249                         ql_entry_err_chk(entry, esize);
4250                         buff_level += esize;
4251                         break;
4252
4253                 case RDMUX2:
4254                         dbuff = dump_buff + buff_level;
4255                         esize = ql_rdmux2(ha, (void *)entry, (void *)dbuff);
4256                         ql_entry_err_chk(entry, esize);
4257                         buff_level += esize;
4258                         break;
4259
4260                 case QUEUE:
4261                         dbuff = dump_buff + buff_level;
4262                         esize = ql_rdqueue(ha, (void *)entry, (void *)dbuff);
4263                         ql_entry_err_chk(entry, esize);
4264                         buff_level += esize;
4265                         break;
4266
4267                 case CNTRL:
4268                         if ((rv = ql_cntrl(ha, template_hdr, (void *)entry))) {
4269                                 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4270                         }
4271                         break;
4272                 default:
4273                         entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4274                         break;
4275                 }
4276                 /*  next entry in the template */
4277                 entry = (ql_minidump_entry_t *) ((char *) entry
4278                                                 + entry->hdr.entry_size);
4279         }
4280
4281         if (!sane_start || (sane_end > 1)) {
4282                 device_printf(ha->pci_dev,
4283                         "\n%s: Template configuration error. Check Template\n",
4284                         __func__);
4285         }
4286         
4287         QL_DPRINT80(ha, (ha->pci_dev, "%s: Minidump num of entries = %d\n",
4288                 __func__, template_hdr->num_of_entries));
4289
4290         return 0;
4291 }
4292
4293 /*
4294  * Read CRB operation.
4295  */
4296 static uint32_t
4297 ql_rdcrb(qla_host_t *ha, ql_minidump_entry_rdcrb_t * crb_entry,
4298         uint32_t * data_buff)
4299 {
4300         int loop_cnt;
4301         int ret;
4302         uint32_t op_count, addr, stride, value = 0;
4303
4304         addr = crb_entry->addr;
4305         op_count = crb_entry->op_count;
4306         stride = crb_entry->addr_stride;
4307
4308         for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
4309
4310                 ret = ql_rdwr_indreg32(ha, addr, &value, 1);
4311
4312                 if (ret)
4313                         return (0);
4314
4315                 *data_buff++ = addr;
4316                 *data_buff++ = value;
4317                 addr = addr + stride;
4318         }
4319
4320         /*
4321          * for testing purpose we return amount of data written
4322          */
4323         return (op_count * (2 * sizeof(uint32_t)));
4324 }
4325
4326 /*
4327  * Handle L2 Cache.
4328  */
4329
4330 static uint32_t 
4331 ql_L2Cache(qla_host_t *ha, ql_minidump_entry_cache_t *cacheEntry,
4332         uint32_t * data_buff)
4333 {
4334         int i, k;
4335         int loop_cnt;
4336         int ret;
4337
4338         uint32_t read_value;
4339         uint32_t addr, read_addr, cntrl_addr, tag_reg_addr, cntl_value_w;
4340         uint32_t tag_value, read_cnt;
4341         volatile uint8_t cntl_value_r;
4342         long timeout;
4343         uint32_t data;
4344
4345         loop_cnt = cacheEntry->op_count;
4346
4347         read_addr = cacheEntry->read_addr;
4348         cntrl_addr = cacheEntry->control_addr;
4349         cntl_value_w = (uint32_t) cacheEntry->write_value;
4350
4351         tag_reg_addr = cacheEntry->tag_reg_addr;
4352
4353         tag_value = cacheEntry->init_tag_value;
4354         read_cnt = cacheEntry->read_addr_cnt;
4355
4356         for (i = 0; i < loop_cnt; i++) {
4357
4358                 ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
4359                 if (ret)
4360                         return (0);
4361
4362                 if (cacheEntry->write_value != 0) { 
4363
4364                         ret = ql_rdwr_indreg32(ha, cntrl_addr,
4365                                         &cntl_value_w, 0);
4366                         if (ret)
4367                                 return (0);
4368                 }
4369
4370                 if (cacheEntry->poll_mask != 0) { 
4371
4372                         timeout = cacheEntry->poll_wait;
4373
4374                         ret = ql_rdwr_indreg32(ha, cntrl_addr, &data, 1);
4375                         if (ret)
4376                                 return (0);
4377
4378                         cntl_value_r = (uint8_t)data;
4379
4380                         while ((cntl_value_r & cacheEntry->poll_mask) != 0) {
4381
4382                                 if (timeout) {
4383                                         qla_mdelay(__func__, 1);
4384                                         timeout--;
4385                                 } else
4386                                         break;
4387
4388                                 ret = ql_rdwr_indreg32(ha, cntrl_addr,
4389                                                 &data, 1);
4390                                 if (ret)
4391                                         return (0);
4392
4393                                 cntl_value_r = (uint8_t)data;
4394                         }
4395                         if (!timeout) {
4396                                 /* Report a timeout error:
4397                                  * the core dump capture failed.
4398                                  * Skip the remaining entries,
4399                                  * write the buffer out to a file, and
4400                                  * use driver-specific fields in the
4401                                  * template header to report this error.
4402                                  */
4403                                 return (-1);
4404                         }
4405                 }
4406
4407                 addr = read_addr;
4408                 for (k = 0; k < read_cnt; k++) {
4409
4410                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4411                         if (ret)
4412                                 return (0);
4413
4414                         *data_buff++ = read_value;
4415                         addr += cacheEntry->read_addr_stride;
4416                 }
4417
4418                 tag_value += cacheEntry->tag_value_stride;
4419         }
4420
4421         return (read_cnt * loop_cnt * sizeof(uint32_t));
4422 }
4423
4424 /*
4425  * Handle L1 Cache.
4426  */
4427
4428 static uint32_t 
4429 ql_L1Cache(qla_host_t *ha,
4430         ql_minidump_entry_cache_t *cacheEntry,
4431         uint32_t *data_buff)
4432 {
4433         int ret;
4434         int i, k;
4435         int loop_cnt;
4436
4437         uint32_t read_value;
4438         uint32_t addr, read_addr, cntrl_addr, tag_reg_addr;
4439         uint32_t tag_value, read_cnt;
4440         uint32_t cntl_value_w;
4441
4442         loop_cnt = cacheEntry->op_count;
4443
4444         read_addr = cacheEntry->read_addr;
4445         cntrl_addr = cacheEntry->control_addr;
4446         cntl_value_w = (uint32_t) cacheEntry->write_value;
4447
4448         tag_reg_addr = cacheEntry->tag_reg_addr;
4449
4450         tag_value = cacheEntry->init_tag_value;
4451         read_cnt = cacheEntry->read_addr_cnt;
4452
4453         for (i = 0; i < loop_cnt; i++) {
4454
4455                 ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
4456                 if (ret)
4457                         return (0);
4458
4459                 ret = ql_rdwr_indreg32(ha, cntrl_addr, &cntl_value_w, 0);
4460                 if (ret)
4461                         return (0);
4462
4463                 addr = read_addr;
4464                 for (k = 0; k < read_cnt; k++) {
4465
4466                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4467                         if (ret)
4468                                 return (0);
4469
4470                         *data_buff++ = read_value;
4471                         addr += cacheEntry->read_addr_stride;
4472                 }
4473
4474                 tag_value += cacheEntry->tag_value_stride;
4475         }
4476
4477         return (read_cnt * loop_cnt * sizeof(uint32_t));
4478 }
4479
4480 /*
4481  * Reading OCM memory
4482  */
4483
4484 static uint32_t 
4485 ql_rdocm(qla_host_t *ha,
4486         ql_minidump_entry_rdocm_t *ocmEntry,
4487         uint32_t *data_buff)
4488 {
4489         int i, loop_cnt;
4490         volatile uint32_t addr;
4491         volatile uint32_t value;
4492
4493         addr = ocmEntry->read_addr;
4494         loop_cnt = ocmEntry->op_count;
4495
4496         for (i = 0; i < loop_cnt; i++) {
4497                 value = READ_REG32(ha, addr);
4498                 *data_buff++ = value;
4499                 addr += ocmEntry->read_addr_stride;
4500         }
4501         return (loop_cnt * sizeof(value));
4502 }
4503
4504 /*
4505  * Read memory
4506  */
4507
4508 static uint32_t 
4509 ql_rdmem(qla_host_t *ha,
4510         ql_minidump_entry_rdmem_t *mem_entry,
4511         uint32_t *data_buff)
4512 {
4513         int ret;
4514         int i, loop_cnt;
4515         volatile uint32_t addr;
4516         q80_offchip_mem_val_t val;
4517
4518         addr = mem_entry->read_addr;
4519
4520         /* size in bytes / 16 */
4521         loop_cnt = mem_entry->read_data_size / (sizeof(uint32_t) * 4);
4522
4523         for (i = 0; i < loop_cnt; i++) {
4524
4525                 ret = ql_rdwr_offchip_mem(ha, (addr & 0x0ffffffff), &val, 1);
4526                 if (ret)
4527                         return (0);
4528
4529                 *data_buff++ = val.data_lo;
4530                 *data_buff++ = val.data_hi;
4531                 *data_buff++ = val.data_ulo;
4532                 *data_buff++ = val.data_uhi;
4533
4534                 addr += (sizeof(uint32_t) * 4);
4535         }
4536
4537         return (loop_cnt * (sizeof(uint32_t) * 4));
4538 }
4539
4540 /*
4541  * Read Rom
4542  */
4543
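/*
 * Capture 'read_data_size' bytes of flash, one 32-bit word per
 * ql_rd_flash32() call, starting at 'read_addr'. Returns the number
 * of bytes captured, or 0 if a flash read fails.
 */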
4544 static uint32_t
4545 ql_rdrom(qla_host_t *ha,
4546         ql_minidump_entry_rdrom_t *romEntry,
4547         uint32_t *data_buff)
4548 {
4549         int ret;
4550         int i, loop_cnt;
4551         uint32_t addr;
4552         uint32_t value;
4553
4554         addr = romEntry->read_addr;
4555         loop_cnt = romEntry->read_data_size; /* This is size in bytes */
4556         loop_cnt /= sizeof(value);
4557
4558         for (i = 0; i < loop_cnt; i++) {
4559
4560                 ret = ql_rd_flash32(ha, addr, &value);
4561                 if (ret)
4562                         return (0);
4563
4564                 *data_buff++ = value;
4565                 addr += sizeof(value);
4566         }
4567
4568         return (loop_cnt * sizeof(value));
4569 }
4570
4571 /*
4572  * Read MUX data
4573  */
4574
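/*
 * For each of 'op_count' mux selections, program the select register
 * and sample the read register. Both the select value and the data
 * read are stored, so every iteration contributes two words to the
 * dump. Returns the number of bytes captured, or 0 on failure.
 */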
4575 static uint32_t
4576 ql_rdmux(qla_host_t *ha,
4577         ql_minidump_entry_mux_t *muxEntry,
4578         uint32_t *data_buff)
4579 {
4580         int ret;
4581         int loop_cnt;
4582         uint32_t read_value, sel_value;
4583         uint32_t read_addr, select_addr;
4584
4585         select_addr = muxEntry->select_addr;
4586         sel_value = muxEntry->select_value;
4587         read_addr = muxEntry->read_addr;
4588
4589         for (loop_cnt = 0; loop_cnt < muxEntry->op_count; loop_cnt++) {
4590
4591                 ret = ql_rdwr_indreg32(ha, select_addr, &sel_value, 0);
4592                 if (ret)
4593                         return (0);
4594
4595                 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
4596                 if (ret)
4597                         return (0);
4598
4599                 *data_buff++ = sel_value;
4600                 *data_buff++ = read_value;
4601
4602                 sel_value += muxEntry->select_value_stride;
4603         }
4604
4605         return (loop_cnt * (2 * sizeof(uint32_t)));
4606 }
4607
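/*
 * Two-stage mux capture: per iteration, write the primary select value
 * to select_addr_1, write its masked form to select_addr_2 and sample
 * the read register; then repeat with the secondary select value.
 * Each iteration therefore stores two <select, data> pairs (four
 * words). Returns the number of bytes captured, or 0 on failure.
 */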
4608 static uint32_t
4609 ql_rdmux2(qla_host_t *ha,
4610         ql_minidump_entry_mux2_t *muxEntry,
4611         uint32_t *data_buff)
4612 {
4613         int ret;
4614         int loop_cnt;
4615
4616         uint32_t select_addr_1, select_addr_2;
4617         uint32_t select_value_1, select_value_2;
4618         uint32_t select_value_count, select_value_mask;
4619         uint32_t read_addr, read_value;
4620
4621         select_addr_1 = muxEntry->select_addr_1;
4622         select_addr_2 = muxEntry->select_addr_2;
4623         select_value_1 = muxEntry->select_value_1;
4624         select_value_2 = muxEntry->select_value_2;
4625         select_value_count = muxEntry->select_value_count;
4626         select_value_mask  = muxEntry->select_value_mask;
4627
4628         read_addr = muxEntry->read_addr;
4629
4630         for (loop_cnt = 0; loop_cnt < select_value_count;
4631                 loop_cnt++) {
4632
4633                 uint32_t temp_sel_val;
4634
4635                 ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_1, 0);
4636                 if (ret)
4637                         return (0);
4638
4639                 temp_sel_val = select_value_1 & select_value_mask;
4640
4641                 ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
4642                 if (ret)
4643                         return (0);
4644
4645                 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
4646                 if (ret)
4647                         return (0);
4648
4649                 *data_buff++ = temp_sel_val;
4650                 *data_buff++ = read_value;
4651
4652                 ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_2, 0);
4653                 if (ret)
4654                         return (0);
4655
4656                 temp_sel_val = select_value_2 & select_value_mask;
4657
4658                 ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
4659                 if (ret)
4660                         return (0);
4661
4662                 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
4663                 if (ret)
4664                         return (0);
4665
4666                 *data_buff++ = temp_sel_val;
4667                 *data_buff++ = read_value;
4668
4669                 select_value_1 += muxEntry->select_value_stride;
4670                 select_value_2 += muxEntry->select_value_stride;
4671         }
4672
4673         return (loop_cnt * (4 * sizeof(uint32_t)));
4674 }
4675
4676 /*
4677  * Handling Queue State Reads.
4678  */
4679
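/*
 * For each queue id (stepped by 'queue_id_stride'), program the queue
 * select register and read 'read_addr_cnt' state words starting at
 * 'read_addr'. Returns the number of bytes captured, or 0 on failure.
 */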
4680 static uint32_t
4681 ql_rdqueue(qla_host_t *ha,
4682         ql_minidump_entry_queue_t *queueEntry,
4683         uint32_t *data_buff)
4684 {
4685         int ret;
4686         int loop_cnt, k;
4687         uint32_t read_value;
4688         uint32_t read_addr, read_stride, select_addr;
4689         uint32_t queue_id, read_cnt;
4690
4691         read_cnt = queueEntry->read_addr_cnt;
4692         read_stride = queueEntry->read_addr_stride;
4693         select_addr = queueEntry->select_addr;
4694
4695         for (loop_cnt = 0, queue_id = 0; loop_cnt < queueEntry->op_count;
4696                 loop_cnt++) {
4697
4698                 ret = ql_rdwr_indreg32(ha, select_addr, &queue_id, 0);
4699                 if (ret)
4700                         return (0);
4701
4702                 read_addr = queueEntry->read_addr;
4703
4704                 for (k = 0; k < read_cnt; k++) {
4705
4706                         ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
4707                         if (ret)
4708                                 return (0);
4709
4710                         *data_buff++ = read_value;
4711                         read_addr += read_stride;
4712                 }
4713
4714                 queue_id += queueEntry->queue_id_stride;
4715         }
4716
4717         return (loop_cnt * (read_cnt * sizeof(uint32_t)));
4718 }
4719
4720 /*
4721  * Handling control entries.
4722  */
4723
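/*
 * Execute a control (CRB) entry. The opcode is a bit mask that may
 * combine write, read/write, AND/OR modify, poll and saved-state
 * operations, applied to 'op_count' consecutive addresses. Control
 * entries capture no dump data: the return value is 0 on success or
 * on a register access failure, and -1 if a poll times out.
 */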
4724 static uint32_t
4725 ql_cntrl(qla_host_t *ha,
4726         ql_minidump_template_hdr_t *template_hdr,
4727         ql_minidump_entry_cntrl_t *crbEntry)
4728 {
4729         int ret;
4730         int count;
4731         uint32_t opcode, read_value, addr, entry_addr;
4732         long timeout;
4733
4734         entry_addr = crbEntry->addr;
4735
4736         for (count = 0; count < crbEntry->op_count; count++) {
4737                 opcode = crbEntry->opcode;
4738
4739                 if (opcode & QL_DBG_OPCODE_WR) {
4740
4741                         ret = ql_rdwr_indreg32(ha, entry_addr,
4742                                         &crbEntry->value_1, 0);
4743                         if (ret)
4744                                 return (0);
4745
4746                         opcode &= ~QL_DBG_OPCODE_WR;
4747                 }
4748
4749                 if (opcode & QL_DBG_OPCODE_RW) {
4750
4751                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
4752                         if (ret)
4753                                 return (0);
4754
4755                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
4756                         if (ret)
4757                                 return (0);
4758
4759                         opcode &= ~QL_DBG_OPCODE_RW;
4760                 }
4761
4762                 if (opcode & QL_DBG_OPCODE_AND) {
4763
4764                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
4765                         if (ret)
4766                                 return (0);
4767
4768                         read_value &= crbEntry->value_2;
4769                         opcode &= ~QL_DBG_OPCODE_AND;
4770
4771                         if (opcode & QL_DBG_OPCODE_OR) {
4772                                 read_value |= crbEntry->value_3;
4773                                 opcode &= ~QL_DBG_OPCODE_OR;
4774                         }
4775
4776                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
4777                         if (ret)
4778                                 return (0);
4779                 }
4780
4781                 if (opcode & QL_DBG_OPCODE_OR) {
4782
4783                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
4784                         if (ret)
4785                                 return (0);
4786
4787                         read_value |= crbEntry->value_3;
4788
4789                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
4790                         if (ret)
4791                                 return (0);
4792
4793                         opcode &= ~QL_DBG_OPCODE_OR;
4794                 }
4795
4796                 if (opcode & QL_DBG_OPCODE_POLL) {
4797
4798                         opcode &= ~QL_DBG_OPCODE_POLL;
4799                         timeout = crbEntry->poll_timeout;
4800                         addr = entry_addr;
4801
4802                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4803                         if (ret)
4804                                 return (0);
4805
4806                         while ((read_value & crbEntry->value_2)
4807                                 != crbEntry->value_1) {
4808
4809                                 if (timeout) {
4810                                         qla_mdelay(__func__, 1);
4811                                         timeout--;
4812                                 } else
4813                                         break;
4814
4815                                 ret = ql_rdwr_indreg32(ha, addr,
4816                                                 &read_value, 1);
4817                                 if (ret)
4818                                         return (0);
4819                         }
4820
4821                         if (!timeout) {
4822                                 /*
4823                                  * The poll timed out, so the core dump
4824                                  * capture failed. Skip the remaining
4825                                  * entries and write the buffer out to
4826                                  * the file, using the driver specific
4827                                  * fields in the template header to
4828                                  * report this error.
4829                                  */
4830                                 return (-1);
4831                         }
4832                 }
4833
4834                 if (opcode & QL_DBG_OPCODE_RDSTATE) {
4835                         /*
4836                          * decide which address to use.
4837                          */
4838                         if (crbEntry->state_index_a) {
4839                                 addr = template_hdr->saved_state_array[
4840                                                 crbEntry->state_index_a];
4841                         } else {
4842                                 addr = entry_addr;
4843                         }
4844
4845                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4846                         if (ret)
4847                                 return (0);
4848
4849                         template_hdr->saved_state_array[crbEntry->state_index_v]
4850                                         = read_value;
4851                         opcode &= ~QL_DBG_OPCODE_RDSTATE;
4852                 }
4853
4854                 if (opcode & QL_DBG_OPCODE_WRSTATE) {
4855                         /*
4856                          * decide which value to use.
4857                          */
4858                         if (crbEntry->state_index_v) {
4859                                 read_value = template_hdr->saved_state_array[
4860                                                 crbEntry->state_index_v];
4861                         } else {
4862                                 read_value = crbEntry->value_1;
4863                         }
4864                         /*
4865                          * decide which address to use.
4866                          */
4867                         if (crbEntry->state_index_a) {
4868                                 addr = template_hdr->saved_state_array[
4869                                                 crbEntry->state_index_a];
4870                         } else {
4871                                 addr = entry_addr;
4872                         }
4873
4874                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 0);
4875                         if (ret)
4876                                 return (0);
4877
4878                         opcode &= ~QL_DBG_OPCODE_WRSTATE;
4879                 }
4880
4881                 if (opcode & QL_DBG_OPCODE_MDSTATE) {
4882                         /*  Read value from saved state using index */
4883                         read_value = template_hdr->saved_state_array[
4884                                                 crbEntry->state_index_v];
4885
4886                         read_value <<= crbEntry->shl; /* Shift left operation */
4887                         read_value >>= crbEntry->shr; /* Shift right operation */
4888
4889                         if (crbEntry->value_2) {
4890                                 /* check if AND mask is provided */
4891                                 read_value &= crbEntry->value_2;
4892                         }
4893
4894                         read_value |= crbEntry->value_3; /* OR operation */
4895                         read_value += crbEntry->value_1; /* increment op */
4896
4897                         /* Write value back to state area. */
4898
4899                         template_hdr->saved_state_array[crbEntry->state_index_v]
4900                                         = read_value;
4901                         opcode &= ~QL_DBG_OPCODE_MDSTATE;
4902                 }
4903
4904                 entry_addr += crbEntry->addr_stride;
4905         }
4906
4907         return (0);
4908 }
4909
4910 /*
4911  * Handling rd poll entry.
4912  */
4913
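/*
 * For each of 'op_count' select values, program the select register,
 * poll it until one of the mask bits is set (at most 'poll' reads),
 * then store the <select value, data> pair from the read register.
 * Returns the number of bytes captured, or 0 on failure or timeout.
 */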
4914 static uint32_t
4915 ql_pollrd(qla_host_t *ha, ql_minidump_entry_pollrd_t *entry,
4916         uint32_t *data_buff)
4917 {
4918         int ret;
4919         int loop_cnt;
4920         uint32_t op_count, select_addr, select_value_stride, select_value;
4921         uint32_t read_addr, poll, mask, data_size, data;
4922         uint32_t wait_count = 0;
4923
4924         select_addr            = entry->select_addr;
4925         read_addr              = entry->read_addr;
4926         select_value           = entry->select_value;
4927         select_value_stride    = entry->select_value_stride;
4928         op_count               = entry->op_count;
4929         poll                   = entry->poll;
4930         mask                   = entry->mask;
4931         data_size              = entry->data_size;
4932
4933         for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
4934
4935                 ret = ql_rdwr_indreg32(ha, select_addr, &select_value, 0);
4936                 if (ret)
4937                         return (0);
4938
4939                 wait_count = 0;
4940
4941                 while (wait_count < poll) {
4942
4943                         uint32_t temp;
4944
4945                         ret = ql_rdwr_indreg32(ha, select_addr, &temp, 1);
4946                         if (ret)
4947                                 return (0);
4948
4949                         if ((temp & mask) != 0) {
4950                                 break;
4951                         }
4952                         wait_count++;
4953                 }
4954
4955                 if (wait_count == poll) {
4956                         device_printf(ha->pci_dev,
4957                                 "%s: Error in processing entry\n", __func__);
4958                         device_printf(ha->pci_dev,
4959                                 "%s: wait_count <0x%x> poll <0x%x>\n",
4960                                 __func__, wait_count, poll);
4961                         return (0);
4962                 }
4963
4964                 ret = ql_rdwr_indreg32(ha, read_addr, &data, 1);
4965                 if (ret)
4966                         return (0);
4967
4968                 *data_buff++ = select_value;
4969                 *data_buff++ = data;
4970                 select_value = select_value + select_value_stride;
4971         }
4972
4973         /*
4974          * For testing purposes, return the amount of data written.
4975          */
4976         return (loop_cnt * (2 * sizeof(uint32_t)));
4977 }
4978
4980 /*
4981  * Handling rd modify write poll entry.
4982  */
4983
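/*
 * Poll-based read-modify-write: write value_1 to addr_1 and poll addr_1
 * for the mask bits; on success, AND the value at addr_2 with
 * modify_mask and write it back, then write value_2 to addr_1 and poll
 * once more before storing the <addr_2, data> pair. Returns the number
 * of bytes captured, or 0 on failure or timeout.
 */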
4984 static uint32_t
4985 ql_pollrd_modify_write(qla_host_t *ha,
4986         ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
4987         uint32_t *data_buff)
4988 {
4989         int ret;
4990         uint32_t addr_1, addr_2, value_1, value_2, data;
4991         uint32_t poll, mask, data_size, modify_mask;
4992         uint32_t wait_count = 0;
4993
4994         addr_1          = entry->addr_1;
4995         addr_2          = entry->addr_2;
4996         value_1         = entry->value_1;
4997         value_2         = entry->value_2;
4998
4999         poll            = entry->poll;
5000         mask            = entry->mask;
5001         modify_mask     = entry->modify_mask;
5002         data_size       = entry->data_size;
5003
5005         ret = ql_rdwr_indreg32(ha, addr_1, &value_1, 0);
5006         if (ret)
5007                 return (0);
5008
5009         wait_count = 0;
5010         while (wait_count < poll) {
5011
5012                 uint32_t temp;
5013
5014                 ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
5015                 if (ret)
5016                         return (0);
5017
5018                 if ((temp & mask) != 0) {
5019                         break;
5020                 }
5021                 wait_count++;
5022         }
5023
5024         if (wait_count == poll) {
5025                 device_printf(ha->pci_dev, "%s: Error in processing entry\n",
5026                         __func__);
                return (0); /* poll timed out; nothing was captured */
5027         } else {
5028
5029                 ret = ql_rdwr_indreg32(ha, addr_2, &data, 1);
5030                 if (ret)
5031                         return (0);
5032
5033                 data = (data & modify_mask);
5034
5035                 ret = ql_rdwr_indreg32(ha, addr_2, &data, 0);
5036                 if (ret)
5037                         return (0);
5038
5039                 ret = ql_rdwr_indreg32(ha, addr_1, &value_2, 0);
5040                 if (ret)
5041                         return (0);
5042
5043                 /* Poll again */
5044                 wait_count = 0;
5045                 while (wait_count < poll) {
5046
5047                         uint32_t temp;
5048
5049                         ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
5050                         if (ret)
5051                                 return (0);
5052
5053                         if ((temp & mask) != 0) {
5054                                 break;
5055                         }
5056                         wait_count++;
5057                 }
5058                 *data_buff++ = addr_2;
5059                 *data_buff++ = data;
5060         }
5061
5062         /*
5063          * For testing purposes, return the amount of data written.
5064          */
5065         return (2 * sizeof(uint32_t));
5066 }
5067