/*
 * Copyright (c) 2013-2016 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: ql_hw.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 * Content: Contains hardware-dependent functions
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "ql_os.h"
#include "ql_hw.h"
#include "ql_def.h"
#include "ql_inline.h"
#include "ql_ver.h"
#include "ql_glbl.h"
#include "ql_dbg.h"
#include "ql_minidump.h"

/*
 * Static Functions
 */

static void qla_del_rcv_cntxt(qla_host_t *ha);
static int qla_init_rcv_cntxt(qla_host_t *ha);
static void qla_del_xmt_cntxt(qla_host_t *ha);
static int qla_init_xmt_cntxt(qla_host_t *ha);
static int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
        uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause);
static int qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx,
        uint32_t num_intrs, uint32_t create);
static int qla_config_rss(qla_host_t *ha, uint16_t cntxt_id);
static int qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id,
        int tenable, int rcv);
static int qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode);
static int qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id);

static int qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd,
                uint8_t *hdr);
static int qla_hw_add_all_mcast(qla_host_t *ha);
static int qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds);

static int qla_init_nic_func(qla_host_t *ha);
static int qla_stop_nic_func(qla_host_t *ha);
static int qla_query_fw_dcbx_caps(qla_host_t *ha);
static int qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits);
static int qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits);
static int qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode);
static int qla_get_cam_search_mode(qla_host_t *ha);

static void ql_minidump_free(qla_host_t *ha);

#ifdef QL_DBG

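/*
 * Name: qla_stop_pegs
 * Function: Debug aid that halts the firmware PEG processors by writing 1
 *     to each of the Q8_CRB_PEG_0 .. Q8_CRB_PEG_4 CRB registers.
 */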
static void
qla_stop_pegs(qla_host_t *ha)
{
        uint32_t val = 1;

        ql_rdwr_indreg32(ha, Q8_CRB_PEG_0, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_1, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_2, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_3, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_4, &val, 0);
        device_printf(ha->pci_dev, "%s PEGS HALTED!!!!!\n", __func__);
}

static int
qla_sysctl_stop_pegs(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        if (ret == 1) {
                ha = (qla_host_t *)arg1;
                if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
                        qla_stop_pegs(ha);
                        QLA_UNLOCK(ha, __func__);
                }
        }

        return (err);
}
#endif /* #ifdef QL_DBG */

static int
qla_validate_set_port_cfg_bit(uint32_t bits)
{
        if ((bits & 0xF) > 1)
                return (-1);

        if (((bits >> 4) & 0xF) > 2)
                return (-1);

        if (((bits >> 8) & 0xF) > 2)
                return (-1);

        return (0);
}
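
/*
 * Example (illustrative only): a sysctl value of 0x211 passes the check
 * above and requests DCBX enabled (bits 0-3 = 1), standard pause
 * (bits 4-7 = 1), and rcv-only pause direction (bits 8-11 = 2), matching
 * the decode in qla_sysctl_port_cfg() below.
 */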

static int
qla_sysctl_port_cfg(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;
        uint32_t cfg_bits;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        ha = (qla_host_t *)arg1;

        if ((qla_validate_set_port_cfg_bit((uint32_t)ret) == 0)) {

                err = qla_get_port_config(ha, &cfg_bits);

                if (err)
                        goto qla_sysctl_set_port_cfg_exit;

                if (ret & 0x1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_DCBX_ENABLE;
                } else {
                        cfg_bits &= ~Q8_PORT_CFG_BITS_DCBX_ENABLE;
                }

                ret = ret >> 4;
                cfg_bits &= ~Q8_PORT_CFG_BITS_PAUSE_CFG_MASK;

                if ((ret & 0xF) == 0) {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_DISABLED;
                } else if ((ret & 0xF) == 1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_STD;
                } else {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_PPM;
                }

                ret = ret >> 4;
                cfg_bits &= ~Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK;

                if (ret == 0) {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT_RCV;
                } else if (ret == 1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT;
                } else {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_RCV;
                }

                if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
                        err = qla_set_port_config(ha, cfg_bits);
                        QLA_UNLOCK(ha, __func__);
                } else {
                        device_printf(ha->pci_dev, "%s: failed\n", __func__);
                }
        } else {
                if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
                        err = qla_get_port_config(ha, &cfg_bits);
                        QLA_UNLOCK(ha, __func__);
                } else {
                        device_printf(ha->pci_dev, "%s: failed\n", __func__);
                }
        }

qla_sysctl_set_port_cfg_exit:
        return (err);
}

static int
qla_sysctl_set_cam_search_mode(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        ha = (qla_host_t *)arg1;

        if ((ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_INTERNAL) ||
                (ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_AUTO)) {

                if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
                        err = qla_set_cam_search_mode(ha, (uint32_t)ret);
                        QLA_UNLOCK(ha, __func__);
                } else {
                        device_printf(ha->pci_dev, "%s: failed\n", __func__);
                }

        } else {
                device_printf(ha->pci_dev, "%s: ret = %d\n", __func__, ret);
        }

        return (err);
}

static int
qla_sysctl_get_cam_search_mode(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        ha = (qla_host_t *)arg1;
        if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
                err = qla_get_cam_search_mode(ha);
                QLA_UNLOCK(ha, __func__);
        } else {
                device_printf(ha->pci_dev, "%s: failed\n", __func__);
        }

        return (err);
}

static void
qlnx_add_hw_mac_stats_sysctls(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid       *ctx_oid;

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_mac",
                        CTLFLAG_RD, NULL, "stats_hw_mac");
        children = SYSCTL_CHILDREN(ctx_oid);

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_frames",
                CTLFLAG_RD, &ha->hw.mac.xmt_frames,
                "xmt_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_bytes,
                "xmt_bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_mcast_pkts",
                CTLFLAG_RD, &ha->hw.mac.xmt_mcast_pkts,
                "xmt_mcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_bcast_pkts",
                CTLFLAG_RD, &ha->hw.mac.xmt_bcast_pkts,
                "xmt_bcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pause_frames",
                CTLFLAG_RD, &ha->hw.mac.xmt_pause_frames,
                "xmt_pause_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_cntrl_pkts",
                CTLFLAG_RD, &ha->hw.mac.xmt_cntrl_pkts,
                "xmt_cntrl_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_64bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_64bytes,
                "xmt_pkt_lt_64bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_127bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_127bytes,
                "xmt_pkt_lt_127bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_255bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_255bytes,
                "xmt_pkt_lt_255bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_511bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_511bytes,
                "xmt_pkt_lt_511bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_1023bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_1023bytes,
                "xmt_pkt_lt_1023bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_1518bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_1518bytes,
                "xmt_pkt_lt_1518bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_gt_1518bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_gt_1518bytes,
                "xmt_pkt_gt_1518bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_frames",
                CTLFLAG_RD, &ha->hw.mac.rcv_frames,
                "rcv_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_bytes,
                "rcv_bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_mcast_pkts",
                CTLFLAG_RD, &ha->hw.mac.rcv_mcast_pkts,
                "rcv_mcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_bcast_pkts",
                CTLFLAG_RD, &ha->hw.mac.rcv_bcast_pkts,
                "rcv_bcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pause_frames",
                CTLFLAG_RD, &ha->hw.mac.rcv_pause_frames,
                "rcv_pause_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_cntrl_pkts",
                CTLFLAG_RD, &ha->hw.mac.rcv_cntrl_pkts,
                "rcv_cntrl_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_64bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_64bytes,
                "rcv_pkt_lt_64bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_127bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_127bytes,
                "rcv_pkt_lt_127bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_255bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_255bytes,
                "rcv_pkt_lt_255bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_511bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_511bytes,
                "rcv_pkt_lt_511bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_1023bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_1023bytes,
                "rcv_pkt_lt_1023bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_1518bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_1518bytes,
                "rcv_pkt_lt_1518bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_gt_1518bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_gt_1518bytes,
                "rcv_pkt_gt_1518bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_len_error",
                CTLFLAG_RD, &ha->hw.mac.rcv_len_error,
                "rcv_len_error");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_len_small",
                CTLFLAG_RD, &ha->hw.mac.rcv_len_small,
                "rcv_len_small");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_len_large",
                CTLFLAG_RD, &ha->hw.mac.rcv_len_large,
                "rcv_len_large");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_jabber",
                CTLFLAG_RD, &ha->hw.mac.rcv_jabber,
                "rcv_jabber");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_dropped",
                CTLFLAG_RD, &ha->hw.mac.rcv_dropped,
                "rcv_dropped");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "fcs_error",
                CTLFLAG_RD, &ha->hw.mac.fcs_error,
                "fcs_error");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "align_error",
                CTLFLAG_RD, &ha->hw.mac.align_error,
                "align_error");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_frames",
                CTLFLAG_RD, &ha->hw.mac.eswitched_frames,
                "eswitched_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_bytes",
                CTLFLAG_RD, &ha->hw.mac.eswitched_bytes,
                "eswitched_bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_mcast_frames",
                CTLFLAG_RD, &ha->hw.mac.eswitched_mcast_frames,
                "eswitched_mcast_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_bcast_frames",
                CTLFLAG_RD, &ha->hw.mac.eswitched_bcast_frames,
                "eswitched_bcast_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_ucast_frames",
                CTLFLAG_RD, &ha->hw.mac.eswitched_ucast_frames,
                "eswitched_ucast_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_err_free_frames",
                CTLFLAG_RD, &ha->hw.mac.eswitched_err_free_frames,
                "eswitched_err_free_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_err_free_bytes",
                CTLFLAG_RD, &ha->hw.mac.eswitched_err_free_bytes,
                "eswitched_err_free_bytes");

        return;
}

static void
qlnx_add_hw_rcv_stats_sysctls(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid       *ctx_oid;

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_rcv",
                        CTLFLAG_RD, NULL, "stats_hw_rcv");
        children = SYSCTL_CHILDREN(ctx_oid);

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "total_bytes",
                CTLFLAG_RD, &ha->hw.rcv.total_bytes,
                "total_bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "total_pkts",
                CTLFLAG_RD, &ha->hw.rcv.total_pkts,
                "total_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "lro_pkt_count",
                CTLFLAG_RD, &ha->hw.rcv.lro_pkt_count,
                "lro_pkt_count");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "sw_pkt_count",
                CTLFLAG_RD, &ha->hw.rcv.sw_pkt_count,
                "sw_pkt_count");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "ip_chksum_err",
                CTLFLAG_RD, &ha->hw.rcv.ip_chksum_err,
                "ip_chksum_err");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_wo_acntxts",
                CTLFLAG_RD, &ha->hw.rcv.pkts_wo_acntxts,
                "pkts_wo_acntxts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_dropped_no_sds_card",
                CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_sds_card,
                "pkts_dropped_no_sds_card");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_dropped_no_sds_host",
                CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_sds_host,
                "pkts_dropped_no_sds_host");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "oversized_pkts",
                CTLFLAG_RD, &ha->hw.rcv.oversized_pkts,
                "oversized_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_dropped_no_rds",
                CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_rds,
                "pkts_dropped_no_rds");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "unxpctd_mcast_pkts",
                CTLFLAG_RD, &ha->hw.rcv.unxpctd_mcast_pkts,
                "unxpctd_mcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "re1_fbq_error",
                CTLFLAG_RD, &ha->hw.rcv.re1_fbq_error,
                "re1_fbq_error");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "invalid_mac_addr",
                CTLFLAG_RD, &ha->hw.rcv.invalid_mac_addr,
                "invalid_mac_addr");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rds_prime_trys",
                CTLFLAG_RD, &ha->hw.rcv.rds_prime_trys,
                "rds_prime_trys");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rds_prime_success",
                CTLFLAG_RD, &ha->hw.rcv.rds_prime_success,
                "rds_prime_success");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "lro_flows_added",
                CTLFLAG_RD, &ha->hw.rcv.lro_flows_added,
                "lro_flows_added");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "lro_flows_deleted",
                CTLFLAG_RD, &ha->hw.rcv.lro_flows_deleted,
                "lro_flows_deleted");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "lro_flows_active",
                CTLFLAG_RD, &ha->hw.rcv.lro_flows_active,
                "lro_flows_active");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_droped_unknown",
                CTLFLAG_RD, &ha->hw.rcv.pkts_droped_unknown,
                "pkts_droped_unknown");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_cnt_oversized",
                CTLFLAG_RD, &ha->hw.rcv.pkts_cnt_oversized,
                "pkts_cnt_oversized");

        return;
}

static void
qlnx_add_hw_xmt_stats_sysctls(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid_list  *node_children;
        struct sysctl_oid       *ctx_oid;
        int                     i;
        uint8_t                 name_str[16];

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_xmt",
                        CTLFLAG_RD, NULL, "stats_hw_xmt");
        children = SYSCTL_CHILDREN(ctx_oid);

        for (i = 0; i < ha->hw.num_tx_rings; i++) {

                bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
                snprintf(name_str, sizeof(name_str), "%d", i);

                ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
                        CTLFLAG_RD, NULL, name_str);
                node_children = SYSCTL_CHILDREN(ctx_oid);

                /* Tx Related */

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "total_bytes",
                        CTLFLAG_RD, &ha->hw.xmt[i].total_bytes,
                        "total_bytes");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "total_pkts",
                        CTLFLAG_RD, &ha->hw.xmt[i].total_pkts,
                        "total_pkts");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "errors",
                        CTLFLAG_RD, &ha->hw.xmt[i].errors,
                        "errors");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "pkts_dropped",
                        CTLFLAG_RD, &ha->hw.xmt[i].pkts_dropped,
                        "pkts_dropped");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "switch_pkts",
                        CTLFLAG_RD, &ha->hw.xmt[i].switch_pkts,
                        "switch_pkts");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "num_buffers",
                        CTLFLAG_RD, &ha->hw.xmt[i].num_buffers,
                        "num_buffers");
        }

        return;
}

static void
qlnx_add_hw_stats_sysctls(qla_host_t *ha)
{
        qlnx_add_hw_mac_stats_sysctls(ha);
        qlnx_add_hw_rcv_stats_sysctls(ha);
        qlnx_add_hw_xmt_stats_sysctls(ha);

        return;
}

static void
qlnx_add_drvr_sds_stats(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid_list  *node_children;
        struct sysctl_oid       *ctx_oid;
        int                     i;
        uint8_t                 name_str[16];

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_sds",
                        CTLFLAG_RD, NULL, "stats_drvr_sds");
        children = SYSCTL_CHILDREN(ctx_oid);

        for (i = 0; i < ha->hw.num_sds_rings; i++) {

                bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
                snprintf(name_str, sizeof(name_str), "%d", i);

                ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
                        CTLFLAG_RD, NULL, name_str);
                node_children = SYSCTL_CHILDREN(ctx_oid);

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "intr_count",
                        CTLFLAG_RD, &ha->hw.sds[i].intr_count,
                        "intr_count");

                SYSCTL_ADD_UINT(ctx, node_children,
                        OID_AUTO, "rx_free",
                        CTLFLAG_RD, &ha->hw.sds[i].rx_free,
                        ha->hw.sds[i].rx_free, "rx_free");
        }

        return;
}

static void
qlnx_add_drvr_rds_stats(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid_list  *node_children;
        struct sysctl_oid       *ctx_oid;
        int                     i;
        uint8_t                 name_str[16];

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_rds",
                        CTLFLAG_RD, NULL, "stats_drvr_rds");
        children = SYSCTL_CHILDREN(ctx_oid);

        for (i = 0; i < ha->hw.num_rds_rings; i++) {

                bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
                snprintf(name_str, sizeof(name_str), "%d", i);

                ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
                        CTLFLAG_RD, NULL, name_str);
                node_children = SYSCTL_CHILDREN(ctx_oid);

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "count",
                        CTLFLAG_RD, &ha->hw.rds[i].count,
                        "count");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "lro_pkt_count",
                        CTLFLAG_RD, &ha->hw.rds[i].lro_pkt_count,
                        "lro_pkt_count");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "lro_bytes",
                        CTLFLAG_RD, &ha->hw.rds[i].lro_bytes,
                        "lro_bytes");
        }

        return;
}

static void
qlnx_add_drvr_tx_stats(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid_list  *node_children;
        struct sysctl_oid       *ctx_oid;
        int                     i;
        uint8_t                 name_str[16];

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_xmt",
                        CTLFLAG_RD, NULL, "stats_drvr_xmt");
        children = SYSCTL_CHILDREN(ctx_oid);

        for (i = 0; i < ha->hw.num_tx_rings; i++) {

                bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
                snprintf(name_str, sizeof(name_str), "%d", i);

                ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
                        CTLFLAG_RD, NULL, name_str);
                node_children = SYSCTL_CHILDREN(ctx_oid);

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "count",
                        CTLFLAG_RD, &ha->tx_ring[i].count,
                        "count");

#ifdef QL_ENABLE_ISCSI_TLV
                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "iscsi_pkt_count",
                        CTLFLAG_RD, &ha->tx_ring[i].iscsi_pkt_count,
                        "iscsi_pkt_count");
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
        }

        return;
}

static void
qlnx_add_drvr_stats_sysctls(qla_host_t *ha)
{
        qlnx_add_drvr_sds_stats(ha);
        qlnx_add_drvr_rds_stats(ha);
        qlnx_add_drvr_tx_stats(ha);
        return;
}

/*
 * Name: ql_hw_add_sysctls
 * Function: Add P3Plus specific sysctls
 */
void
ql_hw_add_sysctls(qla_host_t *ha)
{
        device_t        dev;

        dev = ha->pci_dev;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_rds_rings", CTLFLAG_RD, &ha->hw.num_rds_rings,
                ha->hw.num_rds_rings, "Number of Rcv Descriptor Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_sds_rings", CTLFLAG_RD, &ha->hw.num_sds_rings,
                ha->hw.num_sds_rings, "Number of Status Descriptor Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->hw.num_tx_rings,
                ha->hw.num_tx_rings, "Number of Transmit Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "tx_ring_index", CTLFLAG_RW, &ha->txr_idx,
                ha->txr_idx, "Tx Ring Used");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "max_tx_segs", CTLFLAG_RD, &ha->hw.max_tx_segs,
                ha->hw.max_tx_segs, "Max # of Segments in a non-TSO pkt");

        ha->hw.sds_cidx_thres = 32;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "sds_cidx_thres", CTLFLAG_RW, &ha->hw.sds_cidx_thres,
                ha->hw.sds_cidx_thres,
                "Number of SDS entries to process before updating"
                " SDS Ring Consumer Index");

        ha->hw.rds_pidx_thres = 32;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "rds_pidx_thres", CTLFLAG_RW, &ha->hw.rds_pidx_thres,
                ha->hw.rds_pidx_thres,
                "Number of Rcv Rings Entries to post before updating"
                " RDS Ring Producer Index");

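        /* default: coalesce up to 256 packets or 3 micro-seconds (format below) */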
        ha->hw.rcv_intr_coalesce = (3 << 16) | 256;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "rcv_intr_coalesce", CTLFLAG_RW,
                &ha->hw.rcv_intr_coalesce,
                ha->hw.rcv_intr_coalesce,
                "Rcv Intr Coalescing Parameters\n"
                "\tbits 15:0 max packets\n"
                "\tbits 31:16 max micro-seconds to wait\n"
                "\tplease run\n"
                "\tifconfig <if> down && ifconfig <if> up\n"
                "\tto take effect\n");

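        /* default: coalesce up to 64 packets or 64 micro-seconds (format below) */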
        ha->hw.xmt_intr_coalesce = (64 << 16) | 64;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "xmt_intr_coalesce", CTLFLAG_RW,
                &ha->hw.xmt_intr_coalesce,
                ha->hw.xmt_intr_coalesce,
                "Xmt Intr Coalescing Parameters\n"
                "\tbits 15:0 max packets\n"
                "\tbits 31:16 max micro-seconds to wait\n"
                "\tplease run\n"
                "\tifconfig <if> down && ifconfig <if> up\n"
                "\tto take effect\n");

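        /*
         * Illustrative usage from the shell (assuming the device attached
         * as ql0):
         *     sysctl dev.ql.0.port_cfg         (read current configuration)
         *     sysctl dev.ql.0.port_cfg=0x211   (DCBX on, std pause, rcv only)
         */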
        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "port_cfg", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_port_cfg, "I",
                        "Set Port Configuration if the value is valid per the"
                        " bits below; otherwise Get Port Configuration\n"
                        "\tBits 0-3 : 1 = DCBX Enable; 0 = DCBX Disable\n"
                        "\tBits 4-7 : 0 = no pause; 1 = std; 2 = ppm\n"
                        "\tBits 8-11: std pause cfg; 0 = xmt and rcv;"
                        " 1 = xmt only; 2 = rcv only\n"
                );

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "set_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_set_cam_search_mode, "I",
                        "Set CAM Search Mode\n"
                        "\t 1 = search mode internal\n"
                        "\t 2 = search mode auto\n");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "get_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_get_cam_search_mode, "I",
                        "Get CAM Search Mode\n"
                        "\t 1 = search mode internal\n"
                        "\t 2 = search mode auto\n");

        ha->hw.enable_9kb = 1;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "enable_9kb", CTLFLAG_RW, &ha->hw.enable_9kb,
                ha->hw.enable_9kb, "Enable 9Kbyte Buffers when MTU = 9000");

        ha->hw.enable_hw_lro = 1;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "enable_hw_lro", CTLFLAG_RW, &ha->hw.enable_hw_lro,
                ha->hw.enable_hw_lro, "Enable Hardware LRO; Default is true\n"
                "\t 1 : Hardware LRO if LRO is enabled\n"
                "\t 0 : Software LRO if LRO is enabled\n"
                "\t Any change requires ifconfig down/up to take effect\n"
                "\t Note that LRO may be turned off/on via ifconfig\n");

        ha->hw.mdump_active = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "minidump_active", CTLFLAG_RW, &ha->hw.mdump_active,
                ha->hw.mdump_active,
                "Minidump retrieval is Active");

        ha->hw.mdump_done = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "mdump_done", CTLFLAG_RW,
                &ha->hw.mdump_done, ha->hw.mdump_done,
                "Minidump has been done and available for retrieval");

        ha->hw.mdump_capture_mask = 0xF;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "minidump_capture_mask", CTLFLAG_RW,
                &ha->hw.mdump_capture_mask, ha->hw.mdump_capture_mask,
                "Minidump capture mask");
#ifdef QL_DBG

        ha->err_inject = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "err_inject",
                CTLFLAG_RW, &ha->err_inject, ha->err_inject,
                "Error to be injected\n"
                "\t\t\t 0: No Errors\n"
                "\t\t\t 1: rcv: rxb struct invalid\n"
                "\t\t\t 2: rcv: mp == NULL\n"
                "\t\t\t 3: lro: rxb struct invalid\n"
                "\t\t\t 4: lro: mp == NULL\n"
                "\t\t\t 5: rcv: num handles invalid\n"
                "\t\t\t 6: reg: indirect reg rd_wr failure\n"
                "\t\t\t 7: ocm: offchip memory rd_wr failure\n"
                "\t\t\t 8: mbx: mailbox command failure\n"
                "\t\t\t 9: heartbeat failure\n"
                "\t\t\t A: temperature failure\n"
                "\t\t\t 11: m_getcl or m_getjcl failure\n");
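        /*
         * Illustrative usage (assuming the device attached as ql0):
         *     sysctl dev.ql.0.err_inject=8    (simulate a mailbox failure)
         */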

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "peg_stop", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_stop_pegs, "I", "Peg Stop");

#endif /* #ifdef QL_DBG */

        ha->hw.user_pri_nic = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "user_pri_nic", CTLFLAG_RW, &ha->hw.user_pri_nic,
                ha->hw.user_pri_nic,
                "VLAN Tag User Priority for Normal Ethernet Packets");

        ha->hw.user_pri_iscsi = 4;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "user_pri_iscsi", CTLFLAG_RW, &ha->hw.user_pri_iscsi,
                ha->hw.user_pri_iscsi,
                "VLAN Tag User Priority for iSCSI Packets");

        qlnx_add_hw_stats_sysctls(ha);
        qlnx_add_drvr_stats_sysctls(ha);

        return;
}

void
ql_hw_link_status(qla_host_t *ha)
{
        device_printf(ha->pci_dev, "cable_oui\t\t 0x%08x\n", ha->hw.cable_oui);

        if (ha->hw.link_up) {
                device_printf(ha->pci_dev, "link Up\n");
        } else {
                device_printf(ha->pci_dev, "link Down\n");
        }

        if (ha->hw.flags.fduplex) {
                device_printf(ha->pci_dev, "Full Duplex\n");
        } else {
                device_printf(ha->pci_dev, "Half Duplex\n");
        }

        if (ha->hw.flags.autoneg) {
                device_printf(ha->pci_dev, "Auto Negotiation Enabled\n");
        } else {
                device_printf(ha->pci_dev, "Auto Negotiation Disabled\n");
        }

        switch (ha->hw.link_speed) {
        case 0x710:
                device_printf(ha->pci_dev, "link speed\t\t 10Gbps\n");
                break;

        case 0x3E8:
                device_printf(ha->pci_dev, "link speed\t\t 1Gbps\n");
                break;

        case 0x64:
                device_printf(ha->pci_dev, "link speed\t\t 100Mbps\n");
                break;

        default:
                device_printf(ha->pci_dev, "link speed\t\t Unknown\n");
                break;
        }

        switch (ha->hw.module_type) {

        case 0x01:
                device_printf(ha->pci_dev, "Module Type 10GBase-LRM\n");
                break;

        case 0x02:
                device_printf(ha->pci_dev, "Module Type 10GBase-LR\n");
                break;

        case 0x03:
                device_printf(ha->pci_dev, "Module Type 10GBase-SR\n");
                break;

        case 0x04:
                device_printf(ha->pci_dev,
                        "Module Type 10GE Passive Copper (Compliant) [%d m]\n",
                        ha->hw.cable_length);
                break;

        case 0x05:
                device_printf(ha->pci_dev, "Module Type 10GE Active"
                        " Limiting Copper (Compliant) [%d m]\n",
                        ha->hw.cable_length);
                break;

        case 0x06:
                device_printf(ha->pci_dev,
                        "Module Type 10GE Passive Copper"
                        " (Legacy, Best Effort) [%d m]\n",
                        ha->hw.cable_length);
                break;

        case 0x07:
                device_printf(ha->pci_dev, "Module Type 1000Base-SX\n");
                break;

        case 0x08:
                device_printf(ha->pci_dev, "Module Type 1000Base-LX\n");
                break;

        case 0x09:
                device_printf(ha->pci_dev, "Module Type 1000Base-CX\n");
                break;

        case 0x0A:
                device_printf(ha->pci_dev, "Module Type 1000Base-T\n");
                break;

        case 0x0B:
                device_printf(ha->pci_dev, "Module Type 1GE Passive Copper"
                        " (Legacy, Best Effort)\n");
                break;

        default:
                device_printf(ha->pci_dev, "Unknown Module Type 0x%x\n",
                        ha->hw.module_type);
                break;
        }

        if (ha->hw.link_faults == 1)
                device_printf(ha->pci_dev, "SFP Power Fault\n");
}

/*
 * Name: ql_free_dma
 * Function: Frees the DMA'able memory allocated in ql_alloc_dma()
 */
void
ql_free_dma(qla_host_t *ha)
{
        uint32_t i;

        if (ha->hw.dma_buf.flags.sds_ring) {
                for (i = 0; i < ha->hw.num_sds_rings; i++) {
                        ql_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]);
                }
                ha->hw.dma_buf.flags.sds_ring = 0;
        }

        if (ha->hw.dma_buf.flags.rds_ring) {
                for (i = 0; i < ha->hw.num_rds_rings; i++) {
                        ql_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]);
                }
                ha->hw.dma_buf.flags.rds_ring = 0;
        }

        if (ha->hw.dma_buf.flags.tx_ring) {
                ql_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring);
                ha->hw.dma_buf.flags.tx_ring = 0;
        }
        ql_minidump_free(ha);
}

/*
 * Name: ql_alloc_dma
 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
 */
int
ql_alloc_dma(qla_host_t *ha)
{
        device_t                dev;
        uint32_t                i, j, size, tx_ring_size;
        qla_hw_t                *hw;
        qla_hw_tx_cntxt_t       *tx_cntxt;
        uint8_t                 *vaddr;
        bus_addr_t              paddr;

        dev = ha->pci_dev;

        QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

        hw = &ha->hw;
        /*
         * Allocate Transmit Ring
         */
        tx_ring_size = (sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS);
        size = (tx_ring_size * ha->hw.num_tx_rings);

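        /*
         * One buffer holds all Tx rings back to back; the extra PAGE_SIZE
         * leaves room for the per-ring 32-bit consumer-index words that are
         * carved out of the same buffer right after the rings.
         */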
        hw->dma_buf.tx_ring.alignment = 8;
        hw->dma_buf.tx_ring.size = size + PAGE_SIZE;

        if (ql_alloc_dmabuf(ha, &hw->dma_buf.tx_ring)) {
                device_printf(dev, "%s: tx ring alloc failed\n", __func__);
                goto ql_alloc_dma_exit;
        }

        vaddr = (uint8_t *)hw->dma_buf.tx_ring.dma_b;
        paddr = hw->dma_buf.tx_ring.dma_addr;

        for (i = 0; i < ha->hw.num_tx_rings; i++) {
                tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

                tx_cntxt->tx_ring_base = (q80_tx_cmd_t *)vaddr;
                tx_cntxt->tx_ring_paddr = paddr;

                vaddr += tx_ring_size;
                paddr += tx_ring_size;
        }

        for (i = 0; i < ha->hw.num_tx_rings; i++) {
                tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

                tx_cntxt->tx_cons = (uint32_t *)vaddr;
                tx_cntxt->tx_cons_paddr = paddr;

                vaddr += sizeof (uint32_t);
                paddr += sizeof (uint32_t);
        }

        ha->hw.dma_buf.flags.tx_ring = 1;

        QL_DPRINT2(ha, (dev, "%s: tx_ring phys %p virt %p\n",
                __func__, (void *)(hw->dma_buf.tx_ring.dma_addr),
                hw->dma_buf.tx_ring.dma_b));
        /*
         * Allocate Receive Descriptor Rings
         */

        for (i = 0; i < hw->num_rds_rings; i++) {

                hw->dma_buf.rds_ring[i].alignment = 8;
                hw->dma_buf.rds_ring[i].size =
                        (sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS;

                if (ql_alloc_dmabuf(ha, &hw->dma_buf.rds_ring[i])) {
                        device_printf(dev, "%s: rds ring[%d] alloc failed\n",
                                __func__, i);

                        for (j = 0; j < i; j++)
                                ql_free_dmabuf(ha, &hw->dma_buf.rds_ring[j]);

                        goto ql_alloc_dma_exit;
                }
                QL_DPRINT4(ha, (dev, "%s: rx_ring[%d] phys %p virt %p\n",
                        __func__, i, (void *)(hw->dma_buf.rds_ring[i].dma_addr),
                        hw->dma_buf.rds_ring[i].dma_b));
        }

        hw->dma_buf.flags.rds_ring = 1;

        /*
         * Allocate Status Descriptor Rings
         */

        for (i = 0; i < hw->num_sds_rings; i++) {
                hw->dma_buf.sds_ring[i].alignment = 8;
                hw->dma_buf.sds_ring[i].size =
                        (sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS;

                if (ql_alloc_dmabuf(ha, &hw->dma_buf.sds_ring[i])) {
                        device_printf(dev, "%s: sds ring alloc failed\n",
                                __func__);

                        for (j = 0; j < i; j++)
                                ql_free_dmabuf(ha, &hw->dma_buf.sds_ring[j]);

                        goto ql_alloc_dma_exit;
                }
                QL_DPRINT4(ha, (dev, "%s: sds_ring[%d] phys %p virt %p\n",
                        __func__, i,
                        (void *)(hw->dma_buf.sds_ring[i].dma_addr),
                        hw->dma_buf.sds_ring[i].dma_b));
        }
        for (i = 0; i < hw->num_sds_rings; i++) {
                hw->sds[i].sds_ring_base =
                        (q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b;
        }

        hw->dma_buf.flags.sds_ring = 1;

        return 0;

ql_alloc_dma_exit:
        ql_free_dma(ha);
        return -1;
}

#define Q8_MBX_MSEC_DELAY       5000

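/*
 * Name: qla_mbx_cmd
 * Function: Issues a mailbox command and collects the firmware response.
 *     The handshake, as implemented below: wait for Q8_HOST_MBOX_CNTRL to
 *     drain to 0, write the n_hmbox request words starting at
 *     Q8_HOST_MBOX0, ring the firmware by setting Q8_HOST_MBOX_CNTRL to 1,
 *     poll Q8_FW_MBOX_CNTRL for completion (skipping 0x8000-series async
 *     event values seen in Q8_FW_MBOX0), copy out n_fwmbox response words,
 *     then ack by clearing Q8_FW_MBOX_CNTRL and the mailbox interrupt mask.
 *     Any timeout or injected failure flags qla_initiate_recovery.
 */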
static int
qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
        uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause)
{
        uint32_t i;
        uint32_t data;
        int ret = 0;

        if (QL_ERR_INJECT(ha, INJCT_MBX_CMD_FAILURE)) {
                ret = -3;
                ha->qla_initiate_recovery = 1;
                goto exit_qla_mbx_cmd;
        }

        if (no_pause)
                i = 1000;
        else
                i = Q8_MBX_MSEC_DELAY;

        while (i) {
                data = READ_REG32(ha, Q8_HOST_MBOX_CNTRL);
                if (data == 0)
                        break;
                if (no_pause) {
                        DELAY(1000);
                } else {
                        qla_mdelay(__func__, 1);
                }
                i--;
        }

        if (i == 0) {
                device_printf(ha->pci_dev, "%s: host_mbx_cntrl 0x%08x\n",
                        __func__, data);
                ret = -1;
                ha->qla_initiate_recovery = 1;
                goto exit_qla_mbx_cmd;
        }

        for (i = 0; i < n_hmbox; i++) {
                WRITE_REG32(ha, (Q8_HOST_MBOX0 + (i << 2)), *h_mbox);
                h_mbox++;
        }

        WRITE_REG32(ha, Q8_HOST_MBOX_CNTRL, 0x1);

        i = Q8_MBX_MSEC_DELAY;
        while (i) {
                data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);

                if ((data & 0x3) == 1) {
                        data = READ_REG32(ha, Q8_FW_MBOX0);
                        if ((data & 0xF000) != 0x8000)
                                break;
                }
                if (no_pause) {
                        DELAY(1000);
                } else {
                        qla_mdelay(__func__, 1);
                }
                i--;
        }
        if (i == 0) {
                device_printf(ha->pci_dev, "%s: fw_mbx_cntrl 0x%08x\n",
                        __func__, data);
                ret = -2;
                ha->qla_initiate_recovery = 1;
                goto exit_qla_mbx_cmd;
        }

        for (i = 0; i < n_fwmbox; i++) {
                *fw_mbox++ = READ_REG32(ha, (Q8_FW_MBOX0 + (i << 2)));
        }

        WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
        WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);

exit_qla_mbx_cmd:
        return (ret);
}

int
qla_get_nic_partition(qla_host_t *ha, uint32_t *supports_9kb,
        uint32_t *num_rcvq)
{
        uint32_t *mbox, err;
        device_t dev = ha->pci_dev;

        bzero(ha->hw.mbox, (sizeof (uint32_t) * Q8_NUM_MBOX));

        mbox = ha->hw.mbox;

        mbox[0] = Q8_MBX_GET_NIC_PARTITION | (0x2 << 16) | (0x2 << 29);

        if (qla_mbx_cmd(ha, mbox, 2, mbox, 19, 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }
        err = mbox[0] >> 25;

        if (supports_9kb != NULL) {
                if (mbox[16] & 0x80) /* bit 7 of mbox 16 */
                        *supports_9kb = 1;
                else
                        *supports_9kb = 0;
        }

        if (num_rcvq != NULL)
                *num_rcvq = ((mbox[6] >> 16) & 0xFFFF);

        if ((err != 1) && (err != 0)) {
                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
                return (-1);
        }
        return 0;
}

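/*
 * Name: qla_config_intr_cntxt
 * Function: Creates (create != 0) or deletes the interrupt contexts for
 *     num_intrs MSI-X vectors starting at start_idx; on create, the
 *     firmware-assigned interrupt ids and sources are saved in
 *     ha->hw.intr_id[] / ha->hw.intr_src[] for later use.
 */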
static int
qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx, uint32_t num_intrs,
        uint32_t create)
{
        uint32_t i, err;
        device_t dev = ha->pci_dev;
        q80_config_intr_t *c_intr;
        q80_config_intr_rsp_t *c_intr_rsp;

        c_intr = (q80_config_intr_t *)ha->hw.mbox;
        bzero(c_intr, (sizeof (q80_config_intr_t)));

        c_intr->opcode = Q8_MBX_CONFIG_INTR;

        c_intr->count_version = (sizeof (q80_config_intr_t) >> 2);
        c_intr->count_version |= Q8_MBX_CMD_VERSION;

        c_intr->nentries = num_intrs;

        for (i = 0; i < num_intrs; i++) {
                if (create) {
                        c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_CREATE;
                        c_intr->intr[i].msix_index = start_idx + 1 + i;
                } else {
                        c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_DELETE;
                        c_intr->intr[i].msix_index =
                                ha->hw.intr_id[(start_idx + i)];
                }

                c_intr->intr[i].cmd_type |= Q8_MBX_CONFIG_INTR_TYPE_MSI_X;
        }

        if (qla_mbx_cmd(ha, (uint32_t *)c_intr,
                (sizeof (q80_config_intr_t) >> 2),
                ha->hw.mbox, (sizeof (q80_config_intr_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }

        c_intr_rsp = (q80_config_intr_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(c_intr_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed1 [0x%08x, %d]\n", __func__, err,
                        c_intr_rsp->nentries);

                for (i = 0; i < c_intr_rsp->nentries; i++) {
                        device_printf(dev, "%s: [%d]:[0x%x 0x%x 0x%x]\n",
                                __func__, i,
                                c_intr_rsp->intr[i].status,
                                c_intr_rsp->intr[i].intr_id,
                                c_intr_rsp->intr[i].intr_src);
                }

                return (-1);
        }

        for (i = 0; ((i < num_intrs) && create); i++) {
                if (!c_intr_rsp->intr[i].status) {
                        ha->hw.intr_id[(start_idx + i)] =
                                c_intr_rsp->intr[i].intr_id;
                        ha->hw.intr_src[(start_idx + i)] =
                                c_intr_rsp->intr[i].intr_src;
                }
        }

        return (0);
}

/*
 * Name: qla_config_rss
 * Function: Configure RSS for the context/interface.
 */
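/*
 * Fixed 40-byte hash key handed to the firmware for RSS (Toeplitz-style
 * hashing), stored as five 64-bit words in the order the
 * Q8_MBX_CONFIG_RSS command below consumes them.
 */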
1443 static const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL,
1444                         0x8030f20c77cb2da3ULL,
1445                         0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
1446                         0x255b0ec26d5a56daULL };
1447
1448 static int
1449 qla_config_rss(qla_host_t *ha, uint16_t cntxt_id)
1450 {
1451         q80_config_rss_t        *c_rss;
1452         q80_config_rss_rsp_t    *c_rss_rsp;
1453         uint32_t                err, i;
1454         device_t                dev = ha->pci_dev;
1455
1456         c_rss = (q80_config_rss_t *)ha->hw.mbox;
1457         bzero(c_rss, (sizeof (q80_config_rss_t)));
1458
1459         c_rss->opcode = Q8_MBX_CONFIG_RSS;
1460
1461         c_rss->count_version = (sizeof (q80_config_rss_t) >> 2);
1462         c_rss->count_version |= Q8_MBX_CMD_VERSION;
1463
1464         c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP_IP |
1465                                 Q8_MBX_RSS_HASH_TYPE_IPV6_TCP_IP);
1466         //c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP |
1467         //                      Q8_MBX_RSS_HASH_TYPE_IPV6_TCP);
1468
1469         c_rss->flags = Q8_MBX_RSS_FLAGS_ENABLE_RSS;
1470         c_rss->flags |= Q8_MBX_RSS_FLAGS_USE_IND_TABLE;
1471
1472         c_rss->indtbl_mask = Q8_MBX_RSS_INDTBL_MASK;
1473
1474         c_rss->indtbl_mask |= Q8_MBX_RSS_FLAGS_MULTI_RSS_VALID;
1475         c_rss->flags |= Q8_MBX_RSS_FLAGS_TYPE_CRSS;
1476
1477         c_rss->cntxt_id = cntxt_id;
1478
1479         for (i = 0; i < 5; i++) {
1480                 c_rss->rss_key[i] = rss_key[i];
1481         }
1482
1483         if (qla_mbx_cmd(ha, (uint32_t *)c_rss,
1484                 (sizeof (q80_config_rss_t) >> 2),
1485                 ha->hw.mbox, (sizeof(q80_config_rss_rsp_t) >> 2), 0)) {
1486                 device_printf(dev, "%s: failed0\n", __func__);
1487                 return (-1);
1488         }
1489         c_rss_rsp = (q80_config_rss_rsp_t *)ha->hw.mbox;
1490
1491         err = Q8_MBX_RSP_STATUS(c_rss_rsp->regcnt_status);
1492
1493         if (err) {
1494                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1495                 return (-1);
1496         }
1497         return (0);
1498 }
1499
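/*
 * Name: qla_set_rss_ind_table
 * Function: Programs the RSS Indirection Table entries in the range
 *      [start_idx, start_idx + count - 1] for the given context.
 */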
1500 static int
1501 qla_set_rss_ind_table(qla_host_t *ha, uint32_t start_idx, uint32_t count,
1502         uint16_t cntxt_id, uint8_t *ind_table)
1503 {
1504         q80_config_rss_ind_table_t      *c_rss_ind;
1505         q80_config_rss_ind_table_rsp_t  *c_rss_ind_rsp;
1506         uint32_t                        err;
1507         device_t                        dev = ha->pci_dev;
1508
1509         if ((count > Q8_RSS_IND_TBL_SIZE) ||
1510                 ((start_idx + count - 1) > Q8_RSS_IND_TBL_MAX_IDX)) {
1511                 device_printf(dev, "%s: illegal start_idx/count [%d, %d]\n",
1512                         __func__, start_idx, count);
1513                 return (-1);
1514         }
1515
1516         c_rss_ind = (q80_config_rss_ind_table_t *)ha->hw.mbox;
1517         bzero(c_rss_ind, sizeof (q80_config_rss_ind_table_t));
1518
1519         c_rss_ind->opcode = Q8_MBX_CONFIG_RSS_TABLE;
1520         c_rss_ind->count_version = (sizeof (q80_config_rss_ind_table_t) >> 2);
1521         c_rss_ind->count_version |= Q8_MBX_CMD_VERSION;
1522
1523         c_rss_ind->start_idx = start_idx;
1524         c_rss_ind->end_idx = start_idx + count - 1;
1525         c_rss_ind->cntxt_id = cntxt_id;
1526         bcopy(ind_table, c_rss_ind->ind_table, count);
1527
1528         if (qla_mbx_cmd(ha, (uint32_t *)c_rss_ind,
1529                 (sizeof (q80_config_rss_ind_table_t) >> 2), ha->hw.mbox,
1530                 (sizeof(q80_config_rss_ind_table_rsp_t) >> 2), 0)) {
1531                 device_printf(dev, "%s: failed0\n", __func__);
1532                 return (-1);
1533         }
1534
1535         c_rss_ind_rsp = (q80_config_rss_ind_table_rsp_t *)ha->hw.mbox;
1536         err = Q8_MBX_RSP_STATUS(c_rss_ind_rsp->regcnt_status);
1537
1538         if (err) {
1539                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1540                 return (-1);
1541         }
1542         return (0);
1543 }
1544
1545 /*
1546  * Name: qla_config_intr_coalesce
1547  * Function: Configure Interrupt Coalescing.
1548  */
1549 static int
1550 qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable,
1551         int rcv)
1552 {
1553         q80_config_intr_coalesc_t       *intrc;
1554         q80_config_intr_coalesc_rsp_t   *intrc_rsp;
1555         uint32_t                        err, i;
1556         device_t                        dev = ha->pci_dev;
1557         
1558         intrc = (q80_config_intr_coalesc_t *)ha->hw.mbox;
1559         bzero(intrc, (sizeof (q80_config_intr_coalesc_t)));
1560
1561         intrc->opcode = Q8_MBX_CONFIG_INTR_COALESCE;
1562         intrc->count_version = (sizeof (q80_config_intr_coalesc_t) >> 2);
1563         intrc->count_version |= Q8_MBX_CMD_VERSION;
1564
1565         if (rcv) {
1566                 intrc->flags = Q8_MBX_INTRC_FLAGS_RCV;
1567                 intrc->max_pkts = ha->hw.rcv_intr_coalesce & 0xFFFF;
1568                 intrc->max_mswait = (ha->hw.rcv_intr_coalesce >> 16) & 0xFFFF;
1569         } else {
1570                 intrc->flags = Q8_MBX_INTRC_FLAGS_XMT;
1571                 intrc->max_pkts = ha->hw.xmt_intr_coalesce & 0xFFFF;
1572                 intrc->max_mswait = (ha->hw.xmt_intr_coalesce >> 16) & 0xFFFF;
1573         }
1574
1575         intrc->cntxt_id = cntxt_id;
1576
1577         if (tenable) {
1578                 intrc->flags |= Q8_MBX_INTRC_FLAGS_PERIODIC;
1579                 intrc->timer_type = Q8_MBX_INTRC_TIMER_PERIODIC;
1580
1581                 for (i = 0; i < ha->hw.num_sds_rings; i++) {
1582                         intrc->sds_ring_mask |= (1 << i);
1583                 }
1584                 intrc->ms_timeout = 1000;
1585         }
1586
1587         if (qla_mbx_cmd(ha, (uint32_t *)intrc,
1588                 (sizeof (q80_config_intr_coalesc_t) >> 2),
1589                 ha->hw.mbox, (sizeof(q80_config_intr_coalesc_rsp_t) >> 2), 0)) {
1590                 device_printf(dev, "%s: failed0\n", __func__);
1591                 return (-1);
1592         }
1593         intrc_rsp = (q80_config_intr_coalesc_rsp_t *)ha->hw.mbox;
1594
1595         err = Q8_MBX_RSP_STATUS(intrc_rsp->regcnt_status);
1596
1597         if (err) {
1598                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1599                 return (-1);
1600         }
1601         
1602         return (0);
1603 }
1604
1605
1606 /*
1607  * Name: qla_config_mac_addr
1608  * Function: binds a MAC address to the context/interface.
1609  *      Can be unicast, multicast or broadcast.
1610  */
1611 static int
1612 qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac,
1613         uint32_t num_mac)
1614 {
1615         q80_config_mac_addr_t           *cmac;
1616         q80_config_mac_addr_rsp_t       *cmac_rsp;
1617         uint32_t                        err;
1618         device_t                        dev = ha->pci_dev;
1619         int                             i;
1620         uint8_t                         *mac_cpy = mac_addr;
1621
1622         if (num_mac > Q8_MAX_MAC_ADDRS) {
1623                 device_printf(dev, "%s: %s num_mac [0x%x] > Q8_MAX_MAC_ADDRS\n",
1624                         __func__, (add_mac ? "Add" : "Del"), num_mac);
1625                 return (-1);
1626         }
1627
1628         cmac = (q80_config_mac_addr_t *)ha->hw.mbox;
1629         bzero(cmac, (sizeof (q80_config_mac_addr_t)));
1630
1631         cmac->opcode = Q8_MBX_CONFIG_MAC_ADDR;
1632         cmac->count_version = sizeof (q80_config_mac_addr_t) >> 2;
1633         cmac->count_version |= Q8_MBX_CMD_VERSION;
1634
1635         if (add_mac) 
1636                 cmac->cmd = Q8_MBX_CMAC_CMD_ADD_MAC_ADDR;
1637         else
1638                 cmac->cmd = Q8_MBX_CMAC_CMD_DEL_MAC_ADDR;
1639                 
1640         cmac->cmd |= Q8_MBX_CMAC_CMD_CAM_INGRESS;
1641
1642         cmac->nmac_entries = num_mac;
1643         cmac->cntxt_id = ha->hw.rcv_cntxt_id;
1644
1645         for (i = 0; i < num_mac; i++) {
1646                 bcopy(mac_addr, cmac->mac_addr[i].addr, Q8_ETHER_ADDR_LEN); 
1647                 mac_addr = mac_addr + ETHER_ADDR_LEN;
1648         }
1649
1650         if (qla_mbx_cmd(ha, (uint32_t *)cmac,
1651                 (sizeof (q80_config_mac_addr_t) >> 2),
1652                 ha->hw.mbox, (sizeof(q80_config_mac_addr_rsp_t) >> 2), 1)) {
1653                 device_printf(dev, "%s: %s failed0\n", __func__,
1654                         (add_mac ? "Add" : "Del"));
1655                 return (-1);
1656         }
1657         cmac_rsp = (q80_config_mac_addr_rsp_t *)ha->hw.mbox;
1658
1659         err = Q8_MBX_RSP_STATUS(cmac_rsp->regcnt_status);
1660
1661         if (err) {
1662                 device_printf(dev, "%s: %s failed1 [0x%08x]\n", __func__,
1663                         (add_mac ? "Add" : "Del"), err);
1664                 for (i = 0; i < num_mac; i++) {
1665                         device_printf(dev, "%s: %02x:%02x:%02x:%02x:%02x:%02x\n",
1666                                 __func__, mac_cpy[0], mac_cpy[1], mac_cpy[2],
1667                                 mac_cpy[3], mac_cpy[4], mac_cpy[5]);
1668                         mac_cpy += ETHER_ADDR_LEN;
1669                 }
1670                 return (-1);
1671         }
1672         
1673         return (0);
1674 }
1675
1676
1677 /*
1678  * Name: qla_set_mac_rcv_mode
1679  * Function: Enable/Disable All Multicast and Promiscuous Modes.
1680  */
1681 static int
1682 qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode)
1683 {
1684         q80_config_mac_rcv_mode_t       *rcv_mode;
1685         uint32_t                        err;
1686         q80_config_mac_rcv_mode_rsp_t   *rcv_mode_rsp;
1687         device_t                        dev = ha->pci_dev;
1688
1689         rcv_mode = (q80_config_mac_rcv_mode_t *)ha->hw.mbox;
1690         bzero(rcv_mode, (sizeof (q80_config_mac_rcv_mode_t)));
1691
1692         rcv_mode->opcode = Q8_MBX_CONFIG_MAC_RX_MODE;
1693         rcv_mode->count_version = sizeof (q80_config_mac_rcv_mode_t) >> 2;
1694         rcv_mode->count_version |= Q8_MBX_CMD_VERSION;
1695
1696         rcv_mode->mode = mode;
1697
1698         rcv_mode->cntxt_id = ha->hw.rcv_cntxt_id;
1699
1700         if (qla_mbx_cmd(ha, (uint32_t *)rcv_mode,
1701                 (sizeof (q80_config_mac_rcv_mode_t) >> 2),
1702                 ha->hw.mbox, (sizeof(q80_config_mac_rcv_mode_rsp_t) >> 2), 1)) {
1703                 device_printf(dev, "%s: failed0\n", __func__);
1704                 return (-1);
1705         }
1706         rcv_mode_rsp = (q80_config_mac_rcv_mode_rsp_t *)ha->hw.mbox;
1707
1708         err = Q8_MBX_RSP_STATUS(rcv_mode_rsp->regcnt_status);
1709
1710         if (err) {
1711                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1712                 return (-1);
1713         }
1714         
1715         return (0);
1716 }
1717
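/*
 * ql_set_promisc/qla_reset_promisc and ql_set_allmulti/qla_reset_allmulti
 * toggle the Promiscuous and All Multicast bits in the cached receive mode
 * and push the updated mode to the hardware.
 */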
1718 int
1719 ql_set_promisc(qla_host_t *ha)
1720 {
1721         int ret;
1722
1723         ha->hw.mac_rcv_mode |= Q8_MBX_MAC_RCV_PROMISC_ENABLE;
1724         ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1725         return (ret);
1726 }
1727
1728 void
1729 qla_reset_promisc(qla_host_t *ha)
1730 {
1731         ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_RCV_PROMISC_ENABLE;
1732         (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1733 }
1734
1735 int
1736 ql_set_allmulti(qla_host_t *ha)
1737 {
1738         int ret;
1739
1740         ha->hw.mac_rcv_mode |= Q8_MBX_MAC_ALL_MULTI_ENABLE;
1741         ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1742         return (ret);
1743 }
1744
1745 void
1746 qla_reset_allmulti(qla_host_t *ha)
1747 {
1748         ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_ALL_MULTI_ENABLE;
1749         (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1750 }
1751
1752 /*
1753  * Name: ql_set_max_mtu
1754  * Function:
1755  *      Sets the maximum transfer unit size for the specified rcv context.
1756  */
1757 int
1758 ql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id)
1759 {
1760         device_t                dev;
1761         q80_set_max_mtu_t       *max_mtu;
1762         q80_set_max_mtu_rsp_t   *max_mtu_rsp;
1763         uint32_t                err;
1764
1765         dev = ha->pci_dev;
1766
1767         max_mtu = (q80_set_max_mtu_t *)ha->hw.mbox;
1768         bzero(max_mtu, (sizeof (q80_set_max_mtu_t)));
1769
1770         max_mtu->opcode = Q8_MBX_SET_MAX_MTU;
1771         max_mtu->count_version = (sizeof (q80_set_max_mtu_t) >> 2);
1772         max_mtu->count_version |= Q8_MBX_CMD_VERSION;
1773
1774         max_mtu->cntxt_id = cntxt_id;
1775         max_mtu->mtu = mtu;
1776
1777         if (qla_mbx_cmd(ha, (uint32_t *)max_mtu,
1778                 (sizeof (q80_set_max_mtu_t) >> 2),
1779                 ha->hw.mbox, (sizeof (q80_set_max_mtu_rsp_t) >> 2), 1)) {
1780                 device_printf(dev, "%s: failed\n", __func__);
1781                 return (-1);
1782         }
1783
1784         max_mtu_rsp = (q80_set_max_mtu_rsp_t *)ha->hw.mbox;
1785
1786         err = Q8_MBX_RSP_STATUS(max_mtu_rsp->regcnt_status);
1787
1788         if (err) {
1789                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1790         }
1791
1792         return (0);
1793 }
1794
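/*
 * Name: qla_link_event_req
 * Function: Asks the firmware to deliver asynchronous link event
 *      notifications for the specified rcv context.
 */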
1795 static int
1796 qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id)
1797 {
1798         device_t                dev;
1799         q80_link_event_t        *lnk;
1800         q80_link_event_rsp_t    *lnk_rsp;
1801         uint32_t                err;
1802
1803         dev = ha->pci_dev;
1804
1805         lnk = (q80_link_event_t *)ha->hw.mbox;
1806         bzero(lnk, (sizeof (q80_link_event_t)));
1807
1808         lnk->opcode = Q8_MBX_LINK_EVENT_REQ;
1809         lnk->count_version = (sizeof (q80_link_event_t) >> 2);
1810         lnk->count_version |= Q8_MBX_CMD_VERSION;
1811
1812         lnk->cntxt_id = cntxt_id;
1813         lnk->cmd = Q8_LINK_EVENT_CMD_ENABLE_ASYNC;
1814
1815         if (qla_mbx_cmd(ha, (uint32_t *)lnk, (sizeof (q80_link_event_t) >> 2),
1816                 ha->hw.mbox, (sizeof (q80_link_event_rsp_t) >> 2), 0)) {
1817                 device_printf(dev, "%s: failed\n", __func__);
1818                 return (-1);
1819         }
1820
1821         lnk_rsp = (q80_link_event_rsp_t *)ha->hw.mbox;
1822
1823         err = Q8_MBX_RSP_STATUS(lnk_rsp->regcnt_status);
1824
1825         if (err) {
1826                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1827         }
1828
1829         return (0);
1830 }
1831
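/*
 * Name: qla_config_fw_lro
 * Function: Enables firmware-assisted (hardware) LRO for IPv4 and IPv6
 *      flows on the specified rcv context.
 */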
1832 static int
1833 qla_config_fw_lro(qla_host_t *ha, uint16_t cntxt_id)
1834 {
1835         device_t                dev;
1836         q80_config_fw_lro_t     *fw_lro;
1837         q80_config_fw_lro_rsp_t *fw_lro_rsp;
1838         uint32_t                err;
1839
1840         dev = ha->pci_dev;
1841
1842         fw_lro = (q80_config_fw_lro_t *)ha->hw.mbox;
1843         bzero(fw_lro, sizeof(q80_config_fw_lro_t));
1844
1845         fw_lro->opcode = Q8_MBX_CONFIG_FW_LRO;
1846         fw_lro->count_version = (sizeof (q80_config_fw_lro_t) >> 2);
1847         fw_lro->count_version |= Q8_MBX_CMD_VERSION;
1848
1849         fw_lro->flags |= Q8_MBX_FW_LRO_IPV4 | Q8_MBX_FW_LRO_IPV4_WO_DST_IP_CHK;
1850         fw_lro->flags |= Q8_MBX_FW_LRO_IPV6 | Q8_MBX_FW_LRO_IPV6_WO_DST_IP_CHK;
1851
1852         fw_lro->cntxt_id = cntxt_id;
1853
1854         if (qla_mbx_cmd(ha, (uint32_t *)fw_lro,
1855                 (sizeof (q80_config_fw_lro_t) >> 2),
1856                 ha->hw.mbox, (sizeof (q80_config_fw_lro_rsp_t) >> 2), 0)) {
1857                 device_printf(dev, "%s: failed\n", __func__);
1858                 return (-1);
1859         }
1860
1861         fw_lro_rsp = (q80_config_fw_lro_rsp_t *)ha->hw.mbox;
1862
1863         err = Q8_MBX_RSP_STATUS(fw_lro_rsp->regcnt_status);
1864
1865         if (err) {
1866                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1867         }
1868
1869         return (0);
1870 }
1871
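/*
 * Name: qla_set_cam_search_mode
 * Function: Sets the CAM search mode via the HW Config mailbox command.
 */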
1872 static int
1873 qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode)
1874 {
1875         device_t                dev;
1876         q80_hw_config_t         *hw_config;
1877         q80_hw_config_rsp_t     *hw_config_rsp;
1878         uint32_t                err;
1879
1880         dev = ha->pci_dev;
1881
1882         hw_config = (q80_hw_config_t *)ha->hw.mbox;
1883         bzero(hw_config, sizeof (q80_hw_config_t));
1884
1885         hw_config->opcode = Q8_MBX_HW_CONFIG;
1886         hw_config->count_version = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE_COUNT;
1887         hw_config->count_version |= Q8_MBX_CMD_VERSION;
1888
1889         hw_config->cmd = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE;
1890
1891         hw_config->u.set_cam_search_mode.mode = search_mode;
1892
1893         if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
1894                 (sizeof (q80_hw_config_t) >> 2),
1895                 ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
1896                 device_printf(dev, "%s: failed\n", __func__);
1897                 return (-1);
1898         }
1899         hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;
1900
1901         err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);
1902
1903         if (err) {
1904                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1905         }
1906
1907         return (0);
1908 }
1909
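/*
 * Name: qla_get_cam_search_mode
 * Function: Reads back and prints the current CAM search mode.
 */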
1910 static int
1911 qla_get_cam_search_mode(qla_host_t *ha)
1912 {
1913         device_t                dev;
1914         q80_hw_config_t         *hw_config;
1915         q80_hw_config_rsp_t     *hw_config_rsp;
1916         uint32_t                err;
1917
1918         dev = ha->pci_dev;
1919
1920         hw_config = (q80_hw_config_t *)ha->hw.mbox;
1921         bzero(hw_config, sizeof (q80_hw_config_t));
1922
1923         hw_config->opcode = Q8_MBX_HW_CONFIG;
1924         hw_config->count_version = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE_COUNT;
1925         hw_config->count_version |= Q8_MBX_CMD_VERSION;
1926
1927         hw_config->cmd = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE;
1928
1929         if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
1930                 (sizeof (q80_hw_config_t) >> 2),
1931                 ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
1932                 device_printf(dev, "%s: failed\n", __func__);
1933                 return (-1);
1934         }
1935         hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;
1936
1937         err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);
1938
1939         if (err) {
1940                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1941         } else {
1942                 device_printf(dev, "%s: cam search mode [0x%08x]\n", __func__,
1943                         hw_config_rsp->u.get_cam_search_mode.mode);
1944         }
1945
1946         return (0);
1947 }
1948
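/*
 * Name: qla_get_hw_stats
 * Function: Issues the Get Statistics mailbox command; the response is
 *      left in ha->hw.mbox for the caller to interpret.
 */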
1949 static int
1950 qla_get_hw_stats(qla_host_t *ha, uint32_t cmd, uint32_t rsp_size)
1951 {
1952         device_t                dev;
1953         q80_get_stats_t         *stat;
1954         q80_get_stats_rsp_t     *stat_rsp;
1955         uint32_t                err;
1956
1957         dev = ha->pci_dev;
1958
1959         stat = (q80_get_stats_t *)ha->hw.mbox;
1960         bzero(stat, (sizeof (q80_get_stats_t)));
1961
1962         stat->opcode = Q8_MBX_GET_STATS;
1963         stat->count_version = 2;
1964         stat->count_version |= Q8_MBX_CMD_VERSION;
1965
1966         stat->cmd = cmd;
1967
1968         if (qla_mbx_cmd(ha, (uint32_t *)stat, 2,
1969                 ha->hw.mbox, (rsp_size >> 2), 0)) {
1970                 device_printf(dev, "%s: failed\n", __func__);
1971                 return (-1);
1972         }
1973
1974         stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
1975
1976         err = Q8_MBX_RSP_STATUS(stat_rsp->regcnt_status);
1977
1978         if (err) {
1979                 return (-1);
1980         }
1981
1982         return (0);
1983 }
1984
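/*
 * Name: ql_get_stats
 * Function: Collects MAC, Receive and per-ring Transmit statistics from
 *      the hardware and caches them in the softc.
 */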
1985 void
1986 ql_get_stats(qla_host_t *ha)
1987 {
1988         q80_get_stats_rsp_t     *stat_rsp;
1989         q80_mac_stats_t         *mstat;
1990         q80_xmt_stats_t         *xstat;
1991         q80_rcv_stats_t         *rstat;
1992         uint32_t                cmd;
1993         int                     i;
1994         struct ifnet *ifp = ha->ifp;
1995
1996         if (ifp == NULL)
1997                 return;
1998
1999         if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) != 0) {
2000                 device_printf(ha->pci_dev, "%s: failed\n", __func__);
2001                 return;
2002         }
2003
2004         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2005                 QLA_UNLOCK(ha, __func__);
2006                 return;
2007         }
2008
2009         stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
2010         /*
2011          * Get MAC Statistics
2012          */
2013         cmd = Q8_GET_STATS_CMD_TYPE_MAC;
2014 //      cmd |= Q8_GET_STATS_CMD_CLEAR;
2015
2016         cmd |= ((ha->pci_func & 0x1) << 16);
2017
2018         if (ha->qla_watchdog_pause)
2019                 goto ql_get_stats_exit;
2020
2021         if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
2022                 mstat = (q80_mac_stats_t *)&stat_rsp->u.mac;
2023                 bcopy(mstat, &ha->hw.mac, sizeof(q80_mac_stats_t));
2024         } else {
2025                 device_printf(ha->pci_dev, "%s: mac failed [0x%08x]\n",
2026                         __func__, ha->hw.mbox[0]);
2027         }
2028         /*
2029          * Get RCV Statistics
2030          */
2031         cmd = Q8_GET_STATS_CMD_RCV | Q8_GET_STATS_CMD_TYPE_CNTXT;
2032 //      cmd |= Q8_GET_STATS_CMD_CLEAR;
2033         cmd |= (ha->hw.rcv_cntxt_id << 16);
2034
2035         if (ha->qla_watchdog_pause)
2036                 goto ql_get_stats_exit;
2037
2038         if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
2039                 rstat = (q80_rcv_stats_t *)&stat_rsp->u.rcv;
2040                 bcopy(rstat, &ha->hw.rcv, sizeof(q80_rcv_stats_t));
2041         } else {
2042                 device_printf(ha->pci_dev, "%s: rcv failed [0x%08x]\n",
2043                         __func__, ha->hw.mbox[0]);
2044         }
2045
2046         if (ha->qla_watchdog_pause)
2047                 goto ql_get_stats_exit;
2048         /*
2049          * Get XMT Statistics
2050          */
2051         for (i = 0 ; ((i < ha->hw.num_tx_rings) && (!ha->qla_watchdog_pause));
2052                 i++) {
2053                 cmd = Q8_GET_STATS_CMD_XMT | Q8_GET_STATS_CMD_TYPE_CNTXT;
2054 //              cmd |= Q8_GET_STATS_CMD_CLEAR;
2055                 cmd |= (ha->hw.tx_cntxt[i].tx_cntxt_id << 16);
2056
2057                 if (qla_get_hw_stats(ha, cmd, sizeof(q80_get_stats_rsp_t))
2058                         == 0) {
2059                         xstat = (q80_xmt_stats_t *)&stat_rsp->u.xmt;
2060                         bcopy(xstat, &ha->hw.xmt[i], sizeof(q80_xmt_stats_t));
2061                 } else {
2062                         device_printf(ha->pci_dev, "%s: xmt failed [0x%08x]\n",
2063                                 __func__, ha->hw.mbox[0]);
2064                 }
2065         }
2066
2067 ql_get_stats_exit:
2068         QLA_UNLOCK(ha, __func__);
2069
2070         return;
2071 }
2072
2073 /*
2074  * Name: qla_tx_tso
2075  * Function: Checks if the packet to be transmitted is a candidate for
2076  *      Large TCP Segment Offload. If yes, the appropriate fields in the Tx
2077  *      Ring Structure are plugged in.
2078  */
2079 static int
2080 qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr)
2081 {
2082         struct ether_vlan_header *eh;
2083         struct ip *ip = NULL;
2084         struct ip6_hdr *ip6 = NULL;
2085         struct tcphdr *th = NULL;
2086         uint32_t ehdrlen,  hdrlen, ip_hlen, tcp_hlen, tcp_opt_off;
2087         uint16_t etype, opcode, offload = 1;
2088         device_t dev;
2089
2090         dev = ha->pci_dev;
2091
2092
2093         eh = mtod(mp, struct ether_vlan_header *);
2094
2095         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2096                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2097                 etype = ntohs(eh->evl_proto);
2098         } else {
2099                 ehdrlen = ETHER_HDR_LEN;
2100                 etype = ntohs(eh->evl_encap_proto);
2101         }
2102
2103         hdrlen = 0;
2104
2105         switch (etype) {
2106                 case ETHERTYPE_IP:
2107
2108                         tcp_opt_off = ehdrlen + sizeof(struct ip) +
2109                                         sizeof(struct tcphdr);
2110
2111                         if (mp->m_len < tcp_opt_off) {
2112                                 m_copydata(mp, 0, tcp_opt_off, hdr);
2113                                 ip = (struct ip *)(hdr + ehdrlen);
2114                         } else {
2115                                 ip = (struct ip *)(mp->m_data + ehdrlen);
2116                         }
2117
2118                         ip_hlen = ip->ip_hl << 2;
2119                         opcode = Q8_TX_CMD_OP_XMT_TCP_LSO;
2120
2121                                 
2122                         if ((ip->ip_p != IPPROTO_TCP) ||
2123                                 (ip_hlen != sizeof (struct ip))){
2124                                 /* IP Options are not supported */
2125
2126                                 offload = 0;
2127                         } else
2128                                 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
2129
2130                 break;
2131
2132                 case ETHERTYPE_IPV6:
2133
2134                         tcp_opt_off = ehdrlen + sizeof(struct ip6_hdr) +
2135                                         sizeof (struct tcphdr);
2136
2137                         if (mp->m_len < tcp_opt_off) {
2138                                 m_copydata(mp, 0, tcp_opt_off, hdr);
2139                                 ip6 = (struct ip6_hdr *)(hdr + ehdrlen);
2140                         } else {
2141                                 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2142                         }
2143
2144                         ip_hlen = sizeof(struct ip6_hdr);
2145                         opcode = Q8_TX_CMD_OP_XMT_TCP_LSO_IPV6;
2146
2147                         if (ip6->ip6_nxt != IPPROTO_TCP) {
2148                                 //device_printf(dev, "%s: ipv6\n", __func__);
2149                                 offload = 0;
2150                         } else
2151                                 th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
2152                 break;
2153
2154                 default:
2155                         QL_DPRINT8(ha, (dev, "%s: type!=ip\n", __func__));
2156                         offload = 0;
2157                 break;
2158         }
2159
2160         if (!offload)
2161                 return (-1);
2162
2163         tcp_hlen = th->th_off << 2;
2164         hdrlen = ehdrlen + ip_hlen + tcp_hlen;
2165
2166         if (mp->m_len < hdrlen) {
2167                 if (mp->m_len < tcp_opt_off) {
2168                         if (tcp_hlen > sizeof(struct tcphdr)) {
2169                                 m_copydata(mp, tcp_opt_off,
2170                                         (tcp_hlen - sizeof(struct tcphdr)),
2171                                         &hdr[tcp_opt_off]);
2172                         }
2173                 } else {
2174                         m_copydata(mp, 0, hdrlen, hdr);
2175                 }
2176         }
2177
2178         tx_cmd->mss = mp->m_pkthdr.tso_segsz;
2179
2180         tx_cmd->flags_opcode = opcode ;
2181         tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;
2182         tx_cmd->total_hdr_len = hdrlen;
2183
2184         /* Multicast: least significant bit of the first address octet is set */
2185         if (eh->evl_dhost[0] & 0x01) {
2186                 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST;
2187         }
2188
2189         if (mp->m_len < hdrlen) {
2190                 printf("%s: hdrlen %d spans mbufs\n", __func__, hdrlen);
2191                 return (1); /* caller transmits using the header copied above */
2192         }
2193
2194         return (0);
2195 }
2196
2197 /*
2198  * Name: qla_tx_chksum
2199  * Function: Checks if the packet to be transmitted is a candidate for
2200  *      TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx
2201  *      Ring Structure are plugged in.
2202  */
2203 static int
2204 qla_tx_chksum(qla_host_t *ha, struct mbuf *mp, uint32_t *op_code,
2205         uint32_t *tcp_hdr_off)
2206 {
2207         struct ether_vlan_header *eh;
2208         struct ip *ip;
2209         struct ip6_hdr *ip6;
2210         uint32_t ehdrlen, ip_hlen;
2211         uint16_t etype, opcode, offload = 1;
2212         device_t dev;
2213         uint8_t buf[sizeof(struct ip6_hdr)];
2214
2215         dev = ha->pci_dev;
2216
2217         *op_code = 0;
2218
2219         if ((mp->m_pkthdr.csum_flags &
2220                 (CSUM_TCP|CSUM_UDP|CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) == 0)
2221                 return (-1);
2222
2223         eh = mtod(mp, struct ether_vlan_header *);
2224
2225         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2226                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2227                 etype = ntohs(eh->evl_proto);
2228         } else {
2229                 ehdrlen = ETHER_HDR_LEN;
2230                 etype = ntohs(eh->evl_encap_proto);
2231         }
2232
2233                 
2234         switch (etype) {
2235                 case ETHERTYPE_IP:
2236                         ip = (struct ip *)(mp->m_data + ehdrlen);
2237
2238                         ip_hlen = sizeof (struct ip);
2239
2240                         if (mp->m_len < (ehdrlen + ip_hlen)) {
2241                                 m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
2242                                 ip = (struct ip *)buf;
2243                         }
2244
2245                         if (ip->ip_p == IPPROTO_TCP)
2246                                 opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM;
2247                         else if (ip->ip_p == IPPROTO_UDP)
2248                                 opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM;
2249                         else {
2250                                 //device_printf(dev, "%s: ipv4\n", __func__);
2251                                 offload = 0;
2252                         }
2253                 break;
2254
2255                 case ETHERTYPE_IPV6:
2256                         ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2257
2258                         ip_hlen = sizeof(struct ip6_hdr);
2259
2260                         if (mp->m_len < (ehdrlen + ip_hlen)) {
2261                                 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
2262                                         buf);
2263                                 ip6 = (struct ip6_hdr *)buf;
2264                         }
2265
2266                         if (ip6->ip6_nxt == IPPROTO_TCP)
2267                                 opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6;
2268                         else if (ip6->ip6_nxt == IPPROTO_UDP)
2269                                 opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6;
2270                         else {
2271                                 //device_printf(dev, "%s: ipv6\n", __func__);
2272                                 offload = 0;
2273                         }
2274                 break;
2275
2276                 default:
2277                         offload = 0;
2278                 break;
2279         }
2280         if (!offload)
2281                 return (-1);
2282
2283         *op_code = opcode;
2284         *tcp_hdr_off = (ip_hlen + ehdrlen);
2285
2286         return (0);
2287 }
2288
2289 #define QLA_TX_MIN_FREE 2
2290 /*
2291  * Name: ql_hw_send
2292  * Function: Transmits a packet. It first checks if the packet is a
2293  *      candidate for Large TCP Segment Offload and then for UDP/TCP checksum
2294  *      offload. If neither of these criteria is met, it is transmitted
2295  *      as a regular ethernet frame.
2296  */
2297 int
2298 ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
2299         uint32_t tx_idx, struct mbuf *mp, uint32_t txr_idx, uint32_t iscsi_pdu)
2300 {
2301         struct ether_vlan_header *eh;
2302         qla_hw_t *hw = &ha->hw;
2303         q80_tx_cmd_t *tx_cmd, tso_cmd;
2304         bus_dma_segment_t *c_seg;
2305         uint32_t num_tx_cmds, hdr_len = 0;
2306         uint32_t total_length = 0, bytes, tx_cmd_count = 0, txr_next;
2307         device_t dev;
2308         int i, ret;
2309         uint8_t *src = NULL, *dst = NULL;
2310         uint8_t frame_hdr[QL_FRAME_HDR_SIZE];
2311         uint32_t op_code = 0;
2312         uint32_t tcp_hdr_off = 0;
2313
2314         dev = ha->pci_dev;
2315
2316         /*
2317          * Always make sure there is at least one empty slot in the tx_ring;
2318          * the tx_ring is considered full when only one entry is available.
2319          */
2320         num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2; /* 4 segs/cmd */
2321
2322         total_length = mp->m_pkthdr.len;
2323         if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
2324                 device_printf(dev, "%s: total length (%d) exceeds "
2325                         "QLA_MAX_TSO_FRAME_SIZE\n", __func__, total_length);
2326                 return (EINVAL);
2327         }
2328         eh = mtod(mp, struct ether_vlan_header *);
2329
2330         if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
2331
2332                 bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t));
2333
2334                 src = frame_hdr;
2335                 ret = qla_tx_tso(ha, mp, &tso_cmd, src);
2336
2337                 if (!(ret & ~1)) { /* qla_tx_tso() returned 0 or 1 */
2338                         /* find the additional tx_cmd descriptors required */
2339
2340                         if (mp->m_flags & M_VLANTAG)
2341                                 tso_cmd.total_hdr_len += ETHER_VLAN_ENCAP_LEN;
2342
2343                         hdr_len = tso_cmd.total_hdr_len;
2344
2345                         bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
2346                         bytes = QL_MIN(bytes, hdr_len);
2347
2348                         num_tx_cmds++;
2349                         hdr_len -= bytes;
2350
2351                         while (hdr_len) {
2352                                 bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
2353                                 hdr_len -= bytes;
2354                                 num_tx_cmds++;
2355                         }
2356                         hdr_len = tso_cmd.total_hdr_len;
2357
2358                         if (ret == 0)
2359                                 src = (uint8_t *)eh;
2360                 } else 
2361                         return (EINVAL);
2362         } else {
2363                 (void)qla_tx_chksum(ha, mp, &op_code, &tcp_hdr_off);
2364         }
2365
2366         if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
2367                 ql_hw_tx_done_locked(ha, txr_idx);
2368                 if (hw->tx_cntxt[txr_idx].txr_free <=
2369                                 (num_tx_cmds + QLA_TX_MIN_FREE)) {
2370                         QL_DPRINT8(ha, (dev, "%s: (hw->txr_free <= "
2371                                 "(num_tx_cmds + QLA_TX_MIN_FREE))\n",
2372                                 __func__));
2373                         return (-1);
2374                 }
2375         }
2376
2377         tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[tx_idx];
2378
2379         if (!(mp->m_pkthdr.csum_flags & CSUM_TSO)) {
2380
2381                 if (nsegs > ha->hw.max_tx_segs)
2382                         ha->hw.max_tx_segs = nsegs;
2383
2384                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2385
2386                 if (op_code) {
2387                         tx_cmd->flags_opcode = op_code;
2388                         tx_cmd->tcp_hdr_off = tcp_hdr_off;
2389
2390                 } else {
2391                         tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER;
2392                 }
2393         } else {
2394                 bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t));
2395                 ha->tx_tso_frames++;
2396         }
2397
2398         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2399                 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED;
2400
2401                 if (iscsi_pdu)
2402                         eh->evl_tag |= ha->hw.user_pri_iscsi << 13;
2403
2404         } else if (mp->m_flags & M_VLANTAG) {
2405
2406                 if (hdr_len) { /* TSO */
2407                         tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED |
2408                                                 Q8_TX_CMD_FLAGS_HW_VLAN_ID);
2409                         tx_cmd->tcp_hdr_off += ETHER_VLAN_ENCAP_LEN;
2410                 } else
2411                         tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_HW_VLAN_ID;
2412
2413                 ha->hw_vlan_tx_frames++;
2414                 tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag;
2415
2416                 if (iscsi_pdu) {
2417                         tx_cmd->vlan_tci |= ha->hw.user_pri_iscsi << 13;
2418                         mp->m_pkthdr.ether_vtag = tx_cmd->vlan_tci;
2419                 }
2420         }
2421
2422
2423         tx_cmd->n_bufs = (uint8_t)nsegs;
2424         tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF);
2425         tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8)));
2426         tx_cmd->cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func);
2427
2428         c_seg = segs;
2429
2430         while (1) {
2431                 for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) {
2432
2433                         switch (i) {
2434                         case 0:
2435                                 tx_cmd->buf1_addr = c_seg->ds_addr;
2436                                 tx_cmd->buf1_len = c_seg->ds_len;
2437                                 break;
2438
2439                         case 1:
2440                                 tx_cmd->buf2_addr = c_seg->ds_addr;
2441                                 tx_cmd->buf2_len = c_seg->ds_len;
2442                                 break;
2443
2444                         case 2:
2445                                 tx_cmd->buf3_addr = c_seg->ds_addr;
2446                                 tx_cmd->buf3_len = c_seg->ds_len;
2447                                 break;
2448
2449                         case 3:
2450                                 tx_cmd->buf4_addr = c_seg->ds_addr;
2451                                 tx_cmd->buf4_len = c_seg->ds_len;
2452                                 break;
2453                         }
2454
2455                         c_seg++;
2456                         nsegs--;
2457                 }
2458
2459                 txr_next = hw->tx_cntxt[txr_idx].txr_next =
2460                         (hw->tx_cntxt[txr_idx].txr_next + 1) &
2461                                 (NUM_TX_DESCRIPTORS - 1);
2462                 tx_cmd_count++;
2463
2464                 if (!nsegs)
2465                         break;
2466                 
2467                 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2468                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2469         }
2470
2471         if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
2472
2473                 /* TSO : Copy the header in the following tx cmd descriptors */
2474
2475                 txr_next = hw->tx_cntxt[txr_idx].txr_next;
2476
2477                 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2478                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2479
2480                 bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
2481                 bytes = QL_MIN(bytes, hdr_len);
2482
2483                 dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN;
2484
2485                 if (mp->m_flags & M_VLANTAG) {
2486                         /* first copy the src/dst MAC addresses */
2487                         bcopy(src, dst, (ETHER_ADDR_LEN * 2));
2488                         dst += (ETHER_ADDR_LEN * 2);
2489                         src += (ETHER_ADDR_LEN * 2);
2490                         
2491                         *((uint16_t *)dst) = htons(ETHERTYPE_VLAN);
2492                         dst += 2;
2493                         *((uint16_t *)dst) = htons(mp->m_pkthdr.ether_vtag);
2494                         dst += 2;
2495
2496                         /* bytes left in src header */
2497                         hdr_len -= ((ETHER_ADDR_LEN * 2) +
2498                                         ETHER_VLAN_ENCAP_LEN);
2499
2500                         /* bytes left in TxCmd Entry */
2501                         bytes -= ((ETHER_ADDR_LEN * 2) + ETHER_VLAN_ENCAP_LEN);
2502
2503
2504                         bcopy(src, dst, bytes);
2505                         src += bytes;
2506                         hdr_len -= bytes;
2507                 } else {
2508                         bcopy(src, dst, bytes);
2509                         src += bytes;
2510                         hdr_len -= bytes;
2511                 }
2512
2513                 txr_next = hw->tx_cntxt[txr_idx].txr_next =
2514                                 (hw->tx_cntxt[txr_idx].txr_next + 1) &
2515                                         (NUM_TX_DESCRIPTORS - 1);
2516                 tx_cmd_count++;
2517                 
2518                 while (hdr_len) {
2519                         tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2520                         bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2521
2522                         bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
2523
2524                         bcopy(src, tx_cmd, bytes);
2525                         src += bytes;
2526                         hdr_len -= bytes;
2527
2528                         txr_next = hw->tx_cntxt[txr_idx].txr_next =
2529                                 (hw->tx_cntxt[txr_idx].txr_next + 1) &
2530                                         (NUM_TX_DESCRIPTORS - 1);
2531                         tx_cmd_count++;
2532                 }
2533         }
2534
2535         hw->tx_cntxt[txr_idx].txr_free =
2536                 hw->tx_cntxt[txr_idx].txr_free - tx_cmd_count;
2537
2538         QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->tx_cntxt[txr_idx].txr_next,\
2539                 txr_idx);
2540         QL_DPRINT8(ha, (dev, "%s: return\n", __func__));
2541
2542         return (0);
2543 }
2544
2545
2546
2547 #define Q8_CONFIG_IND_TBL_SIZE  32 /* < Q8_RSS_IND_TBL_SIZE and power of 2 */
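/*
 * Name: qla_config_rss_ind_table
 * Function: Fills the entire RSS Indirection Table by striping entries
 *      across the SDS rings, Q8_CONFIG_IND_TBL_SIZE entries per mailbox
 *      command.
 */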
2548 static int
2549 qla_config_rss_ind_table(qla_host_t *ha)
2550 {
2551         uint32_t i, count;
2552         uint8_t rss_ind_tbl[Q8_CONFIG_IND_TBL_SIZE];
2553
2554
2555         for (i = 0; i < Q8_CONFIG_IND_TBL_SIZE; i++) {
2556                 rss_ind_tbl[i] = i % ha->hw.num_sds_rings;
2557         }
2558
2559         for (i = 0; i <= Q8_RSS_IND_TBL_MAX_IDX;
2560                 i += Q8_CONFIG_IND_TBL_SIZE) {
2561
2562                 if ((i + Q8_CONFIG_IND_TBL_SIZE) > Q8_RSS_IND_TBL_MAX_IDX) {
2563                         count = Q8_RSS_IND_TBL_MAX_IDX - i + 1;
2564                 } else {
2565                         count = Q8_CONFIG_IND_TBL_SIZE;
2566                 }
2567
2568                 if (qla_set_rss_ind_table(ha, i, count, ha->hw.rcv_cntxt_id,
2569                         rss_ind_tbl))
2570                         return (-1);
2571         }
2572
2573         return (0);
2574 }
2575
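/*
 * Name: qla_config_soft_lro
 * Function: Initializes the software LRO control block for each SDS ring.
 */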
2576 static int
2577 qla_config_soft_lro(qla_host_t *ha)
2578 {
2579         int i;
2580         qla_hw_t *hw = &ha->hw;
2581         struct lro_ctrl *lro;
2582
2583         for (i = 0; i < hw->num_sds_rings; i++) {
2584                 lro = &hw->sds[i].lro;
2585
2586                 bzero(lro, sizeof(struct lro_ctrl));
2587
2588 #if (__FreeBSD_version >= 1100101)
2589                 if (tcp_lro_init_args(lro, ha->ifp, 0, NUM_RX_DESCRIPTORS)) {
2590                         device_printf(ha->pci_dev,
2591                                 "%s: tcp_lro_init_args [%d] failed\n",
2592                                 __func__, i);
2593                         return (-1);
2594                 }
2595 #else
2596                 if (tcp_lro_init(lro)) {
2597                         device_printf(ha->pci_dev,
2598                                 "%s: tcp_lro_init [%d] failed\n",
2599                                 __func__, i);
2600                         return (-1);
2601                 }
2602 #endif /* #if (__FreeBSD_version >= 1100101) */
2603
2604                 lro->ifp = ha->ifp;
2605         }
2606
2607         QL_DPRINT2(ha, (ha->pci_dev, "%s: LRO initialized\n", __func__));
2608         return (0);
2609 }
2610
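/*
 * Name: qla_drain_soft_lro
 * Function: Flushes any LRO segments still queued on the SDS rings.
 */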
2611 static void
2612 qla_drain_soft_lro(qla_host_t *ha)
2613 {
2614         int i;
2615         qla_hw_t *hw = &ha->hw;
2616         struct lro_ctrl *lro;
2617
2618         for (i = 0; i < hw->num_sds_rings; i++) {
2619                 lro = &hw->sds[i].lro;
2620
2621 #if (__FreeBSD_version >= 1100101)
2622                 tcp_lro_flush_all(lro);
2623 #else
2624                 struct lro_entry *queued;
2625
2626                 while ((!SLIST_EMPTY(&lro->lro_active))) {
2627                         queued = SLIST_FIRST(&lro->lro_active);
2628                         SLIST_REMOVE_HEAD(&lro->lro_active, next);
2629                         tcp_lro_flush(lro, queued);
2630                 }
2631 #endif /* #if (__FreeBSD_version >= 1100101) */
2632         }
2633
2634         return;
2635 }
2636
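/*
 * Name: qla_free_soft_lro
 * Function: Releases the software LRO resources of each SDS ring.
 */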
2637 static void
2638 qla_free_soft_lro(qla_host_t *ha)
2639 {
2640         int i;
2641         qla_hw_t *hw = &ha->hw;
2642         struct lro_ctrl *lro;
2643
2644         for (i = 0; i < hw->num_sds_rings; i++) {
2645                 lro = &hw->sds[i].lro;
2646                 tcp_lro_free(lro);
2647         }
2648
2649         return;
2650 }
2651
2652
2653 /*
2654  * Name: ql_del_hw_if
2655  * Function: Destroys the hardware specific entities corresponding to an
2656  *      Ethernet Interface
2657  */
2658 void
2659 ql_del_hw_if(qla_host_t *ha)
2660 {
2661         uint32_t i;
2662         uint32_t num_msix;
2663
2664         (void)qla_stop_nic_func(ha);
2665
2666         qla_del_rcv_cntxt(ha);
2667
2668         qla_del_xmt_cntxt(ha);
2669
2670         if (ha->hw.flags.init_intr_cnxt) {
2671                 for (i = 0; i < ha->hw.num_sds_rings; ) {
2672
2673                         if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2674                                 num_msix = Q8_MAX_INTR_VECTORS;
2675                         else
2676                                 num_msix = ha->hw.num_sds_rings - i;
2677                         qla_config_intr_cntxt(ha, i, num_msix, 0);
2678
2679                         i += num_msix;
2680                 }
2681
2682                 ha->hw.flags.init_intr_cnxt = 0;
2683         }
2684
2685         if (ha->hw.enable_soft_lro) {
2686                 qla_drain_soft_lro(ha);
2687                 qla_free_soft_lro(ha);
2688         }
2689
2690         return;
2691 }
2692
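/*
 * Name: qla_confirm_9kb_enable
 * Function: Queries the NIC partition and clears enable_9kb if 9KB
 *      receive buffers are not supported.
 */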
2693 void
2694 qla_confirm_9kb_enable(qla_host_t *ha)
2695 {
2696         uint32_t supports_9kb = 0;
2697
2698         ha->hw.mbx_intr_mask_offset = READ_REG32(ha, Q8_MBOX_INT_MASK_MSIX);
2699
2700         /* Use MSI-X vector 0; Enable Firmware Mailbox Interrupt */
2701         WRITE_REG32(ha, Q8_MBOX_INT_ENABLE, BIT_2);
2702         WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
2703
2704         qla_get_nic_partition(ha, &supports_9kb, NULL);
2705
2706         if (!supports_9kb)
2707                 ha->hw.enable_9kb = 0;
2708
2709         return;
2710 }
2711
2712 /*
2713  * Name: ql_init_hw_if
2714  * Function: Creates the hardware specific entities corresponding to an
2715  *      Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
2716  *      corresponding to the interface. Enables LRO if allowed.
2717  */
2718 int
2719 ql_init_hw_if(qla_host_t *ha)
2720 {
2721         device_t        dev;
2722         uint32_t        i;
2723         uint8_t         bcast_mac[6];
2724         qla_rdesc_t     *rdesc;
2725         uint32_t        num_msix;
2726
2727         dev = ha->pci_dev;
2728
2729         for (i = 0; i < ha->hw.num_sds_rings; i++) {
2730                 bzero(ha->hw.dma_buf.sds_ring[i].dma_b,
2731                         ha->hw.dma_buf.sds_ring[i].size);
2732         }
2733
2734         for (i = 0; i < ha->hw.num_sds_rings; ) {
2735
2736                 if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2737                         num_msix = Q8_MAX_INTR_VECTORS;
2738                 else
2739                         num_msix = ha->hw.num_sds_rings - i;
2740
2741                 if (qla_config_intr_cntxt(ha, i, num_msix, 1)) {
2742
2743                         if (i > 0) {
2744
2745                                 num_msix = i;
2746
2747                                 for (i = 0; i < num_msix; ) {
2748                                         qla_config_intr_cntxt(ha, i,
2749                                                 Q8_MAX_INTR_VECTORS, 0);
2750                                         i += Q8_MAX_INTR_VECTORS;
2751                                 }
2752                         }
2753                         return (-1);
2754                 }
2755
2756                 i += num_msix;
2757         }
2758
2759         ha->hw.flags.init_intr_cnxt = 1;
2760
2761         /*
2762          * Create Receive Context
2763          */
2764         if (qla_init_rcv_cntxt(ha)) {
2765                 return (-1);
2766         }
2767
2768         for (i = 0; i < ha->hw.num_rds_rings; i++) {
2769                 rdesc = &ha->hw.rds[i];
2770                 rdesc->rx_next = NUM_RX_DESCRIPTORS - 2;
2771                 rdesc->rx_in = 0;
2772                 /* Update the RDS Producer Indices */
2773                 QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,\
2774                         rdesc->rx_next);
2775         }
2776
2777         /*
2778          * Create Transmit Context
2779          */
2780         if (qla_init_xmt_cntxt(ha)) {
2781                 qla_del_rcv_cntxt(ha);
2782                 return (-1);
2783         }
2784         ha->hw.max_tx_segs = 0;
2785
2786         if (qla_config_mac_addr(ha, ha->hw.mac_addr, 1, 1))
2787                 return(-1);
2788
2789         ha->hw.flags.unicast_mac = 1;
2790
2791         bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
2792         bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
2793
2794         if (qla_config_mac_addr(ha, bcast_mac, 1, 1))
2795                 return (-1);
2796
2797         ha->hw.flags.bcast_mac = 1;
2798
2799         /*
2800          * program any cached multicast addresses
2801          */
2802         if (qla_hw_add_all_mcast(ha))
2803                 return (-1);
2804
2805         if (ql_set_max_mtu(ha, ha->max_frame_size, ha->hw.rcv_cntxt_id))
2806                 return (-1);
2807
2808         if (qla_config_rss(ha, ha->hw.rcv_cntxt_id))
2809                 return (-1);
2810
2811         if (qla_config_rss_ind_table(ha))
2812                 return (-1);
2813
2814         if (qla_config_intr_coalesce(ha, ha->hw.rcv_cntxt_id, 0, 1))
2815                 return (-1);
2816
2817         if (qla_link_event_req(ha, ha->hw.rcv_cntxt_id))
2818                 return (-1);
2819
2820         if (ha->ifp->if_capenable & IFCAP_LRO) {
2821                 if (ha->hw.enable_hw_lro) {
2822                         ha->hw.enable_soft_lro = 0;
2823
2824                         if (qla_config_fw_lro(ha, ha->hw.rcv_cntxt_id))
2825                                 return (-1);
2826                 } else {
2827                         ha->hw.enable_soft_lro = 1;
2828
2829                         if (qla_config_soft_lro(ha))
2830                                 return (-1);
2831                 }
2832         }
2833
2834         if (qla_init_nic_func(ha))
2835                 return (-1);
2836
2837         if (qla_query_fw_dcbx_caps(ha))
2838                 return (-1);
2839
2840         for (i = 0; i < ha->hw.num_sds_rings; i++)
2841                 QL_ENABLE_INTERRUPTS(ha, i);
2842
2843         return (0);
2844 }
2845
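/*
 * Name: qla_map_sds_to_rds
 * Function: Creates a 1:1 mapping between a range of SDS rings and the
 *      RDS rings of the receive context.
 */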
2846 static int
2847 qla_map_sds_to_rds(qla_host_t *ha, uint32_t start_idx, uint32_t num_idx)
2848 {
2849         device_t                dev = ha->pci_dev;
2850         q80_rq_map_sds_to_rds_t *map_rings;
2851         q80_rsp_map_sds_to_rds_t *map_rings_rsp;
2852         uint32_t                i, err;
2853         qla_hw_t                *hw = &ha->hw;
2854
2855         map_rings = (q80_rq_map_sds_to_rds_t *)ha->hw.mbox;
2856         bzero(map_rings, sizeof(q80_rq_map_sds_to_rds_t));
2857
2858         map_rings->opcode = Q8_MBX_MAP_SDS_TO_RDS;
2859         map_rings->count_version = (sizeof (q80_rq_map_sds_to_rds_t) >> 2);
2860         map_rings->count_version |= Q8_MBX_CMD_VERSION;
2861
2862         map_rings->cntxt_id = hw->rcv_cntxt_id;
2863         map_rings->num_rings = num_idx;
2864
2865         for (i = 0; i < num_idx; i++) {
2866                 map_rings->sds_rds[i].sds_ring = i + start_idx;
2867                 map_rings->sds_rds[i].rds_ring = i + start_idx;
2868         }
2869
2870         if (qla_mbx_cmd(ha, (uint32_t *)map_rings,
2871                 (sizeof (q80_rq_map_sds_to_rds_t) >> 2),
2872                 ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
2873                 device_printf(dev, "%s: failed0\n", __func__);
2874                 return (-1);
2875         }
2876
2877         map_rings_rsp = (q80_rsp_map_sds_to_rds_t *)ha->hw.mbox;
2878
2879         err = Q8_MBX_RSP_STATUS(map_rings_rsp->regcnt_status);
2880
2881         if (err) {
2882                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2883                 return (-1);
2884         }
2885
2886         return (0);
2887 }
2888
2889 /*
2890  * Name: qla_init_rcv_cntxt
2891  * Function: Creates the Receive Context.
2892  */
2893 static int
2894 qla_init_rcv_cntxt(qla_host_t *ha)
2895 {
2896         q80_rq_rcv_cntxt_t      *rcntxt;
2897         q80_rsp_rcv_cntxt_t     *rcntxt_rsp;
2898         q80_stat_desc_t         *sdesc;
2899         int                     i, j;
2900         qla_hw_t                *hw = &ha->hw;
2901         device_t                dev;
2902         uint32_t                err;
2903         uint32_t                rcntxt_sds_rings;
2904         uint32_t                rcntxt_rds_rings;
2905         uint32_t                max_idx;
2906
2907         dev = ha->pci_dev;
2908
2909         /*
2910          * Create Receive Context
2911          */
2912
2913         for (i = 0; i < hw->num_sds_rings; i++) {
2914                 sdesc = (q80_stat_desc_t *)&hw->sds[i].sds_ring_base[0];
2915
2916                 for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++, sdesc++) {
2917                         sdesc->data[0] = 1ULL;
2918                         sdesc->data[1] = 1ULL;
2919                 }
2920         }
2921
2922         rcntxt_sds_rings = hw->num_sds_rings;
2923         if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS)
2924                 rcntxt_sds_rings = MAX_RCNTXT_SDS_RINGS;
2925
2926         rcntxt_rds_rings = hw->num_rds_rings;
2927
2928         if (hw->num_rds_rings > MAX_RDS_RING_SETS)
2929                 rcntxt_rds_rings = MAX_RDS_RING_SETS;
2930
2931         rcntxt = (q80_rq_rcv_cntxt_t *)ha->hw.mbox;
2932         bzero(rcntxt, (sizeof (q80_rq_rcv_cntxt_t)));
2933
2934         rcntxt->opcode = Q8_MBX_CREATE_RX_CNTXT;
2935         rcntxt->count_version = (sizeof (q80_rq_rcv_cntxt_t) >> 2);
2936         rcntxt->count_version |= Q8_MBX_CMD_VERSION;
2937
2938         rcntxt->cap0 = Q8_RCV_CNTXT_CAP0_BASEFW |
2939                         Q8_RCV_CNTXT_CAP0_LRO |
2940                         Q8_RCV_CNTXT_CAP0_HW_LRO |
2941                         Q8_RCV_CNTXT_CAP0_RSS |
2942                         Q8_RCV_CNTXT_CAP0_SGL_LRO;
2943
2944         if (ha->hw.enable_9kb)
2945                 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SINGLE_JUMBO;
2946         else
2947                 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SGL_JUMBO;
2948
2949         if (ha->hw.num_rds_rings > 1) {
2950                 rcntxt->nrds_sets_rings = rcntxt_rds_rings | (1 << 5);
2951                 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_MULTI_RDS;
2952         } else
2953                 rcntxt->nrds_sets_rings = 0x1 | (1 << 5);
2954
2955         rcntxt->nsds_rings = rcntxt_sds_rings;
2956
2957         rcntxt->rds_producer_mode = Q8_RCV_CNTXT_RDS_PROD_MODE_UNIQUE;
2958
2959         rcntxt->rcv_vpid = 0;
2960
2961         for (i = 0; i <  rcntxt_sds_rings; i++) {
2962                 rcntxt->sds[i].paddr =
2963                         qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr);
2964                 rcntxt->sds[i].size =
2965                         qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
2966                 rcntxt->sds[i].intr_id = qla_host_to_le16(hw->intr_id[i]);
2967                 rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0);
2968         }
2969
2970         for (i = 0; i <  rcntxt_rds_rings; i++) {
2971                 rcntxt->rds[i].paddr_std =
2972                         qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr);
2973
2974                 if (ha->hw.enable_9kb)
2975                         rcntxt->rds[i].std_bsize =
2976                                 qla_host_to_le64(MJUM9BYTES);
2977                 else
2978                         rcntxt->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
2979
2980                 rcntxt->rds[i].std_nentries =
2981                         qla_host_to_le32(NUM_RX_DESCRIPTORS);
2982         }
2983
2984         if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
2985                 (sizeof (q80_rq_rcv_cntxt_t) >> 2),
2986                 ha->hw.mbox, (sizeof(q80_rsp_rcv_cntxt_t) >> 2), 0)) {
2987                 device_printf(dev, "%s: failed0\n", __func__);
2988                 return (-1);
2989         }
2990
2991         rcntxt_rsp = (q80_rsp_rcv_cntxt_t *)ha->hw.mbox;
2992
2993         err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
2994
2995         if (err) {
2996                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2997                 return (-1);
2998         }
2999
3000         for (i = 0; i <  rcntxt_sds_rings; i++) {
3001                 hw->sds[i].sds_consumer = rcntxt_rsp->sds_cons[i];
3002         }
3003
3004         for (i = 0; i <  rcntxt_rds_rings; i++) {
3005                 hw->rds[i].prod_std = rcntxt_rsp->rds[i].prod_std;
3006         }
3007
3008         hw->rcv_cntxt_id = rcntxt_rsp->cntxt_id;
3009
3010         ha->hw.flags.init_rx_cnxt = 1;
3011
3012         if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) {
3013
3014                 for (i = MAX_RCNTXT_SDS_RINGS; i < hw->num_sds_rings;) {
3015
3016                         if ((i + MAX_RCNTXT_SDS_RINGS) < hw->num_sds_rings)
3017                                 max_idx = MAX_RCNTXT_SDS_RINGS;
3018                         else
3019                                 max_idx = hw->num_sds_rings - i;
3020
3021                         err = qla_add_rcv_rings(ha, i, max_idx);
3022                         if (err)
3023                                 return -1;
3024
3025                         i += max_idx;
3026                 }
3027         }
3028
3029         if (hw->num_rds_rings > 1) {
3030
3031                 for (i = 0; i < hw->num_rds_rings; ) {
3032
3033                         if ((i + MAX_SDS_TO_RDS_MAP) < hw->num_rds_rings)
3034                                 max_idx = MAX_SDS_TO_RDS_MAP;
3035                         else
3036                                 max_idx = hw->num_rds_rings - i;
3037
3038                         err = qla_map_sds_to_rds(ha, i, max_idx);
3039                         if (err)
3040                                 return -1;
3041
3042                         i += max_idx;
3043                 }
3044         }
3045
3046         return (0);
3047 }
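
/*
 * The two loops above split the remaining SDS rings and the SDS-to-RDS
 * mappings into chunks the firmware accepts in a single mailbox command.
 * A minimal sketch of the pattern (illustrative only; do_chunk() is a
 * hypothetical stand-in for qla_add_rcv_rings()/qla_map_sds_to_rds() and
 * CHUNK_MAX for the per-command limit):
 *
 *	uint32_t i, n;
 *
 *	for (i = first; i < total; i += n) {
 *		n = total - i;
 *		if (n > CHUNK_MAX)
 *			n = CHUNK_MAX;
 *		if (do_chunk(i, n))
 *			return (-1);
 *	}
 */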
3048
3049 static int
3050 qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds)
3051 {
3052         device_t                dev = ha->pci_dev;
3053         q80_rq_add_rcv_rings_t  *add_rcv;
3054         q80_rsp_add_rcv_rings_t *add_rcv_rsp;
3055         uint32_t                i, j, err;
3056         qla_hw_t                *hw = &ha->hw;
3057
3058         add_rcv = (q80_rq_add_rcv_rings_t *)ha->hw.mbox;
3059         bzero(add_rcv, sizeof (q80_rq_add_rcv_rings_t));
3060
3061         add_rcv->opcode = Q8_MBX_ADD_RX_RINGS;
3062         add_rcv->count_version = (sizeof (q80_rq_add_rcv_rings_t) >> 2);
3063         add_rcv->count_version |= Q8_MBX_CMD_VERSION;
3064
3065         add_rcv->nrds_sets_rings = nsds | (1 << 5);
3066         add_rcv->nsds_rings = nsds;
3067         add_rcv->cntxt_id = hw->rcv_cntxt_id;
3068
3069         for (i = 0; i <  nsds; i++) {
3070
3071                 j = i + sds_idx;
3072
3073                 add_rcv->sds[i].paddr =
3074                         qla_host_to_le64(hw->dma_buf.sds_ring[j].dma_addr);
3075
3076                 add_rcv->sds[i].size =
3077                         qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
3078
3079                 add_rcv->sds[i].intr_id = qla_host_to_le16(hw->intr_id[j]);
3080                 add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0);
3081
3082         }
3083
3084         for (i = 0; (i <  nsds); i++) {
3085                 j = i + sds_idx;
3086
3087                 add_rcv->rds[i].paddr_std =
3088                         qla_host_to_le64(hw->dma_buf.rds_ring[j].dma_addr);
3089
3090                 if (ha->hw.enable_9kb)
3091                         add_rcv->rds[i].std_bsize =
3092                                 qla_host_to_le64(MJUM9BYTES);
3093                 else
3094                         add_rcv->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
3095
3096                 add_rcv->rds[i].std_nentries =
3097                         qla_host_to_le32(NUM_RX_DESCRIPTORS);
3098         }
3099
3100
3101         if (qla_mbx_cmd(ha, (uint32_t *)add_rcv,
3102                 (sizeof (q80_rq_add_rcv_rings_t) >> 2),
3103                 ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
3104                 device_printf(dev, "%s: failed0\n", __func__);
3105                 return (-1);
3106         }
3107
3108         add_rcv_rsp = (q80_rsp_add_rcv_rings_t *)ha->hw.mbox;
3109
3110         err = Q8_MBX_RSP_STATUS(add_rcv_rsp->regcnt_status);
3111
3112         if (err) {
3113                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3114                 return (-1);
3115         }
3116
3117         for (i = 0; i < nsds; i++) {
3118                 hw->sds[(i + sds_idx)].sds_consumer = add_rcv_rsp->sds_cons[i];
3119         }
3120
3121         for (i = 0; i < nsds; i++) {
3122                 hw->rds[(i + sds_idx)].prod_std = add_rcv_rsp->rds[i].prod_std;
3123         }
3124
3125         return (0);
3126 }
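
/*
 * qla_add_rcv_rings() follows the mailbox shape used throughout this
 * file: build the request in ha->hw.mbox, encode the request size in
 * 32-bit words into count_version, issue the command, then decode the
 * status from the response's regcnt_status. A minimal sketch (the
 * q80_req_t/q80_rsp_t types and OPCODE are placeholders, not real
 * driver symbols):
 *
 *	q80_req_t *req = (q80_req_t *)ha->hw.mbox;
 *	q80_rsp_t *rsp;
 *
 *	bzero(req, sizeof(q80_req_t));
 *	req->opcode = OPCODE;
 *	req->count_version = (sizeof(q80_req_t) >> 2) | Q8_MBX_CMD_VERSION;
 *	(fill in command-specific fields)
 *
 *	if (qla_mbx_cmd(ha, (uint32_t *)req, (sizeof(q80_req_t) >> 2),
 *	    ha->hw.mbox, (sizeof(q80_rsp_t) >> 2), 0))
 *		return (-1);
 *
 *	rsp = (q80_rsp_t *)ha->hw.mbox;
 *	if (Q8_MBX_RSP_STATUS(rsp->regcnt_status))
 *		return (-1);
 */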
3127
3128 /*
3129  * Name: qla_del_rcv_cntxt
3130  * Function: Destroys the Receive Context.
3131  */
3132 static void
3133 qla_del_rcv_cntxt(qla_host_t *ha)
3134 {
3135         device_t                        dev = ha->pci_dev;
3136         q80_rcv_cntxt_destroy_t         *rcntxt;
3137         q80_rcv_cntxt_destroy_rsp_t     *rcntxt_rsp;
3138         uint32_t                        err;
3139         uint8_t                         bcast_mac[6];
3140
3141         if (!ha->hw.flags.init_rx_cnxt)
3142                 return;
3143
3144         if (qla_hw_del_all_mcast(ha))
3145                 return;
3146
3147         if (ha->hw.flags.bcast_mac) {
3148
3149                 bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
3150                 bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
3151
3152                 if (qla_config_mac_addr(ha, bcast_mac, 0, 1))
3153                         return;
3154                 ha->hw.flags.bcast_mac = 0;
3155
3156         }
3157
3158         if (ha->hw.flags.unicast_mac) {
3159                 if (qla_config_mac_addr(ha, ha->hw.mac_addr, 0, 1))
3160                         return;
3161                 ha->hw.flags.unicast_mac = 0;
3162         }
3163
3164         rcntxt = (q80_rcv_cntxt_destroy_t *)ha->hw.mbox;
3165         bzero(rcntxt, (sizeof (q80_rcv_cntxt_destroy_t)));
3166
3167         rcntxt->opcode = Q8_MBX_DESTROY_RX_CNTXT;
3168         rcntxt->count_version = (sizeof (q80_rcv_cntxt_destroy_t) >> 2);
3169         rcntxt->count_version |= Q8_MBX_CMD_VERSION;
3170
3171         rcntxt->cntxt_id = ha->hw.rcv_cntxt_id;
3172
3173         if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
3174                 (sizeof (q80_rcv_cntxt_destroy_t) >> 2),
3175                 ha->hw.mbox, (sizeof(q80_rcv_cntxt_destroy_rsp_t) >> 2), 0)) {
3176                 device_printf(dev, "%s: failed0\n", __func__);
3177                 return;
3178         }
3179         rcntxt_rsp = (q80_rcv_cntxt_destroy_rsp_t *)ha->hw.mbox;
3180
3181         err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
3182
3183         if (err) {
3184                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3185         }
3186
3187         ha->hw.flags.init_rx_cnxt = 0;
3188         return;
3189 }
3190
3191 /*
3192  * Name: qla_init_xmt_cntxt_i
3193  * Function: Creates the Transmit Context for a single transmit ring.
3194  */
3195 static int
3196 qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
3197 {
3198         device_t                dev;
3199         qla_hw_t                *hw = &ha->hw;
3200         q80_rq_tx_cntxt_t       *tcntxt;
3201         q80_rsp_tx_cntxt_t      *tcntxt_rsp;
3202         uint32_t                err;
3203         qla_hw_tx_cntxt_t       *hw_tx_cntxt;
3204         uint32_t                intr_idx;
3205
3206         hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
3207
3208         dev = ha->pci_dev;
3209
3210         /*
3211          * Create Transmit Context
3212          */
3213         tcntxt = (q80_rq_tx_cntxt_t *)ha->hw.mbox;
3214         bzero(tcntxt, (sizeof (q80_rq_tx_cntxt_t)));
3215
3216         tcntxt->opcode = Q8_MBX_CREATE_TX_CNTXT;
3217         tcntxt->count_version = (sizeof (q80_rq_tx_cntxt_t) >> 2);
3218         tcntxt->count_version |= Q8_MBX_CMD_VERSION;
3219
3220         intr_idx = txr_idx;
3221
3222 #ifdef QL_ENABLE_ISCSI_TLV
3223
3224         tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO |
3225                                 Q8_TX_CNTXT_CAP0_TC;
3226
3227         if (txr_idx >= (ha->hw.num_tx_rings >> 1)) {
3228                 tcntxt->traffic_class = 1;
3229         }
3230
3231         intr_idx = txr_idx % (ha->hw.num_tx_rings >> 1);
3232
3233 #else
3234         tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO;
3235
3236 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
3237
3238         tcntxt->ntx_rings = 1;
3239
3240         tcntxt->tx_ring[0].paddr =
3241                 qla_host_to_le64(hw_tx_cntxt->tx_ring_paddr);
3242         tcntxt->tx_ring[0].tx_consumer =
3243                 qla_host_to_le64(hw_tx_cntxt->tx_cons_paddr);
3244         tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS);
3245
3246         tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[intr_idx]);
3247         tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0);
3248
3249         hw_tx_cntxt->txr_free = NUM_TX_DESCRIPTORS;
3250         hw_tx_cntxt->txr_next = hw_tx_cntxt->txr_comp = 0;
3251
3252         if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
3253                 (sizeof (q80_rq_tx_cntxt_t) >> 2),
3254                 ha->hw.mbox,
3255                 (sizeof(q80_rsp_tx_cntxt_t) >> 2), 0)) {
3256                 device_printf(dev, "%s: failed0\n", __func__);
3257                 return (-1);
3258         }
3259         tcntxt_rsp = (q80_rsp_tx_cntxt_t *)ha->hw.mbox;
3260
3261         err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
3262
3263         if (err) {
3264                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3265                 return -1;
3266         }
3267
3268         hw_tx_cntxt->tx_prod_reg = tcntxt_rsp->tx_ring[0].prod_index;
3269         hw_tx_cntxt->tx_cntxt_id = tcntxt_rsp->tx_ring[0].cntxt_id;
3270
3271         if (qla_config_intr_coalesce(ha, hw_tx_cntxt->tx_cntxt_id, 0, 0))
3272                 return (-1);
3273
3274         return (0);
3275 }
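
/*
 * Worked example of the QL_ENABLE_ISCSI_TLV split above, assuming
 * ha->hw.num_tx_rings == 8 for illustration: the upper half of the tx
 * rings is placed in traffic class 1 and shares the interrupt vectors
 * of the lower half.
 *
 *	txr_idx = 6:
 *		traffic_class = (6 >= (8 >> 1)) ? 1 : 0;	result: 1
 *		intr_idx      = 6 % (8 >> 1);			result: 2
 *
 * i.e. tx ring 6 uses the interrupt vector of tx ring 2.
 */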
3276
3277
3278 /*
3279  * Name: qla_del_xmt_cntxt_i
3280  * Function: Destroys the Transmit Context for a single transmit ring.
3281  */
3282 static int
3283 qla_del_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
3284 {
3285         device_t                        dev = ha->pci_dev;
3286         q80_tx_cntxt_destroy_t          *tcntxt;
3287         q80_tx_cntxt_destroy_rsp_t      *tcntxt_rsp;
3288         uint32_t                        err;
3289
3290         tcntxt = (q80_tx_cntxt_destroy_t *)ha->hw.mbox;
3291         bzero(tcntxt, (sizeof (q80_tx_cntxt_destroy_t)));
3292
3293         tcntxt->opcode = Q8_MBX_DESTROY_TX_CNTXT;
3294         tcntxt->count_version = (sizeof (q80_tx_cntxt_destroy_t) >> 2);
3295         tcntxt->count_version |= Q8_MBX_CMD_VERSION;
3296
3297         tcntxt->cntxt_id = ha->hw.tx_cntxt[txr_idx].tx_cntxt_id;
3298
3299         if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
3300                 (sizeof (q80_tx_cntxt_destroy_t) >> 2),
3301                 ha->hw.mbox, (sizeof (q80_tx_cntxt_destroy_rsp_t) >> 2), 0)) {
3302                 device_printf(dev, "%s: failed0\n", __func__);
3303                 return (-1);
3304         }
3305         tcntxt_rsp = (q80_tx_cntxt_destroy_rsp_t *)ha->hw.mbox;
3306
3307         err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
3308
3309         if (err) {
3310                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3311                 return (-1);
3312         }
3313
3314         return (0);
3315 }
3316 static void
3317 qla_del_xmt_cntxt(qla_host_t *ha)
3318 {
3319         uint32_t i;
3320
3321         if (!ha->hw.flags.init_tx_cnxt)
3322                 return;
3323
3324         for (i = 0; i < ha->hw.num_tx_rings; i++) {
3325                 if (qla_del_xmt_cntxt_i(ha, i))
3326                         break;
3327         }
3328         ha->hw.flags.init_tx_cnxt = 0;
3329 }
3330
3331 static int
3332 qla_init_xmt_cntxt(qla_host_t *ha)
3333 {
3334         uint32_t i, j;
3335
3336         for (i = 0; i < ha->hw.num_tx_rings; i++) {
3337                 if (qla_init_xmt_cntxt_i(ha, i) != 0) {
3338                         for (j = 0; j < i; j++)
3339                                 qla_del_xmt_cntxt_i(ha, j);
3340                         return (-1);
3341                 }
3342         }
3343         ha->hw.flags.init_tx_cnxt = 1;
3344         return (0);
3345 }
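
/*
 * Create/unwind pattern used above: if creating context i fails, every
 * context created before it is destroyed before returning. Minimal
 * sketch (create()/destroy() are stand-ins for qla_init_xmt_cntxt_i()/
 * qla_del_xmt_cntxt_i()):
 *
 *	for (i = 0; i < n; i++) {
 *		if (create(i) != 0) {
 *			while (i--)
 *				destroy(i);
 *			return (-1);
 *		}
 *	}
 */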
3346
3347 static int
3348 qla_hw_all_mcast(qla_host_t *ha, uint32_t add_mcast)
3349 {
3350         int i, nmcast;
3351         uint32_t count = 0;
3352         uint8_t *mcast;
3353
3354         nmcast = ha->hw.nmcast;
3355
3356         QL_DPRINT2(ha, (ha->pci_dev,
3357                 "%s:[0x%x] enter nmcast = %d \n", __func__, add_mcast, nmcast));
3358
3359         mcast = ha->hw.mac_addr_arr;
3360         memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3361
3362         for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
3363                 if ((ha->hw.mcast[i].addr[0] != 0) || 
3364                         (ha->hw.mcast[i].addr[1] != 0) ||
3365                         (ha->hw.mcast[i].addr[2] != 0) ||
3366                         (ha->hw.mcast[i].addr[3] != 0) ||
3367                         (ha->hw.mcast[i].addr[4] != 0) ||
3368                         (ha->hw.mcast[i].addr[5] != 0)) {
3369
3370                         bcopy(ha->hw.mcast[i].addr, mcast, ETHER_ADDR_LEN);
3371                         mcast = mcast + ETHER_ADDR_LEN;
3372                         count++;
3373                         
3374                         if (count == Q8_MAX_MAC_ADDRS) {
3375                                 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr,
3376                                         add_mcast, count)) {
3377                                         device_printf(ha->pci_dev,
3378                                                 "%s: failed\n", __func__);
3379                                         return (-1);
3380                                 }
3381
3382                                 count = 0;
3383                                 mcast = ha->hw.mac_addr_arr;
3384                                 memset(mcast, 0,
3385                                         (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3386                         }
3387
3388                         nmcast--;
3389                 }
3390         }
3391
3392         if (count) {
3393                 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mcast,
3394                         count)) {
3395                         device_printf(ha->pci_dev, "%s: failed\n", __func__);
3396                         return (-1);
3397                 }
3398         }
3399         QL_DPRINT2(ha, (ha->pci_dev,
3400                 "%s:[0x%x] exit nmcast = %d \n", __func__, add_mcast, nmcast));
3401
3402         return 0;
3403 }
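
/*
 * qla_config_mac_addr() can program at most Q8_MAX_MAC_ADDRS addresses
 * per mailbox command, so qla_hw_all_mcast() stages each non-empty
 * table entry in ha->hw.mac_addr_arr and flushes in batches. Sketch of
 * the rhythm (illustrative only; emit() stands in for
 * qla_config_mac_addr()):
 *
 *	count = 0;
 *	dst = ha->hw.mac_addr_arr;
 *	for (i = 0; i < nmcast; i++) {
 *		bcopy(ha->hw.mcast[i].addr, dst, ETHER_ADDR_LEN);
 *		dst += ETHER_ADDR_LEN;
 *		if (++count == Q8_MAX_MAC_ADDRS) {
 *			if (emit(ha, ha->hw.mac_addr_arr, count))
 *				return (-1);
 *			count = 0;
 *			dst = ha->hw.mac_addr_arr;
 *		}
 *	}
 *	if (count && emit(ha, ha->hw.mac_addr_arr, count))
 *		return (-1);
 */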
3404
3405 static int
3406 qla_hw_add_all_mcast(qla_host_t *ha)
3407 {
3408         int ret;
3409
3410         ret = qla_hw_all_mcast(ha, 1);
3411
3412         return (ret);
3413 }
3414
3415 int
3416 qla_hw_del_all_mcast(qla_host_t *ha)
3417 {
3418         int ret;
3419
3420         ret = qla_hw_all_mcast(ha, 0);
3421
3422         bzero(ha->hw.mcast, (sizeof (qla_mcast_t) * Q8_MAX_NUM_MULTICAST_ADDRS));
3423         ha->hw.nmcast = 0;
3424
3425         return (ret);
3426 }
3427
3428 static int
3429 qla_hw_mac_addr_present(qla_host_t *ha, uint8_t *mta)
3430 {
3431         int i;
3432
3433         for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3434                 if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0)
3435                         return (0); /* it has already been added */
3436         }
3437         return (-1);
3438 }
3439
3440 static int
3441 qla_hw_add_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
3442 {
3443         int i;
3444
3445         for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3446
3447                 if ((ha->hw.mcast[i].addr[0] == 0) && 
3448                         (ha->hw.mcast[i].addr[1] == 0) &&
3449                         (ha->hw.mcast[i].addr[2] == 0) &&
3450                         (ha->hw.mcast[i].addr[3] == 0) &&
3451                         (ha->hw.mcast[i].addr[4] == 0) &&
3452                         (ha->hw.mcast[i].addr[5] == 0)) {
3453
3454                         bcopy(mta, ha->hw.mcast[i].addr, Q8_MAC_ADDR_LEN);
3455                         ha->hw.nmcast++;        
3456
3457                         mta = mta + ETHER_ADDR_LEN;
3458                         nmcast--;
3459
3460                         if (nmcast == 0)
3461                                 break;
3462                 }
3463
3464         }
3465         return 0;
3466 }
3467
3468 static int
3469 qla_hw_del_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
3470 {
3471         int i;
3472
3473         for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3474                 if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) {
3475
3476                         ha->hw.mcast[i].addr[0] = 0;
3477                         ha->hw.mcast[i].addr[1] = 0;
3478                         ha->hw.mcast[i].addr[2] = 0;
3479                         ha->hw.mcast[i].addr[3] = 0;
3480                         ha->hw.mcast[i].addr[4] = 0;
3481                         ha->hw.mcast[i].addr[5] = 0;
3482
3483                         ha->hw.nmcast--;        
3484
3485                         mta = mta + ETHER_ADDR_LEN;
3486                         nmcast--;
3487
3488                         if (nmcast == 0)
3489                                 break;
3490                 }
3491         }
3492         return 0;
3493 }
3494
3495 /*
3496  * Name: ql_hw_set_multi
3497  * Function: Sets the Multicast Addresses provided by the host OS into the
3498  *      hardware (for the given interface)
3499  */
3500 int
3501 ql_hw_set_multi(qla_host_t *ha, uint8_t *mcast_addr, uint32_t mcnt,
3502         uint32_t add_mac)
3503 {
3504         uint8_t *mta = mcast_addr;
3505         int i;
3506         int ret = 0;
3507         uint32_t count = 0;
3508         uint8_t *mcast;
3509
3510         mcast = ha->hw.mac_addr_arr;
3511         memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3512
3513         for (i = 0; i < mcnt; i++) {
3514                 if (mta[0] || mta[1] || mta[2] || mta[3] || mta[4] || mta[5]) {
3515                         if (add_mac) {
3516                                 if (qla_hw_mac_addr_present(ha, mta) != 0) {
3517                                         bcopy(mta, mcast, ETHER_ADDR_LEN);
3518                                         mcast = mcast + ETHER_ADDR_LEN;
3519                                         count++;
3520                                 }
3521                         } else {
3522                                 if (qla_hw_mac_addr_present(ha, mta) == 0) {
3523                                         bcopy(mta, mcast, ETHER_ADDR_LEN);
3524                                         mcast = mcast + ETHER_ADDR_LEN;
3525                                         count++;
3526                                 }
3527                         }
3528                 }
3529                 if (count == Q8_MAX_MAC_ADDRS) {
3530                         if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr,
3531                                 add_mac, count)) {
3532                                 device_printf(ha->pci_dev, "%s: failed\n",
3533                                         __func__);
3534                                 return (-1);
3535                         }
3536
3537                         if (add_mac) {
3538                                 qla_hw_add_mcast(ha, ha->hw.mac_addr_arr,
3539                                         count);
3540                         } else {
3541                                 qla_hw_del_mcast(ha, ha->hw.mac_addr_arr,
3542                                         count);
3543                         }
3544
3545                         count = 0;
3546                         mcast = ha->hw.mac_addr_arr;
3547                         memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3548                 }
3549                         
3550                 mta += Q8_MAC_ADDR_LEN;
3551         }
3552
3553         if (count) {
3554                 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mac,
3555                         count)) {
3556                         device_printf(ha->pci_dev, "%s: failed\n", __func__);
3557                         return (-1);
3558                 }
3559                 if (add_mac) {
3560                         qla_hw_add_mcast(ha, ha->hw.mac_addr_arr, count);
3561                 } else {
3562                         qla_hw_del_mcast(ha, ha->hw.mac_addr_arr, count);
3563                 }
3564         }
3565
3566         return (ret);
3567 }
3568
3569 /*
3570  * Name: ql_hw_tx_done_locked
3571  * Function: Handle Transmit Completions
3572  */
3573 void
3574 ql_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx)
3575 {
3576         qla_tx_buf_t *txb;
3577         qla_hw_t *hw = &ha->hw;
3578         uint32_t comp_idx, comp_count = 0;
3579         qla_hw_tx_cntxt_t *hw_tx_cntxt;
3580
3581         hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
3582
3583         /* retrieve the index of the last completed entry in the tx ring */
3584         comp_idx = qla_le32_to_host(*(hw_tx_cntxt->tx_cons));
3585
3586         while (comp_idx != hw_tx_cntxt->txr_comp) {
3587
3588                 txb = &ha->tx_ring[txr_idx].tx_buf[hw_tx_cntxt->txr_comp];
3589
3590                 hw_tx_cntxt->txr_comp++;
3591                 if (hw_tx_cntxt->txr_comp == NUM_TX_DESCRIPTORS)
3592                         hw_tx_cntxt->txr_comp = 0;
3593
3594                 comp_count++;
3595
3596                 if (txb->m_head) {
3597                         ha->ifp->if_opackets++;
3598
3599                         bus_dmamap_sync(ha->tx_tag, txb->map,
3600                                 BUS_DMASYNC_POSTWRITE);
3601                         bus_dmamap_unload(ha->tx_tag, txb->map);
3602                         m_freem(txb->m_head);
3603
3604                         txb->m_head = NULL;
3605                 }
3606         }
3607
3608         hw_tx_cntxt->txr_free += comp_count;
3609         return;
3610 }
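
/*
 * The firmware publishes the last-completed index at *tx_cons; the
 * driver's txr_comp chases it modulo the ring size. Worked example,
 * assuming NUM_TX_DESCRIPTORS == 1024 for illustration:
 *
 *	txr_comp == 1022, comp_idx == 2
 *	entries reclaimed: 1022, 1023, 0, 1	(comp_count == 4)
 *	txr_free += 4
 */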
3611
3612 void
3613 ql_update_link_state(qla_host_t *ha)
3614 {
3615         uint32_t link_state;
3616         uint32_t prev_link_state;
3617
3618         if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3619                 ha->hw.link_up = 0;
3620                 return;
3621         }
3622         link_state = READ_REG32(ha, Q8_LINK_STATE);
3623
3624         prev_link_state =  ha->hw.link_up;
3625
3626         if (ha->pci_func == 0) 
3627                 ha->hw.link_up = (((link_state & 0xF) == 1)? 1 : 0);
3628         else
3629                 ha->hw.link_up = ((((link_state >> 4)& 0xF) == 1)? 1 : 0);
3630
3631         if (prev_link_state !=  ha->hw.link_up) {
3632                 if (ha->hw.link_up) {
3633                         if_link_state_change(ha->ifp, LINK_STATE_UP);
3634                 } else {
3635                         if_link_state_change(ha->ifp, LINK_STATE_DOWN);
3636                 }
3637         }
3638         return;
3639 }
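
/*
 * Q8_LINK_STATE packs a 4-bit link state per port (a value of 1 means
 * link up); function 0 reads bits 3:0 and any other function reads
 * bits 7:4. For pci_func 0 or 1 the logic above is equivalent to
 * (illustrative only):
 *
 *	link_up = (((link_state >> (ha->pci_func * 4)) & 0xF) == 1);
 */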
3640
3641 int
3642 ql_hw_check_health(qla_host_t *ha)
3643 {
3644         uint32_t val;
3645
3646         ha->hw.health_count++;
3647
3648         if (ha->hw.health_count < 500)
3649                 return 0;
3650
3651         ha->hw.health_count = 0;
3652
3653         val = READ_REG32(ha, Q8_ASIC_TEMPERATURE);
3654
3655         if (((val & 0xFFFF) == 2) || ((val & 0xFFFF) == 3) ||
3656                 (QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE))) {
3657                 device_printf(ha->pci_dev, "%s: Temperature Alert [0x%08x]\n",
3658                         __func__, val);
3659                 return -1;
3660         }
3661
3662         val = READ_REG32(ha, Q8_FIRMWARE_HEARTBEAT);
3663
3664         if ((val != ha->hw.hbeat_value) &&
3665                 (!(QL_ERR_INJECT(ha, INJCT_HEARTBEAT_FAILURE)))) {
3666                 ha->hw.hbeat_value = val;
3667                 ha->hw.hbeat_failure = 0;
3668                 return 0;
3669         }
3670
3671         ha->hw.hbeat_failure++;
3672
3673         
3674         if ((ha->dbg_level & 0x8000) && (ha->hw.hbeat_failure == 1))
3675                 device_printf(ha->pci_dev, "%s: Heartbeat Failure 1 [0x%08x]\n",
3676                         __func__, val);
3677         if (ha->hw.hbeat_failure < 2) /* we ignore the first failure */
3678                 return 0;
3679         else
3680                 device_printf(ha->pci_dev, "%s: Heartbeat Failure [0x%08x]\n",
3681                         __func__, val);
3682
3683         return -1;
3684 }
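
/*
 * Heartbeat logic in isolation: the firmware keeps changing
 * Q8_FIRMWARE_HEARTBEAT, so two identical consecutive samples mean a
 * missed beat; the first miss is tolerated, the second declares the
 * firmware dead. A minimal sketch (last_val/misses stand in for
 * ha->hw.hbeat_value/ha->hw.hbeat_failure):
 *
 *	val = READ_REG32(ha, Q8_FIRMWARE_HEARTBEAT);
 *	if (val != last_val) {
 *		last_val = val;
 *		misses = 0;
 *		return (0);
 *	}
 *	if (++misses < 2)
 *		return (0);
 *	return (-1);
 */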
3685
3686 static int
3687 qla_init_nic_func(qla_host_t *ha)
3688 {
3689         device_t                dev;
3690         q80_init_nic_func_t     *init_nic;
3691         q80_init_nic_func_rsp_t *init_nic_rsp;
3692         uint32_t                err;
3693
3694         dev = ha->pci_dev;
3695
3696         init_nic = (q80_init_nic_func_t *)ha->hw.mbox;
3697         bzero(init_nic, sizeof(q80_init_nic_func_t));
3698
3699         init_nic->opcode = Q8_MBX_INIT_NIC_FUNC;
3700         init_nic->count_version = (sizeof (q80_init_nic_func_t) >> 2);
3701         init_nic->count_version |= Q8_MBX_CMD_VERSION;
3702
3703         init_nic->options = Q8_INIT_NIC_REG_DCBX_CHNG_AEN;
3704         init_nic->options |= Q8_INIT_NIC_REG_SFP_CHNG_AEN;
3705         init_nic->options |= Q8_INIT_NIC_REG_IDC_AEN;
3706
3707 //qla_dump_buf8(ha, __func__, init_nic, sizeof (q80_init_nic_func_t));
3708         if (qla_mbx_cmd(ha, (uint32_t *)init_nic,
3709                 (sizeof (q80_init_nic_func_t) >> 2),
3710                 ha->hw.mbox, (sizeof (q80_init_nic_func_rsp_t) >> 2), 0)) {
3711                 device_printf(dev, "%s: failed\n", __func__);
3712                 return -1;
3713         }
3714
3715         init_nic_rsp = (q80_init_nic_func_rsp_t *)ha->hw.mbox;
3716 // qla_dump_buf8(ha, __func__, init_nic_rsp, sizeof (q80_init_nic_func_rsp_t));
3717
3718         err = Q8_MBX_RSP_STATUS(init_nic_rsp->regcnt_status);
3719
3720         if (err) {
3721                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3722         }
3723
3724         return 0;
3725 }
3726
3727 static int
3728 qla_stop_nic_func(qla_host_t *ha)
3729 {
3730         device_t                dev;
3731         q80_stop_nic_func_t     *stop_nic;
3732         q80_stop_nic_func_rsp_t *stop_nic_rsp;
3733         uint32_t                err;
3734
3735         dev = ha->pci_dev;
3736
3737         stop_nic = (q80_stop_nic_func_t *)ha->hw.mbox;
3738         bzero(stop_nic, sizeof(q80_stop_nic_func_t));
3739
3740         stop_nic->opcode = Q8_MBX_STOP_NIC_FUNC;
3741         stop_nic->count_version = (sizeof (q80_stop_nic_func_t) >> 2);
3742         stop_nic->count_version |= Q8_MBX_CMD_VERSION;
3743
3744         stop_nic->options = Q8_STOP_NIC_DEREG_DCBX_CHNG_AEN;
3745         stop_nic->options |= Q8_STOP_NIC_DEREG_SFP_CHNG_AEN;
3746
3747 //qla_dump_buf8(ha, __func__, stop_nic, sizeof (q80_stop_nic_func_t));
3748         if (qla_mbx_cmd(ha, (uint32_t *)stop_nic,
3749                 (sizeof (q80_stop_nic_func_t) >> 2),
3750                 ha->hw.mbox, (sizeof (q80_stop_nic_func_rsp_t) >> 2), 0)) {
3751                 device_printf(dev, "%s: failed\n", __func__);
3752                 return -1;
3753         }
3754
3755         stop_nic_rsp = (q80_stop_nic_func_rsp_t *)ha->hw.mbox;
3756 //qla_dump_buf8(ha, __func__, stop_nic_rsp, sizeof (q80_stop_nic_func_rsp_t));
3757
3758         err = Q8_MBX_RSP_STATUS(stop_nic_rsp->regcnt_status);
3759
3760         if (err) {
3761                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3762         }
3763
3764         return 0;
3765 }
3766
3767 static int
3768 qla_query_fw_dcbx_caps(qla_host_t *ha)
3769 {
3770         device_t                        dev;
3771         q80_query_fw_dcbx_caps_t        *fw_dcbx;
3772         q80_query_fw_dcbx_caps_rsp_t    *fw_dcbx_rsp;
3773         uint32_t                        err;
3774
3775         dev = ha->pci_dev;
3776
3777         fw_dcbx = (q80_query_fw_dcbx_caps_t *)ha->hw.mbox;
3778         bzero(fw_dcbx, sizeof(q80_query_fw_dcbx_caps_t));
3779
3780         fw_dcbx->opcode = Q8_MBX_GET_FW_DCBX_CAPS;
3781         fw_dcbx->count_version = (sizeof (q80_query_fw_dcbx_caps_t) >> 2);
3782         fw_dcbx->count_version |= Q8_MBX_CMD_VERSION;
3783
3784         ql_dump_buf8(ha, __func__, fw_dcbx, sizeof (q80_query_fw_dcbx_caps_t));
3785         if (qla_mbx_cmd(ha, (uint32_t *)fw_dcbx,
3786                 (sizeof (q80_query_fw_dcbx_caps_t) >> 2),
3787                 ha->hw.mbox, (sizeof (q80_query_fw_dcbx_caps_rsp_t) >> 2), 0)) {
3788                 device_printf(dev, "%s: failed\n", __func__);
3789                 return -1;
3790         }
3791
3792         fw_dcbx_rsp = (q80_query_fw_dcbx_caps_rsp_t *)ha->hw.mbox;
3793         ql_dump_buf8(ha, __func__, fw_dcbx_rsp,
3794                 sizeof (q80_query_fw_dcbx_caps_rsp_t));
3795
3796         err = Q8_MBX_RSP_STATUS(fw_dcbx_rsp->regcnt_status);
3797
3798         if (err) {
3799                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3800         }
3801
3802         return 0;
3803 }
3804
3805 static int
3806 qla_idc_ack(qla_host_t *ha, uint32_t aen_mb1, uint32_t aen_mb2,
3807         uint32_t aen_mb3, uint32_t aen_mb4)
3808 {
3809         device_t                dev;
3810         q80_idc_ack_t           *idc_ack;
3811         q80_idc_ack_rsp_t       *idc_ack_rsp;
3812         uint32_t                err;
3813         int                     count = 300;
3814
3815         dev = ha->pci_dev;
3816
3817         idc_ack = (q80_idc_ack_t *)ha->hw.mbox;
3818         bzero(idc_ack, sizeof(q80_idc_ack_t));
3819
3820         idc_ack->opcode = Q8_MBX_IDC_ACK;
3821         idc_ack->count_version = (sizeof (q80_idc_ack_t) >> 2);
3822         idc_ack->count_version |= Q8_MBX_CMD_VERSION;
3823
3824         idc_ack->aen_mb1 = aen_mb1;
3825         idc_ack->aen_mb2 = aen_mb2;
3826         idc_ack->aen_mb3 = aen_mb3;
3827         idc_ack->aen_mb4 = aen_mb4;
3828
3829         ha->hw.imd_compl = 0;
3830
3831         if (qla_mbx_cmd(ha, (uint32_t *)idc_ack,
3832                 (sizeof (q80_idc_ack_t) >> 2),
3833                 ha->hw.mbox, (sizeof (q80_idc_ack_rsp_t) >> 2), 0)) {
3834                 device_printf(dev, "%s: failed\n", __func__);
3835                 return -1;
3836         }
3837
3838         idc_ack_rsp = (q80_idc_ack_rsp_t *)ha->hw.mbox;
3839
3840         err = Q8_MBX_RSP_STATUS(idc_ack_rsp->regcnt_status);
3841
3842         if (err) {
3843                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3844                 return(-1);
3845         }
3846
3847         while (count && !ha->hw.imd_compl) {
3848                 qla_mdelay(__func__, 100);
3849                 count--;
3850         }
3851
3852         if (!count)
3853                 return -1;
3854         else
3855                 device_printf(dev, "%s: count %d\n", __func__, count);
3856
3857         return (0);
3858 }
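
/*
 * Commands that complete via an asynchronous event follow the polling
 * pattern above: clear ha->hw.imd_compl before issuing the command,
 * then poll it in 100ms steps. With count == 300 that is a 30 second
 * ceiling. Sketch (illustrative only):
 *
 *	int count = 300;
 *
 *	ha->hw.imd_compl = 0;
 *	(issue the mailbox command)
 *	while (count && !ha->hw.imd_compl) {
 *		qla_mdelay(__func__, 100);
 *		count--;
 *	}
 *	if (count == 0)
 *		return (-1);
 */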
3859
3860 static int
3861 qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits)
3862 {
3863         device_t                dev;
3864         q80_set_port_cfg_t      *pcfg;
3865         q80_set_port_cfg_rsp_t  *pfg_rsp;
3866         uint32_t                err;
3867         int                     count = 300;
3868
3869         dev = ha->pci_dev;
3870
3871         pcfg = (q80_set_port_cfg_t *)ha->hw.mbox;
3872         bzero(pcfg, sizeof(q80_set_port_cfg_t));
3873
3874         pcfg->opcode = Q8_MBX_SET_PORT_CONFIG;
3875         pcfg->count_version = (sizeof (q80_set_port_cfg_t) >> 2);
3876         pcfg->count_version |= Q8_MBX_CMD_VERSION;
3877
3878         pcfg->cfg_bits = cfg_bits;
3879
3880         device_printf(dev, "%s: cfg_bits"
3881                 " [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
3882                 " [0x%x, 0x%x, 0x%x]\n", __func__,
3883                 ((cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
3884                 ((cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
3885                 ((cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0));
3886
3887         ha->hw.imd_compl = 0;
3888
3889         if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
3890                 (sizeof (q80_set_port_cfg_t) >> 2),
3891                 ha->hw.mbox, (sizeof (q80_set_port_cfg_rsp_t) >> 2), 0)) {
3892                 device_printf(dev, "%s: failed\n", __func__);
3893                 return -1;
3894         }
3895
3896         pfg_rsp = (q80_set_port_cfg_rsp_t *)ha->hw.mbox;
3897
3898         err = Q8_MBX_RSP_STATUS(pfg_rsp->regcnt_status);
3899
3900         if (err == Q8_MBX_RSP_IDC_INTRMD_RSP) {
3901                 while (count && !ha->hw.imd_compl) {
3902                         qla_mdelay(__func__, 100);
3903                         count--;
3904                 }
3905                 if (count) {
3906                         device_printf(dev, "%s: count %d\n", __func__, count);
3907
3908                         err = 0;
3909                 }
3910         }
3911
3912         if (err) {
3913                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3914                 return(-1);
3915         }
3916
3917         return (0);
3918 }
3919
3920
3921 static int
3922 qla_get_minidump_tmplt_size(qla_host_t *ha, uint32_t *size)
3923 {
3924         uint32_t                        err;
3925         device_t                        dev = ha->pci_dev;
3926         q80_config_md_templ_size_t      *md_size;
3927         q80_config_md_templ_size_rsp_t  *md_size_rsp;
3928
3929 #ifndef QL_LDFLASH_FW
3930
3931         ql_minidump_template_hdr_t *hdr;
3932
3933         hdr = (ql_minidump_template_hdr_t *)ql83xx_minidump;
3934         *size = hdr->size_of_template;
3935         return (0);
3936
3937 #endif /* #ifndef QL_LDFLASH_FW */
3938
3939         md_size = (q80_config_md_templ_size_t *) ha->hw.mbox;
3940         bzero(md_size, sizeof(q80_config_md_templ_size_t));
3941
3942         md_size->opcode = Q8_MBX_GET_MINIDUMP_TMPLT_SIZE;
3943         md_size->count_version = (sizeof (q80_config_md_templ_size_t) >> 2);
3944         md_size->count_version |= Q8_MBX_CMD_VERSION;
3945
3946         if (qla_mbx_cmd(ha, (uint32_t *) md_size,
3947                 (sizeof(q80_config_md_templ_size_t) >> 2), ha->hw.mbox,
3948                 (sizeof(q80_config_md_templ_size_rsp_t) >> 2), 0)) {
3949
3950                 device_printf(dev, "%s: failed\n", __func__);
3951
3952                 return (-1);
3953         }
3954
3955         md_size_rsp = (q80_config_md_templ_size_rsp_t *) ha->hw.mbox;
3956
3957         err = Q8_MBX_RSP_STATUS(md_size_rsp->regcnt_status);
3958
3959         if (err) {
3960                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3961                 return(-1);
3962         }
3963
3964         *size = md_size_rsp->templ_size;
3965
3966         return (0);
3967 }
3968
3969 static int
3970 qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits)
3971 {
3972         device_t                dev;
3973         q80_get_port_cfg_t      *pcfg;
3974         q80_get_port_cfg_rsp_t  *pcfg_rsp;
3975         uint32_t                err;
3976
3977         dev = ha->pci_dev;
3978
3979         pcfg = (q80_get_port_cfg_t *)ha->hw.mbox;
3980         bzero(pcfg, sizeof(q80_get_port_cfg_t));
3981
3982         pcfg->opcode = Q8_MBX_GET_PORT_CONFIG;
3983         pcfg->count_version = (sizeof (q80_get_port_cfg_t) >> 2);
3984         pcfg->count_version |= Q8_MBX_CMD_VERSION;
3985
3986         if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
3987                 (sizeof (q80_get_port_cfg_t) >> 2),
3988                 ha->hw.mbox, (sizeof (q80_get_port_cfg_rsp_t) >> 2), 0)) {
3989                 device_printf(dev, "%s: failed\n", __func__);
3990                 return -1;
3991         }
3992
3993         pcfg_rsp = (q80_get_port_cfg_rsp_t *)ha->hw.mbox;
3994
3995         err = Q8_MBX_RSP_STATUS(pcfg_rsp->regcnt_status);
3996
3997         if (err) {
3998                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3999                 return(-1);
4000         }
4001
4002         device_printf(dev, "%s: [cfg_bits, port type]"
4003                 " [0x%08x, 0x%02x] [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
4004                 " [0x%x, 0x%x, 0x%x]\n", __func__,
4005                 pcfg_rsp->cfg_bits, pcfg_rsp->phys_port_type,
4006                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
4007                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
4008                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0)
4009                 );
4010
4011         *cfg_bits = pcfg_rsp->cfg_bits;
4012
4013         return (0);
4014 }
4015
4016 int
4017 ql_iscsi_pdu(qla_host_t *ha, struct mbuf *mp)
4018 {
4019         struct ether_vlan_header        *eh;
4020         uint16_t                        etype;
4021         struct ip                       *ip = NULL;
4022         struct ip6_hdr                  *ip6 = NULL;
4023         struct tcphdr                   *th = NULL;
4024         uint32_t                        hdrlen;
4025         uint32_t                        offset;
4026         uint8_t                         buf[sizeof(struct ip6_hdr)];
4027
4028         eh = mtod(mp, struct ether_vlan_header *);
4029
4030         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4031                 hdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4032                 etype = ntohs(eh->evl_proto);
4033         } else {
4034                 hdrlen = ETHER_HDR_LEN;
4035                 etype = ntohs(eh->evl_encap_proto);
4036         }
4037
4038         if (etype == ETHERTYPE_IP) {
4039
4040                 offset = (hdrlen + sizeof (struct ip));
4041
4042                 if (mp->m_len >= offset) {
4043                         ip = (struct ip *)(mp->m_data + hdrlen);
4044                 } else {
4045                         m_copydata(mp, hdrlen, sizeof (struct ip), buf);
4046                         ip = (struct ip *)buf;
4047                 }
4048
4049                 if (ip->ip_p == IPPROTO_TCP) {
4050
4051                         hdrlen += ip->ip_hl << 2;
4052                         offset = hdrlen + 4;
4053         
4054                         if (mp->m_len >= offset) {
4055                                 th = (struct tcphdr *)(mp->m_data + hdrlen);
4056                         } else {
4057                                 m_copydata(mp, hdrlen, 4, buf);
4058                                 th = (struct tcphdr *)buf;
4059                         }
4060                 }
4061
4062         } else if (etype == ETHERTYPE_IPV6) {
4063
4064                 offset = (hdrlen + sizeof (struct ip6_hdr));
4065
4066                 if (mp->m_len >= offset) {
4067                         ip6 = (struct ip6_hdr *)(mp->m_data + hdrlen);
4068                 } else {
4069                         m_copydata(mp, hdrlen, sizeof (struct ip6_hdr), buf);
4070                         ip6 = (struct ip6_hdr *)buf;
4071                 }
4072
4073                 if (ip6->ip6_nxt == IPPROTO_TCP) {
4074
4075                         hdrlen += sizeof(struct ip6_hdr);
4076                         offset = hdrlen + 4;
4077
4078                         if (mp->m_len >= offset) {
4079                                 th = (struct tcphdr *)(mp->m_data + hdrlen);
4080                         } else {
4081                                 m_copydata(mp, hdrlen, 4, buf);
4082                                 th = (struct tcphdr *)buf;
4083                         }
4084                 }
4085         }
4086
4087         if (th != NULL) {
4088                 if ((th->th_sport == htons(3260)) ||
4089                         (th->th_dport == htons(3260)))
4090                         return 0;
4091         }
4092         return (-1);
4093 }
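
/*
 * ql_iscsi_pdu() only ever needs the first 4 bytes of the TCP header
 * (source and destination ports), which is why m_copydata() fetches
 * just 4 bytes when the header is not contiguous in the first mbuf.
 * A frame is classified as iSCSI when either port is 3260, the IANA
 * iSCSI port:
 *
 *	if ((th->th_sport == htons(3260)) || (th->th_dport == htons(3260)))
 *		(steer the frame to the iSCSI/TLV tx rings)
 */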
4094
4095 void
4096 qla_hw_async_event(qla_host_t *ha)
4097 {
4098         switch (ha->hw.aen_mb0) {
4099         case 0x8101:
4100                 (void)qla_idc_ack(ha, ha->hw.aen_mb1, ha->hw.aen_mb2,
4101                         ha->hw.aen_mb3, ha->hw.aen_mb4);
4102
4103                 break;
4104
4105         default:
4106                 break;
4107         }
4108
4109         return;
4110 }
4111
4112 #ifdef QL_LDFLASH_FW
4113 static int
4114 ql_get_minidump_template(qla_host_t *ha)
4115 {
4116         uint32_t                        err;
4117         device_t                        dev = ha->pci_dev;
4118         q80_config_md_templ_cmd_t       *md_templ;
4119         q80_config_md_templ_cmd_rsp_t   *md_templ_rsp;
4120
4121         md_templ = (q80_config_md_templ_cmd_t *) ha->hw.mbox;
4122         bzero(md_templ, (sizeof (q80_config_md_templ_cmd_t)));
4123
4124         md_templ->opcode = Q8_MBX_GET_MINIDUMP_TMPLT;
4125         md_templ->count_version = ( sizeof(q80_config_md_templ_cmd_t) >> 2);
4126         md_templ->count_version |= Q8_MBX_CMD_VERSION;
4127
4128         md_templ->buf_addr = ha->hw.dma_buf.minidump.dma_addr;
4129         md_templ->buff_size = ha->hw.dma_buf.minidump.size;
4130
4131         if (qla_mbx_cmd(ha, (uint32_t *) md_templ,
4132                 (sizeof(q80_config_md_templ_cmd_t) >> 2),
4133                  ha->hw.mbox,
4134                 (sizeof(q80_config_md_templ_cmd_rsp_t) >> 2), 0)) {
4135
4136                 device_printf(dev, "%s: failed\n", __func__);
4137
4138                 return (-1);
4139         }
4140
4141         md_templ_rsp = (q80_config_md_templ_cmd_rsp_t *) ha->hw.mbox;
4142
4143         err = Q8_MBX_RSP_STATUS(md_templ_rsp->regcnt_status);
4144
4145         if (err) {
4146                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4147                 return (-1);
4148         }
4149
4150         return (0);
4151
4152 }
4153 #endif /* #ifdef QL_LDFLASH_FW */
4154
4155 /*
4156  * Minidump related functionality 
4157  */
4158
4159 static int ql_parse_template(qla_host_t *ha);
4160
4161 static uint32_t ql_rdcrb(qla_host_t *ha,
4162                         ql_minidump_entry_rdcrb_t *crb_entry,
4163                         uint32_t * data_buff);
4164
4165 static uint32_t ql_pollrd(qla_host_t *ha,
4166                         ql_minidump_entry_pollrd_t *entry,
4167                         uint32_t * data_buff);
4168
4169 static uint32_t ql_pollrd_modify_write(qla_host_t *ha,
4170                         ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
4171                         uint32_t *data_buff);
4172
4173 static uint32_t ql_L2Cache(qla_host_t *ha,
4174                         ql_minidump_entry_cache_t *cacheEntry,
4175                         uint32_t * data_buff);
4176
4177 static uint32_t ql_L1Cache(qla_host_t *ha,
4178                         ql_minidump_entry_cache_t *cacheEntry,
4179                         uint32_t *data_buff);
4180
4181 static uint32_t ql_rdocm(qla_host_t *ha,
4182                         ql_minidump_entry_rdocm_t *ocmEntry,
4183                         uint32_t *data_buff);
4184
4185 static uint32_t ql_rdmem(qla_host_t *ha,
4186                         ql_minidump_entry_rdmem_t *mem_entry,
4187                         uint32_t *data_buff);
4188
4189 static uint32_t ql_rdrom(qla_host_t *ha,
4190                         ql_minidump_entry_rdrom_t *romEntry,
4191                         uint32_t *data_buff);
4192
4193 static uint32_t ql_rdmux(qla_host_t *ha,
4194                         ql_minidump_entry_mux_t *muxEntry,
4195                         uint32_t *data_buff);
4196
4197 static uint32_t ql_rdmux2(qla_host_t *ha,
4198                         ql_minidump_entry_mux2_t *muxEntry,
4199                         uint32_t *data_buff);
4200
4201 static uint32_t ql_rdqueue(qla_host_t *ha,
4202                         ql_minidump_entry_queue_t *queueEntry,
4203                         uint32_t *data_buff);
4204
4205 static uint32_t ql_cntrl(qla_host_t *ha,
4206                         ql_minidump_template_hdr_t *template_hdr,
4207                         ql_minidump_entry_cntrl_t *crbEntry);
4208
4209
4210 static uint32_t
4211 ql_minidump_size(qla_host_t *ha)
4212 {
4213         uint32_t i, k;
4214         uint32_t size = 0;
4215         ql_minidump_template_hdr_t *hdr;
4216
4217         hdr = (ql_minidump_template_hdr_t *)ha->hw.dma_buf.minidump.dma_b;
4218
4219         i = 0x2;
4220
4221         for (k = 1; k < QL_DBG_CAP_SIZE_ARRAY_LEN; k++) {
4222                 if (i & ha->hw.mdump_capture_mask)
4223                         size += hdr->capture_size_array[k];
4224                 i = i << 1;
4225         }
4226         return (size);
4227 }
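
/*
 * Worked example for ql_minidump_size(): bit k of mdump_capture_mask
 * selects hdr->capture_size_array[k] (bit 0 is never examined, since i
 * starts at 0x2). With a capture mask of 0x1F:
 *
 *	size = capture_size_array[1]	(bit 0x02)
 *	     + capture_size_array[2]	(bit 0x04)
 *	     + capture_size_array[3]	(bit 0x08)
 *	     + capture_size_array[4]	(bit 0x10)
 */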
4228
4229 static void
4230 ql_free_minidump_buffer(qla_host_t *ha)
4231 {
4232         if (ha->hw.mdump_buffer != NULL) {
4233                 free(ha->hw.mdump_buffer, M_QLA83XXBUF);
4234                 ha->hw.mdump_buffer = NULL;
4235                 ha->hw.mdump_buffer_size = 0;
4236         }
4237         return;
4238 }
4239
4240 static int
4241 ql_alloc_minidump_buffer(qla_host_t *ha)
4242 {
4243         ha->hw.mdump_buffer_size = ql_minidump_size(ha);
4244
4245         if (!ha->hw.mdump_buffer_size)
4246                 return (-1);
4247
4248         ha->hw.mdump_buffer = malloc(ha->hw.mdump_buffer_size, M_QLA83XXBUF,
4249                                         M_NOWAIT);
4250
4251         if (ha->hw.mdump_buffer == NULL)
4252                 return (-1);
4253
4254         return (0);
4255 }
4256
4257 static void
4258 ql_free_minidump_template_buffer(qla_host_t *ha)
4259 {
4260         if (ha->hw.mdump_template != NULL) {
4261                 free(ha->hw.mdump_template, M_QLA83XXBUF);
4262                 ha->hw.mdump_template = NULL;
4263                 ha->hw.mdump_template_size = 0;
4264         }
4265         return;
4266 }
4267
4268 static int
4269 ql_alloc_minidump_template_buffer(qla_host_t *ha)
4270 {
4271         ha->hw.mdump_template_size = ha->hw.dma_buf.minidump.size;
4272
4273         ha->hw.mdump_template = malloc(ha->hw.mdump_template_size,
4274                                         M_QLA83XXBUF, M_NOWAIT);
4275
4276         if (ha->hw.mdump_template == NULL)
4277                 return (-1);
4278
4279         return (0);
4280 }
4281
4282 static int
4283 ql_alloc_minidump_buffers(qla_host_t *ha)
4284 {
4285         int ret;
4286
4287         ret = ql_alloc_minidump_template_buffer(ha);
4288
4289         if (ret)
4290                 return (ret);
4291
4292         ret = ql_alloc_minidump_buffer(ha);
4293
4294         if (ret)
4295                 ql_free_minidump_template_buffer(ha);
4296
4297         return (ret);
4298 }
4299
4300
4301 static uint32_t
4302 ql_validate_minidump_checksum(qla_host_t *ha)
4303 {
4304         uint64_t sum = 0;
4305         int count;
4306         uint32_t *template_buff;
4307
4308         count = ha->hw.dma_buf.minidump.size / sizeof (uint32_t);
4309         template_buff = ha->hw.dma_buf.minidump.dma_b;
4310
4311         while (count-- > 0) {
4312                 sum += *template_buff++;
4313         }
4314
4315         while (sum >> 32) {
4316                 sum = (sum & 0xFFFFFFFF) + (sum >> 32);
4317         }
4318
4319         return (~sum);
4320 }
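
/*
 * This is an end-around-carry (one's-complement) sum: the template's
 * 32-bit words must fold to 0xFFFFFFFF so that the complement,
 * truncated to the 32-bit return type, is 0 for a valid template.
 * Worked example with a hypothetical two-word template:
 *
 *	words = { 0xFFFFFFFE, 0x00000001 }
 *	sum   = 0xFFFFFFFF		(no carries to fold)
 *	~sum  = 0			checksum valid
 */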
4321
4322 int
4323 ql_minidump_init(qla_host_t *ha)
4324 {
4325         int             ret = 0;
4326         uint32_t        template_size = 0;
4327         device_t        dev = ha->pci_dev;
4328
4329         /*
4330          * Get Minidump Template Size
4331          */
4332         ret = qla_get_minidump_tmplt_size(ha, &template_size);
4333
4334         if (ret || (template_size == 0)) {
4335                 device_printf(dev, "%s: failed [%d, %d]\n", __func__, ret,
4336                         template_size);
4337                 return (-1);
4338         }
4339
4340         /*
4341          * Allocate Memory for Minidump Template
4342          */
4343
4344         ha->hw.dma_buf.minidump.alignment = 8;
4345         ha->hw.dma_buf.minidump.size = template_size;
4346
4347 #ifdef QL_LDFLASH_FW
4348         if (ql_alloc_dmabuf(ha, &ha->hw.dma_buf.minidump)) {
4349
4350                 device_printf(dev, "%s: minidump dma alloc failed\n", __func__);
4351
4352                 return (-1);
4353         }
4354         ha->hw.dma_buf.flags.minidump = 1;
4355
4356         /*
4357          * Retrieve Minidump Template
4358          */
4359         ret = ql_get_minidump_template(ha);
4360 #else
4361         ha->hw.dma_buf.minidump.dma_b = ql83xx_minidump;
4362
4363 #endif /* #ifdef QL_LDFLASH_FW */
4364
4365         if (ret == 0) {
4366
4367                 ret = ql_validate_minidump_checksum(ha);
4368
4369                 if (ret == 0) {
4370
4371                         ret = ql_alloc_minidump_buffers(ha);
4372
4373                         if (ret == 0)
4374                                 ha->hw.mdump_init = 1;
4375                         else
4376                                 device_printf(dev,
4377                                         "%s: ql_alloc_minidump_buffers"
4378                                         " failed\n", __func__);
4379                 } else {
4380                         device_printf(dev, "%s: ql_validate_minidump_checksum"
4381                                 " failed\n", __func__);
4382                 }
4383         } else {
4384                 device_printf(dev, "%s: ql_get_minidump_template failed\n",
4385                          __func__);
4386         }
4387
4388         if (ret)
4389                 ql_minidump_free(ha);
4390
4391         return (ret);
4392 }
4393
4394 static void
4395 ql_minidump_free(qla_host_t *ha)
4396 {
4397         ha->hw.mdump_init = 0;
4398         if (ha->hw.dma_buf.flags.minidump) {
4399                 ha->hw.dma_buf.flags.minidump = 0;
4400                 ql_free_dmabuf(ha, &ha->hw.dma_buf.minidump);
4401         }
4402
4403         ql_free_minidump_template_buffer(ha);
4404         ql_free_minidump_buffer(ha);
4405
4406         return;
4407 }
4408
4409 void
4410 ql_minidump(qla_host_t *ha)
4411 {
4412         if (!ha->hw.mdump_init)
4413                 return;
4414
4415         if (ha->hw.mdump_done)
4416                 return;
4417
4418         ha->hw.mdump_start_seq_index = ql_stop_sequence(ha);
4419
4420         bzero(ha->hw.mdump_buffer, ha->hw.mdump_buffer_size);
4421         bzero(ha->hw.mdump_template, ha->hw.mdump_template_size);
4422
4423         bcopy(ha->hw.dma_buf.minidump.dma_b, ha->hw.mdump_template,
4424                 ha->hw.mdump_template_size);
4425
4426         ql_parse_template(ha);
4427  
4428         ql_start_sequence(ha, ha->hw.mdump_start_seq_index);
4429
4430         ha->hw.mdump_done = 1;
4431
4432         return;
4433 }
4434
4435
4436 /*
4437  * helper routines
4438  */
4439 static void 
4440 ql_entry_err_chk(ql_minidump_entry_t *entry, uint32_t esize)
4441 {
4442         if (esize != entry->hdr.entry_capture_size) {
4443                 entry->hdr.entry_capture_size = esize;
4444                 entry->hdr.driver_flags |= QL_DBG_SIZE_ERR_FLAG;
4445         }
4446         return;
4447 }
4448
4449
4450 static int 
4451 ql_parse_template(qla_host_t *ha)
4452 {
4453         uint32_t num_of_entries, buff_level, e_cnt, esize;
4454         uint32_t end_cnt, rv = 0;
4455         char *dump_buff, *dbuff;
4456         int sane_start = 0, sane_end = 0;
4457         ql_minidump_template_hdr_t *template_hdr;
4458         ql_minidump_entry_t *entry;
4459         uint32_t capture_mask; 
4460         uint32_t dump_size; 
4461
4462         /* Setup parameters */
4463         template_hdr = (ql_minidump_template_hdr_t *)ha->hw.mdump_template;
4464
4465         if (template_hdr->entry_type == TLHDR)
4466                 sane_start = 1;
4467         
4468         dump_buff = (char *) ha->hw.mdump_buffer;
4469
4470         num_of_entries = template_hdr->num_of_entries;
4471
4472         entry = (ql_minidump_entry_t *) ((char *)template_hdr 
4473                         + template_hdr->first_entry_offset );
4474
4475         template_hdr->saved_state_array[QL_OCM0_ADDR_INDX] =
4476                 template_hdr->ocm_window_array[ha->pci_func];
4477         template_hdr->saved_state_array[QL_PCIE_FUNC_INDX] = ha->pci_func;
4478
4479         capture_mask = ha->hw.mdump_capture_mask;
4480         dump_size = ha->hw.mdump_buffer_size;
4481
4482         template_hdr->driver_capture_mask = capture_mask;
4483
4484         QL_DPRINT80(ha, (ha->pci_dev,
4485                 "%s: sane_start = %d num_of_entries = %d "
4486                 "capture_mask = 0x%x dump_size = %d \n", 
4487                 __func__, sane_start, num_of_entries, capture_mask, dump_size));
4488
4489         for (buff_level = 0, e_cnt = 0; e_cnt < num_of_entries; e_cnt++) {
4490
4491                 /*
4492                  * If the entry's capture_mask does not match the requested mask,
4493                  * skip the entry after marking the driver_flags indicator.
4494                  */
4495                 
4496                 if (!(entry->hdr.entry_capture_mask & capture_mask)) {
4497
4498                         entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4499                         entry = (ql_minidump_entry_t *) ((char *) entry
4500                                         + entry->hdr.entry_size);
4501                         continue;
4502                 }
4503
4504                 /*
4505                  * This is ONLY needed in implementations where
4506                  * the capture buffer allocated is too small to capture
4507                  * all of the required entries for a given capture mask.
4508                  * We need to empty the buffer contents to a file
4509                  * if possible, before processing the next entry.
4510                  * If the buff_full_flag is set, no further capture will happen
4511                  * and all remaining non-control entries will be skipped.
4512                  */
4513                 if (entry->hdr.entry_capture_size != 0) {
4514                         if ((buff_level + entry->hdr.entry_capture_size) >
4515                                 dump_size) {
4516                                 /*  Try to recover by emptying buffer to file */
4517                                 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4518                                 entry = (ql_minidump_entry_t *) ((char *) entry
4519                                                 + entry->hdr.entry_size);
4520                                 continue;
4521                         }
4522                 }
4523
4524                 /*
4525                  * Decode the entry type and process it accordingly
4526                  */

                switch (entry->hdr.entry_type) {
                case RDNOP:
                        break;

                case RDEND:
                        if (sane_end == 0) {
                                end_cnt = e_cnt;
                        }
                        sane_end++;
                        break;

                case RDCRB:
                        dbuff = dump_buff + buff_level;
                        esize = ql_rdcrb(ha, (void *)entry, (void *)dbuff);
                        ql_entry_err_chk(entry, esize);
                        buff_level += esize;
                        break;

                case POLLRD:
                        dbuff = dump_buff + buff_level;
                        esize = ql_pollrd(ha, (void *)entry, (void *)dbuff);
                        ql_entry_err_chk(entry, esize);
                        buff_level += esize;
                        break;

                case POLLRDMWR:
                        dbuff = dump_buff + buff_level;
                        esize = ql_pollrd_modify_write(ha, (void *)entry,
                                        (void *)dbuff);
                        ql_entry_err_chk(entry, esize);
                        buff_level += esize;
                        break;

                case L2ITG:
                case L2DTG:
                case L2DAT:
                case L2INS:
                        dbuff = dump_buff + buff_level;
                        esize = ql_L2Cache(ha, (void *)entry, (void *)dbuff);
                        if (esize == -1) {
                                entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
                        } else {
                                ql_entry_err_chk(entry, esize);
                                buff_level += esize;
                        }
                        break;

                case L1DAT:
                case L1INS:
                        dbuff = dump_buff + buff_level;
                        esize = ql_L1Cache(ha, (void *)entry, (void *)dbuff);
                        ql_entry_err_chk(entry, esize);
                        buff_level += esize;
                        break;

                case RDOCM:
                        dbuff = dump_buff + buff_level;
                        esize = ql_rdocm(ha, (void *)entry, (void *)dbuff);
                        ql_entry_err_chk(entry, esize);
                        buff_level += esize;
                        break;

                case RDMEM:
                        dbuff = dump_buff + buff_level;
                        esize = ql_rdmem(ha, (void *)entry, (void *)dbuff);
                        ql_entry_err_chk(entry, esize);
                        buff_level += esize;
                        break;

                case BOARD:
                case RDROM:
                        dbuff = dump_buff + buff_level;
                        esize = ql_rdrom(ha, (void *)entry, (void *)dbuff);
                        ql_entry_err_chk(entry, esize);
                        buff_level += esize;
                        break;

                case RDMUX:
                        dbuff = dump_buff + buff_level;
                        esize = ql_rdmux(ha, (void *)entry, (void *)dbuff);
                        ql_entry_err_chk(entry, esize);
                        buff_level += esize;
                        break;

                case RDMUX2:
                        dbuff = dump_buff + buff_level;
                        esize = ql_rdmux2(ha, (void *)entry, (void *)dbuff);
                        ql_entry_err_chk(entry, esize);
                        buff_level += esize;
                        break;

                case QUEUE:
                        dbuff = dump_buff + buff_level;
                        esize = ql_rdqueue(ha, (void *)entry, (void *)dbuff);
                        ql_entry_err_chk(entry, esize);
                        buff_level += esize;
                        break;

                case CNTRL:
                        if ((rv = ql_cntrl(ha, template_hdr, (void *)entry))) {
                                entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
                        }
                        break;
                default:
                        entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
                        break;
                }
                /* next entry in the template */
                entry = (ql_minidump_entry_t *) ((char *) entry
                                                + entry->hdr.entry_size);
        }

        if (!sane_start || (sane_end > 1)) {
                device_printf(ha->pci_dev,
                        "\n%s: Template configuration error. Check Template\n",
                        __func__);
        }

        QL_DPRINT80(ha, (ha->pci_dev, "%s: Minidump num of entries = %d\n",
                __func__, template_hdr->num_of_entries));

        return (0);
}

/*
 * Read CRB operation.
 */
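
/*
 * ql_rdcrb() walks 'op_count' CRB registers starting at 'addr',
 * advancing by 'addr_stride', and records an (address, value) pair for
 * each read. Capture buffer layout (two 32-bit words per op):
 *
 *      data_buff[0] = addr             data_buff[1] = value @ addr
 *      data_buff[2] = addr + stride    data_buff[3] = value @ addr + stride
 *      ...
 */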
static uint32_t
ql_rdcrb(qla_host_t *ha, ql_minidump_entry_rdcrb_t *crb_entry,
        uint32_t *data_buff)
{
        int loop_cnt;
        int ret;
        uint32_t op_count, addr, stride, value = 0;

        addr = crb_entry->addr;
        op_count = crb_entry->op_count;
        stride = crb_entry->addr_stride;

        for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {

                ret = ql_rdwr_indreg32(ha, addr, &value, 1);

                if (ret)
                        return (0);

                *data_buff++ = addr;
                *data_buff++ = value;
                addr = addr + stride;
        }

        /*
         * Return the amount of data written, in bytes.
         */
        return (op_count * (2 * sizeof(uint32_t)));
}

/*
 * Handle L2 Cache.
 */
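
/*
 * For each of 'op_count' iterations: write the current tag into
 * tag_reg_addr, optionally write write_value to the control register,
 * optionally poll the control register (one millisecond per poll_wait
 * tick) until the poll_mask bits clear, then read read_addr_cnt words
 * starting at read_addr. Returns the number of bytes captured, 0 on a
 * register-access failure, or -1 if the control-register poll times out.
 */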
static uint32_t
ql_L2Cache(qla_host_t *ha, ql_minidump_entry_cache_t *cacheEntry,
        uint32_t *data_buff)
{
        int i, k;
        int loop_cnt;
        int ret;

        uint32_t read_value;
        uint32_t addr, read_addr, cntrl_addr, tag_reg_addr, cntl_value_w;
        uint32_t tag_value, read_cnt;
        volatile uint8_t cntl_value_r;
        long timeout;
        uint32_t data;

        loop_cnt = cacheEntry->op_count;

        read_addr = cacheEntry->read_addr;
        cntrl_addr = cacheEntry->control_addr;
        cntl_value_w = (uint32_t) cacheEntry->write_value;

        tag_reg_addr = cacheEntry->tag_reg_addr;

        tag_value = cacheEntry->init_tag_value;
        read_cnt = cacheEntry->read_addr_cnt;

        for (i = 0; i < loop_cnt; i++) {

                ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
                if (ret)
                        return (0);

                if (cacheEntry->write_value != 0) {

                        ret = ql_rdwr_indreg32(ha, cntrl_addr,
                                        &cntl_value_w, 0);
                        if (ret)
                                return (0);
                }

                if (cacheEntry->poll_mask != 0) {

                        timeout = cacheEntry->poll_wait;

                        ret = ql_rdwr_indreg32(ha, cntrl_addr, &data, 1);
                        if (ret)
                                return (0);

                        cntl_value_r = (uint8_t)data;

                        while ((cntl_value_r & cacheEntry->poll_mask) != 0) {

                                if (timeout) {
                                        qla_mdelay(__func__, 1);
                                        timeout--;
                                } else
                                        break;

                                ret = ql_rdwr_indreg32(ha, cntrl_addr,
                                                &data, 1);
                                if (ret)
                                        return (0);

                                cntl_value_r = (uint8_t)data;
                        }
                        if (!timeout) {
                                /*
                                 * Report a timeout error: the core dump
                                 * capture failed. Skip the remaining entries,
                                 * write the buffer out to a file, and use the
                                 * driver-specific fields in the template
                                 * header to report this error.
                                 */
                                return (-1);
                        }
                }

                addr = read_addr;
                for (k = 0; k < read_cnt; k++) {

                        ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
                        if (ret)
                                return (0);

                        *data_buff++ = read_value;
                        addr += cacheEntry->read_addr_stride;
                }

                tag_value += cacheEntry->tag_value_stride;
        }

        return (read_cnt * loop_cnt * sizeof(uint32_t));
}

/*
 * Handle L1 Cache.
 */
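
/*
 * Same tag/select/read sequence as ql_L2Cache(), except that the
 * control-register write is unconditional and there is no polling, so
 * this variant cannot time out.
 */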
static uint32_t
ql_L1Cache(qla_host_t *ha,
        ql_minidump_entry_cache_t *cacheEntry,
        uint32_t *data_buff)
{
        int ret;
        int i, k;
        int loop_cnt;

        uint32_t read_value;
        uint32_t addr, read_addr, cntrl_addr, tag_reg_addr;
        uint32_t tag_value, read_cnt;
        uint32_t cntl_value_w;

        loop_cnt = cacheEntry->op_count;

        read_addr = cacheEntry->read_addr;
        cntrl_addr = cacheEntry->control_addr;
        cntl_value_w = (uint32_t) cacheEntry->write_value;

        tag_reg_addr = cacheEntry->tag_reg_addr;

        tag_value = cacheEntry->init_tag_value;
        read_cnt = cacheEntry->read_addr_cnt;

        for (i = 0; i < loop_cnt; i++) {

                ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
                if (ret)
                        return (0);

                ret = ql_rdwr_indreg32(ha, cntrl_addr, &cntl_value_w, 0);
                if (ret)
                        return (0);

                addr = read_addr;
                for (k = 0; k < read_cnt; k++) {

                        ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
                        if (ret)
                                return (0);

                        *data_buff++ = read_value;
                        addr += cacheEntry->read_addr_stride;
                }

                tag_value += cacheEntry->tag_value_stride;
        }

        return (read_cnt * loop_cnt * sizeof(uint32_t));
}

/*
 * Reading OCM memory
 */
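
/*
 * OCM is read with direct READ_REG32() accesses rather than through
 * ql_rdwr_indreg32(), so there is no failure path here; the return
 * value is always op_count * sizeof(uint32_t) bytes.
 */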
static uint32_t
ql_rdocm(qla_host_t *ha,
        ql_minidump_entry_rdocm_t *ocmEntry,
        uint32_t *data_buff)
{
        int i, loop_cnt;
        volatile uint32_t addr;
        volatile uint32_t value;

        addr = ocmEntry->read_addr;
        loop_cnt = ocmEntry->op_count;

        for (i = 0; i < loop_cnt; i++) {
                value = READ_REG32(ha, addr);
                *data_buff++ = value;
                addr += ocmEntry->read_addr_stride;
        }
        return (loop_cnt * sizeof(value));
}

/*
 * Read memory
 */
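
/*
 * Off-chip memory is read 16 bytes (four 32-bit words) at a time via
 * ql_rdwr_offchip_mem(); read_data_size is assumed to be a multiple of
 * 16, since any remainder is truncated by the loop-count division.
 */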
static uint32_t
ql_rdmem(qla_host_t *ha,
        ql_minidump_entry_rdmem_t *mem_entry,
        uint32_t *data_buff)
{
        int ret;
        int i, loop_cnt;
        volatile uint32_t addr;
        q80_offchip_mem_val_t val;

        addr = mem_entry->read_addr;

        /* size in bytes / 16 */
        loop_cnt = mem_entry->read_data_size / (sizeof(uint32_t) * 4);

        for (i = 0; i < loop_cnt; i++) {

                ret = ql_rdwr_offchip_mem(ha, (addr & 0x0ffffffff), &val, 1);
                if (ret)
                        return (0);

                *data_buff++ = val.data_lo;
                *data_buff++ = val.data_hi;
                *data_buff++ = val.data_ulo;
                *data_buff++ = val.data_uhi;

                addr += (sizeof(uint32_t) * 4);
        }

        return (loop_cnt * (sizeof(uint32_t) * 4));
}

/*
 * Read Rom
 */
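
/*
 * Flash contents are captured one 32-bit word at a time with
 * ql_rd_flash32(); read_data_size is in bytes and is assumed to be a
 * multiple of sizeof(uint32_t).
 */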
static uint32_t
ql_rdrom(qla_host_t *ha,
        ql_minidump_entry_rdrom_t *romEntry,
        uint32_t *data_buff)
{
        int ret;
        int i, loop_cnt;
        uint32_t addr;
        uint32_t value;

        addr = romEntry->read_addr;
        loop_cnt = romEntry->read_data_size; /* This is size in bytes */
        loop_cnt /= sizeof(value);

        for (i = 0; i < loop_cnt; i++) {

                ret = ql_rd_flash32(ha, addr, &value);
                if (ret)
                        return (0);

                *data_buff++ = value;
                addr += sizeof(value);
        }

        return (loop_cnt * sizeof(value));
}

/*
 * Read MUX data
 */
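
/*
 * For each op: write the current select value to select_addr, read one
 * word from read_addr, and record the (select value, data) pair before
 * stepping the select value by select_value_stride.
 */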
static uint32_t
ql_rdmux(qla_host_t *ha,
        ql_minidump_entry_mux_t *muxEntry,
        uint32_t *data_buff)
{
        int ret;
        int loop_cnt;
        uint32_t read_value, sel_value;
        uint32_t read_addr, select_addr;

        select_addr = muxEntry->select_addr;
        sel_value = muxEntry->select_value;
        read_addr = muxEntry->read_addr;

        for (loop_cnt = 0; loop_cnt < muxEntry->op_count; loop_cnt++) {

                ret = ql_rdwr_indreg32(ha, select_addr, &sel_value, 0);
                if (ret)
                        return (0);

                ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
                if (ret)
                        return (0);

                *data_buff++ = sel_value;
                *data_buff++ = read_value;

                sel_value += muxEntry->select_value_stride;
        }

        return (loop_cnt * (2 * sizeof(uint32_t)));
}
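
/*
 * Read MUX2 data.
 *
 * Like ql_rdmux(), but each iteration runs two select/read sequences:
 * select_value_1 (then select_value_2) is written to select_addr_1, its
 * masked value is written to select_addr_2, and one word is read from
 * read_addr, so four words (two select/data pairs) are captured per
 * iteration.
 */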
static uint32_t
ql_rdmux2(qla_host_t *ha,
        ql_minidump_entry_mux2_t *muxEntry,
        uint32_t *data_buff)
{
        int ret;
        int loop_cnt;

        uint32_t select_addr_1, select_addr_2;
        uint32_t select_value_1, select_value_2;
        uint32_t select_value_count, select_value_mask;
        uint32_t read_addr, read_value;

        select_addr_1 = muxEntry->select_addr_1;
        select_addr_2 = muxEntry->select_addr_2;
        select_value_1 = muxEntry->select_value_1;
        select_value_2 = muxEntry->select_value_2;
        select_value_count = muxEntry->select_value_count;
        select_value_mask = muxEntry->select_value_mask;

        read_addr = muxEntry->read_addr;

        for (loop_cnt = 0; loop_cnt < select_value_count; loop_cnt++) {

                uint32_t temp_sel_val;

                ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_1, 0);
                if (ret)
                        return (0);

                temp_sel_val = select_value_1 & select_value_mask;

                ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
                if (ret)
                        return (0);

                ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
                if (ret)
                        return (0);

                *data_buff++ = temp_sel_val;
                *data_buff++ = read_value;

                ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_2, 0);
                if (ret)
                        return (0);

                temp_sel_val = select_value_2 & select_value_mask;

                ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
                if (ret)
                        return (0);

                ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
                if (ret)
                        return (0);

                *data_buff++ = temp_sel_val;
                *data_buff++ = read_value;

                select_value_1 += muxEntry->select_value_stride;
                select_value_2 += muxEntry->select_value_stride;
        }

        return (loop_cnt * (4 * sizeof(uint32_t)));
}

/*
 * Handling Queue State Reads.
 */
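
/*
 * For each queue: write the current queue_id to select_addr, then read
 * read_addr_cnt words starting at read_addr (stride read_addr_stride).
 * queue_id advances by queue_id_stride between iterations.
 */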
static uint32_t
ql_rdqueue(qla_host_t *ha,
        ql_minidump_entry_queue_t *queueEntry,
        uint32_t *data_buff)
{
        int ret;
        int loop_cnt, k;
        uint32_t read_value;
        uint32_t read_addr, read_stride, select_addr;
        uint32_t queue_id, read_cnt;

        read_cnt = queueEntry->read_addr_cnt;
        read_stride = queueEntry->read_addr_stride;
        select_addr = queueEntry->select_addr;

        for (loop_cnt = 0, queue_id = 0; loop_cnt < queueEntry->op_count;
                loop_cnt++) {

                ret = ql_rdwr_indreg32(ha, select_addr, &queue_id, 0);
                if (ret)
                        return (0);

                read_addr = queueEntry->read_addr;

                for (k = 0; k < read_cnt; k++) {

                        ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
                        if (ret)
                                return (0);

                        *data_buff++ = read_value;
                        read_addr += read_stride;
                }

                queue_id += queueEntry->queue_id_stride;
        }

        return (loop_cnt * (read_cnt * sizeof(uint32_t)));
}

/*
 * Handling control entries.
 */
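
/*
 * crbEntry->opcode is a bitmask; the requested operations are applied
 * in a fixed order (WR, RW, AND/OR, OR, POLL, RDSTATE, WRSTATE,
 * MDSTATE), each clearing its bit once handled. Control entries produce
 * no dump data: the return value is 0 on success (or on a
 * register-access failure) and -1 only when a POLL operation times out,
 * which the caller treats as "skip this entry".
 */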
static uint32_t
ql_cntrl(qla_host_t *ha,
        ql_minidump_template_hdr_t *template_hdr,
        ql_minidump_entry_cntrl_t *crbEntry)
{
        int ret;
        int count;
        uint32_t opcode, read_value, addr, entry_addr;
        long timeout;

        entry_addr = crbEntry->addr;

        for (count = 0; count < crbEntry->op_count; count++) {
                opcode = crbEntry->opcode;

                if (opcode & QL_DBG_OPCODE_WR) {

                        ret = ql_rdwr_indreg32(ha, entry_addr,
                                        &crbEntry->value_1, 0);
                        if (ret)
                                return (0);

                        opcode &= ~QL_DBG_OPCODE_WR;
                }

                if (opcode & QL_DBG_OPCODE_RW) {

                        ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
                        if (ret)
                                return (0);

                        ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
                        if (ret)
                                return (0);

                        opcode &= ~QL_DBG_OPCODE_RW;
                }

                if (opcode & QL_DBG_OPCODE_AND) {

                        ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
                        if (ret)
                                return (0);

                        read_value &= crbEntry->value_2;
                        opcode &= ~QL_DBG_OPCODE_AND;

                        if (opcode & QL_DBG_OPCODE_OR) {
                                read_value |= crbEntry->value_3;
                                opcode &= ~QL_DBG_OPCODE_OR;
                        }

                        ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
                        if (ret)
                                return (0);
                }

                if (opcode & QL_DBG_OPCODE_OR) {

                        ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
                        if (ret)
                                return (0);

                        read_value |= crbEntry->value_3;

                        ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
                        if (ret)
                                return (0);

                        opcode &= ~QL_DBG_OPCODE_OR;
                }

                if (opcode & QL_DBG_OPCODE_POLL) {

                        opcode &= ~QL_DBG_OPCODE_POLL;
                        timeout = crbEntry->poll_timeout;
                        addr = entry_addr;

                        ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
                        if (ret)
                                return (0);

                        while ((read_value & crbEntry->value_2)
                                != crbEntry->value_1) {

                                if (timeout) {
                                        qla_mdelay(__func__, 1);
                                        timeout--;
                                } else
                                        break;

                                ret = ql_rdwr_indreg32(ha, addr,
                                                &read_value, 1);
                                if (ret)
                                        return (0);
                        }

                        if (!timeout) {
                                /*
                                 * Report a timeout error: the core dump
                                 * capture failed. Skip the remaining entries,
                                 * write the buffer out to a file, and use the
                                 * driver-specific fields in the template
                                 * header to report this error.
                                 */
                                return (-1);
                        }
                }

                if (opcode & QL_DBG_OPCODE_RDSTATE) {
                        /*
                         * Decide which address to use.
                         */
                        if (crbEntry->state_index_a) {
                                addr = template_hdr->saved_state_array[
                                                crbEntry->state_index_a];
                        } else {
                                addr = entry_addr;
                        }

                        ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
                        if (ret)
                                return (0);

                        template_hdr->saved_state_array[crbEntry->state_index_v]
                                        = read_value;
                        opcode &= ~QL_DBG_OPCODE_RDSTATE;
                }

                if (opcode & QL_DBG_OPCODE_WRSTATE) {
                        /*
                         * Decide which value to use.
                         */
                        if (crbEntry->state_index_v) {
                                read_value = template_hdr->saved_state_array[
                                                crbEntry->state_index_v];
                        } else {
                                read_value = crbEntry->value_1;
                        }
                        /*
                         * Decide which address to use.
                         */
                        if (crbEntry->state_index_a) {
                                addr = template_hdr->saved_state_array[
                                                crbEntry->state_index_a];
                        } else {
                                addr = entry_addr;
                        }

                        ret = ql_rdwr_indreg32(ha, addr, &read_value, 0);
                        if (ret)
                                return (0);

                        opcode &= ~QL_DBG_OPCODE_WRSTATE;
                }

                if (opcode & QL_DBG_OPCODE_MDSTATE) {
                        /* Read value from saved state using index */
                        read_value = template_hdr->saved_state_array[
                                                crbEntry->state_index_v];

                        read_value <<= crbEntry->shl; /* Shift left operation */
                        read_value >>= crbEntry->shr; /* Shift right operation */

                        if (crbEntry->value_2) {
                                /* check if AND mask is provided */
                                read_value &= crbEntry->value_2;
                        }

                        read_value |= crbEntry->value_3; /* OR operation */
                        read_value += crbEntry->value_1; /* increment op */

                        /* Write value back to state area. */

                        template_hdr->saved_state_array[crbEntry->state_index_v]
                                        = read_value;
                        opcode &= ~QL_DBG_OPCODE_MDSTATE;
                }

                entry_addr += crbEntry->addr_stride;
        }

        return (0);
}

/*
 * Handling rd poll entry.
 */
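
/*
 * For each op: write select_value to select_addr, spin until the mask
 * bits are set at select_addr (up to 'poll' reads, with no delay
 * between them), then read one word from read_addr and record the
 * (select value, data) pair.
 */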
static uint32_t
ql_pollrd(qla_host_t *ha, ql_minidump_entry_pollrd_t *entry,
        uint32_t *data_buff)
{
        int ret;
        int loop_cnt;
        uint32_t op_count, select_addr, select_value_stride, select_value;
        uint32_t read_addr, poll, mask, data_size, data;
        uint32_t wait_count = 0;

        select_addr            = entry->select_addr;
        read_addr              = entry->read_addr;
        select_value           = entry->select_value;
        select_value_stride    = entry->select_value_stride;
        op_count               = entry->op_count;
        poll                   = entry->poll;
        mask                   = entry->mask;
        data_size              = entry->data_size;

        for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {

                ret = ql_rdwr_indreg32(ha, select_addr, &select_value, 0);
                if (ret)
                        return (0);

                wait_count = 0;

                while (wait_count < poll) {

                        uint32_t temp;

                        ret = ql_rdwr_indreg32(ha, select_addr, &temp, 1);
                        if (ret)
                                return (0);

                        if ((temp & mask) != 0) {
                                break;
                        }
                        wait_count++;
                }

                if (wait_count == poll) {
                        device_printf(ha->pci_dev,
                                "%s: Error in processing entry\n", __func__);
                        device_printf(ha->pci_dev,
                                "%s: wait_count <0x%x> poll <0x%x>\n",
                                __func__, wait_count, poll);
                        return (0);
                }

                ret = ql_rdwr_indreg32(ha, read_addr, &data, 1);
                if (ret)
                        return (0);

                *data_buff++ = select_value;
                *data_buff++ = data;
                select_value = select_value + select_value_stride;
        }

        /*
         * Return the amount of data written, in bytes.
         */
        return (loop_cnt * (2 * sizeof(uint32_t)));
}

/*
 * Handling rd modify write poll entry.
 */
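
/*
 * Sequence: write value_1 to addr_1, poll addr_1 for the mask bits,
 * read-modify-write addr_2 with modify_mask, write value_2 to addr_1,
 * and poll once more before recording the (addr_2, data) pair. Note
 * that the result of the second poll is not checked.
 */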
static uint32_t
ql_pollrd_modify_write(qla_host_t *ha,
        ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
        uint32_t *data_buff)
{
        int ret;
        uint32_t addr_1, addr_2, value_1, value_2, data;
        uint32_t poll, mask, data_size, modify_mask;
        uint32_t wait_count = 0;

        addr_1          = entry->addr_1;
        addr_2          = entry->addr_2;
        value_1         = entry->value_1;
        value_2         = entry->value_2;

        poll            = entry->poll;
        mask            = entry->mask;
        modify_mask     = entry->modify_mask;
        data_size       = entry->data_size;

        ret = ql_rdwr_indreg32(ha, addr_1, &value_1, 0);
        if (ret)
                return (0);

        wait_count = 0;
        while (wait_count < poll) {

                uint32_t temp;

                ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
                if (ret)
                        return (0);

                if ((temp & mask) != 0) {
                        break;
                }
                wait_count++;
        }

        if (wait_count == poll) {
                device_printf(ha->pci_dev, "%s: Error in processing entry\n",
                        __func__);
                /* Nothing was captured for this entry; report zero bytes. */
                return (0);
        }

        ret = ql_rdwr_indreg32(ha, addr_2, &data, 1);
        if (ret)
                return (0);

        data = (data & modify_mask);

        ret = ql_rdwr_indreg32(ha, addr_2, &data, 0);
        if (ret)
                return (0);

        ret = ql_rdwr_indreg32(ha, addr_1, &value_2, 0);
        if (ret)
                return (0);

        /* Poll again */
        wait_count = 0;
        while (wait_count < poll) {

                uint32_t temp;

                ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
                if (ret)
                        return (0);

                if ((temp & mask) != 0) {
                        break;
                }
                wait_count++;
        }

        *data_buff++ = addr_2;
        *data_buff++ = data;

        /*
         * Return the amount of data written, in bytes.
         */
        return (2 * sizeof(uint32_t));
}