/*
 * Copyright (c) 2013-2016 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: ql_hw.c
 * Author: David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 * Content: Contains hardware-dependent functions
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "ql_os.h"
#include "ql_hw.h"
#include "ql_def.h"
#include "ql_inline.h"
#include "ql_ver.h"
#include "ql_glbl.h"
#include "ql_dbg.h"
#include "ql_minidump.h"

/*
 * Static Functions
 */

static void qla_del_rcv_cntxt(qla_host_t *ha);
static int qla_init_rcv_cntxt(qla_host_t *ha);
static void qla_del_xmt_cntxt(qla_host_t *ha);
static int qla_init_xmt_cntxt(qla_host_t *ha);
static int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
        uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause);
static int qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx,
        uint32_t num_intrs, uint32_t create);
static int qla_config_rss(qla_host_t *ha, uint16_t cntxt_id);
static int qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id,
        int tenable, int rcv);
static int qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode);
static int qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id);

static int qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd,
                uint8_t *hdr);
static int qla_hw_add_all_mcast(qla_host_t *ha);
static int qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds);

static int qla_init_nic_func(qla_host_t *ha);
static int qla_stop_nic_func(qla_host_t *ha);
static int qla_query_fw_dcbx_caps(qla_host_t *ha);
static int qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits);
static int qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits);
static int qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode);
static int qla_get_cam_search_mode(qla_host_t *ha);

static void ql_minidump_free(qla_host_t *ha);

#ifdef QL_DBG

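/*
 * Name: qla_stop_pegs
 * Function: Debug-only helper that halts peg processors 0 through 4 by
 *      writing 1 into each Q8_CRB_PEG_* control register; used to force
 *      a firmware failure when testing error recovery.
 */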
static void
qla_stop_pegs(qla_host_t *ha)
{
        uint32_t val = 1;

        ql_rdwr_indreg32(ha, Q8_CRB_PEG_0, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_1, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_2, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_3, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_4, &val, 0);
        device_printf(ha->pci_dev, "%s PEGS HALTED!!!!!\n", __func__);
}

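/*
 * Name: qla_sysctl_stop_pegs
 * Function: Sysctl handler behind the "peg_stop" OID; writing 1 halts
 *      the pegs under the driver lock. For example (assuming the device
 *      attaches as ql0; the exact OID prefix depends on the attachment):
 *
 *              sysctl dev.ql.0.peg_stop=1
 */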
static int
qla_sysctl_stop_pegs(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        if (ret == 1) {
                ha = (qla_host_t *)arg1;
                if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
                        qla_stop_pegs(ha);
                        QLA_UNLOCK(ha, __func__);
                }
        }

        return (err);
}
#endif /* #ifdef QL_DBG */

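/*
 * Name: qla_validate_set_port_cfg_bit
 * Function: Sanity checks a port_cfg value: bits 0-3 (DCBX enable) may
 *      only be 0 or 1, while bits 4-7 (pause type) and bits 8-11
 *      (standard pause direction) may only be 0, 1 or 2.
 */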
static int
qla_validate_set_port_cfg_bit(uint32_t bits)
{
        if ((bits & 0xF) > 1)
                return (-1);

        if (((bits >> 4) & 0xF) > 2)
                return (-1);

        if (((bits >> 8) & 0xF) > 2)
                return (-1);

        return (0);
}

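/*
 * Name: qla_sysctl_port_cfg
 * Function: Sysctl handler behind the "port_cfg" OID. A valid value is
 *      parsed into DCBX/pause cfg_bits and pushed to the firmware via
 *      qla_set_port_config(); any other value simply reads back the
 *      current configuration. For example, writing 0x11 would request
 *      DCBX enabled with standard pause in both directions (an
 *      illustrative value, per the bit layout documented in
 *      ql_hw_add_sysctls() below).
 */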
static int
qla_sysctl_port_cfg(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;
        uint32_t cfg_bits;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        ha = (qla_host_t *)arg1;

        if (qla_validate_set_port_cfg_bit((uint32_t)ret) == 0) {
                err = qla_get_port_config(ha, &cfg_bits);

                if (err)
                        goto qla_sysctl_set_port_cfg_exit;

                if (ret & 0x1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_DCBX_ENABLE;
                } else {
                        cfg_bits &= ~Q8_PORT_CFG_BITS_DCBX_ENABLE;
                }

                ret = ret >> 4;
                cfg_bits &= ~Q8_PORT_CFG_BITS_PAUSE_CFG_MASK;

                if ((ret & 0xF) == 0) {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_DISABLED;
                } else if ((ret & 0xF) == 1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_STD;
                } else {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_PPM;
                }

                ret = ret >> 4;
                cfg_bits &= ~Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK;

                if (ret == 0) {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT_RCV;
                } else if (ret == 1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT;
                } else {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_RCV;
                }

                if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
                        err = qla_set_port_config(ha, cfg_bits);
                        QLA_UNLOCK(ha, __func__);
                } else {
                        device_printf(ha->pci_dev, "%s: failed\n", __func__);
                }
        } else {
                if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
                        err = qla_get_port_config(ha, &cfg_bits);
                        QLA_UNLOCK(ha, __func__);
                } else {
                        device_printf(ha->pci_dev, "%s: failed\n", __func__);
                }
        }

qla_sysctl_set_port_cfg_exit:
        return (err);
}

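/*
 * Name: qla_sysctl_set_cam_search_mode
 * Function: Sysctl handler behind the "set_cam_search_mode" OID; accepts
 *      only Q8_HW_CONFIG_CAM_SEARCH_MODE_INTERNAL or
 *      Q8_HW_CONFIG_CAM_SEARCH_MODE_AUTO and passes the value to the
 *      firmware under the driver lock.
 */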
static int
qla_sysctl_set_cam_search_mode(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        ha = (qla_host_t *)arg1;

        if ((ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_INTERNAL) ||
                (ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_AUTO)) {
                if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
                        err = qla_set_cam_search_mode(ha, (uint32_t)ret);
                        QLA_UNLOCK(ha, __func__);
                } else {
                        device_printf(ha->pci_dev, "%s: failed\n", __func__);
                }
        } else {
                device_printf(ha->pci_dev, "%s: ret = %d\n", __func__, ret);
        }

        return (err);
}

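/*
 * Name: qla_sysctl_get_cam_search_mode
 * Function: Sysctl handler behind the "get_cam_search_mode" OID; any
 *      write triggers a query of the current CAM search mode via
 *      qla_get_cam_search_mode() under the driver lock.
 */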
static int
qla_sysctl_get_cam_search_mode(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        ha = (qla_host_t *)arg1;
        if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
                err = qla_get_cam_search_mode(ha);
                QLA_UNLOCK(ha, __func__);
        } else {
                device_printf(ha->pci_dev, "%s: failed\n", __func__);
        }

        return (err);
}

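/*
 * Name: qlnx_add_hw_mac_stats_sysctls
 * Function: Registers the read-only "stats_hw_mac" sysctl node exposing
 *      the MAC transmit/receive/e-switch counters in ha->hw.mac.
 */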
static void
qlnx_add_hw_mac_stats_sysctls(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid       *ctx_oid;

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_mac",
                        CTLFLAG_RD, NULL, "stats_hw_mac");
        children = SYSCTL_CHILDREN(ctx_oid);

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_frames",
                CTLFLAG_RD, &ha->hw.mac.xmt_frames,
                "xmt_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_bytes,
                "xmt_bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_mcast_pkts",
                CTLFLAG_RD, &ha->hw.mac.xmt_mcast_pkts,
                "xmt_mcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_bcast_pkts",
                CTLFLAG_RD, &ha->hw.mac.xmt_bcast_pkts,
                "xmt_bcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pause_frames",
                CTLFLAG_RD, &ha->hw.mac.xmt_pause_frames,
                "xmt_pause_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_cntrl_pkts",
                CTLFLAG_RD, &ha->hw.mac.xmt_cntrl_pkts,
                "xmt_cntrl_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_64bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_64bytes,
                "xmt_pkt_lt_64bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_127bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_127bytes,
                "xmt_pkt_lt_127bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_255bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_255bytes,
                "xmt_pkt_lt_255bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_511bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_511bytes,
                "xmt_pkt_lt_511bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_1023bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_1023bytes,
                "xmt_pkt_lt_1023bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_1518bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_1518bytes,
                "xmt_pkt_lt_1518bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_gt_1518bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_gt_1518bytes,
                "xmt_pkt_gt_1518bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_frames",
                CTLFLAG_RD, &ha->hw.mac.rcv_frames,
                "rcv_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_bytes,
                "rcv_bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_mcast_pkts",
                CTLFLAG_RD, &ha->hw.mac.rcv_mcast_pkts,
                "rcv_mcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_bcast_pkts",
                CTLFLAG_RD, &ha->hw.mac.rcv_bcast_pkts,
                "rcv_bcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pause_frames",
                CTLFLAG_RD, &ha->hw.mac.rcv_pause_frames,
                "rcv_pause_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_cntrl_pkts",
                CTLFLAG_RD, &ha->hw.mac.rcv_cntrl_pkts,
                "rcv_cntrl_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_64bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_64bytes,
                "rcv_pkt_lt_64bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_127bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_127bytes,
                "rcv_pkt_lt_127bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_255bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_255bytes,
                "rcv_pkt_lt_255bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_511bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_511bytes,
                "rcv_pkt_lt_511bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_1023bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_1023bytes,
                "rcv_pkt_lt_1023bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_1518bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_1518bytes,
                "rcv_pkt_lt_1518bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_gt_1518bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_gt_1518bytes,
                "rcv_pkt_gt_1518bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_len_error",
                CTLFLAG_RD, &ha->hw.mac.rcv_len_error,
                "rcv_len_error");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_len_small",
                CTLFLAG_RD, &ha->hw.mac.rcv_len_small,
                "rcv_len_small");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_len_large",
                CTLFLAG_RD, &ha->hw.mac.rcv_len_large,
                "rcv_len_large");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_jabber",
                CTLFLAG_RD, &ha->hw.mac.rcv_jabber,
                "rcv_jabber");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_dropped",
                CTLFLAG_RD, &ha->hw.mac.rcv_dropped,
                "rcv_dropped");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "fcs_error",
                CTLFLAG_RD, &ha->hw.mac.fcs_error,
                "fcs_error");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "align_error",
                CTLFLAG_RD, &ha->hw.mac.align_error,
                "align_error");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_frames",
                CTLFLAG_RD, &ha->hw.mac.eswitched_frames,
                "eswitched_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_bytes",
                CTLFLAG_RD, &ha->hw.mac.eswitched_bytes,
                "eswitched_bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_mcast_frames",
                CTLFLAG_RD, &ha->hw.mac.eswitched_mcast_frames,
                "eswitched_mcast_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_bcast_frames",
                CTLFLAG_RD, &ha->hw.mac.eswitched_bcast_frames,
                "eswitched_bcast_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_ucast_frames",
                CTLFLAG_RD, &ha->hw.mac.eswitched_ucast_frames,
                "eswitched_ucast_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_err_free_frames",
                CTLFLAG_RD, &ha->hw.mac.eswitched_err_free_frames,
                "eswitched_err_free_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_err_free_bytes",
                CTLFLAG_RD, &ha->hw.mac.eswitched_err_free_bytes,
                "eswitched_err_free_bytes");

        return;
}

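/*
 * Name: qlnx_add_hw_rcv_stats_sysctls
 * Function: Registers the read-only "stats_hw_rcv" sysctl node exposing
 *      the receive-path counters in ha->hw.rcv.
 */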
static void
qlnx_add_hw_rcv_stats_sysctls(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid       *ctx_oid;

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_rcv",
                        CTLFLAG_RD, NULL, "stats_hw_rcv");
        children = SYSCTL_CHILDREN(ctx_oid);

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "total_bytes",
                CTLFLAG_RD, &ha->hw.rcv.total_bytes,
                "total_bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "total_pkts",
                CTLFLAG_RD, &ha->hw.rcv.total_pkts,
                "total_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "lro_pkt_count",
                CTLFLAG_RD, &ha->hw.rcv.lro_pkt_count,
                "lro_pkt_count");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "sw_pkt_count",
                CTLFLAG_RD, &ha->hw.rcv.sw_pkt_count,
                "sw_pkt_count");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "ip_chksum_err",
                CTLFLAG_RD, &ha->hw.rcv.ip_chksum_err,
                "ip_chksum_err");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_wo_acntxts",
                CTLFLAG_RD, &ha->hw.rcv.pkts_wo_acntxts,
                "pkts_wo_acntxts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_dropped_no_sds_card",
                CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_sds_card,
                "pkts_dropped_no_sds_card");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_dropped_no_sds_host",
                CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_sds_host,
                "pkts_dropped_no_sds_host");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "oversized_pkts",
                CTLFLAG_RD, &ha->hw.rcv.oversized_pkts,
                "oversized_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_dropped_no_rds",
                CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_rds,
                "pkts_dropped_no_rds");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "unxpctd_mcast_pkts",
                CTLFLAG_RD, &ha->hw.rcv.unxpctd_mcast_pkts,
                "unxpctd_mcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "re1_fbq_error",
                CTLFLAG_RD, &ha->hw.rcv.re1_fbq_error,
                "re1_fbq_error");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "invalid_mac_addr",
                CTLFLAG_RD, &ha->hw.rcv.invalid_mac_addr,
                "invalid_mac_addr");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rds_prime_trys",
                CTLFLAG_RD, &ha->hw.rcv.rds_prime_trys,
                "rds_prime_trys");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rds_prime_success",
                CTLFLAG_RD, &ha->hw.rcv.rds_prime_success,
                "rds_prime_success");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "lro_flows_added",
                CTLFLAG_RD, &ha->hw.rcv.lro_flows_added,
                "lro_flows_added");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "lro_flows_deleted",
                CTLFLAG_RD, &ha->hw.rcv.lro_flows_deleted,
                "lro_flows_deleted");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "lro_flows_active",
                CTLFLAG_RD, &ha->hw.rcv.lro_flows_active,
                "lro_flows_active");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_droped_unknown",
                CTLFLAG_RD, &ha->hw.rcv.pkts_droped_unknown,
                "pkts_droped_unknown");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_cnt_oversized",
                CTLFLAG_RD, &ha->hw.rcv.pkts_cnt_oversized,
                "pkts_cnt_oversized");

        return;
}

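/*
 * Name: qlnx_add_hw_xmt_stats_sysctls
 * Function: Registers the read-only "stats_hw_xmt" sysctl node with one
 *      child node per transmit ring for the counters in ha->hw.xmt[].
 */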
static void
qlnx_add_hw_xmt_stats_sysctls(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid_list  *node_children;
        struct sysctl_oid       *ctx_oid;
        int                     i;
        uint8_t                 name_str[16];

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_xmt",
                        CTLFLAG_RD, NULL, "stats_hw_xmt");
        children = SYSCTL_CHILDREN(ctx_oid);

        for (i = 0; i < ha->hw.num_tx_rings; i++) {
                bzero(name_str, sizeof(name_str));
                snprintf(name_str, sizeof(name_str), "%d", i);

                ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
                        CTLFLAG_RD, NULL, name_str);
                node_children = SYSCTL_CHILDREN(ctx_oid);

                /* Tx Related */

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "total_bytes",
                        CTLFLAG_RD, &ha->hw.xmt[i].total_bytes,
                        "total_bytes");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "total_pkts",
                        CTLFLAG_RD, &ha->hw.xmt[i].total_pkts,
                        "total_pkts");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "errors",
                        CTLFLAG_RD, &ha->hw.xmt[i].errors,
                        "errors");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "pkts_dropped",
                        CTLFLAG_RD, &ha->hw.xmt[i].pkts_dropped,
                        "pkts_dropped");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "switch_pkts",
                        CTLFLAG_RD, &ha->hw.xmt[i].switch_pkts,
                        "switch_pkts");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "num_buffers",
                        CTLFLAG_RD, &ha->hw.xmt[i].num_buffers,
                        "num_buffers");
        }

        return;
}

static void
qlnx_add_hw_stats_sysctls(qla_host_t *ha)
{
        qlnx_add_hw_mac_stats_sysctls(ha);
        qlnx_add_hw_rcv_stats_sysctls(ha);
        qlnx_add_hw_xmt_stats_sysctls(ha);

        return;
}

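/*
 * Name: qlnx_add_drvr_sds_stats
 * Function: Registers the read-only "stats_drvr_sds" sysctl node with
 *      one child node per status descriptor ring (intr_count, rx_free).
 */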
static void
qlnx_add_drvr_sds_stats(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid_list  *node_children;
        struct sysctl_oid       *ctx_oid;
        int                     i;
        uint8_t                 name_str[16];

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_sds",
                        CTLFLAG_RD, NULL, "stats_drvr_sds");
        children = SYSCTL_CHILDREN(ctx_oid);

        for (i = 0; i < ha->hw.num_sds_rings; i++) {
                bzero(name_str, sizeof(name_str));
                snprintf(name_str, sizeof(name_str), "%d", i);

                ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
                        CTLFLAG_RD, NULL, name_str);
                node_children = SYSCTL_CHILDREN(ctx_oid);

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "intr_count",
                        CTLFLAG_RD, &ha->hw.sds[i].intr_count,
                        "intr_count");

                SYSCTL_ADD_UINT(ctx, node_children,
                        OID_AUTO, "rx_free",
                        CTLFLAG_RD, &ha->hw.sds[i].rx_free,
                        ha->hw.sds[i].rx_free, "rx_free");
        }

        return;
}
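
/*
 * Name: qlnx_add_drvr_rds_stats
 * Function: Registers the read-only "stats_drvr_rds" sysctl node with
 *      one child node per receive descriptor ring (count, lro_pkt_count,
 *      lro_bytes).
 */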
static void
qlnx_add_drvr_rds_stats(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid_list  *node_children;
        struct sysctl_oid       *ctx_oid;
        int                     i;
        uint8_t                 name_str[16];

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_rds",
                        CTLFLAG_RD, NULL, "stats_drvr_rds");
        children = SYSCTL_CHILDREN(ctx_oid);

        for (i = 0; i < ha->hw.num_rds_rings; i++) {
                bzero(name_str, sizeof(name_str));
                snprintf(name_str, sizeof(name_str), "%d", i);

                ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
                        CTLFLAG_RD, NULL, name_str);
                node_children = SYSCTL_CHILDREN(ctx_oid);

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "count",
                        CTLFLAG_RD, &ha->hw.rds[i].count,
                        "count");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "lro_pkt_count",
                        CTLFLAG_RD, &ha->hw.rds[i].lro_pkt_count,
                        "lro_pkt_count");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "lro_bytes",
                        CTLFLAG_RD, &ha->hw.rds[i].lro_bytes,
                        "lro_bytes");
        }

        return;
}

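/*
 * Name: qlnx_add_drvr_tx_stats
 * Function: Registers the read-only "stats_drvr_xmt" sysctl node with
 *      one child node per transmit ring (count, plus iscsi_pkt_count
 *      when QL_ENABLE_ISCSI_TLV is defined).
 */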
static void
qlnx_add_drvr_tx_stats(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid_list  *node_children;
        struct sysctl_oid       *ctx_oid;
        int                     i;
        uint8_t                 name_str[16];

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_xmt",
                        CTLFLAG_RD, NULL, "stats_drvr_xmt");
        children = SYSCTL_CHILDREN(ctx_oid);

        for (i = 0; i < ha->hw.num_tx_rings; i++) {
                bzero(name_str, sizeof(name_str));
                snprintf(name_str, sizeof(name_str), "%d", i);

                ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
                        CTLFLAG_RD, NULL, name_str);
                node_children = SYSCTL_CHILDREN(ctx_oid);

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "count",
                        CTLFLAG_RD, &ha->tx_ring[i].count,
                        "count");

#ifdef QL_ENABLE_ISCSI_TLV
                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "iscsi_pkt_count",
                        CTLFLAG_RD, &ha->tx_ring[i].iscsi_pkt_count,
                        "iscsi_pkt_count");
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
        }

        return;
}

static void
qlnx_add_drvr_stats_sysctls(qla_host_t *ha)
{
        qlnx_add_drvr_sds_stats(ha);
        qlnx_add_drvr_rds_stats(ha);
        qlnx_add_drvr_tx_stats(ha);
        return;
}

/*
 * Name: ql_hw_add_sysctls
 * Function: Add P3Plus specific sysctls
 */
void
ql_hw_add_sysctls(qla_host_t *ha)
{
        device_t        dev;

        dev = ha->pci_dev;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_rds_rings", CTLFLAG_RD, &ha->hw.num_rds_rings,
                ha->hw.num_rds_rings, "Number of Rcv Descriptor Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_sds_rings", CTLFLAG_RD, &ha->hw.num_sds_rings,
                ha->hw.num_sds_rings, "Number of Status Descriptor Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->hw.num_tx_rings,
                ha->hw.num_tx_rings, "Number of Transmit Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "tx_ring_index", CTLFLAG_RW, &ha->txr_idx,
                ha->txr_idx, "Tx Ring Used");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "max_tx_segs", CTLFLAG_RD, &ha->hw.max_tx_segs,
                ha->hw.max_tx_segs, "Max # of Segments in a non-TSO pkt");

        ha->hw.sds_cidx_thres = 32;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "sds_cidx_thres", CTLFLAG_RW, &ha->hw.sds_cidx_thres,
                ha->hw.sds_cidx_thres,
                "Number of SDS entries to process before updating"
                " SDS Ring Consumer Index");

        ha->hw.rds_pidx_thres = 32;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "rds_pidx_thres", CTLFLAG_RW, &ha->hw.rds_pidx_thres,
                ha->hw.rds_pidx_thres,
                "Number of Rcv Rings Entries to post before updating"
                " RDS Ring Producer Index");

        ha->hw.rcv_intr_coalesce = (3 << 16) | 256;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "rcv_intr_coalesce", CTLFLAG_RW,
                &ha->hw.rcv_intr_coalesce,
                ha->hw.rcv_intr_coalesce,
                "Rcv Intr Coalescing Parameters\n"
                "\tbits 15:0 max packets\n"
                "\tbits 31:16 max microseconds to wait\n"
                "\tplease run\n"
                "\tifconfig <if> down && ifconfig <if> up\n"
                "\tto take effect\n");

        ha->hw.xmt_intr_coalesce = (64 << 16) | 64;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "xmt_intr_coalesce", CTLFLAG_RW,
                &ha->hw.xmt_intr_coalesce,
                ha->hw.xmt_intr_coalesce,
                "Xmt Intr Coalescing Parameters\n"
                "\tbits 15:0 max packets\n"
                "\tbits 31:16 max microseconds to wait\n"
                "\tplease run\n"
                "\tifconfig <if> down && ifconfig <if> up\n"
                "\tto take effect\n");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "port_cfg", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_port_cfg, "I",
                        "Set Port Configuration if the value is valid per"
                        " the bit layout below; otherwise Get Port"
                        " Configuration\n"
                        "\tBits 0-3 : 1 = DCBX Enable; 0 = DCBX Disable\n"
                        "\tBits 4-7 : 0 = no pause; 1 = std; 2 = ppm\n"
                        "\tBits 8-11: std pause cfg; 0 = xmt and rcv;"
                        " 1 = xmt only; 2 = rcv only;\n"
                );

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "set_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_set_cam_search_mode, "I",
                        "Set CAM Search Mode\n"
                        "\t 1 = search mode internal\n"
                        "\t 2 = search mode auto\n");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "get_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_get_cam_search_mode, "I",
                        "Get CAM Search Mode\n"
                        "\t 1 = search mode internal\n"
                        "\t 2 = search mode auto\n");

        ha->hw.enable_9kb = 1;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "enable_9kb", CTLFLAG_RW, &ha->hw.enable_9kb,
                ha->hw.enable_9kb, "Enable 9Kbyte Buffers when MTU = 9000");

        ha->hw.enable_hw_lro = 1;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "enable_hw_lro", CTLFLAG_RW, &ha->hw.enable_hw_lro,
                ha->hw.enable_hw_lro, "Enable Hardware LRO; Default is true\n"
                "\t 1 : Hardware LRO if LRO is enabled\n"
                "\t 0 : Software LRO if LRO is enabled\n"
                "\t Any change requires ifconfig down/up to take effect\n"
                "\t Note that LRO may be turned off/on via ifconfig\n");

        ha->hw.mdump_active = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "minidump_active", CTLFLAG_RW, &ha->hw.mdump_active,
                ha->hw.mdump_active,
                "Minidump retrieval is Active");

        ha->hw.mdump_done = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "mdump_done", CTLFLAG_RW,
                &ha->hw.mdump_done, ha->hw.mdump_done,
                "Minidump has been done and available for retrieval");

        ha->hw.mdump_capture_mask = 0xF;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "minidump_capture_mask", CTLFLAG_RW,
                &ha->hw.mdump_capture_mask, ha->hw.mdump_capture_mask,
                "Minidump capture mask");
#ifdef QL_DBG

        ha->err_inject = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "err_inject",
                CTLFLAG_RW, &ha->err_inject, ha->err_inject,
                "Error to be injected\n"
                "\t\t\t 0: No Errors\n"
                "\t\t\t 1: rcv: rxb struct invalid\n"
                "\t\t\t 2: rcv: mp == NULL\n"
                "\t\t\t 3: lro: rxb struct invalid\n"
                "\t\t\t 4: lro: mp == NULL\n"
                "\t\t\t 5: rcv: num handles invalid\n"
                "\t\t\t 6: reg: indirect reg rd_wr failure\n"
                "\t\t\t 7: ocm: offchip memory rd_wr failure\n"
                "\t\t\t 8: mbx: mailbox command failure\n"
                "\t\t\t 9: heartbeat failure\n"
                "\t\t\t A: temperature failure\n"
                "\t\t\t 11: m_getcl or m_getjcl failure\n");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "peg_stop", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_stop_pegs, "I", "Peg Stop");

#endif /* #ifdef QL_DBG */

        ha->hw.user_pri_nic = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "user_pri_nic", CTLFLAG_RW, &ha->hw.user_pri_nic,
                ha->hw.user_pri_nic,
                "VLAN Tag User Priority for Normal Ethernet Packets");

        ha->hw.user_pri_iscsi = 4;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "user_pri_iscsi", CTLFLAG_RW, &ha->hw.user_pri_iscsi,
                ha->hw.user_pri_iscsi,
                "VLAN Tag User Priority for iSCSI Packets");

        qlnx_add_hw_stats_sysctls(ha);
        qlnx_add_drvr_stats_sysctls(ha);

        return;
}

void
ql_hw_link_status(qla_host_t *ha)
{
        device_printf(ha->pci_dev, "cable_oui\t\t 0x%08x\n", ha->hw.cable_oui);

        if (ha->hw.link_up) {
                device_printf(ha->pci_dev, "link Up\n");
        } else {
                device_printf(ha->pci_dev, "link Down\n");
        }

        if (ha->hw.flags.fduplex) {
                device_printf(ha->pci_dev, "Full Duplex\n");
        } else {
                device_printf(ha->pci_dev, "Half Duplex\n");
        }

        if (ha->hw.flags.autoneg) {
                device_printf(ha->pci_dev, "Auto Negotiation Enabled\n");
        } else {
                device_printf(ha->pci_dev, "Auto Negotiation Disabled\n");
        }

        switch (ha->hw.link_speed) {
        case 0x710:
                device_printf(ha->pci_dev, "link speed\t\t 10Gbps\n");
                break;

        case 0x3E8:
                device_printf(ha->pci_dev, "link speed\t\t 1Gbps\n");
                break;

        case 0x64:
                device_printf(ha->pci_dev, "link speed\t\t 100Mbps\n");
                break;

        default:
                device_printf(ha->pci_dev, "link speed\t\t Unknown\n");
                break;
        }

        switch (ha->hw.module_type) {
        case 0x01:
                device_printf(ha->pci_dev, "Module Type 10GBase-LRM\n");
                break;

        case 0x02:
                device_printf(ha->pci_dev, "Module Type 10GBase-LR\n");
                break;

        case 0x03:
                device_printf(ha->pci_dev, "Module Type 10GBase-SR\n");
                break;

        case 0x04:
                device_printf(ha->pci_dev,
                        "Module Type 10GE Passive Copper (Compliant)[%d m]\n",
                        ha->hw.cable_length);
                break;

        case 0x05:
                device_printf(ha->pci_dev, "Module Type 10GE Active"
                        " Limiting Copper (Compliant)[%d m]\n",
                        ha->hw.cable_length);
                break;

        case 0x06:
                device_printf(ha->pci_dev,
                        "Module Type 10GE Passive Copper"
                        " (Legacy, Best Effort)[%d m]\n",
                        ha->hw.cable_length);
                break;

        case 0x07:
                device_printf(ha->pci_dev, "Module Type 1000Base-SX\n");
                break;

        case 0x08:
                device_printf(ha->pci_dev, "Module Type 1000Base-LX\n");
                break;

        case 0x09:
                device_printf(ha->pci_dev, "Module Type 1000Base-CX\n");
                break;

        case 0x0A:
                device_printf(ha->pci_dev, "Module Type 1000Base-T\n");
                break;

        case 0x0B:
                device_printf(ha->pci_dev, "Module Type 1GE Passive Copper"
                        " (Legacy, Best Effort)\n");
                break;

        default:
                device_printf(ha->pci_dev, "Unknown Module Type 0x%x\n",
                        ha->hw.module_type);
                break;
        }

        if (ha->hw.link_faults == 1)
                device_printf(ha->pci_dev, "SFP Power Fault\n");
}

/*
 * Name: ql_free_dma
 * Function: Frees the DMA'able memory allocated in ql_alloc_dma()
 */
void
ql_free_dma(qla_host_t *ha)
{
        uint32_t i;

        if (ha->hw.dma_buf.flags.sds_ring) {
                for (i = 0; i < ha->hw.num_sds_rings; i++) {
                        ql_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]);
                }
                ha->hw.dma_buf.flags.sds_ring = 0;
        }

        if (ha->hw.dma_buf.flags.rds_ring) {
                for (i = 0; i < ha->hw.num_rds_rings; i++) {
                        ql_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]);
                }
                ha->hw.dma_buf.flags.rds_ring = 0;
        }

        if (ha->hw.dma_buf.flags.tx_ring) {
                ql_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring);
                ha->hw.dma_buf.flags.tx_ring = 0;
        }
        ql_minidump_free(ha);
}

/*
 * Name: ql_alloc_dma
 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
 */
int
ql_alloc_dma(qla_host_t *ha)
{
        device_t                dev;
        uint32_t                i, j, size, tx_ring_size;
        qla_hw_t                *hw;
        qla_hw_tx_cntxt_t       *tx_cntxt;
        uint8_t                 *vaddr;
        bus_addr_t              paddr;

        dev = ha->pci_dev;

        QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

        hw = &ha->hw;
        /*
         * Allocate Transmit Ring
         */
        tx_ring_size = (sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS);
        size = (tx_ring_size * ha->hw.num_tx_rings);

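        /*
         * The extra page appended below holds one 32-bit transmit
         * consumer index per Tx ring; the indices are carved out of
         * the space immediately after the rings themselves.
         */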
        hw->dma_buf.tx_ring.alignment = 8;
        hw->dma_buf.tx_ring.size = size + PAGE_SIZE;

        if (ql_alloc_dmabuf(ha, &hw->dma_buf.tx_ring)) {
                device_printf(dev, "%s: tx ring alloc failed\n", __func__);
                goto ql_alloc_dma_exit;
        }

        vaddr = (uint8_t *)hw->dma_buf.tx_ring.dma_b;
        paddr = hw->dma_buf.tx_ring.dma_addr;

        for (i = 0; i < ha->hw.num_tx_rings; i++) {
                tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

                tx_cntxt->tx_ring_base = (q80_tx_cmd_t *)vaddr;
                tx_cntxt->tx_ring_paddr = paddr;

                vaddr += tx_ring_size;
                paddr += tx_ring_size;
        }

        for (i = 0; i < ha->hw.num_tx_rings; i++) {
                tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

                tx_cntxt->tx_cons = (uint32_t *)vaddr;
                tx_cntxt->tx_cons_paddr = paddr;

                vaddr += sizeof (uint32_t);
                paddr += sizeof (uint32_t);
        }

        ha->hw.dma_buf.flags.tx_ring = 1;

        QL_DPRINT2(ha, (dev, "%s: tx_ring phys %p virt %p\n",
                __func__, (void *)(hw->dma_buf.tx_ring.dma_addr),
                hw->dma_buf.tx_ring.dma_b));
        /*
         * Allocate Receive Descriptor Rings
         */

        for (i = 0; i < hw->num_rds_rings; i++) {
                hw->dma_buf.rds_ring[i].alignment = 8;
                hw->dma_buf.rds_ring[i].size =
                        (sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS;

                if (ql_alloc_dmabuf(ha, &hw->dma_buf.rds_ring[i])) {
                        device_printf(dev, "%s: rds ring[%d] alloc failed\n",
                                __func__, i);

                        for (j = 0; j < i; j++)
                                ql_free_dmabuf(ha, &hw->dma_buf.rds_ring[j]);

                        goto ql_alloc_dma_exit;
                }
                QL_DPRINT4(ha, (dev, "%s: rx_ring[%d] phys %p virt %p\n",
                        __func__, i, (void *)(hw->dma_buf.rds_ring[i].dma_addr),
                        hw->dma_buf.rds_ring[i].dma_b));
        }

        hw->dma_buf.flags.rds_ring = 1;

        /*
         * Allocate Status Descriptor Rings
         */

        for (i = 0; i < hw->num_sds_rings; i++) {
                hw->dma_buf.sds_ring[i].alignment = 8;
                hw->dma_buf.sds_ring[i].size =
                        (sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS;

                if (ql_alloc_dmabuf(ha, &hw->dma_buf.sds_ring[i])) {
                        device_printf(dev, "%s: sds ring alloc failed\n",
                                __func__);

                        for (j = 0; j < i; j++)
                                ql_free_dmabuf(ha, &hw->dma_buf.sds_ring[j]);

                        goto ql_alloc_dma_exit;
                }
                QL_DPRINT4(ha, (dev, "%s: sds_ring[%d] phys %p virt %p\n",
                        __func__, i,
                        (void *)(hw->dma_buf.sds_ring[i].dma_addr),
                        hw->dma_buf.sds_ring[i].dma_b));
        }
        for (i = 0; i < hw->num_sds_rings; i++) {
                hw->sds[i].sds_ring_base =
                        (q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b;
        }

        hw->dma_buf.flags.sds_ring = 1;

        return 0;

ql_alloc_dma_exit:
        ql_free_dma(ha);
        return -1;
}

#define Q8_MBX_MSEC_DELAY       5000

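/*
 * Name: qla_mbx_cmd
 * Function: Issues a mailbox command to the firmware and collects the
 *      response. The handshake is: wait for Q8_HOST_MBOX_CNTRL to clear,
 *      write n_hmbox words starting at Q8_HOST_MBOX0, set
 *      Q8_HOST_MBOX_CNTRL to signal the firmware, poll Q8_FW_MBOX_CNTRL
 *      for completion, read n_fwmbox response words starting at
 *      Q8_FW_MBOX0, then ack by clearing Q8_FW_MBOX_CNTRL. A timeout or
 *      an injected failure sets qla_initiate_recovery and returns a
 *      negative value.
 */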
static int
qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
        uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause)
{
        uint32_t i;
        uint32_t data;
        int ret = 0;

        if (QL_ERR_INJECT(ha, INJCT_MBX_CMD_FAILURE)) {
                ret = -3;
                ha->qla_initiate_recovery = 1;
                goto exit_qla_mbx_cmd;
        }

        if (no_pause)
                i = 1000;
        else
                i = Q8_MBX_MSEC_DELAY;

        while (i) {
                data = READ_REG32(ha, Q8_HOST_MBOX_CNTRL);
                if (data == 0)
                        break;
                if (no_pause) {
                        DELAY(1000);
                } else {
                        qla_mdelay(__func__, 1);
                }
                i--;
        }

        if (i == 0) {
                device_printf(ha->pci_dev, "%s: host_mbx_cntrl 0x%08x\n",
                        __func__, data);
                ret = -1;
                ha->qla_initiate_recovery = 1;
                goto exit_qla_mbx_cmd;
        }

        for (i = 0; i < n_hmbox; i++) {
                WRITE_REG32(ha, (Q8_HOST_MBOX0 + (i << 2)), *h_mbox);
                h_mbox++;
        }

        WRITE_REG32(ha, Q8_HOST_MBOX_CNTRL, 0x1);

        i = Q8_MBX_MSEC_DELAY;
        while (i) {
                data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);

                if ((data & 0x3) == 1) {
                        data = READ_REG32(ha, Q8_FW_MBOX0);
                        if ((data & 0xF000) != 0x8000)
                                break;
                }
                if (no_pause) {
                        DELAY(1000);
                } else {
                        qla_mdelay(__func__, 1);
                }
                i--;
        }
        if (i == 0) {
                device_printf(ha->pci_dev, "%s: fw_mbx_cntrl 0x%08x\n",
                        __func__, data);
                ret = -2;
                ha->qla_initiate_recovery = 1;
                goto exit_qla_mbx_cmd;
        }

        for (i = 0; i < n_fwmbox; i++) {
                *fw_mbox++ = READ_REG32(ha, (Q8_FW_MBOX0 + (i << 2)));
        }

        WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
        WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);

exit_qla_mbx_cmd:
        return (ret);
}

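/*
 * Name: qla_get_nic_partition
 * Function: Issues the Q8_MBX_GET_NIC_PARTITION mailbox command and
 *      optionally reports whether this function supports 9K receive
 *      buffers and how many receive queues it owns.
 */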
int
qla_get_nic_partition(qla_host_t *ha, uint32_t *supports_9kb,
        uint32_t *num_rcvq)
{
        uint32_t *mbox, err;
        device_t dev = ha->pci_dev;

        bzero(ha->hw.mbox, (sizeof (uint32_t) * Q8_NUM_MBOX));

        mbox = ha->hw.mbox;

        mbox[0] = Q8_MBX_GET_NIC_PARTITION | (0x2 << 16) | (0x2 << 29);

        if (qla_mbx_cmd(ha, mbox, 2, mbox, 19, 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }
        err = mbox[0] >> 25;

        if (supports_9kb != NULL) {
                if (mbox[16] & 0x80) /* bit 7 of mbox 16 */
                        *supports_9kb = 1;
                else
                        *supports_9kb = 0;
        }

        if (num_rcvq != NULL)
                *num_rcvq = ((mbox[6] >> 16) & 0xFFFF);

        if ((err != 1) && (err != 0)) {
                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
                return (-1);
        }
        return 0;
}

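/*
 * Name: qla_config_intr_cntxt
 * Function: Creates or deletes num_intrs MSI-X interrupt contexts in the
 *      firmware (Q8_MBX_CONFIG_INTR) and, on create, records the
 *      returned interrupt ids and sources in ha->hw.intr_id[] and
 *      ha->hw.intr_src[].
 */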
static int
qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx, uint32_t num_intrs,
        uint32_t create)
{
        uint32_t i, err;
        device_t dev = ha->pci_dev;
        q80_config_intr_t *c_intr;
        q80_config_intr_rsp_t *c_intr_rsp;

        c_intr = (q80_config_intr_t *)ha->hw.mbox;
        bzero(c_intr, (sizeof (q80_config_intr_t)));

        c_intr->opcode = Q8_MBX_CONFIG_INTR;

        c_intr->count_version = (sizeof (q80_config_intr_t) >> 2);
        c_intr->count_version |= Q8_MBX_CMD_VERSION;

        c_intr->nentries = num_intrs;

        for (i = 0; i < num_intrs; i++) {
                if (create) {
                        c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_CREATE;
                        c_intr->intr[i].msix_index = start_idx + 1 + i;
                } else {
                        c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_DELETE;
                        c_intr->intr[i].msix_index =
                                ha->hw.intr_id[(start_idx + i)];
                }

                c_intr->intr[i].cmd_type |= Q8_MBX_CONFIG_INTR_TYPE_MSI_X;
        }

        if (qla_mbx_cmd(ha, (uint32_t *)c_intr,
                (sizeof (q80_config_intr_t) >> 2),
                ha->hw.mbox, (sizeof (q80_config_intr_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }

        c_intr_rsp = (q80_config_intr_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(c_intr_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed1 [0x%08x, %d]\n", __func__, err,
                        c_intr_rsp->nentries);

                for (i = 0; i < c_intr_rsp->nentries; i++) {
                        device_printf(dev, "%s: [%d]:[0x%x 0x%x 0x%x]\n",
                                __func__, i,
                                c_intr_rsp->intr[i].status,
                                c_intr_rsp->intr[i].intr_id,
                                c_intr_rsp->intr[i].intr_src);
                }

                return (-1);
        }

        for (i = 0; ((i < num_intrs) && create); i++) {
                if (!c_intr_rsp->intr[i].status) {
                        ha->hw.intr_id[(start_idx + i)] =
                                c_intr_rsp->intr[i].intr_id;
                        ha->hw.intr_src[(start_idx + i)] =
                                c_intr_rsp->intr[i].intr_src;
                }
        }

        return (0);
}

1439 /*
1440  * Name: qla_config_rss
1441  * Function: Configure RSS for the context/interface.
1442  */
1443 static const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL,
1444                         0x8030f20c77cb2da3ULL,
1445                         0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
1446                         0x255b0ec26d5a56daULL };
1447
1448 static int
1449 qla_config_rss(qla_host_t *ha, uint16_t cntxt_id)
1450 {
1451         q80_config_rss_t        *c_rss;
1452         q80_config_rss_rsp_t    *c_rss_rsp;
1453         uint32_t                err, i;
1454         device_t                dev = ha->pci_dev;
1455
1456         c_rss = (q80_config_rss_t *)ha->hw.mbox;
1457         bzero(c_rss, (sizeof (q80_config_rss_t)));
1458
1459         c_rss->opcode = Q8_MBX_CONFIG_RSS;
1460
1461         c_rss->count_version = (sizeof (q80_config_rss_t) >> 2);
1462         c_rss->count_version |= Q8_MBX_CMD_VERSION;
1463
1464         c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP_IP |
1465                                 Q8_MBX_RSS_HASH_TYPE_IPV6_TCP_IP);
1466         //c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP |
1467         //                      Q8_MBX_RSS_HASH_TYPE_IPV6_TCP);
1468
1469         c_rss->flags = Q8_MBX_RSS_FLAGS_ENABLE_RSS;
1470         c_rss->flags |= Q8_MBX_RSS_FLAGS_USE_IND_TABLE;
1471
1472         c_rss->indtbl_mask = Q8_MBX_RSS_INDTBL_MASK;
1473
1474         c_rss->indtbl_mask |= Q8_MBX_RSS_FLAGS_MULTI_RSS_VALID;
1475         c_rss->flags |= Q8_MBX_RSS_FLAGS_TYPE_CRSS;
1476
1477         c_rss->cntxt_id = cntxt_id;
1478
1479         for (i = 0; i < 5; i++) {
1480                 c_rss->rss_key[i] = rss_key[i];
1481         }
1482
1483         if (qla_mbx_cmd(ha, (uint32_t *)c_rss,
1484                 (sizeof (q80_config_rss_t) >> 2),
1485                 ha->hw.mbox, (sizeof(q80_config_rss_rsp_t) >> 2), 0)) {
1486                 device_printf(dev, "%s: failed0\n", __func__);
1487                 return (-1);
1488         }
1489         c_rss_rsp = (q80_config_rss_rsp_t *)ha->hw.mbox;
1490
1491         err = Q8_MBX_RSP_STATUS(c_rss_rsp->regcnt_status);
1492
1493         if (err) {
1494                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1495                 return (-1);
1496         }
1497         return 0;
1498 }
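
/*
 * [Conceptual sketch, not compiled.] Once qla_config_rss() succeeds,
 * the firmware Toeplitz-hashes each flow with the 40-byte key above
 * (carried as five 64-bit words) and steers it to an SDS ring through
 * the indirection table. The lookup below assumes the programmed
 * indtbl_mask selects the hash bits used for the table index; the
 * helper name is hypothetical.
 */
#if 0
static uint8_t
qla_rss_pick_ring_example(uint32_t toeplitz_hash, const uint8_t *ind_tbl)
{
        /* low bits of the hash select the indirection-table slot */
        return (ind_tbl[toeplitz_hash & Q8_MBX_RSS_INDTBL_MASK]);
}
#endif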
1499
1500 static int
1501 qla_set_rss_ind_table(qla_host_t *ha, uint32_t start_idx, uint32_t count,
1502         uint16_t cntxt_id, uint8_t *ind_table)
1503 {
1504         q80_config_rss_ind_table_t      *c_rss_ind;
1505         q80_config_rss_ind_table_rsp_t  *c_rss_ind_rsp;
1506         uint32_t                        err;
1507         device_t                        dev = ha->pci_dev;
1508
1509         if ((count > Q8_RSS_IND_TBL_SIZE) ||
1510                 ((start_idx + count - 1) > Q8_RSS_IND_TBL_MAX_IDX)) {
1511                 device_printf(dev, "%s: illegal count [%d, %d]\n", __func__,
1512                         start_idx, count);
1513                 return (-1);
1514         }
1515
1516         c_rss_ind = (q80_config_rss_ind_table_t *)ha->hw.mbox;
1517         bzero(c_rss_ind, sizeof (q80_config_rss_ind_table_t));
1518
1519         c_rss_ind->opcode = Q8_MBX_CONFIG_RSS_TABLE;
1520         c_rss_ind->count_version = (sizeof (q80_config_rss_ind_table_t) >> 2);
1521         c_rss_ind->count_version |= Q8_MBX_CMD_VERSION;
1522
1523         c_rss_ind->start_idx = start_idx;
1524         c_rss_ind->end_idx = start_idx + count - 1;
1525         c_rss_ind->cntxt_id = cntxt_id;
1526         bcopy(ind_table, c_rss_ind->ind_table, count);
1527
1528         if (qla_mbx_cmd(ha, (uint32_t *)c_rss_ind,
1529                 (sizeof (q80_config_rss_ind_table_t) >> 2), ha->hw.mbox,
1530                 (sizeof(q80_config_rss_ind_table_rsp_t) >> 2), 0)) {
1531                 device_printf(dev, "%s: failed0\n", __func__);
1532                 return (-1);
1533         }
1534
1535         c_rss_ind_rsp = (q80_config_rss_ind_table_rsp_t *)ha->hw.mbox;
1536         err = Q8_MBX_RSP_STATUS(c_rss_ind_rsp->regcnt_status);
1537
1538         if (err) {
1539                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1540                 return (-1);
1541         }
1542         return 0;
1543 }
1544
1545 /*
1546  * Name: qla_config_intr_coalesce
1547  * Function: Configure Interrupt Coalescing.
1548  */
1549 static int
1550 qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable,
1551         int rcv)
1552 {
1553         q80_config_intr_coalesc_t       *intrc;
1554         q80_config_intr_coalesc_rsp_t   *intrc_rsp;
1555         uint32_t                        err, i;
1556         device_t                        dev = ha->pci_dev;
1557         
1558         intrc = (q80_config_intr_coalesc_t *)ha->hw.mbox;
1559         bzero(intrc, (sizeof (q80_config_intr_coalesc_t)));
1560
1561         intrc->opcode = Q8_MBX_CONFIG_INTR_COALESCE;
1562         intrc->count_version = (sizeof (q80_config_intr_coalesc_t) >> 2);
1563         intrc->count_version |= Q8_MBX_CMD_VERSION;
1564
1565         if (rcv) {
1566                 intrc->flags = Q8_MBX_INTRC_FLAGS_RCV;
1567                 intrc->max_pkts = ha->hw.rcv_intr_coalesce & 0xFFFF;
1568                 intrc->max_mswait = (ha->hw.rcv_intr_coalesce >> 16) & 0xFFFF;
1569         } else {
1570                 intrc->flags = Q8_MBX_INTRC_FLAGS_XMT;
1571                 intrc->max_pkts = ha->hw.xmt_intr_coalesce & 0xFFFF;
1572                 intrc->max_mswait = (ha->hw.xmt_intr_coalesce >> 16) & 0xFFFF;
1573         }
1574
1575         intrc->cntxt_id = cntxt_id;
1576
1577         if (tenable) {
1578                 intrc->flags |= Q8_MBX_INTRC_FLAGS_PERIODIC;
1579                 intrc->timer_type = Q8_MBX_INTRC_TIMER_PERIODIC;
1580
1581                 for (i = 0; i < ha->hw.num_sds_rings; i++) {
1582                         intrc->sds_ring_mask |= (1 << i);
1583                 }
1584                 intrc->ms_timeout = 1000;
1585         }
1586
1587         if (qla_mbx_cmd(ha, (uint32_t *)intrc,
1588                 (sizeof (q80_config_intr_coalesc_t) >> 2),
1589                 ha->hw.mbox, (sizeof(q80_config_intr_coalesc_rsp_t) >> 2), 0)) {
1590                 device_printf(dev, "%s: failed0\n", __func__);
1591                 return (-1);
1592         }
1593         intrc_rsp = (q80_config_intr_coalesc_rsp_t *)ha->hw.mbox;
1594
1595         err = Q8_MBX_RSP_STATUS(intrc_rsp->regcnt_status);
1596
1597         if (err) {
1598                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1599                 return (-1);
1600         }
1601         
1602         return 0;
1603 }
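
/*
 * [Sketch, not compiled.] The cached rcv/xmt_intr_coalesce words
 * unpacked above hold two 16-bit fields: maximum packets per interrupt
 * in the low half and maximum wait time in the high half. A helper for
 * building such a value (hypothetical name; the numbers in the usage
 * line are only an example):
 */
#if 0
static uint32_t
qla_pack_intr_coalesce_example(uint16_t max_pkts, uint16_t max_mswait)
{
        return (((uint32_t)max_mswait << 16) | max_pkts);
}
/* ha->hw.rcv_intr_coalesce = qla_pack_intr_coalesce_example(256, 3); */
#endif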
1604
1605
1606 /*
1607  * Name: qla_config_mac_addr
1608  * Function: binds a MAC address to the context/interface.
1609  *      Can be unicast, multicast or broadcast.
1610  */
1611 static int
1612 qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac,
1613         uint32_t num_mac)
1614 {
1615         q80_config_mac_addr_t           *cmac;
1616         q80_config_mac_addr_rsp_t       *cmac_rsp;
1617         uint32_t                        err;
1618         device_t                        dev = ha->pci_dev;
1619         int                             i;
1620         uint8_t                         *mac_cpy = mac_addr;
1621
1622         if (num_mac > Q8_MAX_MAC_ADDRS) {
1623                 device_printf(dev, "%s: %s num_mac [0x%x] > Q8_MAX_MAC_ADDRS\n",
1624                         __func__, (add_mac ? "Add" : "Del"), num_mac);
1625                 return (-1);
1626         }
1627
1628         cmac = (q80_config_mac_addr_t *)ha->hw.mbox;
1629         bzero(cmac, (sizeof (q80_config_mac_addr_t)));
1630
1631         cmac->opcode = Q8_MBX_CONFIG_MAC_ADDR;
1632         cmac->count_version = sizeof (q80_config_mac_addr_t) >> 2;
1633         cmac->count_version |= Q8_MBX_CMD_VERSION;
1634
1635         if (add_mac) 
1636                 cmac->cmd = Q8_MBX_CMAC_CMD_ADD_MAC_ADDR;
1637         else
1638                 cmac->cmd = Q8_MBX_CMAC_CMD_DEL_MAC_ADDR;
1639                 
1640         cmac->cmd |= Q8_MBX_CMAC_CMD_CAM_INGRESS;
1641
1642         cmac->nmac_entries = num_mac;
1643         cmac->cntxt_id = ha->hw.rcv_cntxt_id;
1644
1645         for (i = 0; i < num_mac; i++) {
1646                 bcopy(mac_addr, cmac->mac_addr[i].addr, Q8_ETHER_ADDR_LEN); 
1647                 mac_addr = mac_addr + ETHER_ADDR_LEN;
1648         }
1649
1650         if (qla_mbx_cmd(ha, (uint32_t *)cmac,
1651                 (sizeof (q80_config_mac_addr_t) >> 2),
1652                 ha->hw.mbox, (sizeof(q80_config_mac_addr_rsp_t) >> 2), 1)) {
1653                 device_printf(dev, "%s: %s failed0\n", __func__,
1654                         (add_mac ? "Add" : "Del"));
1655                 return (-1);
1656         }
1657         cmac_rsp = (q80_config_mac_addr_rsp_t *)ha->hw.mbox;
1658
1659         err = Q8_MBX_RSP_STATUS(cmac_rsp->regcnt_status);
1660
1661         if (err) {
1662                 device_printf(dev, "%s: %s failed1 [0x%08x]\n", __func__,
1663                         (add_mac ? "Add" : "Del"), err);
1664                 for (i = 0; i < num_mac; i++) {
1665                         device_printf(dev, "%s: %02x:%02x:%02x:%02x:%02x:%02x\n",
1666                                 __func__, mac_cpy[0], mac_cpy[1], mac_cpy[2],
1667                                 mac_cpy[3], mac_cpy[4], mac_cpy[5]);
1668                         mac_cpy += ETHER_ADDR_LEN;
1669                 }
1670                 return (-1);
1671         }
1672         
1673         return 0;
1674 }
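
/*
 * [Usage notes.] Single-address programming, as done in ql_init_hw_if()
 * below:
 *
 *      qla_config_mac_addr(ha, ha->hw.mac_addr, 1, 1);  add unicast
 *      qla_config_mac_addr(ha, bcast_mac, 1, 1);        add broadcast
 *
 * For multicast the driver packs up to Q8_MAX_MAC_ADDRS six-byte
 * addresses back to back and programs them in a single mailbox call
 * (see qla_hw_add_all_mcast()).
 */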
1675
1676
1677 /*
1678  * Name: qla_set_mac_rcv_mode
1679  * Function: Enable/Disable AllMulticast and Promiscuous Modes.
1680  */
1681 static int
1682 qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode)
1683 {
1684         q80_config_mac_rcv_mode_t       *rcv_mode;
1685         uint32_t                        err;
1686         q80_config_mac_rcv_mode_rsp_t   *rcv_mode_rsp;
1687         device_t                        dev = ha->pci_dev;
1688
1689         rcv_mode = (q80_config_mac_rcv_mode_t *)ha->hw.mbox;
1690         bzero(rcv_mode, (sizeof (q80_config_mac_rcv_mode_t)));
1691
1692         rcv_mode->opcode = Q8_MBX_CONFIG_MAC_RX_MODE;
1693         rcv_mode->count_version = sizeof (q80_config_mac_rcv_mode_t) >> 2;
1694         rcv_mode->count_version |= Q8_MBX_CMD_VERSION;
1695
1696         rcv_mode->mode = mode;
1697
1698         rcv_mode->cntxt_id = ha->hw.rcv_cntxt_id;
1699
1700         if (qla_mbx_cmd(ha, (uint32_t *)rcv_mode,
1701                 (sizeof (q80_config_mac_rcv_mode_t) >> 2),
1702                 ha->hw.mbox, (sizeof(q80_config_mac_rcv_mode_rsp_t) >> 2), 1)) {
1703                 device_printf(dev, "%s: failed0\n", __func__);
1704                 return (-1);
1705         }
1706         rcv_mode_rsp = (q80_config_mac_rcv_mode_rsp_t *)ha->hw.mbox;
1707
1708         err = Q8_MBX_RSP_STATUS(rcv_mode_rsp->regcnt_status);
1709
1710         if (err) {
1711                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1712                 return (-1);
1713         }
1714         
1715         return 0;
1716 }
1717
1718 int
1719 ql_set_promisc(qla_host_t *ha)
1720 {
1721         int ret;
1722
1723         ha->hw.mac_rcv_mode |= Q8_MBX_MAC_RCV_PROMISC_ENABLE;
1724         ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1725         return (ret);
1726 }
1727
1728 void
1729 qla_reset_promisc(qla_host_t *ha)
1730 {
1731         ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_RCV_PROMISC_ENABLE;
1732         (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1733 }
1734
1735 int
1736 ql_set_allmulti(qla_host_t *ha)
1737 {
1738         int ret;
1739
1740         ha->hw.mac_rcv_mode |= Q8_MBX_MAC_ALL_MULTI_ENABLE;
1741         ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1742         return (ret);
1743 }
1744
1745 void
1746 qla_reset_allmulti(qla_host_t *ha)
1747 {
1748         ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_ALL_MULTI_ENABLE;
1749         (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1750 }
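
/*
 * [Usage sketch, not compiled.] The four wrappers above only toggle
 * bits in the cached ha->hw.mac_rcv_mode and push the whole word back
 * down, so promiscuous and all-multicast compose independently. A
 * hypothetical ioctl-path fragment:
 */
#if 0
        if (ifp->if_flags & IFF_PROMISC)
                (void)ql_set_promisc(ha);
        else
                qla_reset_promisc(ha);

        if (ifp->if_flags & IFF_ALLMULTI)
                (void)ql_set_allmulti(ha);
        else
                qla_reset_allmulti(ha);
#endif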
1751
1752 /*
1753  * Name: ql_set_max_mtu
1754  * Function:
1755  *      Sets the maximum transmission unit (MTU) size for the specified rcv context.
1756  */
1757 int
1758 ql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id)
1759 {
1760         device_t                dev;
1761         q80_set_max_mtu_t       *max_mtu;
1762         q80_set_max_mtu_rsp_t   *max_mtu_rsp;
1763         uint32_t                err;
1764
1765         dev = ha->pci_dev;
1766
1767         max_mtu = (q80_set_max_mtu_t *)ha->hw.mbox;
1768         bzero(max_mtu, (sizeof (q80_set_max_mtu_t)));
1769
1770         max_mtu->opcode = Q8_MBX_SET_MAX_MTU;
1771         max_mtu->count_version = (sizeof (q80_set_max_mtu_t) >> 2);
1772         max_mtu->count_version |= Q8_MBX_CMD_VERSION;
1773
1774         max_mtu->cntxt_id = cntxt_id;
1775         max_mtu->mtu = mtu;
1776
1777         if (qla_mbx_cmd(ha, (uint32_t *)max_mtu,
1778                 (sizeof (q80_set_max_mtu_t) >> 2),
1779                 ha->hw.mbox, (sizeof (q80_set_max_mtu_rsp_t) >> 2), 1)) {
1780                 device_printf(dev, "%s: failed\n", __func__);
1781                 return -1;
1782         }
1783
1784         max_mtu_rsp = (q80_set_max_mtu_rsp_t *)ha->hw.mbox;
1785
1786         err = Q8_MBX_RSP_STATUS(max_mtu_rsp->regcnt_status);
1787
1788         if (err) {
1789                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1790         }
1791
1792         return 0;
1793 }
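
/*
 * [Usage note.] Typical call, as in ql_init_hw_if() below -- reprogram
 * the receive context's frame-size ceiling whenever the interface MTU
 * changes:
 *
 *      (void)ql_set_max_mtu(ha, ha->max_frame_size, ha->hw.rcv_cntxt_id);
 */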
1794
1795 static int
1796 qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id)
1797 {
1798         device_t                dev;
1799         q80_link_event_t        *lnk;
1800         q80_link_event_rsp_t    *lnk_rsp;
1801         uint32_t                err;
1802
1803         dev = ha->pci_dev;
1804
1805         lnk = (q80_link_event_t *)ha->hw.mbox;
1806         bzero(lnk, (sizeof (q80_link_event_t)));
1807
1808         lnk->opcode = Q8_MBX_LINK_EVENT_REQ;
1809         lnk->count_version = (sizeof (q80_link_event_t) >> 2);
1810         lnk->count_version |= Q8_MBX_CMD_VERSION;
1811
1812         lnk->cntxt_id = cntxt_id;
1813         lnk->cmd = Q8_LINK_EVENT_CMD_ENABLE_ASYNC;
1814
1815         if (qla_mbx_cmd(ha, (uint32_t *)lnk, (sizeof (q80_link_event_t) >> 2),
1816                 ha->hw.mbox, (sizeof (q80_link_event_rsp_t) >> 2), 0)) {
1817                 device_printf(dev, "%s: failed\n", __func__);
1818                 return -1;
1819         }
1820
1821         lnk_rsp = (q80_link_event_rsp_t *)ha->hw.mbox;
1822
1823         err = Q8_MBX_RSP_STATUS(lnk_rsp->regcnt_status);
1824
1825         if (err) {
1826                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1827         }
1828
1829         return 0;
1830 }
1831
1832 static int
1833 qla_config_fw_lro(qla_host_t *ha, uint16_t cntxt_id)
1834 {
1835         device_t                dev;
1836         q80_config_fw_lro_t     *fw_lro;
1837         q80_config_fw_lro_rsp_t *fw_lro_rsp;
1838         uint32_t                err;
1839
1840         dev = ha->pci_dev;
1841
1842         fw_lro = (q80_config_fw_lro_t *)ha->hw.mbox;
1843         bzero(fw_lro, sizeof(q80_config_fw_lro_t));
1844
1845         fw_lro->opcode = Q8_MBX_CONFIG_FW_LRO;
1846         fw_lro->count_version = (sizeof (q80_config_fw_lro_t) >> 2);
1847         fw_lro->count_version |= Q8_MBX_CMD_VERSION;
1848
1849         fw_lro->flags |= Q8_MBX_FW_LRO_IPV4 | Q8_MBX_FW_LRO_IPV4_WO_DST_IP_CHK;
1850         fw_lro->flags |= Q8_MBX_FW_LRO_IPV6 | Q8_MBX_FW_LRO_IPV6_WO_DST_IP_CHK;
1851
1852         fw_lro->cntxt_id = cntxt_id;
1853
1854         if (qla_mbx_cmd(ha, (uint32_t *)fw_lro,
1855                 (sizeof (q80_config_fw_lro_t) >> 2),
1856                 ha->hw.mbox, (sizeof (q80_config_fw_lro_rsp_t) >> 2), 0)) {
1857                 device_printf(dev, "%s: failed\n", __func__);
1858                 return -1;
1859         }
1860
1861         fw_lro_rsp = (q80_config_fw_lro_rsp_t *)ha->hw.mbox;
1862
1863         err = Q8_MBX_RSP_STATUS(fw_lro_rsp->regcnt_status);
1864
1865         if (err) {
1866                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1867         }
1868
1869         return 0;
1870 }
1871
1872 static int
1873 qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode)
1874 {
1875         device_t                dev;
1876         q80_hw_config_t         *hw_config;
1877         q80_hw_config_rsp_t     *hw_config_rsp;
1878         uint32_t                err;
1879
1880         dev = ha->pci_dev;
1881
1882         hw_config = (q80_hw_config_t *)ha->hw.mbox;
1883         bzero(hw_config, sizeof (q80_hw_config_t));
1884
1885         hw_config->opcode = Q8_MBX_HW_CONFIG;
1886         hw_config->count_version = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE_COUNT;
1887         hw_config->count_version |= Q8_MBX_CMD_VERSION;
1888
1889         hw_config->cmd = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE;
1890
1891         hw_config->u.set_cam_search_mode.mode = search_mode;
1892
1893         if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
1894                 (sizeof (q80_hw_config_t) >> 2),
1895                 ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
1896                 device_printf(dev, "%s: failed\n", __func__);
1897                 return -1;
1898         }
1899         hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;
1900
1901         err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);
1902
1903         if (err) {
1904                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1905         }
1906
1907         return 0;
1908 }
1909
1910 static int
1911 qla_get_cam_search_mode(qla_host_t *ha)
1912 {
1913         device_t                dev;
1914         q80_hw_config_t         *hw_config;
1915         q80_hw_config_rsp_t     *hw_config_rsp;
1916         uint32_t                err;
1917
1918         dev = ha->pci_dev;
1919
1920         hw_config = (q80_hw_config_t *)ha->hw.mbox;
1921         bzero(hw_config, sizeof (q80_hw_config_t));
1922
1923         hw_config->opcode = Q8_MBX_HW_CONFIG;
1924         hw_config->count_version = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE_COUNT;
1925         hw_config->count_version |= Q8_MBX_CMD_VERSION;
1926
1927         hw_config->cmd = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE;
1928
1929         if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
1930                 (sizeof (q80_hw_config_t) >> 2),
1931                 ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
1932                 device_printf(dev, "%s: failed\n", __func__);
1933                 return -1;
1934         }
1935         hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;
1936
1937         err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);
1938
1939         if (err) {
1940                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1941         } else {
1942                 device_printf(dev, "%s: cam search mode [0x%08x]\n", __func__,
1943                         hw_config_rsp->u.get_cam_search_mode.mode);
1944         }
1945
1946         return 0;
1947 }
1948
1949 static int
1950 qla_get_hw_stats(qla_host_t *ha, uint32_t cmd, uint32_t rsp_size)
1951 {
1952         device_t                dev;
1953         q80_get_stats_t         *stat;
1954         q80_get_stats_rsp_t     *stat_rsp;
1955         uint32_t                err;
1956
1957         dev = ha->pci_dev;
1958
1959         stat = (q80_get_stats_t *)ha->hw.mbox;
1960         bzero(stat, (sizeof (q80_get_stats_t)));
1961
1962         stat->opcode = Q8_MBX_GET_STATS;
1963         stat->count_version = 2;
1964         stat->count_version |= Q8_MBX_CMD_VERSION;
1965
1966         stat->cmd = cmd;
1967
1968         if (qla_mbx_cmd(ha, (uint32_t *)stat, 2,
1969                 ha->hw.mbox, (rsp_size >> 2), 0)) {
1970                 device_printf(dev, "%s: failed\n", __func__);
1971                 return -1;
1972         }
1973
1974         stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
1975
1976         err = Q8_MBX_RSP_STATUS(stat_rsp->regcnt_status);
1977
1978         if (err) {
1979                 return -1;
1980         }
1981
1982         return 0;
1983 }
1984
1985 void
1986 ql_get_stats(qla_host_t *ha)
1987 {
1988         q80_get_stats_rsp_t     *stat_rsp;
1989         q80_mac_stats_t         *mstat;
1990         q80_xmt_stats_t         *xstat;
1991         q80_rcv_stats_t         *rstat;
1992         uint32_t                cmd;
1993         int                     i;
1994         struct ifnet *ifp = ha->ifp;
1995
1996         if (ifp == NULL)
1997                 return;
1998
1999         if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) != 0) {
2000                 device_printf(ha->pci_dev, "%s: failed\n", __func__);
2001                 return;
2002         }
2003
2004         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2005                 QLA_UNLOCK(ha, __func__);
2006                 return;
2007         }
2008
2009         stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
2010         /*
2011          * Get MAC Statistics
2012          */
2013         cmd = Q8_GET_STATS_CMD_TYPE_MAC;
2014 //      cmd |= Q8_GET_STATS_CMD_CLEAR;
2015
2016         cmd |= ((ha->pci_func & 0x1) << 16);
2017
2018         if (ha->qla_watchdog_pause)
2019                 goto ql_get_stats_exit;
2020
2021         if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
2022                 mstat = (q80_mac_stats_t *)&stat_rsp->u.mac;
2023                 bcopy(mstat, &ha->hw.mac, sizeof(q80_mac_stats_t));
2024         } else {
2025                 device_printf(ha->pci_dev, "%s: mac failed [0x%08x]\n",
2026                         __func__, ha->hw.mbox[0]);
2027         }
2028         /*
2029          * Get RCV Statistics
2030          */
2031         cmd = Q8_GET_STATS_CMD_RCV | Q8_GET_STATS_CMD_TYPE_CNTXT;
2032 //      cmd |= Q8_GET_STATS_CMD_CLEAR;
2033         cmd |= (ha->hw.rcv_cntxt_id << 16);
2034
2035         if (ha->qla_watchdog_pause)
2036                 goto ql_get_stats_exit;
2037
2038         if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
2039                 rstat = (q80_rcv_stats_t *)&stat_rsp->u.rcv;
2040                 bcopy(rstat, &ha->hw.rcv, sizeof(q80_rcv_stats_t));
2041         } else {
2042                 device_printf(ha->pci_dev, "%s: rcv failed [0x%08x]\n",
2043                         __func__, ha->hw.mbox[0]);
2044         }
2045
2046         if (ha->qla_watchdog_pause)
2047                 goto ql_get_stats_exit;
2048         /*
2049          * Get XMT Statistics
2050          */
2051         for (i = 0 ; ((i < ha->hw.num_tx_rings) && (!ha->qla_watchdog_pause));
2052                 i++) {
2053                 cmd = Q8_GET_STATS_CMD_XMT | Q8_GET_STATS_CMD_TYPE_CNTXT;
2054 //              cmd |= Q8_GET_STATS_CMD_CLEAR;
2055                 cmd |= (ha->hw.tx_cntxt[i].tx_cntxt_id << 16);
2056
2057                 if (qla_get_hw_stats(ha, cmd, sizeof(q80_get_stats_rsp_t))
2058                         == 0) {
2059                         xstat = (q80_xmt_stats_t *)&stat_rsp->u.xmt;
2060                         bcopy(xstat, &ha->hw.xmt[i], sizeof(q80_xmt_stats_t));
2061                 } else {
2062                         device_printf(ha->pci_dev, "%s: xmt failed [0x%08x]\n",
2063                                 __func__, ha->hw.mbox[0]);
2064                 }
2065         }
2066
2067 ql_get_stats_exit:
2068         QLA_UNLOCK(ha, __func__);
2069
2070         return;
2071 }
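
/*
 * [Reference, derived from the code above.] The statistics command word
 * encodes what to fetch in its low 16 bits and which function/context
 * in its high 16 bits:
 *
 *      cmd = Q8_GET_STATS_CMD_TYPE_MAC | ((ha->pci_func & 0x1) << 16);
 *      cmd = Q8_GET_STATS_CMD_RCV | Q8_GET_STATS_CMD_TYPE_CNTXT |
 *              (ha->hw.rcv_cntxt_id << 16);
 *      cmd = Q8_GET_STATS_CMD_XMT | Q8_GET_STATS_CMD_TYPE_CNTXT |
 *              (ha->hw.tx_cntxt[i].tx_cntxt_id << 16);
 */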
2072
2073 /*
2074  * Name: qla_tx_tso
2075  * Function: Checks if the packet to be transmitted is a candidate for
2076  *      Large TCP Segment Offload. If yes, the appropriate fields in the Tx
2077  *      Ring Structure are plugged in.
2078  */
2079 static int
2080 qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr)
2081 {
2082         struct ether_vlan_header *eh;
2083         struct ip *ip = NULL;
2084         struct ip6_hdr *ip6 = NULL;
2085         struct tcphdr *th = NULL;
2086         uint32_t ehdrlen,  hdrlen, ip_hlen, tcp_hlen, tcp_opt_off;
2087         uint16_t etype, opcode, offload = 1;
2088         device_t dev;
2089
2090         dev = ha->pci_dev;
2091
2092
2093         eh = mtod(mp, struct ether_vlan_header *);
2094
2095         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2096                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2097                 etype = ntohs(eh->evl_proto);
2098         } else {
2099                 ehdrlen = ETHER_HDR_LEN;
2100                 etype = ntohs(eh->evl_encap_proto);
2101         }
2102
2103         hdrlen = 0;
2104
2105         switch (etype) {
2106                 case ETHERTYPE_IP:
2107
2108                         tcp_opt_off = ehdrlen + sizeof(struct ip) +
2109                                         sizeof(struct tcphdr);
2110
2111                         if (mp->m_len < tcp_opt_off) {
2112                                 m_copydata(mp, 0, tcp_opt_off, hdr);
2113                                 ip = (struct ip *)(hdr + ehdrlen);
2114                         } else {
2115                                 ip = (struct ip *)(mp->m_data + ehdrlen);
2116                         }
2117
2118                         ip_hlen = ip->ip_hl << 2;
2119                         opcode = Q8_TX_CMD_OP_XMT_TCP_LSO;
2120
2121                                 
2122                         if ((ip->ip_p != IPPROTO_TCP) ||
2123                                 (ip_hlen != sizeof (struct ip))){
2124                                 /* IP Options are not supported */
2125
2126                                 offload = 0;
2127                         } else
2128                                 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
2129
2130                 break;
2131
2132                 case ETHERTYPE_IPV6:
2133
2134                         tcp_opt_off = ehdrlen + sizeof(struct ip6_hdr) +
2135                                         sizeof (struct tcphdr);
2136
2137                         if (mp->m_len < tcp_opt_off) {
2138                                 m_copydata(mp, 0, tcp_opt_off, hdr);
2139                                 ip6 = (struct ip6_hdr *)(hdr + ehdrlen);
2140                         } else {
2141                                 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2142                         }
2143
2144                         ip_hlen = sizeof(struct ip6_hdr);
2145                         opcode = Q8_TX_CMD_OP_XMT_TCP_LSO_IPV6;
2146
2147                         if (ip6->ip6_nxt != IPPROTO_TCP) {
2148                                 //device_printf(dev, "%s: ipv6\n", __func__);
2149                                 offload = 0;
2150                         } else
2151                                 th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
2152                 break;
2153
2154                 default:
2155                         QL_DPRINT8(ha, (dev, "%s: type!=ip\n", __func__));
2156                         offload = 0;
2157                 break;
2158         }
2159
2160         if (!offload)
2161                 return (-1);
2162
2163         tcp_hlen = th->th_off << 2;
2164         hdrlen = ehdrlen + ip_hlen + tcp_hlen;
2165
2166         if (mp->m_len < hdrlen) {
2167                 if (mp->m_len < tcp_opt_off) {
2168                         if (tcp_hlen > sizeof(struct tcphdr)) {
2169                                 m_copydata(mp, tcp_opt_off,
2170                                         (tcp_hlen - sizeof(struct tcphdr)),
2171                                         &hdr[tcp_opt_off]);
2172                         }
2173                 } else {
2174                         m_copydata(mp, 0, hdrlen, hdr);
2175                 }
2176         }
2177
2178         tx_cmd->mss = mp->m_pkthdr.tso_segsz;
2179
2180         tx_cmd->flags_opcode = opcode ;
2181         tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;
2182         tx_cmd->total_hdr_len = hdrlen;
2183
2184         /* Multicast: least significant bit of the first dest MAC byte == 1 */
2185         if (eh->evl_dhost[0] & 0x01) {
2186                 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST;
2187         }
2188
2189         if (mp->m_len < hdrlen) {
2190                 QL_DPRINT8(ha, (dev, "%s: hdrlen = %d\n", __func__, hdrlen));
2191                 return (1);
2192         }
2193
2194         return (0);
2195 }
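
/*
 * [Worked example, sketch only.] For an untagged IPv4/TCP frame with no
 * IP or TCP options, the header math in qla_tx_tso() reduces to:
 */
#if 0
        ehdrlen  = ETHER_HDR_LEN;                       /* 14 */
        ip_hlen  = sizeof(struct ip);                   /* 20 */
        tcp_hlen = sizeof(struct tcphdr);               /* 20 */
        tx_cmd->tcp_hdr_off   = ehdrlen + ip_hlen;      /* 34 */
        tx_cmd->total_hdr_len = ehdrlen + ip_hlen + tcp_hlen;   /* 54 */
#endif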
2196
2197 /*
2198  * Name: qla_tx_chksum
2199  * Function: Checks if the packet to be transmitted is a candidate for
2200  *      TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx
2201  *      Ring Structure are plugged in.
2202  */
2203 static int
2204 qla_tx_chksum(qla_host_t *ha, struct mbuf *mp, uint32_t *op_code,
2205         uint32_t *tcp_hdr_off)
2206 {
2207         struct ether_vlan_header *eh;
2208         struct ip *ip;
2209         struct ip6_hdr *ip6;
2210         uint32_t ehdrlen, ip_hlen;
2211         uint16_t etype, opcode, offload = 1;
2212         device_t dev;
2213         uint8_t buf[sizeof(struct ip6_hdr)];
2214
2215         dev = ha->pci_dev;
2216
2217         *op_code = 0;
2218
2219         if ((mp->m_pkthdr.csum_flags &
2220                 (CSUM_TCP|CSUM_UDP|CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) == 0)
2221                 return (-1);
2222
2223         eh = mtod(mp, struct ether_vlan_header *);
2224
2225         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2226                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2227                 etype = ntohs(eh->evl_proto);
2228         } else {
2229                 ehdrlen = ETHER_HDR_LEN;
2230                 etype = ntohs(eh->evl_encap_proto);
2231         }
2232
2233                 
2234         switch (etype) {
2235                 case ETHERTYPE_IP:
2236                         ip = (struct ip *)(mp->m_data + ehdrlen);
2237
2238                         ip_hlen = sizeof (struct ip);
2239
2240                         if (mp->m_len < (ehdrlen + ip_hlen)) {
2241                                 m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
2242                                 ip = (struct ip *)buf;
2243                         }
2244
2245                         if (ip->ip_p == IPPROTO_TCP)
2246                                 opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM;
2247                         else if (ip->ip_p == IPPROTO_UDP)
2248                                 opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM;
2249                         else {
2250                                 //device_printf(dev, "%s: ipv4\n", __func__);
2251                                 offload = 0;
2252                         }
2253                 break;
2254
2255                 case ETHERTYPE_IPV6:
2256                         ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2257
2258                         ip_hlen = sizeof(struct ip6_hdr);
2259
2260                         if (mp->m_len < (ehdrlen + ip_hlen)) {
2261                                 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
2262                                         buf);
2263                                 ip6 = (struct ip6_hdr *)buf;
2264                         }
2265
2266                         if (ip6->ip6_nxt == IPPROTO_TCP)
2267                                 opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6;
2268                         else if (ip6->ip6_nxt == IPPROTO_UDP)
2269                                 opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6;
2270                         else {
2271                                 //device_printf(dev, "%s: ipv6\n", __func__);
2272                                 offload = 0;
2273                         }
2274                 break;
2275
2276                 default:
2277                         offload = 0;
2278                 break;
2279         }
2280         if (!offload)
2281                 return (-1);
2282
2283         *op_code = opcode;
2284         *tcp_hdr_off = (ip_hlen + ehdrlen);
2285
2286         return (0);
2287 }
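
/*
 * [Reference, derived from the code above.] Mapping from mbuf
 * checksum-request flags to transmit opcodes:
 *
 *      CSUM_TCP        -> Q8_TX_CMD_OP_XMT_TCP_CHKSUM
 *      CSUM_UDP        -> Q8_TX_CMD_OP_XMT_UDP_CHKSUM
 *      CSUM_TCP_IPV6   -> Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6
 *      CSUM_UDP_IPV6   -> Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6
 */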
2288
2289 #define QLA_TX_MIN_FREE 2
2290 /*
2291  * Name: ql_hw_send
2292  * Function: Transmits a packet. It first checks if the packet is a
2293  *      candidate for Large TCP Segment Offload and then for UDP/TCP checksum
2294  *      offload. If neither of these criteria is met, it is transmitted
2295  *      as a regular Ethernet frame.
2296  */
2297 int
2298 ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
2299         uint32_t tx_idx, struct mbuf *mp, uint32_t txr_idx, uint32_t iscsi_pdu)
2300 {
2301         struct ether_vlan_header *eh;
2302         qla_hw_t *hw = &ha->hw;
2303         q80_tx_cmd_t *tx_cmd, tso_cmd;
2304         bus_dma_segment_t *c_seg;
2305         uint32_t num_tx_cmds, hdr_len = 0;
2306         uint32_t total_length = 0, bytes, tx_cmd_count = 0, txr_next;
2307         device_t dev;
2308         int i, ret;
2309         uint8_t *src = NULL, *dst = NULL;
2310         uint8_t frame_hdr[QL_FRAME_HDR_SIZE];
2311         uint32_t op_code = 0;
2312         uint32_t tcp_hdr_off = 0;
2313
2314         dev = ha->pci_dev;
2315
2316         /*
2317          * Always make sure there is at least one empty slot in the tx_ring;
2318          * the tx_ring is considered full when only one entry is available.
2319          */
2320         num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2;
2321
2322         total_length = mp->m_pkthdr.len;
2323         if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
2324                 device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
2325                         __func__, total_length);
2326                 return (EINVAL);
2327         }
2328         eh = mtod(mp, struct ether_vlan_header *);
2329
2330         if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
2331
2332                 bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t));
2333
2334                 src = frame_hdr;
2335                 ret = qla_tx_tso(ha, mp, &tso_cmd, src);
2336
2337                 if (!(ret & ~1)) {
2338                         /* find the additional tx_cmd descriptors required */
2339
2340                         if (mp->m_flags & M_VLANTAG)
2341                                 tso_cmd.total_hdr_len += ETHER_VLAN_ENCAP_LEN;
2342
2343                         hdr_len = tso_cmd.total_hdr_len;
2344
2345                         bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
2346                         bytes = QL_MIN(bytes, hdr_len);
2347
2348                         num_tx_cmds++;
2349                         hdr_len -= bytes;
2350
2351                         while (hdr_len) {
2352                                 bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
2353                                 hdr_len -= bytes;
2354                                 num_tx_cmds++;
2355                         }
2356                         hdr_len = tso_cmd.total_hdr_len;
2357
2358                         if (ret == 0)
2359                                 src = (uint8_t *)eh;
2360                 } else 
2361                         return (EINVAL);
2362         } else {
2363                 (void)qla_tx_chksum(ha, mp, &op_code, &tcp_hdr_off);
2364         }
2365
2366         if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
2367                 ql_hw_tx_done_locked(ha, txr_idx);
2368                 if (hw->tx_cntxt[txr_idx].txr_free <=
2369                                 (num_tx_cmds + QLA_TX_MIN_FREE)) {
2370                         QL_DPRINT8(ha, (dev, "%s: (hw->txr_free <= "
2371                                 "(num_tx_cmds + QLA_TX_MIN_FREE))\n",
2372                                 __func__));
2373                         return (-1);
2374                 }
2375         }
2376
2377         for (i = 0; i < num_tx_cmds; i++) {
2378                 int j;
2379
2380                 j = (tx_idx+i) & (NUM_TX_DESCRIPTORS - 1);
2381
2382                 if (NULL != ha->tx_ring[txr_idx].tx_buf[j].m_head) {
2383                         QL_ASSERT(ha, 0, \
2384                                 ("%s [%d]: txr_idx = %d tx_idx = %d mbuf = %p\n",\
2385                                 __func__, __LINE__, txr_idx, j,\
2386                                 ha->tx_ring[txr_idx].tx_buf[j].m_head));
2387                         return (EINVAL);
2388                 }
2389         }
2390
2391         tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[tx_idx];
2392
2393         if (!(mp->m_pkthdr.csum_flags & CSUM_TSO)) {
2394
2395                 if (nsegs > ha->hw.max_tx_segs)
2396                         ha->hw.max_tx_segs = nsegs;
2397
2398                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2399
2400                 if (op_code) {
2401                         tx_cmd->flags_opcode = op_code;
2402                         tx_cmd->tcp_hdr_off = tcp_hdr_off;
2403
2404                 } else {
2405                         tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER;
2406                 }
2407         } else {
2408                 bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t));
2409                 ha->tx_tso_frames++;
2410         }
2411
2412         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2413                 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED;
2414
2415                 if (iscsi_pdu)
2416                         eh->evl_tag |= ha->hw.user_pri_iscsi << 13;
2417
2418         } else if (mp->m_flags & M_VLANTAG) {
2419
2420                 if (hdr_len) { /* TSO */
2421                         tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED |
2422                                                 Q8_TX_CMD_FLAGS_HW_VLAN_ID);
2423                         tx_cmd->tcp_hdr_off += ETHER_VLAN_ENCAP_LEN;
2424                 } else
2425                         tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_HW_VLAN_ID;
2426
2427                 ha->hw_vlan_tx_frames++;
2428                 tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag;
2429
2430                 if (iscsi_pdu) {
2431                         tx_cmd->vlan_tci |= ha->hw.user_pri_iscsi << 13;
2432                         mp->m_pkthdr.ether_vtag = tx_cmd->vlan_tci;
2433                 }
2434         }
2435
2436
2437         tx_cmd->n_bufs = (uint8_t)nsegs;
2438         tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF);
2439         tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8)));
2440         tx_cmd->cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func);
2441
2442         c_seg = segs;
2443
2444         while (1) {
2445                 for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) {
2446
2447                         switch (i) {
2448                         case 0:
2449                                 tx_cmd->buf1_addr = c_seg->ds_addr;
2450                                 tx_cmd->buf1_len = c_seg->ds_len;
2451                                 break;
2452
2453                         case 1:
2454                                 tx_cmd->buf2_addr = c_seg->ds_addr;
2455                                 tx_cmd->buf2_len = c_seg->ds_len;
2456                                 break;
2457
2458                         case 2:
2459                                 tx_cmd->buf3_addr = c_seg->ds_addr;
2460                                 tx_cmd->buf3_len = c_seg->ds_len;
2461                                 break;
2462
2463                         case 3:
2464                                 tx_cmd->buf4_addr = c_seg->ds_addr;
2465                                 tx_cmd->buf4_len = c_seg->ds_len;
2466                                 break;
2467                         }
2468
2469                         c_seg++;
2470                         nsegs--;
2471                 }
2472
2473                 txr_next = hw->tx_cntxt[txr_idx].txr_next =
2474                         (hw->tx_cntxt[txr_idx].txr_next + 1) &
2475                                 (NUM_TX_DESCRIPTORS - 1);
2476                 tx_cmd_count++;
2477
2478                 if (!nsegs)
2479                         break;
2480                 
2481                 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2482                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2483         }
2484
2485         if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
2486
2487                 /* TSO : Copy the header in the following tx cmd descriptors */
2488
2489                 txr_next = hw->tx_cntxt[txr_idx].txr_next;
2490
2491                 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2492                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2493
2494                 bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
2495                 bytes = QL_MIN(bytes, hdr_len);
2496
2497                 dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN;
2498
2499                 if (mp->m_flags & M_VLANTAG) {
2500                         /* first copy the src/dst MAC addresses */
2501                         bcopy(src, dst, (ETHER_ADDR_LEN * 2));
2502                         dst += (ETHER_ADDR_LEN * 2);
2503                         src += (ETHER_ADDR_LEN * 2);
2504                         
2505                         *((uint16_t *)dst) = htons(ETHERTYPE_VLAN);
2506                         dst += 2;
2507                         *((uint16_t *)dst) = htons(mp->m_pkthdr.ether_vtag);
2508                         dst += 2;
2509
2510                         /* bytes left in src header */
2511                         hdr_len -= ((ETHER_ADDR_LEN * 2) +
2512                                         ETHER_VLAN_ENCAP_LEN);
2513
2514                         /* bytes left in TxCmd Entry */
2515                         bytes -= ((ETHER_ADDR_LEN * 2) + ETHER_VLAN_ENCAP_LEN);
2516
2517
2518                         bcopy(src, dst, bytes);
2519                         src += bytes;
2520                         hdr_len -= bytes;
2521                 } else {
2522                         bcopy(src, dst, bytes);
2523                         src += bytes;
2524                         hdr_len -= bytes;
2525                 }
2526
2527                 txr_next = hw->tx_cntxt[txr_idx].txr_next =
2528                                 (hw->tx_cntxt[txr_idx].txr_next + 1) &
2529                                         (NUM_TX_DESCRIPTORS - 1);
2530                 tx_cmd_count++;
2531                 
2532                 while (hdr_len) {
2533                         tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2534                         bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2535
2536                         bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
2537
2538                         bcopy(src, tx_cmd, bytes);
2539                         src += bytes;
2540                         hdr_len -= bytes;
2541
2542                         txr_next = hw->tx_cntxt[txr_idx].txr_next =
2543                                 (hw->tx_cntxt[txr_idx].txr_next + 1) &
2544                                         (NUM_TX_DESCRIPTORS - 1);
2545                         tx_cmd_count++;
2546                 }
2547         }
2548
2549         hw->tx_cntxt[txr_idx].txr_free =
2550                 hw->tx_cntxt[txr_idx].txr_free - tx_cmd_count;
2551
2552         QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->tx_cntxt[txr_idx].txr_next,\
2553                 txr_idx);
2554         QL_DPRINT8(ha, (dev, "%s: return\n", __func__));
2555
2556         return (0);
2557 }
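
/*
 * [Sketch, not compiled.] Descriptor budget used by ql_hw_send(): each
 * q80_tx_cmd_t carries up to Q8_TX_CMD_MAX_SEGMENTS (4) DMA segments,
 * and for TSO the packet headers consume additional descriptors -- the
 * first with room for sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN header
 * bytes, each subsequent one a full sizeof(q80_tx_cmd_t). A
 * hypothetical helper restating that arithmetic:
 */
#if 0
static uint32_t
qla_tx_desc_budget_example(int nsegs, uint32_t tso_hdr_len)
{
        uint32_t n, room;

        /* one command descriptor per 4 DMA segments, rounded up */
        n = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) / Q8_TX_CMD_MAX_SEGMENTS;

        if (tso_hdr_len) {
                room = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
                n++;
                tso_hdr_len -= QL_MIN(room, tso_hdr_len);
                while (tso_hdr_len) {
                        n++;
                        tso_hdr_len -= QL_MIN(sizeof(q80_tx_cmd_t),
                                        tso_hdr_len);
                }
        }
        return (n);
}
#endif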
2558
2559
2560
2561 #define Q8_CONFIG_IND_TBL_SIZE  32 /* < Q8_RSS_IND_TBL_SIZE and power of 2 */
2562 static int
2563 qla_config_rss_ind_table(qla_host_t *ha)
2564 {
2565         uint32_t i, count;
2566         uint8_t rss_ind_tbl[Q8_CONFIG_IND_TBL_SIZE];
2567
2568
2569         for (i = 0; i < Q8_CONFIG_IND_TBL_SIZE; i++) {
2570                 rss_ind_tbl[i] = i % ha->hw.num_sds_rings;
2571         }
2572
2573         for (i = 0; i <= Q8_RSS_IND_TBL_MAX_IDX ;
2574                 i = i + Q8_CONFIG_IND_TBL_SIZE) {
2575
2576                 if ((i + Q8_CONFIG_IND_TBL_SIZE) > Q8_RSS_IND_TBL_MAX_IDX) {
2577                         count = Q8_RSS_IND_TBL_MAX_IDX - i + 1;
2578                 } else {
2579                         count = Q8_CONFIG_IND_TBL_SIZE;
2580                 }
2581
2582                 if (qla_set_rss_ind_table(ha, i, count, ha->hw.rcv_cntxt_id,
2583                         rss_ind_tbl))
2584                         return (-1);
2585         }
2586
2587         return (0);
2588 }
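
/*
 * [Example, derived from the code above.] With 4 SDS rings the 32-entry
 * chunk built by qla_config_rss_ind_table() is simply
 * 0,1,2,3,0,1,2,3,... -- a round-robin spread of hash buckets over the
 * rings -- and qla_set_rss_ind_table() programs that pattern in chunks
 * until Q8_RSS_IND_TBL_MAX_IDX is covered.
 */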
2589
2590 static int
2591 qla_config_soft_lro(qla_host_t *ha)
2592 {
2593         int i;
2594         qla_hw_t *hw = &ha->hw;
2595         struct lro_ctrl *lro;
2596
2597         for (i = 0; i < hw->num_sds_rings; i++) {
2598                 lro = &hw->sds[i].lro;
2599
2600                 bzero(lro, sizeof(struct lro_ctrl));
2601
2602 #if (__FreeBSD_version >= 1100101)
2603                 if (tcp_lro_init_args(lro, ha->ifp, 0, NUM_RX_DESCRIPTORS)) {
2604                         device_printf(ha->pci_dev,
2605                                 "%s: tcp_lro_init_args [%d] failed\n",
2606                                 __func__, i);
2607                         return (-1);
2608                 }
2609 #else
2610                 if (tcp_lro_init(lro)) {
2611                         device_printf(ha->pci_dev,
2612                                 "%s: tcp_lro_init [%d] failed\n",
2613                                 __func__, i);
2614                         return (-1);
2615                 }
2616 #endif /* #if (__FreeBSD_version >= 1100101) */
2617
2618                 lro->ifp = ha->ifp;
2619         }
2620
2621         QL_DPRINT2(ha, (ha->pci_dev, "%s: LRO initialized\n", __func__));
2622         return (0);
2623 }
2624
2625 static void
2626 qla_drain_soft_lro(qla_host_t *ha)
2627 {
2628         int i;
2629         qla_hw_t *hw = &ha->hw;
2630         struct lro_ctrl *lro;
2631
2632         for (i = 0; i < hw->num_sds_rings; i++) {
2633                 lro = &hw->sds[i].lro;
2634
2635 #if (__FreeBSD_version >= 1100101)
2636                 tcp_lro_flush_all(lro);
2637 #else
2638                 struct lro_entry *queued;
2639
2640                 while ((!SLIST_EMPTY(&lro->lro_active))) {
2641                         queued = SLIST_FIRST(&lro->lro_active);
2642                         SLIST_REMOVE_HEAD(&lro->lro_active, next);
2643                         tcp_lro_flush(lro, queued);
2644                 }
2645 #endif /* #if (__FreeBSD_version >= 1100101) */
2646         }
2647
2648         return;
2649 }
2650
2651 static void
2652 qla_free_soft_lro(qla_host_t *ha)
2653 {
2654         int i;
2655         qla_hw_t *hw = &ha->hw;
2656         struct lro_ctrl *lro;
2657
2658         for (i = 0; i < hw->num_sds_rings; i++) {
2659                 lro = &hw->sds[i].lro;
2660                 tcp_lro_free(lro);
2661         }
2662
2663         return;
2664 }
2665
2666
2667 /*
2668  * Name: ql_del_hw_if
2669  * Function: Destroys the hardware-specific entities corresponding to an
2670  *      Ethernet Interface
2671  */
2672 void
2673 ql_del_hw_if(qla_host_t *ha)
2674 {
2675         uint32_t i;
2676         uint32_t num_msix;
2677
2678         (void)qla_stop_nic_func(ha);
2679
2680         qla_del_rcv_cntxt(ha);
2681
2682         qla_del_xmt_cntxt(ha);
2683
2684         if (ha->hw.flags.init_intr_cnxt) {
2685                 for (i = 0; i < ha->hw.num_sds_rings; ) {
2686
2687                         if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2688                                 num_msix = Q8_MAX_INTR_VECTORS;
2689                         else
2690                                 num_msix = ha->hw.num_sds_rings - i;
2691                         qla_config_intr_cntxt(ha, i, num_msix, 0);
2692
2693                         i += num_msix;
2694                 }
2695
2696                 ha->hw.flags.init_intr_cnxt = 0;
2697         }
2698
2699         if (ha->hw.enable_soft_lro) {
2700                 qla_drain_soft_lro(ha);
2701                 qla_free_soft_lro(ha);
2702         }
2703
2704         return;
2705 }
2706
2707 void
2708 qla_confirm_9kb_enable(qla_host_t *ha)
2709 {
2710         uint32_t supports_9kb = 0;
2711
2712         ha->hw.mbx_intr_mask_offset = READ_REG32(ha, Q8_MBOX_INT_MASK_MSIX);
2713
2714         /* Use MSI-X vector 0; Enable Firmware Mailbox Interrupt */
2715         WRITE_REG32(ha, Q8_MBOX_INT_ENABLE, BIT_2);
2716         WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
2717
2718         qla_get_nic_partition(ha, &supports_9kb, NULL);
2719
2720         if (!supports_9kb)
2721                 ha->hw.enable_9kb = 0;
2722
2723         return;
2724 }
2725
2726 /*
2727  * Name: ql_init_hw_if
2728  * Function: Creates the hardware-specific entities corresponding to an
2729  *      Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
2730  *      corresponding to the interface. Enables LRO if allowed.
2731  */
2732 int
2733 ql_init_hw_if(qla_host_t *ha)
2734 {
2735         device_t        dev;
2736         uint32_t        i;
2737         uint8_t         bcast_mac[6];
2738         qla_rdesc_t     *rdesc;
2739         uint32_t        num_msix;
2740
2741         dev = ha->pci_dev;
2742
2743         for (i = 0; i < ha->hw.num_sds_rings; i++) {
2744                 bzero(ha->hw.dma_buf.sds_ring[i].dma_b,
2745                         ha->hw.dma_buf.sds_ring[i].size);
2746         }
2747
2748         for (i = 0; i < ha->hw.num_sds_rings; ) {
2749
2750                 if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2751                         num_msix = Q8_MAX_INTR_VECTORS;
2752                 else
2753                         num_msix = ha->hw.num_sds_rings - i;
2754
2755                 if (qla_config_intr_cntxt(ha, i, num_msix, 1)) {
2756
2757                         if (i > 0) {
2758
2759                                 num_msix = i;
2760
2761                                 for (i = 0; i < num_msix; ) {
2762                                         qla_config_intr_cntxt(ha, i,
2763                                                 Q8_MAX_INTR_VECTORS, 0);
2764                                         i += Q8_MAX_INTR_VECTORS;
2765                                 }
2766                         }
2767                         return (-1);
2768                 }
2769
2770                 i = i + num_msix;
2771         }
2772
2773         ha->hw.flags.init_intr_cnxt = 1;
2774
2775         /*
2776          * Create Receive Context
2777          */
2778         if (qla_init_rcv_cntxt(ha)) {
2779                 return (-1);
2780         }
2781
2782         for (i = 0; i < ha->hw.num_rds_rings; i++) {
2783                 rdesc = &ha->hw.rds[i];
2784                 rdesc->rx_next = NUM_RX_DESCRIPTORS - 2;
2785                 rdesc->rx_in = 0;
2786                 /* Update the RDS Producer Indices */
2787                 QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,\
2788                         rdesc->rx_next);
2789         }
2790
2791         /*
2792          * Create Transmit Context
2793          */
2794         if (qla_init_xmt_cntxt(ha)) {
2795                 qla_del_rcv_cntxt(ha);
2796                 return (-1);
2797         }
2798         ha->hw.max_tx_segs = 0;
2799
2800         if (qla_config_mac_addr(ha, ha->hw.mac_addr, 1, 1))
2801                 return(-1);
2802
2803         ha->hw.flags.unicast_mac = 1;
2804
2805         bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
2806         bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
2807
2808         if (qla_config_mac_addr(ha, bcast_mac, 1, 1))
2809                 return (-1);
2810
2811         ha->hw.flags.bcast_mac = 1;
2812
2813         /*
2814          * program any cached multicast addresses
2815          */
2816         if (qla_hw_add_all_mcast(ha))
2817                 return (-1);
2818
2819         if (ql_set_max_mtu(ha, ha->max_frame_size, ha->hw.rcv_cntxt_id))
2820                 return (-1);
2821
2822         if (qla_config_rss(ha, ha->hw.rcv_cntxt_id))
2823                 return (-1);
2824
2825         if (qla_config_rss_ind_table(ha))
2826                 return (-1);
2827
2828         if (qla_config_intr_coalesce(ha, ha->hw.rcv_cntxt_id, 0, 1))
2829                 return (-1);
2830
2831         if (qla_link_event_req(ha, ha->hw.rcv_cntxt_id))
2832                 return (-1);
2833
2834         if (ha->ifp->if_capenable & IFCAP_LRO) {
2835                 if (ha->hw.enable_hw_lro) {
2836                         ha->hw.enable_soft_lro = 0;
2837
2838                         if (qla_config_fw_lro(ha, ha->hw.rcv_cntxt_id))
2839                                 return (-1);
2840                 } else {
2841                         ha->hw.enable_soft_lro = 1;
2842
2843                         if (qla_config_soft_lro(ha))
2844                                 return (-1);
2845                 }
2846         }
2847
2848         if (qla_init_nic_func(ha))
2849                 return (-1);
2850
2851         if (qla_query_fw_dcbx_caps(ha))
2852                 return (-1);
2853
2854         for (i = 0; i < ha->hw.num_sds_rings; i++)
2855                 QL_ENABLE_INTERRUPTS(ha, i);
2856
2857         return (0);
2858 }
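
/*
 * [Summary of ql_init_hw_if() above.] Bring-up order: interrupt
 * contexts -> receive context (plus any extra rcv rings and SDS-to-RDS
 * maps) -> transmit contexts -> unicast/broadcast/multicast MACs ->
 * max MTU -> RSS and its indirection table -> interrupt coalescing ->
 * async link events -> LRO (firmware or soft) -> NIC function init ->
 * DCBX query -> enable interrupts.
 */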
2859
2860 static int
2861 qla_map_sds_to_rds(qla_host_t *ha, uint32_t start_idx, uint32_t num_idx)
2862 {
2863         device_t                dev = ha->pci_dev;
2864         q80_rq_map_sds_to_rds_t *map_rings;
2865         q80_rsp_map_sds_to_rds_t *map_rings_rsp;
2866         uint32_t                i, err;
2867         qla_hw_t                *hw = &ha->hw;
2868
2869         map_rings = (q80_rq_map_sds_to_rds_t *)ha->hw.mbox;
2870         bzero(map_rings, sizeof(q80_rq_map_sds_to_rds_t));
2871
2872         map_rings->opcode = Q8_MBX_MAP_SDS_TO_RDS;
2873         map_rings->count_version = (sizeof (q80_rq_map_sds_to_rds_t) >> 2);
2874         map_rings->count_version |= Q8_MBX_CMD_VERSION;
2875
2876         map_rings->cntxt_id = hw->rcv_cntxt_id;
2877         map_rings->num_rings = num_idx;
2878
2879         for (i = 0; i < num_idx; i++) {
2880                 map_rings->sds_rds[i].sds_ring = i + start_idx;
2881                 map_rings->sds_rds[i].rds_ring = i + start_idx;
2882         }
2883
2884         if (qla_mbx_cmd(ha, (uint32_t *)map_rings,
2885                 (sizeof (q80_rq_map_sds_to_rds_t) >> 2),
2886                 ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
2887                 device_printf(dev, "%s: failed0\n", __func__);
2888                 return (-1);
2889         }
2890
2891         map_rings_rsp = (q80_rsp_map_sds_to_rds_t *)ha->hw.mbox;
2892
2893         err = Q8_MBX_RSP_STATUS(map_rings_rsp->regcnt_status);
2894
2895         if (err) {
2896                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2897                 return (-1);
2898         }
2899
2900         return (0);
2901 }
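
/*
 * [Note, derived from the code above.] qla_map_sds_to_rds() programs an
 * identity mapping -- SDS ring i pairs with RDS ring i -- so each
 * status ring completes buffers from exactly one receive ring.
 */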
2902
2903 /*
2904  * Name: qla_init_rcv_cntxt
2905  * Function: Creates the Receive Context.
2906  */
2907 static int
2908 qla_init_rcv_cntxt(qla_host_t *ha)
2909 {
2910         q80_rq_rcv_cntxt_t      *rcntxt;
2911         q80_rsp_rcv_cntxt_t     *rcntxt_rsp;
2912         q80_stat_desc_t         *sdesc;
2913         int                     i, j;
2914         qla_hw_t                *hw = &ha->hw;
2915         device_t                dev;
2916         uint32_t                err;
2917         uint32_t                rcntxt_sds_rings;
2918         uint32_t                rcntxt_rds_rings;
2919         uint32_t                max_idx;
2920
2921         dev = ha->pci_dev;
2922
2923         /*
2924          * Create Receive Context
2925          */
2926
2927         for (i = 0; i < hw->num_sds_rings; i++) {
2928                 for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++) {
2929                         sdesc = (q80_stat_desc_t *)
2930                                 &hw->sds[i].sds_ring_base[j];
2931                         sdesc->data[0] = 1ULL;
2932                         sdesc->data[1] = 1ULL;
2933                 }
2934         }
2935
2936         rcntxt_sds_rings = hw->num_sds_rings;
2937         if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS)
2938                 rcntxt_sds_rings = MAX_RCNTXT_SDS_RINGS;
2939
2940         rcntxt_rds_rings = hw->num_rds_rings;
2941
2942         if (hw->num_rds_rings > MAX_RDS_RING_SETS)
2943                 rcntxt_rds_rings = MAX_RDS_RING_SETS;
2944
2945         rcntxt = (q80_rq_rcv_cntxt_t *)ha->hw.mbox;
2946         bzero(rcntxt, (sizeof (q80_rq_rcv_cntxt_t)));
2947
2948         rcntxt->opcode = Q8_MBX_CREATE_RX_CNTXT;
2949         rcntxt->count_version = (sizeof (q80_rq_rcv_cntxt_t) >> 2);
2950         rcntxt->count_version |= Q8_MBX_CMD_VERSION;
2951
2952         rcntxt->cap0 = Q8_RCV_CNTXT_CAP0_BASEFW |
2953                         Q8_RCV_CNTXT_CAP0_LRO |
2954                         Q8_RCV_CNTXT_CAP0_HW_LRO |
2955                         Q8_RCV_CNTXT_CAP0_RSS |
2956                         Q8_RCV_CNTXT_CAP0_SGL_LRO;
2957
2958         if (ha->hw.enable_9kb)
2959                 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SINGLE_JUMBO;
2960         else
2961                 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SGL_JUMBO;
2962
2963         if (ha->hw.num_rds_rings > 1) {
2964                 rcntxt->nrds_sets_rings = rcntxt_rds_rings | (1 << 5);
2965                 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_MULTI_RDS;
2966         } else
2967                 rcntxt->nrds_sets_rings = 0x1 | (1 << 5);
2968
2969         rcntxt->nsds_rings = rcntxt_sds_rings;
2970
2971         rcntxt->rds_producer_mode = Q8_RCV_CNTXT_RDS_PROD_MODE_UNIQUE;
2972
2973         rcntxt->rcv_vpid = 0;
2974
2975         for (i = 0; i <  rcntxt_sds_rings; i++) {
2976                 rcntxt->sds[i].paddr =
2977                         qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr);
2978                 rcntxt->sds[i].size =
2979                         qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
2980                 rcntxt->sds[i].intr_id = qla_host_to_le16(hw->intr_id[i]);
2981                 rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0);
2982         }
2983
2984         for (i = 0; i <  rcntxt_rds_rings; i++) {
2985                 rcntxt->rds[i].paddr_std =
2986                         qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr);
2987
2988                 if (ha->hw.enable_9kb)
2989                         rcntxt->rds[i].std_bsize =
2990                                 qla_host_to_le64(MJUM9BYTES);
2991                 else
2992                         rcntxt->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
2993
2994                 rcntxt->rds[i].std_nentries =
2995                         qla_host_to_le32(NUM_RX_DESCRIPTORS);
2996         }
2997
2998         if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
2999                 (sizeof (q80_rq_rcv_cntxt_t) >> 2),
3000                 ha->hw.mbox, (sizeof(q80_rsp_rcv_cntxt_t) >> 2), 0)) {
3001                 device_printf(dev, "%s: failed0\n", __func__);
3002                 return (-1);
3003         }
3004
3005         rcntxt_rsp = (q80_rsp_rcv_cntxt_t *)ha->hw.mbox;
3006
3007         err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
3008
3009         if (err) {
3010                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3011                 return (-1);
3012         }
3013
3014         for (i = 0; i <  rcntxt_sds_rings; i++) {
3015                 hw->sds[i].sds_consumer = rcntxt_rsp->sds_cons[i];
3016         }
3017
3018         for (i = 0; i <  rcntxt_rds_rings; i++) {
3019                 hw->rds[i].prod_std = rcntxt_rsp->rds[i].prod_std;
3020         }
3021
3022         hw->rcv_cntxt_id = rcntxt_rsp->cntxt_id;
3023
3024         ha->hw.flags.init_rx_cnxt = 1;
3025
3026         if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) {
3027
3028                 for (i = MAX_RCNTXT_SDS_RINGS; i < hw->num_sds_rings;) {
3029
3030                         if ((i + MAX_RCNTXT_SDS_RINGS) < hw->num_sds_rings)
3031                                 max_idx = MAX_RCNTXT_SDS_RINGS;
3032                         else
3033                                 max_idx = hw->num_sds_rings - i;
3034
3035                         err = qla_add_rcv_rings(ha, i, max_idx);
3036                         if (err)
3037                                 return -1;
3038
3039                         i += max_idx;
3040                 }
3041         }
3042
3043         if (hw->num_rds_rings > 1) {
3044
3045                 for (i = 0; i < hw->num_rds_rings; ) {
3046
3047                         if ((i + MAX_SDS_TO_RDS_MAP) < hw->num_rds_rings)
3048                                 max_idx = MAX_SDS_TO_RDS_MAP;
3049                         else
3050                                 max_idx = hw->num_rds_rings - i;
3051
3052                         err = qla_map_sds_to_rds(ha, i, max_idx);
3053                         if (err)
3054                                 return -1;
3055
3056                         i += max_idx;
3057                 }
3058         }
3059
3060         return (0);
3061 }
3062
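/*
 * Name: qla_add_rcv_rings
 * Function: Adds SDS/RDS rings to an existing Receive Context, for rings
 *      beyond what a single Create Receive Context command can describe.
 */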
3063 static int
3064 qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds)
3065 {
3066         device_t                dev = ha->pci_dev;
3067         q80_rq_add_rcv_rings_t  *add_rcv;
3068         q80_rsp_add_rcv_rings_t *add_rcv_rsp;
3069         uint32_t                i,j, err;
3070         qla_hw_t                *hw = &ha->hw;
3071
3072         add_rcv = (q80_rq_add_rcv_rings_t *)ha->hw.mbox;
3073         bzero(add_rcv, sizeof (q80_rq_add_rcv_rings_t));
3074
3075         add_rcv->opcode = Q8_MBX_ADD_RX_RINGS;
3076         add_rcv->count_version = (sizeof (q80_rq_add_rcv_rings_t) >> 2);
3077         add_rcv->count_version |= Q8_MBX_CMD_VERSION;
3078
3079         add_rcv->nrds_sets_rings = nsds | (1 << 5);
3080         add_rcv->nsds_rings = nsds;
3081         add_rcv->cntxt_id = hw->rcv_cntxt_id;
3082
3083         for (i = 0; i <  nsds; i++) {
3084
3085                 j = i + sds_idx;
3086
3087                 add_rcv->sds[i].paddr =
3088                         qla_host_to_le64(hw->dma_buf.sds_ring[j].dma_addr);
3089
3090                 add_rcv->sds[i].size =
3091                         qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
3092
3093                 add_rcv->sds[i].intr_id = qla_host_to_le16(hw->intr_id[j]);
3094                 add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0);
3095
3096         }
3097
3098         for (i = 0; (i <  nsds); i++) {
3099                 j = i + sds_idx;
3100
3101                 add_rcv->rds[i].paddr_std =
3102                         qla_host_to_le64(hw->dma_buf.rds_ring[j].dma_addr);
3103
3104                 if (ha->hw.enable_9kb)
3105                         add_rcv->rds[i].std_bsize =
3106                                 qla_host_to_le64(MJUM9BYTES);
3107                 else
3108                         add_rcv->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
3109
3110                 add_rcv->rds[i].std_nentries =
3111                         qla_host_to_le32(NUM_RX_DESCRIPTORS);
3112         }
3113
3114
3115         if (qla_mbx_cmd(ha, (uint32_t *)add_rcv,
3116                 (sizeof (q80_rq_add_rcv_rings_t) >> 2),
3117                 ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
3118                 device_printf(dev, "%s: failed0\n", __func__);
3119                 return (-1);
3120         }
3121
3122         add_rcv_rsp = (q80_rsp_add_rcv_rings_t *)ha->hw.mbox;
3123
3124         err = Q8_MBX_RSP_STATUS(add_rcv_rsp->regcnt_status);
3125
3126         if (err) {
3127                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3128                 return (-1);
3129         }
3130
3131         for (i = 0; i < nsds; i++) {
3132                 hw->sds[(i + sds_idx)].sds_consumer = add_rcv_rsp->sds_cons[i];
3133         }
3134
3135         for (i = 0; i < nsds; i++) {
3136                 hw->rds[(i + sds_idx)].prod_std = add_rcv_rsp->rds[i].prod_std;
3137         }
3138
3139         return (0);
3140 }
3141
3142 /*
3143  * Name: qla_del_rcv_cntxt
3144  * Function: Destroys the Receive Context.
3145  */
3146 static void
3147 qla_del_rcv_cntxt(qla_host_t *ha)
3148 {
3149         device_t                        dev = ha->pci_dev;
3150         q80_rcv_cntxt_destroy_t         *rcntxt;
3151         q80_rcv_cntxt_destroy_rsp_t     *rcntxt_rsp;
3152         uint32_t                        err;
3153         uint8_t                         bcast_mac[6];
3154
3155         if (!ha->hw.flags.init_rx_cnxt)
3156                 return;
3157
3158         if (qla_hw_del_all_mcast(ha))
3159                 return;
3160
3161         if (ha->hw.flags.bcast_mac) {
3162
3163                 bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
3164                 bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
3165
3166                 if (qla_config_mac_addr(ha, bcast_mac, 0, 1))
3167                         return;
3168                 ha->hw.flags.bcast_mac = 0;
3169
3170         }
3171
3172         if (ha->hw.flags.unicast_mac) {
3173                 if (qla_config_mac_addr(ha, ha->hw.mac_addr, 0, 1))
3174                         return;
3175                 ha->hw.flags.unicast_mac = 0;
3176         }
3177
3178         rcntxt = (q80_rcv_cntxt_destroy_t *)ha->hw.mbox;
3179         bzero(rcntxt, (sizeof (q80_rcv_cntxt_destroy_t)));
3180
3181         rcntxt->opcode = Q8_MBX_DESTROY_RX_CNTXT;
3182         rcntxt->count_version = (sizeof (q80_rcv_cntxt_destroy_t) >> 2);
3183         rcntxt->count_version |= Q8_MBX_CMD_VERSION;
3184
3185         rcntxt->cntxt_id = ha->hw.rcv_cntxt_id;
3186
3187         if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
3188                 (sizeof (q80_rcv_cntxt_destroy_t) >> 2),
3189                 ha->hw.mbox, (sizeof(q80_rcv_cntxt_destroy_rsp_t) >> 2), 0)) {
3190                 device_printf(dev, "%s: failed0\n", __func__);
3191                 return;
3192         }
3193         rcntxt_rsp = (q80_rcv_cntxt_destroy_rsp_t *)ha->hw.mbox;
3194
3195         err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
3196
3197         if (err) {
3198                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3199         }
3200
3201         ha->hw.flags.init_rx_cnxt = 0;
3202         return;
3203 }
3204
3205 /*
3206  * Name: qla_init_xmt_cntxt_i
3207  * Function: Creates the Transmit Context for a single Tx ring.
3208  */
3209 static int
3210 qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
3211 {
3212         device_t                dev;
3213         qla_hw_t                *hw = &ha->hw;
3214         q80_rq_tx_cntxt_t       *tcntxt;
3215         q80_rsp_tx_cntxt_t      *tcntxt_rsp;
3216         uint32_t                err;
3217         qla_hw_tx_cntxt_t       *hw_tx_cntxt;
3218         uint32_t                intr_idx;
3219
3220         hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
3221
3222         dev = ha->pci_dev;
3223
3224         /*
3225          * Create Transmit Context
3226          */
3227         tcntxt = (q80_rq_tx_cntxt_t *)ha->hw.mbox;
3228         bzero(tcntxt, (sizeof (q80_rq_tx_cntxt_t)));
3229
3230         tcntxt->opcode = Q8_MBX_CREATE_TX_CNTXT;
3231         tcntxt->count_version = (sizeof (q80_rq_tx_cntxt_t) >> 2);
3232         tcntxt->count_version |= Q8_MBX_CMD_VERSION;
3233
3234         intr_idx = txr_idx;
3235
3236 #ifdef QL_ENABLE_ISCSI_TLV
3237
3238         tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO |
3239                                 Q8_TX_CNTXT_CAP0_TC;
3240
3241         if (txr_idx >= (ha->hw.num_tx_rings >> 1)) {
3242                 tcntxt->traffic_class = 1;
3243         }
3244
3245         intr_idx = txr_idx % (ha->hw.num_tx_rings >> 1);
3246
3247 #else
3248         tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO;
3249
3250 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
3251
3252         tcntxt->ntx_rings = 1;
3253
3254         tcntxt->tx_ring[0].paddr =
3255                 qla_host_to_le64(hw_tx_cntxt->tx_ring_paddr);
3256         tcntxt->tx_ring[0].tx_consumer =
3257                 qla_host_to_le64(hw_tx_cntxt->tx_cons_paddr);
3258         tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS);
3259
3260         tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[intr_idx]);
3261         tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0);
3262
3263         hw_tx_cntxt->txr_free = NUM_TX_DESCRIPTORS;
3264         hw_tx_cntxt->txr_next = hw_tx_cntxt->txr_comp = 0;
3265         *hw_tx_cntxt->tx_cons = 0;
3266
3267         if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
3268                 (sizeof (q80_rq_tx_cntxt_t) >> 2),
3269                 ha->hw.mbox,
3270                 (sizeof(q80_rsp_tx_cntxt_t) >> 2), 0)) {
3271                 device_printf(dev, "%s: failed0\n", __func__);
3272                 return (-1);
3273         }
3274         tcntxt_rsp = (q80_rsp_tx_cntxt_t *)ha->hw.mbox;
3275
3276         err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
3277
3278         if (err) {
3279                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3280                 return -1;
3281         }
3282
3283         hw_tx_cntxt->tx_prod_reg = tcntxt_rsp->tx_ring[0].prod_index;
3284         hw_tx_cntxt->tx_cntxt_id = tcntxt_rsp->tx_ring[0].cntxt_id;
3285
3286         if (qla_config_intr_coalesce(ha, hw_tx_cntxt->tx_cntxt_id, 0, 0))
3287                 return (-1);
3288
3289         return (0);
3290 }
3291
3292
3293 /*
3294  * Name: qla_del_xmt_cntxt_i
3295  * Function: Destroys the Transmit Context for a single Tx ring.
3296  */
3297 static int
3298 qla_del_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
3299 {
3300         device_t                        dev = ha->pci_dev;
3301         q80_tx_cntxt_destroy_t          *tcntxt;
3302         q80_tx_cntxt_destroy_rsp_t      *tcntxt_rsp;
3303         uint32_t                        err;
3304
3305         tcntxt = (q80_tx_cntxt_destroy_t *)ha->hw.mbox;
3306         bzero(tcntxt, (sizeof (q80_tx_cntxt_destroy_t)));
3307
3308         tcntxt->opcode = Q8_MBX_DESTROY_TX_CNTXT;
3309         tcntxt->count_version = (sizeof (q80_tx_cntxt_destroy_t) >> 2);
3310         tcntxt->count_version |= Q8_MBX_CMD_VERSION;
3311
3312         tcntxt->cntxt_id = ha->hw.tx_cntxt[txr_idx].tx_cntxt_id;
3313
3314         if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
3315                 (sizeof (q80_tx_cntxt_destroy_t) >> 2),
3316                 ha->hw.mbox, (sizeof (q80_tx_cntxt_destroy_rsp_t) >> 2), 0)) {
3317                 device_printf(dev, "%s: failed0\n", __func__);
3318                 return (-1);
3319         }
3320         tcntxt_rsp = (q80_tx_cntxt_destroy_rsp_t *)ha->hw.mbox;
3321
3322         err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
3323
3324         if (err) {
3325                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3326                 return (-1);
3327         }
3328
3329         return (0);
3330 }
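
/*
 * Name: qla_del_xmt_cntxt
 * Function: Destroys the Transmit Contexts of all Tx rings.
 */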
3331 static void
3332 qla_del_xmt_cntxt(qla_host_t *ha)
3333 {
3334         uint32_t i;
3335
3336         if (!ha->hw.flags.init_tx_cnxt)
3337                 return;
3338
3339         for (i = 0; i < ha->hw.num_tx_rings; i++) {
3340                 if (qla_del_xmt_cntxt_i(ha, i))
3341                         break;
3342         }
3343         ha->hw.flags.init_tx_cnxt = 0;
3344 }
3345
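/*
 * Name: qla_init_xmt_cntxt
 * Function: Creates one Transmit Context per Tx ring; on failure the
 *      contexts created so far are destroyed.
 */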
3346 static int
3347 qla_init_xmt_cntxt(qla_host_t *ha)
3348 {
3349         uint32_t i, j;
3350
3351         for (i = 0; i < ha->hw.num_tx_rings; i++) {
3352                 if (qla_init_xmt_cntxt_i(ha, i) != 0) {
3353                         for (j = 0; j < i; j++)
3354                                 qla_del_xmt_cntxt_i(ha, j);
3355                         return (-1);
3356                 }
3357         }
3358         ha->hw.flags.init_tx_cnxt = 1;
3359         return (0);
3360 }
3361
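/*
 * Name: qla_hw_all_mcast
 * Function: Programs (add_mcast = 1) or removes (add_mcast = 0) every
 *      multicast address cached in ha->hw.mcast[], batching up to
 *      Q8_MAX_MAC_ADDRS addresses per mailbox command.
 */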
3362 static int
3363 qla_hw_all_mcast(qla_host_t *ha, uint32_t add_mcast)
3364 {
3365         int i, nmcast;
3366         uint32_t count = 0;
3367         uint8_t *mcast;
3368
3369         nmcast = ha->hw.nmcast;
3370
3371         QL_DPRINT2(ha, (ha->pci_dev,
3372                 "%s:[0x%x] enter nmcast = %d \n", __func__, add_mcast, nmcast));
3373
3374         mcast = ha->hw.mac_addr_arr;
3375         memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3376
3377         for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
3378                 if ((ha->hw.mcast[i].addr[0] != 0) || 
3379                         (ha->hw.mcast[i].addr[1] != 0) ||
3380                         (ha->hw.mcast[i].addr[2] != 0) ||
3381                         (ha->hw.mcast[i].addr[3] != 0) ||
3382                         (ha->hw.mcast[i].addr[4] != 0) ||
3383                         (ha->hw.mcast[i].addr[5] != 0)) {
3384
3385                         bcopy(ha->hw.mcast[i].addr, mcast, ETHER_ADDR_LEN);
3386                         mcast = mcast + ETHER_ADDR_LEN;
3387                         count++;
3388                         
3389                         if (count == Q8_MAX_MAC_ADDRS) {
3390                                 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr,
3391                                         add_mcast, count)) {
3392                                         device_printf(ha->pci_dev,
3393                                                 "%s: failed\n", __func__);
3394                                         return (-1);
3395                                 }
3396
3397                                 count = 0;
3398                                 mcast = ha->hw.mac_addr_arr;
3399                                 memset(mcast, 0,
3400                                         (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3401                         }
3402
3403                         nmcast--;
3404                 }
3405         }
3406
3407         if (count) {
3408                 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mcast,
3409                         count)) {
3410                         device_printf(ha->pci_dev, "%s: failed\n", __func__);
3411                         return (-1);
3412                 }
3413         }
3414         QL_DPRINT2(ha, (ha->pci_dev,
3415                 "%s:[0x%x] exit nmcast = %d \n", __func__, add_mcast, nmcast));
3416
3417         return 0;
3418 }
3419
3420 static int
3421 qla_hw_add_all_mcast(qla_host_t *ha)
3422 {
3423         int ret;
3424
3425         ret = qla_hw_all_mcast(ha, 1);
3426
3427         return (ret);
3428 }
3429
3430 int
3431 qla_hw_del_all_mcast(qla_host_t *ha)
3432 {
3433         int ret;
3434
3435         ret = qla_hw_all_mcast(ha, 0);
3436
3437         bzero(ha->hw.mcast, (sizeof (qla_mcast_t) * Q8_MAX_NUM_MULTICAST_ADDRS));
3438         ha->hw.nmcast = 0;
3439
3440         return (ret);
3441 }
3442
3443 static int
3444 qla_hw_mac_addr_present(qla_host_t *ha, uint8_t *mta)
3445 {
3446         int i;
3447
3448         for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3449                 if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0)
3450                         return (0); /* it has already been added */
3451         }
3452         return (-1);
3453 }
3454
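/*
 * Name: qla_hw_add_mcast
 * Function: Caches nmcast addresses from mta in free slots of
 *      ha->hw.mcast[]; a slot is considered free when its address is
 *      all zeroes.
 */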
3455 static int
3456 qla_hw_add_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
3457 {
3458         int i;
3459
3460         for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3461
3462                 if ((ha->hw.mcast[i].addr[0] == 0) && 
3463                         (ha->hw.mcast[i].addr[1] == 0) &&
3464                         (ha->hw.mcast[i].addr[2] == 0) &&
3465                         (ha->hw.mcast[i].addr[3] == 0) &&
3466                         (ha->hw.mcast[i].addr[4] == 0) &&
3467                         (ha->hw.mcast[i].addr[5] == 0)) {
3468
3469                         bcopy(mta, ha->hw.mcast[i].addr, Q8_MAC_ADDR_LEN);
3470                         ha->hw.nmcast++;        
3471
3472                         mta = mta + ETHER_ADDR_LEN;
3473                         nmcast--;
3474
3475                         if (nmcast == 0)
3476                                 break;
3477                 }
3478
3479         }
3480         return 0;
3481 }
3482
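/*
 * Name: qla_hw_del_mcast
 * Function: Removes nmcast addresses in mta from the ha->hw.mcast[]
 *      cache by zeroing out the matching slots.
 */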
3483 static int
3484 qla_hw_del_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
3485 {
3486         int i;
3487
3488         for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3489                 if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) {
3490
3491                         ha->hw.mcast[i].addr[0] = 0;
3492                         ha->hw.mcast[i].addr[1] = 0;
3493                         ha->hw.mcast[i].addr[2] = 0;
3494                         ha->hw.mcast[i].addr[3] = 0;
3495                         ha->hw.mcast[i].addr[4] = 0;
3496                         ha->hw.mcast[i].addr[5] = 0;
3497
3498                         ha->hw.nmcast--;        
3499
3500                         mta = mta + ETHER_ADDR_LEN;
3501                         nmcast--;
3502
3503                         if (nmcast == 0)
3504                                 break;
3505                 }
3506         }
3507         return 0;
3508 }
3509
3510 /*
3511  * Name: ql_hw_set_multi
3512  * Function: Sets the Multicast Addresses provided by the host O.S into the
3513  *      hardware (for the given interface)
3514  */
3515 int
3516 ql_hw_set_multi(qla_host_t *ha, uint8_t *mcast_addr, uint32_t mcnt,
3517         uint32_t add_mac)
3518 {
3519         uint8_t *mta = mcast_addr;
3520         int i;
3521         int ret = 0;
3522         uint32_t count = 0;
3523         uint8_t *mcast;
3524
3525         mcast = ha->hw.mac_addr_arr;
3526         memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3527
3528         for (i = 0; i < mcnt; i++) {
3529                 if (mta[0] || mta[1] || mta[2] || mta[3] || mta[4] || mta[5]) {
3530                         if (add_mac) {
3531                                 if (qla_hw_mac_addr_present(ha, mta) != 0) {
3532                                         bcopy(mta, mcast, ETHER_ADDR_LEN);
3533                                         mcast = mcast + ETHER_ADDR_LEN;
3534                                         count++;
3535                                 }
3536                         } else {
3537                                 if (qla_hw_mac_addr_present(ha, mta) == 0) {
3538                                         bcopy(mta, mcast, ETHER_ADDR_LEN);
3539                                         mcast = mcast + ETHER_ADDR_LEN;
3540                                         count++;
3541                                 }
3542                         }
3543                 }
3544                 if (count == Q8_MAX_MAC_ADDRS) {
3545                         if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr,
3546                                 add_mac, count)) {
3547                                 device_printf(ha->pci_dev, "%s: failed\n",
3548                                         __func__);
3549                                 return (-1);
3550                         }
3551
3552                         if (add_mac) {
3553                                 qla_hw_add_mcast(ha, ha->hw.mac_addr_arr,
3554                                         count);
3555                         } else {
3556                                 qla_hw_del_mcast(ha, ha->hw.mac_addr_arr,
3557                                         count);
3558                         }
3559
3560                         count = 0;
3561                         mcast = ha->hw.mac_addr_arr;
3562                         memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3563                 }
3564                         
3565                 mta += Q8_MAC_ADDR_LEN;
3566         }
3567
3568         if (count) {
3569                 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mac,
3570                         count)) {
3571                         device_printf(ha->pci_dev, "%s: failed\n", __func__);
3572                         return (-1);
3573                 }
3574                 if (add_mac) {
3575                         qla_hw_add_mcast(ha, ha->hw.mac_addr_arr, count);
3576                 } else {
3577                         qla_hw_del_mcast(ha, ha->hw.mac_addr_arr, count);
3578                 }
3579         }
3580
3581         return (ret);
3582 }
3583
3584 /*
3585  * Name: ql_hw_tx_done_locked
3586  * Function: Handle Transmit Completions
3587  */
3588 void
3589 ql_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx)
3590 {
3591         qla_tx_buf_t *txb;
3592         qla_hw_t *hw = &ha->hw;
3593         uint32_t comp_idx, comp_count = 0;
3594         qla_hw_tx_cntxt_t *hw_tx_cntxt;
3595
3596         hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
3597
3598         /* retrieve index of last entry in tx ring completed */
3599         comp_idx = qla_le32_to_host(*(hw_tx_cntxt->tx_cons));
3600
3601         while (comp_idx != hw_tx_cntxt->txr_comp) {
3602
3603                 txb = &ha->tx_ring[txr_idx].tx_buf[hw_tx_cntxt->txr_comp];
3604
3605                 hw_tx_cntxt->txr_comp++;
3606                 if (hw_tx_cntxt->txr_comp == NUM_TX_DESCRIPTORS)
3607                         hw_tx_cntxt->txr_comp = 0;
3608
3609                 comp_count++;
3610
3611                 if (txb->m_head) {
3612                         ha->ifp->if_opackets++;
3613
3614                         bus_dmamap_sync(ha->tx_tag, txb->map,
3615                                 BUS_DMASYNC_POSTWRITE);
3616                         bus_dmamap_unload(ha->tx_tag, txb->map);
3617                         m_freem(txb->m_head);
3618
3619                         txb->m_head = NULL;
3620                 }
3621         }
3622
3623         hw_tx_cntxt->txr_free += comp_count;
3624         return;
3625 }
3626
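/*
 * Name: ql_update_link_state
 * Function: Reads Q8_LINK_STATE and reports link transitions to the stack.
 *      Each PCI function's link status appears to occupy its own 4-bit
 *      field within the register.
 */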
3627 void
3628 ql_update_link_state(qla_host_t *ha)
3629 {
3630         uint32_t link_state;
3631         uint32_t prev_link_state;
3632
3633         if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3634                 ha->hw.link_up = 0;
3635                 return;
3636         }
3637         link_state = READ_REG32(ha, Q8_LINK_STATE);
3638
3639         prev_link_state =  ha->hw.link_up;
3640
3641         if (ha->pci_func == 0) 
3642                 ha->hw.link_up = (((link_state & 0xF) == 1)? 1 : 0);
3643         else
3644                 ha->hw.link_up = ((((link_state >> 4)& 0xF) == 1)? 1 : 0);
3645
3646         if (prev_link_state !=  ha->hw.link_up) {
3647                 if (ha->hw.link_up) {
3648                         if_link_state_change(ha->ifp, LINK_STATE_UP);
3649                 } else {
3650                         if_link_state_change(ha->ifp, LINK_STATE_DOWN);
3651                 }
3652         }
3653         return;
3654 }
3655
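/*
 * Name: ql_hw_check_health
 * Function: On every 500th invocation, checks the ASIC temperature and the
 *      firmware heartbeat; returns -1 on a temperature alert or when the
 *      heartbeat stops advancing (the first missed beat is ignored).
 */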
3656 int
3657 ql_hw_check_health(qla_host_t *ha)
3658 {
3659         uint32_t val;
3660
3661         ha->hw.health_count++;
3662
3663         if (ha->hw.health_count < 500)
3664                 return 0;
3665
3666         ha->hw.health_count = 0;
3667
3668         val = READ_REG32(ha, Q8_ASIC_TEMPERATURE);
3669
3670         if (((val & 0xFFFF) == 2) || ((val & 0xFFFF) == 3) ||
3671                 (QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE))) {
3672                 device_printf(ha->pci_dev, "%s: Temperature Alert [0x%08x]\n",
3673                         __func__, val);
3674                 return -1;
3675         }
3676
3677         val = READ_REG32(ha, Q8_FIRMWARE_HEARTBEAT);
3678
3679         if ((val != ha->hw.hbeat_value) &&
3680                 (!(QL_ERR_INJECT(ha, INJCT_HEARTBEAT_FAILURE)))) {
3681                 ha->hw.hbeat_value = val;
3682                 ha->hw.hbeat_failure = 0;
3683                 return 0;
3684         }
3685
3686         ha->hw.hbeat_failure++;
3687
3688         
3689         if ((ha->dbg_level & 0x8000) && (ha->hw.hbeat_failure == 1))
3690                 device_printf(ha->pci_dev, "%s: Heartbeat Failure 1 [0x%08x]\n",
3691                         __func__, val);
3692         if (ha->hw.hbeat_failure < 2) /* we ignore the first failure */
3693                 return 0;
3694         else 
3695                 device_printf(ha->pci_dev, "%s: Heartbeat Failure [0x%08x]\n",
3696                         __func__, val);
3697
3698         return -1;
3699 }
3700
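/*
 * Name: qla_init_nic_func
 * Function: Issues the Init NIC Function mailbox command and registers for
 *      DCBX change, SFP change and IDC async event notifications.
 */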
3701 static int
3702 qla_init_nic_func(qla_host_t *ha)
3703 {
3704         device_t                dev;
3705         q80_init_nic_func_t     *init_nic;
3706         q80_init_nic_func_rsp_t *init_nic_rsp;
3707         uint32_t                err;
3708
3709         dev = ha->pci_dev;
3710
3711         init_nic = (q80_init_nic_func_t *)ha->hw.mbox;
3712         bzero(init_nic, sizeof(q80_init_nic_func_t));
3713
3714         init_nic->opcode = Q8_MBX_INIT_NIC_FUNC;
3715         init_nic->count_version = (sizeof (q80_init_nic_func_t) >> 2);
3716         init_nic->count_version |= Q8_MBX_CMD_VERSION;
3717
3718         init_nic->options = Q8_INIT_NIC_REG_DCBX_CHNG_AEN;
3719         init_nic->options |= Q8_INIT_NIC_REG_SFP_CHNG_AEN;
3720         init_nic->options |= Q8_INIT_NIC_REG_IDC_AEN;
3721
3722 //qla_dump_buf8(ha, __func__, init_nic, sizeof (q80_init_nic_func_t));
3723         if (qla_mbx_cmd(ha, (uint32_t *)init_nic,
3724                 (sizeof (q80_init_nic_func_t) >> 2),
3725                 ha->hw.mbox, (sizeof (q80_init_nic_func_rsp_t) >> 2), 0)) {
3726                 device_printf(dev, "%s: failed\n", __func__);
3727                 return -1;
3728         }
3729
3730         init_nic_rsp = (q80_init_nic_func_rsp_t *)ha->hw.mbox;
3731 // qla_dump_buf8(ha, __func__, init_nic_rsp, sizeof (q80_init_nic_func_rsp_t));
3732
3733         err = Q8_MBX_RSP_STATUS(init_nic_rsp->regcnt_status);
3734
3735         if (err) {
3736                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3737         }
3738
3739         return 0;
3740 }
3741
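/*
 * Name: qla_stop_nic_func
 * Function: Issues the Stop NIC Function mailbox command and deregisters
 *      the DCBX and SFP change async event notifications.
 */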
3742 static int
3743 qla_stop_nic_func(qla_host_t *ha)
3744 {
3745         device_t                dev;
3746         q80_stop_nic_func_t     *stop_nic;
3747         q80_stop_nic_func_rsp_t *stop_nic_rsp;
3748         uint32_t                err;
3749
3750         dev = ha->pci_dev;
3751
3752         stop_nic = (q80_stop_nic_func_t *)ha->hw.mbox;
3753         bzero(stop_nic, sizeof(q80_stop_nic_func_t));
3754
3755         stop_nic->opcode = Q8_MBX_STOP_NIC_FUNC;
3756         stop_nic->count_version = (sizeof (q80_stop_nic_func_t) >> 2);
3757         stop_nic->count_version |= Q8_MBX_CMD_VERSION;
3758
3759         stop_nic->options = Q8_STOP_NIC_DEREG_DCBX_CHNG_AEN;
3760         stop_nic->options |= Q8_STOP_NIC_DEREG_SFP_CHNG_AEN;
3761
3762 //qla_dump_buf8(ha, __func__, stop_nic, sizeof (q80_stop_nic_func_t));
3763         if (qla_mbx_cmd(ha, (uint32_t *)stop_nic,
3764                 (sizeof (q80_stop_nic_func_t) >> 2),
3765                 ha->hw.mbox, (sizeof (q80_stop_nic_func_rsp_t) >> 2), 0)) {
3766                 device_printf(dev, "%s: failed\n", __func__);
3767                 return -1;
3768         }
3769
3770         stop_nic_rsp = (q80_stop_nic_func_rsp_t *)ha->hw.mbox;
3771 //qla_dump_buf8(ha, __func__, stop_nic_rsp, sizeof (q80_stop_nic_func_rsp_t));
3772
3773         err = Q8_MBX_RSP_STATUS(stop_nic_rsp->regcnt_status);
3774
3775         if (err) {
3776                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3777         }
3778
3779         return 0;
3780 }
3781
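/*
 * Name: qla_query_fw_dcbx_caps
 * Function: Queries the firmware DCBX capabilities; the response is only
 *      dumped for debugging and is not otherwise consumed here.
 */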
3782 static int
3783 qla_query_fw_dcbx_caps(qla_host_t *ha)
3784 {
3785         device_t                        dev;
3786         q80_query_fw_dcbx_caps_t        *fw_dcbx;
3787         q80_query_fw_dcbx_caps_rsp_t    *fw_dcbx_rsp;
3788         uint32_t                        err;
3789
3790         dev = ha->pci_dev;
3791
3792         fw_dcbx = (q80_query_fw_dcbx_caps_t *)ha->hw.mbox;
3793         bzero(fw_dcbx, sizeof(q80_query_fw_dcbx_caps_t));
3794
3795         fw_dcbx->opcode = Q8_MBX_GET_FW_DCBX_CAPS;
3796         fw_dcbx->count_version = (sizeof (q80_query_fw_dcbx_caps_t) >> 2);
3797         fw_dcbx->count_version |= Q8_MBX_CMD_VERSION;
3798
3799         ql_dump_buf8(ha, __func__, fw_dcbx, sizeof (q80_query_fw_dcbx_caps_t));
3800         if (qla_mbx_cmd(ha, (uint32_t *)fw_dcbx,
3801                 (sizeof (q80_query_fw_dcbx_caps_t) >> 2),
3802                 ha->hw.mbox, (sizeof (q80_query_fw_dcbx_caps_rsp_t) >> 2), 0)) {
3803                 device_printf(dev, "%s: failed\n", __func__);
3804                 return -1;
3805         }
3806
3807         fw_dcbx_rsp = (q80_query_fw_dcbx_caps_rsp_t *)ha->hw.mbox;
3808         ql_dump_buf8(ha, __func__, fw_dcbx_rsp,
3809                 sizeof (q80_query_fw_dcbx_caps_rsp_t));
3810
3811         err = Q8_MBX_RSP_STATUS(fw_dcbx_rsp->regcnt_status);
3812
3813         if (err) {
3814                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3815         }
3816
3817         return 0;
3818 }
3819
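/*
 * Name: qla_idc_ack
 * Function: Acknowledges an Inter-Driver Communication (IDC) AEN and waits
 *      up to 300 * 100ms for the matching completion (hw.imd_compl).
 */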
3820 static int
3821 qla_idc_ack(qla_host_t *ha, uint32_t aen_mb1, uint32_t aen_mb2,
3822         uint32_t aen_mb3, uint32_t aen_mb4)
3823 {
3824         device_t                dev;
3825         q80_idc_ack_t           *idc_ack;
3826         q80_idc_ack_rsp_t       *idc_ack_rsp;
3827         uint32_t                err;
3828         int                     count = 300;
3829
3830         dev = ha->pci_dev;
3831
3832         idc_ack = (q80_idc_ack_t *)ha->hw.mbox;
3833         bzero(idc_ack, sizeof(q80_idc_ack_t));
3834
3835         idc_ack->opcode = Q8_MBX_IDC_ACK;
3836         idc_ack->count_version = (sizeof (q80_idc_ack_t) >> 2);
3837         idc_ack->count_version |= Q8_MBX_CMD_VERSION;
3838
3839         idc_ack->aen_mb1 = aen_mb1;
3840         idc_ack->aen_mb2 = aen_mb2;
3841         idc_ack->aen_mb3 = aen_mb3;
3842         idc_ack->aen_mb4 = aen_mb4;
3843
3844         ha->hw.imd_compl = 0;
3845
3846         if (qla_mbx_cmd(ha, (uint32_t *)idc_ack,
3847                 (sizeof (q80_idc_ack_t) >> 2),
3848                 ha->hw.mbox, (sizeof (q80_idc_ack_rsp_t) >> 2), 0)) {
3849                 device_printf(dev, "%s: failed\n", __func__);
3850                 return -1;
3851         }
3852
3853         idc_ack_rsp = (q80_idc_ack_rsp_t *)ha->hw.mbox;
3854
3855         err = Q8_MBX_RSP_STATUS(idc_ack_rsp->regcnt_status);
3856
3857         if (err) {
3858                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3859                 return(-1);
3860         }
3861
3862         while (count && !ha->hw.imd_compl) {
3863                 qla_mdelay(__func__, 100);
3864                 count--;
3865         }
3866
3867         if (!count)
3868                 return -1;
3869         else
3870                 device_printf(dev, "%s: count %d\n", __func__, count);
3871
3872         return (0);
3873 }
3874
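/*
 * Name: qla_set_port_config
 * Function: Sets the physical port configuration (pause, DCBX bits). An
 *      intermediate IDC response indicates the change completes
 *      asynchronously, so hw.imd_compl is awaited in that case.
 */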
3875 static int
3876 qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits)
3877 {
3878         device_t                dev;
3879         q80_set_port_cfg_t      *pcfg;
3880         q80_set_port_cfg_rsp_t  *pcfg_rsp;
3881         uint32_t                err;
3882         int                     count = 300;
3883
3884         dev = ha->pci_dev;
3885
3886         pcfg = (q80_set_port_cfg_t *)ha->hw.mbox;
3887         bzero(pcfg, sizeof(q80_set_port_cfg_t));
3888
3889         pcfg->opcode = Q8_MBX_SET_PORT_CONFIG;
3890         pcfg->count_version = (sizeof (q80_set_port_cfg_t) >> 2);
3891         pcfg->count_version |= Q8_MBX_CMD_VERSION;
3892
3893         pcfg->cfg_bits = cfg_bits;
3894
3895         device_printf(dev, "%s: cfg_bits"
3896                 " [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
3897                 " [0x%x, 0x%x, 0x%x]\n", __func__,
3898                 ((cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
3899                 ((cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
3900                 ((cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0));
3901
3902         ha->hw.imd_compl = 0;
3903
3904         if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
3905                 (sizeof (q80_set_port_cfg_t) >> 2),
3906                 ha->hw.mbox, (sizeof (q80_set_port_cfg_rsp_t) >> 2), 0)) {
3907                 device_printf(dev, "%s: failed\n", __func__);
3908                 return -1;
3909         }
3910
3911         pcfg_rsp = (q80_set_port_cfg_rsp_t *)ha->hw.mbox;
3912
3913         err = Q8_MBX_RSP_STATUS(pcfg_rsp->regcnt_status);
3914
3915         if (err == Q8_MBX_RSP_IDC_INTRMD_RSP) {
3916                 while (count && !ha->hw.imd_compl) {
3917                         qla_mdelay(__func__, 100);
3918                         count--;
3919                 }
3920                 if (count) {
3921                         device_printf(dev, "%s: count %d\n", __func__, count);
3922
3923                         err = 0;
3924                 }
3925         }
3926
3927         if (err) {
3928                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3929                 return(-1);
3930         }
3931
3932         return (0);
3933 }
3934
3935
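/*
 * Name: qla_get_minidump_tmplt_size
 * Function: Returns the minidump template size: taken from the built-in
 *      ql83xx_minidump image when QL_LDFLASH_FW is not defined, otherwise
 *      queried from the firmware.
 */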
3936 static int
3937 qla_get_minidump_tmplt_size(qla_host_t *ha, uint32_t *size)
3938 {
3939         uint32_t                        err;
3940         device_t                        dev = ha->pci_dev;
3941         q80_config_md_templ_size_t      *md_size;
3942         q80_config_md_templ_size_rsp_t  *md_size_rsp;
3943
3944 #ifndef QL_LDFLASH_FW
3945
3946         ql_minidump_template_hdr_t *hdr;
3947
3948         hdr = (ql_minidump_template_hdr_t *)ql83xx_minidump;
3949         *size = hdr->size_of_template;
3950         return (0);
3951
3952 #endif /* #ifndef QL_LDFLASH_FW */
3953
3954         md_size = (q80_config_md_templ_size_t *) ha->hw.mbox;
3955         bzero(md_size, sizeof(q80_config_md_templ_size_t));
3956
3957         md_size->opcode = Q8_MBX_GET_MINIDUMP_TMPLT_SIZE;
3958         md_size->count_version = (sizeof (q80_config_md_templ_size_t) >> 2);
3959         md_size->count_version |= Q8_MBX_CMD_VERSION;
3960
3961         if (qla_mbx_cmd(ha, (uint32_t *) md_size,
3962                 (sizeof(q80_config_md_templ_size_t) >> 2), ha->hw.mbox,
3963                 (sizeof(q80_config_md_templ_size_rsp_t) >> 2), 0)) {
3964
3965                 device_printf(dev, "%s: failed\n", __func__);
3966
3967                 return (-1);
3968         }
3969
3970         md_size_rsp = (q80_config_md_templ_size_rsp_t *) ha->hw.mbox;
3971
3972         err = Q8_MBX_RSP_STATUS(md_size_rsp->regcnt_status);
3973
3974         if (err) {
3975                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3976                 return(-1);
3977         }
3978
3979         *size = md_size_rsp->templ_size;
3980
3981         return (0);
3982 }
3983
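/*
 * Name: qla_get_port_config
 * Function: Retrieves and logs the physical port configuration bits.
 */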
3984 static int
3985 qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits)
3986 {
3987         device_t                dev;
3988         q80_get_port_cfg_t      *pcfg;
3989         q80_get_port_cfg_rsp_t  *pcfg_rsp;
3990         uint32_t                err;
3991
3992         dev = ha->pci_dev;
3993
3994         pcfg = (q80_get_port_cfg_t *)ha->hw.mbox;
3995         bzero(pcfg, sizeof(q80_get_port_cfg_t));
3996
3997         pcfg->opcode = Q8_MBX_GET_PORT_CONFIG;
3998         pcfg->count_version = (sizeof (q80_get_port_cfg_t) >> 2);
3999         pcfg->count_version |= Q8_MBX_CMD_VERSION;
4000
4001         if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
4002                 (sizeof (q80_get_port_cfg_t) >> 2),
4003                 ha->hw.mbox, (sizeof (q80_get_port_cfg_rsp_t) >> 2), 0)) {
4004                 device_printf(dev, "%s: failed\n", __func__);
4005                 return -1;
4006         }
4007
4008         pcfg_rsp = (q80_get_port_cfg_rsp_t *)ha->hw.mbox;
4009
4010         err = Q8_MBX_RSP_STATUS(pcfg_rsp->regcnt_status);
4011
4012         if (err) {
4013                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4014                 return(-1);
4015         }
4016
4017         device_printf(dev, "%s: [cfg_bits, port type]"
4018                 " [0x%08x, 0x%02x] [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
4019                 " [0x%x, 0x%x, 0x%x]\n", __func__,
4020                 pcfg_rsp->cfg_bits, pcfg_rsp->phys_port_type,
4021                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
4022                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
4023                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0)
4024                 );
4025
4026         *cfg_bits = pcfg_rsp->cfg_bits;
4027
4028         return (0);
4029 }
4030
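/*
 * Name: ql_iscsi_pdu
 * Function: Returns 0 if the mbuf carries a TCP segment to or from the
 *      well-known iSCSI port (3260), presumably so the caller can steer it
 *      to the iSCSI TLV Tx ring; returns -1 otherwise.
 */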
4031 int
4032 ql_iscsi_pdu(qla_host_t *ha, struct mbuf *mp)
4033 {
4034         struct ether_vlan_header        *eh;
4035         uint16_t                        etype;
4036         struct ip                       *ip = NULL;
4037         struct ip6_hdr                  *ip6 = NULL;
4038         struct tcphdr                   *th = NULL;
4039         uint32_t                        hdrlen;
4040         uint32_t                        offset;
4041         uint8_t                         buf[sizeof(struct ip6_hdr)];
4042
4043         eh = mtod(mp, struct ether_vlan_header *);
4044
4045         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4046                 hdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4047                 etype = ntohs(eh->evl_proto);
4048         } else {
4049                 hdrlen = ETHER_HDR_LEN;
4050                 etype = ntohs(eh->evl_encap_proto);
4051         }
4052
4053         if (etype == ETHERTYPE_IP) {
4054
4055                 offset = (hdrlen + sizeof (struct ip));
4056
4057                 if (mp->m_len >= offset) {
4058                         ip = (struct ip *)(mp->m_data + hdrlen);
4059                 } else {
4060                         m_copydata(mp, hdrlen, sizeof (struct ip), buf);
4061                         ip = (struct ip *)buf;
4062                 }
4063
4064                 if (ip->ip_p == IPPROTO_TCP) {
4065
4066                         hdrlen += ip->ip_hl << 2;
4067                         offset = hdrlen + 4;
4068         
4069                         if (mp->m_len >= offset) {
4070                                 th = (struct tcphdr *)(mp->m_data + hdrlen);
4071                         } else {
4072                                 m_copydata(mp, hdrlen, 4, buf);
4073                                 th = (struct tcphdr *)buf;
4074                         }
4075                 }
4076
4077         } else if (etype == ETHERTYPE_IPV6) {
4078
4079                 offset = (hdrlen + sizeof (struct ip6_hdr));
4080
4081                 if (mp->m_len >= offset) {
4082                         ip6 = (struct ip6_hdr *)(mp->m_data + hdrlen);
4083                 } else {
4084                         m_copydata(mp, hdrlen, sizeof (struct ip6_hdr), buf);
4085                         ip6 = (struct ip6_hdr *)buf;
4086                 }
4087
4088                 if (ip6->ip6_nxt == IPPROTO_TCP) {
4089
4090                         hdrlen += sizeof(struct ip6_hdr);
4091                         offset = hdrlen + 4;
4092
4093                         if (mp->m_len >= offset) {
4094                                 th = (struct tcphdr *)(mp->m_data + hdrlen);
4095                         } else {
4096                                 m_copydata(mp, hdrlen, 4, buf);
4097                                 th = (struct tcphdr *)buf;
4098                         }
4099                 }
4100         }
4101
4102         if (th != NULL) {
4103                 if ((th->th_sport == htons(3260)) ||
4104                         (th->th_dport == htons(3260)))
4105                         return 0;
4106         }
4107         return (-1);
4108 }
4109
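/*
 * Name: qla_hw_async_event
 * Function: Dispatches firmware async events; AEN 0x8101 is acknowledged
 *      via qla_idc_ack(), all other events are ignored.
 */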
4110 void
4111 qla_hw_async_event(qla_host_t *ha)
4112 {
4113         switch (ha->hw.aen_mb0) {
4114         case 0x8101:
4115                 (void)qla_idc_ack(ha, ha->hw.aen_mb1, ha->hw.aen_mb2,
4116                         ha->hw.aen_mb3, ha->hw.aen_mb4);
4117
4118                 break;
4119
4120         default:
4121                 break;
4122         }
4123
4124         return;
4125 }
4126
4127 #ifdef QL_LDFLASH_FW
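/*
 * Name: ql_get_minidump_template
 * Function: Retrieves the minidump template from the firmware into the
 *      minidump DMA buffer.
 */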
4128 static int
4129 ql_get_minidump_template(qla_host_t *ha)
4130 {
4131         uint32_t                        err;
4132         device_t                        dev = ha->pci_dev;
4133         q80_config_md_templ_cmd_t       *md_templ;
4134         q80_config_md_templ_cmd_rsp_t   *md_templ_rsp;
4135
4136         md_templ = (q80_config_md_templ_cmd_t *) ha->hw.mbox;
4137         bzero(md_templ, (sizeof (q80_config_md_templ_cmd_t)));
4138
4139         md_templ->opcode = Q8_MBX_GET_MINIDUMP_TMPLT;
4140         md_templ->count_version = ( sizeof(q80_config_md_templ_cmd_t) >> 2);
4141         md_templ->count_version |= Q8_MBX_CMD_VERSION;
4142
4143         md_templ->buf_addr = ha->hw.dma_buf.minidump.dma_addr;
4144         md_templ->buff_size = ha->hw.dma_buf.minidump.size;
4145
4146         if (qla_mbx_cmd(ha, (uint32_t *) md_templ,
4147                 (sizeof(q80_config_md_templ_cmd_t) >> 2),
4148                  ha->hw.mbox,
4149                 (sizeof(q80_config_md_templ_cmd_rsp_t) >> 2), 0)) {
4150
4151                 device_printf(dev, "%s: failed\n", __func__);
4152
4153                 return (-1);
4154         }
4155
4156         md_templ_rsp = (q80_config_md_templ_cmd_rsp_t *) ha->hw.mbox;
4157
4158         err = Q8_MBX_RSP_STATUS(md_templ_rsp->regcnt_status);
4159
4160         if (err) {
4161                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4162                 return (-1);
4163         }
4164
4165         return (0);
4166
4167 }
4168 #endif /* #ifdef QL_LDFLASH_FW */
4169
4170 /*
4171  * Minidump related functionality 
4172  */
4173
4174 static int ql_parse_template(qla_host_t *ha);
4175
4176 static uint32_t ql_rdcrb(qla_host_t *ha,
4177                         ql_minidump_entry_rdcrb_t *crb_entry,
4178                         uint32_t * data_buff);
4179
4180 static uint32_t ql_pollrd(qla_host_t *ha,
4181                         ql_minidump_entry_pollrd_t *entry,
4182                         uint32_t * data_buff);
4183
4184 static uint32_t ql_pollrd_modify_write(qla_host_t *ha,
4185                         ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
4186                         uint32_t *data_buff);
4187
4188 static uint32_t ql_L2Cache(qla_host_t *ha,
4189                         ql_minidump_entry_cache_t *cacheEntry,
4190                         uint32_t * data_buff);
4191
4192 static uint32_t ql_L1Cache(qla_host_t *ha,
4193                         ql_minidump_entry_cache_t *cacheEntry,
4194                         uint32_t *data_buff);
4195
4196 static uint32_t ql_rdocm(qla_host_t *ha,
4197                         ql_minidump_entry_rdocm_t *ocmEntry,
4198                         uint32_t *data_buff);
4199
4200 static uint32_t ql_rdmem(qla_host_t *ha,
4201                         ql_minidump_entry_rdmem_t *mem_entry,
4202                         uint32_t *data_buff);
4203
4204 static uint32_t ql_rdrom(qla_host_t *ha,
4205                         ql_minidump_entry_rdrom_t *romEntry,
4206                         uint32_t *data_buff);
4207
4208 static uint32_t ql_rdmux(qla_host_t *ha,
4209                         ql_minidump_entry_mux_t *muxEntry,
4210                         uint32_t *data_buff);
4211
4212 static uint32_t ql_rdmux2(qla_host_t *ha,
4213                         ql_minidump_entry_mux2_t *muxEntry,
4214                         uint32_t *data_buff);
4215
4216 static uint32_t ql_rdqueue(qla_host_t *ha,
4217                         ql_minidump_entry_queue_t *queueEntry,
4218                         uint32_t *data_buff);
4219
4220 static uint32_t ql_cntrl(qla_host_t *ha,
4221                         ql_minidump_template_hdr_t *template_hdr,
4222                         ql_minidump_entry_cntrl_t *crbEntry);
4223
4224
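/*
 * Name: ql_minidump_size
 * Function: Sums the template's per-capture-level sizes for every level
 *      (bit 1 and up) set in mdump_capture_mask.
 */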
4225 static uint32_t
4226 ql_minidump_size(qla_host_t *ha)
4227 {
4228         uint32_t i, k;
4229         uint32_t size = 0;
4230         ql_minidump_template_hdr_t *hdr;
4231
4232         hdr = (ql_minidump_template_hdr_t *)ha->hw.dma_buf.minidump.dma_b;
4233
4234         i = 0x2;
4235
4236         for (k = 1; k < QL_DBG_CAP_SIZE_ARRAY_LEN; k++) {
4237                 if (i & ha->hw.mdump_capture_mask)
4238                         size += hdr->capture_size_array[k];
4239                 i = i << 1;
4240         }
4241         return (size);
4242 }
4243
4244 static void
4245 ql_free_minidump_buffer(qla_host_t *ha)
4246 {
4247         if (ha->hw.mdump_buffer != NULL) {
4248                 free(ha->hw.mdump_buffer, M_QLA83XXBUF);
4249                 ha->hw.mdump_buffer = NULL;
4250                 ha->hw.mdump_buffer_size = 0;
4251         }
4252         return;
4253 }
4254
4255 static int
4256 ql_alloc_minidump_buffer(qla_host_t *ha)
4257 {
4258         ha->hw.mdump_buffer_size = ql_minidump_size(ha);
4259
4260         if (!ha->hw.mdump_buffer_size)
4261                 return (-1);
4262
4263         ha->hw.mdump_buffer = malloc(ha->hw.mdump_buffer_size, M_QLA83XXBUF,
4264                                         M_NOWAIT);
4265
4266         if (ha->hw.mdump_buffer == NULL)
4267                 return (-1);
4268
4269         return (0);
4270 }
4271
4272 static void
4273 ql_free_minidump_template_buffer(qla_host_t *ha)
4274 {
4275         if (ha->hw.mdump_template != NULL) {
4276                 free(ha->hw.mdump_template, M_QLA83XXBUF);
4277                 ha->hw.mdump_template = NULL;
4278                 ha->hw.mdump_template_size = 0;
4279         }
4280         return;
4281 }
4282
4283 static int
4284 ql_alloc_minidump_template_buffer(qla_host_t *ha)
4285 {
4286         ha->hw.mdump_template_size = ha->hw.dma_buf.minidump.size;
4287
4288         ha->hw.mdump_template = malloc(ha->hw.mdump_template_size,
4289                                         M_QLA83XXBUF, M_NOWAIT);
4290
4291         if (ha->hw.mdump_template == NULL)
4292                 return (-1);
4293
4294         return (0);
4295 }
4296
4297 static int
4298 ql_alloc_minidump_buffers(qla_host_t *ha)
4299 {
4300         int ret;
4301
4302         ret = ql_alloc_minidump_template_buffer(ha);
4303
4304         if (ret)
4305                 return (ret);
4306
4307         ret = ql_alloc_minidump_buffer(ha);
4308
4309         if (ret)
4310                 ql_free_minidump_template_buffer(ha);
4311
4312         return (ret);
4313 }
4314
4315
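/*
 * Name: ql_validate_minidump_checksum
 * Function: One's-complement folds the 64-bit sum of all 32-bit words in
 *      the template and returns its complement; 0 indicates a valid
 *      template.
 */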
4316 static uint32_t
4317 ql_validate_minidump_checksum(qla_host_t *ha)
4318 {
4319         uint64_t sum = 0;
4320         int count;
4321         uint32_t *template_buff;
4322
4323         count = ha->hw.dma_buf.minidump.size / sizeof (uint32_t);
4324         template_buff = ha->hw.dma_buf.minidump.dma_b;
4325
4326         while (count-- > 0) {
4327                 sum += *template_buff++;
4328         }
4329
4330         while (sum >> 32) {
4331                 sum = (sum & 0xFFFFFFFF) + (sum >> 32);
4332         }
4333
4334         return (~sum);
4335 }
4336
4337 int
4338 ql_minidump_init(qla_host_t *ha)
4339 {
4340         int             ret = 0;
4341         uint32_t        template_size = 0;
4342         device_t        dev = ha->pci_dev;
4343
4344         /*
4345          * Get Minidump Template Size
4346          */
4347         ret = qla_get_minidump_tmplt_size(ha, &template_size);
4348
4349         if (ret || (template_size == 0)) {
4350                 device_printf(dev, "%s: failed [%d, %d]\n", __func__, ret,
4351                         template_size);
4352                 return (-1);
4353         }
4354
4355         /*
4356          * Allocate Memory for Minidump Template
4357          */
4358
4359         ha->hw.dma_buf.minidump.alignment = 8;
4360         ha->hw.dma_buf.minidump.size = template_size;
4361
4362 #ifdef QL_LDFLASH_FW
4363         if (ql_alloc_dmabuf(ha, &ha->hw.dma_buf.minidump)) {
4364
4365                 device_printf(dev, "%s: minidump dma alloc failed\n", __func__);
4366
4367                 return (-1);
4368         }
4369         ha->hw.dma_buf.flags.minidump = 1;
4370
4371         /*
4372          * Retrieve Minidump Template
4373          */
4374         ret = ql_get_minidump_template(ha);
4375 #else
4376         ha->hw.dma_buf.minidump.dma_b = ql83xx_minidump;
4377
4378 #endif /* #ifdef QL_LDFLASH_FW */
4379
4380         if (ret == 0) {
4381
4382                 ret = ql_validate_minidump_checksum(ha);
4383
4384                 if (ret == 0) {
4385
4386                         ret = ql_alloc_minidump_buffers(ha);
4387
4388                         if (ret == 0)
4389                                 ha->hw.mdump_init = 1;
4390                         else
4391                                 device_printf(dev,
4392                                         "%s: ql_alloc_minidump_buffers"
4393                                         " failed\n", __func__);
4394                 } else {
4395                         device_printf(dev, "%s: ql_validate_minidump_checksum"
4396                                 " failed\n", __func__);
4397                 }
4398         } else {
4399                 device_printf(dev, "%s: ql_get_minidump_template failed\n",
4400                          __func__);
4401         }
4402
4403         if (ret)
4404                 ql_minidump_free(ha);
4405
4406         return (ret);
4407 }
4408
4409 static void
4410 ql_minidump_free(qla_host_t *ha)
4411 {
4412         ha->hw.mdump_init = 0;
4413         if (ha->hw.dma_buf.flags.minidump) {
4414                 ha->hw.dma_buf.flags.minidump = 0;
4415                 ql_free_dmabuf(ha, &ha->hw.dma_buf.minidump);
4416         }
4417
4418         ql_free_minidump_template_buffer(ha);
4419         ql_free_minidump_buffer(ha);
4420
4421         return;
4422 }
4423
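/*
 * Name: ql_minidump
 * Function: Captures a minidump at most once: the capture is bracketed by
 *      ql_stop_sequence()/ql_start_sequence(), and the template is copied
 *      out and parsed to fill the capture buffer.
 */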
4424 void
4425 ql_minidump(qla_host_t *ha)
4426 {
4427         if (!ha->hw.mdump_init)
4428                 return;
4429
4430         if (ha->hw.mdump_done)
4431                 return;
4432
4433         ha->hw.mdump_start_seq_index = ql_stop_sequence(ha);
4434
4435         bzero(ha->hw.mdump_buffer, ha->hw.mdump_buffer_size);
4436         bzero(ha->hw.mdump_template, ha->hw.mdump_template_size);
4437
4438         bcopy(ha->hw.dma_buf.minidump.dma_b, ha->hw.mdump_template,
4439                 ha->hw.mdump_template_size);
4440
4441         ql_parse_template(ha);
4442  
4443         ql_start_sequence(ha, ha->hw.mdump_start_seq_index);
4444
4445         ha->hw.mdump_done = 1;
4446
4447         return;
4448 }
4449
4450
4451 /*
4452  * helper routines
4453  */
4454 static void 
4455 ql_entry_err_chk(ql_minidump_entry_t *entry, uint32_t esize)
4456 {
4457         if (esize != entry->hdr.entry_capture_size) {
4458                 entry->hdr.entry_capture_size = esize;
4459                 entry->hdr.driver_flags |= QL_DBG_SIZE_ERR_FLAG;
4460         }
4461         return;
4462 }
4463
4464
4465 static int 
4466 ql_parse_template(qla_host_t *ha)
4467 {
4468         uint32_t num_of_entries, buff_level, e_cnt, esize;
4469         uint32_t end_cnt, rv = 0;
4470         char *dump_buff, *dbuff;
4471         int sane_start = 0, sane_end = 0;
4472         ql_minidump_template_hdr_t *template_hdr;
4473         ql_minidump_entry_t *entry;
4474         uint32_t capture_mask; 
4475         uint32_t dump_size; 
4476
4477         /* Setup parameters */
4478         template_hdr = (ql_minidump_template_hdr_t *)ha->hw.mdump_template;
4479
4480         if (template_hdr->entry_type == TLHDR)
4481                 sane_start = 1;
4482         
4483         dump_buff = (char *) ha->hw.mdump_buffer;
4484
4485         num_of_entries = template_hdr->num_of_entries;
4486
4487         entry = (ql_minidump_entry_t *) ((char *)template_hdr 
4488                         + template_hdr->first_entry_offset );
4489
4490         template_hdr->saved_state_array[QL_OCM0_ADDR_INDX] =
4491                 template_hdr->ocm_window_array[ha->pci_func];
4492         template_hdr->saved_state_array[QL_PCIE_FUNC_INDX] = ha->pci_func;
4493
4494         capture_mask = ha->hw.mdump_capture_mask;
4495         dump_size = ha->hw.mdump_buffer_size;
4496
4497         template_hdr->driver_capture_mask = capture_mask;
4498
4499         QL_DPRINT80(ha, (ha->pci_dev,
4500                 "%s: sane_start = %d num_of_entries = %d "
4501                 "capture_mask = 0x%x dump_size = %d \n", 
4502                 __func__, sane_start, num_of_entries, capture_mask, dump_size));
4503
4504         for (buff_level = 0, e_cnt = 0; e_cnt < num_of_entries; e_cnt++) {
4505
4506                 /*
4507                  * If the capture_mask of the entry does not match capture mask
4508                  * skip the entry after marking the driver_flags indicator.
4509                  */
4510                 
4511                 if (!(entry->hdr.entry_capture_mask & capture_mask)) {
4512
4513                         entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4514                         entry = (ql_minidump_entry_t *) ((char *) entry
4515                                         + entry->hdr.entry_size);
4516                         continue;
4517                 }
4518
4519                 /*
4520                  * This is ONLY needed in implementations where
4521                  * the capture buffer allocated is too small to capture
4522                  * all of the required entries for a given capture mask.
4523                  * We need to empty the buffer contents to a file
4524                  * if possible, before processing the next entry
4525                  * If the buff_full_flag is set, no further capture will happen
4526                  * and all remaining non-control entries will be skipped.
4527                  */
4528                 if (entry->hdr.entry_capture_size != 0) {
4529                         if ((buff_level + entry->hdr.entry_capture_size) >
4530                                 dump_size) {
4531                                 /*  Try to recover by emptying buffer to file */
4532                                 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4533                                 entry = (ql_minidump_entry_t *) ((char *) entry
4534                                                 + entry->hdr.entry_size);
4535                                 continue;
4536                         }
4537                 }
4538
4539                 /*
4540                  * Decode the entry type and process it accordingly
4541                  */
4542
4543                 switch (entry->hdr.entry_type) {
4544                 case RDNOP:
4545                         break;
4546
4547                 case RDEND:
4548                         if (sane_end == 0) {
4549                                 end_cnt = e_cnt;
4550                         }
4551                         sane_end++;
4552                         break;
4553
4554                 case RDCRB:
4555                         dbuff = dump_buff + buff_level;
4556                         esize = ql_rdcrb(ha, (void *)entry, (void *)dbuff);
4557                         ql_entry_err_chk(entry, esize);
4558                         buff_level += esize;
4559                         break;
4560
4561                 case POLLRD:
4562                         dbuff = dump_buff + buff_level;
4563                         esize = ql_pollrd(ha, (void *)entry, (void *)dbuff);
4564                         ql_entry_err_chk(entry, esize);
4565                         buff_level += esize;
4566                         break;
4567
4568                 case POLLRDMWR:
4569                         dbuff = dump_buff + buff_level;
4570                         esize = ql_pollrd_modify_write(ha, (void *)entry,
4571                                         (void *)dbuff);
4572                         ql_entry_err_chk(entry, esize);
4573                         buff_level += esize;
4574                         break;
4575
4576                 case L2ITG:
4577                 case L2DTG:
4578                 case L2DAT:
4579                 case L2INS:
4580                         dbuff = dump_buff + buff_level;
4581                         esize = ql_L2Cache(ha, (void *)entry, (void *)dbuff);
4582                         if (esize == -1) {
4583                                 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4584                         } else {
4585                                 ql_entry_err_chk(entry, esize);
4586                                 buff_level += esize;
4587                         }
4588                         break;
4589
4590                 case L1DAT:
4591                 case L1INS:
4592                         dbuff = dump_buff + buff_level;
4593                         esize = ql_L1Cache(ha, (void *)entry, (void *)dbuff);
4594                         ql_entry_err_chk(entry, esize);
4595                         buff_level += esize;
4596                         break;
4597
4598                 case RDOCM:
4599                         dbuff = dump_buff + buff_level;
4600                         esize = ql_rdocm(ha, (void *)entry, (void *)dbuff);
4601                         ql_entry_err_chk(entry, esize);
4602                         buff_level += esize;
4603                         break;
4604
4605                 case RDMEM:
4606                         dbuff = dump_buff + buff_level;
4607                         esize = ql_rdmem(ha, (void *)entry, (void *)dbuff);
4608                         ql_entry_err_chk(entry, esize);
4609                         buff_level += esize;
4610                         break;
4611
4612                 case BOARD:
4613                 case RDROM:
4614                         dbuff = dump_buff + buff_level;
4615                         esize = ql_rdrom(ha, (void *)entry, (void *)dbuff);
4616                         ql_entry_err_chk(entry, esize);
4617                         buff_level += esize;
4618                         break;
4619
4620                 case RDMUX:
4621                         dbuff = dump_buff + buff_level;
4622                         esize = ql_rdmux(ha, (void *)entry, (void *)dbuff);
4623                         ql_entry_err_chk(entry, esize);
4624                         buff_level += esize;
4625                         break;
4626
4627                 case RDMUX2:
4628                         dbuff = dump_buff + buff_level;
4629                         esize = ql_rdmux2(ha, (void *)entry, (void *)dbuff);
4630                         ql_entry_err_chk(entry, esize);
4631                         buff_level += esize;
4632                         break;
4633
4634                 case QUEUE:
4635                         dbuff = dump_buff + buff_level;
4636                         esize = ql_rdqueue(ha, (void *)entry, (void *)dbuff);
4637                         ql_entry_err_chk(entry, esize);
4638                         buff_level += esize;
4639                         break;
4640
4641                 case CNTRL:
4642                         if ((rv = ql_cntrl(ha, template_hdr, (void *)entry))) {
4643                                 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4644                         }
4645                         break;
4646                 default:
4647                         entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4648                         break;
4649                 }
4650                 /*  next entry in the template */
4651                 entry = (ql_minidump_entry_t *) ((char *) entry
4652                                                 + entry->hdr.entry_size);
4653         }
4654
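        /*
         * Sanity check the walk: the template must have a sane first
         * entry and no more than one RDEND entry.
         */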
4655         if (!sane_start || (sane_end > 1)) {
4656                 device_printf(ha->pci_dev,
4657                         "\n%s: Template configuration error. Check Template\n",
4658                         __func__);
4659         }
4660         
4661         QL_DPRINT80(ha, (ha->pci_dev, "%s: Minidump num of entries = %d\n",
4662                 __func__, template_hdr->num_of_entries));
4663
4664         return (0);
4665 }
4666
4667 /*
4668  * Read CRB operation.
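 *
 * Perform crb_entry->op_count indirect register reads starting at
 * crb_entry->addr, advancing by addr_stride each time, and store each
 * (address, value) pair in the capture buffer.  Returns the number of
 * bytes written, or 0 on a register access failure.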
4669  */
4670 static uint32_t
4671 ql_rdcrb(qla_host_t *ha, ql_minidump_entry_rdcrb_t *crb_entry,
4672         uint32_t *data_buff)
4673 {
4674         int loop_cnt;
4675         int ret;
4676         uint32_t op_count, addr, stride, value = 0;
4677
4678         addr = crb_entry->addr;
4679         op_count = crb_entry->op_count;
4680         stride = crb_entry->addr_stride;
4681
4682         for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
4683
4684                 ret = ql_rdwr_indreg32(ha, addr, &value, 1);
4685
4686                 if (ret)
4687                         return (0);
4688
4689                 *data_buff++ = addr;
4690                 *data_buff++ = value;
4691                 addr = addr + stride;
4692         }
4693
4694         /*
4695          * For testing purposes, we return the amount of data written.
4696          */
4697         return (op_count * (2 * sizeof(uint32_t)));
4698 }
4699
4700 /*
4701  * Handle L2 Cache.
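 *
 * For each of op_count cache lines: write the current tag to the tag
 * register, optionally kick the controller through the control
 * register, poll the control register until the poll_mask bits clear
 * (bounded by poll_wait milliseconds), then read read_addr_cnt words
 * from the read-address window.  Returns the bytes captured, 0 on a
 * register access failure, or -1 on a poll timeout.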
4702  */
4703
4704 static uint32_t 
4705 ql_L2Cache(qla_host_t *ha, ql_minidump_entry_cache_t *cacheEntry,
4706         uint32_t *data_buff)
4707 {
4708         int i, k;
4709         int loop_cnt;
4710         int ret;
4711
4712         uint32_t read_value;
4713         uint32_t addr, read_addr, cntrl_addr, tag_reg_addr, cntl_value_w;
4714         uint32_t tag_value, read_cnt;
4715         volatile uint8_t cntl_value_r;
4716         long timeout;
4717         uint32_t data;
4718
4719         loop_cnt = cacheEntry->op_count;
4720
4721         read_addr = cacheEntry->read_addr;
4722         cntrl_addr = cacheEntry->control_addr;
4723         cntl_value_w = (uint32_t) cacheEntry->write_value;
4724
4725         tag_reg_addr = cacheEntry->tag_reg_addr;
4726
4727         tag_value = cacheEntry->init_tag_value;
4728         read_cnt = cacheEntry->read_addr_cnt;
4729
4730         for (i = 0; i < loop_cnt; i++) {
4731
4732                 ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
4733                 if (ret)
4734                         return (0);
4735
4736                 if (cacheEntry->write_value != 0) { 
4737
4738                         ret = ql_rdwr_indreg32(ha, cntrl_addr,
4739                                         &cntl_value_w, 0);
4740                         if (ret)
4741                                 return (0);
4742                 }
4743
4744                 if (cacheEntry->poll_mask != 0) { 
4745
4746                         timeout = cacheEntry->poll_wait;
4747
4748                         ret = ql_rdwr_indreg32(ha, cntrl_addr, &data, 1);
4749                         if (ret)
4750                                 return (0);
4751
4752                         cntl_value_r = (uint8_t)data;
4753
4754                         while ((cntl_value_r & cacheEntry->poll_mask) != 0) {
4755
4756                                 if (timeout) {
4757                                         qla_mdelay(__func__, 1);
4758                                         timeout--;
4759                                 } else
4760                                         break;
4761
4762                                 ret = ql_rdwr_indreg32(ha, cntrl_addr,
4763                                                 &data, 1);
4764                                 if (ret)
4765                                         return (0);
4766
4767                                 cntl_value_r = (uint8_t)data;
4768                         }
4769                         if (!timeout) {
4770                                 /*
4771                                  * Report a timeout error: the core dump
4772                                  * capture failed.  Skip remaining entries,
4773                                  * write the buffer out to a file and use
4774                                  * the driver-specific fields in the
4775                                  * template header to report this error.
4776                                  */
4777                                 return (-1);
4778                         }
4779                 }
4780
4781                 addr = read_addr;
4782                 for (k = 0; k < read_cnt; k++) {
4783
4784                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4785                         if (ret)
4786                                 return (0);
4787
4788                         *data_buff++ = read_value;
4789                         addr += cacheEntry->read_addr_stride;
4790                 }
4791
4792                 tag_value += cacheEntry->tag_value_stride;
4793         }
4794
4795         return (read_cnt * loop_cnt * sizeof(uint32_t));
4796 }
4797
4798 /*
4799  * Handle L1 Cache.
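 *
 * Same walk as the L2 handler above, except that the L1 controller
 * needs no completion polling: the control value is written and the
 * data words are read back immediately.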
4800  */
4801
4802 static uint32_t 
4803 ql_L1Cache(qla_host_t *ha,
4804         ql_minidump_entry_cache_t *cacheEntry,
4805         uint32_t *data_buff)
4806 {
4807         int ret;
4808         int i, k;
4809         int loop_cnt;
4810
4811         uint32_t read_value;
4812         uint32_t addr, read_addr, cntrl_addr, tag_reg_addr;
4813         uint32_t tag_value, read_cnt;
4814         uint32_t cntl_value_w;
4815
4816         loop_cnt = cacheEntry->op_count;
4817
4818         read_addr = cacheEntry->read_addr;
4819         cntrl_addr = cacheEntry->control_addr;
4820         cntl_value_w = (uint32_t) cacheEntry->write_value;
4821
4822         tag_reg_addr = cacheEntry->tag_reg_addr;
4823
4824         tag_value = cacheEntry->init_tag_value;
4825         read_cnt = cacheEntry->read_addr_cnt;
4826
4827         for (i = 0; i < loop_cnt; i++) {
4828
4829                 ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
4830                 if (ret)
4831                         return (0);
4832
4833                 ret = ql_rdwr_indreg32(ha, cntrl_addr, &cntl_value_w, 0);
4834                 if (ret)
4835                         return (0);
4836
4837                 addr = read_addr;
4838                 for (k = 0; k < read_cnt; k++) {
4839
4840                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4841                         if (ret)
4842                                 return (0);
4843
4844                         *data_buff++ = read_value;
4845                         addr += cacheEntry->read_addr_stride;
4846                 }
4847
4848                 tag_value += cacheEntry->tag_value_stride;
4849         }
4850
4851         return (read_cnt * loop_cnt * sizeof(uint32_t));
4852 }
4853
4854 /*
4855  * Reading OCM memory
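 *
 * On-chip memory is read directly through the memory-mapped register
 * window with READ_REG32 rather than via the indirect register
 * interface.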
4856  */
4857
4858 static uint32_t 
4859 ql_rdocm(qla_host_t *ha,
4860         ql_minidump_entry_rdocm_t *ocmEntry,
4861         uint32_t *data_buff)
4862 {
4863         int i, loop_cnt;
4864         volatile uint32_t addr;
4865         volatile uint32_t value;
4866
4867         addr = ocmEntry->read_addr;
4868         loop_cnt = ocmEntry->op_count;
4869
4870         for (i = 0; i < loop_cnt; i++) {
4871                 value = READ_REG32(ha, addr);
4872                 *data_buff++ = value;
4873                 addr += ocmEntry->read_addr_stride;
4874         }
4875         return (loop_cnt * sizeof(value));
4876 }
4877
4878 /*
4879  * Read memory
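 *
 * Off-chip memory is fetched 16 bytes (four 32-bit words) per
 * ql_rdwr_offchip_mem() call; any remainder of read_data_size below
 * 16 bytes is not captured.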
4880  */
4881
4882 static uint32_t 
4883 ql_rdmem(qla_host_t *ha,
4884         ql_minidump_entry_rdmem_t *mem_entry,
4885         uint32_t *data_buff)
4886 {
4887         int ret;
4888         int i, loop_cnt;
4889         volatile uint32_t addr;
4890         q80_offchip_mem_val_t val;
4891
4892         addr = mem_entry->read_addr;
4893
4894         /* read_data_size is in bytes; each read fetches 16 bytes */
4895         loop_cnt = mem_entry->read_data_size / (sizeof(uint32_t) * 4);
4896
4897         for (i = 0; i < loop_cnt; i++) {
4898
4899                 ret = ql_rdwr_offchip_mem(ha, (addr & 0x0ffffffff), &val, 1);
4900                 if (ret)
4901                         return (0);
4902
4903                 *data_buff++ = val.data_lo;
4904                 *data_buff++ = val.data_hi;
4905                 *data_buff++ = val.data_ulo;
4906                 *data_buff++ = val.data_uhi;
4907
4908                 addr += (sizeof(uint32_t) * 4);
4909         }
4910
4911         return (loop_cnt * (sizeof(uint32_t) * 4));
4912 }
4913
4914 /*
4915  * Read ROM
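 *
 * Flash contents are read one 32-bit word at a time with
 * ql_rd_flash32(); read_data_size is given in bytes.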
4916  */
4917
4918 static uint32_t 
4919 ql_rdrom(qla_host_t *ha,
4920         ql_minidump_entry_rdrom_t *romEntry,
4921         uint32_t *data_buff)
4922 {
4923         int ret;
4924         int i, loop_cnt;
4925         uint32_t addr;
4926         uint32_t value;
4927
4928         addr = romEntry->read_addr;
4929         loop_cnt = romEntry->read_data_size; /* This is size in bytes */
4930         loop_cnt /= sizeof(value);
4931
4932         for (i = 0; i < loop_cnt; i++) {
4933
4934                 ret = ql_rd_flash32(ha, addr, &value);
4935                 if (ret)
4936                         return (0);
4937
4938                 *data_buff++ = value;
4939                 addr += sizeof(value);
4940         }
4941
4942         return (loop_cnt * sizeof(value));
4943 }
4944
4945 /*
4946  * Read MUX data
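 *
 * For each of op_count selections: write the current select value to
 * the mux select register, read the muxed data register, and store the
 * (select, data) pair in the capture buffer.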
4947  */
4948
4949 static uint32_t 
4950 ql_rdmux(qla_host_t *ha,
4951         ql_minidump_entry_mux_t *muxEntry,
4952         uint32_t *data_buff)
4953 {
4954         int ret;
4955         int loop_cnt;
4956         uint32_t read_value, sel_value;
4957         uint32_t read_addr, select_addr;
4958
4959         select_addr = muxEntry->select_addr;
4960         sel_value = muxEntry->select_value;
4961         read_addr = muxEntry->read_addr;
4962
4963         for (loop_cnt = 0; loop_cnt < muxEntry->op_count; loop_cnt++) {
4964
4965                 ret = ql_rdwr_indreg32(ha, select_addr, &sel_value, 0);
4966                 if (ret)
4967                         return (0);
4968
4969                 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
4970                 if (ret)
4971                         return (0);
4972
4973                 *data_buff++ = sel_value;
4974                 *data_buff++ = read_value;
4975
4976                 sel_value += muxEntry->select_value_stride;
4977         }
4978
4979         return (loop_cnt * (2 * sizeof(uint32_t)));
4980 }
4981
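/*
 * Read MUX2 data.
 *
 * Like the MUX read above, but each iteration captures two pairs:
 * select_value_1 and select_value_2 are each masked with
 * select_value_mask, programmed through the two select registers, and
 * the resulting (select, data) pairs are stored.
 */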
4982 static uint32_t
4983 ql_rdmux2(qla_host_t *ha,
4984         ql_minidump_entry_mux2_t *muxEntry,
4985         uint32_t *data_buff)
4986 {
4987         int ret;
4988         int loop_cnt;
4989
4990         uint32_t select_addr_1, select_addr_2;
4991         uint32_t select_value_1, select_value_2;
4992         uint32_t select_value_count, select_value_mask;
4993         uint32_t read_addr, read_value;
4994
4995         select_addr_1 = muxEntry->select_addr_1;
4996         select_addr_2 = muxEntry->select_addr_2;
4997         select_value_1 = muxEntry->select_value_1;
4998         select_value_2 = muxEntry->select_value_2;
4999         select_value_count = muxEntry->select_value_count;
5000         select_value_mask  = muxEntry->select_value_mask;
5001
5002         read_addr = muxEntry->read_addr;
5003
5004         for (loop_cnt = 0; loop_cnt < select_value_count; loop_cnt++) {
5006
5007                 uint32_t temp_sel_val;
5008
5009                 ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_1, 0);
5010                 if (ret)
5011                         return (0);
5012
5013                 temp_sel_val = select_value_1 & select_value_mask;
5014
5015                 ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
5016                 if (ret)
5017                         return (0);
5018
5019                 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
5020                 if (ret)
5021                         return (0);
5022
5023                 *data_buff++ = temp_sel_val;
5024                 *data_buff++ = read_value;
5025
5026                 ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_2, 0);
5027                 if (ret)
5028                         return (0);
5029
5030                 temp_sel_val = select_value_2 & select_value_mask;
5031
5032                 ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
5033                 if (ret)
5034                         return (0);
5035
5036                 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
5037                 if (ret)
5038                         return (0);
5039
5040                 *data_buff++ = temp_sel_val;
5041                 *data_buff++ = read_value;
5042
5043                 select_value_1 += muxEntry->select_value_stride;
5044                 select_value_2 += muxEntry->select_value_stride;
5045         }
5046
5047         return (loop_cnt * (4 * sizeof(uint32_t)));
5048 }
5049
5050 /*
5051  * Handling Queue State Reads.
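 *
 * For each of op_count queues: write the current queue_id to the
 * select register, then read read_addr_cnt words starting at read_addr
 * (advancing by read_addr_stride) into the capture buffer.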
5052  */
5053
5054 static uint32_t 
5055 ql_rdqueue(qla_host_t *ha,
5056         ql_minidump_entry_queue_t *queueEntry,
5057         uint32_t *data_buff)
5058 {
5059         int ret;
5060         int loop_cnt, k;
5061         uint32_t read_value;
5062         uint32_t read_addr, read_stride, select_addr;
5063         uint32_t queue_id, read_cnt;
5064
5065         read_cnt = queueEntry->read_addr_cnt;
5066         read_stride = queueEntry->read_addr_stride;
5067         select_addr = queueEntry->select_addr;
5068
5069         for (loop_cnt = 0, queue_id = 0; loop_cnt < queueEntry->op_count;
5070                 loop_cnt++) {
5071
5072                 ret = ql_rdwr_indreg32(ha, select_addr, &queue_id, 0);
5073                 if (ret)
5074                         return (0);
5075
5076                 read_addr = queueEntry->read_addr;
5077
5078                 for (k = 0; k < read_cnt; k++) {
5079
5080                         ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
5081                         if (ret)
5082                                 return (0);
5083
5084                         *data_buff++ = read_value;
5085                         read_addr += read_stride;
5086                 }
5087
5088                 queue_id += queueEntry->queue_id_stride;
5089         }
5090
5091         return (loop_cnt * (read_cnt * sizeof(uint32_t)));
5092 }
5093
5094 /*
5095  * Handling control entries.
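 *
 * A CNTRL entry performs side effects instead of capturing data: its
 * opcode is a bitmask of operations (write, read/write-back, AND/OR
 * masking, poll-for-value, and saved-state read/write/modify) applied
 * at each address in the entry's range.  Returns -1 on a poll timeout,
 * 0 otherwise.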
5096  */
5097
5098 static uint32_t 
5099 ql_cntrl(qla_host_t *ha,
5100         ql_minidump_template_hdr_t *template_hdr,
5101         ql_minidump_entry_cntrl_t *crbEntry)
5102 {
5103         int ret;
5104         int count;
5105         uint32_t opcode, read_value, addr, entry_addr;
5106         long timeout;
5107
5108         entry_addr = crbEntry->addr;
5109
5110         for (count = 0; count < crbEntry->op_count; count++) {
5111                 opcode = crbEntry->opcode;
5112
5113                 if (opcode & QL_DBG_OPCODE_WR) {
5114
5115                         ret = ql_rdwr_indreg32(ha, entry_addr,
5116                                         &crbEntry->value_1, 0);
5117                         if (ret)
5118                                 return (0);
5119
5120                         opcode &= ~QL_DBG_OPCODE_WR;
5121                 }
5122
5123                 if (opcode & QL_DBG_OPCODE_RW) {
5124
5125                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
5126                         if (ret)
5127                                 return (0);
5128
5129                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
5130                         if (ret)
5131                                 return (0);
5132
5133                         opcode &= ~QL_DBG_OPCODE_RW;
5134                 }
5135
5136                 if (opcode & QL_DBG_OPCODE_AND) {
5137
5138                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
5139                         if (ret)
5140                                 return (0);
5141
5142                         read_value &= crbEntry->value_2;
5143                         opcode &= ~QL_DBG_OPCODE_AND;
5144
5145                         if (opcode & QL_DBG_OPCODE_OR) {
5146                                 read_value |= crbEntry->value_3;
5147                                 opcode &= ~QL_DBG_OPCODE_OR;
5148                         }
5149
5150                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
5151                         if (ret)
5152                                 return (0);
5153                 }
5154
5155                 if (opcode & QL_DBG_OPCODE_OR) {
5156
5157                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
5158                         if (ret)
5159                                 return (0);
5160
5161                         read_value |= crbEntry->value_3;
5162
5163                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
5164                         if (ret)
5165                                 return (0);
5166
5167                         opcode &= ~QL_DBG_OPCODE_OR;
5168                 }
5169
5170                 if (opcode & QL_DBG_OPCODE_POLL) {
5171
5172                         opcode &= ~QL_DBG_OPCODE_POLL;
5173                         timeout = crbEntry->poll_timeout;
5174                         addr = entry_addr;
5175
5176                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
5177                         if (ret)
5178                                 return (0);
5179
5180                         while ((read_value & crbEntry->value_2)
5181                                 != crbEntry->value_1) {
5182
5183                                 if (timeout) {
5184                                         qla_mdelay(__func__, 1);
5185                                         timeout--;
5186                                 } else
5187                                         break;
5188
5189                                 ret = ql_rdwr_indreg32(ha, addr,
5190                                                 &read_value, 1);
5191                                 if (ret)
5192                                         return (0);
5193                         }
5194
5195                         if (!timeout) {
5196                         /*
5197                          * Report a timeout error: the core dump capture
5198                          * failed.  Skip remaining entries, write the
5199                          * buffer out to a file and use the
5200                          * driver-specific fields in the template header
5201                          * to report this error.
5202                          */
5204                                 return (-1);
5205                         }
5206                 }
5207
5208                 if (opcode & QL_DBG_OPCODE_RDSTATE) {
5209                         /*
5210                          * Decide which address to use.
5211                          */
5212                         if (crbEntry->state_index_a) {
5213                                 addr = template_hdr->saved_state_array[
5214                                                 crbEntry->state_index_a];
5215                         } else {
5216                                 addr = entry_addr;
5217                         }
5218
5219                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
5220                         if (ret)
5221                                 return (0);
5222
5223                         template_hdr->saved_state_array[crbEntry->state_index_v]
5224                                         = read_value;
5225                         opcode &= ~QL_DBG_OPCODE_RDSTATE;
5226                 }
5227
5228                 if (opcode & QL_DBG_OPCODE_WRSTATE) {
5229                         /*
5230                          * Decide which value to use.
5231                          */
5232                         if (crbEntry->state_index_v) {
5233                                 read_value = template_hdr->saved_state_array[
5234                                                 crbEntry->state_index_v];
5235                         } else {
5236                                 read_value = crbEntry->value_1;
5237                         }
5238                         /*
5239                          * Decide which address to use.
5240                          */
5241                         if (crbEntry->state_index_a) {
5242                                 addr = template_hdr->saved_state_array[
5243                                                 crbEntry->state_index_a];
5244                         } else {
5245                                 addr = entry_addr;
5246                         }
5247
5248                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 0);
5249                         if (ret)
5250                                 return (0);
5251
5252                         opcode &= ~QL_DBG_OPCODE_WRSTATE;
5253                 }
5254
5255                 if (opcode & QL_DBG_OPCODE_MDSTATE) {
5256                         /*  Read value from saved state using index */
5257                         read_value = template_hdr->saved_state_array[
5258                                                 crbEntry->state_index_v];
5259
5260                         read_value <<= crbEntry->shl; /* shift left */
5261                         read_value >>= crbEntry->shr; /* shift right */
5262
5263                         if (crbEntry->value_2) {
5264                                 /* check if AND mask is provided */
5265                                 read_value &= crbEntry->value_2;
5266                         }
5267
5268                         read_value |= crbEntry->value_3; /* OR operation */
5269                         read_value += crbEntry->value_1; /* increment op */
5270
5271                         /* Write value back to state area. */
5272
5273                         template_hdr->saved_state_array[crbEntry->state_index_v]
5274                                         = read_value;
5275                         opcode &= ~QL_DBG_OPCODE_MDSTATE;
5276                 }
5277
5278                 entry_addr += crbEntry->addr_stride;
5279         }
5280
5281         return (0);
5282 }
5283
5284 /*
5285  * Handle POLLRD (read with poll) entries.
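 *
 * For each of op_count selections: program the select register, poll
 * it until any of the mask bits are set (bounded by 'poll' reads),
 * then capture the (select, data) pair from the read register.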
5286  */
5287
5288 static uint32_t 
5289 ql_pollrd(qla_host_t *ha, ql_minidump_entry_pollrd_t *entry,
5290         uint32_t *data_buff)
5291 {
5292         int ret;
5293         int loop_cnt;
5294         uint32_t op_count, select_addr, select_value_stride, select_value;
5295         uint32_t read_addr, poll, mask, data_size, data;
5296         uint32_t wait_count = 0;
5297
5298         select_addr            = entry->select_addr;
5299         read_addr              = entry->read_addr;
5300         select_value           = entry->select_value;
5301         select_value_stride    = entry->select_value_stride;
5302         op_count               = entry->op_count;
5303         poll                   = entry->poll;
5304         mask                   = entry->mask;
5305         data_size              = entry->data_size;
5306
5307         for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
5308
5309                 ret = ql_rdwr_indreg32(ha, select_addr, &select_value, 0);
5310                 if (ret)
5311                         return (0);
5312
5313                 wait_count = 0;
5314
5315                 while (wait_count < poll) {
5316
5317                         uint32_t temp;
5318
5319                         ret = ql_rdwr_indreg32(ha, select_addr, &temp, 1);
5320                         if (ret)
5321                                 return (0);
5322
5323                         if ((temp & mask) != 0) {
5324                                 break;
5325                         }
5326                         wait_count++;
5327                 }
5328
5329                 if (wait_count == poll) {
5330                         device_printf(ha->pci_dev,
5331                                 "%s: Error in processing entry\n", __func__);
5332                         device_printf(ha->pci_dev,
5333                                 "%s: wait_count <0x%x> poll <0x%x>\n",
5334                                 __func__, wait_count, poll);
5335                         return (0);
5336                 }
5337
5338                 ret = ql_rdwr_indreg32(ha, read_addr, &data, 1);
5339                 if (ret)
5340                         return (0);
5341
5342                 *data_buff++ = select_value;
5343                 *data_buff++ = data;
5344                 select_value = select_value + select_value_stride;
5345         }
5346
5347         /*
5348          * For testing purposes, we return the amount of data written.
5349          */
5350         return (loop_cnt * (2 * sizeof(uint32_t)));
5351 }
5352
5353
5354 /*
5355  * Handle POLLRDMWR (read-modify-write with poll) entries.
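 *
 * Write value_1 to addr_1 and poll addr_1 for the mask bits; on
 * success, read addr_2, mask the data with modify_mask, write it back,
 * write value_2 to addr_1, poll once more, and capture the
 * (addr_2, data) pair.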
5356  */
5357
5358 static uint32_t 
5359 ql_pollrd_modify_write(qla_host_t *ha,
5360         ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
5361         uint32_t *data_buff)
5362 {
5363         int ret;
5364         uint32_t addr_1, addr_2, value_1, value_2, data;
5365         uint32_t poll, mask, data_size, modify_mask;
5366         uint32_t wait_count = 0;
5367
5368         addr_1          = entry->addr_1;
5369         addr_2          = entry->addr_2;
5370         value_1         = entry->value_1;
5371         value_2         = entry->value_2;
5372
5373         poll            = entry->poll;
5374         mask            = entry->mask;
5375         modify_mask     = entry->modify_mask;
5376         data_size       = entry->data_size;
5377
5379         ret = ql_rdwr_indreg32(ha, addr_1, &value_1, 0);
5380         if (ret)
5381                 return (0);
5382
5383         wait_count = 0;
5384         while (wait_count < poll) {
5385
5386                 uint32_t temp;
5387
5388                 ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
5389                 if (ret)
5390                         return (0);
5391
5392                 if ((temp & mask) != 0) {
5393                         break;
5394                 }
5395                 wait_count++;
5396         }
5397
5398         if (wait_count == poll) {
5399                 device_printf(ha->pci_dev, "%s: Error in processing entry\n",
5400                         __func__);
5401                 /* Nothing was captured; don't advance the capture buffer. */
5402                 return (0);
5403         } else {
5402
5403                 ret = ql_rdwr_indreg32(ha, addr_2, &data, 1);
5404                 if (ret)
5405                         return (0);
5406
5407                 data = (data & modify_mask);
5408
5409                 ret = ql_rdwr_indreg32(ha, addr_2, &data, 0);
5410                 if (ret)
5411                         return (0);
5412
5413                 ret = ql_rdwr_indreg32(ha, addr_1, &value_2, 0);
5414                 if (ret)
5415                         return (0);
5416
5417                 /* Poll again */
5418                 wait_count = 0;
5419                 while (wait_count < poll) {
5420
5421                         uint32_t temp;
5422
5423                         ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
5424                         if (ret)
5425                                 return (0);
5426
5427                         if ((temp & mask) != 0) {
5428                                 break;
5429                         }
5430                         wait_count++;
5431                 }
5432                 *data_buff++ = addr_2;
5433                 *data_buff++ = data;
5434         }
5435
5436         /*
5437          * For testing purposes, we return the amount of data written.
5438          */
5439         return (2 * sizeof(uint32_t));
5440 }
5441
5442