/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013-2016 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: ql_hw.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 * Content: Contains Hardware dependent functions
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "ql_os.h"
#include "ql_hw.h"
#include "ql_def.h"
#include "ql_inline.h"
#include "ql_ver.h"
#include "ql_glbl.h"
#include "ql_dbg.h"
#include "ql_minidump.h"

/*
 * Static Functions
 */

static void qla_del_rcv_cntxt(qla_host_t *ha);
static int qla_init_rcv_cntxt(qla_host_t *ha);
static void qla_del_xmt_cntxt(qla_host_t *ha);
static int qla_init_xmt_cntxt(qla_host_t *ha);
static int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
        uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause);
static int qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx,
        uint32_t num_intrs, uint32_t create);
static int qla_config_rss(qla_host_t *ha, uint16_t cntxt_id);
static int qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id,
        int tenable, int rcv);
static int qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode);
static int qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id);

static int qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd,
                uint8_t *hdr);
static int qla_hw_add_all_mcast(qla_host_t *ha);
static int qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds);

static int qla_init_nic_func(qla_host_t *ha);
static int qla_stop_nic_func(qla_host_t *ha);
static int qla_query_fw_dcbx_caps(qla_host_t *ha);
static int qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits);
static int qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits);
static int qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode);
static int qla_get_cam_search_mode(qla_host_t *ha);

static void ql_minidump_free(qla_host_t *ha);

#ifdef QL_DBG

static void
qla_stop_pegs(qla_host_t *ha)
{
        uint32_t val = 1;

        ql_rdwr_indreg32(ha, Q8_CRB_PEG_0, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_1, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_2, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_3, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_4, &val, 0);
        device_printf(ha->pci_dev, "%s PEGS HALTED!!!!!\n", __func__);
}

static int
qla_sysctl_stop_pegs(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        if (ret == 1) {
                ha = (qla_host_t *)arg1;
                if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
                        qla_stop_pegs(ha);
                        QLA_UNLOCK(ha, __func__);
                }
        }

        return (err);
}
#endif /* #ifdef QL_DBG */

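/*
 * Sketch of the port_cfg value encoding, as consumed by
 * qla_sysctl_port_cfg() below: bits 0-3 select DCBX (0 = disable,
 * 1 = enable), bits 4-7 select the pause type (0 = none, 1 = standard,
 * 2 = per-priority), and bits 8-11 select the standard-pause direction
 * (0 = xmt and rcv, 1 = xmt only, 2 = rcv only). For example, a value
 * of 0x0111 requests DCBX with standard pause on transmit only.
 */
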
static int
qla_validate_set_port_cfg_bit(uint32_t bits)
{
        if ((bits & 0xF) > 1)
                return (-1);

        if (((bits >> 4) & 0xF) > 2)
                return (-1);

        if (((bits >> 8) & 0xF) > 2)
                return (-1);

        return (0);
}

static int
qla_sysctl_port_cfg(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;
        uint32_t cfg_bits;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        ha = (qla_host_t *)arg1;

        if (qla_validate_set_port_cfg_bit((uint32_t)ret) == 0) {
                err = qla_get_port_config(ha, &cfg_bits);

                if (err)
                        goto qla_sysctl_set_port_cfg_exit;

                if (ret & 0x1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_DCBX_ENABLE;
                } else {
                        cfg_bits &= ~Q8_PORT_CFG_BITS_DCBX_ENABLE;
                }

                ret = ret >> 4;
                cfg_bits &= ~Q8_PORT_CFG_BITS_PAUSE_CFG_MASK;

                if ((ret & 0xF) == 0) {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_DISABLED;
                } else if ((ret & 0xF) == 1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_STD;
                } else {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_PPM;
                }

                ret = ret >> 4;
                cfg_bits &= ~Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK;

                if (ret == 0) {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT_RCV;
                } else if (ret == 1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT;
                } else {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_RCV;
                }

                if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
                        err = qla_set_port_config(ha, cfg_bits);
                        QLA_UNLOCK(ha, __func__);
                } else {
                        device_printf(ha->pci_dev, "%s: failed\n", __func__);
                }
        } else {
                if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
                        err = qla_get_port_config(ha, &cfg_bits);
                        QLA_UNLOCK(ha, __func__);
                } else {
                        device_printf(ha->pci_dev, "%s: failed\n", __func__);
                }
        }

qla_sysctl_set_port_cfg_exit:
        return (err);
}

static int
qla_sysctl_set_cam_search_mode(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        ha = (qla_host_t *)arg1;

        if ((ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_INTERNAL) ||
                (ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_AUTO)) {
                if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
                        err = qla_set_cam_search_mode(ha, (uint32_t)ret);
                        QLA_UNLOCK(ha, __func__);
                } else {
                        device_printf(ha->pci_dev, "%s: failed\n", __func__);
                }
        } else {
                device_printf(ha->pci_dev, "%s: ret = %d\n", __func__, ret);
        }

        return (err);
}

static int
qla_sysctl_get_cam_search_mode(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        ha = (qla_host_t *)arg1;
        if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
                err = qla_get_cam_search_mode(ha);
                QLA_UNLOCK(ha, __func__);
        } else {
                device_printf(ha->pci_dev, "%s: failed\n", __func__);
        }

        return (err);
}

static void
qlnx_add_hw_mac_stats_sysctls(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid       *ctx_oid;

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_mac",
                        CTLFLAG_RD, NULL, "stats_hw_mac");
        children = SYSCTL_CHILDREN(ctx_oid);

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_frames",
                CTLFLAG_RD, &ha->hw.mac.xmt_frames,
                "xmt_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_bytes,
                "xmt_bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_mcast_pkts",
                CTLFLAG_RD, &ha->hw.mac.xmt_mcast_pkts,
                "xmt_mcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_bcast_pkts",
                CTLFLAG_RD, &ha->hw.mac.xmt_bcast_pkts,
                "xmt_bcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pause_frames",
                CTLFLAG_RD, &ha->hw.mac.xmt_pause_frames,
                "xmt_pause_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_cntrl_pkts",
                CTLFLAG_RD, &ha->hw.mac.xmt_cntrl_pkts,
                "xmt_cntrl_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_64bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_64bytes,
                "xmt_pkt_lt_64bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_127bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_127bytes,
                "xmt_pkt_lt_127bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_255bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_255bytes,
                "xmt_pkt_lt_255bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_511bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_511bytes,
                "xmt_pkt_lt_511bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_1023bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_1023bytes,
                "xmt_pkt_lt_1023bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_1518bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_1518bytes,
                "xmt_pkt_lt_1518bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_gt_1518bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_gt_1518bytes,
                "xmt_pkt_gt_1518bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_frames",
                CTLFLAG_RD, &ha->hw.mac.rcv_frames,
                "rcv_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_bytes,
                "rcv_bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_mcast_pkts",
                CTLFLAG_RD, &ha->hw.mac.rcv_mcast_pkts,
                "rcv_mcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_bcast_pkts",
                CTLFLAG_RD, &ha->hw.mac.rcv_bcast_pkts,
                "rcv_bcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pause_frames",
                CTLFLAG_RD, &ha->hw.mac.rcv_pause_frames,
                "rcv_pause_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_cntrl_pkts",
                CTLFLAG_RD, &ha->hw.mac.rcv_cntrl_pkts,
                "rcv_cntrl_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_64bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_64bytes,
                "rcv_pkt_lt_64bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_127bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_127bytes,
                "rcv_pkt_lt_127bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_255bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_255bytes,
                "rcv_pkt_lt_255bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_511bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_511bytes,
                "rcv_pkt_lt_511bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_1023bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_1023bytes,
                "rcv_pkt_lt_1023bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_1518bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_1518bytes,
                "rcv_pkt_lt_1518bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_gt_1518bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_gt_1518bytes,
                "rcv_pkt_gt_1518bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_len_error",
                CTLFLAG_RD, &ha->hw.mac.rcv_len_error,
                "rcv_len_error");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_len_small",
                CTLFLAG_RD, &ha->hw.mac.rcv_len_small,
                "rcv_len_small");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_len_large",
                CTLFLAG_RD, &ha->hw.mac.rcv_len_large,
                "rcv_len_large");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_jabber",
                CTLFLAG_RD, &ha->hw.mac.rcv_jabber,
                "rcv_jabber");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_dropped",
                CTLFLAG_RD, &ha->hw.mac.rcv_dropped,
                "rcv_dropped");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "fcs_error",
                CTLFLAG_RD, &ha->hw.mac.fcs_error,
                "fcs_error");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "align_error",
                CTLFLAG_RD, &ha->hw.mac.align_error,
                "align_error");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_frames",
                CTLFLAG_RD, &ha->hw.mac.eswitched_frames,
                "eswitched_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_bytes",
                CTLFLAG_RD, &ha->hw.mac.eswitched_bytes,
                "eswitched_bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_mcast_frames",
                CTLFLAG_RD, &ha->hw.mac.eswitched_mcast_frames,
                "eswitched_mcast_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_bcast_frames",
                CTLFLAG_RD, &ha->hw.mac.eswitched_bcast_frames,
                "eswitched_bcast_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_ucast_frames",
                CTLFLAG_RD, &ha->hw.mac.eswitched_ucast_frames,
                "eswitched_ucast_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_err_free_frames",
                CTLFLAG_RD, &ha->hw.mac.eswitched_err_free_frames,
                "eswitched_err_free_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_err_free_bytes",
                CTLFLAG_RD, &ha->hw.mac.eswitched_err_free_bytes,
                "eswitched_err_free_bytes");

        return;
}
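
/*
 * Once registered, each of these read-only counters can be read from
 * userland with sysctl(8); e.g. (node name and unit number here are
 * illustrative, not guaranteed; check `sysctl dev | grep stats_hw_mac`
 * for the actual path on a given system):
 *
 *	sysctl dev.ql.0.stats_hw_mac.xmt_frames
 */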

static void
qlnx_add_hw_rcv_stats_sysctls(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid       *ctx_oid;

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_rcv",
                        CTLFLAG_RD, NULL, "stats_hw_rcv");
        children = SYSCTL_CHILDREN(ctx_oid);

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "total_bytes",
                CTLFLAG_RD, &ha->hw.rcv.total_bytes,
                "total_bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "total_pkts",
                CTLFLAG_RD, &ha->hw.rcv.total_pkts,
                "total_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "lro_pkt_count",
                CTLFLAG_RD, &ha->hw.rcv.lro_pkt_count,
                "lro_pkt_count");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "sw_pkt_count",
                CTLFLAG_RD, &ha->hw.rcv.sw_pkt_count,
                "sw_pkt_count");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "ip_chksum_err",
                CTLFLAG_RD, &ha->hw.rcv.ip_chksum_err,
                "ip_chksum_err");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_wo_acntxts",
                CTLFLAG_RD, &ha->hw.rcv.pkts_wo_acntxts,
                "pkts_wo_acntxts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_dropped_no_sds_card",
                CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_sds_card,
                "pkts_dropped_no_sds_card");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_dropped_no_sds_host",
                CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_sds_host,
                "pkts_dropped_no_sds_host");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "oversized_pkts",
                CTLFLAG_RD, &ha->hw.rcv.oversized_pkts,
                "oversized_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_dropped_no_rds",
                CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_rds,
                "pkts_dropped_no_rds");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "unxpctd_mcast_pkts",
                CTLFLAG_RD, &ha->hw.rcv.unxpctd_mcast_pkts,
                "unxpctd_mcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "re1_fbq_error",
                CTLFLAG_RD, &ha->hw.rcv.re1_fbq_error,
                "re1_fbq_error");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "invalid_mac_addr",
                CTLFLAG_RD, &ha->hw.rcv.invalid_mac_addr,
                "invalid_mac_addr");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rds_prime_trys",
                CTLFLAG_RD, &ha->hw.rcv.rds_prime_trys,
                "rds_prime_trys");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rds_prime_success",
                CTLFLAG_RD, &ha->hw.rcv.rds_prime_success,
                "rds_prime_success");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "lro_flows_added",
                CTLFLAG_RD, &ha->hw.rcv.lro_flows_added,
                "lro_flows_added");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "lro_flows_deleted",
                CTLFLAG_RD, &ha->hw.rcv.lro_flows_deleted,
                "lro_flows_deleted");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "lro_flows_active",
                CTLFLAG_RD, &ha->hw.rcv.lro_flows_active,
                "lro_flows_active");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_droped_unknown",
                CTLFLAG_RD, &ha->hw.rcv.pkts_droped_unknown,
                "pkts_droped_unknown");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_cnt_oversized",
                CTLFLAG_RD, &ha->hw.rcv.pkts_cnt_oversized,
                "pkts_cnt_oversized");

        return;
}

static void
qlnx_add_hw_xmt_stats_sysctls(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid_list  *node_children;
        struct sysctl_oid       *ctx_oid;
        int                     i;
        uint8_t                 name_str[16];

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_xmt",
                        CTLFLAG_RD, NULL, "stats_hw_xmt");
        children = SYSCTL_CHILDREN(ctx_oid);

        for (i = 0; i < ha->hw.num_tx_rings; i++) {
                bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
                snprintf(name_str, sizeof(name_str), "%d", i);

                ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
                        CTLFLAG_RD, NULL, name_str);
                node_children = SYSCTL_CHILDREN(ctx_oid);

                /* Tx Related */

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "total_bytes",
                        CTLFLAG_RD, &ha->hw.xmt[i].total_bytes,
                        "total_bytes");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "total_pkts",
                        CTLFLAG_RD, &ha->hw.xmt[i].total_pkts,
                        "total_pkts");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "errors",
                        CTLFLAG_RD, &ha->hw.xmt[i].errors,
                        "errors");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "pkts_dropped",
                        CTLFLAG_RD, &ha->hw.xmt[i].pkts_dropped,
                        "pkts_dropped");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "switch_pkts",
                        CTLFLAG_RD, &ha->hw.xmt[i].switch_pkts,
                        "switch_pkts");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "num_buffers",
                        CTLFLAG_RD, &ha->hw.xmt[i].num_buffers,
                        "num_buffers");
        }

        return;
}

static void
qlnx_add_hw_stats_sysctls(qla_host_t *ha)
{
        qlnx_add_hw_mac_stats_sysctls(ha);
        qlnx_add_hw_rcv_stats_sysctls(ha);
        qlnx_add_hw_xmt_stats_sysctls(ha);

        return;
}

static void
qlnx_add_drvr_sds_stats(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid_list  *node_children;
        struct sysctl_oid       *ctx_oid;
        int                     i;
        uint8_t                 name_str[16];

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_sds",
                        CTLFLAG_RD, NULL, "stats_drvr_sds");
        children = SYSCTL_CHILDREN(ctx_oid);

        for (i = 0; i < ha->hw.num_sds_rings; i++) {
                bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
                snprintf(name_str, sizeof(name_str), "%d", i);

                ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
                        CTLFLAG_RD, NULL, name_str);
                node_children = SYSCTL_CHILDREN(ctx_oid);

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "intr_count",
                        CTLFLAG_RD, &ha->hw.sds[i].intr_count,
                        "intr_count");

                SYSCTL_ADD_UINT(ctx, node_children,
                        OID_AUTO, "rx_free",
                        CTLFLAG_RD, &ha->hw.sds[i].rx_free,
                        ha->hw.sds[i].rx_free, "rx_free");
        }

        return;
}

static void
qlnx_add_drvr_rds_stats(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid_list  *node_children;
        struct sysctl_oid       *ctx_oid;
        int                     i;
        uint8_t                 name_str[16];

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_rds",
                        CTLFLAG_RD, NULL, "stats_drvr_rds");
        children = SYSCTL_CHILDREN(ctx_oid);

        for (i = 0; i < ha->hw.num_rds_rings; i++) {
                bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
                snprintf(name_str, sizeof(name_str), "%d", i);

                ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
                        CTLFLAG_RD, NULL, name_str);
                node_children = SYSCTL_CHILDREN(ctx_oid);

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "count",
                        CTLFLAG_RD, &ha->hw.rds[i].count,
                        "count");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "lro_pkt_count",
                        CTLFLAG_RD, &ha->hw.rds[i].lro_pkt_count,
                        "lro_pkt_count");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "lro_bytes",
                        CTLFLAG_RD, &ha->hw.rds[i].lro_bytes,
                        "lro_bytes");
        }

        return;
}

static void
qlnx_add_drvr_tx_stats(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid_list  *node_children;
        struct sysctl_oid       *ctx_oid;
        int                     i;
        uint8_t                 name_str[16];

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_xmt",
                        CTLFLAG_RD, NULL, "stats_drvr_xmt");
        children = SYSCTL_CHILDREN(ctx_oid);

        for (i = 0; i < ha->hw.num_tx_rings; i++) {
                bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
                snprintf(name_str, sizeof(name_str), "%d", i);

                ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
                        CTLFLAG_RD, NULL, name_str);
                node_children = SYSCTL_CHILDREN(ctx_oid);

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "count",
                        CTLFLAG_RD, &ha->tx_ring[i].count,
                        "count");

#ifdef QL_ENABLE_ISCSI_TLV
                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "iscsi_pkt_count",
                        CTLFLAG_RD, &ha->tx_ring[i].iscsi_pkt_count,
                        "iscsi_pkt_count");
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
        }

        return;
}

static void
qlnx_add_drvr_stats_sysctls(qla_host_t *ha)
{
        qlnx_add_drvr_sds_stats(ha);
        qlnx_add_drvr_rds_stats(ha);
        qlnx_add_drvr_tx_stats(ha);
        return;
}

/*
 * Name: ql_hw_add_sysctls
 * Function: Add P3Plus specific sysctls
 */
void
ql_hw_add_sysctls(qla_host_t *ha)
{
        device_t        dev;

        dev = ha->pci_dev;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_rds_rings", CTLFLAG_RD, &ha->hw.num_rds_rings,
                ha->hw.num_rds_rings, "Number of Rcv Descriptor Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_sds_rings", CTLFLAG_RD, &ha->hw.num_sds_rings,
                ha->hw.num_sds_rings, "Number of Status Descriptor Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->hw.num_tx_rings,
                ha->hw.num_tx_rings, "Number of Transmit Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "tx_ring_index", CTLFLAG_RW, &ha->txr_idx,
                ha->txr_idx, "Tx Ring Used");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "max_tx_segs", CTLFLAG_RD, &ha->hw.max_tx_segs,
                ha->hw.max_tx_segs, "Max # of Segments in a non-TSO pkt");

        ha->hw.sds_cidx_thres = 32;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "sds_cidx_thres", CTLFLAG_RW, &ha->hw.sds_cidx_thres,
                ha->hw.sds_cidx_thres,
                "Number of SDS entries to process before updating"
                " SDS Ring Consumer Index");

        ha->hw.rds_pidx_thres = 32;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "rds_pidx_thres", CTLFLAG_RW, &ha->hw.rds_pidx_thres,
                ha->hw.rds_pidx_thres,
                "Number of Rcv Rings Entries to post before updating"
                " RDS Ring Producer Index");

        ha->hw.rcv_intr_coalesce = (3 << 16) | 256;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "rcv_intr_coalesce", CTLFLAG_RW,
                &ha->hw.rcv_intr_coalesce,
                ha->hw.rcv_intr_coalesce,
                "Rcv Intr Coalescing Parameters\n"
                "\tbits 15:0 max packets\n"
                "\tbits 31:16 max micro-seconds to wait\n"
                "\tplease run\n"
                "\tifconfig <if> down && ifconfig <if> up\n"
                "\tto take effect\n");

        ha->hw.xmt_intr_coalesce = (64 << 16) | 64;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "xmt_intr_coalesce", CTLFLAG_RW,
                &ha->hw.xmt_intr_coalesce,
                ha->hw.xmt_intr_coalesce,
                "Xmt Intr Coalescing Parameters\n"
                "\tbits 15:0 max packets\n"
                "\tbits 31:16 max micro-seconds to wait\n"
                "\tplease run\n"
                "\tifconfig <if> down && ifconfig <if> up\n"
                "\tto take effect\n");

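        /*
         * Worked example of the coalesce encoding above: the receive
         * default (3 << 16) | 256 packs 256 into bits 15:0 (interrupt
         * after at most 256 packets) and 3 into bits 31:16 (or after 3
         * micro-seconds, whichever limit is reached first). The transmit
         * default (64 << 16) | 64 likewise means 64 packets or 64
         * micro-seconds.
         */
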
        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "port_cfg", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_port_cfg, "I",
                        "Set Port Configuration if the value is valid; "
                        "otherwise Get Port Configuration\n"
                        "\tBits 0-3 : 1 = DCBX Enable; 0 = DCBX Disable\n"
                        "\tBits 4-7 : 0 = no pause; 1 = std; 2 = ppm\n"
                        "\tBits 8-11: std pause cfg; 0 = xmt and rcv;"
                        " 1 = xmt only; 2 = rcv only\n"
                );

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "set_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_set_cam_search_mode, "I",
                        "Set CAM Search Mode\n"
                        "\t 1 = search mode internal\n"
                        "\t 2 = search mode auto\n");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "get_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_get_cam_search_mode, "I",
                        "Get CAM Search Mode\n"
                        "\t 1 = search mode internal\n"
                        "\t 2 = search mode auto\n");

        ha->hw.enable_9kb = 1;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "enable_9kb", CTLFLAG_RW, &ha->hw.enable_9kb,
                ha->hw.enable_9kb, "Enable 9Kbyte Buffers when MTU = 9000");

        ha->hw.enable_hw_lro = 1;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "enable_hw_lro", CTLFLAG_RW, &ha->hw.enable_hw_lro,
                ha->hw.enable_hw_lro, "Enable Hardware LRO; Default is true\n"
                "\t 1 : Hardware LRO if LRO is enabled\n"
                "\t 0 : Software LRO if LRO is enabled\n"
                "\t Any change requires ifconfig down/up to take effect\n"
                "\t Note that LRO may be turned off/on via ifconfig\n");

        ha->hw.mdump_active = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "minidump_active", CTLFLAG_RW, &ha->hw.mdump_active,
                ha->hw.mdump_active,
                "Minidump retrieval is Active");

        ha->hw.mdump_done = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "mdump_done", CTLFLAG_RW,
                &ha->hw.mdump_done, ha->hw.mdump_done,
                "Minidump has been done and available for retrieval");

        ha->hw.mdump_capture_mask = 0xF;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "minidump_capture_mask", CTLFLAG_RW,
                &ha->hw.mdump_capture_mask, ha->hw.mdump_capture_mask,
                "Minidump capture mask");
#ifdef QL_DBG

        ha->err_inject = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "err_inject",
                CTLFLAG_RW, &ha->err_inject, ha->err_inject,
                "Error to be injected\n"
                "\t\t\t 0: No Errors\n"
                "\t\t\t 1: rcv: rxb struct invalid\n"
                "\t\t\t 2: rcv: mp == NULL\n"
                "\t\t\t 3: lro: rxb struct invalid\n"
                "\t\t\t 4: lro: mp == NULL\n"
                "\t\t\t 5: rcv: num handles invalid\n"
                "\t\t\t 6: reg: indirect reg rd_wr failure\n"
                "\t\t\t 7: ocm: offchip memory rd_wr failure\n"
                "\t\t\t 8: mbx: mailbox command failure\n"
                "\t\t\t 9: heartbeat failure\n"
                "\t\t\t A: temperature failure\n"
                "\t\t\t 11: m_getcl or m_getjcl failure\n");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "peg_stop", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_stop_pegs, "I", "Peg Stop");

#endif /* #ifdef QL_DBG */

        ha->hw.user_pri_nic = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "user_pri_nic", CTLFLAG_RW, &ha->hw.user_pri_nic,
                ha->hw.user_pri_nic,
                "VLAN Tag User Priority for Normal Ethernet Packets");

        ha->hw.user_pri_iscsi = 4;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "user_pri_iscsi", CTLFLAG_RW, &ha->hw.user_pri_iscsi,
                ha->hw.user_pri_iscsi,
                "VLAN Tag User Priority for iSCSI Packets");

        qlnx_add_hw_stats_sysctls(ha);
        qlnx_add_drvr_stats_sysctls(ha);

        return;
}

void
ql_hw_link_status(qla_host_t *ha)
{
        device_printf(ha->pci_dev, "cable_oui\t\t 0x%08x\n", ha->hw.cable_oui);

        if (ha->hw.link_up) {
                device_printf(ha->pci_dev, "link Up\n");
        } else {
                device_printf(ha->pci_dev, "link Down\n");
        }

        if (ha->hw.flags.fduplex) {
                device_printf(ha->pci_dev, "Full Duplex\n");
        } else {
                device_printf(ha->pci_dev, "Half Duplex\n");
        }

        if (ha->hw.flags.autoneg) {
                device_printf(ha->pci_dev, "Auto Negotiation Enabled\n");
        } else {
                device_printf(ha->pci_dev, "Auto Negotiation Disabled\n");
        }

        switch (ha->hw.link_speed) {
        case 0x710:
                device_printf(ha->pci_dev, "link speed\t\t 10Gbps\n");
                break;

        case 0x3E8:
                device_printf(ha->pci_dev, "link speed\t\t 1Gbps\n");
                break;

        case 0x64:
                device_printf(ha->pci_dev, "link speed\t\t 100Mbps\n");
                break;

        default:
                device_printf(ha->pci_dev, "link speed\t\t Unknown\n");
                break;
        }

        switch (ha->hw.module_type) {
        case 0x01:
                device_printf(ha->pci_dev, "Module Type 10GBase-LRM\n");
                break;

        case 0x02:
                device_printf(ha->pci_dev, "Module Type 10GBase-LR\n");
                break;

        case 0x03:
                device_printf(ha->pci_dev, "Module Type 10GBase-SR\n");
                break;

        case 0x04:
                device_printf(ha->pci_dev,
                        "Module Type 10GE Passive Copper(Compliant)[%d m]\n",
                        ha->hw.cable_length);
                break;

        case 0x05:
                device_printf(ha->pci_dev, "Module Type 10GE Active"
                        " Limiting Copper(Compliant)[%d m]\n",
                        ha->hw.cable_length);
                break;

        case 0x06:
                device_printf(ha->pci_dev,
                        "Module Type 10GE Passive Copper"
                        " (Legacy, Best Effort)[%d m]\n",
                        ha->hw.cable_length);
                break;

        case 0x07:
                device_printf(ha->pci_dev, "Module Type 1000Base-SX\n");
                break;

        case 0x08:
                device_printf(ha->pci_dev, "Module Type 1000Base-LX\n");
                break;

        case 0x09:
                device_printf(ha->pci_dev, "Module Type 1000Base-CX\n");
                break;

        case 0x0A:
                device_printf(ha->pci_dev, "Module Type 1000Base-T\n");
                break;

        case 0x0B:
                device_printf(ha->pci_dev, "Module Type 1GE Passive Copper"
                        " (Legacy, Best Effort)\n");
                break;

        default:
                device_printf(ha->pci_dev, "Unknown Module Type 0x%x\n",
                        ha->hw.module_type);
                break;
        }

        if (ha->hw.link_faults == 1)
                device_printf(ha->pci_dev, "SFP Power Fault\n");
}

/*
 * Name: ql_free_dma
 * Function: Frees the DMA'able memory allocated in ql_alloc_dma()
 */
void
ql_free_dma(qla_host_t *ha)
{
        uint32_t i;

        if (ha->hw.dma_buf.flags.sds_ring) {
                for (i = 0; i < ha->hw.num_sds_rings; i++) {
                        ql_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]);
                }
                ha->hw.dma_buf.flags.sds_ring = 0;
        }

        if (ha->hw.dma_buf.flags.rds_ring) {
                for (i = 0; i < ha->hw.num_rds_rings; i++) {
                        ql_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]);
                }
                ha->hw.dma_buf.flags.rds_ring = 0;
        }

        if (ha->hw.dma_buf.flags.tx_ring) {
                ql_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring);
                ha->hw.dma_buf.flags.tx_ring = 0;
        }
        ql_minidump_free(ha);
}

/*
 * Name: ql_alloc_dma
 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
 */
int
ql_alloc_dma(qla_host_t *ha)
{
        device_t                dev;
        uint32_t                i, j, size, tx_ring_size;
        qla_hw_t                *hw;
        qla_hw_tx_cntxt_t       *tx_cntxt;
        uint8_t                 *vaddr;
        bus_addr_t              paddr;

        dev = ha->pci_dev;

        QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

        hw = &ha->hw;
        /*
         * Allocate Transmit Ring
         */
        tx_ring_size = (sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS);
        size = (tx_ring_size * ha->hw.num_tx_rings);

        hw->dma_buf.tx_ring.alignment = 8;
        hw->dma_buf.tx_ring.size = size + PAGE_SIZE;
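
        /*
         * Note: the extra PAGE_SIZE added above reserves room, past the
         * transmit rings themselves, for one uint32_t consumer index per
         * transmit ring; the second loop below carves those out of the
         * same buffer.
         */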

        if (ql_alloc_dmabuf(ha, &hw->dma_buf.tx_ring)) {
                device_printf(dev, "%s: tx ring alloc failed\n", __func__);
                goto ql_alloc_dma_exit;
        }

        vaddr = (uint8_t *)hw->dma_buf.tx_ring.dma_b;
        paddr = hw->dma_buf.tx_ring.dma_addr;

        for (i = 0; i < ha->hw.num_tx_rings; i++) {
                tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

                tx_cntxt->tx_ring_base = (q80_tx_cmd_t *)vaddr;
                tx_cntxt->tx_ring_paddr = paddr;

                vaddr += tx_ring_size;
                paddr += tx_ring_size;
        }

        for (i = 0; i < ha->hw.num_tx_rings; i++) {
                tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

                tx_cntxt->tx_cons = (uint32_t *)vaddr;
                tx_cntxt->tx_cons_paddr = paddr;

                vaddr += sizeof (uint32_t);
                paddr += sizeof (uint32_t);
        }

        ha->hw.dma_buf.flags.tx_ring = 1;

        QL_DPRINT2(ha, (dev, "%s: tx_ring phys %p virt %p\n",
                __func__, (void *)(hw->dma_buf.tx_ring.dma_addr),
                hw->dma_buf.tx_ring.dma_b));
        /*
         * Allocate Receive Descriptor Rings
         */

        for (i = 0; i < hw->num_rds_rings; i++) {
                hw->dma_buf.rds_ring[i].alignment = 8;
                hw->dma_buf.rds_ring[i].size =
                        (sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS;

                if (ql_alloc_dmabuf(ha, &hw->dma_buf.rds_ring[i])) {
                        device_printf(dev, "%s: rds ring[%d] alloc failed\n",
                                __func__, i);

                        for (j = 0; j < i; j++)
                                ql_free_dmabuf(ha, &hw->dma_buf.rds_ring[j]);

                        goto ql_alloc_dma_exit;
                }
                QL_DPRINT4(ha, (dev, "%s: rx_ring[%d] phys %p virt %p\n",
                        __func__, i, (void *)(hw->dma_buf.rds_ring[i].dma_addr),
                        hw->dma_buf.rds_ring[i].dma_b));
        }

        hw->dma_buf.flags.rds_ring = 1;

        /*
         * Allocate Status Descriptor Rings
         */

        for (i = 0; i < hw->num_sds_rings; i++) {
                hw->dma_buf.sds_ring[i].alignment = 8;
                hw->dma_buf.sds_ring[i].size =
                        (sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS;

                if (ql_alloc_dmabuf(ha, &hw->dma_buf.sds_ring[i])) {
                        device_printf(dev, "%s: sds ring alloc failed\n",
                                __func__);

                        for (j = 0; j < i; j++)
                                ql_free_dmabuf(ha, &hw->dma_buf.sds_ring[j]);

                        goto ql_alloc_dma_exit;
                }
                QL_DPRINT4(ha, (dev, "%s: sds_ring[%d] phys %p virt %p\n",
                        __func__, i,
                        (void *)(hw->dma_buf.sds_ring[i].dma_addr),
                        hw->dma_buf.sds_ring[i].dma_b));
        }
        for (i = 0; i < hw->num_sds_rings; i++) {
                hw->sds[i].sds_ring_base =
                        (q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b;
        }

        hw->dma_buf.flags.sds_ring = 1;

        return 0;

ql_alloc_dma_exit:
        ql_free_dma(ha);
        return -1;
}

#define Q8_MBX_MSEC_DELAY       5000

static int
qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
        uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause)
{
        uint32_t i;
        uint32_t data;
        int ret = 0;

        if (QL_ERR_INJECT(ha, INJCT_MBX_CMD_FAILURE)) {
                ret = -3;
                ha->qla_initiate_recovery = 1;
                goto exit_qla_mbx_cmd;
        }

        if (no_pause)
                i = 1000;
        else
                i = Q8_MBX_MSEC_DELAY;

        while (i) {
                data = READ_REG32(ha, Q8_HOST_MBOX_CNTRL);
                if (data == 0)
                        break;
                if (no_pause) {
                        DELAY(1000);
                } else {
                        qla_mdelay(__func__, 1);
                }
                i--;
        }

        if (i == 0) {
                device_printf(ha->pci_dev, "%s: host_mbx_cntrl 0x%08x\n",
                        __func__, data);
                ret = -1;
                ha->qla_initiate_recovery = 1;
                goto exit_qla_mbx_cmd;
        }

        for (i = 0; i < n_hmbox; i++) {
                WRITE_REG32(ha, (Q8_HOST_MBOX0 + (i << 2)), *h_mbox);
                h_mbox++;
        }

        WRITE_REG32(ha, Q8_HOST_MBOX_CNTRL, 0x1);

        i = Q8_MBX_MSEC_DELAY;
        while (i) {
                data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);

                if ((data & 0x3) == 1) {
                        data = READ_REG32(ha, Q8_FW_MBOX0);
                        if ((data & 0xF000) != 0x8000)
                                break;
                }
                if (no_pause) {
                        DELAY(1000);
                } else {
                        qla_mdelay(__func__, 1);
                }
                i--;
        }
        if (i == 0) {
                device_printf(ha->pci_dev, "%s: fw_mbx_cntrl 0x%08x\n",
                        __func__, data);
                ret = -2;
                ha->qla_initiate_recovery = 1;
                goto exit_qla_mbx_cmd;
        }

        for (i = 0; i < n_fwmbox; i++) {
                *fw_mbox++ = READ_REG32(ha, (Q8_FW_MBOX0 + (i << 2)));
        }

        WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
        WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);

exit_qla_mbx_cmd:
        return (ret);
}
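
/*
 * Outline of the handshake implemented above: wait for
 * Q8_HOST_MBOX_CNTRL to read 0 (firmware has consumed any previous
 * command), write the command words into Q8_HOST_MBOX0.., set
 * Q8_HOST_MBOX_CNTRL to signal the firmware, then poll Q8_FW_MBOX_CNTRL
 * until bits 1:0 read 1 and Q8_FW_MBOX0 no longer carries the 0x8000
 * "in progress" status. The response words are copied out before the
 * firmware mailbox control and the mailbox interrupt mask are cleared.
 * qla_get_nic_partition() below is a representative caller.
 */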

int
qla_get_nic_partition(qla_host_t *ha, uint32_t *supports_9kb,
        uint32_t *num_rcvq)
{
        uint32_t *mbox, err;
        device_t dev = ha->pci_dev;

        bzero(ha->hw.mbox, (sizeof (uint32_t) * Q8_NUM_MBOX));

        mbox = ha->hw.mbox;

        mbox[0] = Q8_MBX_GET_NIC_PARTITION | (0x2 << 16) | (0x2 << 29);

        if (qla_mbx_cmd(ha, mbox, 2, mbox, 19, 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }
        err = mbox[0] >> 25;

        if (supports_9kb != NULL) {
                if (mbox[16] & 0x80) /* bit 7 of mbox 16 */
                        *supports_9kb = 1;
                else
                        *supports_9kb = 0;
        }

        if (num_rcvq != NULL)
                *num_rcvq = ((mbox[6] >> 16) & 0xFFFF);

        if ((err != 1) && (err != 0)) {
                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
                return (-1);
        }
        return 0;
}
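
/*
 * Inferred layout of the command word built above (an assumption drawn
 * from the constants used in this function, not documented here): the
 * opcode occupies the low 16 bits, the inbound mailbox word count is
 * shifted in at bit 16 (0x2 here), and the command version at bit 29
 * (also 0x2). The completion status read back as mbox[0] >> 25 is
 * expected to be 0 or 1 on success.
 */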

static int
qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx, uint32_t num_intrs,
        uint32_t create)
{
        uint32_t i, err;
        device_t dev = ha->pci_dev;
        q80_config_intr_t *c_intr;
        q80_config_intr_rsp_t *c_intr_rsp;

        c_intr = (q80_config_intr_t *)ha->hw.mbox;
        bzero(c_intr, (sizeof (q80_config_intr_t)));

        c_intr->opcode = Q8_MBX_CONFIG_INTR;

        c_intr->count_version = (sizeof (q80_config_intr_t) >> 2);
        c_intr->count_version |= Q8_MBX_CMD_VERSION;

        c_intr->nentries = num_intrs;

        for (i = 0; i < num_intrs; i++) {
                if (create) {
                        c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_CREATE;
                        c_intr->intr[i].msix_index = start_idx + 1 + i;
                } else {
                        c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_DELETE;
                        c_intr->intr[i].msix_index =
                                ha->hw.intr_id[(start_idx + i)];
                }

                c_intr->intr[i].cmd_type |= Q8_MBX_CONFIG_INTR_TYPE_MSI_X;
        }

        if (qla_mbx_cmd(ha, (uint32_t *)c_intr,
                (sizeof (q80_config_intr_t) >> 2),
                ha->hw.mbox, (sizeof (q80_config_intr_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }

        c_intr_rsp = (q80_config_intr_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(c_intr_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed1 [0x%08x, %d]\n", __func__, err,
                        c_intr_rsp->nentries);

                for (i = 0; i < c_intr_rsp->nentries; i++) {
                        device_printf(dev, "%s: [%d]:[0x%x 0x%x 0x%x]\n",
                                __func__, i,
                                c_intr_rsp->intr[i].status,
                                c_intr_rsp->intr[i].intr_id,
                                c_intr_rsp->intr[i].intr_src);
                }

                return (-1);
        }

        for (i = 0; ((i < num_intrs) && create); i++) {
                if (!c_intr_rsp->intr[i].status) {
                        ha->hw.intr_id[(start_idx + i)] =
                                c_intr_rsp->intr[i].intr_id;
                        ha->hw.intr_src[(start_idx + i)] =
                                c_intr_rsp->intr[i].intr_src;
                }
        }

        return (0);
}

/*
 * Name: qla_config_rss
 * Function: Configure RSS for the context/interface.
 */
static const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL,
                        0x8030f20c77cb2da3ULL,
                        0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
                        0x255b0ec26d5a56daULL };
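
/*
 * Note: the key material above appears to be the widely published
 * default RSS hash key used in RSS verification examples, packed into
 * 64-bit words. That provenance is an observation, not something this
 * file documents.
 */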
1449
1450 static int
1451 qla_config_rss(qla_host_t *ha, uint16_t cntxt_id)
1452 {
1453         q80_config_rss_t        *c_rss;
1454         q80_config_rss_rsp_t    *c_rss_rsp;
1455         uint32_t                err, i;
1456         device_t                dev = ha->pci_dev;
1457
1458         c_rss = (q80_config_rss_t *)ha->hw.mbox;
1459         bzero(c_rss, (sizeof (q80_config_rss_t)));
1460
1461         c_rss->opcode = Q8_MBX_CONFIG_RSS;
1462
1463         c_rss->count_version = (sizeof (q80_config_rss_t) >> 2);
1464         c_rss->count_version |= Q8_MBX_CMD_VERSION;
1465
1466         c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP_IP |
1467                                 Q8_MBX_RSS_HASH_TYPE_IPV6_TCP_IP);
1468         //c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP |
1469         //                      Q8_MBX_RSS_HASH_TYPE_IPV6_TCP);
1470
1471         c_rss->flags = Q8_MBX_RSS_FLAGS_ENABLE_RSS;
1472         c_rss->flags |= Q8_MBX_RSS_FLAGS_USE_IND_TABLE;
1473
1474         c_rss->indtbl_mask = Q8_MBX_RSS_INDTBL_MASK;
1475
1476         c_rss->indtbl_mask |= Q8_MBX_RSS_FLAGS_MULTI_RSS_VALID;
1477         c_rss->flags |= Q8_MBX_RSS_FLAGS_TYPE_CRSS;
1478
1479         c_rss->cntxt_id = cntxt_id;
1480
1481         for (i = 0; i < 5; i++) {
1482                 c_rss->rss_key[i] = rss_key[i];
1483         }
1484
1485         if (qla_mbx_cmd(ha, (uint32_t *)c_rss,
1486                 (sizeof (q80_config_rss_t) >> 2),
1487                 ha->hw.mbox, (sizeof(q80_config_rss_rsp_t) >> 2), 0)) {
1488                 device_printf(dev, "%s: failed0\n", __func__);
1489                 return (-1);
1490         }
1491         c_rss_rsp = (q80_config_rss_rsp_t *)ha->hw.mbox;
1492
1493         err = Q8_MBX_RSP_STATUS(c_rss_rsp->regcnt_status);
1494
1495         if (err) {
1496                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1497                 return (-1);
1498         }
1499         return (0);
1500 }
1501
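/*
 * Name: qla_set_rss_ind_table
 * Function: Writes 'count' RSS indirection table entries, starting at
 *      'start_idx', for the specified receive context.
 */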
1502 static int
1503 qla_set_rss_ind_table(qla_host_t *ha, uint32_t start_idx, uint32_t count,
1504         uint16_t cntxt_id, uint8_t *ind_table)
1505 {
1506         q80_config_rss_ind_table_t      *c_rss_ind;
1507         q80_config_rss_ind_table_rsp_t  *c_rss_ind_rsp;
1508         uint32_t                        err;
1509         device_t                        dev = ha->pci_dev;
1510
1511         if ((count > Q8_RSS_IND_TBL_SIZE) ||
1512                 ((start_idx + count - 1) > Q8_RSS_IND_TBL_MAX_IDX)) {
1513                 device_printf(dev, "%s: illegal count [%d, %d]\n", __func__,
1514                         start_idx, count);
1515                 return (-1);
1516         }
1517
1518         c_rss_ind = (q80_config_rss_ind_table_t *)ha->hw.mbox;
1519         bzero(c_rss_ind, sizeof (q80_config_rss_ind_table_t));
1520
1521         c_rss_ind->opcode = Q8_MBX_CONFIG_RSS_TABLE;
1522         c_rss_ind->count_version = (sizeof (q80_config_rss_ind_table_t) >> 2);
1523         c_rss_ind->count_version |= Q8_MBX_CMD_VERSION;
1524
1525         c_rss_ind->start_idx = start_idx;
1526         c_rss_ind->end_idx = start_idx + count - 1;
1527         c_rss_ind->cntxt_id = cntxt_id;
1528         bcopy(ind_table, c_rss_ind->ind_table, count);
1529
1530         if (qla_mbx_cmd(ha, (uint32_t *)c_rss_ind,
1531                 (sizeof (q80_config_rss_ind_table_t) >> 2), ha->hw.mbox,
1532                 (sizeof(q80_config_rss_ind_table_rsp_t) >> 2), 0)) {
1533                 device_printf(dev, "%s: failed0\n", __func__);
1534                 return (-1);
1535         }
1536
1537         c_rss_ind_rsp = (q80_config_rss_ind_table_rsp_t *)ha->hw.mbox;
1538         err = Q8_MBX_RSP_STATUS(c_rss_ind_rsp->regcnt_status);
1539
1540         if (err) {
1541                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1542                 return (-1);
1543         }
1544         return (0);
1545 }
1546
1547 /*
1548  * Name: qla_config_intr_coalesce
1549  * Function: Configure Interrupt Coalescing.
1550  */
1551 static int
1552 qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable,
1553         int rcv)
1554 {
1555         q80_config_intr_coalesc_t       *intrc;
1556         q80_config_intr_coalesc_rsp_t   *intrc_rsp;
1557         uint32_t                        err, i;
1558         device_t                        dev = ha->pci_dev;
1559         
1560         intrc = (q80_config_intr_coalesc_t *)ha->hw.mbox;
1561         bzero(intrc, (sizeof (q80_config_intr_coalesc_t)));
1562
1563         intrc->opcode = Q8_MBX_CONFIG_INTR_COALESCE;
1564         intrc->count_version = (sizeof (q80_config_intr_coalesc_t) >> 2);
1565         intrc->count_version |= Q8_MBX_CMD_VERSION;
1566
1567         if (rcv) {
1568                 intrc->flags = Q8_MBX_INTRC_FLAGS_RCV;
1569                 intrc->max_pkts = ha->hw.rcv_intr_coalesce & 0xFFFF;
1570                 intrc->max_mswait = (ha->hw.rcv_intr_coalesce >> 16) & 0xFFFF;
1571         } else {
1572                 intrc->flags = Q8_MBX_INTRC_FLAGS_XMT;
1573                 intrc->max_pkts = ha->hw.xmt_intr_coalesce & 0xFFFF;
1574                 intrc->max_mswait = (ha->hw.xmt_intr_coalesce >> 16) & 0xFFFF;
1575         }
1576
1577         intrc->cntxt_id = cntxt_id;
1578
1579         if (tenable) {
1580                 intrc->flags |= Q8_MBX_INTRC_FLAGS_PERIODIC;
1581                 intrc->timer_type = Q8_MBX_INTRC_TIMER_PERIODIC;
1582
1583                 for (i = 0; i < ha->hw.num_sds_rings; i++) {
1584                         intrc->sds_ring_mask |= (1 << i);
1585                 }
1586                 intrc->ms_timeout = 1000;
1587         }
1588
1589         if (qla_mbx_cmd(ha, (uint32_t *)intrc,
1590                 (sizeof (q80_config_intr_coalesc_t) >> 2),
1591                 ha->hw.mbox, (sizeof(q80_config_intr_coalesc_rsp_t) >> 2), 0)) {
1592                 device_printf(dev, "%s: failed0\n", __func__);
1593                 return (-1);
1594         }
1595         intrc_rsp = (q80_config_intr_coalesc_rsp_t *)ha->hw.mbox;
1596
1597         err = Q8_MBX_RSP_STATUS(intrc_rsp->regcnt_status);
1598
1599         if (err) {
1600                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1601                 return (-1);
1602         }
1603         
1604         return (0);
1605 }
1606
1607
1608 /*
1609  * Name: qla_config_mac_addr
1610  * Function: binds a MAC address to the context/interface.
1611  *      Can be unicast, multicast or broadcast.
1612  */
1613 static int
1614 qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac,
1615         uint32_t num_mac)
1616 {
1617         q80_config_mac_addr_t           *cmac;
1618         q80_config_mac_addr_rsp_t       *cmac_rsp;
1619         uint32_t                        err;
1620         device_t                        dev = ha->pci_dev;
1621         int                             i;
1622         uint8_t                         *mac_cpy = mac_addr;
1623
1624         if (num_mac > Q8_MAX_MAC_ADDRS) {
1625                 device_printf(dev, "%s: %s num_mac [0x%x] > Q8_MAX_MAC_ADDRS\n",
1626                         __func__, (add_mac ? "Add" : "Del"), num_mac);
1627                 return (-1);
1628         }
1629
1630         cmac = (q80_config_mac_addr_t *)ha->hw.mbox;
1631         bzero(cmac, (sizeof (q80_config_mac_addr_t)));
1632
1633         cmac->opcode = Q8_MBX_CONFIG_MAC_ADDR;
1634         cmac->count_version = sizeof (q80_config_mac_addr_t) >> 2;
1635         cmac->count_version |= Q8_MBX_CMD_VERSION;
1636
1637         if (add_mac) 
1638                 cmac->cmd = Q8_MBX_CMAC_CMD_ADD_MAC_ADDR;
1639         else
1640                 cmac->cmd = Q8_MBX_CMAC_CMD_DEL_MAC_ADDR;
1641                 
1642         cmac->cmd |= Q8_MBX_CMAC_CMD_CAM_INGRESS;
1643
1644         cmac->nmac_entries = num_mac;
1645         cmac->cntxt_id = ha->hw.rcv_cntxt_id;
1646
1647         for (i = 0; i < num_mac; i++) {
1648                 bcopy(mac_addr, cmac->mac_addr[i].addr, Q8_ETHER_ADDR_LEN); 
1649                 mac_addr = mac_addr + ETHER_ADDR_LEN;
1650         }
1651
1652         if (qla_mbx_cmd(ha, (uint32_t *)cmac,
1653                 (sizeof (q80_config_mac_addr_t) >> 2),
1654                 ha->hw.mbox, (sizeof(q80_config_mac_addr_rsp_t) >> 2), 1)) {
1655                 device_printf(dev, "%s: %s failed0\n", __func__,
1656                         (add_mac ? "Add" : "Del"));
1657                 return (-1);
1658         }
1659         cmac_rsp = (q80_config_mac_addr_rsp_t *)ha->hw.mbox;
1660
1661         err = Q8_MBX_RSP_STATUS(cmac_rsp->regcnt_status);
1662
1663         if (err) {
1664                 device_printf(dev, "%s: %s failed1 [0x%08x]\n", __func__,
1665                         (add_mac ? "Add" : "Del"), err);
1666                 for (i = 0; i < num_mac; i++) {
1667                         device_printf(dev, "%s: %02x:%02x:%02x:%02x:%02x:%02x\n",
1668                                 __func__, mac_cpy[0], mac_cpy[1], mac_cpy[2],
1669                                 mac_cpy[3], mac_cpy[4], mac_cpy[5]);
1670                         mac_cpy += ETHER_ADDR_LEN;
1671                 }
1672                 return (-1);
1673         }
1674         
1675         return (0);
1676 }
1677
1678
1679 /*
1680  * Name: qla_set_mac_rcv_mode
1681  * Function: Enable/Disable AllMulticast and Promiscuous Modes.
1682  */
1683 static int
1684 qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode)
1685 {
1686         q80_config_mac_rcv_mode_t       *rcv_mode;
1687         uint32_t                        err;
1688         q80_config_mac_rcv_mode_rsp_t   *rcv_mode_rsp;
1689         device_t                        dev = ha->pci_dev;
1690
1691         rcv_mode = (q80_config_mac_rcv_mode_t *)ha->hw.mbox;
1692         bzero(rcv_mode, (sizeof (q80_config_mac_rcv_mode_t)));
1693
1694         rcv_mode->opcode = Q8_MBX_CONFIG_MAC_RX_MODE;
1695         rcv_mode->count_version = sizeof (q80_config_mac_rcv_mode_t) >> 2;
1696         rcv_mode->count_version |= Q8_MBX_CMD_VERSION;
1697
1698         rcv_mode->mode = mode;
1699
1700         rcv_mode->cntxt_id = ha->hw.rcv_cntxt_id;
1701
1702         if (qla_mbx_cmd(ha, (uint32_t *)rcv_mode,
1703                 (sizeof (q80_config_mac_rcv_mode_t) >> 2),
1704                 ha->hw.mbox, (sizeof(q80_config_mac_rcv_mode_rsp_t) >> 2), 1)) {
1705                 device_printf(dev, "%s: failed0\n", __func__);
1706                 return (-1);
1707         }
1708         rcv_mode_rsp = (q80_config_mac_rcv_mode_rsp_t *)ha->hw.mbox;
1709
1710         err = Q8_MBX_RSP_STATUS(rcv_mode_rsp->regcnt_status);
1711
1712         if (err) {
1713                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1714                 return (-1);
1715         }
1716         
1717         return (0);
1718 }
1719
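/*
 * The four helpers below toggle the AllMulticast/Promiscuous bits in the
 * cached receive mode and push it to the firmware. A typical caller in the
 * ioctl path looks roughly like this (a sketch only, not code from this
 * driver):
 *
 *      if (ifp->if_flags & IFF_PROMISC)
 *              ret = ql_set_promisc(ha);
 *      else
 *              qla_reset_promisc(ha);
 */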
1720 int
1721 ql_set_promisc(qla_host_t *ha)
1722 {
1723         int ret;
1724
1725         ha->hw.mac_rcv_mode |= Q8_MBX_MAC_RCV_PROMISC_ENABLE;
1726         ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1727         return (ret);
1728 }
1729
1730 void
1731 qla_reset_promisc(qla_host_t *ha)
1732 {
1733         ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_RCV_PROMISC_ENABLE;
1734         (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1735 }
1736
1737 int
1738 ql_set_allmulti(qla_host_t *ha)
1739 {
1740         int ret;
1741
1742         ha->hw.mac_rcv_mode |= Q8_MBX_MAC_ALL_MULTI_ENABLE;
1743         ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1744         return (ret);
1745 }
1746
1747 void
1748 qla_reset_allmulti(qla_host_t *ha)
1749 {
1750         ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_ALL_MULTI_ENABLE;
1751         (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1752 }
1753
1754 /*
1755  * Name: ql_set_max_mtu
1756  * Function:
1757  *      Sets the Maximum Transmission Unit (MTU) size for the specified rcv context.
1758  */
1759 int
1760 ql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id)
1761 {
1762         device_t                dev;
1763         q80_set_max_mtu_t       *max_mtu;
1764         q80_set_max_mtu_rsp_t   *max_mtu_rsp;
1765         uint32_t                err;
1766
1767         dev = ha->pci_dev;
1768
1769         max_mtu = (q80_set_max_mtu_t *)ha->hw.mbox;
1770         bzero(max_mtu, (sizeof (q80_set_max_mtu_t)));
1771
1772         max_mtu->opcode = Q8_MBX_SET_MAX_MTU;
1773         max_mtu->count_version = (sizeof (q80_set_max_mtu_t) >> 2);
1774         max_mtu->count_version |= Q8_MBX_CMD_VERSION;
1775
1776         max_mtu->cntxt_id = cntxt_id;
1777         max_mtu->mtu = mtu;
1778
1779         if (qla_mbx_cmd(ha, (uint32_t *)max_mtu,
1780                 (sizeof (q80_set_max_mtu_t) >> 2),
1781                 ha->hw.mbox, (sizeof (q80_set_max_mtu_rsp_t) >> 2), 1)) {
1782                 device_printf(dev, "%s: failed\n", __func__);
1783                 return (-1);
1784         }
1785
1786         max_mtu_rsp = (q80_set_max_mtu_rsp_t *)ha->hw.mbox;
1787
1788         err = Q8_MBX_RSP_STATUS(max_mtu_rsp->regcnt_status);
1789
1790         if (err) {
1791                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1792         }
1793
1794         return (0);
1795 }
1796
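/*
 * Name: qla_link_event_req
 * Function: Asks the firmware to deliver asynchronous link event
 *      notifications for the given receive context.
 */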
1797 static int
1798 qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id)
1799 {
1800         device_t                dev;
1801         q80_link_event_t        *lnk;
1802         q80_link_event_rsp_t    *lnk_rsp;
1803         uint32_t                err;
1804
1805         dev = ha->pci_dev;
1806
1807         lnk = (q80_link_event_t *)ha->hw.mbox;
1808         bzero(lnk, (sizeof (q80_link_event_t)));
1809
1810         lnk->opcode = Q8_MBX_LINK_EVENT_REQ;
1811         lnk->count_version = (sizeof (q80_link_event_t) >> 2);
1812         lnk->count_version |= Q8_MBX_CMD_VERSION;
1813
1814         lnk->cntxt_id = cntxt_id;
1815         lnk->cmd = Q8_LINK_EVENT_CMD_ENABLE_ASYNC;
1816
1817         if (qla_mbx_cmd(ha, (uint32_t *)lnk, (sizeof (q80_link_event_t) >> 2),
1818                 ha->hw.mbox, (sizeof (q80_link_event_rsp_t) >> 2), 0)) {
1819                 device_printf(dev, "%s: failed\n", __func__);
1820                 return (-1);
1821         }
1822
1823         lnk_rsp = (q80_link_event_rsp_t *)ha->hw.mbox;
1824
1825         err = Q8_MBX_RSP_STATUS(lnk_rsp->regcnt_status);
1826
1827         if (err) {
1828                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1829         }
1830
1831         return (0);
1832 }
1833
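/*
 * Name: qla_config_fw_lro
 * Function: Enables firmware based LRO for IPv4 and IPv6 flows on the
 *      given receive context.
 */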
1834 static int
1835 qla_config_fw_lro(qla_host_t *ha, uint16_t cntxt_id)
1836 {
1837         device_t                dev;
1838         q80_config_fw_lro_t     *fw_lro;
1839         q80_config_fw_lro_rsp_t *fw_lro_rsp;
1840         uint32_t                err;
1841
1842         dev = ha->pci_dev;
1843
1844         fw_lro = (q80_config_fw_lro_t *)ha->hw.mbox;
1845         bzero(fw_lro, sizeof(q80_config_fw_lro_t));
1846
1847         fw_lro->opcode = Q8_MBX_CONFIG_FW_LRO;
1848         fw_lro->count_version = (sizeof (q80_config_fw_lro_t) >> 2);
1849         fw_lro->count_version |= Q8_MBX_CMD_VERSION;
1850
1851         fw_lro->flags |= Q8_MBX_FW_LRO_IPV4 | Q8_MBX_FW_LRO_IPV4_WO_DST_IP_CHK;
1852         fw_lro->flags |= Q8_MBX_FW_LRO_IPV6 | Q8_MBX_FW_LRO_IPV6_WO_DST_IP_CHK;
1853
1854         fw_lro->cntxt_id = cntxt_id;
1855
1856         if (qla_mbx_cmd(ha, (uint32_t *)fw_lro,
1857                 (sizeof (q80_config_fw_lro_t) >> 2),
1858                 ha->hw.mbox, (sizeof (q80_config_fw_lro_rsp_t) >> 2), 0)) {
1859                 device_printf(dev, "%s: failed\n", __func__);
1860                 return (-1);
1861         }
1862
1863         fw_lro_rsp = (q80_config_fw_lro_rsp_t *)ha->hw.mbox;
1864
1865         err = Q8_MBX_RSP_STATUS(fw_lro_rsp->regcnt_status);
1866
1867         if (err) {
1868                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1869         }
1870
1871         return (0);
1872 }
1873
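/*
 * Name: qla_set_cam_search_mode
 * Function: Sets the CAM (MAC address filter) search mode via the
 *      Q8_MBX_HW_CONFIG mailbox command.
 */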
1874 static int
1875 qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode)
1876 {
1877         device_t                dev;
1878         q80_hw_config_t         *hw_config;
1879         q80_hw_config_rsp_t     *hw_config_rsp;
1880         uint32_t                err;
1881
1882         dev = ha->pci_dev;
1883
1884         hw_config = (q80_hw_config_t *)ha->hw.mbox;
1885         bzero(hw_config, sizeof (q80_hw_config_t));
1886
1887         hw_config->opcode = Q8_MBX_HW_CONFIG;
1888         hw_config->count_version = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE_COUNT;
1889         hw_config->count_version |= Q8_MBX_CMD_VERSION;
1890
1891         hw_config->cmd = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE;
1892
1893         hw_config->u.set_cam_search_mode.mode = search_mode;
1894
1895         if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
1896                 (sizeof (q80_hw_config_t) >> 2),
1897                 ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
1898                 device_printf(dev, "%s: failed\n", __func__);
1899                 return (-1);
1900         }
1901         hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;
1902
1903         err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);
1904
1905         if (err) {
1906                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1907         }
1908
1909         return (0);
1910 }
1911
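/*
 * Name: qla_get_cam_search_mode
 * Function: Reads back the current CAM search mode and logs it.
 */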
1912 static int
1913 qla_get_cam_search_mode(qla_host_t *ha)
1914 {
1915         device_t                dev;
1916         q80_hw_config_t         *hw_config;
1917         q80_hw_config_rsp_t     *hw_config_rsp;
1918         uint32_t                err;
1919
1920         dev = ha->pci_dev;
1921
1922         hw_config = (q80_hw_config_t *)ha->hw.mbox;
1923         bzero(hw_config, sizeof (q80_hw_config_t));
1924
1925         hw_config->opcode = Q8_MBX_HW_CONFIG;
1926         hw_config->count_version = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE_COUNT;
1927         hw_config->count_version |= Q8_MBX_CMD_VERSION;
1928
1929         hw_config->cmd = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE;
1930
1931         if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
1932                 (sizeof (q80_hw_config_t) >> 2),
1933                 ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
1934                 device_printf(dev, "%s: failed\n", __func__);
1935                 return (-1);
1936         }
1937         hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;
1938
1939         err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);
1940
1941         if (err) {
1942                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1943         } else {
1944                 device_printf(dev, "%s: cam search mode [0x%08x]\n", __func__,
1945                         hw_config_rsp->u.get_cam_search_mode.mode);
1946         }
1947
1948         return (0);
1949 }
1950
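/*
 * Name: qla_get_hw_stats
 * Function: Issues a Q8_MBX_GET_STATS mailbox command; 'cmd' selects the
 *      statistics type (MAC/RCV/XMT) and the response is left in
 *      ha->hw.mbox for the caller to parse.
 */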
1951 static int
1952 qla_get_hw_stats(qla_host_t *ha, uint32_t cmd, uint32_t rsp_size)
1953 {
1954         device_t                dev;
1955         q80_get_stats_t         *stat;
1956         q80_get_stats_rsp_t     *stat_rsp;
1957         uint32_t                err;
1958
1959         dev = ha->pci_dev;
1960
1961         stat = (q80_get_stats_t *)ha->hw.mbox;
1962         bzero(stat, (sizeof (q80_get_stats_t)));
1963
1964         stat->opcode = Q8_MBX_GET_STATS;
1965         stat->count_version = 2;
1966         stat->count_version |= Q8_MBX_CMD_VERSION;
1967
1968         stat->cmd = cmd;
1969
1970         if (qla_mbx_cmd(ha, (uint32_t *)stat, 2,
1971                 ha->hw.mbox, (rsp_size >> 2), 0)) {
1972                 device_printf(dev, "%s: failed\n", __func__);
1973                 return (-1);
1974         }
1975
1976         stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
1977
1978         err = Q8_MBX_RSP_STATUS(stat_rsp->regcnt_status);
1979
1980         if (err) {
1981                 return (-1);
1982         }
1983
1984         return (0);
1985 }
1986
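/*
 * Name: ql_get_stats
 * Function: Retrieves MAC, Receive and per-Tx-ring Transmit statistics
 *      from the firmware and caches them in ha->hw.
 */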
1987 void
1988 ql_get_stats(qla_host_t *ha)
1989 {
1990         q80_get_stats_rsp_t     *stat_rsp;
1991         q80_mac_stats_t         *mstat;
1992         q80_xmt_stats_t         *xstat;
1993         q80_rcv_stats_t         *rstat;
1994         uint32_t                cmd;
1995         int                     i;
1996         struct ifnet *ifp = ha->ifp;
1997
1998         if (ifp == NULL)
1999                 return;
2000
2001         if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) != 0) {
2002                 device_printf(ha->pci_dev, "%s: failed\n", __func__);
2003                 return;
2004         }
2005
2006         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2007                 QLA_UNLOCK(ha, __func__);
2008                 return;
2009         }
2010
2011         stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
2012         /*
2013          * Get MAC Statistics
2014          */
2015         cmd = Q8_GET_STATS_CMD_TYPE_MAC;
2016 //      cmd |= Q8_GET_STATS_CMD_CLEAR;
2017
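        /* upper 16 bits of cmd select the PCI function whose MAC stats are read */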
2018         cmd |= ((ha->pci_func & 0x1) << 16);
2019
2020         if (ha->qla_watchdog_pause)
2021                 goto ql_get_stats_exit;
2022
2023         if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
2024                 mstat = (q80_mac_stats_t *)&stat_rsp->u.mac;
2025                 bcopy(mstat, &ha->hw.mac, sizeof(q80_mac_stats_t));
2026         } else {
2027                 device_printf(ha->pci_dev, "%s: mac failed [0x%08x]\n",
2028                         __func__, ha->hw.mbox[0]);
2029         }
2030         /*
2031          * Get RCV Statistics
2032          */
2033         cmd = Q8_GET_STATS_CMD_RCV | Q8_GET_STATS_CMD_TYPE_CNTXT;
2034 //      cmd |= Q8_GET_STATS_CMD_CLEAR;
2035         cmd |= (ha->hw.rcv_cntxt_id << 16);
2036
2037         if (ha->qla_watchdog_pause)
2038                 goto ql_get_stats_exit;
2039
2040         if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
2041                 rstat = (q80_rcv_stats_t *)&stat_rsp->u.rcv;
2042                 bcopy(rstat, &ha->hw.rcv, sizeof(q80_rcv_stats_t));
2043         } else {
2044                 device_printf(ha->pci_dev, "%s: rcv failed [0x%08x]\n",
2045                         __func__, ha->hw.mbox[0]);
2046         }
2047
2048         if (ha->qla_watchdog_pause)
2049                 goto ql_get_stats_exit;
2050         /*
2051          * Get XMT Statistics
2052          */
2053         for (i = 0; ((i < ha->hw.num_tx_rings) && (!ha->qla_watchdog_pause));
2054                 i++) {
2055                 cmd = Q8_GET_STATS_CMD_XMT | Q8_GET_STATS_CMD_TYPE_CNTXT;
2056 //              cmd |= Q8_GET_STATS_CMD_CLEAR;
2057                 cmd |= (ha->hw.tx_cntxt[i].tx_cntxt_id << 16);
2058
2059                 if (qla_get_hw_stats(ha, cmd, sizeof(q80_get_stats_rsp_t))
2060                         == 0) {
2061                         xstat = (q80_xmt_stats_t *)&stat_rsp->u.xmt;
2062                         bcopy(xstat, &ha->hw.xmt[i], sizeof(q80_xmt_stats_t));
2063                 } else {
2064                         device_printf(ha->pci_dev, "%s: xmt failed [0x%08x]\n",
2065                                 __func__, ha->hw.mbox[0]);
2066                 }
2067         }
2068
2069 ql_get_stats_exit:
2070         QLA_UNLOCK(ha, __func__);
2071
2072         return;
2073 }
2074
2075 /*
2076  * Name: qla_tx_tso
2077  * Function: Checks if the packet to be transmitted is a candidate for
2078  *      Large TCP Segment Offload. If yes, the appropriate fields in the Tx
2079  *      Ring Structure are plugged in.
2080  */
2081 static int
2082 qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr)
2083 {
2084         struct ether_vlan_header *eh;
2085         struct ip *ip = NULL;
2086         struct ip6_hdr *ip6 = NULL;
2087         struct tcphdr *th = NULL;
2088         uint32_t ehdrlen,  hdrlen, ip_hlen, tcp_hlen, tcp_opt_off;
2089         uint16_t etype, opcode, offload = 1;
2090         device_t dev;
2091
2092         dev = ha->pci_dev;
2093
2094
2095         eh = mtod(mp, struct ether_vlan_header *);
2096
2097         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2098                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2099                 etype = ntohs(eh->evl_proto);
2100         } else {
2101                 ehdrlen = ETHER_HDR_LEN;
2102                 etype = ntohs(eh->evl_encap_proto);
2103         }
2104
2105         hdrlen = 0;
2106
2107         switch (etype) {
2108                 case ETHERTYPE_IP:
2109
2110                         tcp_opt_off = ehdrlen + sizeof(struct ip) +
2111                                         sizeof(struct tcphdr);
2112
2113                         if (mp->m_len < tcp_opt_off) {
2114                                 m_copydata(mp, 0, tcp_opt_off, hdr);
2115                                 ip = (struct ip *)(hdr + ehdrlen);
2116                         } else {
2117                                 ip = (struct ip *)(mp->m_data + ehdrlen);
2118                         }
2119
2120                         ip_hlen = ip->ip_hl << 2;
2121                         opcode = Q8_TX_CMD_OP_XMT_TCP_LSO;
2122
2123                                 
2124                         if ((ip->ip_p != IPPROTO_TCP) ||
2125                                 (ip_hlen != sizeof (struct ip))){
2126                                 /* IP Options are not supported */
2127
2128                                 offload = 0;
2129                         } else
2130                                 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
2131
2132                 break;
2133
2134                 case ETHERTYPE_IPV6:
2135
2136                         tcp_opt_off = ehdrlen + sizeof(struct ip6_hdr) +
2137                                         sizeof (struct tcphdr);
2138
2139                         if (mp->m_len < tcp_opt_off) {
2140                                 m_copydata(mp, 0, tcp_opt_off, hdr);
2141                                 ip6 = (struct ip6_hdr *)(hdr + ehdrlen);
2142                         } else {
2143                                 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2144                         }
2145
2146                         ip_hlen = sizeof(struct ip6_hdr);
2147                         opcode = Q8_TX_CMD_OP_XMT_TCP_LSO_IPV6;
2148
2149                         if (ip6->ip6_nxt != IPPROTO_TCP) {
2150                                 //device_printf(dev, "%s: ipv6\n", __func__);
2151                                 offload = 0;
2152                         } else
2153                                 th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
2154                 break;
2155
2156                 default:
2157                         QL_DPRINT8(ha, (dev, "%s: type!=ip\n", __func__));
2158                         offload = 0;
2159                 break;
2160         }
2161
2162         if (!offload)
2163                 return (-1);
2164
2165         tcp_hlen = th->th_off << 2;
2166         hdrlen = ehdrlen + ip_hlen + tcp_hlen;
2167
2168         if (mp->m_len < hdrlen) {
2169                 if (mp->m_len < tcp_opt_off) {
2170                         if (tcp_hlen > sizeof(struct tcphdr)) {
2171                                 m_copydata(mp, tcp_opt_off,
2172                                         (tcp_hlen - sizeof(struct tcphdr)),
2173                                         &hdr[tcp_opt_off]);
2174                         }
2175                 } else {
2176                         m_copydata(mp, 0, hdrlen, hdr);
2177                 }
2178         }
2179
2180         tx_cmd->mss = mp->m_pkthdr.tso_segsz;
2181
2182         tx_cmd->flags_opcode = opcode;
2183         tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;
2184         tx_cmd->total_hdr_len = hdrlen;
2185
2186         /* Multicast: least significant bit of the most significant byte is 1 */
2187         if (eh->evl_dhost[0] & 0x01) {
2188                 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST;
2189         }
2190
2191         if (mp->m_len < hdrlen) {
2192                 QL_DPRINT8(ha, (dev, "%s: split hdr, hdrlen = %d\n", __func__, hdrlen));
2193                 return (1);
2194         }
2195
2196         return (0);
2197 }
2198
2199 /*
2200  * Name: qla_tx_chksum
2201  * Function: Checks if the packet to be transmitted is a candidate for
2202  *      TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx
2203  *      Ring Structure are plugged in.
2204  */
2205 static int
2206 qla_tx_chksum(qla_host_t *ha, struct mbuf *mp, uint32_t *op_code,
2207         uint32_t *tcp_hdr_off)
2208 {
2209         struct ether_vlan_header *eh;
2210         struct ip *ip;
2211         struct ip6_hdr *ip6;
2212         uint32_t ehdrlen, ip_hlen;
2213         uint16_t etype, opcode, offload = 1;
2214         device_t dev;
2215         uint8_t buf[sizeof(struct ip6_hdr)];
2216
2217         dev = ha->pci_dev;
2218
2219         *op_code = 0;
2220
2221         if ((mp->m_pkthdr.csum_flags &
2222                 (CSUM_TCP|CSUM_UDP|CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) == 0)
2223                 return (-1);
2224
2225         eh = mtod(mp, struct ether_vlan_header *);
2226
2227         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2228                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2229                 etype = ntohs(eh->evl_proto);
2230         } else {
2231                 ehdrlen = ETHER_HDR_LEN;
2232                 etype = ntohs(eh->evl_encap_proto);
2233         }
2234
2235                 
2236         switch (etype) {
2237                 case ETHERTYPE_IP:
2238                         ip = (struct ip *)(mp->m_data + ehdrlen);
2239
2240                         ip_hlen = sizeof (struct ip);
2241
2242                         if (mp->m_len < (ehdrlen + ip_hlen)) {
2243                                 m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
2244                                 ip = (struct ip *)buf;
2245                         }
2246
2247                         if (ip->ip_p == IPPROTO_TCP)
2248                                 opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM;
2249                         else if (ip->ip_p == IPPROTO_UDP)
2250                                 opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM;
2251                         else {
2252                                 //device_printf(dev, "%s: ipv4\n", __func__);
2253                                 offload = 0;
2254                         }
2255                 break;
2256
2257                 case ETHERTYPE_IPV6:
2258                         ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2259
2260                         ip_hlen = sizeof(struct ip6_hdr);
2261
2262                         if (mp->m_len < (ehdrlen + ip_hlen)) {
2263                                 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
2264                                         buf);
2265                                 ip6 = (struct ip6_hdr *)buf;
2266                         }
2267
2268                         if (ip6->ip6_nxt == IPPROTO_TCP)
2269                                 opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6;
2270                         else if (ip6->ip6_nxt == IPPROTO_UDP)
2271                                 opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6;
2272                         else {
2273                                 //device_printf(dev, "%s: ipv6\n", __func__);
2274                                 offload = 0;
2275                         }
2276                 break;
2277
2278                 default:
2279                         offload = 0;
2280                 break;
2281         }
2282         if (!offload)
2283                 return (-1);
2284
2285         *op_code = opcode;
2286         *tcp_hdr_off = (ip_hlen + ehdrlen);
2287
2288         return (0);
2289 }
2290
2291 #define QLA_TX_MIN_FREE 2
2292 /*
2293  * Name: ql_hw_send
2294  * Function: Transmits a packet. It first checks if the packet is a
2295  *      candidate for Large TCP Segment Offload and then for UDP/TCP checksum
2296  *      offload. If neither of these criteria is met, it is transmitted
2297  *      as a regular ethernet frame.
2298  */
2299 int
2300 ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
2301         uint32_t tx_idx, struct mbuf *mp, uint32_t txr_idx, uint32_t iscsi_pdu)
2302 {
2303         struct ether_vlan_header *eh;
2304         qla_hw_t *hw = &ha->hw;
2305         q80_tx_cmd_t *tx_cmd, tso_cmd;
2306         bus_dma_segment_t *c_seg;
2307         uint32_t num_tx_cmds, hdr_len = 0;
2308         uint32_t total_length = 0, bytes, tx_cmd_count = 0, txr_next;
2309         device_t dev;
2310         int i, ret;
2311         uint8_t *src = NULL, *dst = NULL;
2312         uint8_t frame_hdr[QL_FRAME_HDR_SIZE];
2313         uint32_t op_code = 0;
2314         uint32_t tcp_hdr_off = 0;
2315
2316         dev = ha->pci_dev;
2317
2318         /*
2319          * Always make sure there is at least one empty slot in the tx_ring;
2320          * the tx_ring is considered full when only one entry is available.
2321          */
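        /* each tx_cmd holds up to Q8_TX_CMD_MAX_SEGMENTS (4) DMA segments */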
2322         num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2;
2323
2324         total_length = mp->m_pkthdr.len;
2325         if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
2326                 device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
2327                         __func__, total_length);
2328                 return (EINVAL);
2329         }
2330         eh = mtod(mp, struct ether_vlan_header *);
2331
2332         if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
2333
2334                 bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t));
2335
2336                 src = frame_hdr;
2337                 ret = qla_tx_tso(ha, mp, &tso_cmd, src);
2338
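                /* ret == 0: header contiguous in mbuf; ret == 1: copied into frame_hdr */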
2339                 if (!(ret & ~1)) {
2340                         /* find the additional tx_cmd descriptors required */
2341
2342                         if (mp->m_flags & M_VLANTAG)
2343                                 tso_cmd.total_hdr_len += ETHER_VLAN_ENCAP_LEN;
2344
2345                         hdr_len = tso_cmd.total_hdr_len;
2346
2347                         bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
2348                         bytes = QL_MIN(bytes, hdr_len);
2349
2350                         num_tx_cmds++;
2351                         hdr_len -= bytes;
2352
2353                         while (hdr_len) {
2354                                 bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
2355                                 hdr_len -= bytes;
2356                                 num_tx_cmds++;
2357                         }
2358                         hdr_len = tso_cmd.total_hdr_len;
2359
2360                         if (ret == 0)
2361                                 src = (uint8_t *)eh;
2362                 } else 
2363                         return (EINVAL);
2364         } else {
2365                 (void)qla_tx_chksum(ha, mp, &op_code, &tcp_hdr_off);
2366         }
2367
2368         if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
2369                 ql_hw_tx_done_locked(ha, txr_idx);
2370                 if (hw->tx_cntxt[txr_idx].txr_free <=
2371                                 (num_tx_cmds + QLA_TX_MIN_FREE)) {
2372                         QL_DPRINT8(ha, (dev, "%s: (hw->txr_free <= "
2373                                 "(num_tx_cmds + QLA_TX_MIN_FREE))\n",
2374                                 __func__));
2375                         return (-1);
2376                 }
2377         }
2378
2379         for (i = 0; i < num_tx_cmds; i++) {
2380                 int j;
2381
2382                 j = (tx_idx + i) & (NUM_TX_DESCRIPTORS - 1);
2383
2384                 if (NULL != ha->tx_ring[txr_idx].tx_buf[j].m_head) {
2385                         QL_ASSERT(ha, 0, \
2386                                 ("%s [%d]: txr_idx = %d tx_idx = %d mbuf = %p\n",\
2387                                 __func__, __LINE__, txr_idx, j,\
2388                                 ha->tx_ring[txr_idx].tx_buf[j].m_head));
2389                         return (EINVAL);
2390                 }
2391         }
2392
2393         tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[tx_idx];
2394
2395         if (!(mp->m_pkthdr.csum_flags & CSUM_TSO)) {
2396
2397                 if (nsegs > ha->hw.max_tx_segs)
2398                         ha->hw.max_tx_segs = nsegs;
2399
2400                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2401
2402                 if (op_code) {
2403                         tx_cmd->flags_opcode = op_code;
2404                         tx_cmd->tcp_hdr_off = tcp_hdr_off;
2405
2406                 } else {
2407                         tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER;
2408                 }
2409         } else {
2410                 bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t));
2411                 ha->tx_tso_frames++;
2412         }
2413
2414         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2415                 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED;
2416
2417                 if (iscsi_pdu)
2418                         eh->evl_tag |= ha->hw.user_pri_iscsi << 13;
2419
2420         } else if (mp->m_flags & M_VLANTAG) {
2421
2422                 if (hdr_len) { /* TSO */
2423                         tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED |
2424                                                 Q8_TX_CMD_FLAGS_HW_VLAN_ID);
2425                         tx_cmd->tcp_hdr_off += ETHER_VLAN_ENCAP_LEN;
2426                 } else
2427                         tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_HW_VLAN_ID;
2428
2429                 ha->hw_vlan_tx_frames++;
2430                 tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag;
2431
2432                 if (iscsi_pdu) {
2433                         tx_cmd->vlan_tci |= ha->hw.user_pri_iscsi << 13;
2434                         mp->m_pkthdr.ether_vtag = tx_cmd->vlan_tci;
2435                 }
2436         }
2437
2438
2439         tx_cmd->n_bufs = (uint8_t)nsegs;
2440         tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF);
2441         tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8)));
2442         tx_cmd->cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func);
2443
2444         c_seg = segs;
2445
2446         while (1) {
2447                 for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) {
2448
2449                         switch (i) {
2450                         case 0:
2451                                 tx_cmd->buf1_addr = c_seg->ds_addr;
2452                                 tx_cmd->buf1_len = c_seg->ds_len;
2453                                 break;
2454
2455                         case 1:
2456                                 tx_cmd->buf2_addr = c_seg->ds_addr;
2457                                 tx_cmd->buf2_len = c_seg->ds_len;
2458                                 break;
2459
2460                         case 2:
2461                                 tx_cmd->buf3_addr = c_seg->ds_addr;
2462                                 tx_cmd->buf3_len = c_seg->ds_len;
2463                                 break;
2464
2465                         case 3:
2466                                 tx_cmd->buf4_addr = c_seg->ds_addr;
2467                                 tx_cmd->buf4_len = c_seg->ds_len;
2468                                 break;
2469                         }
2470
2471                         c_seg++;
2472                         nsegs--;
2473                 }
2474
2475                 txr_next = hw->tx_cntxt[txr_idx].txr_next =
2476                         (hw->tx_cntxt[txr_idx].txr_next + 1) &
2477                                 (NUM_TX_DESCRIPTORS - 1);
2478                 tx_cmd_count++;
2479
2480                 if (!nsegs)
2481                         break;
2482                 
2483                 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2484                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2485         }
2486
2487         if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
2488
2489                 /* TSO : Copy the header in the following tx cmd descriptors */
2490
2491                 txr_next = hw->tx_cntxt[txr_idx].txr_next;
2492
2493                 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2494                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2495
2496                 bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
2497                 bytes = QL_MIN(bytes, hdr_len);
2498
2499                 dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN;
2500
2501                 if (mp->m_flags & M_VLANTAG) {
2502                         /* first copy the src/dst MAC addresses */
2503                         bcopy(src, dst, (ETHER_ADDR_LEN * 2));
2504                         dst += (ETHER_ADDR_LEN * 2);
2505                         src += (ETHER_ADDR_LEN * 2);
2506                         
2507                         *((uint16_t *)dst) = htons(ETHERTYPE_VLAN);
2508                         dst += 2;
2509                         *((uint16_t *)dst) = htons(mp->m_pkthdr.ether_vtag);
2510                         dst += 2;
2511
2512                         /* bytes left in src header */
2513                         hdr_len -= ((ETHER_ADDR_LEN * 2) +
2514                                         ETHER_VLAN_ENCAP_LEN);
2515
2516                         /* bytes left in TxCmd Entry */
2517                         bytes -= ((ETHER_ADDR_LEN * 2) + ETHER_VLAN_ENCAP_LEN);
2518
2519
2520                         bcopy(src, dst, bytes);
2521                         src += bytes;
2522                         hdr_len -= bytes;
2523                 } else {
2524                         bcopy(src, dst, bytes);
2525                         src += bytes;
2526                         hdr_len -= bytes;
2527                 }
2528
2529                 txr_next = hw->tx_cntxt[txr_idx].txr_next =
2530                                 (hw->tx_cntxt[txr_idx].txr_next + 1) &
2531                                         (NUM_TX_DESCRIPTORS - 1);
2532                 tx_cmd_count++;
2533                 
2534                 while (hdr_len) {
2535                         tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2536                         bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2537
2538                         bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
2539
2540                         bcopy(src, tx_cmd, bytes);
2541                         src += bytes;
2542                         hdr_len -= bytes;
2543
2544                         txr_next = hw->tx_cntxt[txr_idx].txr_next =
2545                                 (hw->tx_cntxt[txr_idx].txr_next + 1) &
2546                                         (NUM_TX_DESCRIPTORS - 1);
2547                         tx_cmd_count++;
2548                 }
2549         }
2550
2551         hw->tx_cntxt[txr_idx].txr_free =
2552                 hw->tx_cntxt[txr_idx].txr_free - tx_cmd_count;
2553
2554         QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->tx_cntxt[txr_idx].txr_next,\
2555                 txr_idx);
2556         QL_DPRINT8(ha, (dev, "%s: return\n", __func__));
2557
2558         return (0);
2559 }
2560
2561
2562
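/*
 * Name: qla_config_rss_ind_table
 * Function: Programs the entire RSS indirection table, distributing the
 *      entries round-robin across the SDS rings.
 */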
2563 #define Q8_CONFIG_IND_TBL_SIZE  32 /* < Q8_RSS_IND_TBL_SIZE and power of 2 */
2564 static int
2565 qla_config_rss_ind_table(qla_host_t *ha)
2566 {
2567         uint32_t i, count;
2568         uint8_t rss_ind_tbl[Q8_CONFIG_IND_TBL_SIZE];
2569
2570
2571         for (i = 0; i < Q8_CONFIG_IND_TBL_SIZE; i++) {
2572                 rss_ind_tbl[i] = i % ha->hw.num_sds_rings;
2573         }
2574
2575         for (i = 0; i <= Q8_RSS_IND_TBL_MAX_IDX;
2576                 i = i + Q8_CONFIG_IND_TBL_SIZE) {
2577
2578                 if ((i + Q8_CONFIG_IND_TBL_SIZE) > Q8_RSS_IND_TBL_MAX_IDX) {
2579                         count = Q8_RSS_IND_TBL_MAX_IDX - i + 1;
2580                 } else {
2581                         count = Q8_CONFIG_IND_TBL_SIZE;
2582                 }
2583
2584                 if (qla_set_rss_ind_table(ha, i, count, ha->hw.rcv_cntxt_id,
2585                         rss_ind_tbl))
2586                         return (-1);
2587         }
2588
2589         return (0);
2590 }
2591
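/*
 * Name: qla_config_soft_lro
 * Function: Initializes software LRO on every SDS ring.
 */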
2592 static int
2593 qla_config_soft_lro(qla_host_t *ha)
2594 {
2595         int i;
2596         qla_hw_t *hw = &ha->hw;
2597         struct lro_ctrl *lro;
2598
2599         for (i = 0; i < hw->num_sds_rings; i++) {
2600                 lro = &hw->sds[i].lro;
2601
2602                 bzero(lro, sizeof(struct lro_ctrl));
2603
2604 #if (__FreeBSD_version >= 1100101)
2605                 if (tcp_lro_init_args(lro, ha->ifp, 0, NUM_RX_DESCRIPTORS)) {
2606                         device_printf(ha->pci_dev,
2607                                 "%s: tcp_lro_init_args [%d] failed\n",
2608                                 __func__, i);
2609                         return (-1);
2610                 }
2611 #else
2612                 if (tcp_lro_init(lro)) {
2613                         device_printf(ha->pci_dev,
2614                                 "%s: tcp_lro_init [%d] failed\n",
2615                                 __func__, i);
2616                         return (-1);
2617                 }
2618 #endif /* #if (__FreeBSD_version >= 1100101) */
2619
2620                 lro->ifp = ha->ifp;
2621         }
2622
2623         QL_DPRINT2(ha, (ha->pci_dev, "%s: LRO initialized\n", __func__));
2624         return (0);
2625 }
2626
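/*
 * Name: qla_drain_soft_lro
 * Function: Flushes any packets queued in the software LRO engines.
 */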
2627 static void
2628 qla_drain_soft_lro(qla_host_t *ha)
2629 {
2630         int i;
2631         qla_hw_t *hw = &ha->hw;
2632         struct lro_ctrl *lro;
2633
2634         for (i = 0; i < hw->num_sds_rings; i++) {
2635                 lro = &hw->sds[i].lro;
2636
2637 #if (__FreeBSD_version >= 1100101)
2638                 tcp_lro_flush_all(lro);
2639 #else
2640                 struct lro_entry *queued;
2641
2642                 while ((!SLIST_EMPTY(&lro->lro_active))) {
2643                         queued = SLIST_FIRST(&lro->lro_active);
2644                         SLIST_REMOVE_HEAD(&lro->lro_active, next);
2645                         tcp_lro_flush(lro, queued);
2646                 }
2647 #endif /* #if (__FreeBSD_version >= 1100101) */
2648         }
2649
2650         return;
2651 }
2652
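/*
 * Name: qla_free_soft_lro
 * Function: Frees the software LRO resources of all SDS rings.
 */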
2653 static void
2654 qla_free_soft_lro(qla_host_t *ha)
2655 {
2656         int i;
2657         qla_hw_t *hw = &ha->hw;
2658         struct lro_ctrl *lro;
2659
2660         for (i = 0; i < hw->num_sds_rings; i++) {
2661                 lro = &hw->sds[i].lro;
2662                 tcp_lro_free(lro);
2663         }
2664
2665         return;
2666 }
2667
2668
2669 /*
2670  * Name: ql_del_hw_if
2671  * Function: Destroys the hardware specific entities corresponding to an
2672  *      Ethernet Interface
2673  */
2674 void
2675 ql_del_hw_if(qla_host_t *ha)
2676 {
2677         uint32_t i;
2678         uint32_t num_msix;
2679
2680         (void)qla_stop_nic_func(ha);
2681
2682         qla_del_rcv_cntxt(ha);
2683
2684         qla_del_xmt_cntxt(ha);
2685
2686         if (ha->hw.flags.init_intr_cnxt) {
2687                 for (i = 0; i < ha->hw.num_sds_rings; ) {
2688
2689                         if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2690                                 num_msix = Q8_MAX_INTR_VECTORS;
2691                         else
2692                                 num_msix = ha->hw.num_sds_rings - i;
2693                         qla_config_intr_cntxt(ha, i, num_msix, 0);
2694
2695                         i += num_msix;
2696                 }
2697
2698                 ha->hw.flags.init_intr_cnxt = 0;
2699         }
2700
2701         if (ha->hw.enable_soft_lro) {
2702                 qla_drain_soft_lro(ha);
2703                 qla_free_soft_lro(ha);
2704         }
2705
2706         return;
2707 }
2708
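/*
 * Name: qla_confirm_9kb_enable
 * Function: Queries the NIC partition for 9KB receive buffer support and
 *      disables 9KB buffers if the firmware does not support them.
 */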
2709 void
2710 qla_confirm_9kb_enable(qla_host_t *ha)
2711 {
2712         uint32_t supports_9kb = 0;
2713
2714         ha->hw.mbx_intr_mask_offset = READ_REG32(ha, Q8_MBOX_INT_MASK_MSIX);
2715
2716         /* Use MSI-X vector 0; Enable Firmware Mailbox Interrupt */
2717         WRITE_REG32(ha, Q8_MBOX_INT_ENABLE, BIT_2);
2718         WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
2719
2720         qla_get_nic_partition(ha, &supports_9kb, NULL);
2721
2722         if (!supports_9kb)
2723                 ha->hw.enable_9kb = 0;
2724
2725         return;
2726 }
2727
2728 /*
2729  * Name: ql_init_hw_if
2730  * Function: Creates the hardware specific entities corresponding to an
2731  *      Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
2732  *      corresponding to the interface. Enables LRO if allowed.
2733  */
2734 int
2735 ql_init_hw_if(qla_host_t *ha)
2736 {
2737         device_t        dev;
2738         uint32_t        i;
2739         uint8_t         bcast_mac[6];
2740         qla_rdesc_t     *rdesc;
2741         uint32_t        num_msix;
2742
2743         dev = ha->pci_dev;
2744
2745         for (i = 0; i < ha->hw.num_sds_rings; i++) {
2746                 bzero(ha->hw.dma_buf.sds_ring[i].dma_b,
2747                         ha->hw.dma_buf.sds_ring[i].size);
2748         }
2749
2750         for (i = 0; i < ha->hw.num_sds_rings; ) {
2751
2752                 if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2753                         num_msix = Q8_MAX_INTR_VECTORS;
2754                 else
2755                         num_msix = ha->hw.num_sds_rings - i;
2756
2757                 if (qla_config_intr_cntxt(ha, i, num_msix, 1)) {
2758
2759                         if (i > 0) {
2760
2761                                 num_msix = i;
2762
2763                                 for (i = 0; i < num_msix; ) {
2764                                         qla_config_intr_cntxt(ha, i,
2765                                                 Q8_MAX_INTR_VECTORS, 0);
2766                                         i += Q8_MAX_INTR_VECTORS;
2767                                 }
2768                         }
2769                         return (-1);
2770                 }
2771
2772                 i = i + num_msix;
2773         }
2774
2775         ha->hw.flags.init_intr_cnxt = 1;
2776
2777         /*
2778          * Create Receive Context
2779          */
2780         if (qla_init_rcv_cntxt(ha)) {
2781                 return (-1);
2782         }
2783
2784         for (i = 0; i < ha->hw.num_rds_rings; i++) {
2785                 rdesc = &ha->hw.rds[i];
2786                 rdesc->rx_next = NUM_RX_DESCRIPTORS - 2;
2787                 rdesc->rx_in = 0;
2788                 /* Update the RDS Producer Indices */
2789                 QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,\
2790                         rdesc->rx_next);
2791         }
2792
2793         /*
2794          * Create Transmit Context
2795          */
2796         if (qla_init_xmt_cntxt(ha)) {
2797                 qla_del_rcv_cntxt(ha);
2798                 return (-1);
2799         }
2800         ha->hw.max_tx_segs = 0;
2801
2802         if (qla_config_mac_addr(ha, ha->hw.mac_addr, 1, 1))
2803                 return (-1);
2804
2805         ha->hw.flags.unicast_mac = 1;
2806
2807         bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
2808         bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
2809
2810         if (qla_config_mac_addr(ha, bcast_mac, 1, 1))
2811                 return (-1);
2812
2813         ha->hw.flags.bcast_mac = 1;
2814
2815         /*
2816          * program any cached multicast addresses
2817          */
2818         if (qla_hw_add_all_mcast(ha))
2819                 return (-1);
2820
2821         if (ql_set_max_mtu(ha, ha->max_frame_size, ha->hw.rcv_cntxt_id))
2822                 return (-1);
2823
2824         if (qla_config_rss(ha, ha->hw.rcv_cntxt_id))
2825                 return (-1);
2826
2827         if (qla_config_rss_ind_table(ha))
2828                 return (-1);
2829
2830         if (qla_config_intr_coalesce(ha, ha->hw.rcv_cntxt_id, 0, 1))
2831                 return (-1);
2832
2833         if (qla_link_event_req(ha, ha->hw.rcv_cntxt_id))
2834                 return (-1);
2835
2836         if (ha->ifp->if_capenable & IFCAP_LRO) {
2837                 if (ha->hw.enable_hw_lro) {
2838                         ha->hw.enable_soft_lro = 0;
2839
2840                         if (qla_config_fw_lro(ha, ha->hw.rcv_cntxt_id))
2841                                 return (-1);
2842                 } else {
2843                         ha->hw.enable_soft_lro = 1;
2844
2845                         if (qla_config_soft_lro(ha))
2846                                 return (-1);
2847                 }
2848         }
2849
2850         if (qla_init_nic_func(ha))
2851                 return (-1);
2852
2853         if (qla_query_fw_dcbx_caps(ha))
2854                 return (-1);
2855
2856         for (i = 0; i < ha->hw.num_sds_rings; i++)
2857                 QL_ENABLE_INTERRUPTS(ha, i);
2858
2859         return (0);
2860 }
2861
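/*
 * Name: qla_map_sds_to_rds
 * Function: Establishes a one-to-one mapping between 'num_idx' status
 *      (SDS) and receive descriptor (RDS) rings, starting at 'start_idx'.
 */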
2862 static int
2863 qla_map_sds_to_rds(qla_host_t *ha, uint32_t start_idx, uint32_t num_idx)
2864 {
2865         device_t                dev = ha->pci_dev;
2866         q80_rq_map_sds_to_rds_t *map_rings;
2867         q80_rsp_map_sds_to_rds_t *map_rings_rsp;
2868         uint32_t                i, err;
2869         qla_hw_t                *hw = &ha->hw;
2870
2871         map_rings = (q80_rq_map_sds_to_rds_t *)ha->hw.mbox;
2872         bzero(map_rings, sizeof(q80_rq_map_sds_to_rds_t));
2873
2874         map_rings->opcode = Q8_MBX_MAP_SDS_TO_RDS;
2875         map_rings->count_version = (sizeof (q80_rq_map_sds_to_rds_t) >> 2);
2876         map_rings->count_version |= Q8_MBX_CMD_VERSION;
2877
2878         map_rings->cntxt_id = hw->rcv_cntxt_id;
2879         map_rings->num_rings = num_idx;
2880
2881         for (i = 0; i < num_idx; i++) {
2882                 map_rings->sds_rds[i].sds_ring = i + start_idx;
2883                 map_rings->sds_rds[i].rds_ring = i + start_idx;
2884         }
2885
2886         if (qla_mbx_cmd(ha, (uint32_t *)map_rings,
2887                 (sizeof (q80_rq_map_sds_to_rds_t) >> 2),
2888                 ha->hw.mbox, (sizeof(q80_rsp_map_sds_to_rds_t) >> 2), 0)) {
2889                 device_printf(dev, "%s: failed0\n", __func__);
2890                 return (-1);
2891         }
2892
2893         map_rings_rsp = (q80_rsp_map_sds_to_rds_t *)ha->hw.mbox;
2894
2895         err = Q8_MBX_RSP_STATUS(map_rings_rsp->regcnt_status);
2896
2897         if (err) {
2898                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2899                 return (-1);
2900         }
2901
2902         return (0);
2903 }
2904
2905 /*
2906  * Name: qla_init_rcv_cntxt
2907  * Function: Creates the Receive Context.
2908  */
2909 static int
2910 qla_init_rcv_cntxt(qla_host_t *ha)
2911 {
2912         q80_rq_rcv_cntxt_t      *rcntxt;
2913         q80_rsp_rcv_cntxt_t     *rcntxt_rsp;
2914         q80_stat_desc_t         *sdesc;
2915         int                     i, j;
2916         qla_hw_t                *hw = &ha->hw;
2917         device_t                dev;
2918         uint32_t                err;
2919         uint32_t                rcntxt_sds_rings;
2920         uint32_t                rcntxt_rds_rings;
2921         uint32_t                max_idx;
2922
2923         dev = ha->pci_dev;
2924
2925         /*
2926          * Create Receive Context
2927          */
2928
2929         for (i = 0; i < hw->num_sds_rings; i++) {
2930                 sdesc = (q80_stat_desc_t *)&hw->sds[i].sds_ring_base[0];
2931
2932                 for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++, sdesc++) {
2933                         sdesc->data[0] = 1ULL;
2934                         sdesc->data[1] = 1ULL;
2935                 }
2936         }
2937
2938         rcntxt_sds_rings = hw->num_sds_rings;
2939         if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS)
2940                 rcntxt_sds_rings = MAX_RCNTXT_SDS_RINGS;
2941
2942         rcntxt_rds_rings = hw->num_rds_rings;
2943
2944         if (hw->num_rds_rings > MAX_RDS_RING_SETS)
2945                 rcntxt_rds_rings = MAX_RDS_RING_SETS;
2946
2947         rcntxt = (q80_rq_rcv_cntxt_t *)ha->hw.mbox;
2948         bzero(rcntxt, (sizeof (q80_rq_rcv_cntxt_t)));
2949
2950         rcntxt->opcode = Q8_MBX_CREATE_RX_CNTXT;
2951         rcntxt->count_version = (sizeof (q80_rq_rcv_cntxt_t) >> 2);
2952         rcntxt->count_version |= Q8_MBX_CMD_VERSION;
2953
2954         rcntxt->cap0 = Q8_RCV_CNTXT_CAP0_BASEFW |
2955                         Q8_RCV_CNTXT_CAP0_LRO |
2956                         Q8_RCV_CNTXT_CAP0_HW_LRO |
2957                         Q8_RCV_CNTXT_CAP0_RSS |
2958                         Q8_RCV_CNTXT_CAP0_SGL_LRO;
2959
2960         if (ha->hw.enable_9kb)
2961                 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SINGLE_JUMBO;
2962         else
2963                 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SGL_JUMBO;
2964
2965         if (ha->hw.num_rds_rings > 1) {
2966                 rcntxt->nrds_sets_rings = rcntxt_rds_rings | (1 << 5);
2967                 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_MULTI_RDS;
2968         } else
2969                 rcntxt->nrds_sets_rings = 0x1 | (1 << 5);
2970
2971         rcntxt->nsds_rings = rcntxt_sds_rings;
2972
2973         rcntxt->rds_producer_mode = Q8_RCV_CNTXT_RDS_PROD_MODE_UNIQUE;
2974
2975         rcntxt->rcv_vpid = 0;
2976
2977         for (i = 0; i < rcntxt_sds_rings; i++) {
2978                 rcntxt->sds[i].paddr =
2979                         qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr);
2980                 rcntxt->sds[i].size =
2981                         qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
2982                 rcntxt->sds[i].intr_id = qla_host_to_le16(hw->intr_id[i]);
2983                 rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0);
2984         }
2985
2986         for (i = 0; i <  rcntxt_rds_rings; i++) {
2987                 rcntxt->rds[i].paddr_std =
2988                         qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr);
2989
2990                 if (ha->hw.enable_9kb)
2991                         rcntxt->rds[i].std_bsize =
2992                                 qla_host_to_le64(MJUM9BYTES);
2993                 else
2994                         rcntxt->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
2995
2996                 rcntxt->rds[i].std_nentries =
2997                         qla_host_to_le32(NUM_RX_DESCRIPTORS);
2998         }
2999
3000         if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
3001                 (sizeof (q80_rq_rcv_cntxt_t) >> 2),
3002                 ha->hw.mbox, (sizeof(q80_rsp_rcv_cntxt_t) >> 2), 0)) {
3003                 device_printf(dev, "%s: failed0\n", __func__);
3004                 return (-1);
3005         }
3006
3007         rcntxt_rsp = (q80_rsp_rcv_cntxt_t *)ha->hw.mbox;
3008
3009         err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
3010
3011         if (err) {
3012                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3013                 return (-1);
3014         }
3015
3016         for (i = 0; i <  rcntxt_sds_rings; i++) {
3017                 hw->sds[i].sds_consumer = rcntxt_rsp->sds_cons[i];
3018         }
3019
3020         for (i = 0; i <  rcntxt_rds_rings; i++) {
3021                 hw->rds[i].prod_std = rcntxt_rsp->rds[i].prod_std;
3022         }
3023
3024         hw->rcv_cntxt_id = rcntxt_rsp->cntxt_id;
3025
3026         ha->hw.flags.init_rx_cnxt = 1;
3027
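        /*
         * A single create-context command accepts at most
         * MAX_RCNTXT_SDS_RINGS status rings; any remaining rings are
         * added to the context in chunks via qla_add_rcv_rings() below.
         */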
3028         if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) {
3029
3030                 for (i = MAX_RCNTXT_SDS_RINGS; i < hw->num_sds_rings;) {
3031
3032                         if ((i + MAX_RCNTXT_SDS_RINGS) < hw->num_sds_rings)
3033                                 max_idx = MAX_RCNTXT_SDS_RINGS;
3034                         else
3035                                 max_idx = hw->num_sds_rings - i;
3036
3037                         err = qla_add_rcv_rings(ha, i, max_idx);
3038                         if (err)
3039                                 return -1;
3040
3041                         i += max_idx;
3042                 }
3043         }
3044
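        /*
         * When multiple RDS rings are in use, map the SDS rings to the
         * RDS rings in chunks of up to MAX_SDS_TO_RDS_MAP per command.
         */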
3045         if (hw->num_rds_rings > 1) {
3046
3047                 for (i = 0; i < hw->num_rds_rings; ) {
3048
3049                         if ((i + MAX_SDS_TO_RDS_MAP) < hw->num_rds_rings)
3050                                 max_idx = MAX_SDS_TO_RDS_MAP;
3051                         else
3052                                 max_idx = hw->num_rds_rings - i;
3053
3054                         err = qla_map_sds_to_rds(ha, i, max_idx);
3055                         if (err)
3056                                 return -1;
3057
3058                         i += max_idx;
3059                 }
3060         }
3061
3062         return (0);
3063 }
3064
3065 static int
3066 qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds)
3067 {
3068         device_t                dev = ha->pci_dev;
3069         q80_rq_add_rcv_rings_t  *add_rcv;
3070         q80_rsp_add_rcv_rings_t *add_rcv_rsp;
3071         uint32_t                i,j, err;
3072         qla_hw_t                *hw = &ha->hw;
3073
3074         add_rcv = (q80_rq_add_rcv_rings_t *)ha->hw.mbox;
3075         bzero(add_rcv, sizeof (q80_rq_add_rcv_rings_t));
3076
3077         add_rcv->opcode = Q8_MBX_ADD_RX_RINGS;
3078         add_rcv->count_version = (sizeof (q80_rq_add_rcv_rings_t) >> 2);
3079         add_rcv->count_version |= Q8_MBX_CMD_VERSION;
3080
3081         add_rcv->nrds_sets_rings = nsds | (1 << 5);
3082         add_rcv->nsds_rings = nsds;
3083         add_rcv->cntxt_id = hw->rcv_cntxt_id;
3084
3085         for (i = 0; i <  nsds; i++) {
3086
3087                 j = i + sds_idx;
3088
3089                 add_rcv->sds[i].paddr =
3090                         qla_host_to_le64(hw->dma_buf.sds_ring[j].dma_addr);
3091
3092                 add_rcv->sds[i].size =
3093                         qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
3094
3095                 add_rcv->sds[i].intr_id = qla_host_to_le16(hw->intr_id[j]);
3096                 add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0);
3097
3098         }
3099
3100         for (i = 0; (i <  nsds); i++) {
3101                 j = i + sds_idx;
3102
3103                 add_rcv->rds[i].paddr_std =
3104                         qla_host_to_le64(hw->dma_buf.rds_ring[j].dma_addr);
3105
3106                 if (ha->hw.enable_9kb)
3107                         add_rcv->rds[i].std_bsize =
3108                                 qla_host_to_le64(MJUM9BYTES);
3109                 else
3110                         add_rcv->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
3111
3112                 add_rcv->rds[i].std_nentries =
3113                         qla_host_to_le32(NUM_RX_DESCRIPTORS);
3114         }
3115
3116
3117         if (qla_mbx_cmd(ha, (uint32_t *)add_rcv,
3118                 (sizeof (q80_rq_add_rcv_rings_t) >> 2),
3119                 ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
3120                 device_printf(dev, "%s: failed0\n", __func__);
3121                 return (-1);
3122         }
3123
3124         add_rcv_rsp = (q80_rsp_add_rcv_rings_t *)ha->hw.mbox;
3125
3126         err = Q8_MBX_RSP_STATUS(add_rcv_rsp->regcnt_status);
3127
3128         if (err) {
3129                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3130                 return (-1);
3131         }
3132
3133         for (i = 0; i < nsds; i++) {
3134                 hw->sds[(i + sds_idx)].sds_consumer = add_rcv_rsp->sds_cons[i];
3135         }
3136
3137         for (i = 0; i < nsds; i++) {
3138                 hw->rds[(i + sds_idx)].prod_std = add_rcv_rsp->rds[i].prod_std;
3139         }
3140
3141         return (0);
3142 }
3143
3144 /*
3145  * Name: qla_del_rcv_cntxt
3146  * Function: Destroys the Receive Context.
3147  */
3148 static void
3149 qla_del_rcv_cntxt(qla_host_t *ha)
3150 {
3151         device_t                        dev = ha->pci_dev;
3152         q80_rcv_cntxt_destroy_t         *rcntxt;
3153         q80_rcv_cntxt_destroy_rsp_t     *rcntxt_rsp;
3154         uint32_t                        err;
3155         uint8_t                         bcast_mac[6];
3156
3157         if (!ha->hw.flags.init_rx_cnxt)
3158                 return;
3159
3160         if (qla_hw_del_all_mcast(ha))
3161                 return;
3162
3163         if (ha->hw.flags.bcast_mac) {
3164
3165                 bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
3166                 bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
3167
3168                 if (qla_config_mac_addr(ha, bcast_mac, 0, 1))
3169                         return;
3170                 ha->hw.flags.bcast_mac = 0;
3171
3172         }
3173
3174         if (ha->hw.flags.unicast_mac) {
3175                 if (qla_config_mac_addr(ha, ha->hw.mac_addr, 0, 1))
3176                         return;
3177                 ha->hw.flags.unicast_mac = 0;
3178         }
3179
3180         rcntxt = (q80_rcv_cntxt_destroy_t *)ha->hw.mbox;
3181         bzero(rcntxt, (sizeof (q80_rcv_cntxt_destroy_t)));
3182
3183         rcntxt->opcode = Q8_MBX_DESTROY_RX_CNTXT;
3184         rcntxt->count_version = (sizeof (q80_rcv_cntxt_destroy_t) >> 2);
3185         rcntxt->count_version |= Q8_MBX_CMD_VERSION;
3186
3187         rcntxt->cntxt_id = ha->hw.rcv_cntxt_id;
3188
3189         if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
3190                 (sizeof (q80_rcv_cntxt_destroy_t) >> 2),
3191                 ha->hw.mbox, (sizeof(q80_rcv_cntxt_destroy_rsp_t) >> 2), 0)) {
3192                 device_printf(dev, "%s: failed0\n", __func__);
3193                 return;
3194         }
3195         rcntxt_rsp = (q80_rcv_cntxt_destroy_rsp_t *)ha->hw.mbox;
3196
3197         err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
3198
3199         if (err) {
3200                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3201         }
3202
3203         ha->hw.flags.init_rx_cnxt = 0;
3204         return;
3205 }
3206
3207 /*
3208  * Name: qla_init_xmt_cntxt_i
3209  * Function: Creates the Transmit Context for the given tx ring.
3210  */
3211 static int
3212 qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
3213 {
3214         device_t                dev;
3215         qla_hw_t                *hw = &ha->hw;
3216         q80_rq_tx_cntxt_t       *tcntxt;
3217         q80_rsp_tx_cntxt_t      *tcntxt_rsp;
3218         uint32_t                err;
3219         qla_hw_tx_cntxt_t       *hw_tx_cntxt;
3220         uint32_t                intr_idx;
3221
3222         hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
3223
3224         dev = ha->pci_dev;
3225
3226         /*
3227          * Create Transmit Context
3228          */
3229         tcntxt = (q80_rq_tx_cntxt_t *)ha->hw.mbox;
3230         bzero(tcntxt, (sizeof (q80_rq_tx_cntxt_t)));
3231
3232         tcntxt->opcode = Q8_MBX_CREATE_TX_CNTXT;
3233         tcntxt->count_version = (sizeof (q80_rq_tx_cntxt_t) >> 2);
3234         tcntxt->count_version |= Q8_MBX_CMD_VERSION;
3235
3236         intr_idx = txr_idx;
3237
3238 #ifdef QL_ENABLE_ISCSI_TLV
3239
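        /*
         * With iSCSI TLV support, the upper half of the tx rings is
         * assigned to traffic class 1 and reuses the interrupt vectors
         * of the lower half (see the intr_idx computation below).
         */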
3240         tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO |
3241                                 Q8_TX_CNTXT_CAP0_TC;
3242
3243         if (txr_idx >= (ha->hw.num_tx_rings >> 1)) {
3244                 tcntxt->traffic_class = 1;
3245         }
3246
3247         intr_idx = txr_idx % (ha->hw.num_tx_rings >> 1);
3248
3249 #else
3250         tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO;
3251
3252 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
3253
3254         tcntxt->ntx_rings = 1;
3255
3256         tcntxt->tx_ring[0].paddr =
3257                 qla_host_to_le64(hw_tx_cntxt->tx_ring_paddr);
3258         tcntxt->tx_ring[0].tx_consumer =
3259                 qla_host_to_le64(hw_tx_cntxt->tx_cons_paddr);
3260         tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS);
3261
3262         tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[intr_idx]);
3263         tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0);
3264
3265         hw_tx_cntxt->txr_free = NUM_TX_DESCRIPTORS;
3266         hw_tx_cntxt->txr_next = hw_tx_cntxt->txr_comp = 0;
3267         *(hw_tx_cntxt->tx_cons) = 0;
3268
3269         if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
3270                 (sizeof (q80_rq_tx_cntxt_t) >> 2),
3271                 ha->hw.mbox,
3272                 (sizeof(q80_rsp_tx_cntxt_t) >> 2), 0)) {
3273                 device_printf(dev, "%s: failed0\n", __func__);
3274                 return (-1);
3275         }
3276         tcntxt_rsp = (q80_rsp_tx_cntxt_t *)ha->hw.mbox;
3277
3278         err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
3279
3280         if (err) {
3281                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3282                 return -1;
3283         }
3284
3285         hw_tx_cntxt->tx_prod_reg = tcntxt_rsp->tx_ring[0].prod_index;
3286         hw_tx_cntxt->tx_cntxt_id = tcntxt_rsp->tx_ring[0].cntxt_id;
3287
3288         if (qla_config_intr_coalesce(ha, hw_tx_cntxt->tx_cntxt_id, 0, 0))
3289                 return (-1);
3290
3291         return (0);
3292 }
3293
3294
3295 /*
3296  * Name: qla_del_xmt_cntxt_i
3297  * Function: Destroys the Transmit Context for the given tx ring.
3298  */
3299 static int
3300 qla_del_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
3301 {
3302         device_t                        dev = ha->pci_dev;
3303         q80_tx_cntxt_destroy_t          *tcntxt;
3304         q80_tx_cntxt_destroy_rsp_t      *tcntxt_rsp;
3305         uint32_t                        err;
3306
3307         tcntxt = (q80_tx_cntxt_destroy_t *)ha->hw.mbox;
3308         bzero(tcntxt, (sizeof (q80_tx_cntxt_destroy_t)));
3309
3310         tcntxt->opcode = Q8_MBX_DESTROY_TX_CNTXT;
3311         tcntxt->count_version = (sizeof (q80_tx_cntxt_destroy_t) >> 2);
3312         tcntxt->count_version |= Q8_MBX_CMD_VERSION;
3313
3314         tcntxt->cntxt_id = ha->hw.tx_cntxt[txr_idx].tx_cntxt_id;
3315
3316         if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
3317                 (sizeof (q80_tx_cntxt_destroy_t) >> 2),
3318                 ha->hw.mbox, (sizeof (q80_tx_cntxt_destroy_rsp_t) >> 2), 0)) {
3319                 device_printf(dev, "%s: failed0\n", __func__);
3320                 return (-1);
3321         }
3322         tcntxt_rsp = (q80_tx_cntxt_destroy_rsp_t *)ha->hw.mbox;
3323
3324         err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
3325
3326         if (err) {
3327                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3328                 return (-1);
3329         }
3330
3331         return (0);
3332 }
3333 static void
3334 qla_del_xmt_cntxt(qla_host_t *ha)
3335 {
3336         uint32_t i;
3337
3338         if (!ha->hw.flags.init_tx_cnxt)
3339                 return;
3340
3341         for (i = 0; i < ha->hw.num_tx_rings; i++) {
3342                 if (qla_del_xmt_cntxt_i(ha, i))
3343                         break;
3344         }
3345         ha->hw.flags.init_tx_cnxt = 0;
3346 }
3347
3348 static int
3349 qla_init_xmt_cntxt(qla_host_t *ha)
3350 {
3351         uint32_t i, j;
3352
3353         for (i = 0; i < ha->hw.num_tx_rings; i++) {
3354                 if (qla_init_xmt_cntxt_i(ha, i) != 0) {
3355                         for (j = 0; j < i; j++)
3356                                 qla_del_xmt_cntxt_i(ha, j);
3357                         return (-1);
3358                 }
3359         }
3360         ha->hw.flags.init_tx_cnxt = 1;
3361         return (0);
3362 }
3363
3364 static int
3365 qla_hw_all_mcast(qla_host_t *ha, uint32_t add_mcast)
3366 {
3367         int i, nmcast;
3368         uint32_t count = 0;
3369         uint8_t *mcast;
3370
3371         nmcast = ha->hw.nmcast;
3372
3373         QL_DPRINT2(ha, (ha->pci_dev,
3374                 "%s:[0x%x] enter nmcast = %d \n", __func__, add_mcast, nmcast));
3375
3376         mcast = ha->hw.mac_addr_arr;
3377         memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3378
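        /*
         * Collect the non-zero multicast addresses into mac_addr_arr and
         * program them in batches of up to Q8_MAX_MAC_ADDRS addresses
         * per qla_config_mac_addr() call.
         */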
3379         for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
3380                 if ((ha->hw.mcast[i].addr[0] != 0) || 
3381                         (ha->hw.mcast[i].addr[1] != 0) ||
3382                         (ha->hw.mcast[i].addr[2] != 0) ||
3383                         (ha->hw.mcast[i].addr[3] != 0) ||
3384                         (ha->hw.mcast[i].addr[4] != 0) ||
3385                         (ha->hw.mcast[i].addr[5] != 0)) {
3386
3387                         bcopy(ha->hw.mcast[i].addr, mcast, ETHER_ADDR_LEN);
3388                         mcast = mcast + ETHER_ADDR_LEN;
3389                         count++;
3390
3391                         if (count == Q8_MAX_MAC_ADDRS) {
3392                                 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr,
3393                                         add_mcast, count)) {
3394                                         device_printf(ha->pci_dev,
3395                                                 "%s: failed\n", __func__);
3396                                         return (-1);
3397                                 }
3398
3399                                 count = 0;
3400                                 mcast = ha->hw.mac_addr_arr;
3401                                 memset(mcast, 0,
3402                                         (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3403                         }
3404
3405                         nmcast--;
3406                 }
3407         }
3408
3409         if (count) {
3410                 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mcast,
3411                         count)) {
3412                         device_printf(ha->pci_dev, "%s: failed\n", __func__);
3413                         return (-1);
3414                 }
3415         }
3416         QL_DPRINT2(ha, (ha->pci_dev,
3417                 "%s:[0x%x] exit nmcast = %d \n", __func__, add_mcast, nmcast));
3418
3419         return 0;
3420 }
3421
3422 static int
3423 qla_hw_add_all_mcast(qla_host_t *ha)
3424 {
3425         int ret;
3426
3427         ret = qla_hw_all_mcast(ha, 1);
3428
3429         return (ret);
3430 }
3431
3432 int
3433 qla_hw_del_all_mcast(qla_host_t *ha)
3434 {
3435         int ret;
3436
3437         ret = qla_hw_all_mcast(ha, 0);
3438
3439         bzero(ha->hw.mcast, (sizeof (qla_mcast_t) * Q8_MAX_NUM_MULTICAST_ADDRS));
3440         ha->hw.nmcast = 0;
3441
3442         return (ret);
3443 }
3444
3445 static int
3446 qla_hw_mac_addr_present(qla_host_t *ha, uint8_t *mta)
3447 {
3448         int i;
3449
3450         for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3451                 if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0)
3452                         return (0); /* it has already been added */
3453         }
3454         return (-1);
3455 }
3456
3457 static int
3458 qla_hw_add_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
3459 {
3460         int i;
3461
3462         for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3463
3464                 if ((ha->hw.mcast[i].addr[0] == 0) && 
3465                         (ha->hw.mcast[i].addr[1] == 0) &&
3466                         (ha->hw.mcast[i].addr[2] == 0) &&
3467                         (ha->hw.mcast[i].addr[3] == 0) &&
3468                         (ha->hw.mcast[i].addr[4] == 0) &&
3469                         (ha->hw.mcast[i].addr[5] == 0)) {
3470
3471                         bcopy(mta, ha->hw.mcast[i].addr, Q8_MAC_ADDR_LEN);
3472                         ha->hw.nmcast++;        
3473
3474                         mta = mta + ETHER_ADDR_LEN;
3475                         nmcast--;
3476
3477                         if (nmcast == 0)
3478                                 break;
3479                 }
3480
3481         }
3482         return 0;
3483 }
3484
3485 static int
3486 qla_hw_del_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
3487 {
3488         int i;
3489
3490         for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3491                 if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) {
3492
3493                         ha->hw.mcast[i].addr[0] = 0;
3494                         ha->hw.mcast[i].addr[1] = 0;
3495                         ha->hw.mcast[i].addr[2] = 0;
3496                         ha->hw.mcast[i].addr[3] = 0;
3497                         ha->hw.mcast[i].addr[4] = 0;
3498                         ha->hw.mcast[i].addr[5] = 0;
3499
3500                         ha->hw.nmcast--;        
3501
3502                         mta = mta + ETHER_ADDR_LEN;
3503                         nmcast--;
3504
3505                         if (nmcast == 0)
3506                                 break;
3507                 }
3508         }
3509         return 0;
3510 }
3511
3512 /*
3513  * Name: ql_hw_set_multi
3514  * Function: Sets the Multicast Addresses provided by the host OS into the
3515  *      hardware (for the given interface)
3516  */
3517 int
3518 ql_hw_set_multi(qla_host_t *ha, uint8_t *mcast_addr, uint32_t mcnt,
3519         uint32_t add_mac)
3520 {
3521         uint8_t *mta = mcast_addr;
3522         int i;
3523         int ret = 0;
3524         uint32_t count = 0;
3525         uint8_t *mcast;
3526
3527         mcast = ha->hw.mac_addr_arr;
3528         memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3529
3530         for (i = 0; i < mcnt; i++) {
3531                 if (mta[0] || mta[1] || mta[2] || mta[3] || mta[4] || mta[5]) {
3532                         if (add_mac) {
3533                                 if (qla_hw_mac_addr_present(ha, mta) != 0) {
3534                                         bcopy(mta, mcast, ETHER_ADDR_LEN);
3535                                         mcast = mcast + ETHER_ADDR_LEN;
3536                                         count++;
3537                                 }
3538                         } else {
3539                                 if (qla_hw_mac_addr_present(ha, mta) == 0) {
3540                                         bcopy(mta, mcast, ETHER_ADDR_LEN);
3541                                         mcast = mcast + ETHER_ADDR_LEN;
3542                                         count++;
3543                                 }
3544                         }
3545                 }
3546                 if (count == Q8_MAX_MAC_ADDRS) {
3547                         if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr,
3548                                 add_mac, count)) {
3549                                 device_printf(ha->pci_dev, "%s: failed\n",
3550                                         __func__);
3551                                 return (-1);
3552                         }
3553
3554                         if (add_mac) {
3555                                 qla_hw_add_mcast(ha, ha->hw.mac_addr_arr,
3556                                         count);
3557                         } else {
3558                                 qla_hw_del_mcast(ha, ha->hw.mac_addr_arr,
3559                                         count);
3560                         }
3561
3562                         count = 0;
3563                         mcast = ha->hw.mac_addr_arr;
3564                         memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3565                 }
3566
3567                 mta += Q8_MAC_ADDR_LEN;
3568         }
3569
3570         if (count) {
3571                 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mac,
3572                         count)) {
3573                         device_printf(ha->pci_dev, "%s: failed\n", __func__);
3574                         return (-1);
3575                 }
3576                 if (add_mac) {
3577                         qla_hw_add_mcast(ha, ha->hw.mac_addr_arr, count);
3578                 } else {
3579                         qla_hw_del_mcast(ha, ha->hw.mac_addr_arr, count);
3580                 }
3581         }
3582
3583         return (ret);
3584 }
3585
3586 /*
3587  * Name: ql_hw_tx_done_locked
3588  * Function: Handle Transmit Completions
3589  */
3590 void
3591 ql_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx)
3592 {
3593         qla_tx_buf_t *txb;
3594         qla_hw_t *hw = &ha->hw;
3595         uint32_t comp_idx, comp_count = 0;
3596         qla_hw_tx_cntxt_t *hw_tx_cntxt;
3597
3598         hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
3599
3600         /* retrieve the index of the last tx ring entry completed by hardware */
3601         comp_idx = qla_le32_to_host(*(hw_tx_cntxt->tx_cons));
3602
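        /*
         * Walk the ring from the last entry processed (txr_comp) up to
         * the hardware consumer index, unloading the DMA maps and
         * freeing the completed mbufs.
         */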
3603         while (comp_idx != hw_tx_cntxt->txr_comp) {
3604
3605                 txb = &ha->tx_ring[txr_idx].tx_buf[hw_tx_cntxt->txr_comp];
3606
3607                 hw_tx_cntxt->txr_comp++;
3608                 if (hw_tx_cntxt->txr_comp == NUM_TX_DESCRIPTORS)
3609                         hw_tx_cntxt->txr_comp = 0;
3610
3611                 comp_count++;
3612
3613                 if (txb->m_head) {
3614                         if_inc_counter(ha->ifp, IFCOUNTER_OPACKETS, 1);
3615
3616                         bus_dmamap_sync(ha->tx_tag, txb->map,
3617                                 BUS_DMASYNC_POSTWRITE);
3618                         bus_dmamap_unload(ha->tx_tag, txb->map);
3619                         m_freem(txb->m_head);
3620
3621                         txb->m_head = NULL;
3622                 }
3623         }
3624
3625         hw_tx_cntxt->txr_free += comp_count;
3626         return;
3627 }
3628
3629 void
3630 ql_update_link_state(qla_host_t *ha)
3631 {
3632         uint32_t link_state;
3633         uint32_t prev_link_state;
3634
3635         if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3636                 ha->hw.link_up = 0;
3637                 return;
3638         }
3639         link_state = READ_REG32(ha, Q8_LINK_STATE);
3640
3641         prev_link_state =  ha->hw.link_up;
3642
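        /*
         * Q8_LINK_STATE carries the link state in 4-bit nibbles per
         * function (function 0 in bits 3:0, function 1 in bits 7:4);
         * a nibble value of 1 means link up.
         */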
3643         if (ha->pci_func == 0) 
3644                 ha->hw.link_up = (((link_state & 0xF) == 1)? 1 : 0);
3645         else
3646                 ha->hw.link_up = ((((link_state >> 4)& 0xF) == 1)? 1 : 0);
3647
3648         if (prev_link_state !=  ha->hw.link_up) {
3649                 if (ha->hw.link_up) {
3650                         if_link_state_change(ha->ifp, LINK_STATE_UP);
3651                 } else {
3652                         if_link_state_change(ha->ifp, LINK_STATE_DOWN);
3653                 }
3654         }
3655         return;
3656 }
3657
3658 int
3659 ql_hw_check_health(qla_host_t *ha)
3660 {
3661         uint32_t val;
3662
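        /*
         * Throttle the check: only every 500th invocation reads the
         * temperature and firmware heartbeat registers.
         */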
3663         ha->hw.health_count++;
3664
3665         if (ha->hw.health_count < 500)
3666                 return 0;
3667
3668         ha->hw.health_count = 0;
3669
3670         val = READ_REG32(ha, Q8_ASIC_TEMPERATURE);
3671
3672         if (((val & 0xFFFF) == 2) || ((val & 0xFFFF) == 3) ||
3673                 (QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE))) {
3674                 device_printf(ha->pci_dev, "%s: Temperature Alert [0x%08x]\n",
3675                         __func__, val);
3676                 return -1;
3677         }
3678
3679         val = READ_REG32(ha, Q8_FIRMWARE_HEARTBEAT);
3680
3681         if ((val != ha->hw.hbeat_value) &&
3682                 (!(QL_ERR_INJECT(ha, INJCT_HEARTBEAT_FAILURE)))) {
3683                 ha->hw.hbeat_value = val;
3684                 ha->hw.hbeat_failure = 0;
3685                 return 0;
3686         }
3687
3688         ha->hw.hbeat_failure++;
3689
3690
3691         if ((ha->dbg_level & 0x8000) && (ha->hw.hbeat_failure == 1))
3692                 device_printf(ha->pci_dev, "%s: Heartbeat Failure 1 [0x%08x]\n",
3693                         __func__, val);
3694         if (ha->hw.hbeat_failure < 2) /* we ignore the first failure */
3695                 return 0;
3696         else
3697                 device_printf(ha->pci_dev, "%s: Heartbeat Failure [0x%08x]\n",
3698                         __func__, val);
3699
3700         return -1;
3701 }
3702
3703 static int
3704 qla_init_nic_func(qla_host_t *ha)
3705 {
3706         device_t                dev;
3707         q80_init_nic_func_t     *init_nic;
3708         q80_init_nic_func_rsp_t *init_nic_rsp;
3709         uint32_t                err;
3710
3711         dev = ha->pci_dev;
3712
3713         init_nic = (q80_init_nic_func_t *)ha->hw.mbox;
3714         bzero(init_nic, sizeof(q80_init_nic_func_t));
3715
3716         init_nic->opcode = Q8_MBX_INIT_NIC_FUNC;
3717         init_nic->count_version = (sizeof (q80_init_nic_func_t) >> 2);
3718         init_nic->count_version |= Q8_MBX_CMD_VERSION;
3719
3720         init_nic->options = Q8_INIT_NIC_REG_DCBX_CHNG_AEN;
3721         init_nic->options |= Q8_INIT_NIC_REG_SFP_CHNG_AEN;
3722         init_nic->options |= Q8_INIT_NIC_REG_IDC_AEN;
3723
3724 //qla_dump_buf8(ha, __func__, init_nic, sizeof (q80_init_nic_func_t));
3725         if (qla_mbx_cmd(ha, (uint32_t *)init_nic,
3726                 (sizeof (q80_init_nic_func_t) >> 2),
3727                 ha->hw.mbox, (sizeof (q80_init_nic_func_rsp_t) >> 2), 0)) {
3728                 device_printf(dev, "%s: failed\n", __func__);
3729                 return -1;
3730         }
3731
3732         init_nic_rsp = (q80_init_nic_func_rsp_t *)ha->hw.mbox;
3733 // qla_dump_buf8(ha, __func__, init_nic_rsp, sizeof (q80_init_nic_func_rsp_t));
3734
3735         err = Q8_MBX_RSP_STATUS(init_nic_rsp->regcnt_status);
3736
3737         if (err) {
3738                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3739         }
3740
3741         return 0;
3742 }
3743
3744 static int
3745 qla_stop_nic_func(qla_host_t *ha)
3746 {
3747         device_t                dev;
3748         q80_stop_nic_func_t     *stop_nic;
3749         q80_stop_nic_func_rsp_t *stop_nic_rsp;
3750         uint32_t                err;
3751
3752         dev = ha->pci_dev;
3753
3754         stop_nic = (q80_stop_nic_func_t *)ha->hw.mbox;
3755         bzero(stop_nic, sizeof(q80_stop_nic_func_t));
3756
3757         stop_nic->opcode = Q8_MBX_STOP_NIC_FUNC;
3758         stop_nic->count_version = (sizeof (q80_stop_nic_func_t) >> 2);
3759         stop_nic->count_version |= Q8_MBX_CMD_VERSION;
3760
3761         stop_nic->options = Q8_STOP_NIC_DEREG_DCBX_CHNG_AEN;
3762         stop_nic->options |= Q8_STOP_NIC_DEREG_SFP_CHNG_AEN;
3763
3764 //qla_dump_buf8(ha, __func__, stop_nic, sizeof (q80_stop_nic_func_t));
3765         if (qla_mbx_cmd(ha, (uint32_t *)stop_nic,
3766                 (sizeof (q80_stop_nic_func_t) >> 2),
3767                 ha->hw.mbox, (sizeof (q80_stop_nic_func_rsp_t) >> 2), 0)) {
3768                 device_printf(dev, "%s: failed\n", __func__);
3769                 return -1;
3770         }
3771
3772         stop_nic_rsp = (q80_stop_nic_func_rsp_t *)ha->hw.mbox;
3773 //qla_dump_buf8(ha, __func__, stop_nic_rsp, sizeof (q80_stop_nic_func_rsp_t));
3774
3775         err = Q8_MBX_RSP_STATUS(stop_nic_rsp->regcnt_status);
3776
3777         if (err) {
3778                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3779         }
3780
3781         return 0;
3782 }
3783
3784 static int
3785 qla_query_fw_dcbx_caps(qla_host_t *ha)
3786 {
3787         device_t                        dev;
3788         q80_query_fw_dcbx_caps_t        *fw_dcbx;
3789         q80_query_fw_dcbx_caps_rsp_t    *fw_dcbx_rsp;
3790         uint32_t                        err;
3791
3792         dev = ha->pci_dev;
3793
3794         fw_dcbx = (q80_query_fw_dcbx_caps_t *)ha->hw.mbox;
3795         bzero(fw_dcbx, sizeof(q80_query_fw_dcbx_caps_t));
3796
3797         fw_dcbx->opcode = Q8_MBX_GET_FW_DCBX_CAPS;
3798         fw_dcbx->count_version = (sizeof (q80_query_fw_dcbx_caps_t) >> 2);
3799         fw_dcbx->count_version |= Q8_MBX_CMD_VERSION;
3800
3801         ql_dump_buf8(ha, __func__, fw_dcbx, sizeof (q80_query_fw_dcbx_caps_t));
3802         if (qla_mbx_cmd(ha, (uint32_t *)fw_dcbx,
3803                 (sizeof (q80_query_fw_dcbx_caps_t) >> 2),
3804                 ha->hw.mbox, (sizeof (q80_query_fw_dcbx_caps_rsp_t) >> 2), 0)) {
3805                 device_printf(dev, "%s: failed\n", __func__);
3806                 return -1;
3807         }
3808
3809         fw_dcbx_rsp = (q80_query_fw_dcbx_caps_rsp_t *)ha->hw.mbox;
3810         ql_dump_buf8(ha, __func__, fw_dcbx_rsp,
3811                 sizeof (q80_query_fw_dcbx_caps_rsp_t));
3812
3813         err = Q8_MBX_RSP_STATUS(fw_dcbx_rsp->regcnt_status);
3814
3815         if (err) {
3816                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3817         }
3818
3819         return 0;
3820 }
3821
3822 static int
3823 qla_idc_ack(qla_host_t *ha, uint32_t aen_mb1, uint32_t aen_mb2,
3824         uint32_t aen_mb3, uint32_t aen_mb4)
3825 {
3826         device_t                dev;
3827         q80_idc_ack_t           *idc_ack;
3828         q80_idc_ack_rsp_t       *idc_ack_rsp;
3829         uint32_t                err;
3830         int                     count = 300;
3831
3832         dev = ha->pci_dev;
3833
3834         idc_ack = (q80_idc_ack_t *)ha->hw.mbox;
3835         bzero(idc_ack, sizeof(q80_idc_ack_t));
3836
3837         idc_ack->opcode = Q8_MBX_IDC_ACK;
3838         idc_ack->count_version = (sizeof (q80_idc_ack_t) >> 2);
3839         idc_ack->count_version |= Q8_MBX_CMD_VERSION;
3840
3841         idc_ack->aen_mb1 = aen_mb1;
3842         idc_ack->aen_mb2 = aen_mb2;
3843         idc_ack->aen_mb3 = aen_mb3;
3844         idc_ack->aen_mb4 = aen_mb4;
3845
3846         ha->hw.imd_compl = 0;
3847
3848         if (qla_mbx_cmd(ha, (uint32_t *)idc_ack,
3849                 (sizeof (q80_idc_ack_t) >> 2),
3850                 ha->hw.mbox, (sizeof (q80_idc_ack_rsp_t) >> 2), 0)) {
3851                 device_printf(dev, "%s: failed\n", __func__);
3852                 return -1;
3853         }
3854
3855         idc_ack_rsp = (q80_idc_ack_rsp_t *)ha->hw.mbox;
3856
3857         err = Q8_MBX_RSP_STATUS(idc_ack_rsp->regcnt_status);
3858
3859         if (err) {
3860                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3861                 return(-1);
3862         }
3863
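        /* wait up to 30 seconds (300 * 100ms) for the IDC completion
         * to be signalled via ha->hw.imd_compl */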
3864         while (count && !ha->hw.imd_compl) {
3865                 qla_mdelay(__func__, 100);
3866                 count--;
3867         }
3868
3869         if (!count)
3870                 return -1;
3871         else
3872                 device_printf(dev, "%s: count %d\n", __func__, count);
3873
3874         return (0);
3875 }
3876
3877 static int
3878 qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits)
3879 {
3880         device_t                dev;
3881         q80_set_port_cfg_t      *pcfg;
3882         q80_set_port_cfg_rsp_t  *pfg_rsp;
3883         uint32_t                err;
3884         int                     count = 300;
3885
3886         dev = ha->pci_dev;
3887
3888         pcfg = (q80_set_port_cfg_t *)ha->hw.mbox;
3889         bzero(pcfg, sizeof(q80_set_port_cfg_t));
3890
3891         pcfg->opcode = Q8_MBX_SET_PORT_CONFIG;
3892         pcfg->count_version = (sizeof (q80_set_port_cfg_t) >> 2);
3893         pcfg->count_version |= Q8_MBX_CMD_VERSION;
3894
3895         pcfg->cfg_bits = cfg_bits;
3896
3897         device_printf(dev, "%s: cfg_bits"
3898                 " [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
3899                 " [0x%x, 0x%x, 0x%x]\n", __func__,
3900                 ((cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
3901                 ((cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
3902                 ((cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0));
3903
3904         ha->hw.imd_compl = 0;
3905
3906         if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
3907                 (sizeof (q80_set_port_cfg_t) >> 2),
3908                 ha->hw.mbox, (sizeof (q80_set_port_cfg_rsp_t) >> 2), 0)) {
3909                 device_printf(dev, "%s: failed\n", __func__);
3910                 return -1;
3911         }
3912
3913         pfg_rsp = (q80_set_port_cfg_rsp_t *)ha->hw.mbox;
3914
3915         err = Q8_MBX_RSP_STATUS(pfg_rsp->regcnt_status);
3916
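        /*
         * On an intermediate response, the firmware signals final
         * completion later (via ha->hw.imd_compl); wait up to 30
         * seconds (300 * 100ms) for it before declaring success.
         */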
3917         if (err == Q8_MBX_RSP_IDC_INTRMD_RSP) {
3918                 while (count && !ha->hw.imd_compl) {
3919                         qla_mdelay(__func__, 100);
3920                         count--;
3921                 }
3922                 if (count) {
3923                         device_printf(dev, "%s: count %d\n", __func__, count);
3924
3925                         err = 0;
3926                 }
3927         }
3928
3929         if (err) {
3930                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3931                 return(-1);
3932         }
3933
3934         return (0);
3935 }
3936
3937
3938 static int
3939 qla_get_minidump_tmplt_size(qla_host_t *ha, uint32_t *size)
3940 {
3941         uint32_t                        err;
3942         device_t                        dev = ha->pci_dev;
3943         q80_config_md_templ_size_t      *md_size;
3944         q80_config_md_templ_size_rsp_t  *md_size_rsp;
3945
3946 #ifndef QL_LDFLASH_FW
3947
3948         ql_minidump_template_hdr_t *hdr;
3949
3950         hdr = (ql_minidump_template_hdr_t *)ql83xx_minidump;
3951         *size = hdr->size_of_template;
3952         return (0);
3953
3954 #endif /* #ifndef QL_LDFLASH_FW */
3955
3956         md_size = (q80_config_md_templ_size_t *) ha->hw.mbox;
3957         bzero(md_size, sizeof(q80_config_md_templ_size_t));
3958
3959         md_size->opcode = Q8_MBX_GET_MINIDUMP_TMPLT_SIZE;
3960         md_size->count_version = (sizeof (q80_config_md_templ_size_t) >> 2);
3961         md_size->count_version |= Q8_MBX_CMD_VERSION;
3962
3963         if (qla_mbx_cmd(ha, (uint32_t *) md_size,
3964                 (sizeof(q80_config_md_templ_size_t) >> 2), ha->hw.mbox,
3965                 (sizeof(q80_config_md_templ_size_rsp_t) >> 2), 0)) {
3966
3967                 device_printf(dev, "%s: failed\n", __func__);
3968
3969                 return (-1);
3970         }
3971
3972         md_size_rsp = (q80_config_md_templ_size_rsp_t *) ha->hw.mbox;
3973
3974         err = Q8_MBX_RSP_STATUS(md_size_rsp->regcnt_status);
3975
3976         if (err) {
3977                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3978                 return(-1);
3979         }
3980
3981         *size = md_size_rsp->templ_size;
3982
3983         return (0);
3984 }
3985
3986 static int
3987 qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits)
3988 {
3989         device_t                dev;
3990         q80_get_port_cfg_t      *pcfg;
3991         q80_get_port_cfg_rsp_t  *pcfg_rsp;
3992         uint32_t                err;
3993
3994         dev = ha->pci_dev;
3995
3996         pcfg = (q80_get_port_cfg_t *)ha->hw.mbox;
3997         bzero(pcfg, sizeof(q80_get_port_cfg_t));
3998
3999         pcfg->opcode = Q8_MBX_GET_PORT_CONFIG;
4000         pcfg->count_version = (sizeof (q80_get_port_cfg_t) >> 2);
4001         pcfg->count_version |= Q8_MBX_CMD_VERSION;
4002
4003         if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
4004                 (sizeof (q80_get_port_cfg_t) >> 2),
4005                 ha->hw.mbox, (sizeof (q80_get_port_cfg_rsp_t) >> 2), 0)) {
4006                 device_printf(dev, "%s: failed\n", __func__);
4007                 return -1;
4008         }
4009
4010         pcfg_rsp = (q80_get_port_cfg_rsp_t *)ha->hw.mbox;
4011
4012         err = Q8_MBX_RSP_STATUS(pcfg_rsp->regcnt_status);
4013
4014         if (err) {
4015                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4016                 return(-1);
4017         }
4018
4019         device_printf(dev, "%s: [cfg_bits, port type]"
4020                 " [0x%08x, 0x%02x] [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
4021                 " [0x%x, 0x%x, 0x%x]\n", __func__,
4022                 pcfg_rsp->cfg_bits, pcfg_rsp->phys_port_type,
4023                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
4024                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
4025                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0)
4026                 );
4027
4028         *cfg_bits = pcfg_rsp->cfg_bits;
4029
4030         return (0);
4031 }
4032
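/*
 * Name: ql_iscsi_pdu
 * Function: Returns 0 if the mbuf is a TCP segment to or from the
 *      well-known iSCSI port (3260); -1 otherwise.
 */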
4033 int
4034 ql_iscsi_pdu(qla_host_t *ha, struct mbuf *mp)
4035 {
4036         struct ether_vlan_header        *eh;
4037         uint16_t                        etype;
4038         struct ip                       *ip = NULL;
4039         struct ip6_hdr                  *ip6 = NULL;
4040         struct tcphdr                   *th = NULL;
4041         uint32_t                        hdrlen;
4042         uint32_t                        offset;
4043         uint8_t                         buf[sizeof(struct ip6_hdr)];
4044
4045         eh = mtod(mp, struct ether_vlan_header *);
4046
4047         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4048                 hdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4049                 etype = ntohs(eh->evl_proto);
4050         } else {
4051                 hdrlen = ETHER_HDR_LEN;
4052                 etype = ntohs(eh->evl_encap_proto);
4053         }
4054
4055         if (etype == ETHERTYPE_IP) {
4056
4057                 offset = (hdrlen + sizeof (struct ip));
4058
4059                 if (mp->m_len >= offset) {
4060                         ip = (struct ip *)(mp->m_data + hdrlen);
4061                 } else {
4062                         m_copydata(mp, hdrlen, sizeof (struct ip), buf);
4063                         ip = (struct ip *)buf;
4064                 }
4065
4066                 if (ip->ip_p == IPPROTO_TCP) {
4067
4068                         hdrlen += ip->ip_hl << 2;
4069                         offset = hdrlen + 4;
4070
4071                         if (mp->m_len >= offset) {
4072                                 th = (struct tcphdr *)(mp->m_data + hdrlen);
4073                         } else {
4074                                 m_copydata(mp, hdrlen, 4, buf);
4075                                 th = (struct tcphdr *)buf;
4076                         }
4077                 }
4078
4079         } else if (etype == ETHERTYPE_IPV6) {
4080
4081                 offset = (hdrlen + sizeof (struct ip6_hdr));
4082
4083                 if (mp->m_len >= offset) {
4084                         ip6 = (struct ip6_hdr *)(mp->m_data + hdrlen);
4085                 } else {
4086                         m_copydata(mp, hdrlen, sizeof (struct ip6_hdr), buf);
4087                         ip6 = (struct ip6_hdr *)buf;
4088                 }
4089
4090                 if (ip6->ip6_nxt == IPPROTO_TCP) {
4091
4092                         hdrlen += sizeof(struct ip6_hdr);
4093                         offset = hdrlen + 4;
4094
4095                         if (mp->m_len >= offset) {
4096                                 th = (struct tcphdr *)(mp->m_data + hdrlen);
4097                         } else {
4098                                 m_copydata(mp, hdrlen, 4, buf);
4099                                 th = (struct tcphdr *)buf;
4100                         }
4101                 }
4102         }
4103
4104         if (th != NULL) {
4105                 if ((th->th_sport == htons(3260)) ||
4106                         (th->th_dport == htons(3260)))
4107                         return 0;
4108         }
4109         return (-1);
4110 }
4111
4112 void
4113 qla_hw_async_event(qla_host_t *ha)
4114 {
4115         switch (ha->hw.aen_mb0) {
4116         case 0x8101:
4117                 (void)qla_idc_ack(ha, ha->hw.aen_mb1, ha->hw.aen_mb2,
4118                         ha->hw.aen_mb3, ha->hw.aen_mb4);
4119
4120                 break;
4121
4122         default:
4123                 break;
4124         }
4125
4126         return;
4127 }
4128
4129 #ifdef QL_LDFLASH_FW
4130 static int
4131 ql_get_minidump_template(qla_host_t *ha)
4132 {
4133         uint32_t                        err;
4134         device_t                        dev = ha->pci_dev;
4135         q80_config_md_templ_cmd_t       *md_templ;
4136         q80_config_md_templ_cmd_rsp_t   *md_templ_rsp;
4137
4138         md_templ = (q80_config_md_templ_cmd_t *) ha->hw.mbox;
4139         bzero(md_templ, (sizeof (q80_config_md_templ_cmd_t)));
4140
4141         md_templ->opcode = Q8_MBX_GET_MINIDUMP_TMPLT;
4142         md_templ->count_version = (sizeof (q80_config_md_templ_cmd_t) >> 2);
4143         md_templ->count_version |= Q8_MBX_CMD_VERSION;
4144
4145         md_templ->buf_addr = ha->hw.dma_buf.minidump.dma_addr;
4146         md_templ->buff_size = ha->hw.dma_buf.minidump.size;
4147
4148         if (qla_mbx_cmd(ha, (uint32_t *) md_templ,
4149                 (sizeof(q80_config_md_templ_cmd_t) >> 2),
4150                  ha->hw.mbox,
4151                 (sizeof(q80_config_md_templ_cmd_rsp_t) >> 2), 0)) {
4152
4153                 device_printf(dev, "%s: failed\n", __func__);
4154
4155                 return (-1);
4156         }
4157
4158         md_templ_rsp = (q80_config_md_templ_cmd_rsp_t *) ha->hw.mbox;
4159
4160         err = Q8_MBX_RSP_STATUS(md_templ_rsp->regcnt_status);
4161
4162         if (err) {
4163                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4164                 return (-1);
4165         }
4166
4167         return (0);
4168
4169 }
4170 #endif /* #ifdef QL_LDFLASH_FW */
4171
4172 /*
4173  * Minidump related functionality 
4174  */
4175
4176 static int ql_parse_template(qla_host_t *ha);
4177
4178 static uint32_t ql_rdcrb(qla_host_t *ha,
4179                         ql_minidump_entry_rdcrb_t *crb_entry,
4180                         uint32_t * data_buff);
4181
4182 static uint32_t ql_pollrd(qla_host_t *ha,
4183                         ql_minidump_entry_pollrd_t *entry,
4184                         uint32_t * data_buff);
4185
4186 static uint32_t ql_pollrd_modify_write(qla_host_t *ha,
4187                         ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
4188                         uint32_t *data_buff);
4189
4190 static uint32_t ql_L2Cache(qla_host_t *ha,
4191                         ql_minidump_entry_cache_t *cacheEntry,
4192                         uint32_t * data_buff);
4193
4194 static uint32_t ql_L1Cache(qla_host_t *ha,
4195                         ql_minidump_entry_cache_t *cacheEntry,
4196                         uint32_t *data_buff);
4197
4198 static uint32_t ql_rdocm(qla_host_t *ha,
4199                         ql_minidump_entry_rdocm_t *ocmEntry,
4200                         uint32_t *data_buff);
4201
4202 static uint32_t ql_rdmem(qla_host_t *ha,
4203                         ql_minidump_entry_rdmem_t *mem_entry,
4204                         uint32_t *data_buff);
4205
4206 static uint32_t ql_rdrom(qla_host_t *ha,
4207                         ql_minidump_entry_rdrom_t *romEntry,
4208                         uint32_t *data_buff);
4209
4210 static uint32_t ql_rdmux(qla_host_t *ha,
4211                         ql_minidump_entry_mux_t *muxEntry,
4212                         uint32_t *data_buff);
4213
4214 static uint32_t ql_rdmux2(qla_host_t *ha,
4215                         ql_minidump_entry_mux2_t *muxEntry,
4216                         uint32_t *data_buff);
4217
4218 static uint32_t ql_rdqueue(qla_host_t *ha,
4219                         ql_minidump_entry_queue_t *queueEntry,
4220                         uint32_t *data_buff);
4221
4222 static uint32_t ql_cntrl(qla_host_t *ha,
4223                         ql_minidump_template_hdr_t *template_hdr,
4224                         ql_minidump_entry_cntrl_t *crbEntry);
4225
4226
4227 static uint32_t
4228 ql_minidump_size(qla_host_t *ha)
4229 {
4230         uint32_t i, k;
4231         uint32_t size = 0;
4232         ql_minidump_template_hdr_t *hdr;
4233
4234         hdr = (ql_minidump_template_hdr_t *)ha->hw.dma_buf.minidump.dma_b;
4235
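        /*
         * Total the per-level capture sizes for every level (bit k,
         * starting at bit 1) selected in mdump_capture_mask.
         */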
4236         i = 0x2;
4237
4238         for (k = 1; k < QL_DBG_CAP_SIZE_ARRAY_LEN; k++) {
4239                 if (i & ha->hw.mdump_capture_mask)
4240                         size += hdr->capture_size_array[k];
4241                 i = i << 1;
4242         }
4243         return (size);
4244 }
4245
4246 static void
4247 ql_free_minidump_buffer(qla_host_t *ha)
4248 {
4249         if (ha->hw.mdump_buffer != NULL) {
4250                 free(ha->hw.mdump_buffer, M_QLA83XXBUF);
4251                 ha->hw.mdump_buffer = NULL;
4252                 ha->hw.mdump_buffer_size = 0;
4253         }
4254         return;
4255 }
4256
4257 static int
4258 ql_alloc_minidump_buffer(qla_host_t *ha)
4259 {
4260         ha->hw.mdump_buffer_size = ql_minidump_size(ha);
4261
4262         if (!ha->hw.mdump_buffer_size)
4263                 return (-1);
4264
4265         ha->hw.mdump_buffer = malloc(ha->hw.mdump_buffer_size, M_QLA83XXBUF,
4266                                         M_NOWAIT);
4267
4268         if (ha->hw.mdump_buffer == NULL)
4269                 return (-1);
4270
4271         return (0);
4272 }
4273
4274 static void
4275 ql_free_minidump_template_buffer(qla_host_t *ha)
4276 {
4277         if (ha->hw.mdump_template != NULL) {
4278                 free(ha->hw.mdump_template, M_QLA83XXBUF);
4279                 ha->hw.mdump_template = NULL;
4280                 ha->hw.mdump_template_size = 0;
4281         }
4282         return;
4283 }
4284
4285 static int
4286 ql_alloc_minidump_template_buffer(qla_host_t *ha)
4287 {
4288         ha->hw.mdump_template_size = ha->hw.dma_buf.minidump.size;
4289
4290         ha->hw.mdump_template = malloc(ha->hw.mdump_template_size,
4291                                         M_QLA83XXBUF, M_NOWAIT);
4292
4293         if (ha->hw.mdump_template == NULL)
4294                 return (-1);
4295
4296         return (0);
4297 }
4298
4299 static int
4300 ql_alloc_minidump_buffers(qla_host_t *ha)
4301 {
4302         int ret;
4303
4304         ret = ql_alloc_minidump_template_buffer(ha);
4305
4306         if (ret)
4307                 return (ret);
4308
4309         ret = ql_alloc_minidump_buffer(ha);
4310
4311         if (ret)
4312                 ql_free_minidump_template_buffer(ha);
4313
4314         return (ret);
4315 }
4316
4317
4318 static uint32_t
4319 ql_validate_minidump_checksum(qla_host_t *ha)
4320 {
4321         uint64_t sum = 0;
4322         int count;
4323         uint32_t *template_buff;
4324
4325         count = ha->hw.dma_buf.minidump.size / sizeof (uint32_t);
4326         template_buff = ha->hw.dma_buf.minidump.dma_b;
4327
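        /*
         * Sum the template as 32-bit words into a 64-bit accumulator,
         * fold the carries back into the low 32 bits, and return the
         * one's complement; a checksum-correct template yields 0.
         */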
4328         while (count-- > 0) {
4329                 sum += *template_buff++;
4330         }
4331
4332         while (sum >> 32) {
4333                 sum = (sum & 0xFFFFFFFF) + (sum >> 32);
4334         }
4335
4336         return (~sum);
4337 }
4338
4339 int
4340 ql_minidump_init(qla_host_t *ha)
4341 {
4342         int             ret = 0;
4343         uint32_t        template_size = 0;
4344         device_t        dev = ha->pci_dev;
4345
4346         /*
4347          * Get Minidump Template Size
4348          */
4349         ret = qla_get_minidump_tmplt_size(ha, &template_size);
4350
4351         if (ret || (template_size == 0)) {
4352                 device_printf(dev, "%s: failed [%d, %d]\n", __func__, ret,
4353                         template_size);
4354                 return (-1);
4355         }
4356
4357         /*
4358          * Allocate Memory for Minidump Template
4359          */
4360
4361         ha->hw.dma_buf.minidump.alignment = 8;
4362         ha->hw.dma_buf.minidump.size = template_size;
4363
4364 #ifdef QL_LDFLASH_FW
4365         if (ql_alloc_dmabuf(ha, &ha->hw.dma_buf.minidump)) {
4366
4367                 device_printf(dev, "%s: minidump dma alloc failed\n", __func__);
4368
4369                 return (-1);
4370         }
4371         ha->hw.dma_buf.flags.minidump = 1;
4372
4373         /*
4374          * Retrieve Minidump Template
4375          */
4376         ret = ql_get_minidump_template(ha);
4377 #else
4378         ha->hw.dma_buf.minidump.dma_b = ql83xx_minidump;
4379
4380 #endif /* #ifdef QL_LDFLASH_FW */
4381
4382         if (ret == 0) {
4383
4384                 ret = ql_validate_minidump_checksum(ha);
4385
4386                 if (ret == 0) {
4387
4388                         ret = ql_alloc_minidump_buffers(ha);
4389
4390                         if (ret == 0)
4391                                 ha->hw.mdump_init = 1;
4392                         else
4393                                 device_printf(dev,
4394                                         "%s: ql_alloc_minidump_buffers"
4395                                         " failed\n", __func__);
4396                 } else {
4397                         device_printf(dev, "%s: ql_validate_minidump_checksum"
4398                                 " failed\n", __func__);
4399                 }
4400         } else {
4401                 device_printf(dev, "%s: ql_get_minidump_template failed\n",
4402                          __func__);
4403         }
4404
4405         if (ret)
4406                 ql_minidump_free(ha);
4407
4408         return (ret);
4409 }
4410
4411 static void
4412 ql_minidump_free(qla_host_t *ha)
4413 {
4414         ha->hw.mdump_init = 0;
4415         if (ha->hw.dma_buf.flags.minidump) {
4416                 ha->hw.dma_buf.flags.minidump = 0;
4417                 ql_free_dmabuf(ha, &ha->hw.dma_buf.minidump);
4418         }
4419
4420         ql_free_minidump_template_buffer(ha);
4421         ql_free_minidump_buffer(ha);
4422
4423         return;
4424 }
4425
4426 void
4427 ql_minidump(qla_host_t *ha)
4428 {
4429         if (!ha->hw.mdump_init)
4430                 return;
4431
4432         if (ha->hw.mdump_done)
4433                 return;
4434
4435         ha->hw.mdump_start_seq_index = ql_stop_sequence(ha);
4436
4437         bzero(ha->hw.mdump_buffer, ha->hw.mdump_buffer_size);
4438         bzero(ha->hw.mdump_template, ha->hw.mdump_template_size);
4439
4440         bcopy(ha->hw.dma_buf.minidump.dma_b, ha->hw.mdump_template,
4441                 ha->hw.mdump_template_size);
4442
4443         ql_parse_template(ha);
4444
4445         ql_start_sequence(ha, ha->hw.mdump_start_seq_index);
4446
4447         ha->hw.mdump_done = 1;
4448
4449         return;
4450 }
4451
4452
4453 /*
4454  * helper routines
4455  */
4456 static void 
4457 ql_entry_err_chk(ql_minidump_entry_t *entry, uint32_t esize)
4458 {
4459         if (esize != entry->hdr.entry_capture_size) {
4460                 entry->hdr.entry_capture_size = esize;
4461                 entry->hdr.driver_flags |= QL_DBG_SIZE_ERR_FLAG;
4462         }
4463         return;
4464 }
4465
4466
4467 static int 
4468 ql_parse_template(qla_host_t *ha)
4469 {
4470         uint32_t num_of_entries, buff_level, e_cnt, esize;
4471         uint32_t end_cnt, rv = 0;
4472         char *dump_buff, *dbuff;
4473         int sane_start = 0, sane_end = 0;
4474         ql_minidump_template_hdr_t *template_hdr;
4475         ql_minidump_entry_t *entry;
4476         uint32_t capture_mask; 
4477         uint32_t dump_size; 
4478
4479         /* Setup parameters */
4480         template_hdr = (ql_minidump_template_hdr_t *)ha->hw.mdump_template;
4481
4482         if (template_hdr->entry_type == TLHDR)
4483                 sane_start = 1;
4484
4485         dump_buff = (char *) ha->hw.mdump_buffer;
4486
4487         num_of_entries = template_hdr->num_of_entries;
4488
4489         entry = (ql_minidump_entry_t *) ((char *)template_hdr 
4490                         + template_hdr->first_entry_offset );
4491
4492         template_hdr->saved_state_array[QL_OCM0_ADDR_INDX] =
4493                 template_hdr->ocm_window_array[ha->pci_func];
4494         template_hdr->saved_state_array[QL_PCIE_FUNC_INDX] = ha->pci_func;
4495
4496         capture_mask = ha->hw.mdump_capture_mask;
4497         dump_size = ha->hw.mdump_buffer_size;
4498
4499         template_hdr->driver_capture_mask = capture_mask;
4500
4501         QL_DPRINT80(ha, (ha->pci_dev,
4502                 "%s: sane_start = %d num_of_entries = %d "
4503                 "capture_mask = 0x%x dump_size = %d \n", 
4504                 __func__, sane_start, num_of_entries, capture_mask, dump_size));
4505
4506         for (buff_level = 0, e_cnt = 0; e_cnt < num_of_entries; e_cnt++) {
4507
4508                 /*
4509                  * If the entry's capture_mask does not overlap the requested
4510                  * capture mask, flag it as skipped in driver_flags and move on.
4511                  */
4512
4513                 if (!(entry->hdr.entry_capture_mask & capture_mask)) {
4514
4515                         entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4516                         entry = (ql_minidump_entry_t *) ((char *) entry
4517                                         + entry->hdr.entry_size);
4518                         continue;
4519                 }
4520
4521                 /*
4522                  * This is ONLY needed in implementations where the capture
4523                  * buffer allocated is too small to capture all of the
4524                  * required entries for a given capture mask. We need to
4525                  * empty the buffer contents to a file, if possible, before
4526                  * processing the next entry. If the buff_full_flag is set,
4527                  * no further capture will happen and all remaining
4528                  * non-control entries will be skipped.
4529                  */
4530                 if (entry->hdr.entry_capture_size != 0) {
4531                         if ((buff_level + entry->hdr.entry_capture_size) >
4532                                 dump_size) {
4533                                 /*  Try to recover by emptying buffer to file */
4534                                 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4535                                 entry = (ql_minidump_entry_t *) ((char *) entry
4536                                                 + entry->hdr.entry_size);
4537                                 continue;
4538                         }
4539                 }
4540
4541                 /*
4542                  * Decode the entry type and process it accordingly
4543                  */
4544
4545                 switch (entry->hdr.entry_type) {
4546                 case RDNOP:
4547                         break;
4548
4549                 case RDEND:
4550                         if (sane_end == 0) {
4551                                 end_cnt = e_cnt;
4552                         }
4553                         sane_end++;
4554                         break;
4555
4556                 case RDCRB:
4557                         dbuff = dump_buff + buff_level;
4558                         esize = ql_rdcrb(ha, (void *)entry, (void *)dbuff);
4559                         ql_entry_err_chk(entry, esize);
4560                         buff_level += esize;
4561                         break;
4562
4563                 case POLLRD:
4564                         dbuff = dump_buff + buff_level;
4565                         esize = ql_pollrd(ha, (void *)entry, (void *)dbuff);
4566                         ql_entry_err_chk(entry, esize);
4567                         buff_level += esize;
4568                         break;
4569
4570                 case POLLRDMWR:
4571                         dbuff = dump_buff + buff_level;
4572                         esize = ql_pollrd_modify_write(ha, (void *)entry,
4573                                         (void *)dbuff);
4574                         ql_entry_err_chk(entry, esize);
4575                         buff_level += esize;
4576                         break;
4577
4578                 case L2ITG:
4579                 case L2DTG:
4580                 case L2DAT:
4581                 case L2INS:
4582                         dbuff = dump_buff + buff_level;
4583                         esize = ql_L2Cache(ha, (void *)entry, (void *)dbuff);
4584                         if (esize == -1) {
4585                                 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4586                         } else {
4587                                 ql_entry_err_chk(entry, esize);
4588                                 buff_level += esize;
4589                         }
4590                         break;
4591
4592                 case L1DAT:
4593                 case L1INS:
4594                         dbuff = dump_buff + buff_level;
4595                         esize = ql_L1Cache(ha, (void *)entry, (void *)dbuff);
4596                         ql_entry_err_chk(entry, esize);
4597                         buff_level += esize;
4598                         break;
4599
4600                 case RDOCM:
4601                         dbuff = dump_buff + buff_level;
4602                         esize = ql_rdocm(ha, (void *)entry, (void *)dbuff);
4603                         ql_entry_err_chk(entry, esize);
4604                         buff_level += esize;
4605                         break;
4606
4607                 case RDMEM:
4608                         dbuff = dump_buff + buff_level;
4609                         esize = ql_rdmem(ha, (void *)entry, (void *)dbuff);
4610                         ql_entry_err_chk(entry, esize);
4611                         buff_level += esize;
4612                         break;
4613
4614                 case BOARD:
4615                 case RDROM:
4616                         dbuff = dump_buff + buff_level;
4617                         esize = ql_rdrom(ha, (void *)entry, (void *)dbuff);
4618                         ql_entry_err_chk(entry, esize);
4619                         buff_level += esize;
4620                         break;
4621
4622                 case RDMUX:
4623                         dbuff = dump_buff + buff_level;
4624                         esize = ql_rdmux(ha, (void *)entry, (void *)dbuff);
4625                         ql_entry_err_chk(entry, esize);
4626                         buff_level += esize;
4627                         break;
4628
4629                 case RDMUX2:
4630                         dbuff = dump_buff + buff_level;
4631                         esize = ql_rdmux2(ha, (void *)entry, (void *)dbuff);
4632                         ql_entry_err_chk(entry, esize);
4633                         buff_level += esize;
4634                         break;
4635
4636                 case QUEUE:
4637                         dbuff = dump_buff + buff_level;
4638                         esize = ql_rdqueue(ha, (void *)entry, (void *)dbuff);
4639                         ql_entry_err_chk(entry, esize);
4640                         buff_level += esize;
4641                         break;
4642
4643                 case CNTRL:
4644                         if ((rv = ql_cntrl(ha, template_hdr, (void *)entry))) {
4645                                 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4646                         }
4647                         break;
4648                 default:
4649                         entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4650                         break;
4651                 }
4652                 /*  next entry in the template */
4653                 entry = (ql_minidump_entry_t *) ((char *) entry
4654                                                 + entry->hdr.entry_size);
4655         }
4656
4657         if (!sane_start || (sane_end > 1)) {
4658                 device_printf(ha->pci_dev,
4659                         "\n%s: Template configuration error. Check Template\n",
4660                         __func__);
4661         }
4662         
4663         QL_DPRINT80(ha, (ha->pci_dev, "%s: Minidump num of entries = %d\n",
4664                 __func__, template_hdr->num_of_entries));
4665
4666         return 0;
4667 }
4668
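/*
 * Minidump template traversal, summarized: entries are variable-length
 * records, so the walk above always advances by entry->hdr.entry_size
 * (the size recorded in the template), never by the capture size.
 * Illustrative sketch only (process() is a hypothetical placeholder):
 *
 *      ql_minidump_entry_t *e = first_entry;
 *      for (i = 0; i < num_of_entries; i++) {
 *              process(e);
 *              e = (ql_minidump_entry_t *)((char *)e + e->hdr.entry_size);
 *      }
 */
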
4669 /*
4670  * Read CRB operation.
4671  */
4672 static uint32_t
4673 ql_rdcrb(qla_host_t *ha, ql_minidump_entry_rdcrb_t *crb_entry,
4674         uint32_t *data_buff)
4675 {
4676         int loop_cnt;
4677         int ret;
4678         uint32_t op_count, addr, stride, value = 0;
4679
4680         addr = crb_entry->addr;
4681         op_count = crb_entry->op_count;
4682         stride = crb_entry->addr_stride;
4683
4684         for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
4685
4686                 ret = ql_rdwr_indreg32(ha, addr, &value, 1);
4687
4688                 if (ret)
4689                         return (0);
4690
4691                 *data_buff++ = addr;
4692                 *data_buff++ = value;
4693                 addr = addr + stride;
4694         }
4695
4696         /*
4697          * Return the amount of data written to the capture buffer.
4698          */
4699         return (op_count * (2 * sizeof(uint32_t)));
4700 }
4701
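/*
 * Capture layout produced by ql_rdcrb(), with illustrative values:
 * for addr = 0x100, stride = 4 and op_count = 2 the entry emits
 *
 *      data_buff[0] = 0x100;  data_buff[1] = <value read at 0x100>;
 *      data_buff[2] = 0x104;  data_buff[3] = <value read at 0x104>;
 *
 * i.e. op_count * 2 * sizeof(uint32_t) = 16 bytes, the size the caller
 * uses to advance buff_level.
 */
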
4702 /*
4703  * Handle L2 Cache.
4704  */
4705
4706 static uint32_t 
4707 ql_L2Cache(qla_host_t *ha, ql_minidump_entry_cache_t *cacheEntry,
4708         uint32_t *data_buff)
4709 {
4710         int i, k;
4711         int loop_cnt;
4712         int ret;
4713
4714         uint32_t read_value;
4715         uint32_t addr, read_addr, cntrl_addr, tag_reg_addr, cntl_value_w;
4716         uint32_t tag_value, read_cnt;
4717         volatile uint8_t cntl_value_r;
4718         long timeout;
4719         uint32_t data;
4720
4721         loop_cnt = cacheEntry->op_count;
4722
4723         read_addr = cacheEntry->read_addr;
4724         cntrl_addr = cacheEntry->control_addr;
4725         cntl_value_w = (uint32_t) cacheEntry->write_value;
4726
4727         tag_reg_addr = cacheEntry->tag_reg_addr;
4728
4729         tag_value = cacheEntry->init_tag_value;
4730         read_cnt = cacheEntry->read_addr_cnt;
4731
4732         for (i = 0; i < loop_cnt; i++) {
4733
4734                 ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
4735                 if (ret)
4736                         return (0);
4737
4738                 if (cacheEntry->write_value != 0) { 
4739
4740                         ret = ql_rdwr_indreg32(ha, cntrl_addr,
4741                                         &cntl_value_w, 0);
4742                         if (ret)
4743                                 return (0);
4744                 }
4745
4746                 if (cacheEntry->poll_mask != 0) { 
4747
4748                         timeout = cacheEntry->poll_wait;
4749
4750                         ret = ql_rdwr_indreg32(ha, cntrl_addr, &data, 1);
4751                         if (ret)
4752                                 return (0);
4753
4754                         cntl_value_r = (uint8_t)data;
4755
4756                         while ((cntl_value_r & cacheEntry->poll_mask) != 0) {
4757
4758                                 if (timeout) {
4759                                         qla_mdelay(__func__, 1);
4760                                         timeout--;
4761                                 } else
4762                                         break;
4763
4764                                 ret = ql_rdwr_indreg32(ha, cntrl_addr,
4765                                                 &data, 1);
4766                                 if (ret)
4767                                         return (0);
4768
4769                                 cntl_value_r = (uint8_t)data;
4770                         }
4771                         if (!timeout) {
4772                         /*
4773                          * Report a timeout error: the core dump capture
4774                          * failed. Skip the remaining entries, write the
4775                          * buffer out to a file, and use the driver
4776                          * specific fields in the template header to
4777                          * report this error.
4778                          */
4779                                 return (-1);
4780                         }
4781                 }
4782
4783                 addr = read_addr;
4784                 for (k = 0; k < read_cnt; k++) {
4785
4786                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4787                         if (ret)
4788                                 return (0);
4789
4790                         *data_buff++ = read_value;
4791                         addr += cacheEntry->read_addr_stride;
4792                 }
4793
4794                 tag_value += cacheEntry->tag_value_stride;
4795         }
4796
4797         return (read_cnt * loop_cnt * sizeof(uint32_t));
4798 }
4799
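/*
 * Per-iteration sequence of ql_L2Cache() above, assuming a non-zero
 * poll_mask (a summary of the code, not additional protocol):
 *
 *      1. write tag_value to tag_reg_addr
 *      2. write write_value to control_addr (only if write_value != 0)
 *      3. poll control_addr until (value & poll_mask) == 0, waiting
 *         1ms per retry for at most poll_wait retries
 *      4. read read_cnt words starting at read_addr, read_addr_stride
 *         apart
 *
 * A poll timeout returns -1, which the template walk above turns into
 * QL_DBG_SKIPPED_FLAG for this entry.
 */
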
4800 /*
4801  * Handle L1 Cache.
4802  */
4803
4804 static uint32_t 
4805 ql_L1Cache(qla_host_t *ha,
4806         ql_minidump_entry_cache_t *cacheEntry,
4807         uint32_t *data_buff)
4808 {
4809         int ret;
4810         int i, k;
4811         int loop_cnt;
4812
4813         uint32_t read_value;
4814         uint32_t addr, read_addr, cntrl_addr, tag_reg_addr;
4815         uint32_t tag_value, read_cnt;
4816         uint32_t cntl_value_w;
4817
4818         loop_cnt = cacheEntry->op_count;
4819
4820         read_addr = cacheEntry->read_addr;
4821         cntrl_addr = cacheEntry->control_addr;
4822         cntl_value_w = (uint32_t) cacheEntry->write_value;
4823
4824         tag_reg_addr = cacheEntry->tag_reg_addr;
4825
4826         tag_value = cacheEntry->init_tag_value;
4827         read_cnt = cacheEntry->read_addr_cnt;
4828
4829         for (i = 0; i < loop_cnt; i++) {
4830
4831                 ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
4832                 if (ret)
4833                         return (0);
4834
4835                 ret = ql_rdwr_indreg32(ha, cntrl_addr, &cntl_value_w, 0);
4836                 if (ret)
4837                         return (0);
4838
4839                 addr = read_addr;
4840                 for (k = 0; k < read_cnt; k++) {
4841
4842                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4843                         if (ret)
4844                                 return (0);
4845
4846                         *data_buff++ = read_value;
4847                         addr += cacheEntry->read_addr_stride;
4848                 }
4849
4850                 tag_value += cacheEntry->tag_value_stride;
4851         }
4852
4853         return (read_cnt * loop_cnt * sizeof(uint32_t));
4854 }
4855
4856 /*
4857  * Read OCM memory.
4858  */
4859
4860 static uint32_t 
4861 ql_rdocm(qla_host_t *ha,
4862         ql_minidump_entry_rdocm_t *ocmEntry,
4863         uint32_t *data_buff)
4864 {
4865         int i, loop_cnt;
4866         volatile uint32_t addr;
4867         volatile uint32_t value;
4868
4869         addr = ocmEntry->read_addr;
4870         loop_cnt = ocmEntry->op_count;
4871
4872         for (i = 0; i < loop_cnt; i++) {
4873                 value = READ_REG32(ha, addr);
4874                 *data_buff++ = value;
4875                 addr += ocmEntry->read_addr_stride;
4876         }
4877         return (loop_cnt * sizeof(value));
4878 }
4879
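/*
 * Note: unlike the indirect-register readers above, OCM contents are
 * read with READ_REG32() through the PCI register window, using the
 * per-function OCM window value saved into the template header before
 * the entry walk.
 */
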
4880 /*
4881  * Read memory
4882  */
4883
4884 static uint32_t 
4885 ql_rdmem(qla_host_t *ha,
4886         ql_minidump_entry_rdmem_t *mem_entry,
4887         uint32_t *data_buff)
4888 {
4889         int ret;
4890         int i, loop_cnt;
4891         volatile uint32_t addr;
4892         q80_offchip_mem_val_t val;
4893
4894         addr = mem_entry->read_addr;
4895
4896         /* read_data_size is in bytes; each off-chip read returns 16 bytes */
4897         loop_cnt = mem_entry->read_data_size / (sizeof(uint32_t) * 4);
4898
4899         for (i = 0; i < loop_cnt; i++) {
4900
4901                 ret = ql_rdwr_offchip_mem(ha, (addr & 0x0ffffffff), &val, 1);
4902                 if (ret)
4903                         return (0);
4904
4905                 *data_buff++ = val.data_lo;
4906                 *data_buff++ = val.data_hi;
4907                 *data_buff++ = val.data_ulo;
4908                 *data_buff++ = val.data_uhi;
4909
4910                 addr += (sizeof(uint32_t) * 4);
4911         }
4912
4913         return (loop_cnt * (sizeof(uint32_t) * 4));
4914 }
4915
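/*
 * Worked example for ql_rdmem(), with illustrative numbers: for
 * read_data_size = 256, loop_cnt = 256 / 16 = 16; each
 * ql_rdwr_offchip_mem() call fills one q80_offchip_mem_val_t
 * (4 x 32-bit words = 16 bytes), so the entry contributes
 * 16 * 16 = 256 bytes to the capture buffer.
 */
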
4916 /*
4917  * Read Rom
4918  * Read ROM.
4919
4920 static uint32_t 
4921 ql_rdrom(qla_host_t *ha,
4922         ql_minidump_entry_rdrom_t *romEntry,
4923         uint32_t *data_buff)
4924 {
4925         int ret;
4926         int i, loop_cnt;
4927         uint32_t addr;
4928         uint32_t value;
4929
4930         addr = romEntry->read_addr;
4931         loop_cnt = romEntry->read_data_size; /* This is size in bytes */
4932         loop_cnt /= sizeof(value);
4933
4934         for (i = 0; i < loop_cnt; i++) {
4935
4936                 ret = ql_rd_flash32(ha, addr, &value);
4937                 if (ret)
4938                         return (0);
4939
4940                 *data_buff++ = value;
4941                 addr += sizeof(value);
4942         }
4943
4944         return (loop_cnt * sizeof(value));
4945 }
4946
4947 /*
4948  * Read MUX data
4949  */
4950
4951 static uint32_t 
4952 ql_rdmux(qla_host_t *ha,
4953         ql_minidump_entry_mux_t *muxEntry,
4954         uint32_t *data_buff)
4955 {
4956         int ret;
4957         int loop_cnt;
4958         uint32_t read_value, sel_value;
4959         uint32_t read_addr, select_addr;
4960
4961         select_addr = muxEntry->select_addr;
4962         sel_value = muxEntry->select_value;
4963         read_addr = muxEntry->read_addr;
4964
4965         for (loop_cnt = 0; loop_cnt < muxEntry->op_count; loop_cnt++) {
4966
4967                 ret = ql_rdwr_indreg32(ha, select_addr, &sel_value, 0);
4968                 if (ret)
4969                         return (0);
4970
4971                 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
4972                 if (ret)
4973                         return (0);
4974
4975                 *data_buff++ = sel_value;
4976                 *data_buff++ = read_value;
4977
4978                 sel_value += muxEntry->select_value_stride;
4979         }
4980
4981         return (loop_cnt * (2 * sizeof(uint32_t)));
4982 }
4983
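/*
 * ql_rdmux() follows a select-then-read pattern: each iteration
 * programs select_addr with the current select_value, samples
 * read_addr once, and records the (select_value, read_value) pair,
 * so the captured stream alternates selector and data words.
 */
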
4984 static uint32_t
4985 ql_rdmux2(qla_host_t *ha,
4986         ql_minidump_entry_mux2_t *muxEntry,
4987         uint32_t *data_buff)
4988 {
4989         int ret;
4990         int loop_cnt;
4991
4992         uint32_t select_addr_1, select_addr_2;
4993         uint32_t select_value_1, select_value_2;
4994         uint32_t select_value_count, select_value_mask;
4995         uint32_t read_addr, read_value;
4996
4997         select_addr_1 = muxEntry->select_addr_1;
4998         select_addr_2 = muxEntry->select_addr_2;
4999         select_value_1 = muxEntry->select_value_1;
5000         select_value_2 = muxEntry->select_value_2;
5001         select_value_count = muxEntry->select_value_count;
5002         select_value_mask  = muxEntry->select_value_mask;
5003
5004         read_addr = muxEntry->read_addr;
5005
5006         for (loop_cnt = 0; loop_cnt < muxEntry->select_value_count;
5007                 loop_cnt++) {
5008
5009                 uint32_t temp_sel_val;
5010
5011                 ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_1, 0);
5012                 if (ret)
5013                         return (0);
5014
5015                 temp_sel_val = select_value_1 & select_value_mask;
5016
5017                 ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
5018                 if (ret)
5019                         return (0);
5020
5021                 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
5022                 if (ret)
5023                         return (0);
5024
5025                 *data_buff++ = temp_sel_val;
5026                 *data_buff++ = read_value;
5027
5028                 ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_2, 0);
5029                 if (ret)
5030                         return (0);
5031
5032                 temp_sel_val = select_value_2 & select_value_mask;
5033
5034                 ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
5035                 if (ret)
5036                         return (0);
5037
5038                 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
5039                 if (ret)
5040                         return (0);
5041
5042                 *data_buff++ = temp_sel_val;
5043                 *data_buff++ = read_value;
5044
5045                 select_value_1 += muxEntry->select_value_stride;
5046                 select_value_2 += muxEntry->select_value_stride;
5047         }
5048
5049         return (loop_cnt * (4 * sizeof(uint32_t)));
5050 }
5051
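/*
 * ql_rdmux2() performs two selects per iteration: select_value_1 and
 * select_value_2 are each written to select_addr_1, masked with
 * select_value_mask, and the masked value is written to select_addr_2
 * before read_addr is sampled. Every iteration therefore emits four
 * words, two (masked selector, data) pairs, matching the
 * 4 * sizeof(uint32_t) per-iteration accounting in the return value.
 */
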
5052 /*
5053  * Handling Queue State Reads.
5054  */
5055
5056 static uint32_t 
5057 ql_rdqueue(qla_host_t *ha,
5058         ql_minidump_entry_queue_t *queueEntry,
5059         uint32_t *data_buff)
5060 {
5061         int ret;
5062         int loop_cnt, k;
5063         uint32_t read_value;
5064         uint32_t read_addr, read_stride, select_addr;
5065         uint32_t queue_id, read_cnt;
5066
5067         read_cnt = queueEntry->read_addr_cnt;
5068         read_stride = queueEntry->read_addr_stride;
5069         select_addr = queueEntry->select_addr;
5070
5071         for (loop_cnt = 0, queue_id = 0; loop_cnt < queueEntry->op_count;
5072                 loop_cnt++) {
5073
5074                 ret = ql_rdwr_indreg32(ha, select_addr, &queue_id, 0);
5075                 if (ret)
5076                         return (0);
5077
5078                 read_addr = queueEntry->read_addr;
5079
5080                 for (k = 0; k < read_cnt; k++) {
5081
5082                         ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
5083                         if (ret)
5084                                 return (0);
5085
5086                         *data_buff++ = read_value;
5087                         read_addr += read_stride;
5088                 }
5089
5090                 queue_id += queueEntry->queue_id_stride;
5091         }
5092
5093         return (loop_cnt * (read_cnt * sizeof(uint32_t)));
5094 }
5095
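/*
 * Queue capture example, with illustrative values: for op_count = 2,
 * queue_id_stride = 4 and read_addr_cnt = 3, ql_rdqueue() selects
 * queue ids 0 and 4 in turn and dumps three words of state for each,
 * appending 2 * 3 * sizeof(uint32_t) = 24 bytes to the buffer.
 */
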
5096 /*
5097  * Handling control entries.
5098  */
5099
5100 static uint32_t 
5101 ql_cntrl(qla_host_t *ha,
5102         ql_minidump_template_hdr_t *template_hdr,
5103         ql_minidump_entry_cntrl_t *crbEntry)
5104 {
5105         int ret;
5106         int count;
5107         uint32_t opcode, read_value, addr, entry_addr;
5108         long timeout;
5109
5110         entry_addr = crbEntry->addr;
5111
5112         for (count = 0; count < crbEntry->op_count; count++) {
5113                 opcode = crbEntry->opcode;
5114
5115                 if (opcode & QL_DBG_OPCODE_WR) {
5116
5117                         ret = ql_rdwr_indreg32(ha, entry_addr,
5118                                         &crbEntry->value_1, 0);
5119                         if (ret)
5120                                 return (0);
5121
5122                         opcode &= ~QL_DBG_OPCODE_WR;
5123                 }
5124
5125                 if (opcode & QL_DBG_OPCODE_RW) {
5126
5127                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
5128                         if (ret)
5129                                 return (0);
5130
5131                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
5132                         if (ret)
5133                                 return (0);
5134
5135                         opcode &= ~QL_DBG_OPCODE_RW;
5136                 }
5137
5138                 if (opcode & QL_DBG_OPCODE_AND) {
5139
5140                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
5141                         if (ret)
5142                                 return (0);
5143
5144                         read_value &= crbEntry->value_2;
5145                         opcode &= ~QL_DBG_OPCODE_AND;
5146
5147                         if (opcode & QL_DBG_OPCODE_OR) {
5148                                 read_value |= crbEntry->value_3;
5149                                 opcode &= ~QL_DBG_OPCODE_OR;
5150                         }
5151
5152                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
5153                         if (ret)
5154                                 return (0);
5155                 }
5156
5157                 if (opcode & QL_DBG_OPCODE_OR) {
5158
5159                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
5160                         if (ret)
5161                                 return (0);
5162
5163                         read_value |= crbEntry->value_3;
5164
5165                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
5166                         if (ret)
5167                                 return (0);
5168
5169                         opcode &= ~QL_DBG_OPCODE_OR;
5170                 }
5171
5172                 if (opcode & QL_DBG_OPCODE_POLL) {
5173
5174                         opcode &= ~QL_DBG_OPCODE_POLL;
5175                         timeout = crbEntry->poll_timeout;
5176                         addr = entry_addr;
5177
5178                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
5179                         if (ret)
5180                                 return (0);
5181
5182                         while ((read_value & crbEntry->value_2)
5183                                 != crbEntry->value_1) {
5184
5185                                 if (timeout) {
5186                                         qla_mdelay(__func__, 1);
5187                                         timeout--;
5188                                 } else
5189                                         break;
5190
5191                                 ret = ql_rdwr_indreg32(ha, addr,
5192                                                 &read_value, 1);
5193                                 if (ret)
5194                                         return (0);
5195                         }
5196
5197                         if (!timeout) {
5198                                 /*
5199                                  * Report a timeout error: the core
5200                                  * dump capture failed. Skip the
5201                                  * remaining entries, write the buffer
5202                                  * out to a file, and use the driver
5203                                  * specific fields in the template
5204                                  * header to report this error.
5205                                  */
5206                                 return (-1);
5207                         }
5208                 }
5209
5210                 if (opcode & QL_DBG_OPCODE_RDSTATE) {
5211                         /*
5212                          * decide which address to use.
5213                          */
5214                         if (crbEntry->state_index_a) {
5215                                 addr = template_hdr->saved_state_array[
5216                                 crbEntry->state_index_a];
5217                         } else {
5218                                 addr = entry_addr;
5219                         }
5220
5221                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
5222                         if (ret)
5223                                 return (0);
5224
5225                         template_hdr->saved_state_array[crbEntry->state_index_v]
5226                                         = read_value;
5227                         opcode &= ~QL_DBG_OPCODE_RDSTATE;
5228                 }
5229
5230                 if (opcode & QL_DBG_OPCODE_WRSTATE) {
5231                         /*
5232                          * decide which value to use.
5233                          */
5234                         if (crbEntry->state_index_v) {
5235                                 read_value = template_hdr->saved_state_array[
5236                                                 crbEntry->state_index_v];
5237                         } else {
5238                                 read_value = crbEntry->value_1;
5239                         }
5240                         /*
5241                          * decide which address to use.
5242                          */
5243                         if (crbEntry->state_index_a) {
5244                                 addr = template_hdr->saved_state_array[
5245                                 crbEntry->state_index_a];
5246                         } else {
5247                                 addr = entry_addr;
5248                         }
5249
5250                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 0);
5251                         if (ret)
5252                                 return (0);
5253
5254                         opcode &= ~QL_DBG_OPCODE_WRSTATE;
5255                 }
5256
5257                 if (opcode & QL_DBG_OPCODE_MDSTATE) {
5258                         /*  Read value from saved state using index */
5259                         read_value = template_hdr->saved_state_array[
5260                                                 crbEntry->state_index_v];
5261
5262                         read_value <<= crbEntry->shl; /* shift left */
5263                         read_value >>= crbEntry->shr; /* shift right */
5264
5265                         if (crbEntry->value_2) {
5266                                 /* check if AND mask is provided */
5267                                 read_value &= crbEntry->value_2;
5268                         }
5269
5270                         read_value |= crbEntry->value_3; /* OR operation */
5271                         read_value += crbEntry->value_1; /* increment op */
5272
5273                         /* Write value back to state area. */
5274
5275                         template_hdr->saved_state_array[crbEntry->state_index_v]
5276                                         = read_value;
5277                         opcode &= ~QL_DBG_OPCODE_MDSTATE;
5278                 }
5279
5280                 entry_addr += crbEntry->addr_stride;
5281         }
5282
5283         return (0);
5284 }
5285
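/*
 * crbEntry->opcode is a bit mask, so a single control entry can
 * combine several operations; they are applied in the fixed order
 * coded above (WR, RW, AND/OR, POLL, RDSTATE, WRSTATE, MDSTATE).
 * Hypothetical example: an opcode of (QL_DBG_OPCODE_WR |
 * QL_DBG_OPCODE_POLL) writes value_1 to the register, then polls it
 * until (value & value_2) == value_1 or poll_timeout milliseconds
 * elapse.
 */
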
5286 /*
5287  * Handle read-with-poll entries.
5288  */
5289
5290 static uint32_t 
5291 ql_pollrd(qla_host_t *ha, ql_minidump_entry_pollrd_t *entry,
5292         uint32_t *data_buff)
5293 {
5294         int ret;
5295         int loop_cnt;
5296         uint32_t op_count, select_addr, select_value_stride, select_value;
5297         uint32_t read_addr, poll, mask, data_size, data;
5298         uint32_t wait_count = 0;
5299
5300         select_addr            = entry->select_addr;
5301         read_addr              = entry->read_addr;
5302         select_value           = entry->select_value;
5303         select_value_stride    = entry->select_value_stride;
5304         op_count               = entry->op_count;
5305         poll                   = entry->poll;
5306         mask                   = entry->mask;
5307         data_size              = entry->data_size;
5308
5309         for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
5310
5311                 ret = ql_rdwr_indreg32(ha, select_addr, &select_value, 0);
5312                 if (ret)
5313                         return (0);
5314
5315                 wait_count = 0;
5316
5317                 while (wait_count < poll) {
5318
5319                         uint32_t temp;
5320
5321                         ret = ql_rdwr_indreg32(ha, select_addr, &temp, 1);
5322                         if (ret)
5323                                 return (0);
5324
5325                         if ((temp & mask) != 0) {
5326                                 break;
5327                         }
5328                         wait_count++;
5329                 }
5330
5331                 if (wait_count == poll) {
5332                         device_printf(ha->pci_dev,
5333                                 "%s: Error in processing entry\n", __func__);
5334                         device_printf(ha->pci_dev,
5335                                 "%s: wait_count <0x%x> poll <0x%x>\n",
5336                                 __func__, wait_count, poll);
5337                         return 0;
5338                 }
5339
5340                 ret = ql_rdwr_indreg32(ha, read_addr, &data, 1);
5341                 if (ret)
5342                         return (0);
5343
5344                 *data_buff++ = select_value;
5345                 *data_buff++ = data;
5346                 select_value = select_value + select_value_stride;
5347         }
5348
5349         /*
5350          * Return the amount of data written to the capture buffer.
5351          */
5352         return (loop_cnt * (2 * sizeof(uint32_t)));
5353 }
5354
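/*
 * ql_pollrd() extends the select-then-read pattern with a bounded
 * wait: after each select it re-reads select_addr up to `poll` times
 * until some bit in `mask` is set, and only then samples read_addr;
 * a poll that never succeeds aborts the entry and reports 0 bytes.
 */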
5355
5356 /*
5357  * Handle read-modify-write-with-poll entries.
5358  */
5359
5360 static uint32_t 
5361 ql_pollrd_modify_write(qla_host_t *ha,
5362         ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
5363         uint32_t *data_buff)
5364 {
5365         int ret;
5366         uint32_t addr_1, addr_2, value_1, value_2, data;
5367         uint32_t poll, mask, data_size, modify_mask;
5368         uint32_t wait_count = 0;
5369
5370         addr_1          = entry->addr_1;
5371         addr_2          = entry->addr_2;
5372         value_1         = entry->value_1;
5373         value_2         = entry->value_2;
5374
5375         poll            = entry->poll;
5376         mask            = entry->mask;
5377         modify_mask     = entry->modify_mask;
5378         data_size       = entry->data_size;
5379
5381         ret = ql_rdwr_indreg32(ha, addr_1, &value_1, 0);
5382         if (ret)
5383                 return (0);
5384
5385         wait_count = 0;
5386         while (wait_count < poll) {
5387
5388                 uint32_t temp;
5389
5390                 ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
5391                 if (ret)
5392                         return (0);
5393
5394                 if ((temp & mask) != 0) {
5395                         break;
5396                 }
5397                 wait_count++;
5398         }
5399
5400         if (wait_count == poll) {
5401                 device_printf(ha->pci_dev, "%s: Error in processing entry\n",
5402                         __func__);
5403         } else {
5404
5405                 ret = ql_rdwr_indreg32(ha, addr_2, &data, 1);
5406                 if (ret)
5407                         return (0);
5408
5409                 data = (data & modify_mask);
5410
5411                 ret = ql_rdwr_indreg32(ha, addr_2, &data, 0);
5412                 if (ret)
5413                         return (0);
5414
5415                 ret = ql_rdwr_indreg32(ha, addr_1, &value_2, 0);
5416                 if (ret)
5417                         return (0);
5418
5419                 /* Poll again */
5420                 wait_count = 0;
5421                 while (wait_count < poll) {
5422
5423                         uint32_t temp;
5424
5425                         ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
5426                         if (ret)
5427                                 return (0);
5428
5429                         if ((temp & mask) != 0) {
5430                                 break;
5431                         }
5432                         wait_count++;
5433                 }
5434                 *data_buff++ = addr_2;
5435                 *data_buff++ = data;
5436         }
5437
5438         /*
5439          * Return the amount of data written to the capture buffer.
5440          */
5441         return (2 * sizeof(uint32_t));
5442 }
5443
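/*
 * Summary of the read-modify-write-with-poll sequence above: write
 * value_1 to addr_1, poll addr_1 for `mask`, read addr_2, AND the
 * data with modify_mask and write it back, write value_2 to addr_1,
 * poll once more, then record the (addr_2, data) pair. The entry
 * reports 2 * sizeof(uint32_t) bytes consumed in either case.
 */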
5444