/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013-2016 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: ql_hw.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 * Content: Contains Hardware dependent functions
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "ql_os.h"
#include "ql_hw.h"
#include "ql_def.h"
#include "ql_inline.h"
#include "ql_ver.h"
#include "ql_glbl.h"
#include "ql_dbg.h"
#include "ql_minidump.h"

/*
 * Static Functions
 */

static void qla_del_rcv_cntxt(qla_host_t *ha);
static int qla_init_rcv_cntxt(qla_host_t *ha);
static int qla_del_xmt_cntxt(qla_host_t *ha);
static int qla_init_xmt_cntxt(qla_host_t *ha);
static int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
        uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause);
static int qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx,
        uint32_t num_intrs, uint32_t create);
static int qla_config_rss(qla_host_t *ha, uint16_t cntxt_id);
static int qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id,
        int tenable, int rcv);
static int qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode);
static int qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id);

static int qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd,
                uint8_t *hdr);
static int qla_hw_add_all_mcast(qla_host_t *ha);
static int qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds);

static int qla_init_nic_func(qla_host_t *ha);
static int qla_stop_nic_func(qla_host_t *ha);
static int qla_query_fw_dcbx_caps(qla_host_t *ha);
static int qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits);
static int qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits);
static int qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode);
static int qla_get_cam_search_mode(qla_host_t *ha);

static void ql_minidump_free(qla_host_t *ha);

#ifdef QL_DBG

static void
qla_stop_pegs(qla_host_t *ha)
{
        uint32_t val = 1;

        ql_rdwr_indreg32(ha, Q8_CRB_PEG_0, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_1, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_2, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_3, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_4, &val, 0);
        device_printf(ha->pci_dev, "%s PEGS HALTED!!!!!\n", __func__);
}
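
/*
 * Sysctl handler for the debug-only "peg_stop" node registered in
 * ql_hw_add_sysctls(); writing 1 halts the PEG processors via
 * qla_stop_pegs() above.  Illustrative usage (device/unit name assumed):
 *
 *      sysctl dev.ql.0.peg_stop=1
 */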

static int
qla_sysctl_stop_pegs(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        if (ret == 1) {
                ha = (qla_host_t *)arg1;
                if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
                        qla_stop_pegs(ha);
                        QLA_UNLOCK(ha, __func__);
                }
        }

        return (err);
}
#endif /* #ifdef QL_DBG */

static int
qla_validate_set_port_cfg_bit(uint32_t bits)
{
        if ((bits & 0xF) > 1)
                return (-1);

        if (((bits >> 4) & 0xF) > 2)
                return (-1);

        if (((bits >> 8) & 0xF) > 2)
                return (-1);

        return (0);
}
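
/*
 * Example (matching the checks above; bit layout per the "port_cfg" sysctl
 * help text in ql_hw_add_sysctls()): 0x211 passes validation -- bits 0-3 = 1
 * (DCBX enable), bits 4-7 = 1 (standard pause), bits 8-11 = 2 (standard
 * pause, receive only) -- while 0x3 fails because bits 0-3 exceed 1.
 */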

static int
qla_sysctl_port_cfg(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;
        uint32_t cfg_bits;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        ha = (qla_host_t *)arg1;

        if (qla_validate_set_port_cfg_bit((uint32_t)ret) == 0) {

                err = qla_get_port_config(ha, &cfg_bits);

                if (err)
                        goto qla_sysctl_set_port_cfg_exit;

                if (ret & 0x1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_DCBX_ENABLE;
                } else {
                        cfg_bits &= ~Q8_PORT_CFG_BITS_DCBX_ENABLE;
                }

                ret = ret >> 4;
                cfg_bits &= ~Q8_PORT_CFG_BITS_PAUSE_CFG_MASK;

                if ((ret & 0xF) == 0) {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_DISABLED;
                } else if ((ret & 0xF) == 1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_STD;
                } else {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_PPM;
                }

                ret = ret >> 4;
                cfg_bits &= ~Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK;

                if (ret == 0) {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT_RCV;
                } else if (ret == 1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT;
                } else {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_RCV;
                }

                if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
                        err = qla_set_port_config(ha, cfg_bits);
                        QLA_UNLOCK(ha, __func__);
                } else {
                        device_printf(ha->pci_dev, "%s: failed\n", __func__);
                }
        } else {
                if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
                        err = qla_get_port_config(ha, &cfg_bits);
                        QLA_UNLOCK(ha, __func__);
                } else {
                        device_printf(ha->pci_dev, "%s: failed\n", __func__);
                }
        }

qla_sysctl_set_port_cfg_exit:
        return (err);
}
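
/*
 * Illustrative usage (device/unit name assumed):
 *
 *      sysctl dev.ql.0.port_cfg=0x211
 *
 * enables DCBX with standard pause in the receive direction only; a value
 * that fails qla_validate_set_port_cfg_bit() instead just fetches the
 * current port configuration.
 */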

static int
qla_sysctl_set_cam_search_mode(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        ha = (qla_host_t *)arg1;

        if ((ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_INTERNAL) ||
                (ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_AUTO)) {

                if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
                        err = qla_set_cam_search_mode(ha, (uint32_t)ret);
                        QLA_UNLOCK(ha, __func__);
                } else {
                        device_printf(ha->pci_dev, "%s: failed\n", __func__);
                }

        } else {
                device_printf(ha->pci_dev, "%s: ret = %d\n", __func__, ret);
        }

        return (err);
}

static int
qla_sysctl_get_cam_search_mode(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        ha = (qla_host_t *)arg1;
        if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
                err = qla_get_cam_search_mode(ha);
                QLA_UNLOCK(ha, __func__);
        } else {
                device_printf(ha->pci_dev, "%s: failed\n", __func__);
        }

        return (err);
}

static void
qlnx_add_hw_mac_stats_sysctls(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid       *ctx_oid;

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_mac",
                        CTLFLAG_RD, NULL, "stats_hw_mac");
        children = SYSCTL_CHILDREN(ctx_oid);

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_frames",
                CTLFLAG_RD, &ha->hw.mac.xmt_frames,
                "xmt_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_bytes,
                "xmt_bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_mcast_pkts",
                CTLFLAG_RD, &ha->hw.mac.xmt_mcast_pkts,
                "xmt_mcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_bcast_pkts",
                CTLFLAG_RD, &ha->hw.mac.xmt_bcast_pkts,
                "xmt_bcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pause_frames",
                CTLFLAG_RD, &ha->hw.mac.xmt_pause_frames,
                "xmt_pause_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_cntrl_pkts",
                CTLFLAG_RD, &ha->hw.mac.xmt_cntrl_pkts,
                "xmt_cntrl_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_64bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_64bytes,
                "xmt_pkt_lt_64bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_127bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_127bytes,
                "xmt_pkt_lt_127bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_255bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_255bytes,
                "xmt_pkt_lt_255bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_511bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_511bytes,
                "xmt_pkt_lt_511bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_1023bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_1023bytes,
                "xmt_pkt_lt_1023bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_1518bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_1518bytes,
                "xmt_pkt_lt_1518bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_gt_1518bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_gt_1518bytes,
                "xmt_pkt_gt_1518bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_frames",
                CTLFLAG_RD, &ha->hw.mac.rcv_frames,
                "rcv_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_bytes,
                "rcv_bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_mcast_pkts",
                CTLFLAG_RD, &ha->hw.mac.rcv_mcast_pkts,
                "rcv_mcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_bcast_pkts",
                CTLFLAG_RD, &ha->hw.mac.rcv_bcast_pkts,
                "rcv_bcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pause_frames",
                CTLFLAG_RD, &ha->hw.mac.rcv_pause_frames,
                "rcv_pause_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_cntrl_pkts",
                CTLFLAG_RD, &ha->hw.mac.rcv_cntrl_pkts,
                "rcv_cntrl_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_64bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_64bytes,
                "rcv_pkt_lt_64bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_127bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_127bytes,
                "rcv_pkt_lt_127bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_255bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_255bytes,
                "rcv_pkt_lt_255bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_511bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_511bytes,
                "rcv_pkt_lt_511bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_1023bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_1023bytes,
                "rcv_pkt_lt_1023bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_1518bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_1518bytes,
                "rcv_pkt_lt_1518bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_gt_1518bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_gt_1518bytes,
                "rcv_pkt_gt_1518bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_len_error",
                CTLFLAG_RD, &ha->hw.mac.rcv_len_error,
                "rcv_len_error");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_len_small",
                CTLFLAG_RD, &ha->hw.mac.rcv_len_small,
                "rcv_len_small");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_len_large",
                CTLFLAG_RD, &ha->hw.mac.rcv_len_large,
                "rcv_len_large");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_jabber",
                CTLFLAG_RD, &ha->hw.mac.rcv_jabber,
                "rcv_jabber");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_dropped",
                CTLFLAG_RD, &ha->hw.mac.rcv_dropped,
                "rcv_dropped");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "fcs_error",
                CTLFLAG_RD, &ha->hw.mac.fcs_error,
                "fcs_error");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "align_error",
                CTLFLAG_RD, &ha->hw.mac.align_error,
                "align_error");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_frames",
                CTLFLAG_RD, &ha->hw.mac.eswitched_frames,
                "eswitched_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_bytes",
                CTLFLAG_RD, &ha->hw.mac.eswitched_bytes,
                "eswitched_bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_mcast_frames",
                CTLFLAG_RD, &ha->hw.mac.eswitched_mcast_frames,
                "eswitched_mcast_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_bcast_frames",
                CTLFLAG_RD, &ha->hw.mac.eswitched_bcast_frames,
                "eswitched_bcast_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_ucast_frames",
                CTLFLAG_RD, &ha->hw.mac.eswitched_ucast_frames,
                "eswitched_ucast_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_err_free_frames",
                CTLFLAG_RD, &ha->hw.mac.eswitched_err_free_frames,
                "eswitched_err_free_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_err_free_bytes",
                CTLFLAG_RD, &ha->hw.mac.eswitched_err_free_bytes,
                "eswitched_err_free_bytes");

        return;
}

static void
qlnx_add_hw_rcv_stats_sysctls(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid       *ctx_oid;

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_rcv",
                        CTLFLAG_RD, NULL, "stats_hw_rcv");
        children = SYSCTL_CHILDREN(ctx_oid);

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "total_bytes",
                CTLFLAG_RD, &ha->hw.rcv.total_bytes,
                "total_bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "total_pkts",
                CTLFLAG_RD, &ha->hw.rcv.total_pkts,
                "total_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "lro_pkt_count",
                CTLFLAG_RD, &ha->hw.rcv.lro_pkt_count,
                "lro_pkt_count");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "sw_pkt_count",
                CTLFLAG_RD, &ha->hw.rcv.sw_pkt_count,
                "sw_pkt_count");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "ip_chksum_err",
                CTLFLAG_RD, &ha->hw.rcv.ip_chksum_err,
                "ip_chksum_err");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_wo_acntxts",
                CTLFLAG_RD, &ha->hw.rcv.pkts_wo_acntxts,
                "pkts_wo_acntxts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_dropped_no_sds_card",
                CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_sds_card,
                "pkts_dropped_no_sds_card");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_dropped_no_sds_host",
                CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_sds_host,
                "pkts_dropped_no_sds_host");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "oversized_pkts",
                CTLFLAG_RD, &ha->hw.rcv.oversized_pkts,
                "oversized_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_dropped_no_rds",
                CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_rds,
                "pkts_dropped_no_rds");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "unxpctd_mcast_pkts",
                CTLFLAG_RD, &ha->hw.rcv.unxpctd_mcast_pkts,
                "unxpctd_mcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "re1_fbq_error",
                CTLFLAG_RD, &ha->hw.rcv.re1_fbq_error,
                "re1_fbq_error");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "invalid_mac_addr",
                CTLFLAG_RD, &ha->hw.rcv.invalid_mac_addr,
                "invalid_mac_addr");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rds_prime_trys",
                CTLFLAG_RD, &ha->hw.rcv.rds_prime_trys,
                "rds_prime_trys");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rds_prime_success",
                CTLFLAG_RD, &ha->hw.rcv.rds_prime_success,
                "rds_prime_success");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "lro_flows_added",
                CTLFLAG_RD, &ha->hw.rcv.lro_flows_added,
                "lro_flows_added");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "lro_flows_deleted",
                CTLFLAG_RD, &ha->hw.rcv.lro_flows_deleted,
                "lro_flows_deleted");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "lro_flows_active",
                CTLFLAG_RD, &ha->hw.rcv.lro_flows_active,
                "lro_flows_active");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_droped_unknown",
                CTLFLAG_RD, &ha->hw.rcv.pkts_droped_unknown,
                "pkts_droped_unknown");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_cnt_oversized",
                CTLFLAG_RD, &ha->hw.rcv.pkts_cnt_oversized,
                "pkts_cnt_oversized");

        return;
}

static void
qlnx_add_hw_xmt_stats_sysctls(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid_list  *node_children;
        struct sysctl_oid       *ctx_oid;
        int                     i;
        char                    name_str[16];

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_xmt",
                        CTLFLAG_RD, NULL, "stats_hw_xmt");
        children = SYSCTL_CHILDREN(ctx_oid);

        for (i = 0; i < ha->hw.num_tx_rings; i++) {

                bzero(name_str, sizeof(name_str));
                snprintf(name_str, sizeof(name_str), "%d", i);

                ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
                        CTLFLAG_RD, NULL, name_str);
                node_children = SYSCTL_CHILDREN(ctx_oid);

                /* Tx Related */

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "total_bytes",
                        CTLFLAG_RD, &ha->hw.xmt[i].total_bytes,
                        "total_bytes");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "total_pkts",
                        CTLFLAG_RD, &ha->hw.xmt[i].total_pkts,
                        "total_pkts");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "errors",
                        CTLFLAG_RD, &ha->hw.xmt[i].errors,
                        "errors");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "pkts_dropped",
                        CTLFLAG_RD, &ha->hw.xmt[i].pkts_dropped,
                        "pkts_dropped");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "switch_pkts",
                        CTLFLAG_RD, &ha->hw.xmt[i].switch_pkts,
                        "switch_pkts");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "num_buffers",
                        CTLFLAG_RD, &ha->hw.xmt[i].num_buffers,
                        "num_buffers");
        }

        return;
}

static void
qlnx_add_hw_mbx_cmpl_stats_sysctls(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *node_children;

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        node_children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, "mbx_completion_time_lt_200ms",
                CTLFLAG_RD, &ha->hw.mbx_comp_msecs[0],
                "mbx_completion_time_lt_200ms");

        SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, "mbx_completion_time_200ms_400ms",
                CTLFLAG_RD, &ha->hw.mbx_comp_msecs[1],
                "mbx_completion_time_200ms_400ms");

        SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, "mbx_completion_time_400ms_600ms",
                CTLFLAG_RD, &ha->hw.mbx_comp_msecs[2],
                "mbx_completion_time_400ms_600ms");

        SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, "mbx_completion_time_600ms_800ms",
                CTLFLAG_RD, &ha->hw.mbx_comp_msecs[3],
                "mbx_completion_time_600ms_800ms");

        SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, "mbx_completion_time_800ms_1000ms",
                CTLFLAG_RD, &ha->hw.mbx_comp_msecs[4],
                "mbx_completion_time_800ms_1000ms");

        SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, "mbx_completion_time_1000ms_1200ms",
                CTLFLAG_RD, &ha->hw.mbx_comp_msecs[5],
                "mbx_completion_time_1000ms_1200ms");

        SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, "mbx_completion_time_1200ms_1400ms",
                CTLFLAG_RD, &ha->hw.mbx_comp_msecs[6],
                "mbx_completion_time_1200ms_1400ms");

        SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, "mbx_completion_time_1400ms_1600ms",
                CTLFLAG_RD, &ha->hw.mbx_comp_msecs[7],
                "mbx_completion_time_1400ms_1600ms");

        SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, "mbx_completion_time_1600ms_1800ms",
                CTLFLAG_RD, &ha->hw.mbx_comp_msecs[8],
                "mbx_completion_time_1600ms_1800ms");

        SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, "mbx_completion_time_1800ms_2000ms",
                CTLFLAG_RD, &ha->hw.mbx_comp_msecs[9],
                "mbx_completion_time_1800ms_2000ms");

        SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, "mbx_completion_time_2000ms_2200ms",
                CTLFLAG_RD, &ha->hw.mbx_comp_msecs[10],
                "mbx_completion_time_2000ms_2200ms");

        SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, "mbx_completion_time_2200ms_2400ms",
                CTLFLAG_RD, &ha->hw.mbx_comp_msecs[11],
                "mbx_completion_time_2200ms_2400ms");

        SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, "mbx_completion_time_2400ms_2600ms",
                CTLFLAG_RD, &ha->hw.mbx_comp_msecs[12],
                "mbx_completion_time_2400ms_2600ms");

        SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, "mbx_completion_time_2600ms_2800ms",
                CTLFLAG_RD, &ha->hw.mbx_comp_msecs[13],
                "mbx_completion_time_2600ms_2800ms");

        SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, "mbx_completion_time_2800ms_3000ms",
                CTLFLAG_RD, &ha->hw.mbx_comp_msecs[14],
                "mbx_completion_time_2800ms_3000ms");

        SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, "mbx_completion_time_3000ms_4000ms",
                CTLFLAG_RD, &ha->hw.mbx_comp_msecs[15],
                "mbx_completion_time_3000ms_4000ms");

        SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, "mbx_completion_time_4000ms_5000ms",
                CTLFLAG_RD, &ha->hw.mbx_comp_msecs[16],
                "mbx_completion_time_4000ms_5000ms");

        SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, "mbx_completion_host_mbx_cntrl_timeout",
                CTLFLAG_RD, &ha->hw.mbx_comp_msecs[17],
                "mbx_completion_host_mbx_cntrl_timeout");

        SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, "mbx_completion_fw_mbx_cntrl_timeout",
                CTLFLAG_RD, &ha->hw.mbx_comp_msecs[18],
                "mbx_completion_fw_mbx_cntrl_timeout");

        return;
}
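
/*
 * Bucket encoding, per the update logic in qla_mbx_cmd() below: indices
 * 0-14 count completions in successive 200ms intervals, index 15 covers
 * 3000ms-4000ms, index 16 catches everything slower, and the last two
 * indices count host/firmware mailbox control-register timeouts.
 */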

static void
qlnx_add_hw_stats_sysctls(qla_host_t *ha)
{
        qlnx_add_hw_mac_stats_sysctls(ha);
        qlnx_add_hw_rcv_stats_sysctls(ha);
        qlnx_add_hw_xmt_stats_sysctls(ha);
        qlnx_add_hw_mbx_cmpl_stats_sysctls(ha);

        return;
}
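
/*
 * The nodes registered above appear under the device's sysctl tree, e.g.
 * (device/unit name assumed):
 *
 *      sysctl dev.ql.0.stats_hw_mac.xmt_frames
 *      sysctl dev.ql.0.stats_hw_xmt.0.total_pkts
 */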

static void
qlnx_add_drvr_sds_stats(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid_list  *node_children;
        struct sysctl_oid       *ctx_oid;
        int                     i;
        char                    name_str[16];

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_sds",
                        CTLFLAG_RD, NULL, "stats_drvr_sds");
        children = SYSCTL_CHILDREN(ctx_oid);

        for (i = 0; i < ha->hw.num_sds_rings; i++) {

                bzero(name_str, sizeof(name_str));
                snprintf(name_str, sizeof(name_str), "%d", i);

                ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
                        CTLFLAG_RD, NULL, name_str);
                node_children = SYSCTL_CHILDREN(ctx_oid);

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "intr_count",
                        CTLFLAG_RD, &ha->hw.sds[i].intr_count,
                        "intr_count");

                SYSCTL_ADD_UINT(ctx, node_children,
                        OID_AUTO, "rx_free",
                        CTLFLAG_RD, &ha->hw.sds[i].rx_free,
                        ha->hw.sds[i].rx_free, "rx_free");
        }

        return;
}

static void
qlnx_add_drvr_rds_stats(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid_list  *node_children;
        struct sysctl_oid       *ctx_oid;
        int                     i;
        char                    name_str[16];

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_rds",
                        CTLFLAG_RD, NULL, "stats_drvr_rds");
        children = SYSCTL_CHILDREN(ctx_oid);

        for (i = 0; i < ha->hw.num_rds_rings; i++) {

                bzero(name_str, sizeof(name_str));
                snprintf(name_str, sizeof(name_str), "%d", i);

                ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
                        CTLFLAG_RD, NULL, name_str);
                node_children = SYSCTL_CHILDREN(ctx_oid);

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "count",
                        CTLFLAG_RD, &ha->hw.rds[i].count,
                        "count");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "lro_pkt_count",
                        CTLFLAG_RD, &ha->hw.rds[i].lro_pkt_count,
                        "lro_pkt_count");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "lro_bytes",
                        CTLFLAG_RD, &ha->hw.rds[i].lro_bytes,
                        "lro_bytes");
        }

        return;
}

static void
qlnx_add_drvr_tx_stats(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid_list  *node_children;
        struct sysctl_oid       *ctx_oid;
        int                     i;
        char                    name_str[16];

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_xmt",
                        CTLFLAG_RD, NULL, "stats_drvr_xmt");
        children = SYSCTL_CHILDREN(ctx_oid);

        for (i = 0; i < ha->hw.num_tx_rings; i++) {

                bzero(name_str, sizeof(name_str));
                snprintf(name_str, sizeof(name_str), "%d", i);

                ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
                        CTLFLAG_RD, NULL, name_str);
                node_children = SYSCTL_CHILDREN(ctx_oid);

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "count",
                        CTLFLAG_RD, &ha->tx_ring[i].count,
                        "count");

#ifdef QL_ENABLE_ISCSI_TLV
                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "iscsi_pkt_count",
                        CTLFLAG_RD, &ha->tx_ring[i].iscsi_pkt_count,
                        "iscsi_pkt_count");
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
        }

        return;
}

static void
qlnx_add_drvr_stats_sysctls(qla_host_t *ha)
{
        qlnx_add_drvr_sds_stats(ha);
        qlnx_add_drvr_rds_stats(ha);
        qlnx_add_drvr_tx_stats(ha);
        return;
}

/*
 * Name: ql_hw_add_sysctls
 * Function: Add P3Plus specific sysctls
 */
void
ql_hw_add_sysctls(qla_host_t *ha)
{
        device_t        dev;

        dev = ha->pci_dev;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_rds_rings", CTLFLAG_RD, &ha->hw.num_rds_rings,
                ha->hw.num_rds_rings, "Number of Rcv Descriptor Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_sds_rings", CTLFLAG_RD, &ha->hw.num_sds_rings,
                ha->hw.num_sds_rings, "Number of Status Descriptor Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->hw.num_tx_rings,
                ha->hw.num_tx_rings, "Number of Transmit Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "tx_ring_index", CTLFLAG_RW, &ha->txr_idx,
                ha->txr_idx, "Tx Ring Used");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "max_tx_segs", CTLFLAG_RD, &ha->hw.max_tx_segs,
                ha->hw.max_tx_segs, "Max # of Segments in a non-TSO pkt");

        ha->hw.sds_cidx_thres = 32;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "sds_cidx_thres", CTLFLAG_RW, &ha->hw.sds_cidx_thres,
                ha->hw.sds_cidx_thres,
                "Number of SDS entries to process before updating"
                " SDS Ring Consumer Index");

        ha->hw.rds_pidx_thres = 32;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "rds_pidx_thres", CTLFLAG_RW, &ha->hw.rds_pidx_thres,
                ha->hw.rds_pidx_thres,
                "Number of Rcv Rings Entries to post before updating"
                " RDS Ring Producer Index");

        ha->hw.rcv_intr_coalesce = (3 << 16) | 256;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "rcv_intr_coalesce", CTLFLAG_RW,
                &ha->hw.rcv_intr_coalesce,
                ha->hw.rcv_intr_coalesce,
                "Rcv Intr Coalescing Parameters\n"
                "\tbits 15:0 max packets\n"
                "\tbits 31:16 max micro-seconds to wait\n"
                "\tplease run\n"
                "\tifconfig <if> down && ifconfig <if> up\n"
                "\tto take effect\n");

        ha->hw.xmt_intr_coalesce = (64 << 16) | 64;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "xmt_intr_coalesce", CTLFLAG_RW,
                &ha->hw.xmt_intr_coalesce,
                ha->hw.xmt_intr_coalesce,
                "Xmt Intr Coalescing Parameters\n"
                "\tbits 15:0 max packets\n"
                "\tbits 31:16 max micro-seconds to wait\n"
                "\tplease run\n"
                "\tifconfig <if> down && ifconfig <if> up\n"
                "\tto take effect\n");
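
        /*
         * Per the bit layout described above, the defaults decode as:
         * rcv (3 << 16) | 256 = at most 3 micro-seconds or 256 packets,
         * xmt (64 << 16) | 64 = at most 64 micro-seconds or 64 packets.
         */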

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "port_cfg", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_port_cfg, "I",
                        "Set Port Configuration if value is valid, "
                        "otherwise Get Port Configuration\n"
                        "\tBits 0-3 : 1 = DCBX Enable; 0 = DCBX Disable\n"
                        "\tBits 4-7 : 0 = no pause; 1 = std; 2 = ppm\n"
                        "\tBits 8-11: std pause cfg; 0 = xmt and rcv;"
                        " 1 = xmt only; 2 = rcv only;\n"
                );

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "set_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_set_cam_search_mode, "I",
                        "Set CAM Search Mode\n"
                        "\t 1 = search mode internal\n"
                        "\t 2 = search mode auto\n");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "get_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_get_cam_search_mode, "I",
                        "Get CAM Search Mode\n"
                        "\t 1 = search mode internal\n"
                        "\t 2 = search mode auto\n");

        ha->hw.enable_9kb = 1;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "enable_9kb", CTLFLAG_RW, &ha->hw.enable_9kb,
                ha->hw.enable_9kb, "Enable 9Kbyte Buffers when MTU = 9000");

        ha->hw.enable_hw_lro = 1;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "enable_hw_lro", CTLFLAG_RW, &ha->hw.enable_hw_lro,
                ha->hw.enable_hw_lro, "Enable Hardware LRO; Default is true\n"
                "\t 1 : Hardware LRO if LRO is enabled\n"
                "\t 0 : Software LRO if LRO is enabled\n"
                "\t Any change requires ifconfig down/up to take effect\n"
                "\t Note that LRO may be turned off/on via ifconfig\n");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "sp_log_index", CTLFLAG_RW, &ha->hw.sp_log_index,
                ha->hw.sp_log_index, "sp_log_index");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "sp_log_stop", CTLFLAG_RW, &ha->hw.sp_log_stop,
                ha->hw.sp_log_stop, "sp_log_stop");

        ha->hw.sp_log_stop_events = 0;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "sp_log_stop_events", CTLFLAG_RW,
                &ha->hw.sp_log_stop_events,
                ha->hw.sp_log_stop_events, "Slow path event log is stopped"
                " when any of the following events occur\n"
                "\t 0x01 : Heart beat Failure\n"
                "\t 0x02 : Temperature Failure\n"
                "\t 0x04 : HW Initialization Failure\n"
                "\t 0x08 : Interface Initialization Failure\n"
                "\t 0x10 : Error Recovery Failure\n");

        ha->hw.mdump_active = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "minidump_active", CTLFLAG_RW, &ha->hw.mdump_active,
                ha->hw.mdump_active,
                "Minidump retrieval is Active");

        ha->hw.mdump_done = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "mdump_done", CTLFLAG_RW,
                &ha->hw.mdump_done, ha->hw.mdump_done,
                "Minidump has been done and available for retrieval");

        ha->hw.mdump_capture_mask = 0xF;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "minidump_capture_mask", CTLFLAG_RW,
                &ha->hw.mdump_capture_mask, ha->hw.mdump_capture_mask,
                "Minidump capture mask");

#ifdef QL_DBG

        ha->err_inject = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "err_inject",
                CTLFLAG_RW, &ha->err_inject, ha->err_inject,
                "Error to be injected\n"
                "\t\t\t 0: No Errors\n"
                "\t\t\t 1: rcv: rxb struct invalid\n"
                "\t\t\t 2: rcv: mp == NULL\n"
                "\t\t\t 3: lro: rxb struct invalid\n"
                "\t\t\t 4: lro: mp == NULL\n"
                "\t\t\t 5: rcv: num handles invalid\n"
                "\t\t\t 6: reg: indirect reg rd_wr failure\n"
                "\t\t\t 7: ocm: offchip memory rd_wr failure\n"
                "\t\t\t 8: mbx: mailbox command failure\n"
                "\t\t\t 9: heartbeat failure\n"
                "\t\t\t A: temperature failure\n"
                "\t\t\t 11: m_getcl or m_getjcl failure\n");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "peg_stop", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_stop_pegs, "I", "Peg Stop");

#endif /* #ifdef QL_DBG */

        ha->hw.user_pri_nic = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "user_pri_nic", CTLFLAG_RW, &ha->hw.user_pri_nic,
                ha->hw.user_pri_nic,
                "VLAN Tag User Priority for Normal Ethernet Packets");

        ha->hw.user_pri_iscsi = 4;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "user_pri_iscsi", CTLFLAG_RW, &ha->hw.user_pri_iscsi,
                ha->hw.user_pri_iscsi,
                "VLAN Tag User Priority for iSCSI Packets");

        qlnx_add_hw_stats_sysctls(ha);
        qlnx_add_drvr_stats_sysctls(ha);

        return;
}

void
ql_hw_link_status(qla_host_t *ha)
{
        device_printf(ha->pci_dev, "cable_oui\t\t 0x%08x\n", ha->hw.cable_oui);

        if (ha->hw.link_up) {
                device_printf(ha->pci_dev, "link Up\n");
        } else {
                device_printf(ha->pci_dev, "link Down\n");
        }

        if (ha->hw.fduplex) {
                device_printf(ha->pci_dev, "Full Duplex\n");
        } else {
                device_printf(ha->pci_dev, "Half Duplex\n");
        }

        if (ha->hw.autoneg) {
                device_printf(ha->pci_dev, "Auto Negotiation Enabled\n");
        } else {
                device_printf(ha->pci_dev, "Auto Negotiation Disabled\n");
        }

        switch (ha->hw.link_speed) {
        case 0x710:
                device_printf(ha->pci_dev, "link speed\t\t 10Gbps\n");
                break;

        case 0x3E8:
                device_printf(ha->pci_dev, "link speed\t\t 1Gbps\n");
                break;

        case 0x64:
                device_printf(ha->pci_dev, "link speed\t\t 100Mbps\n");
                break;

        default:
                device_printf(ha->pci_dev, "link speed\t\t Unknown\n");
                break;
        }

        switch (ha->hw.module_type) {
        case 0x01:
                device_printf(ha->pci_dev, "Module Type 10GBase-LRM\n");
                break;

        case 0x02:
                device_printf(ha->pci_dev, "Module Type 10GBase-LR\n");
                break;

        case 0x03:
                device_printf(ha->pci_dev, "Module Type 10GBase-SR\n");
                break;

        case 0x04:
                device_printf(ha->pci_dev,
                        "Module Type 10GE Passive Copper(Compliant)[%d m]\n",
                        ha->hw.cable_length);
                break;

        case 0x05:
                device_printf(ha->pci_dev, "Module Type 10GE Active"
                        " Limiting Copper(Compliant)[%d m]\n",
                        ha->hw.cable_length);
                break;

        case 0x06:
                device_printf(ha->pci_dev,
                        "Module Type 10GE Passive Copper"
                        " (Legacy, Best Effort)[%d m]\n",
                        ha->hw.cable_length);
                break;

        case 0x07:
                device_printf(ha->pci_dev, "Module Type 1000Base-SX\n");
                break;

        case 0x08:
                device_printf(ha->pci_dev, "Module Type 1000Base-LX\n");
                break;

        case 0x09:
                device_printf(ha->pci_dev, "Module Type 1000Base-CX\n");
                break;

        case 0x0A:
                device_printf(ha->pci_dev, "Module Type 1000Base-T\n");
                break;

        case 0x0B:
                device_printf(ha->pci_dev, "Module Type 1GE Passive Copper"
                        " (Legacy, Best Effort)\n");
                break;

        default:
                device_printf(ha->pci_dev, "Unknown Module Type 0x%x\n",
                        ha->hw.module_type);
                break;
        }

        if (ha->hw.link_faults == 1)
                device_printf(ha->pci_dev, "SFP Power Fault\n");
}

/*
 * Name: ql_free_dma
 * Function: Frees the DMA'able memory allocated in ql_alloc_dma()
 */
void
ql_free_dma(qla_host_t *ha)
{
        uint32_t i;

        if (ha->hw.dma_buf.flags.sds_ring) {
                for (i = 0; i < ha->hw.num_sds_rings; i++) {
                        ql_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]);
                }
                ha->hw.dma_buf.flags.sds_ring = 0;
        }

        if (ha->hw.dma_buf.flags.rds_ring) {
                for (i = 0; i < ha->hw.num_rds_rings; i++) {
                        ql_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]);
                }
                ha->hw.dma_buf.flags.rds_ring = 0;
        }

        if (ha->hw.dma_buf.flags.tx_ring) {
                ql_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring);
                ha->hw.dma_buf.flags.tx_ring = 0;
        }
        ql_minidump_free(ha);
}

/*
 * Name: ql_alloc_dma
 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
 */
int
ql_alloc_dma(qla_host_t *ha)
{
        device_t                dev;
        uint32_t                i, j, size, tx_ring_size;
        qla_hw_t                *hw;
        qla_hw_tx_cntxt_t       *tx_cntxt;
        uint8_t                 *vaddr;
        bus_addr_t              paddr;

        dev = ha->pci_dev;

        QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

        hw = &ha->hw;
        /*
         * Allocate Transmit Ring
         */
        tx_ring_size = (sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS);
        size = (tx_ring_size * ha->hw.num_tx_rings);

        hw->dma_buf.tx_ring.alignment = 8;
        hw->dma_buf.tx_ring.size = size + PAGE_SIZE;

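        /*
         * All transmit rings share this one DMA buffer: the rings are laid
         * out back to back, and the extra PAGE_SIZE at the end holds one
         * 32-bit consumer index per ring (carved out by the two loops below).
         */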
        if (ql_alloc_dmabuf(ha, &hw->dma_buf.tx_ring)) {
                device_printf(dev, "%s: tx ring alloc failed\n", __func__);
                goto ql_alloc_dma_exit;
        }

        vaddr = (uint8_t *)hw->dma_buf.tx_ring.dma_b;
        paddr = hw->dma_buf.tx_ring.dma_addr;

        for (i = 0; i < ha->hw.num_tx_rings; i++) {
                tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

                tx_cntxt->tx_ring_base = (q80_tx_cmd_t *)vaddr;
                tx_cntxt->tx_ring_paddr = paddr;

                vaddr += tx_ring_size;
                paddr += tx_ring_size;
        }

        for (i = 0; i < ha->hw.num_tx_rings; i++) {
                tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

                tx_cntxt->tx_cons = (uint32_t *)vaddr;
                tx_cntxt->tx_cons_paddr = paddr;

                vaddr += sizeof (uint32_t);
                paddr += sizeof (uint32_t);
        }

        ha->hw.dma_buf.flags.tx_ring = 1;

        QL_DPRINT2(ha, (dev, "%s: tx_ring phys %p virt %p\n",
                __func__, (void *)(hw->dma_buf.tx_ring.dma_addr),
                hw->dma_buf.tx_ring.dma_b));
        /*
         * Allocate Receive Descriptor Rings
         */

        for (i = 0; i < hw->num_rds_rings; i++) {

                hw->dma_buf.rds_ring[i].alignment = 8;
                hw->dma_buf.rds_ring[i].size =
                        (sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS;

                if (ql_alloc_dmabuf(ha, &hw->dma_buf.rds_ring[i])) {
                        device_printf(dev, "%s: rds ring[%d] alloc failed\n",
                                __func__, i);

                        for (j = 0; j < i; j++)
                                ql_free_dmabuf(ha, &hw->dma_buf.rds_ring[j]);

                        goto ql_alloc_dma_exit;
                }
                QL_DPRINT4(ha, (dev, "%s: rx_ring[%d] phys %p virt %p\n",
                        __func__, i, (void *)(hw->dma_buf.rds_ring[i].dma_addr),
                        hw->dma_buf.rds_ring[i].dma_b));
        }

        hw->dma_buf.flags.rds_ring = 1;

        /*
         * Allocate Status Descriptor Rings
         */

        for (i = 0; i < hw->num_sds_rings; i++) {
                hw->dma_buf.sds_ring[i].alignment = 8;
                hw->dma_buf.sds_ring[i].size =
                        (sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS;

                if (ql_alloc_dmabuf(ha, &hw->dma_buf.sds_ring[i])) {
                        device_printf(dev, "%s: sds ring alloc failed\n",
                                __func__);

                        for (j = 0; j < i; j++)
                                ql_free_dmabuf(ha, &hw->dma_buf.sds_ring[j]);

                        goto ql_alloc_dma_exit;
                }
                QL_DPRINT4(ha, (dev, "%s: sds_ring[%d] phys %p virt %p\n",
                        __func__, i,
                        (void *)(hw->dma_buf.sds_ring[i].dma_addr),
                        hw->dma_buf.sds_ring[i].dma_b));
        }
        for (i = 0; i < hw->num_sds_rings; i++) {
                hw->sds[i].sds_ring_base =
                        (q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b;
        }

        hw->dma_buf.flags.sds_ring = 1;

        return (0);

ql_alloc_dma_exit:
        ql_free_dma(ha);
        return (-1);
}

#define Q8_MBX_MSEC_DELAY       5000

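/*
 * Mailbox handshake as implemented below: wait for Q8_HOST_MBOX_CNTRL to
 * read 0 (firmware owns the mailbox while it is non-zero), write the
 * n_hmbox request words into Q8_HOST_MBOX0.., then set Q8_HOST_MBOX_CNTRL
 * to 1 to signal the firmware.  Completion is detected by polling
 * Q8_FW_MBOX_CNTRL, skipping Q8_FW_MBOX0 values of the form 0x8xxx, after
 * which the n_fwmbox response words are copied out and the firmware control
 * register and mailbox interrupt mask are cleared.  Polling uses DELAY()
 * when no_pause is set and qla_mdelay() otherwise.
 */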
1384 static int
1385 qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
1386         uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause)
1387 {
1388         uint32_t i;
1389         uint32_t data;
1390         int ret = 0;
1391         uint64_t start_usecs;
1392         uint64_t end_usecs;
1393         uint64_t msecs_200;
1394
1395         ql_sp_log(ha, 0, 5, no_pause, h_mbox[0], h_mbox[1], h_mbox[2], h_mbox[3]);
1396
1397         if (ha->offline || ha->qla_initiate_recovery) {
1398                 ql_sp_log(ha, 1, 2, ha->offline, ha->qla_initiate_recovery, 0, 0, 0);
1399                 goto exit_qla_mbx_cmd;
1400         }
1401
1402         if (((ha->err_inject & 0xFFFF) == INJCT_MBX_CMD_FAILURE) &&
1403                 (((ha->err_inject & ~0xFFFF) == ((h_mbox[0] & 0xFFFF) << 16))||
1404                 !(ha->err_inject & ~0xFFFF))) {
1405                 ret = -3;
1406                 QL_INITIATE_RECOVERY(ha);
1407                 goto exit_qla_mbx_cmd;
1408         }
1409
1410         start_usecs = qla_get_usec_timestamp();
1411
1412         if (no_pause)
1413                 i = 1000;
1414         else
1415                 i = Q8_MBX_MSEC_DELAY;
1416
1417         while (i) {
1418
1419                 if (ha->qla_initiate_recovery) {
1420                         ql_sp_log(ha, 2, 1, ha->qla_initiate_recovery, 0, 0, 0, 0);
1421                         return (-1);
1422                 }
1423
1424                 data = READ_REG32(ha, Q8_HOST_MBOX_CNTRL);
1425                 if (data == 0)
1426                         break;
1427                 if (no_pause) {
1428                         DELAY(1000);
1429                 } else {
1430                         qla_mdelay(__func__, 1);
1431                 }
1432                 i--;
1433         }
1434
1435         if (i == 0) {
1436                 device_printf(ha->pci_dev, "%s: host_mbx_cntrl 0x%08x\n",
1437                         __func__, data);
1438                 ql_sp_log(ha, 3, 1, data, 0, 0, 0, 0);
1439                 ret = -1;
1440                 ha->hw.mbx_comp_msecs[(Q8_MBX_COMP_MSECS - 2)]++;
1441                 QL_INITIATE_RECOVERY(ha);
1442                 goto exit_qla_mbx_cmd;
1443         }
1444
1445         for (i = 0; i < n_hmbox; i++) {
1446                 WRITE_REG32(ha, (Q8_HOST_MBOX0 + (i << 2)), *h_mbox);
1447                 h_mbox++;
1448         }
1449
1450         WRITE_REG32(ha, Q8_HOST_MBOX_CNTRL, 0x1);
1451
1452
1453         i = Q8_MBX_MSEC_DELAY;
1454         while (i) {
1455
1456                 if (ha->qla_initiate_recovery) {
1457                         ql_sp_log(ha, 4, 1, ha->qla_initiate_recovery, 0, 0, 0, 0);
1458                         return (-1);
1459                 }
1460
1461                 data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);
1462
1463                 if ((data & 0x3) == 1) {
1464                         data = READ_REG32(ha, Q8_FW_MBOX0);
1465                         if ((data & 0xF000) != 0x8000)
1466                                 break;
1467                 }
1468                 if (no_pause) {
1469                         DELAY(1000);
1470                 } else {
1471                         qla_mdelay(__func__, 1);
1472                 }
1473                 i--;
1474         }
1475         if (i == 0) {
1476                 device_printf(ha->pci_dev, "%s: fw_mbx_cntrl 0x%08x\n",
1477                         __func__, data);
1478                 ql_sp_log(ha, 5, 1, data, 0, 0, 0, 0);
1479                 ret = -2;
1480                 ha->hw.mbx_comp_msecs[(Q8_MBX_COMP_MSECS - 1)]++;
1481                 QL_INITIATE_RECOVERY(ha);
1482                 goto exit_qla_mbx_cmd;
1483         }
1484
1485         for (i = 0; i < n_fwmbox; i++) {
1486
1487                 if (ha->qla_initiate_recovery) {
1488                         ql_sp_log(ha, 6, 1, ha->qla_initiate_recovery, 0, 0, 0, 0);
1489                         return (-1);
1490                 }
1491
1492                 *fw_mbox++ = READ_REG32(ha, (Q8_FW_MBOX0 + (i << 2)));
1493         }
1494
1495         WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
1496         WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
1497
1498         end_usecs = qla_get_usec_timestamp();
1499
1500         if (end_usecs > start_usecs) {
1501                 msecs_200 = (end_usecs - start_usecs)/(1000 * 200); /* 200 msec buckets */
1502
1503                 if (msecs_200 < 15) 
1504                         ha->hw.mbx_comp_msecs[msecs_200]++;
1505                 else if (msecs_200 < 20)
1506                         ha->hw.mbx_comp_msecs[15]++;
1507                 else {
1508                         device_printf(ha->pci_dev, "%s: [%ju, %ju] %ju\n", __func__,
1509                                 (uintmax_t)start_usecs, (uintmax_t)end_usecs, (uintmax_t)msecs_200);
1510                         ha->hw.mbx_comp_msecs[16]++;
1511                 }
1512         }
1513         fw_mbox -= n_fwmbox; /* rewind: fw_mbox was advanced while reading the response */
1514         ql_sp_log(ha, 7, 5, fw_mbox[0], fw_mbox[1], fw_mbox[2], fw_mbox[3], fw_mbox[4]);
1515
1516 exit_qla_mbx_cmd:
1517         return (ret);
1518 }
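
/*
 * A minimal, hedged usage sketch for qla_mbx_cmd(). Judging from the
 * callers in this file, command word 0 packs the opcode in its low 16
 * bits, the command word count above that, and a version field in the
 * top bits; the firmware returns its completion status in the upper
 * bits of response word 0 (compare qla_get_nic_partition() below):
 *
 *      uint32_t *mbox = ha->hw.mbox;
 *      uint32_t err;
 *
 *      bzero(mbox, (sizeof (uint32_t) * Q8_NUM_MBOX));
 *      mbox[0] = Q8_MBX_GET_NIC_PARTITION | (0x2 << 16) | (0x2 << 29);
 *      if (qla_mbx_cmd(ha, mbox, 2, mbox, 19, 0) == 0)
 *              err = mbox[0] >> 25;
 */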
1519
1520 int
1521 qla_get_nic_partition(qla_host_t *ha, uint32_t *supports_9kb,
1522         uint32_t *num_rcvq)
1523 {
1524         uint32_t *mbox, err;
1525         device_t dev = ha->pci_dev;
1526
1527         bzero(ha->hw.mbox, (sizeof (uint32_t) * Q8_NUM_MBOX));
1528
1529         mbox = ha->hw.mbox;
1530
1531         mbox[0] = Q8_MBX_GET_NIC_PARTITION | (0x2 << 16) | (0x2 << 29); 
1532
1533         if (qla_mbx_cmd(ha, mbox, 2, mbox, 19, 0)) {
1534                 device_printf(dev, "%s: failed0\n", __func__);
1535                 return (-1);
1536         }
1537         err = mbox[0] >> 25; 
1538
1539         if (supports_9kb != NULL) {
1540                 if (mbox[16] & 0x80) /* bit 7 of mbox 16 */
1541                         *supports_9kb = 1;
1542                 else
1543                         *supports_9kb = 0;
1544         }
1545
1546         if (num_rcvq != NULL)
1547                 *num_rcvq =  ((mbox[6] >> 16) & 0xFFFF);
1548
1549         if ((err != 1) && (err != 0)) {
1550                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1551                 return (-1);
1552         }
1553         return 0;
1554 }
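
/*
 * Illustrative caller (see qla_confirm_9kb_enable() further below);
 * either out-parameter may be NULL when it is not needed:
 *
 *      uint32_t supports_9kb = 0;
 *
 *      qla_get_nic_partition(ha, &supports_9kb, NULL);
 *      if (!supports_9kb)
 *              ha->hw.enable_9kb = 0;
 */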
1555
1556 static int
1557 qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx, uint32_t num_intrs,
1558         uint32_t create)
1559 {
1560         uint32_t i, err;
1561         device_t dev = ha->pci_dev;
1562         q80_config_intr_t *c_intr;
1563         q80_config_intr_rsp_t *c_intr_rsp;
1564
1565         c_intr = (q80_config_intr_t *)ha->hw.mbox;
1566         bzero(c_intr, (sizeof (q80_config_intr_t)));
1567
1568         c_intr->opcode = Q8_MBX_CONFIG_INTR;
1569
1570         c_intr->count_version = (sizeof (q80_config_intr_t) >> 2);
1571         c_intr->count_version |= Q8_MBX_CMD_VERSION;
1572
1573         c_intr->nentries = num_intrs;
1574
1575         for (i = 0; i < num_intrs; i++) {
1576                 if (create) {
1577                         c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_CREATE;
1578                         c_intr->intr[i].msix_index = start_idx + 1 + i;
1579                 } else {
1580                         c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_DELETE;
1581                         c_intr->intr[i].msix_index =
1582                                 ha->hw.intr_id[(start_idx + i)];
1583                 }
1584
1585                 c_intr->intr[i].cmd_type |= Q8_MBX_CONFIG_INTR_TYPE_MSI_X;
1586         }
1587
1588         if (qla_mbx_cmd(ha, (uint32_t *)c_intr,
1589                 (sizeof (q80_config_intr_t) >> 2),
1590                 ha->hw.mbox, (sizeof (q80_config_intr_rsp_t) >> 2), 0)) {
1591                 device_printf(dev, "%s: %s failed0\n", __func__,
1592                         (create ? "create" : "delete"));
1593                 return (-1);
1594         }
1595
1596         c_intr_rsp = (q80_config_intr_rsp_t *)ha->hw.mbox;
1597
1598         err = Q8_MBX_RSP_STATUS(c_intr_rsp->regcnt_status);
1599
1600         if (err) {
1601                 device_printf(dev, "%s: %s failed1 [0x%08x, %d]\n", __func__,
1602                         (create ? "create" : "delete"), err, c_intr_rsp->nentries);
1603
1604                 for (i = 0; i < c_intr_rsp->nentries; i++) {
1605                         device_printf(dev, "%s: [%d]:[0x%x 0x%x 0x%x]\n",
1606                                 __func__, i, 
1607                                 c_intr_rsp->intr[i].status,
1608                                 c_intr_rsp->intr[i].intr_id,
1609                                 c_intr_rsp->intr[i].intr_src);
1610                 }
1611
1612                 return (-1);
1613         }
1614
1615         for (i = 0; ((i < num_intrs) && create); i++) {
1616                 if (!c_intr_rsp->intr[i].status) {
1617                         ha->hw.intr_id[(start_idx + i)] =
1618                                 c_intr_rsp->intr[i].intr_id;
1619                         ha->hw.intr_src[(start_idx + i)] =
1620                                 c_intr_rsp->intr[i].intr_src;
1621                 }
1622         }
1623
1624         return (0);
1625 }
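
/*
 * Interrupt contexts are configured in batches of at most
 * Q8_MAX_INTR_VECTORS; MSI-X vector 0 is left to the firmware mailbox,
 * hence the "start_idx + 1 + i" above. A sketch of the create path as
 * used by ql_init_hw_if() below (error unwinding omitted):
 *
 *      uint32_t i, num_msix;
 *
 *      for (i = 0; i < ha->hw.num_sds_rings; i += num_msix) {
 *              num_msix = MIN(Q8_MAX_INTR_VECTORS,
 *                  ha->hw.num_sds_rings - i);
 *              if (qla_config_intr_cntxt(ha, i, num_msix, 1))
 *                      return (-1);
 *      }
 */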
1626
1627 /*
1628  * Name: qla_config_rss
1629  * Function: Configure RSS for the context/interface.
1630  */
1631 static const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL,
1632                         0x8030f20c77cb2da3ULL,
1633                         0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
1634                         0x255b0ec26d5a56daULL };
1635
1636 static int
1637 qla_config_rss(qla_host_t *ha, uint16_t cntxt_id)
1638 {
1639         q80_config_rss_t        *c_rss;
1640         q80_config_rss_rsp_t    *c_rss_rsp;
1641         uint32_t                err, i;
1642         device_t                dev = ha->pci_dev;
1643
1644         c_rss = (q80_config_rss_t *)ha->hw.mbox;
1645         bzero(c_rss, (sizeof (q80_config_rss_t)));
1646
1647         c_rss->opcode = Q8_MBX_CONFIG_RSS;
1648
1649         c_rss->count_version = (sizeof (q80_config_rss_t) >> 2);
1650         c_rss->count_version |= Q8_MBX_CMD_VERSION;
1651
1652         c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP_IP |
1653                                 Q8_MBX_RSS_HASH_TYPE_IPV6_TCP_IP);
1654         //c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP |
1655         //                      Q8_MBX_RSS_HASH_TYPE_IPV6_TCP);
1656
1657         c_rss->flags = Q8_MBX_RSS_FLAGS_ENABLE_RSS;
1658         c_rss->flags |= Q8_MBX_RSS_FLAGS_USE_IND_TABLE;
1659
1660         c_rss->indtbl_mask = Q8_MBX_RSS_INDTBL_MASK;
1661
1662         c_rss->indtbl_mask |= Q8_MBX_RSS_FLAGS_MULTI_RSS_VALID;
1663         c_rss->flags |= Q8_MBX_RSS_FLAGS_TYPE_CRSS;
1664
1665         c_rss->cntxt_id = cntxt_id;
1666
1667         for (i = 0; i < 5; i++) {
1668                 c_rss->rss_key[i] = rss_key[i];
1669         }
1670
1671         if (qla_mbx_cmd(ha, (uint32_t *)c_rss,
1672                 (sizeof (q80_config_rss_t) >> 2),
1673                 ha->hw.mbox, (sizeof(q80_config_rss_rsp_t) >> 2), 0)) {
1674                 device_printf(dev, "%s: failed0\n", __func__);
1675                 return (-1);
1676         }
1677         c_rss_rsp = (q80_config_rss_rsp_t *)ha->hw.mbox;
1678
1679         err = Q8_MBX_RSP_STATUS(c_rss_rsp->regcnt_status);
1680
1681         if (err) {
1682                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1683                 return (-1);
1684         }
1685         return 0;
1686 }
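
/*
 * Conceptual model only: with the 40-byte key above (handed to the
 * firmware as five 64-bit words), the firmware picks a receive ring
 * per packet roughly as follows; toeplitz(), pkt_tuple and
 * ind_table_size are hypothetical names for the hash the firmware
 * computes over the selected hash_type fields:
 *
 *      uint32_t hash = toeplitz(rss_key, pkt_tuple);
 *      uint32_t ring = ind_table[hash % ind_table_size];
 */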
1687
1688 static int
1689 qla_set_rss_ind_table(qla_host_t *ha, uint32_t start_idx, uint32_t count,
1690         uint16_t cntxt_id, uint8_t *ind_table)
1691 {
1692         q80_config_rss_ind_table_t      *c_rss_ind;
1693         q80_config_rss_ind_table_rsp_t  *c_rss_ind_rsp;
1694         uint32_t                        err;
1695         device_t                        dev = ha->pci_dev;
1696
1697         if ((count > Q8_RSS_IND_TBL_SIZE) ||
1698                 ((start_idx + count - 1) > Q8_RSS_IND_TBL_MAX_IDX)) {
1699                 device_printf(dev, "%s: illegal start_idx/count [%d, %d]\n", __func__,
1700                         start_idx, count);
1701                 return (-1);
1702         }
1703
1704         c_rss_ind = (q80_config_rss_ind_table_t *)ha->hw.mbox;
1705         bzero(c_rss_ind, sizeof (q80_config_rss_ind_table_t));
1706
1707         c_rss_ind->opcode = Q8_MBX_CONFIG_RSS_TABLE;
1708         c_rss_ind->count_version = (sizeof (q80_config_rss_ind_table_t) >> 2);
1709         c_rss_ind->count_version |= Q8_MBX_CMD_VERSION;
1710
1711         c_rss_ind->start_idx = start_idx;
1712         c_rss_ind->end_idx = start_idx + count - 1;
1713         c_rss_ind->cntxt_id = cntxt_id;
1714         bcopy(ind_table, c_rss_ind->ind_table, count);
1715
1716         if (qla_mbx_cmd(ha, (uint32_t *)c_rss_ind,
1717                 (sizeof (q80_config_rss_ind_table_t) >> 2), ha->hw.mbox,
1718                 (sizeof(q80_config_rss_ind_table_rsp_t) >> 2), 0)) {
1719                 device_printf(dev, "%s: failed0\n", __func__);
1720                 return (-1);
1721         }
1722
1723         c_rss_ind_rsp = (q80_config_rss_ind_table_rsp_t *)ha->hw.mbox;
1724         err = Q8_MBX_RSP_STATUS(c_rss_ind_rsp->regcnt_status);
1725
1726         if (err) {
1727                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1728                 return (-1);
1729         }
1730         return 0;
1731 }
1732
1733 /*
1734  * Name: qla_config_intr_coalesce
1735  * Function: Configure Interrupt Coalescing.
1736  */
1737 static int
1738 qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable,
1739         int rcv)
1740 {
1741         q80_config_intr_coalesc_t       *intrc;
1742         q80_config_intr_coalesc_rsp_t   *intrc_rsp;
1743         uint32_t                        err, i;
1744         device_t                        dev = ha->pci_dev;
1745         
1746         intrc = (q80_config_intr_coalesc_t *)ha->hw.mbox;
1747         bzero(intrc, (sizeof (q80_config_intr_coalesc_t)));
1748
1749         intrc->opcode = Q8_MBX_CONFIG_INTR_COALESCE;
1750         intrc->count_version = (sizeof (q80_config_intr_coalesc_t) >> 2);
1751         intrc->count_version |= Q8_MBX_CMD_VERSION;
1752
1753         if (rcv) {
1754                 intrc->flags = Q8_MBX_INTRC_FLAGS_RCV;
1755                 intrc->max_pkts = ha->hw.rcv_intr_coalesce & 0xFFFF;
1756                 intrc->max_mswait = (ha->hw.rcv_intr_coalesce >> 16) & 0xFFFF;
1757         } else {
1758                 intrc->flags = Q8_MBX_INTRC_FLAGS_XMT;
1759                 intrc->max_pkts = ha->hw.xmt_intr_coalesce & 0xFFFF;
1760                 intrc->max_mswait = (ha->hw.xmt_intr_coalesce >> 16) & 0xFFFF;
1761         }
1762
1763         intrc->cntxt_id = cntxt_id;
1764
1765         if (tenable) {
1766                 intrc->flags |= Q8_MBX_INTRC_FLAGS_PERIODIC;
1767                 intrc->timer_type = Q8_MBX_INTRC_TIMER_PERIODIC;
1768
1769                 for (i = 0; i < ha->hw.num_sds_rings; i++) {
1770                         intrc->sds_ring_mask |= (1 << i);
1771                 }
1772                 intrc->ms_timeout = 1000;
1773         }
1774
1775         if (qla_mbx_cmd(ha, (uint32_t *)intrc,
1776                 (sizeof (q80_config_intr_coalesc_t) >> 2),
1777                 ha->hw.mbox, (sizeof(q80_config_intr_coalesc_rsp_t) >> 2), 0)) {
1778                 device_printf(dev, "%s: failed0\n", __func__);
1779                 return (-1);
1780         }
1781         intrc_rsp = (q80_config_intr_coalesc_rsp_t *)ha->hw.mbox;
1782
1783         err = Q8_MBX_RSP_STATUS(intrc_rsp->regcnt_status);
1784
1785         if (err) {
1786                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1787                 return (-1);
1788         }
1789         
1790         return 0;
1791 }
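
/*
 * The rcv_intr_coalesce/xmt_intr_coalesce fields pack both thresholds
 * into one 32-bit word, as the unpacking above shows: max_pkts in the
 * low 16 bits, max_mswait in the high 16 bits. For example, to request
 * a receive interrupt after 256 packets or 3 ms, whichever comes first
 * (illustrative values):
 *
 *      ha->hw.rcv_intr_coalesce = (3 << 16) | 256;
 *      if (qla_config_intr_coalesce(ha, ha->hw.rcv_cntxt_id, 0, 1))
 *              return (-1);
 */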
1792
1793
1794 /*
1795  * Name: qla_config_mac_addr
1796  * Function: binds a MAC address to the context/interface.
1797  *      Can be unicast, multicast or broadcast.
1798  */
1799 static int
1800 qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac,
1801         uint32_t num_mac)
1802 {
1803         q80_config_mac_addr_t           *cmac;
1804         q80_config_mac_addr_rsp_t       *cmac_rsp;
1805         uint32_t                        err;
1806         device_t                        dev = ha->pci_dev;
1807         int                             i;
1808         uint8_t                         *mac_cpy = mac_addr;
1809
1810         if (num_mac > Q8_MAX_MAC_ADDRS) {
1811                 device_printf(dev, "%s: %s num_mac [0x%x] > Q8_MAX_MAC_ADDRS\n",
1812                         __func__, (add_mac ? "Add" : "Del"), num_mac);
1813                 return (-1);
1814         }
1815
1816         cmac = (q80_config_mac_addr_t *)ha->hw.mbox;
1817         bzero(cmac, (sizeof (q80_config_mac_addr_t)));
1818
1819         cmac->opcode = Q8_MBX_CONFIG_MAC_ADDR;
1820         cmac->count_version = sizeof (q80_config_mac_addr_t) >> 2;
1821         cmac->count_version |= Q8_MBX_CMD_VERSION;
1822
1823         if (add_mac) 
1824                 cmac->cmd = Q8_MBX_CMAC_CMD_ADD_MAC_ADDR;
1825         else
1826                 cmac->cmd = Q8_MBX_CMAC_CMD_DEL_MAC_ADDR;
1827                 
1828         cmac->cmd |= Q8_MBX_CMAC_CMD_CAM_INGRESS;
1829
1830         cmac->nmac_entries = num_mac;
1831         cmac->cntxt_id = ha->hw.rcv_cntxt_id;
1832
1833         for (i = 0; i < num_mac; i++) {
1834                 bcopy(mac_addr, cmac->mac_addr[i].addr, Q8_ETHER_ADDR_LEN); 
1835                 mac_addr = mac_addr + ETHER_ADDR_LEN;
1836         }
1837
1838         if (qla_mbx_cmd(ha, (uint32_t *)cmac,
1839                 (sizeof (q80_config_mac_addr_t) >> 2),
1840                 ha->hw.mbox, (sizeof(q80_config_mac_addr_rsp_t) >> 2), 1)) {
1841                 device_printf(dev, "%s: %s failed0\n", __func__,
1842                         (add_mac ? "Add" : "Del"));
1843                 return (-1);
1844         }
1845         cmac_rsp = (q80_config_mac_addr_rsp_t *)ha->hw.mbox;
1846
1847         err = Q8_MBX_RSP_STATUS(cmac_rsp->regcnt_status);
1848
1849         if (err) {
1850                 device_printf(dev, "%s: %s failed1 [0x%08x]\n", __func__,
1851                         (add_mac ? "Add" : "Del"), err);
1852                 for (i = 0; i < num_mac; i++) {
1853                         device_printf(dev, "%s: %02x:%02x:%02x:%02x:%02x:%02x\n",
1854                                 __func__, mac_cpy[0], mac_cpy[1], mac_cpy[2],
1855                                 mac_cpy[3], mac_cpy[4], mac_cpy[5]);
1856                         mac_cpy += ETHER_ADDR_LEN;
1857                 }
1858                 return (-1);
1859         }
1860         
1861         return 0;
1862 }
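
/*
 * Typical callers (see ql_init_hw_if() below) program one address at a
 * time; num_mac allows batching up to Q8_MAX_MAC_ADDRS consecutive
 * 6-byte addresses taken from a flat array:
 *
 *      if (qla_config_mac_addr(ha, ha->hw.mac_addr, 1, 1))
 *              return (-1);
 */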
1863
1864
1865 /*
1866  * Name: qla_set_mac_rcv_mode
1867  * Function: Enable/Disable AllMulticast and Promiscuous Modes.
1868  */
1869 static int
1870 qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode)
1871 {
1872         q80_config_mac_rcv_mode_t       *rcv_mode;
1873         uint32_t                        err;
1874         q80_config_mac_rcv_mode_rsp_t   *rcv_mode_rsp;
1875         device_t                        dev = ha->pci_dev;
1876
1877         rcv_mode = (q80_config_mac_rcv_mode_t *)ha->hw.mbox;
1878         bzero(rcv_mode, (sizeof (q80_config_mac_rcv_mode_t)));
1879
1880         rcv_mode->opcode = Q8_MBX_CONFIG_MAC_RX_MODE;
1881         rcv_mode->count_version = sizeof (q80_config_mac_rcv_mode_t) >> 2;
1882         rcv_mode->count_version |= Q8_MBX_CMD_VERSION;
1883
1884         rcv_mode->mode = mode;
1885
1886         rcv_mode->cntxt_id = ha->hw.rcv_cntxt_id;
1887
1888         if (qla_mbx_cmd(ha, (uint32_t *)rcv_mode,
1889                 (sizeof (q80_config_mac_rcv_mode_t) >> 2),
1890                 ha->hw.mbox, (sizeof(q80_config_mac_rcv_mode_rsp_t) >> 2), 1)) {
1891                 device_printf(dev, "%s: failed0\n", __func__);
1892                 return (-1);
1893         }
1894         rcv_mode_rsp = (q80_config_mac_rcv_mode_rsp_t *)ha->hw.mbox;
1895
1896         err = Q8_MBX_RSP_STATUS(rcv_mode_rsp->regcnt_status);
1897
1898         if (err) {
1899                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1900                 return (-1);
1901         }
1902         
1903         return 0;
1904 }
1905
1906 int
1907 ql_set_promisc(qla_host_t *ha)
1908 {
1909         int ret;
1910
1911         ha->hw.mac_rcv_mode |= Q8_MBX_MAC_RCV_PROMISC_ENABLE;
1912         ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1913         return (ret);
1914 }
1915
1916 void
1917 qla_reset_promisc(qla_host_t *ha)
1918 {
1919         ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_RCV_PROMISC_ENABLE;
1920         (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1921 }
1922
1923 int
1924 ql_set_allmulti(qla_host_t *ha)
1925 {
1926         int ret;
1927
1928         ha->hw.mac_rcv_mode |= Q8_MBX_MAC_ALL_MULTI_ENABLE;
1929         ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1930         return (ret);
1931 }
1932
1933 void
1934 qla_reset_allmulti(qla_host_t *ha)
1935 {
1936         ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_ALL_MULTI_ENABLE;
1937         (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1938 }
1939
1940 /*
1941  * Name: ql_set_max_mtu
1942  * Function:
1943  *      Sets the maximum transfer unit size for the specified rcv context.
1944  */
1945 int
1946 ql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id)
1947 {
1948         device_t                dev;
1949         q80_set_max_mtu_t       *max_mtu;
1950         q80_set_max_mtu_rsp_t   *max_mtu_rsp;
1951         uint32_t                err;
1952
1953         dev = ha->pci_dev;
1954
1955         max_mtu = (q80_set_max_mtu_t *)ha->hw.mbox;
1956         bzero(max_mtu, (sizeof (q80_set_max_mtu_t)));
1957
1958         max_mtu->opcode = Q8_MBX_SET_MAX_MTU;
1959         max_mtu->count_version = (sizeof (q80_set_max_mtu_t) >> 2);
1960         max_mtu->count_version |= Q8_MBX_CMD_VERSION;
1961
1962         max_mtu->cntxt_id = cntxt_id;
1963         max_mtu->mtu = mtu;
1964
1965         if (qla_mbx_cmd(ha, (uint32_t *)max_mtu,
1966                 (sizeof (q80_set_max_mtu_t) >> 2),
1967                 ha->hw.mbox, (sizeof (q80_set_max_mtu_rsp_t) >> 2), 1)) {
1968                 device_printf(dev, "%s: failed\n", __func__);
1969                 return -1;
1970         }
1971
1972         max_mtu_rsp = (q80_set_max_mtu_rsp_t *)ha->hw.mbox;
1973
1974         err = Q8_MBX_RSP_STATUS(max_mtu_rsp->regcnt_status);
1975
1976         if (err) {
1977                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1978         }
1979
1980         return 0;
1981 }
1982
1983 static int
1984 qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id)
1985 {
1986         device_t                dev;
1987         q80_link_event_t        *lnk;
1988         q80_link_event_rsp_t    *lnk_rsp;
1989         uint32_t                err;
1990
1991         dev = ha->pci_dev;
1992
1993         lnk = (q80_link_event_t *)ha->hw.mbox;
1994         bzero(lnk, (sizeof (q80_link_event_t)));
1995
1996         lnk->opcode = Q8_MBX_LINK_EVENT_REQ;
1997         lnk->count_version = (sizeof (q80_link_event_t) >> 2);
1998         lnk->count_version |= Q8_MBX_CMD_VERSION;
1999
2000         lnk->cntxt_id = cntxt_id;
2001         lnk->cmd = Q8_LINK_EVENT_CMD_ENABLE_ASYNC;
2002
2003         if (qla_mbx_cmd(ha, (uint32_t *)lnk, (sizeof (q80_link_event_t) >> 2),
2004                 ha->hw.mbox, (sizeof (q80_link_event_rsp_t) >> 2), 0)) {
2005                 device_printf(dev, "%s: failed\n", __func__);
2006                 return -1;
2007         }
2008
2009         lnk_rsp = (q80_link_event_rsp_t *)ha->hw.mbox;
2010
2011         err = Q8_MBX_RSP_STATUS(lnk_rsp->regcnt_status);
2012
2013         if (err) {
2014                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
2015         }
2016
2017         return 0;
2018 }
2019
2020 static int
2021 qla_config_fw_lro(qla_host_t *ha, uint16_t cntxt_id)
2022 {
2023         device_t                dev;
2024         q80_config_fw_lro_t     *fw_lro;
2025         q80_config_fw_lro_rsp_t *fw_lro_rsp;
2026         uint32_t                err;
2027
2028         dev = ha->pci_dev;
2029
2030         fw_lro = (q80_config_fw_lro_t *)ha->hw.mbox;
2031         bzero(fw_lro, sizeof(q80_config_fw_lro_t));
2032
2033         fw_lro->opcode = Q8_MBX_CONFIG_FW_LRO;
2034         fw_lro->count_version = (sizeof (q80_config_fw_lro_t) >> 2);
2035         fw_lro->count_version |= Q8_MBX_CMD_VERSION;
2036
2037         fw_lro->flags |= Q8_MBX_FW_LRO_IPV4 | Q8_MBX_FW_LRO_IPV4_WO_DST_IP_CHK;
2038         fw_lro->flags |= Q8_MBX_FW_LRO_IPV6 | Q8_MBX_FW_LRO_IPV6_WO_DST_IP_CHK;
2039
2040         fw_lro->cntxt_id = cntxt_id;
2041
2042         if (qla_mbx_cmd(ha, (uint32_t *)fw_lro,
2043                 (sizeof (q80_config_fw_lro_t) >> 2),
2044                 ha->hw.mbox, (sizeof (q80_config_fw_lro_rsp_t) >> 2), 0)) {
2045                 device_printf(dev, "%s: failed\n", __func__);
2046                 return -1;
2047         }
2048
2049         fw_lro_rsp = (q80_config_fw_lro_rsp_t *)ha->hw.mbox;
2050
2051         err = Q8_MBX_RSP_STATUS(fw_lro_rsp->regcnt_status);
2052
2053         if (err) {
2054                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
2055         }
2056
2057         return 0;
2058 }
2059
2060 static int
2061 qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode)
2062 {
2063         device_t                dev;
2064         q80_hw_config_t         *hw_config;
2065         q80_hw_config_rsp_t     *hw_config_rsp;
2066         uint32_t                err;
2067
2068         dev = ha->pci_dev;
2069
2070         hw_config = (q80_hw_config_t *)ha->hw.mbox;
2071         bzero(hw_config, sizeof (q80_hw_config_t));
2072
2073         hw_config->opcode = Q8_MBX_HW_CONFIG;
2074         hw_config->count_version = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE_COUNT;
2075         hw_config->count_version |= Q8_MBX_CMD_VERSION;
2076
2077         hw_config->cmd = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE;
2078
2079         hw_config->u.set_cam_search_mode.mode = search_mode;
2080
2081         if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
2082                 (sizeof (q80_hw_config_t) >> 2),
2083                 ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
2084                 device_printf(dev, "%s: failed\n", __func__);
2085                 return -1;
2086         }
2087         hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;
2088
2089         err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);
2090
2091         if (err) {
2092                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
2093         }
2094
2095         return 0;
2096 }
2097
2098 static int
2099 qla_get_cam_search_mode(qla_host_t *ha)
2100 {
2101         device_t                dev;
2102         q80_hw_config_t         *hw_config;
2103         q80_hw_config_rsp_t     *hw_config_rsp;
2104         uint32_t                err;
2105
2106         dev = ha->pci_dev;
2107
2108         hw_config = (q80_hw_config_t *)ha->hw.mbox;
2109         bzero(hw_config, sizeof (q80_hw_config_t));
2110
2111         hw_config->opcode = Q8_MBX_HW_CONFIG;
2112         hw_config->count_version = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE_COUNT;
2113         hw_config->count_version |= Q8_MBX_CMD_VERSION;
2114
2115         hw_config->cmd = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE;
2116
2117         if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
2118                 (sizeof (q80_hw_config_t) >> 2),
2119                 ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
2120                 device_printf(dev, "%s: failed\n", __func__);
2121                 return -1;
2122         }
2123         hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;
2124
2125         err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);
2126
2127         if (err) {
2128                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
2129         } else {
2130                 device_printf(dev, "%s: cam search mode [0x%08x]\n", __func__,
2131                         hw_config_rsp->u.get_cam_search_mode.mode);
2132         }
2133
2134         return 0;
2135 }
2136
2137 static int
2138 qla_get_hw_stats(qla_host_t *ha, uint32_t cmd, uint32_t rsp_size)
2139 {
2140         device_t                dev;
2141         q80_get_stats_t         *stat;
2142         q80_get_stats_rsp_t     *stat_rsp;
2143         uint32_t                err;
2144
2145         dev = ha->pci_dev;
2146
2147         stat = (q80_get_stats_t *)ha->hw.mbox;
2148         bzero(stat, (sizeof (q80_get_stats_t)));
2149
2150         stat->opcode = Q8_MBX_GET_STATS;
2151         stat->count_version = 2;
2152         stat->count_version |= Q8_MBX_CMD_VERSION;
2153
2154         stat->cmd = cmd;
2155
2156         if (qla_mbx_cmd(ha, (uint32_t *)stat, 2,
2157                 ha->hw.mbox, (rsp_size >> 2), 0)) {
2158                 device_printf(dev, "%s: failed\n", __func__);
2159                 return -1;
2160         }
2161
2162         stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
2163
2164         err = Q8_MBX_RSP_STATUS(stat_rsp->regcnt_status);
2165
2166         if (err) {
2167                 return -1;
2168         }
2169
2170         return 0;
2171 }
2172
2173 void
2174 ql_get_stats(qla_host_t *ha)
2175 {
2176         q80_get_stats_rsp_t     *stat_rsp;
2177         q80_mac_stats_t         *mstat;
2178         q80_xmt_stats_t         *xstat;
2179         q80_rcv_stats_t         *rstat;
2180         uint32_t                cmd;
2181         int                     i;
2182         struct ifnet *ifp = ha->ifp;
2183
2184         if (ifp == NULL)
2185                 return;
2186
2187         if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) != 0) {
2188                 device_printf(ha->pci_dev, "%s: failed\n", __func__);
2189                 return;
2190         }
2191
2192         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2193                 QLA_UNLOCK(ha, __func__);
2194                 return;
2195         }
2196
2197         stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
2198         /*
2199          * Get MAC Statistics
2200          */
2201         cmd = Q8_GET_STATS_CMD_TYPE_MAC;
2202 //      cmd |= Q8_GET_STATS_CMD_CLEAR;
2203
2204         cmd |= ((ha->pci_func & 0x1) << 16);
2205
2206         if (ha->qla_watchdog_pause || (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) ||
2207                 ha->offline)
2208                 goto ql_get_stats_exit;
2209
2210         if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
2211                 mstat = (q80_mac_stats_t *)&stat_rsp->u.mac;
2212                 bcopy(mstat, &ha->hw.mac, sizeof(q80_mac_stats_t));
2213         } else {
2214                 device_printf(ha->pci_dev, "%s: mac failed [0x%08x]\n",
2215                         __func__, ha->hw.mbox[0]);
2216         }
2217         /*
2218          * Get RCV Statistics
2219          */
2220         cmd = Q8_GET_STATS_CMD_RCV | Q8_GET_STATS_CMD_TYPE_CNTXT;
2221 //      cmd |= Q8_GET_STATS_CMD_CLEAR;
2222         cmd |= (ha->hw.rcv_cntxt_id << 16);
2223
2224         if (ha->qla_watchdog_pause || (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) ||
2225                 ha->offline)
2226                 goto ql_get_stats_exit;
2227
2228         if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
2229                 rstat = (q80_rcv_stats_t *)&stat_rsp->u.rcv;
2230                 bcopy(rstat, &ha->hw.rcv, sizeof(q80_rcv_stats_t));
2231         } else {
2232                 device_printf(ha->pci_dev, "%s: rcv failed [0x%08x]\n",
2233                         __func__, ha->hw.mbox[0]);
2234         }
2235
2236         if (ha->qla_watchdog_pause || (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) ||
2237                 ha->offline)
2238                 goto ql_get_stats_exit;
2239         /*
2240          * Get XMT Statistics
2241          */
2242         for (i = 0 ; (i < ha->hw.num_tx_rings); i++) {
2243                 if (ha->qla_watchdog_pause ||
2244                         (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) ||
2245                         ha->offline)
2246                         goto ql_get_stats_exit;
2247
2248                 cmd = Q8_GET_STATS_CMD_XMT | Q8_GET_STATS_CMD_TYPE_CNTXT;
2249 //              cmd |= Q8_GET_STATS_CMD_CLEAR;
2250                 cmd |= (ha->hw.tx_cntxt[i].tx_cntxt_id << 16);
2251
2252                 if (qla_get_hw_stats(ha, cmd, sizeof(q80_get_stats_rsp_t))
2253                         == 0) {
2254                         xstat = (q80_xmt_stats_t *)&stat_rsp->u.xmt;
2255                         bcopy(xstat, &ha->hw.xmt[i], sizeof(q80_xmt_stats_t));
2256                 } else {
2257                         device_printf(ha->pci_dev, "%s: xmt failed [0x%08x]\n",
2258                                 __func__, ha->hw.mbox[0]);
2259                 }
2260         }
2261
2262 ql_get_stats_exit:
2263         QLA_UNLOCK(ha, __func__);
2264
2265         return;
2266 }
2267
2268 /*
2269  * Name: qla_tx_tso
2270  * Function: Checks if the packet to be transmitted is a candidate for
2271  *      Large TCP Segment Offload. If yes, the appropriate fields in the Tx
2272  *      Ring Structure are plugged in.
2273  */
2274 static int
2275 qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr)
2276 {
2277         struct ether_vlan_header *eh;
2278         struct ip *ip = NULL;
2279         struct ip6_hdr *ip6 = NULL;
2280         struct tcphdr *th = NULL;
2281         uint32_t ehdrlen,  hdrlen, ip_hlen, tcp_hlen, tcp_opt_off;
2282         uint16_t etype, opcode, offload = 1;
2283         device_t dev;
2284
2285         dev = ha->pci_dev;
2286
2287
2288         eh = mtod(mp, struct ether_vlan_header *);
2289
2290         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2291                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2292                 etype = ntohs(eh->evl_proto);
2293         } else {
2294                 ehdrlen = ETHER_HDR_LEN;
2295                 etype = ntohs(eh->evl_encap_proto);
2296         }
2297
2298         hdrlen = 0;
2299
2300         switch (etype) {
2301                 case ETHERTYPE_IP:
2302
2303                         tcp_opt_off = ehdrlen + sizeof(struct ip) +
2304                                         sizeof(struct tcphdr);
2305
2306                         if (mp->m_len < tcp_opt_off) {
2307                                 m_copydata(mp, 0, tcp_opt_off, hdr);
2308                                 ip = (struct ip *)(hdr + ehdrlen);
2309                         } else {
2310                                 ip = (struct ip *)(mp->m_data + ehdrlen);
2311                         }
2312
2313                         ip_hlen = ip->ip_hl << 2;
2314                         opcode = Q8_TX_CMD_OP_XMT_TCP_LSO;
2315
2316                                 
2317                         if ((ip->ip_p != IPPROTO_TCP) ||
2318                                 (ip_hlen != sizeof (struct ip))){
2319                                 /* IP Options are not supported */
2320
2321                                 offload = 0;
2322                         } else
2323                                 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
2324
2325                 break;
2326
2327                 case ETHERTYPE_IPV6:
2328
2329                         tcp_opt_off = ehdrlen + sizeof(struct ip6_hdr) +
2330                                         sizeof (struct tcphdr);
2331
2332                         if (mp->m_len < tcp_opt_off) {
2333                                 m_copydata(mp, 0, tcp_opt_off, hdr);
2334                                 ip6 = (struct ip6_hdr *)(hdr + ehdrlen);
2335                         } else {
2336                                 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2337                         }
2338
2339                         ip_hlen = sizeof(struct ip6_hdr);
2340                         opcode = Q8_TX_CMD_OP_XMT_TCP_LSO_IPV6;
2341
2342                         if (ip6->ip6_nxt != IPPROTO_TCP) {
2343                                 //device_printf(dev, "%s: ipv6\n", __func__);
2344                                 offload = 0;
2345                         } else
2346                                 th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
2347                 break;
2348
2349                 default:
2350                         QL_DPRINT8(ha, (dev, "%s: type!=ip\n", __func__));
2351                         offload = 0;
2352                 break;
2353         }
2354
2355         if (!offload)
2356                 return (-1);
2357
2358         tcp_hlen = th->th_off << 2;
2359         hdrlen = ehdrlen + ip_hlen + tcp_hlen;
2360
2361         if (mp->m_len < hdrlen) {
2362                 if (mp->m_len < tcp_opt_off) {
2363                         if (tcp_hlen > sizeof(struct tcphdr)) {
2364                                 m_copydata(mp, tcp_opt_off,
2365                                         (tcp_hlen - sizeof(struct tcphdr)),
2366                                         &hdr[tcp_opt_off]);
2367                         }
2368                 } else {
2369                         m_copydata(mp, 0, hdrlen, hdr);
2370                 }
2371         }
2372
2373         tx_cmd->mss = mp->m_pkthdr.tso_segsz;
2374
2375         tx_cmd->flags_opcode = opcode ;
2376         tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;
2377         tx_cmd->total_hdr_len = hdrlen;
2378
2379         /* Multicast: least significant bit of the first destination MAC byte is set */
2380         if (eh->evl_dhost[0] & 0x01) {
2381                 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST;
2382         }
2383
2384         if (mp->m_len < hdrlen) {
2385                 printf("%s: hdrlen %u exceeds first mbuf length\n", __func__, hdrlen);
2386                 return (1);
2387         }
2388
2389         return (0);
2390 }
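
/*
 * Worked example of the header accounting above: for an untagged IPv4
 * TCP segment with no TCP options,
 *
 *      ehdrlen  = ETHER_HDR_LEN            = 14
 *      ip_hlen  = sizeof(struct ip)        = 20
 *      tcp_hlen = sizeof(struct tcphdr)    = 20
 *      hdrlen   = 14 + 20 + 20             = 54 bytes
 *
 * and tx_cmd->tcp_hdr_off = 34. The return value distinguishes three
 * cases: -1 (not a TSO candidate), 0 (header contiguous in the first
 * mbuf), 1 (header was copied into the caller-supplied hdr buffer).
 */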
2391
2392 /*
2393  * Name: qla_tx_chksum
2394  * Function: Checks if the packet to be transmitted is a candidate for
2395  *      TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx
2396  *      Ring Structure are plugged in.
2397  */
2398 static int
2399 qla_tx_chksum(qla_host_t *ha, struct mbuf *mp, uint32_t *op_code,
2400         uint32_t *tcp_hdr_off)
2401 {
2402         struct ether_vlan_header *eh;
2403         struct ip *ip;
2404         struct ip6_hdr *ip6;
2405         uint32_t ehdrlen, ip_hlen;
2406         uint16_t etype, opcode, offload = 1;
2407         device_t dev;
2408         uint8_t buf[sizeof(struct ip6_hdr)];
2409
2410         dev = ha->pci_dev;
2411
2412         *op_code = 0;
2413
2414         if ((mp->m_pkthdr.csum_flags &
2415                 (CSUM_TCP|CSUM_UDP|CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) == 0)
2416                 return (-1);
2417
2418         eh = mtod(mp, struct ether_vlan_header *);
2419
2420         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2421                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2422                 etype = ntohs(eh->evl_proto);
2423         } else {
2424                 ehdrlen = ETHER_HDR_LEN;
2425                 etype = ntohs(eh->evl_encap_proto);
2426         }
2427
2428                 
2429         switch (etype) {
2430                 case ETHERTYPE_IP:
2431                         ip = (struct ip *)(mp->m_data + ehdrlen);
2432
2433                         ip_hlen = sizeof (struct ip);
2434
2435                         if (mp->m_len < (ehdrlen + ip_hlen)) {
2436                                 m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
2437                                 ip = (struct ip *)buf;
2438                         }
2439
2440                         if (ip->ip_p == IPPROTO_TCP)
2441                                 opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM;
2442                         else if (ip->ip_p == IPPROTO_UDP)
2443                                 opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM;
2444                         else {
2445                                 //device_printf(dev, "%s: ipv4\n", __func__);
2446                                 offload = 0;
2447                         }
2448                 break;
2449
2450                 case ETHERTYPE_IPV6:
2451                         ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2452
2453                         ip_hlen = sizeof(struct ip6_hdr);
2454
2455                         if (mp->m_len < (ehdrlen + ip_hlen)) {
2456                                 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
2457                                         buf);
2458                                 ip6 = (struct ip6_hdr *)buf;
2459                         }
2460
2461                         if (ip6->ip6_nxt == IPPROTO_TCP)
2462                                 opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6;
2463                         else if (ip6->ip6_nxt == IPPROTO_UDP)
2464                                 opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6;
2465                         else {
2466                                 //device_printf(dev, "%s: ipv6\n", __func__);
2467                                 offload = 0;
2468                         }
2469                 break;
2470
2471                 default:
2472                         offload = 0;
2473                 break;
2474         }
2475         if (!offload)
2476                 return (-1);
2477
2478         *op_code = opcode;
2479         *tcp_hdr_off = (ip_hlen + ehdrlen);
2480
2481         return (0);
2482 }
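
/*
 * Sketch of the intended use (compare ql_hw_send() below): on success
 * the opcode and the TCP/UDP header offset are plugged straight into
 * the tx descriptor.
 *
 *      uint32_t op_code = 0, tcp_hdr_off = 0;
 *
 *      if (qla_tx_chksum(ha, mp, &op_code, &tcp_hdr_off) == 0) {
 *              tx_cmd->flags_opcode = op_code;
 *              tx_cmd->tcp_hdr_off = tcp_hdr_off;
 *      }
 */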
2483
2484 #define QLA_TX_MIN_FREE 2
2485 /*
2486  * Name: ql_hw_send
2487  * Function: Transmits a packet. It first checks if the packet is a
2488  *      candidate for Large TCP Segment Offload and then for UDP/TCP checksum
2489  *      offload. If neither of these criteria is met, it is transmitted
2490  *      as a regular Ethernet frame.
2491  */
2492 int
2493 ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
2494         uint32_t tx_idx, struct mbuf *mp, uint32_t txr_idx, uint32_t iscsi_pdu)
2495 {
2496         struct ether_vlan_header *eh;
2497         qla_hw_t *hw = &ha->hw;
2498         q80_tx_cmd_t *tx_cmd, tso_cmd;
2499         bus_dma_segment_t *c_seg;
2500         uint32_t num_tx_cmds, hdr_len = 0;
2501         uint32_t total_length = 0, bytes, tx_cmd_count = 0, txr_next;
2502         device_t dev;
2503         int i, ret;
2504         uint8_t *src = NULL, *dst = NULL;
2505         uint8_t frame_hdr[QL_FRAME_HDR_SIZE];
2506         uint32_t op_code = 0;
2507         uint32_t tcp_hdr_off = 0;
2508
2509         dev = ha->pci_dev;
2510
2511         /*
2512          * Always make sure there is at least one empty slot in the tx_ring;
2513          * the tx_ring is considered full when only one entry is available.
2514          */
2515         num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2; /* ceil(nsegs / 4) */
2516
2517         total_length = mp->m_pkthdr.len;
2518         if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
2519                 device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
2520                         __func__, total_length);
2521                 return (EINVAL);
2522         }
2523         eh = mtod(mp, struct ether_vlan_header *);
2524
2525         if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
2526
2527                 bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t));
2528
2529                 src = frame_hdr;
2530                 ret = qla_tx_tso(ha, mp, &tso_cmd, src);
2531
2532                 if (!(ret & ~1)) { /* ret == 0 or 1: packet is a TSO candidate */
2533                         /* find the additional tx_cmd descriptors required */
2534
2535                         if (mp->m_flags & M_VLANTAG)
2536                                 tso_cmd.total_hdr_len += ETHER_VLAN_ENCAP_LEN;
2537
2538                         hdr_len = tso_cmd.total_hdr_len;
2539
2540                         bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
2541                         bytes = QL_MIN(bytes, hdr_len);
2542
2543                         num_tx_cmds++;
2544                         hdr_len -= bytes;
2545
2546                         while (hdr_len) {
2547                                 bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
2548                                 hdr_len -= bytes;
2549                                 num_tx_cmds++;
2550                         }
2551                         hdr_len = tso_cmd.total_hdr_len;
2552
2553                         if (ret == 0)
2554                                 src = (uint8_t *)eh;
2555                 } else 
2556                         return (EINVAL);
2557         } else {
2558                 (void)qla_tx_chksum(ha, mp, &op_code, &tcp_hdr_off);
2559         }
2560
2561         if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
2562                 ql_hw_tx_done_locked(ha, txr_idx);
2563                 if (hw->tx_cntxt[txr_idx].txr_free <=
2564                                 (num_tx_cmds + QLA_TX_MIN_FREE)) {
2565                         QL_DPRINT8(ha, (dev, "%s: (hw->txr_free <= "
2566                                 "(num_tx_cmds + QLA_TX_MIN_FREE))\n",
2567                                 __func__));
2568                         return (-1);
2569                 }
2570         }
2571
2572         for (i = 0; i < num_tx_cmds; i++) {
2573                 int j;
2574
2575                 j = (tx_idx+i) & (NUM_TX_DESCRIPTORS - 1);
2576
2577                 if (NULL != ha->tx_ring[txr_idx].tx_buf[j].m_head) {
2578                         QL_ASSERT(ha, 0, \
2579                                 ("%s [%d]: txr_idx = %d tx_idx = %d mbuf = %p\n",\
2580                                 __func__, __LINE__, txr_idx, j,\
2581                                 ha->tx_ring[txr_idx].tx_buf[j].m_head));
2582                         return (EINVAL);
2583                 }
2584         }
2585
2586         tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[tx_idx];
2587
2588         if (!(mp->m_pkthdr.csum_flags & CSUM_TSO)) {
2589
2590                 if (nsegs > ha->hw.max_tx_segs)
2591                         ha->hw.max_tx_segs = nsegs;
2592
2593                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2594
2595                 if (op_code) {
2596                         tx_cmd->flags_opcode = op_code;
2597                         tx_cmd->tcp_hdr_off = tcp_hdr_off;
2598
2599                 } else {
2600                         tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER;
2601                 }
2602         } else {
2603                 bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t));
2604                 ha->tx_tso_frames++;
2605         }
2606
2607         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2608                 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED;
2609
2610                 if (iscsi_pdu)
2611                         eh->evl_tag |= ha->hw.user_pri_iscsi << 13;
2612
2613         } else if (mp->m_flags & M_VLANTAG) {
2614
2615                 if (hdr_len) { /* TSO */
2616                         tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED |
2617                                                 Q8_TX_CMD_FLAGS_HW_VLAN_ID);
2618                         tx_cmd->tcp_hdr_off += ETHER_VLAN_ENCAP_LEN;
2619                 } else
2620                         tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_HW_VLAN_ID;
2621
2622                 ha->hw_vlan_tx_frames++;
2623                 tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag;
2624
2625                 if (iscsi_pdu) {
2626                         tx_cmd->vlan_tci |= ha->hw.user_pri_iscsi << 13;
2627                         mp->m_pkthdr.ether_vtag = tx_cmd->vlan_tci;
2628                 }
2629         }
2630
2631
2632         tx_cmd->n_bufs = (uint8_t)nsegs;
2633         tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF);
2634         tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8)));
2635         tx_cmd->cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func);
2636
2637         c_seg = segs;
2638
2639         while (1) {
2640                 for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) {
2641
2642                         switch (i) {
2643                         case 0:
2644                                 tx_cmd->buf1_addr = c_seg->ds_addr;
2645                                 tx_cmd->buf1_len = c_seg->ds_len;
2646                                 break;
2647
2648                         case 1:
2649                                 tx_cmd->buf2_addr = c_seg->ds_addr;
2650                                 tx_cmd->buf2_len = c_seg->ds_len;
2651                                 break;
2652
2653                         case 2:
2654                                 tx_cmd->buf3_addr = c_seg->ds_addr;
2655                                 tx_cmd->buf3_len = c_seg->ds_len;
2656                                 break;
2657
2658                         case 3:
2659                                 tx_cmd->buf4_addr = c_seg->ds_addr;
2660                                 tx_cmd->buf4_len = c_seg->ds_len;
2661                                 break;
2662                         }
2663
2664                         c_seg++;
2665                         nsegs--;
2666                 }
2667
2668                 txr_next = hw->tx_cntxt[txr_idx].txr_next =
2669                         (hw->tx_cntxt[txr_idx].txr_next + 1) &
2670                                 (NUM_TX_DESCRIPTORS - 1);
2671                 tx_cmd_count++;
2672
2673                 if (!nsegs)
2674                         break;
2675                 
2676                 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2677                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2678         }
2679
2680         if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
2681
2682                 /* TSO : Copy the header in the following tx cmd descriptors */
2683
2684                 txr_next = hw->tx_cntxt[txr_idx].txr_next;
2685
2686                 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2687                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2688
2689                 bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
2690                 bytes = QL_MIN(bytes, hdr_len);
2691
2692                 dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN;
2693
2694                 if (mp->m_flags & M_VLANTAG) {
2695                         /* first copy the src/dst MAC addresses */
2696                         bcopy(src, dst, (ETHER_ADDR_LEN * 2));
2697                         dst += (ETHER_ADDR_LEN * 2);
2698                         src += (ETHER_ADDR_LEN * 2);
2699                         
2700                         *((uint16_t *)dst) = htons(ETHERTYPE_VLAN);
2701                         dst += 2;
2702                         *((uint16_t *)dst) = htons(mp->m_pkthdr.ether_vtag);
2703                         dst += 2;
2704
2705                         /* bytes left in src header */
2706                         hdr_len -= ((ETHER_ADDR_LEN * 2) +
2707                                         ETHER_VLAN_ENCAP_LEN);
2708
2709                         /* bytes left in TxCmd Entry */
2710                         bytes -= ((ETHER_ADDR_LEN * 2) + ETHER_VLAN_ENCAP_LEN);
2711
2712
2713                         bcopy(src, dst, bytes);
2714                         src += bytes;
2715                         hdr_len -= bytes;
2716                 } else {
2717                         bcopy(src, dst, bytes);
2718                         src += bytes;
2719                         hdr_len -= bytes;
2720                 }
2721
2722                 txr_next = hw->tx_cntxt[txr_idx].txr_next =
2723                                 (hw->tx_cntxt[txr_idx].txr_next + 1) &
2724                                         (NUM_TX_DESCRIPTORS - 1);
2725                 tx_cmd_count++;
2726                 
2727                 while (hdr_len) {
2728                         tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2729                         bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2730
2731                         bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
2732
2733                         bcopy(src, tx_cmd, bytes);
2734                         src += bytes;
2735                         hdr_len -= bytes;
2736
2737                         txr_next = hw->tx_cntxt[txr_idx].txr_next =
2738                                 (hw->tx_cntxt[txr_idx].txr_next + 1) &
2739                                         (NUM_TX_DESCRIPTORS - 1);
2740                         tx_cmd_count++;
2741                 }
2742         }
2743
2744         hw->tx_cntxt[txr_idx].txr_free =
2745                 hw->tx_cntxt[txr_idx].txr_free - tx_cmd_count;
2746
2747         QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->tx_cntxt[txr_idx].txr_next,\
2748                 txr_idx);
2749         QL_DPRINT8(ha, (dev, "%s: return\n", __func__));
2750
2751         return (0);
2752 }
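
/*
 * Descriptor accounting example: each q80_tx_cmd_t carries up to four
 * buffer pointers, so a 10-segment mbuf chain needs
 * (10 + 3) >> 2 = 3 command descriptors. A TSO frame consumes further
 * descriptors to hold a copy of the packet headers: the first offers
 * sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN bytes, each subsequent
 * one a full sizeof(q80_tx_cmd_t) bytes, until total_hdr_len is
 * exhausted.
 */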
2753
2754
2755
2756 #define Q8_CONFIG_IND_TBL_SIZE  32 /* < Q8_RSS_IND_TBL_SIZE and power of 2 */
2757 static int
2758 qla_config_rss_ind_table(qla_host_t *ha)
2759 {
2760         uint32_t i, count;
2761         uint8_t rss_ind_tbl[Q8_CONFIG_IND_TBL_SIZE];
2762
2763
2764         for (i = 0; i < Q8_CONFIG_IND_TBL_SIZE; i++) {
2765                 rss_ind_tbl[i] = i % ha->hw.num_sds_rings;
2766         }
2767
2768         for (i = 0; i <= Q8_RSS_IND_TBL_MAX_IDX ;
2769                 i = i + Q8_CONFIG_IND_TBL_SIZE) {
2770
2771                 if ((i + Q8_CONFIG_IND_TBL_SIZE) > Q8_RSS_IND_TBL_MAX_IDX) {
2772                         count = Q8_RSS_IND_TBL_MAX_IDX - i + 1;
2773                 } else {
2774                         count = Q8_CONFIG_IND_TBL_SIZE;
2775                 }
2776
2777                 if (qla_set_rss_ind_table(ha, i, count, ha->hw.rcv_cntxt_id,
2778                         rss_ind_tbl))
2779                         return (-1);
2780         }
2781
2782         return (0);
2783 }
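
/*
 * With num_sds_rings == 4 the table built above simply round-robins
 * the rings (0, 1, 2, 3, 0, 1, ...), so hash buckets spread flows
 * evenly. It is pushed to the firmware in Q8_CONFIG_IND_TBL_SIZE-entry
 * chunks because that is what a single q80_config_rss_ind_table_t
 * mailbox command is sized to carry here.
 */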
2784
2785 static int
2786 qla_config_soft_lro(qla_host_t *ha)
2787 {
2788         int i;
2789         qla_hw_t *hw = &ha->hw;
2790         struct lro_ctrl *lro;
2791
2792         for (i = 0; i < hw->num_sds_rings; i++) {
2793                 lro = &hw->sds[i].lro;
2794
2795                 bzero(lro, sizeof(struct lro_ctrl));
2796
2797 #if (__FreeBSD_version >= 1100101)
2798                 if (tcp_lro_init_args(lro, ha->ifp, 0, NUM_RX_DESCRIPTORS)) {
2799                         device_printf(ha->pci_dev,
2800                                 "%s: tcp_lro_init_args [%d] failed\n",
2801                                 __func__, i);
2802                         return (-1);
2803                 }
2804 #else
2805                 if (tcp_lro_init(lro)) {
2806                         device_printf(ha->pci_dev,
2807                                 "%s: tcp_lro_init [%d] failed\n",
2808                                 __func__, i);
2809                         return (-1);
2810                 }
2811 #endif /* #if (__FreeBSD_version >= 1100101) */
2812
2813                 lro->ifp = ha->ifp;
2814         }
2815
2816         QL_DPRINT2(ha, (ha->pci_dev, "%s: LRO initialized\n", __func__));
2817         return (0);
2818 }
2819
2820 static void
2821 qla_drain_soft_lro(qla_host_t *ha)
2822 {
2823         int i;
2824         qla_hw_t *hw = &ha->hw;
2825         struct lro_ctrl *lro;
2826
2827         for (i = 0; i < hw->num_sds_rings; i++) {
2828                 lro = &hw->sds[i].lro;
2829
2830 #if (__FreeBSD_version >= 1100101)
2831                 tcp_lro_flush_all(lro);
2832 #else
2833                 struct lro_entry *queued;
2834
2835                 while ((!SLIST_EMPTY(&lro->lro_active))) {
2836                         queued = SLIST_FIRST(&lro->lro_active);
2837                         SLIST_REMOVE_HEAD(&lro->lro_active, next);
2838                         tcp_lro_flush(lro, queued);
2839                 }
2840 #endif /* #if (__FreeBSD_version >= 1100101) */
2841         }
2842
2843         return;
2844 }
2845
2846 static void
2847 qla_free_soft_lro(qla_host_t *ha)
2848 {
2849         int i;
2850         qla_hw_t *hw = &ha->hw;
2851         struct lro_ctrl *lro;
2852
2853         for (i = 0; i < hw->num_sds_rings; i++) {
2854                 lro = &hw->sds[i].lro;
2855                 tcp_lro_free(lro);
2856         }
2857
2858         return;
2859 }
2860
2861
2862 /*
2863  * Name: ql_del_hw_if
2864  * Function: Destroys the hardware-specific entities corresponding to an
2865  *      Ethernet Interface
2866  */
2867 void
2868 ql_del_hw_if(qla_host_t *ha)
2869 {
2870         uint32_t i;
2871         uint32_t num_msix;
2872
2873         (void)qla_stop_nic_func(ha);
2874
2875         qla_del_rcv_cntxt(ha);
2876
2877         if(qla_del_xmt_cntxt(ha))
2878                 goto ql_del_hw_if_exit;
2879
2880         if (ha->hw.flags.init_intr_cnxt) {
2881                 for (i = 0; i < ha->hw.num_sds_rings; ) {
2882
2883                         if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2884                                 num_msix = Q8_MAX_INTR_VECTORS;
2885                         else
2886                                 num_msix = ha->hw.num_sds_rings - i;
2887
2888                         if (qla_config_intr_cntxt(ha, i, num_msix, 0))
2889                                 break;
2890
2891                         i += num_msix;
2892                 }
2893
2894                 ha->hw.flags.init_intr_cnxt = 0;
2895         }
2896
2897 ql_del_hw_if_exit:
2898         if (ha->hw.enable_soft_lro) {
2899                 qla_drain_soft_lro(ha);
2900                 qla_free_soft_lro(ha);
2901         }
2902
2903         return;
2904 }
2905
2906 void
2907 qla_confirm_9kb_enable(qla_host_t *ha)
2908 {
2909         uint32_t supports_9kb = 0;
2910
2911         ha->hw.mbx_intr_mask_offset = READ_REG32(ha, Q8_MBOX_INT_MASK_MSIX);
2912
2913         /* Use MSI-X vector 0; Enable Firmware Mailbox Interrupt */
2914         WRITE_REG32(ha, Q8_MBOX_INT_ENABLE, BIT_2);
2915         WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
2916
2917         qla_get_nic_partition(ha, &supports_9kb, NULL);
2918
2919         if (!supports_9kb)
2920                 ha->hw.enable_9kb = 0;
2921
2922         return;
2923 }
2924
2925 /*
2926  * Name: ql_init_hw_if
2927  * Function: Creates the hardware-specific entities corresponding to an
2928  *      Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
2929  *      corresponding to the interface. Enables LRO if allowed.
2930  */
2931 int
2932 ql_init_hw_if(qla_host_t *ha)
2933 {
2934         device_t        dev;
2935         uint32_t        i;
2936         uint8_t         bcast_mac[6];
2937         qla_rdesc_t     *rdesc;
2938         uint32_t        num_msix;
2939
2940         dev = ha->pci_dev;
2941
2942         for (i = 0; i < ha->hw.num_sds_rings; i++) {
2943                 bzero(ha->hw.dma_buf.sds_ring[i].dma_b,
2944                         ha->hw.dma_buf.sds_ring[i].size);
2945         }
2946
2947         for (i = 0; i < ha->hw.num_sds_rings; ) {
2948
2949                 if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2950                         num_msix = Q8_MAX_INTR_VECTORS;
2951                 else
2952                         num_msix = ha->hw.num_sds_rings - i;
2953
2954                 if (qla_config_intr_cntxt(ha, i, num_msix, 1)) {
2955
2956                         if (i > 0) {
2957
2958                                 num_msix = i;
2959
2960                                 for (i = 0; i < num_msix; ) {
2961                                         qla_config_intr_cntxt(ha, i,
2962                                                 Q8_MAX_INTR_VECTORS, 0);
2963                                         i += Q8_MAX_INTR_VECTORS;
2964                                 }
2965                         }
2966                         return (-1);
2967                 }
2968
2969                 i = i + num_msix;
2970         }
2971
2972         ha->hw.flags.init_intr_cnxt = 1;
2973
2974         /*
2975          * Create Receive Context
2976          */
2977         if (qla_init_rcv_cntxt(ha)) {
2978                 return (-1);
2979         }
2980
2981         for (i = 0; i < ha->hw.num_rds_rings; i++) {
2982                 rdesc = &ha->hw.rds[i];
2983                 rdesc->rx_next = NUM_RX_DESCRIPTORS - 2;
2984                 rdesc->rx_in = 0;
2985                 /* Update the RDS Producer Indices */
2986                 QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,
2987                         rdesc->rx_next);
2988         }
2989
2990         /*
2991          * Create Transmit Context
2992          */
2993         if (qla_init_xmt_cntxt(ha)) {
2994                 qla_del_rcv_cntxt(ha);
2995                 return (-1);
2996         }
2997         ha->hw.max_tx_segs = 0;
2998
2999         if (qla_config_mac_addr(ha, ha->hw.mac_addr, 1, 1))
3000                 return(-1);
3001
3002         ha->hw.flags.unicast_mac = 1;
3003
3004         bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
3005         bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
3006
3007         if (qla_config_mac_addr(ha, bcast_mac, 1, 1))
3008                 return (-1);
3009
3010         ha->hw.flags.bcast_mac = 1;
3011
3012         /*
3013          * program any cached multicast addresses
3014          */
3015         if (qla_hw_add_all_mcast(ha))
3016                 return (-1);
3017
3018         if (ql_set_max_mtu(ha, ha->max_frame_size, ha->hw.rcv_cntxt_id))
3019                 return (-1);
3020
3021         if (qla_config_rss(ha, ha->hw.rcv_cntxt_id))
3022                 return (-1);
3023
3024         if (qla_config_rss_ind_table(ha))
3025                 return (-1);
3026
3027         if (qla_config_intr_coalesce(ha, ha->hw.rcv_cntxt_id, 0, 1))
3028                 return (-1);
3029
3030         if (qla_link_event_req(ha, ha->hw.rcv_cntxt_id))
3031                 return (-1);
3032
3033         if (ha->ifp->if_capenable & IFCAP_LRO) {
3034                 if (ha->hw.enable_hw_lro) {
3035                         ha->hw.enable_soft_lro = 0;
3036
3037                         if (qla_config_fw_lro(ha, ha->hw.rcv_cntxt_id))
3038                                 return (-1);
3039                 } else {
3040                         ha->hw.enable_soft_lro = 1;
3041
3042                         if (qla_config_soft_lro(ha))
3043                                 return (-1);
3044                 }
3045         }
3046
3047         if (qla_init_nic_func(ha))
3048                 return (-1);
3049
3050         if (qla_query_fw_dcbx_caps(ha))
3051                 return (-1);
3052
3053         for (i = 0; i < ha->hw.num_sds_rings; i++)
3054                 QL_ENABLE_INTERRUPTS(ha, i);
3055
3056         return (0);
3057 }
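
/*
 * ql_init_hw_if() and ql_del_hw_if() are intended to be symmetric; a
 * hypothetical caller pairs them as sketched below (the error handling and
 * the ENXIO value are illustrative, not taken from the driver):
 */
#if 0
        if (ql_init_hw_if(ha) != 0)
                return (ENXIO);         /* partial state is unwound internally */
        /* ... interface is up; receive/transmit paths run ... */
        ql_del_hw_if(ha);               /* symmetric teardown */
#endif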
3058
3059 static int
3060 qla_map_sds_to_rds(qla_host_t *ha, uint32_t start_idx, uint32_t num_idx)
3061 {
3062         device_t                dev = ha->pci_dev;
3063         q80_rq_map_sds_to_rds_t *map_rings;
3064         q80_rsp_map_sds_to_rds_t *map_rings_rsp;
3065         uint32_t                i, err;
3066         qla_hw_t                *hw = &ha->hw;
3067
3068         map_rings = (q80_rq_map_sds_to_rds_t *)ha->hw.mbox;
3069         bzero(map_rings, sizeof(q80_rq_map_sds_to_rds_t));
3070
3071         map_rings->opcode = Q8_MBX_MAP_SDS_TO_RDS;
3072         map_rings->count_version = (sizeof (q80_rq_map_sds_to_rds_t) >> 2);
3073         map_rings->count_version |= Q8_MBX_CMD_VERSION;
3074
3075         map_rings->cntxt_id = hw->rcv_cntxt_id;
3076         map_rings->num_rings = num_idx;
3077
3078         for (i = 0; i < num_idx; i++) {
3079                 map_rings->sds_rds[i].sds_ring = i + start_idx;
3080                 map_rings->sds_rds[i].rds_ring = i + start_idx;
3081         }
3082
3083         if (qla_mbx_cmd(ha, (uint32_t *)map_rings,
3084                 (sizeof (q80_rq_map_sds_to_rds_t) >> 2),
3085                 ha->hw.mbox, (sizeof(q80_rsp_map_sds_to_rds_t) >> 2), 0)) {
3086                 device_printf(dev, "%s: failed0\n", __func__);
3087                 return (-1);
3088         }
3089
3090         map_rings_rsp = (q80_rsp_map_sds_to_rds_t *)ha->hw.mbox;
3091
3092         err = Q8_MBX_RSP_STATUS(map_rings_rsp->regcnt_status);
3093
3094         if (err) {
3095                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3096                 return (-1);
3097         }
3098
3099         return (0);
3100 }
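
/*
 * The mapping programmed above is an identity pairing of status ring i with
 * receive descriptor ring i.  For example, a call with start_idx = 4 and
 * num_idx = 3 fills sds_rds[0..2] with the pairs (4,4), (5,5), (6,6).
 */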
3101
3102 /*
3103  * Name: qla_init_rcv_cntxt
3104  * Function: Creates the Receive Context.
3105  */
3106 static int
3107 qla_init_rcv_cntxt(qla_host_t *ha)
3108 {
3109         q80_rq_rcv_cntxt_t      *rcntxt;
3110         q80_rsp_rcv_cntxt_t     *rcntxt_rsp;
3111         q80_stat_desc_t         *sdesc;
3112         int                     i, j;
3113         qla_hw_t                *hw = &ha->hw;
3114         device_t                dev;
3115         uint32_t                err;
3116         uint32_t                rcntxt_sds_rings;
3117         uint32_t                rcntxt_rds_rings;
3118         uint32_t                max_idx;
3119
3120         dev = ha->pci_dev;
3121
3122         /*
3123          * Create Receive Context
3124          */
3125
3126         for (i = 0; i < hw->num_sds_rings; i++) {
3127                 sdesc = (q80_stat_desc_t *)&hw->sds[i].sds_ring_base[0];
3128
3129                 for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++) {
3130                         sdesc[j].data[0] = 1ULL;
3131                         sdesc[j].data[1] = 1ULL;
3132                 }
3133         }
3134
3135         rcntxt_sds_rings = hw->num_sds_rings;
3136         if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS)
3137                 rcntxt_sds_rings = MAX_RCNTXT_SDS_RINGS;
3138
3139         rcntxt_rds_rings = hw->num_rds_rings;
3140
3141         if (hw->num_rds_rings > MAX_RDS_RING_SETS)
3142                 rcntxt_rds_rings = MAX_RDS_RING_SETS;
3143
3144         rcntxt = (q80_rq_rcv_cntxt_t *)ha->hw.mbox;
3145         bzero(rcntxt, (sizeof (q80_rq_rcv_cntxt_t)));
3146
3147         rcntxt->opcode = Q8_MBX_CREATE_RX_CNTXT;
3148         rcntxt->count_version = (sizeof (q80_rq_rcv_cntxt_t) >> 2);
3149         rcntxt->count_version |= Q8_MBX_CMD_VERSION;
3150
3151         rcntxt->cap0 = Q8_RCV_CNTXT_CAP0_BASEFW |
3152                         Q8_RCV_CNTXT_CAP0_LRO |
3153                         Q8_RCV_CNTXT_CAP0_HW_LRO |
3154                         Q8_RCV_CNTXT_CAP0_RSS |
3155                         Q8_RCV_CNTXT_CAP0_SGL_LRO;
3156
3157         if (ha->hw.enable_9kb)
3158                 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SINGLE_JUMBO;
3159         else
3160                 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SGL_JUMBO;
3161
3162         if (ha->hw.num_rds_rings > 1) {
3163                 rcntxt->nrds_sets_rings = rcntxt_rds_rings | (1 << 5);
3164                 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_MULTI_RDS;
3165         } else
3166                 rcntxt->nrds_sets_rings = 0x1 | (1 << 5);
3167
3168         rcntxt->nsds_rings = rcntxt_sds_rings;
3169
3170         rcntxt->rds_producer_mode = Q8_RCV_CNTXT_RDS_PROD_MODE_UNIQUE;
3171
3172         rcntxt->rcv_vpid = 0;
3173
3174         for (i = 0; i <  rcntxt_sds_rings; i++) {
3175                 rcntxt->sds[i].paddr =
3176                         qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr);
3177                 rcntxt->sds[i].size =
3178                         qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
3179                 rcntxt->sds[i].intr_id = qla_host_to_le16(hw->intr_id[i]);
3180                 rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0);
3181         }
3182
3183         for (i = 0; i <  rcntxt_rds_rings; i++) {
3184                 rcntxt->rds[i].paddr_std =
3185                         qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr);
3186
3187                 if (ha->hw.enable_9kb)
3188                         rcntxt->rds[i].std_bsize =
3189                                 qla_host_to_le64(MJUM9BYTES);
3190                 else
3191                         rcntxt->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
3192
3193                 rcntxt->rds[i].std_nentries =
3194                         qla_host_to_le32(NUM_RX_DESCRIPTORS);
3195         }
3196
3197         if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
3198                 (sizeof (q80_rq_rcv_cntxt_t) >> 2),
3199                 ha->hw.mbox, (sizeof(q80_rsp_rcv_cntxt_t) >> 2), 0)) {
3200                 device_printf(dev, "%s: failed0\n", __func__);
3201                 return (-1);
3202         }
3203
3204         rcntxt_rsp = (q80_rsp_rcv_cntxt_t *)ha->hw.mbox;
3205
3206         err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
3207
3208         if (err) {
3209                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3210                 return (-1);
3211         }
3212
3213         for (i = 0; i <  rcntxt_sds_rings; i++) {
3214                 hw->sds[i].sds_consumer = rcntxt_rsp->sds_cons[i];
3215         }
3216
3217         for (i = 0; i <  rcntxt_rds_rings; i++) {
3218                 hw->rds[i].prod_std = rcntxt_rsp->rds[i].prod_std;
3219         }
3220
3221         hw->rcv_cntxt_id = rcntxt_rsp->cntxt_id;
3222
3223         ha->hw.flags.init_rx_cnxt = 1;
3224
3225         if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) {
3226
3227                 for (i = MAX_RCNTXT_SDS_RINGS; i < hw->num_sds_rings;) {
3228
3229                         if ((i + MAX_RCNTXT_SDS_RINGS) < hw->num_sds_rings)
3230                                 max_idx = MAX_RCNTXT_SDS_RINGS;
3231                         else
3232                                 max_idx = hw->num_sds_rings - i;
3233
3234                         err = qla_add_rcv_rings(ha, i, max_idx);
3235                         if (err)
3236                                 return -1;
3237
3238                         i += max_idx;
3239                 }
3240         }
3241
3242         if (hw->num_rds_rings > 1) {
3243
3244                 for (i = 0; i < hw->num_rds_rings; ) {
3245
3246                         if ((i + MAX_SDS_TO_RDS_MAP) < hw->num_rds_rings)
3247                                 max_idx = MAX_SDS_TO_RDS_MAP;
3248                         else
3249                                 max_idx = hw->num_rds_rings - i;
3250
3251                         err = qla_map_sds_to_rds(ha, i, max_idx);
3252                         if (err)
3253                                 return -1;
3254
3255                         i += max_idx;
3256                 }
3257         }
3258
3259         return (0);
3260 }
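
/*
 * The create command carries at most MAX_RCNTXT_SDS_RINGS SDS rings, so any
 * remainder is attached in MAX_RCNTXT_SDS_RINGS-sized chunks through
 * qla_add_rcv_rings(), and the SDS-to-RDS pairing is likewise programmed in
 * MAX_SDS_TO_RDS_MAP-sized chunks.  For example, assuming
 * MAX_RCNTXT_SDS_RINGS is 16 and num_sds_rings is 24, the base context
 * carries rings 0-15 and qla_add_rcv_rings(ha, 16, 8) attaches the rest.
 */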
3261
3262 static int
3263 qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds)
3264 {
3265         device_t                dev = ha->pci_dev;
3266         q80_rq_add_rcv_rings_t  *add_rcv;
3267         q80_rsp_add_rcv_rings_t *add_rcv_rsp;
3268         uint32_t                i,j, err;
3269         qla_hw_t                *hw = &ha->hw;
3270
3271         add_rcv = (q80_rq_add_rcv_rings_t *)ha->hw.mbox;
3272         bzero(add_rcv, sizeof (q80_rq_add_rcv_rings_t));
3273
3274         add_rcv->opcode = Q8_MBX_ADD_RX_RINGS;
3275         add_rcv->count_version = (sizeof (q80_rq_add_rcv_rings_t) >> 2);
3276         add_rcv->count_version |= Q8_MBX_CMD_VERSION;
3277
3278         add_rcv->nrds_sets_rings = nsds | (1 << 5);
3279         add_rcv->nsds_rings = nsds;
3280         add_rcv->cntxt_id = hw->rcv_cntxt_id;
3281
3282         for (i = 0; i <  nsds; i++) {
3283
3284                 j = i + sds_idx;
3285
3286                 add_rcv->sds[i].paddr =
3287                         qla_host_to_le64(hw->dma_buf.sds_ring[j].dma_addr);
3288
3289                 add_rcv->sds[i].size =
3290                         qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
3291
3292                 add_rcv->sds[i].intr_id = qla_host_to_le16(hw->intr_id[j]);
3293                 add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0);
3294
3295         }
3296
3297         for (i = 0; (i <  nsds); i++) {
3298                 j = i + sds_idx;
3299
3300                 add_rcv->rds[i].paddr_std =
3301                         qla_host_to_le64(hw->dma_buf.rds_ring[j].dma_addr);
3302
3303                 if (ha->hw.enable_9kb)
3304                         add_rcv->rds[i].std_bsize =
3305                                 qla_host_to_le64(MJUM9BYTES);
3306                 else
3307                         add_rcv->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
3308
3309                 add_rcv->rds[i].std_nentries =
3310                         qla_host_to_le32(NUM_RX_DESCRIPTORS);
3311         }
3312
3313
3314         if (qla_mbx_cmd(ha, (uint32_t *)add_rcv,
3315                 (sizeof (q80_rq_add_rcv_rings_t) >> 2),
3316                 ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
3317                 device_printf(dev, "%s: failed0\n", __func__);
3318                 return (-1);
3319         }
3320
3321         add_rcv_rsp = (q80_rsp_add_rcv_rings_t *)ha->hw.mbox;
3322
3323         err = Q8_MBX_RSP_STATUS(add_rcv_rsp->regcnt_status);
3324
3325         if (err) {
3326                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3327                 return (-1);
3328         }
3329
3330         for (i = 0; i < nsds; i++) {
3331                 hw->sds[(i + sds_idx)].sds_consumer = add_rcv_rsp->sds_cons[i];
3332         }
3333
3334         for (i = 0; i < nsds; i++) {
3335                 hw->rds[(i + sds_idx)].prod_std = add_rcv_rsp->rds[i].prod_std;
3336         }
3337
3338         return (0);
3339 }
3340
3341 /*
3342  * Name: qla_del_rcv_cntxt
3343  * Function: Destroys the Receive Context.
3344  */
3345 static void
3346 qla_del_rcv_cntxt(qla_host_t *ha)
3347 {
3348         device_t                        dev = ha->pci_dev;
3349         q80_rcv_cntxt_destroy_t         *rcntxt;
3350         q80_rcv_cntxt_destroy_rsp_t     *rcntxt_rsp;
3351         uint32_t                        err;
3352         uint8_t                         bcast_mac[6];
3353
3354         if (!ha->hw.flags.init_rx_cnxt)
3355                 return;
3356
3357         if (qla_hw_del_all_mcast(ha))
3358                 return;
3359
3360         if (ha->hw.flags.bcast_mac) {
3361
3362                 bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
3363                 bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
3364
3365                 if (qla_config_mac_addr(ha, bcast_mac, 0, 1))
3366                         return;
3367                 ha->hw.flags.bcast_mac = 0;
3368
3369         }
3370
3371         if (ha->hw.flags.unicast_mac) {
3372                 if (qla_config_mac_addr(ha, ha->hw.mac_addr, 0, 1))
3373                         return;
3374                 ha->hw.flags.unicast_mac = 0;
3375         }
3376
3377         rcntxt = (q80_rcv_cntxt_destroy_t *)ha->hw.mbox;
3378         bzero(rcntxt, (sizeof (q80_rcv_cntxt_destroy_t)));
3379
3380         rcntxt->opcode = Q8_MBX_DESTROY_RX_CNTXT;
3381         rcntxt->count_version = (sizeof (q80_rcv_cntxt_destroy_t) >> 2);
3382         rcntxt->count_version |= Q8_MBX_CMD_VERSION;
3383
3384         rcntxt->cntxt_id = ha->hw.rcv_cntxt_id;
3385
3386         if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
3387                 (sizeof (q80_rcv_cntxt_destroy_t) >> 2),
3388                 ha->hw.mbox, (sizeof(q80_rcv_cntxt_destroy_rsp_t) >> 2), 0)) {
3389                 device_printf(dev, "%s: failed0\n", __func__);
3390                 return;
3391         }
3392         rcntxt_rsp = (q80_rcv_cntxt_destroy_rsp_t *)ha->hw.mbox;
3393
3394         err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
3395
3396         if (err) {
3397                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3398         }
3399
3400         ha->hw.flags.init_rx_cnxt = 0;
3401         return;
3402 }
3403
3404 /*
3405  * Name: qla_init_xmt_cntxt
3406  * Function: Creates the Transmit Context.
3407  */
3408 static int
3409 qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
3410 {
3411         device_t                dev;
3412         qla_hw_t                *hw = &ha->hw;
3413         q80_rq_tx_cntxt_t       *tcntxt;
3414         q80_rsp_tx_cntxt_t      *tcntxt_rsp;
3415         uint32_t                err;
3416         qla_hw_tx_cntxt_t       *hw_tx_cntxt;
3417         uint32_t                intr_idx;
3418
3419         hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
3420
3421         dev = ha->pci_dev;
3422
3423         /*
3424          * Create Transmit Context
3425          */
3426         tcntxt = (q80_rq_tx_cntxt_t *)ha->hw.mbox;
3427         bzero(tcntxt, (sizeof (q80_rq_tx_cntxt_t)));
3428
3429         tcntxt->opcode = Q8_MBX_CREATE_TX_CNTXT;
3430         tcntxt->count_version = (sizeof (q80_rq_tx_cntxt_t) >> 2);
3431         tcntxt->count_version |= Q8_MBX_CMD_VERSION;
3432
3433         intr_idx = txr_idx;
3434
3435 #ifdef QL_ENABLE_ISCSI_TLV
3436
3437         tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO |
3438                                 Q8_TX_CNTXT_CAP0_TC;
3439
3440         if (txr_idx >= (ha->hw.num_tx_rings >> 1)) {
3441                 tcntxt->traffic_class = 1;
3442         }
3443
3444         intr_idx = txr_idx % (ha->hw.num_tx_rings >> 1);
3445
3446 #else
3447         tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO;
3448
3449 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
3450
3451         tcntxt->ntx_rings = 1;
3452
3453         tcntxt->tx_ring[0].paddr =
3454                 qla_host_to_le64(hw_tx_cntxt->tx_ring_paddr);
3455         tcntxt->tx_ring[0].tx_consumer =
3456                 qla_host_to_le64(hw_tx_cntxt->tx_cons_paddr);
3457         tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS);
3458
3459         tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[intr_idx]);
3460         tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0);
3461
3462         hw_tx_cntxt->txr_free = NUM_TX_DESCRIPTORS;
3463         hw_tx_cntxt->txr_next = hw_tx_cntxt->txr_comp = 0;
3464         *(hw_tx_cntxt->tx_cons) = 0;
3465
3466         if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
3467                 (sizeof (q80_rq_tx_cntxt_t) >> 2),
3468                 ha->hw.mbox,
3469                 (sizeof(q80_rsp_tx_cntxt_t) >> 2), 0)) {
3470                 device_printf(dev, "%s: failed0\n", __func__);
3471                 return (-1);
3472         }
3473         tcntxt_rsp = (q80_rsp_tx_cntxt_t *)ha->hw.mbox;
3474
3475         err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
3476
3477         if (err) {
3478                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3479                 return -1;
3480         }
3481
3482         hw_tx_cntxt->tx_prod_reg = tcntxt_rsp->tx_ring[0].prod_index;
3483         hw_tx_cntxt->tx_cntxt_id = tcntxt_rsp->tx_ring[0].cntxt_id;
3484
3485         if (qla_config_intr_coalesce(ha, hw_tx_cntxt->tx_cntxt_id, 0, 0))
3486                 return (-1);
3487
3488         return (0);
3489 }
3490
3491
3492 /*
3493  * Name: qla_del_xmt_cntxt
3494  * Function: Destroys the Transmit Context.
3495  */
3496 static int
3497 qla_del_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
3498 {
3499         device_t                        dev = ha->pci_dev;
3500         q80_tx_cntxt_destroy_t          *tcntxt;
3501         q80_tx_cntxt_destroy_rsp_t      *tcntxt_rsp;
3502         uint32_t                        err;
3503
3504         tcntxt = (q80_tx_cntxt_destroy_t *)ha->hw.mbox;
3505         bzero(tcntxt, (sizeof (q80_tx_cntxt_destroy_t)));
3506
3507         tcntxt->opcode = Q8_MBX_DESTROY_TX_CNTXT;
3508         tcntxt->count_version = (sizeof (q80_tx_cntxt_destroy_t) >> 2);
3509         tcntxt->count_version |= Q8_MBX_CMD_VERSION;
3510
3511         tcntxt->cntxt_id = ha->hw.tx_cntxt[txr_idx].tx_cntxt_id;
3512
3513         if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
3514                 (sizeof (q80_tx_cntxt_destroy_t) >> 2),
3515                 ha->hw.mbox, (sizeof (q80_tx_cntxt_destroy_rsp_t) >> 2), 0)) {
3516                 device_printf(dev, "%s: failed0\n", __func__);
3517                 return (-1);
3518         }
3519         tcntxt_rsp = (q80_tx_cntxt_destroy_rsp_t *)ha->hw.mbox;
3520
3521         err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
3522
3523         if (err) {
3524                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3525                 return (-1);
3526         }
3527
3528         return (0);
3529 }
3530 static int
3531 qla_del_xmt_cntxt(qla_host_t *ha)
3532 {
3533         uint32_t i;
3534         int ret = 0;
3535
3536         if (!ha->hw.flags.init_tx_cnxt)
3537                 return (ret);
3538
3539         for (i = 0; i < ha->hw.num_tx_rings; i++) {
3540                 if ((ret = qla_del_xmt_cntxt_i(ha, i)) != 0)
3541                         break;
3542         }
3543         ha->hw.flags.init_tx_cnxt = 0;
3544
3545         return (ret);
3546 }
3547
3548 static int
3549 qla_init_xmt_cntxt(qla_host_t *ha)
3550 {
3551         uint32_t i, j;
3552
3553         for (i = 0; i < ha->hw.num_tx_rings; i++) {
3554                 if (qla_init_xmt_cntxt_i(ha, i) != 0) {
3555                         for (j = 0; j < i; j++) {
3556                                 if (qla_del_xmt_cntxt_i(ha, j))
3557                                         break;
3558                         }
3559                         return (-1);
3560                 }
3561         }
3562         ha->hw.flags.init_tx_cnxt = 1;
3563         return (0);
3564 }
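
/*
 * qla_init_xmt_cntxt() unwinds on failure: if creating ring i fails, rings
 * 0 .. i-1 are destroyed before returning.  E.g. with num_tx_rings = 4 and
 * a failure at i = 2, contexts 0 and 1 are deleted and -1 is returned, so
 * no partially initialized transmit state is left behind.
 */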
3565
3566 static int
3567 qla_hw_all_mcast(qla_host_t *ha, uint32_t add_mcast)
3568 {
3569         int i, nmcast;
3570         uint32_t count = 0;
3571         uint8_t *mcast;
3572
3573         nmcast = ha->hw.nmcast;
3574
3575         QL_DPRINT2(ha, (ha->pci_dev,
3576                 "%s:[0x%x] enter nmcast = %d \n", __func__, add_mcast, nmcast));
3577
3578         mcast = ha->hw.mac_addr_arr;
3579         memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3580
3581         for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
3582                 if ((ha->hw.mcast[i].addr[0] != 0) || 
3583                         (ha->hw.mcast[i].addr[1] != 0) ||
3584                         (ha->hw.mcast[i].addr[2] != 0) ||
3585                         (ha->hw.mcast[i].addr[3] != 0) ||
3586                         (ha->hw.mcast[i].addr[4] != 0) ||
3587                         (ha->hw.mcast[i].addr[5] != 0)) {
3588
3589                         bcopy(ha->hw.mcast[i].addr, mcast, ETHER_ADDR_LEN);
3590                         mcast = mcast + ETHER_ADDR_LEN;
3591                         count++;
3592                         
3593                         if (count == Q8_MAX_MAC_ADDRS) {
3594                                 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr,
3595                                         add_mcast, count)) {
3596                                         device_printf(ha->pci_dev,
3597                                                 "%s: failed\n", __func__);
3598                                         return (-1);
3599                                 }
3600
3601                                 count = 0;
3602                                 mcast = ha->hw.mac_addr_arr;
3603                                 memset(mcast, 0,
3604                                         (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3605                         }
3606
3607                         nmcast--;
3608                 }
3609         }
3610
3611         if (count) {
3612                 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mcast,
3613                         count)) {
3614                         device_printf(ha->pci_dev, "%s: failed\n", __func__);
3615                         return (-1);
3616                 }
3617         }
3618         QL_DPRINT2(ha, (ha->pci_dev,
3619                 "%s:[0x%x] exit nmcast = %d \n", __func__, add_mcast, nmcast));
3620
3621         return 0;
3622 }
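
/*
 * qla_hw_all_mcast() batches addresses into mac_addr_arr and flushes a full
 * batch of Q8_MAX_MAC_ADDRS per mailbox command, with one final flush for
 * any partial batch.  The generic idiom, in outline (flush() stands in for
 * qla_config_mac_addr()):
 *
 *      count = 0;
 *      for each non-zero address a:
 *              copy a into arr[count++];
 *              if (count == Q8_MAX_MAC_ADDRS) {
 *                      flush(arr, count);
 *                      count = 0;
 *              }
 *      if (count)
 *              flush(arr, count);
 */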
3623
3624 static int
3625 qla_hw_add_all_mcast(qla_host_t *ha)
3626 {
3627         int ret;
3628
3629         ret = qla_hw_all_mcast(ha, 1);
3630
3631         return (ret);
3632 }
3633
3634 int
3635 qla_hw_del_all_mcast(qla_host_t *ha)
3636 {
3637         int ret;
3638
3639         ret = qla_hw_all_mcast(ha, 0);
3640
3641         bzero(ha->hw.mcast, (sizeof (qla_mcast_t) * Q8_MAX_NUM_MULTICAST_ADDRS));
3642         ha->hw.nmcast = 0;
3643
3644         return (ret);
3645 }
3646
3647 static int
3648 qla_hw_mac_addr_present(qla_host_t *ha, uint8_t *mta)
3649 {
3650         int i;
3651
3652         for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3653                 if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0)
3654                         return (0); /* it has already been added */
3655         }
3656         return (-1);
3657 }
3658
3659 static int
3660 qla_hw_add_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
3661 {
3662         int i;
3663
3664         for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3665
3666                 if ((ha->hw.mcast[i].addr[0] == 0) && 
3667                         (ha->hw.mcast[i].addr[1] == 0) &&
3668                         (ha->hw.mcast[i].addr[2] == 0) &&
3669                         (ha->hw.mcast[i].addr[3] == 0) &&
3670                         (ha->hw.mcast[i].addr[4] == 0) &&
3671                         (ha->hw.mcast[i].addr[5] == 0)) {
3672
3673                         bcopy(mta, ha->hw.mcast[i].addr, Q8_MAC_ADDR_LEN);
3674                         ha->hw.nmcast++;        
3675
3676                         mta = mta + ETHER_ADDR_LEN;
3677                         nmcast--;
3678
3679                         if (nmcast == 0)
3680                                 break;
3681                 }
3682
3683         }
3684         return 0;
3685 }
3686
3687 static int
3688 qla_hw_del_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
3689 {
3690         int i;
3691
3692         for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3693                 if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) {
3694
3695                         ha->hw.mcast[i].addr[0] = 0;
3696                         ha->hw.mcast[i].addr[1] = 0;
3697                         ha->hw.mcast[i].addr[2] = 0;
3698                         ha->hw.mcast[i].addr[3] = 0;
3699                         ha->hw.mcast[i].addr[4] = 0;
3700                         ha->hw.mcast[i].addr[5] = 0;
3701
3702                         ha->hw.nmcast--;        
3703
3704                         mta = mta + ETHER_ADDR_LEN;
3705                         nmcast--;
3706
3707                         if (nmcast == 0)
3708                                 break;
3709                 }
3710         }
3711         return 0;
3712 }
3713
3714 /*
3715  * Name: ql_hw_set_multi
3716  * Function: Sets the Multicast Addresses provided by the host OS into the
3717  *      hardware (for the given interface)
3718  */
3719 int
3720 ql_hw_set_multi(qla_host_t *ha, uint8_t *mcast_addr, uint32_t mcnt,
3721         uint32_t add_mac)
3722 {
3723         uint8_t *mta = mcast_addr;
3724         int i;
3725         int ret = 0;
3726         uint32_t count = 0;
3727         uint8_t *mcast;
3728
3729         mcast = ha->hw.mac_addr_arr;
3730         memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3731
3732         for (i = 0; i < mcnt; i++) {
3733                 if (mta[0] || mta[1] || mta[2] || mta[3] || mta[4] || mta[5]) {
3734                         if (add_mac) {
3735                                 if (qla_hw_mac_addr_present(ha, mta) != 0) {
3736                                         bcopy(mta, mcast, ETHER_ADDR_LEN);
3737                                         mcast = mcast + ETHER_ADDR_LEN;
3738                                         count++;
3739                                 }
3740                         } else {
3741                                 if (qla_hw_mac_addr_present(ha, mta) == 0) {
3742                                         bcopy(mta, mcast, ETHER_ADDR_LEN);
3743                                         mcast = mcast + ETHER_ADDR_LEN;
3744                                         count++;
3745                                 }
3746                         }
3747                 }
3748                 if (count == Q8_MAX_MAC_ADDRS) {
3749                         if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr,
3750                                 add_mac, count)) {
3751                                 device_printf(ha->pci_dev, "%s: failed\n",
3752                                         __func__);
3753                                 return (-1);
3754                         }
3755
3756                         if (add_mac) {
3757                                 qla_hw_add_mcast(ha, ha->hw.mac_addr_arr,
3758                                         count);
3759                         } else {
3760                                 qla_hw_del_mcast(ha, ha->hw.mac_addr_arr,
3761                                         count);
3762                         }
3763
3764                         count = 0;
3765                         mcast = ha->hw.mac_addr_arr;
3766                         memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3767                 }
3768                         
3769                 mta += Q8_MAC_ADDR_LEN;
3770         }
3771
3772         if (count) {
3773                 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mac,
3774                         count)) {
3775                         device_printf(ha->pci_dev, "%s: failed\n", __func__);
3776                         return (-1);
3777                 }
3778                 if (add_mac) {
3779                         qla_hw_add_mcast(ha, ha->hw.mac_addr_arr, count);
3780                 } else {
3781                         qla_hw_del_mcast(ha, ha->hw.mac_addr_arr, count);
3782                 }
3783         }
3784
3785         return (ret);
3786 }
3787
3788 /*
3789  * Name: ql_hw_tx_done_locked
3790  * Function: Handle Transmit Completions
3791  */
3792 void
3793 ql_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx)
3794 {
3795         qla_tx_buf_t *txb;
3796         qla_hw_t *hw = &ha->hw;
3797         uint32_t comp_idx, comp_count = 0;
3798         qla_hw_tx_cntxt_t *hw_tx_cntxt;
3799
3800         hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
3801
3802         /* retrieve index of last entry in tx ring completed */
3803         comp_idx = qla_le32_to_host(*(hw_tx_cntxt->tx_cons));
3804
3805         while (comp_idx != hw_tx_cntxt->txr_comp) {
3806
3807                 txb = &ha->tx_ring[txr_idx].tx_buf[hw_tx_cntxt->txr_comp];
3808
3809                 hw_tx_cntxt->txr_comp++;
3810                 if (hw_tx_cntxt->txr_comp == NUM_TX_DESCRIPTORS)
3811                         hw_tx_cntxt->txr_comp = 0;
3812
3813                 comp_count++;
3814
3815                 if (txb->m_head) {
3816                         if_inc_counter(ha->ifp, IFCOUNTER_OPACKETS, 1);
3817
3818                         bus_dmamap_sync(ha->tx_tag, txb->map,
3819                                 BUS_DMASYNC_POSTWRITE);
3820                         bus_dmamap_unload(ha->tx_tag, txb->map);
3821                         m_freem(txb->m_head);
3822
3823                         txb->m_head = NULL;
3824                 }
3825         }
3826
3827         hw_tx_cntxt->txr_free += comp_count;
3828         return;
3829 }
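
/*
 * txr_comp chases the hardware consumer index with wraparound at
 * NUM_TX_DESCRIPTORS.  For example, assuming NUM_TX_DESCRIPTORS is 1024,
 * txr_comp = 1022 and *tx_cons = 2, the loop above completes entries 1022,
 * 1023, 0 and 1 (comp_count = 4) and then credits txr_free by 4.
 */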
3830
3831 void
3832 ql_update_link_state(qla_host_t *ha)
3833 {
3834         uint32_t link_state = 0;
3835         uint32_t prev_link_state;
3836
3837         prev_link_state =  ha->hw.link_up;
3838
3839         if (ha->ifp->if_drv_flags & IFF_DRV_RUNNING) {
3840                 link_state = READ_REG32(ha, Q8_LINK_STATE);
3841
3842                 if (ha->pci_func == 0) {
3843                         link_state = (((link_state & 0xF) == 1)? 1 : 0);
3844                 } else {
3845                         link_state = ((((link_state >> 4)& 0xF) == 1)? 1 : 0);
3846                 }
3847         }
3848
3849         atomic_store_rel_8(&ha->hw.link_up, (uint8_t)link_state);
3850
3851         if (prev_link_state !=  ha->hw.link_up) {
3852                 if (ha->hw.link_up) {
3853                         if_link_state_change(ha->ifp, LINK_STATE_UP);
3854                 } else {
3855                         if_link_state_change(ha->ifp, LINK_STATE_DOWN);
3856                 }
3857         }
3858         return;
3859 }
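
/*
 * Q8_LINK_STATE packs a 4-bit link state per PCI function: bits 3:0 for
 * function 0 and bits 7:4 for function 1, where a value of 1 means link up.
 * A register value of 0x11, for instance, reports link up on both functions.
 */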
3860
3861 int
3862 ql_hw_check_health(qla_host_t *ha)
3863 {
3864         uint32_t val;
3865
3866         ha->hw.health_count++;
3867
3868         if (ha->hw.health_count < 500)
3869                 return 0;
3870
3871         ha->hw.health_count = 0;
3872
3873         val = READ_REG32(ha, Q8_ASIC_TEMPERATURE);
3874
3875         if (((val & 0xFFFF) == 2) || ((val & 0xFFFF) == 3) ||
3876                 (QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE))) {
3877                 device_printf(ha->pci_dev, "%s: Temperature Alert"
3878                         " at ts_usecs %ld ts_reg = 0x%08x\n",
3879                         __func__, qla_get_usec_timestamp(), val);
3880
3881                 if (ha->hw.sp_log_stop_events & Q8_SP_LOG_STOP_TEMP_FAILURE)
3882                         ha->hw.sp_log_stop = -1;
3883
3884                 QL_INITIATE_RECOVERY(ha);
3885                 return -1;
3886         }
3887
3888         val = READ_REG32(ha, Q8_FIRMWARE_HEARTBEAT);
3889
3890         if ((val != ha->hw.hbeat_value) &&
3891                 (!(QL_ERR_INJECT(ha, INJCT_HEARTBEAT_FAILURE)))) {
3892                 ha->hw.hbeat_value = val;
3893                 ha->hw.hbeat_failure = 0;
3894                 return 0;
3895         }
3896
3897         ha->hw.hbeat_failure++;
3898
3899         
3900         if ((ha->dbg_level & 0x8000) && (ha->hw.hbeat_failure == 1))
3901                 device_printf(ha->pci_dev, "%s: Heartbeat Failure 1 [0x%08x]\n",
3902                         __func__, val);
3903         if (ha->hw.hbeat_failure < 2) /* we ignore the first failure */
3904                 return 0;
3905         else {
3906                 uint32_t peg_halt_status1;
3907                 uint32_t peg_halt_status2;
3908
3909                 peg_halt_status1 = READ_REG32(ha, Q8_PEG_HALT_STATUS1);
3910                 peg_halt_status2 = READ_REG32(ha, Q8_PEG_HALT_STATUS2);
3911
3912                 device_printf(ha->pci_dev,
3913                         "%s: Heartbeat Failure at ts_usecs = %ld "
3914                         "fw_heart_beat = 0x%08x "
3915                         "peg_halt_status1 = 0x%08x "
3916                         "peg_halt_status2 = 0x%08x\n",
3917                         __func__, qla_get_usec_timestamp(), val,
3918                         peg_halt_status1, peg_halt_status2);
3919
3920                 if (ha->hw.sp_log_stop_events & Q8_SP_LOG_STOP_HBEAT_FAILURE)
3921                         ha->hw.sp_log_stop = -1;
3922         }
3923         QL_INITIATE_RECOVERY(ha);
3924
3925         return -1;
3926 }
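
/*
 * ql_hw_check_health() rate-limits itself: the temperature and heartbeat
 * registers are only read on every 500th invocation, and the first stalled
 * heartbeat reading is ignored, so recovery is initiated only after two
 * consecutive checks observe an unchanged Q8_FIRMWARE_HEARTBEAT value.
 */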
3927
3928 static int
3929 qla_init_nic_func(qla_host_t *ha)
3930 {
3931         device_t                dev;
3932         q80_init_nic_func_t     *init_nic;
3933         q80_init_nic_func_rsp_t *init_nic_rsp;
3934         uint32_t                err;
3935
3936         dev = ha->pci_dev;
3937
3938         init_nic = (q80_init_nic_func_t *)ha->hw.mbox;
3939         bzero(init_nic, sizeof(q80_init_nic_func_t));
3940
3941         init_nic->opcode = Q8_MBX_INIT_NIC_FUNC;
3942         init_nic->count_version = (sizeof (q80_init_nic_func_t) >> 2);
3943         init_nic->count_version |= Q8_MBX_CMD_VERSION;
3944
3945         init_nic->options = Q8_INIT_NIC_REG_DCBX_CHNG_AEN;
3946         init_nic->options |= Q8_INIT_NIC_REG_SFP_CHNG_AEN;
3947         init_nic->options |= Q8_INIT_NIC_REG_IDC_AEN;
3948
3949 //qla_dump_buf8(ha, __func__, init_nic, sizeof (q80_init_nic_func_t));
3950         if (qla_mbx_cmd(ha, (uint32_t *)init_nic,
3951                 (sizeof (q80_init_nic_func_t) >> 2),
3952                 ha->hw.mbox, (sizeof (q80_init_nic_func_rsp_t) >> 2), 0)) {
3953                 device_printf(dev, "%s: failed\n", __func__);
3954                 return -1;
3955         }
3956
3957         init_nic_rsp = (q80_init_nic_func_rsp_t *)ha->hw.mbox;
3958 // qla_dump_buf8(ha, __func__, init_nic_rsp, sizeof (q80_init_nic_func_rsp_t));
3959
3960         err = Q8_MBX_RSP_STATUS(init_nic_rsp->regcnt_status);
3961
3962         if (err) {
3963                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3964         }
3965
3966         return 0;
3967 }
3968
3969 static int
3970 qla_stop_nic_func(qla_host_t *ha)
3971 {
3972         device_t                dev;
3973         q80_stop_nic_func_t     *stop_nic;
3974         q80_stop_nic_func_rsp_t *stop_nic_rsp;
3975         uint32_t                err;
3976
3977         dev = ha->pci_dev;
3978
3979         stop_nic = (q80_stop_nic_func_t *)ha->hw.mbox;
3980         bzero(stop_nic, sizeof(q80_stop_nic_func_t));
3981
3982         stop_nic->opcode = Q8_MBX_STOP_NIC_FUNC;
3983         stop_nic->count_version = (sizeof (q80_stop_nic_func_t) >> 2);
3984         stop_nic->count_version |= Q8_MBX_CMD_VERSION;
3985
3986         stop_nic->options = Q8_STOP_NIC_DEREG_DCBX_CHNG_AEN;
3987         stop_nic->options |= Q8_STOP_NIC_DEREG_SFP_CHNG_AEN;
3988
3989 //qla_dump_buf8(ha, __func__, stop_nic, sizeof (q80_stop_nic_func_t));
3990         if (qla_mbx_cmd(ha, (uint32_t *)stop_nic,
3991                 (sizeof (q80_stop_nic_func_t) >> 2),
3992                 ha->hw.mbox, (sizeof (q80_stop_nic_func_rsp_t) >> 2), 0)) {
3993                 device_printf(dev, "%s: failed\n", __func__);
3994                 return -1;
3995         }
3996
3997         stop_nic_rsp = (q80_stop_nic_func_rsp_t *)ha->hw.mbox;
3998 //qla_dump_buf8(ha, __func__, stop_nic_rsp, sizeof (q80_stop_nic_func_rsp_t));
3999
4000         err = Q8_MBX_RSP_STATUS(stop_nic_rsp->regcnt_status);
4001
4002         if (err) {
4003                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4004         }
4005
4006         return 0;
4007 }
4008
4009 static int
4010 qla_query_fw_dcbx_caps(qla_host_t *ha)
4011 {
4012         device_t                        dev;
4013         q80_query_fw_dcbx_caps_t        *fw_dcbx;
4014         q80_query_fw_dcbx_caps_rsp_t    *fw_dcbx_rsp;
4015         uint32_t                        err;
4016
4017         dev = ha->pci_dev;
4018
4019         fw_dcbx = (q80_query_fw_dcbx_caps_t *)ha->hw.mbox;
4020         bzero(fw_dcbx, sizeof(q80_query_fw_dcbx_caps_t));
4021
4022         fw_dcbx->opcode = Q8_MBX_GET_FW_DCBX_CAPS;
4023         fw_dcbx->count_version = (sizeof (q80_query_fw_dcbx_caps_t) >> 2);
4024         fw_dcbx->count_version |= Q8_MBX_CMD_VERSION;
4025
4026         ql_dump_buf8(ha, __func__, fw_dcbx, sizeof (q80_query_fw_dcbx_caps_t));
4027         if (qla_mbx_cmd(ha, (uint32_t *)fw_dcbx,
4028                 (sizeof (q80_query_fw_dcbx_caps_t) >> 2),
4029                 ha->hw.mbox, (sizeof (q80_query_fw_dcbx_caps_rsp_t) >> 2), 0)) {
4030                 device_printf(dev, "%s: failed\n", __func__);
4031                 return -1;
4032         }
4033
4034         fw_dcbx_rsp = (q80_query_fw_dcbx_caps_rsp_t *)ha->hw.mbox;
4035         ql_dump_buf8(ha, __func__, fw_dcbx_rsp,
4036                 sizeof (q80_query_fw_dcbx_caps_rsp_t));
4037
4038         err = Q8_MBX_RSP_STATUS(fw_dcbx_rsp->regcnt_status);
4039
4040         if (err) {
4041                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4042         }
4043
4044         return 0;
4045 }
4046
4047 static int
4048 qla_idc_ack(qla_host_t *ha, uint32_t aen_mb1, uint32_t aen_mb2,
4049         uint32_t aen_mb3, uint32_t aen_mb4)
4050 {
4051         device_t                dev;
4052         q80_idc_ack_t           *idc_ack;
4053         q80_idc_ack_rsp_t       *idc_ack_rsp;
4054         uint32_t                err;
4055         int                     count = 300;
4056
4057         dev = ha->pci_dev;
4058
4059         idc_ack = (q80_idc_ack_t *)ha->hw.mbox;
4060         bzero(idc_ack, sizeof(q80_idc_ack_t));
4061
4062         idc_ack->opcode = Q8_MBX_IDC_ACK;
4063         idc_ack->count_version = (sizeof (q80_idc_ack_t) >> 2);
4064         idc_ack->count_version |= Q8_MBX_CMD_VERSION;
4065
4066         idc_ack->aen_mb1 = aen_mb1;
4067         idc_ack->aen_mb2 = aen_mb2;
4068         idc_ack->aen_mb3 = aen_mb3;
4069         idc_ack->aen_mb4 = aen_mb4;
4070
4071         ha->hw.imd_compl = 0;
4072
4073         if (qla_mbx_cmd(ha, (uint32_t *)idc_ack,
4074                 (sizeof (q80_idc_ack_t) >> 2),
4075                 ha->hw.mbox, (sizeof (q80_idc_ack_rsp_t) >> 2), 0)) {
4076                 device_printf(dev, "%s: failed\n", __func__);
4077                 return -1;
4078         }
4079
4080         idc_ack_rsp = (q80_idc_ack_rsp_t *)ha->hw.mbox;
4081
4082         err = Q8_MBX_RSP_STATUS(idc_ack_rsp->regcnt_status);
4083
4084         if (err) {
4085                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4086                 return(-1);
4087         }
4088
4089         while (count && !ha->hw.imd_compl) {
4090                 qla_mdelay(__func__, 100);
4091                 count--;
4092         }
4093
4094         if (!count)
4095                 return -1;
4096         else
4097                 device_printf(dev, "%s: count %d\n", __func__, count);
4098
4099         return (0);
4100 }
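
/*
 * After the IDC ACK is accepted, the function polls imd_compl in 100 ms
 * steps for up to 300 iterations, i.e. roughly a 30 second ceiling before
 * the ACK is deemed to have timed out.
 */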
4101
4102 static int
4103 qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits)
4104 {
4105         device_t                dev;
4106         q80_set_port_cfg_t      *pcfg;
4107         q80_set_port_cfg_rsp_t  *pfg_rsp;
4108         uint32_t                err;
4109         int                     count = 300;
4110
4111         dev = ha->pci_dev;
4112
4113         pcfg = (q80_set_port_cfg_t *)ha->hw.mbox;
4114         bzero(pcfg, sizeof(q80_set_port_cfg_t));
4115
4116         pcfg->opcode = Q8_MBX_SET_PORT_CONFIG;
4117         pcfg->count_version = (sizeof (q80_set_port_cfg_t) >> 2);
4118         pcfg->count_version |= Q8_MBX_CMD_VERSION;
4119
4120         pcfg->cfg_bits = cfg_bits;
4121
4122         device_printf(dev, "%s: cfg_bits"
4123                 " [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
4124                 " [0x%x, 0x%x, 0x%x]\n", __func__,
4125                 ((cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
4126                 ((cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
4127                 ((cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0));
4128
4129         ha->hw.imd_compl = 0;
4130
4131         if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
4132                 (sizeof (q80_set_port_cfg_t) >> 2),
4133                 ha->hw.mbox, (sizeof (q80_set_port_cfg_rsp_t) >> 2), 0)) {
4134                 device_printf(dev, "%s: failed\n", __func__);
4135                 return -1;
4136         }
4137
4138         pfg_rsp = (q80_set_port_cfg_rsp_t *)ha->hw.mbox;
4139
4140         err = Q8_MBX_RSP_STATUS(pfg_rsp->regcnt_status);
4141
4142         if (err == Q8_MBX_RSP_IDC_INTRMD_RSP) {
4143                 while (count && !ha->hw.imd_compl) {
4144                         qla_mdelay(__func__, 100);
4145                         count--;
4146                 }
4147                 if (count) {
4148                         device_printf(dev, "%s: count %d\n", __func__, count);
4149
4150                         err = 0;
4151                 }
4152         }
4153
4154         if (err) {
4155                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4156                 return(-1);
4157         }
4158
4159         return (0);
4160 }
4161
4162
4163 static int
4164 qla_get_minidump_tmplt_size(qla_host_t *ha, uint32_t *size)
4165 {
4166         uint32_t                        err;
4167         device_t                        dev = ha->pci_dev;
4168         q80_config_md_templ_size_t      *md_size;
4169         q80_config_md_templ_size_rsp_t  *md_size_rsp;
4170
4171 #ifndef QL_LDFLASH_FW
4172
4173         ql_minidump_template_hdr_t *hdr;
4174
4175         hdr = (ql_minidump_template_hdr_t *)ql83xx_minidump;
4176         *size = hdr->size_of_template;
4177         return (0);
4178
4179 #endif /* #ifndef QL_LDFLASH_FW */
4180
4181         md_size = (q80_config_md_templ_size_t *) ha->hw.mbox;
4182         bzero(md_size, sizeof(q80_config_md_templ_size_t));
4183
4184         md_size->opcode = Q8_MBX_GET_MINIDUMP_TMPLT_SIZE;
4185         md_size->count_version = (sizeof (q80_config_md_templ_size_t) >> 2);
4186         md_size->count_version |= Q8_MBX_CMD_VERSION;
4187
4188         if (qla_mbx_cmd(ha, (uint32_t *) md_size,
4189                 (sizeof(q80_config_md_templ_size_t) >> 2), ha->hw.mbox,
4190                 (sizeof(q80_config_md_templ_size_rsp_t) >> 2), 0)) {
4191
4192                 device_printf(dev, "%s: failed\n", __func__);
4193
4194                 return (-1);
4195         }
4196
4197         md_size_rsp = (q80_config_md_templ_size_rsp_t *) ha->hw.mbox;
4198
4199         err = Q8_MBX_RSP_STATUS(md_size_rsp->regcnt_status);
4200
4201         if (err) {
4202                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4203                 return(-1);
4204         }
4205
4206         *size = md_size_rsp->templ_size;
4207
4208         return (0);
4209 }
4210
4211 static int
4212 qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits)
4213 {
4214         device_t                dev;
4215         q80_get_port_cfg_t      *pcfg;
4216         q80_get_port_cfg_rsp_t  *pcfg_rsp;
4217         uint32_t                err;
4218
4219         dev = ha->pci_dev;
4220
4221         pcfg = (q80_get_port_cfg_t *)ha->hw.mbox;
4222         bzero(pcfg, sizeof(q80_get_port_cfg_t));
4223
4224         pcfg->opcode = Q8_MBX_GET_PORT_CONFIG;
4225         pcfg->count_version = (sizeof (q80_get_port_cfg_t) >> 2);
4226         pcfg->count_version |= Q8_MBX_CMD_VERSION;
4227
4228         if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
4229                 (sizeof (q80_get_port_cfg_t) >> 2),
4230                 ha->hw.mbox, (sizeof (q80_get_port_cfg_rsp_t) >> 2), 0)) {
4231                 device_printf(dev, "%s: failed\n", __func__);
4232                 return -1;
4233         }
4234
4235         pcfg_rsp = (q80_get_port_cfg_rsp_t *)ha->hw.mbox;
4236
4237         err = Q8_MBX_RSP_STATUS(pcfg_rsp->regcnt_status);
4238
4239         if (err) {
4240                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4241                 return(-1);
4242         }
4243
4244         device_printf(dev, "%s: [cfg_bits, port type]"
4245                 " [0x%08x, 0x%02x] [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
4246                 " [0x%x, 0x%x, 0x%x]\n", __func__,
4247                 pcfg_rsp->cfg_bits, pcfg_rsp->phys_port_type,
4248                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
4249                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
4250                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0)
4251                 );
4252
4253         *cfg_bits = pcfg_rsp->cfg_bits;
4254
4255         return (0);
4256 }
4257
4258 int
4259 ql_iscsi_pdu(qla_host_t *ha, struct mbuf *mp)
4260 {
4261         struct ether_vlan_header        *eh;
4262         uint16_t                        etype;
4263         struct ip                       *ip = NULL;
4264         struct ip6_hdr                  *ip6 = NULL;
4265         struct tcphdr                   *th = NULL;
4266         uint32_t                        hdrlen;
4267         uint32_t                        offset;
4268         uint8_t                         buf[sizeof(struct ip6_hdr)];
4269
4270         eh = mtod(mp, struct ether_vlan_header *);
4271
4272         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4273                 hdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4274                 etype = ntohs(eh->evl_proto);
4275         } else {
4276                 hdrlen = ETHER_HDR_LEN;
4277                 etype = ntohs(eh->evl_encap_proto);
4278         }
4279
4280         if (etype == ETHERTYPE_IP) {
4281
4282                 offset = (hdrlen + sizeof (struct ip));
4283
4284                 if (mp->m_len >= offset) {
4285                         ip = (struct ip *)(mp->m_data + hdrlen);
4286                 } else {
4287                         m_copydata(mp, hdrlen, sizeof (struct ip), buf);
4288                         ip = (struct ip *)buf;
4289                 }
4290
4291                 if (ip->ip_p == IPPROTO_TCP) {
4292
4293                         hdrlen += ip->ip_hl << 2;
4294                         offset = hdrlen + 4;
4295         
4296                         if (mp->m_len >= offset) {
4297                                 th = (struct tcphdr *)(mp->m_data + hdrlen);
4298                         } else {
4299                                 m_copydata(mp, hdrlen, 4, buf);
4300                                 th = (struct tcphdr *)buf;
4301                         }
4302                 }
4303
4304         } else if (etype == ETHERTYPE_IPV6) {
4305
4306                 offset = (hdrlen + sizeof (struct ip6_hdr));
4307
4308                 if (mp->m_len >= offset) {
4309                         ip6 = (struct ip6_hdr *)(mp->m_data + hdrlen);
4310                 } else {
4311                         m_copydata(mp, hdrlen, sizeof (struct ip6_hdr), buf);
4312                         ip6 = (struct ip6_hdr *)buf;
4313                 }
4314
4315                 if (ip6->ip6_nxt == IPPROTO_TCP) {
4316
4317                         hdrlen += sizeof(struct ip6_hdr);
4318                         offset = hdrlen + 4;
4319
4320                         if (mp->m_len >= offset) {
4321                                 th = (struct tcphdr *)(mp->m_data + hdrlen);
4322                         } else {
4323                                 m_copydata(mp, hdrlen, 4, buf);
4324                                 th = (struct tcphdr *)buf;
4325                         }
4326                 }
4327         }
4328
4329         if (th != NULL) {
4330                 if ((th->th_sport == htons(3260)) ||
4331                         (th->th_dport == htons(3260)))
4332                         return 0;
4333         }
4334         return (-1);
4335 }
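
/*
 * ql_iscsi_pdu() returns 0 when either TCP port is 3260, the IANA-assigned
 * iSCSI port.  A hypothetical transmit-path use, steering such PDUs to a
 * dedicated ring (iscsi_txr_idx is an assumed variable, not driver state):
 */
#if 0
        if (ql_iscsi_pdu(ha, mp) == 0)
                txr_idx = iscsi_txr_idx;        /* e.g. a TLV traffic-class ring */
#endif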
4336
4337 void
4338 qla_hw_async_event(qla_host_t *ha)
4339 {
4340         switch (ha->hw.aen_mb0) {
4341         case 0x8101:
4342                 (void)qla_idc_ack(ha, ha->hw.aen_mb1, ha->hw.aen_mb2,
4343                         ha->hw.aen_mb3, ha->hw.aen_mb4);
4344
4345                 break;
4346
4347         default:
4348                 break;
4349         }
4350
4351         return;
4352 }
4353
4354 #ifdef QL_LDFLASH_FW
4355 static int
4356 ql_get_minidump_template(qla_host_t *ha)
4357 {
4358         uint32_t                        err;
4359         device_t                        dev = ha->pci_dev;
4360         q80_config_md_templ_cmd_t       *md_templ;
4361         q80_config_md_templ_cmd_rsp_t   *md_templ_rsp;
4362
4363         md_templ = (q80_config_md_templ_cmd_t *) ha->hw.mbox;
4364         bzero(md_templ, (sizeof (q80_config_md_templ_cmd_t)));
4365
4366         md_templ->opcode = Q8_MBX_GET_MINIDUMP_TMPLT;
4367         md_templ->count_version = ( sizeof(q80_config_md_templ_cmd_t) >> 2);
4368         md_templ->count_version |= Q8_MBX_CMD_VERSION;
4369
4370         md_templ->buf_addr = ha->hw.dma_buf.minidump.dma_addr;
4371         md_templ->buff_size = ha->hw.dma_buf.minidump.size;
4372
4373         if (qla_mbx_cmd(ha, (uint32_t *) md_templ,
4374                 (sizeof(q80_config_md_templ_cmd_t) >> 2),
4375                  ha->hw.mbox,
4376                 (sizeof(q80_config_md_templ_cmd_rsp_t) >> 2), 0)) {
4377
4378                 device_printf(dev, "%s: failed\n", __func__);
4379
4380                 return (-1);
4381         }
4382
4383         md_templ_rsp = (q80_config_md_templ_cmd_rsp_t *) ha->hw.mbox;
4384
4385         err = Q8_MBX_RSP_STATUS(md_templ_rsp->regcnt_status);
4386
4387         if (err) {
4388                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4389                 return (-1);
4390         }
4391
4392         return (0);
4393
4394 }
4395 #endif /* #ifdef QL_LDFLASH_FW */
4396
4397 /*
4398  * Minidump related functionality 
4399  */
4400
4401 static int ql_parse_template(qla_host_t *ha);
4402
4403 static uint32_t ql_rdcrb(qla_host_t *ha,
4404                         ql_minidump_entry_rdcrb_t *crb_entry,
4405                         uint32_t * data_buff);
4406
4407 static uint32_t ql_pollrd(qla_host_t *ha,
4408                         ql_minidump_entry_pollrd_t *entry,
4409                         uint32_t * data_buff);
4410
4411 static uint32_t ql_pollrd_modify_write(qla_host_t *ha,
4412                         ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
4413                         uint32_t *data_buff);
4414
4415 static uint32_t ql_L2Cache(qla_host_t *ha,
4416                         ql_minidump_entry_cache_t *cacheEntry,
4417                         uint32_t * data_buff);
4418
4419 static uint32_t ql_L1Cache(qla_host_t *ha,
4420                         ql_minidump_entry_cache_t *cacheEntry,
4421                         uint32_t *data_buff);
4422
4423 static uint32_t ql_rdocm(qla_host_t *ha,
4424                         ql_minidump_entry_rdocm_t *ocmEntry,
4425                         uint32_t *data_buff);
4426
4427 static uint32_t ql_rdmem(qla_host_t *ha,
4428                         ql_minidump_entry_rdmem_t *mem_entry,
4429                         uint32_t *data_buff);
4430
4431 static uint32_t ql_rdrom(qla_host_t *ha,
4432                         ql_minidump_entry_rdrom_t *romEntry,
4433                         uint32_t *data_buff);
4434
4435 static uint32_t ql_rdmux(qla_host_t *ha,
4436                         ql_minidump_entry_mux_t *muxEntry,
4437                         uint32_t *data_buff);
4438
4439 static uint32_t ql_rdmux2(qla_host_t *ha,
4440                         ql_minidump_entry_mux2_t *muxEntry,
4441                         uint32_t *data_buff);
4442
4443 static uint32_t ql_rdqueue(qla_host_t *ha,
4444                         ql_minidump_entry_queue_t *queueEntry,
4445                         uint32_t *data_buff);
4446
4447 static uint32_t ql_cntrl(qla_host_t *ha,
4448                         ql_minidump_template_hdr_t *template_hdr,
4449                         ql_minidump_entry_cntrl_t *crbEntry);
4450
4451
4452 static uint32_t
4453 ql_minidump_size(qla_host_t *ha)
4454 {
4455         uint32_t i, k;
4456         uint32_t size = 0;
4457         ql_minidump_template_hdr_t *hdr;
4458
4459         hdr = (ql_minidump_template_hdr_t *)ha->hw.dma_buf.minidump.dma_b;
4460
4461         i = 0x2;
4462
4463         for (k = 1; k < QL_DBG_CAP_SIZE_ARRAY_LEN; k++) {
4464                 if (i & ha->hw.mdump_capture_mask)
4465                         size += hdr->capture_size_array[k];
4466                 i = i << 1;
4467         }
4468         return (size);
4469 }
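
/*
 * Capture-mask bit k (starting from bit 1, value 0x2) selects
 * capture_size_array[k] from the template header.  For example, an
 * mdump_capture_mask of 0x6 yields
 * capture_size_array[1] + capture_size_array[2].
 */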
4470
4471 static void
4472 ql_free_minidump_buffer(qla_host_t *ha)
4473 {
4474         if (ha->hw.mdump_buffer != NULL) {
4475                 free(ha->hw.mdump_buffer, M_QLA83XXBUF);
4476                 ha->hw.mdump_buffer = NULL;
4477                 ha->hw.mdump_buffer_size = 0;
4478         }
4479         return;
4480 }
4481
4482 static int
4483 ql_alloc_minidump_buffer(qla_host_t *ha)
4484 {
4485         ha->hw.mdump_buffer_size = ql_minidump_size(ha);
4486
4487         if (!ha->hw.mdump_buffer_size)
4488                 return (-1);
4489
4490         ha->hw.mdump_buffer = malloc(ha->hw.mdump_buffer_size, M_QLA83XXBUF,
4491                                         M_NOWAIT);
4492
4493         if (ha->hw.mdump_buffer == NULL)
4494                 return (-1);
4495
4496         return (0);
4497 }
4498
4499 static void
4500 ql_free_minidump_template_buffer(qla_host_t *ha)
4501 {
4502         if (ha->hw.mdump_template != NULL) {
4503                 free(ha->hw.mdump_template, M_QLA83XXBUF);
4504                 ha->hw.mdump_template = NULL;
4505                 ha->hw.mdump_template_size = 0;
4506         }
4507         return;
4508 }
4509
4510 static int
4511 ql_alloc_minidump_template_buffer(qla_host_t *ha)
4512 {
4513         ha->hw.mdump_template_size = ha->hw.dma_buf.minidump.size;
4514
4515         ha->hw.mdump_template = malloc(ha->hw.mdump_template_size,
4516                                         M_QLA83XXBUF, M_NOWAIT);
4517
4518         if (ha->hw.mdump_template == NULL)
4519                 return (-1);
4520
4521         return (0);
4522 }
4523
4524 static int
4525 ql_alloc_minidump_buffers(qla_host_t *ha)
4526 {
4527         int ret;
4528
4529         ret = ql_alloc_minidump_template_buffer(ha);
4530
4531         if (ret)
4532                 return (ret);
4533
4534         ret = ql_alloc_minidump_buffer(ha);
4535
4536         if (ret)
4537                 ql_free_minidump_template_buffer(ha);
4538
4539         return (ret);
4540 }
4541
4542
4543 static uint32_t
4544 ql_validate_minidump_checksum(qla_host_t *ha)
4545 {
4546         uint64_t sum = 0;
4547         int count;
4548         uint32_t *template_buff;
4549
4550         count = ha->hw.dma_buf.minidump.size / sizeof(uint32_t);
4551         template_buff = ha->hw.dma_buf.minidump.dma_b;
4552
4553         while (count-- > 0) {
4554                 sum += *template_buff++;
4555         }
4556
4557         while (sum >> 32) {
4558                 sum = (sum & 0xFFFFFFFF) + (sum >> 32);
4559         }
4560
4561         return (~sum);
4562 }
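
/*
 * This is the 32-bit analogue of the Internet (one's complement)
 * checksum: all 32-bit words of the template are summed into 64 bits,
 * the carries are folded back into the low 32 bits, and the complement
 * is returned.  A valid template sums to 0xFFFFFFFF ("negative zero"),
 * which makes the value returned above 0.
 */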
4563
4564 int
4565 ql_minidump_init(qla_host_t *ha)
4566 {
4567         int             ret = 0;
4568         uint32_t        template_size = 0;
4569         device_t        dev = ha->pci_dev;
4570
4571         /*
4572          * Get Minidump Template Size
4573          */
4574         ret = qla_get_minidump_tmplt_size(ha, &template_size);
4575
4576         if (ret || (template_size == 0)) {
4577                 device_printf(dev, "%s: failed [%d, %d]\n", __func__, ret,
4578                         template_size);
4579                 return (-1);
4580         }
4581
4582         /*
4583          * Allocate Memory for Minidump Template
4584          */
4585
4586         ha->hw.dma_buf.minidump.alignment = 8;
4587         ha->hw.dma_buf.minidump.size = template_size;
4588
4589 #ifdef QL_LDFLASH_FW
4590         if (ql_alloc_dmabuf(ha, &ha->hw.dma_buf.minidump)) {
4591
4592                 device_printf(dev, "%s: minidump dma alloc failed\n", __func__);
4593
4594                 return (-1);
4595         }
4596         ha->hw.dma_buf.flags.minidump = 1;
4597
4598         /*
4599          * Retrieve Minidump Template
4600          */
4601         ret = ql_get_minidump_template(ha);
4602 #else
4603         ha->hw.dma_buf.minidump.dma_b = ql83xx_minidump;
4604
4605 #endif /* #ifdef QL_LDFLASH_FW */
4606
4607         if (ret == 0) {
4608
4609                 ret = ql_validate_minidump_checksum(ha);
4610
4611                 if (ret == 0) {
4612
4613                         ret = ql_alloc_minidump_buffers(ha);
4614
4615                         if (ret == 0)
4616                                 ha->hw.mdump_init = 1;
4617                         else
4618                                 device_printf(dev,
4619                                         "%s: ql_alloc_minidump_buffers"
4620                                         " failed\n", __func__);
4621                 } else {
4622                         device_printf(dev, "%s: ql_validate_minidump_checksum"
4623                                 " failed\n", __func__);
4624                 }
4625         } else {
4626                 device_printf(dev, "%s: ql_get_minidump_template failed\n",
4627                          __func__);
4628         }
4629
4630         if (ret)
4631                 ql_minidump_free(ha);
4632
4633         return (ret);
4634 }
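
/*
 * Usage sketch (illustrative, not part of the driver):
 *
 *	if (ql_minidump_init(ha) == 0)
 *		ql_minidump(ha);
 *
 * ql_minidump() below is a no-op unless mdump_init is set and captures
 * at most once per initialization (mdump_done); ql_minidump_free()
 * releases both buffers and clears mdump_init.
 */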
4635
4636 static void
4637 ql_minidump_free(qla_host_t *ha)
4638 {
4639         ha->hw.mdump_init = 0;
4640         if (ha->hw.dma_buf.flags.minidump) {
4641                 ha->hw.dma_buf.flags.minidump = 0;
4642                 ql_free_dmabuf(ha, &ha->hw.dma_buf.minidump);
4643         }
4644
4645         ql_free_minidump_template_buffer(ha);
4646         ql_free_minidump_buffer(ha);
4647
4648         return;
4649 }
4650
4651 void
4652 ql_minidump(qla_host_t *ha)
4653 {
4654         if (!ha->hw.mdump_init)
4655                 return;
4656
4657         if (ha->hw.mdump_done)
4658                 return;
4659         ha->hw.mdump_usec_ts = qla_get_usec_timestamp();
4660         ha->hw.mdump_start_seq_index = ql_stop_sequence(ha);
4661
4662         bzero(ha->hw.mdump_buffer, ha->hw.mdump_buffer_size);
4663         bzero(ha->hw.mdump_template, ha->hw.mdump_template_size);
4664
4665         bcopy(ha->hw.dma_buf.minidump.dma_b, ha->hw.mdump_template,
4666                 ha->hw.mdump_template_size);
4667
4668         ql_parse_template(ha);
4669  
4670         ql_start_sequence(ha, ha->hw.mdump_start_seq_index);
4671
4672         ha->hw.mdump_done = 1;
4673
4674         return;
4675 }
4676
4677
4678 /*
4679  * helper routines
4680  */
4681 static void 
4682 ql_entry_err_chk(ql_minidump_entry_t *entry, uint32_t esize)
4683 {
4684         if (esize != entry->hdr.entry_capture_size) {
4685                 entry->hdr.entry_capture_size = esize;
4686                 entry->hdr.driver_flags |= QL_DBG_SIZE_ERR_FLAG;
4687         }
4688         return;
4689 }
4690
4691
4692 static int 
4693 ql_parse_template(qla_host_t *ha)
4694 {
4695         uint32_t num_of_entries, buff_level, e_cnt, esize;
4696         uint32_t end_cnt, rv = 0;
4697         char *dump_buff, *dbuff;
4698         int sane_start = 0, sane_end = 0;
4699         ql_minidump_template_hdr_t *template_hdr;
4700         ql_minidump_entry_t *entry;
4701         uint32_t capture_mask; 
4702         uint32_t dump_size; 
4703
4704         /* Setup parameters */
4705         template_hdr = (ql_minidump_template_hdr_t *)ha->hw.mdump_template;
4706
4707         if (template_hdr->entry_type == TLHDR)
4708                 sane_start = 1;
4709         
4710         dump_buff = (char *) ha->hw.mdump_buffer;
4711
4712         num_of_entries = template_hdr->num_of_entries;
4713
4714         entry = (ql_minidump_entry_t *) ((char *)template_hdr 
4715                         + template_hdr->first_entry_offset );
4716
4717         template_hdr->saved_state_array[QL_OCM0_ADDR_INDX] =
4718                 template_hdr->ocm_window_array[ha->pci_func];
4719         template_hdr->saved_state_array[QL_PCIE_FUNC_INDX] = ha->pci_func;
4720
4721         capture_mask = ha->hw.mdump_capture_mask;
4722         dump_size = ha->hw.mdump_buffer_size;
4723
4724         template_hdr->driver_capture_mask = capture_mask;
4725
4726         QL_DPRINT80(ha, (ha->pci_dev,
4727                 "%s: sane_start = %d num_of_entries = %d "
4728                 "capture_mask = 0x%x dump_size = %d \n", 
4729                 __func__, sane_start, num_of_entries, capture_mask, dump_size));
4730
4731         for (buff_level = 0, e_cnt = 0; e_cnt < num_of_entries; e_cnt++) {
4732
4733                 /*
4734                  * If the entry's capture mask does not overlap the driver's
4735                  * capture mask, mark the entry skipped in driver_flags.
4736                  */
4737                 
4738                 if (!(entry->hdr.entry_capture_mask & capture_mask)) {
4739
4740                         entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4741                         entry = (ql_minidump_entry_t *) ((char *) entry
4742                                         + entry->hdr.entry_size);
4743                         continue;
4744                 }
4745
4746                 /*
4747                  * This check is only needed when the allocated capture
4748                  * buffer is too small to hold all of the entries selected
4749                  * by the capture mask.  Ideally the buffer contents would
4750                  * be flushed to a file before processing the next entry;
4751                  * as implemented, an entry that would overflow the buffer
4752                  * is simply marked skipped and the remaining entries are
4753                  * still examined one by one.
4754                  */
4755                 if (entry->hdr.entry_capture_size != 0) {
4756                         if ((buff_level + entry->hdr.entry_capture_size) >
4757                                 dump_size) {
4758                                 /* Entry would overflow the buffer; mark skipped. */
4759                                 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4760                                 entry = (ql_minidump_entry_t *) ((char *) entry
4761                                                 + entry->hdr.entry_size);
4762                                 continue;
4763                         }
4764                 }
4765
4766                 /*
4767                  * Decode the entry type and process it accordingly
4768                  */
4769
4770                 switch (entry->hdr.entry_type) {
4771                 case RDNOP:
4772                         break;
4773
4774                 case RDEND:
4775                         if (sane_end == 0) {
4776                                 end_cnt = e_cnt;
4777                         }
4778                         sane_end++;
4779                         break;
4780
4781                 case RDCRB:
4782                         dbuff = dump_buff + buff_level;
4783                         esize = ql_rdcrb(ha, (void *)entry, (void *)dbuff);
4784                         ql_entry_err_chk(entry, esize);
4785                         buff_level += esize;
4786                         break;
4787
4788                 case POLLRD:
4789                         dbuff = dump_buff + buff_level;
4790                         esize = ql_pollrd(ha, (void *)entry, (void *)dbuff);
4791                         ql_entry_err_chk(entry, esize);
4792                         buff_level += esize;
4793                         break;
4794
4795                 case POLLRDMWR:
4796                         dbuff = dump_buff + buff_level;
4797                         esize = ql_pollrd_modify_write(ha, (void *)entry,
4798                                         (void *)dbuff);
4799                         ql_entry_err_chk(entry, esize);
4800                         buff_level += esize;
4801                         break;
4802
4803                 case L2ITG:
4804                 case L2DTG:
4805                 case L2DAT:
4806                 case L2INS:
4807                         dbuff = dump_buff + buff_level;
4808                         esize = ql_L2Cache(ha, (void *)entry, (void *)dbuff);
4809                         if (esize == -1) {
4810                                 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4811                         } else {
4812                                 ql_entry_err_chk(entry, esize);
4813                                 buff_level += esize;
4814                         }
4815                         break;
4816
4817                 case L1DAT:
4818                 case L1INS:
4819                         dbuff = dump_buff + buff_level;
4820                         esize = ql_L1Cache(ha, (void *)entry, (void *)dbuff);
4821                         ql_entry_err_chk(entry, esize);
4822                         buff_level += esize;
4823                         break;
4824
4825                 case RDOCM:
4826                         dbuff = dump_buff + buff_level;
4827                         esize = ql_rdocm(ha, (void *)entry, (void *)dbuff);
4828                         ql_entry_err_chk(entry, esize);
4829                         buff_level += esize;
4830                         break;
4831
4832                 case RDMEM:
4833                         dbuff = dump_buff + buff_level;
4834                         esize = ql_rdmem(ha, (void *)entry, (void *)dbuff);
4835                         ql_entry_err_chk(entry, esize);
4836                         buff_level += esize;
4837                         break;
4838
4839                 case BOARD:
4840                 case RDROM:
4841                         dbuff = dump_buff + buff_level;
4842                         esize = ql_rdrom(ha, (void *)entry, (void *)dbuff);
4843                         ql_entry_err_chk(entry, esize);
4844                         buff_level += esize;
4845                         break;
4846
4847                 case RDMUX:
4848                         dbuff = dump_buff + buff_level;
4849                         esize = ql_rdmux(ha, (void *)entry, (void *)dbuff);
4850                         ql_entry_err_chk(entry, esize);
4851                         buff_level += esize;
4852                         break;
4853
4854                 case RDMUX2:
4855                         dbuff = dump_buff + buff_level;
4856                         esize = ql_rdmux2(ha, (void *)entry, (void *)dbuff);
4857                         ql_entry_err_chk(entry, esize);
4858                         buff_level += esize;
4859                         break;
4860
4861                 case QUEUE:
4862                         dbuff = dump_buff + buff_level;
4863                         esize = ql_rdqueue(ha, (void *)entry, (void *)dbuff);
4864                         ql_entry_err_chk(entry, esize);
4865                         buff_level += esize;
4866                         break;
4867
4868                 case CNTRL:
4869                         if ((rv = ql_cntrl(ha, template_hdr, (void *)entry))) {
4870                                 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4871                         }
4872                         break;
4873                 default:
4874                         entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4875                         break;
4876                 }
4877                 /*  next entry in the template */
4878                 entry = (ql_minidump_entry_t *) ((char *) entry
4879                                                 + entry->hdr.entry_size);
4880         }
4881
4882         if (!sane_start || (sane_end > 1)) {
4883                 device_printf(ha->pci_dev,
4884                         "\n%s: Template configuration error. Check Template\n",
4885                         "\n%s: Template configuration error. Check template.\n",
4886         }
4887         
4888         QL_DPRINT80(ha, (ha->pci_dev, "%s: Minidump num of entries = %d\n",
4889                 __func__, template_hdr->num_of_entries));
4890
4891         return (0);
4892 }
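
/*
 * Template layout assumed by the parser above (illustrative):
 *
 *	+---------------------------+ <- mdump_template
 *	| template header (TLHDR)   |
 *	+---------------------------+ <- header + first_entry_offset
 *	| entry 0 header + body     |   hdr.entry_size bytes
 *	+---------------------------+
 *	| ... num_of_entries total, |
 *	| terminated by RDEND       |
 *	+---------------------------+
 *
 * Captured data is packed contiguously into mdump_buffer; buff_level
 * tracks the next free offset within it.
 */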
4893
4894 /*
4895  * Read CRB operation.
4896  */
4897 static uint32_t
4898 ql_rdcrb(qla_host_t *ha, ql_minidump_entry_rdcrb_t *crb_entry,
4899         uint32_t *data_buff)
4900 {
4901         int loop_cnt;
4902         int ret;
4903         uint32_t op_count, addr, stride, value = 0;
4904
4905         addr = crb_entry->addr;
4906         op_count = crb_entry->op_count;
4907         stride = crb_entry->addr_stride;
4908
4909         for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
4910
4911                 ret = ql_rdwr_indreg32(ha, addr, &value, 1);
4912
4913                 if (ret)
4914                         return (0);
4915
4916                 *data_buff++ = addr;
4917                 *data_buff++ = value;
4918                 addr = addr + stride;
4919         }
4920
4921         /*
4922          * Return the number of bytes captured into the buffer.
4923          */
4924         return (op_count * (2 * sizeof(uint32_t)));
4925 }
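
/*
 * Output layout (illustrative): each CRB read stores an
 * (address, value) pair, so an entry with op_count = 2, addr = 0x1000
 * and addr_stride = 4 captures 16 bytes: { 0x1000, v0, 0x1004, v1 }.
 */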
4926
4927 /*
4928  * Handle L2 Cache.
4929  */
4930
4931 static uint32_t 
4932 ql_L2Cache(qla_host_t *ha, ql_minidump_entry_cache_t *cacheEntry,
4933         uint32_t *data_buff)
4934 {
4935         int i, k;
4936         int loop_cnt;
4937         int ret;
4938
4939         uint32_t read_value;
4940         uint32_t addr, read_addr, cntrl_addr, tag_reg_addr, cntl_value_w;
4941         uint32_t tag_value, read_cnt;
4942         volatile uint8_t cntl_value_r;
4943         long timeout;
4944         uint32_t data;
4945
4946         loop_cnt = cacheEntry->op_count;
4947
4948         read_addr = cacheEntry->read_addr;
4949         cntrl_addr = cacheEntry->control_addr;
4950         cntl_value_w = (uint32_t) cacheEntry->write_value;
4951
4952         tag_reg_addr = cacheEntry->tag_reg_addr;
4953
4954         tag_value = cacheEntry->init_tag_value;
4955         read_cnt = cacheEntry->read_addr_cnt;
4956
4957         for (i = 0; i < loop_cnt; i++) {
4958
4959                 ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
4960                 if (ret)
4961                         return (0);
4962
4963                 if (cacheEntry->write_value != 0) { 
4964
4965                         ret = ql_rdwr_indreg32(ha, cntrl_addr,
4966                                         &cntl_value_w, 0);
4967                         if (ret)
4968                                 return (0);
4969                 }
4970
4971                 if (cacheEntry->poll_mask != 0) { 
4972
4973                         timeout = cacheEntry->poll_wait;
4974
4975                         ret = ql_rdwr_indreg32(ha, cntrl_addr, &data, 1);
4976                         if (ret)
4977                                 return (0);
4978
4979                         cntl_value_r = (uint8_t)data;
4980
4981                         while ((cntl_value_r & cacheEntry->poll_mask) != 0) {
4982
4983                                 if (timeout) {
4984                                         qla_mdelay(__func__, 1);
4985                                         timeout--;
4986                                 } else
4987                                         break;
4988
4989                                 ret = ql_rdwr_indreg32(ha, cntrl_addr,
4990                                                 &data, 1);
4991                                 if (ret)
4992                                         return (0);
4993
4994                                 cntl_value_r = (uint8_t)data;
4995                         }
4996                         if (!timeout) {
4997                         /*
4998                          * Polling timed out; the capture for this
4999                          * entry failed.  The caller should skip the
5000                          * remaining entries, write the buffer out to
5001                          * a file and use the driver specific fields
5002                          * in the template header to report the error.
5003                          */
5004                                 return (-1);
5005                         }
5006                 }
5007
5008                 addr = read_addr;
5009                 for (k = 0; k < read_cnt; k++) {
5010
5011                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
5012                         if (ret)
5013                                 return (0);
5014
5015                         *data_buff++ = read_value;
5016                         addr += cacheEntry->read_addr_stride;
5017                 }
5018
5019                 tag_value += cacheEntry->tag_value_stride;
5020         }
5021
5022         return (read_cnt * loop_cnt * sizeof(uint32_t));
5023 }
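
/*
 * Notes on the L2 variant above: when write_value is non-zero the
 * control register is written before each read burst, and when
 * poll_mask is non-zero the control register is polled (1 ms per tick,
 * up to poll_wait ticks) until the masked bits clear.  A poll timeout
 * fails the whole entry with (uint32_t)-1, which the parser uses to
 * mark the entry skipped.
 */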
5024
5025 /*
5026  * Handle L1 Cache.
5027  */
5028
5029 static uint32_t 
5030 ql_L1Cache(qla_host_t *ha,
5031         ql_minidump_entry_cache_t *cacheEntry,
5032         uint32_t *data_buff)
5033 {
5034         int ret;
5035         int i, k;
5036         int loop_cnt;
5037
5038         uint32_t read_value;
5039         uint32_t addr, read_addr, cntrl_addr, tag_reg_addr;
5040         uint32_t tag_value, read_cnt;
5041         uint32_t cntl_value_w;
5042
5043         loop_cnt = cacheEntry->op_count;
5044
5045         read_addr = cacheEntry->read_addr;
5046         cntrl_addr = cacheEntry->control_addr;
5047         cntl_value_w = (uint32_t) cacheEntry->write_value;
5048
5049         tag_reg_addr = cacheEntry->tag_reg_addr;
5050
5051         tag_value = cacheEntry->init_tag_value;
5052         read_cnt = cacheEntry->read_addr_cnt;
5053
5054         for (i = 0; i < loop_cnt; i++) {
5055
5056                 ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
5057                 if (ret)
5058                         return (0);
5059
5060                 ret = ql_rdwr_indreg32(ha, cntrl_addr, &cntl_value_w, 0);
5061                 if (ret)
5062                         return (0);
5063
5064                 addr = read_addr;
5065                 for (k = 0; k < read_cnt; k++) {
5066
5067                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
5068                         if (ret)
5069                                 return (0);
5070
5071                         *data_buff++ = read_value;
5072                         addr += cacheEntry->read_addr_stride;
5073                 }
5074
5075                 tag_value += cacheEntry->tag_value_stride;
5076         }
5077
5078         return (read_cnt * loop_cnt * sizeof(uint32_t));
5079 }
5080
5081 /*
5082  * Reading OCM memory
5083  */
5084
5085 static uint32_t 
5086 ql_rdocm(qla_host_t *ha,
5087         ql_minidump_entry_rdocm_t *ocmEntry,
5088         uint32_t *data_buff)
5089 {
5090         int i, loop_cnt;
5091         volatile uint32_t addr;
5092         volatile uint32_t value;
5093
5094         addr = ocmEntry->read_addr;
5095         loop_cnt = ocmEntry->op_count;
5096
5097         for (i = 0; i < loop_cnt; i++) {
5098                 value = READ_REG32(ha, addr);
5099                 *data_buff++ = value;
5100                 addr += ocmEntry->read_addr_stride;
5101         }
5102         return (loop_cnt * sizeof(value));
5103 }
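
/*
 * OCM (on-chip memory) entries are read with direct MMIO reads
 * (READ_REG32) rather than the indirect register interface used by the
 * other handlers.  The per-function OCM window recorded by
 * ql_parse_template() in saved_state_array[QL_OCM0_ADDR_INDX] is
 * presumably applied by CNTRL entries before these reads run; the
 * handler itself trusts read_addr as given.
 */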
5104
5105 /*
5106  * Read memory
5107  */
5108
5109 static uint32_t 
5110 ql_rdmem(qla_host_t *ha,
5111         ql_minidump_entry_rdmem_t *mem_entry,
5112         uint32_t *data_buff)
5113 {
5114         int ret;
5115         int i, loop_cnt;
5116         volatile uint32_t addr;
5117         q80_offchip_mem_val_t val;
5118
5119         addr = mem_entry->read_addr;
5120
5121         /* read_data_size is in bytes; each read returns 16 bytes */
5122         loop_cnt = mem_entry->read_data_size / (sizeof(uint32_t) * 4);
5123
5124         for (i = 0; i < loop_cnt; i++) {
5125
5126                 ret = ql_rdwr_offchip_mem(ha, (addr & 0x0ffffffff), &val, 1);
5127                 if (ret)
5128                         return (0);
5129
5130                 *data_buff++ = val.data_lo;
5131                 *data_buff++ = val.data_hi;
5132                 *data_buff++ = val.data_ulo;
5133                 *data_buff++ = val.data_uhi;
5134
5135                 addr += (sizeof(uint32_t) * 4);
5136         }
5137
5138         return (loop_cnt * (sizeof(uint32_t) * 4));
5139 }
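
/*
 * Worked example (illustrative): off-chip memory is read 16 bytes at a
 * time, so read_data_size must be a multiple of 16.  An entry with
 * read_data_size = 64 runs 4 iterations and stores 16 words
 * (data_lo, data_hi, data_ulo, data_uhi per iteration) in the buffer.
 */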
5140
5141 /*
5142  * Read Rom
5143  */
5144
5145 static uint32_t 
5146 ql_rdrom(qla_host_t *ha,
5147         ql_minidump_entry_rdrom_t *romEntry,
5148         uint32_t *data_buff)
5149 {
5150         int ret;
5151         int i, loop_cnt;
5152         uint32_t addr;
5153         uint32_t value;
5154
5155         addr = romEntry->read_addr;
5156         loop_cnt = romEntry->read_data_size; /* This is size in bytes */
5157         loop_cnt /= sizeof(value);
5158
5159         for (i = 0; i < loop_cnt; i++) {
5160
5161                 ret = ql_rd_flash32(ha, addr, &value);
5162                 if (ret)
5163                         return (0);
5164
5165                 *data_buff++ = value;
5166                 addr += sizeof(value);
5167         }
5168
5169         return (loop_cnt * sizeof(value));
5170 }
5171
5172 /*
5173  * Read MUX data
5174  */
5175
5176 static uint32_t 
5177 ql_rdmux(qla_host_t *ha,
5178         ql_minidump_entry_mux_t *muxEntry,
5179         uint32_t *data_buff)
5180 {
5181         int ret;
5182         int loop_cnt;
5183         uint32_t read_value, sel_value;
5184         uint32_t read_addr, select_addr;
5185
5186         select_addr = muxEntry->select_addr;
5187         sel_value = muxEntry->select_value;
5188         read_addr = muxEntry->read_addr;
5189
5190         for (loop_cnt = 0; loop_cnt < muxEntry->op_count; loop_cnt++) {
5191
5192                 ret = ql_rdwr_indreg32(ha, select_addr, &sel_value, 0);
5193                 if (ret)
5194                         return (0);
5195
5196                 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
5197                 if (ret)
5198                         return (0);
5199
5200                 *data_buff++ = sel_value;
5201                 *data_buff++ = read_value;
5202
5203                 sel_value += muxEntry->select_value_stride;
5204         }
5205
5206         return (loop_cnt * (2 * sizeof(uint32_t)));
5207 }
5208
5209 static uint32_t
5210 ql_rdmux2(qla_host_t *ha,
5211         ql_minidump_entry_mux2_t *muxEntry,
5212         uint32_t *data_buff)
5213 {
5214         int ret;
5215         int loop_cnt;
5216
5217         uint32_t select_addr_1, select_addr_2;
5218         uint32_t select_value_1, select_value_2;
5219         uint32_t select_value_count, select_value_mask;
5220         uint32_t read_addr, read_value;
5221
5222         select_addr_1 = muxEntry->select_addr_1;
5223         select_addr_2 = muxEntry->select_addr_2;
5224         select_value_1 = muxEntry->select_value_1;
5225         select_value_2 = muxEntry->select_value_2;
5226         select_value_count = muxEntry->select_value_count;
5227         select_value_mask  = muxEntry->select_value_mask;
5228
5229         read_addr = muxEntry->read_addr;
5230
5231         for (loop_cnt = 0; loop_cnt < muxEntry->select_value_count;
5232                 loop_cnt++) {
5233
5234                 uint32_t temp_sel_val;
5235
5236                 ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_1, 0);
5237                 if (ret)
5238                         return (0);
5239
5240                 temp_sel_val = select_value_1 & select_value_mask;
5241
5242                 ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
5243                 if (ret)
5244                         return (0);
5245
5246                 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
5247                 if (ret)
5248                         return (0);
5249
5250                 *data_buff++ = temp_sel_val;
5251                 *data_buff++ = read_value;
5252
5253                 ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_2, 0);
5254                 if (ret)
5255                         return (0);
5256
5257                 temp_sel_val = select_value_2 & select_value_mask;
5258
5259                 ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
5260                 if (ret)
5261                         return (0);
5262
5263                 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
5264                 if (ret)
5265                         return (0);
5266
5267                 *data_buff++ = temp_sel_val;
5268                 *data_buff++ = read_value;
5269
5270                 select_value_1 += muxEntry->select_value_stride;
5271                 select_value_2 += muxEntry->select_value_stride;
5272         }
5273
5274         return (loop_cnt * (4 * sizeof(uint32_t)));
5275 }
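
/*
 * Each iteration above records two (select, data) pairs, one for
 * select_value_1 and one for select_value_2 (both masked with
 * select_value_mask), hence the 4 * sizeof(uint32_t) bytes per loop
 * counted in the returned size.
 */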
5276
5277 /*
5278  * Handling Queue State Reads.
5279  */
5280
5281 static uint32_t 
5282 ql_rdqueue(qla_host_t *ha,
5283         ql_minidump_entry_queue_t *queueEntry,
5284         uint32_t *data_buff)
5285 {
5286         int ret;
5287         int loop_cnt, k;
5288         uint32_t read_value;
5289         uint32_t read_addr, read_stride, select_addr;
5290         uint32_t queue_id, read_cnt;
5291
5292         read_cnt = queueEntry->read_addr_cnt;
5293         read_stride = queueEntry->read_addr_stride;
5294         select_addr = queueEntry->select_addr;
5295
5296         for (loop_cnt = 0, queue_id = 0; loop_cnt < queueEntry->op_count;
5297                 loop_cnt++) {
5298
5299                 ret = ql_rdwr_indreg32(ha, select_addr, &queue_id, 0);
5300                 if (ret)
5301                         return (0);
5302
5303                 read_addr = queueEntry->read_addr;
5304
5305                 for (k = 0; k < read_cnt; k++) {
5306
5307                         ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
5308                         if (ret)
5309                                 return (0);
5310
5311                         *data_buff++ = read_value;
5312                         read_addr += read_stride;
5313                 }
5314
5315                 queue_id += queueEntry->queue_id_stride;
5316         }
5317
5318         return (loop_cnt * (read_cnt * sizeof(uint32_t)));
5319 }
5320
5321 /*
5322  * Handling control entries.
5323  */
5324
5325 static uint32_t 
5326 ql_cntrl(qla_host_t *ha,
5327         ql_minidump_template_hdr_t *template_hdr,
5328         ql_minidump_entry_cntrl_t *crbEntry)
5329 {
5330         int ret;
5331         int count;
5332         uint32_t opcode, read_value, addr, entry_addr;
5333         long timeout;
5334
5335         entry_addr = crbEntry->addr;
5336
5337         for (count = 0; count < crbEntry->op_count; count++) {
5338                 opcode = crbEntry->opcode;
5339
5340                 if (opcode & QL_DBG_OPCODE_WR) {
5341
5342                         ret = ql_rdwr_indreg32(ha, entry_addr,
5343                                         &crbEntry->value_1, 0);
5344                         if (ret)
5345                                 return (0);
5346
5347                         opcode &= ~QL_DBG_OPCODE_WR;
5348                 }
5349
5350                 if (opcode & QL_DBG_OPCODE_RW) {
5351
5352                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
5353                         if (ret)
5354                                 return (0);
5355
5356                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
5357                         if (ret)
5358                                 return (0);
5359
5360                         opcode &= ~QL_DBG_OPCODE_RW;
5361                 }
5362
5363                 if (opcode & QL_DBG_OPCODE_AND) {
5364
5365                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
5366                         if (ret)
5367                                 return (0);
5368
5369                         read_value &= crbEntry->value_2;
5370                         opcode &= ~QL_DBG_OPCODE_AND;
5371
5372                         if (opcode & QL_DBG_OPCODE_OR) {
5373                                 read_value |= crbEntry->value_3;
5374                                 opcode &= ~QL_DBG_OPCODE_OR;
5375                         }
5376
5377                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
5378                         if (ret)
5379                                 return (0);
5380                 }
5381
5382                 if (opcode & QL_DBG_OPCODE_OR) {
5383
5384                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
5385                         if (ret)
5386                                 return (0);
5387
5388                         read_value |= crbEntry->value_3;
5389
5390                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
5391                         if (ret)
5392                                 return (0);
5393
5394                         opcode &= ~QL_DBG_OPCODE_OR;
5395                 }
5396
5397                 if (opcode & QL_DBG_OPCODE_POLL) {
5398
5399                         opcode &= ~QL_DBG_OPCODE_POLL;
5400                         timeout = crbEntry->poll_timeout;
5401                         addr = entry_addr;
5402
5403                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
5404                         if (ret)
5405                                 return (0);
5406
5407                         while ((read_value & crbEntry->value_2)
5408                                 != crbEntry->value_1) {
5409
5410                                 if (timeout) {
5411                                         qla_mdelay(__func__, 1);
5412                                         timeout--;
5413                                 } else
5414                                         break;
5415
5416                                 ret = ql_rdwr_indreg32(ha, addr,
5417                                                 &read_value, 1);
5418                                 if (ret)
5419                                         return (0);
5420                         }
5421
5422                         if (!timeout) {
5423                                 /*
5424                                  * Polling timed out; the capture for this
5425                                  * entry failed.  The caller should skip the
5426                                  * remaining entries, write the buffer out
5427                                  * to a file and use the driver specific
5428                                  * fields in the template header to report
5429                                  * the error.
5430                                  */
5431                                 return (-1);
5432                         }
5433                 }
5434
5435                 if (opcode & QL_DBG_OPCODE_RDSTATE) {
5436                         /*
5437                          * decide which address to use.
5438                          */
5439                         if (crbEntry->state_index_a) {
5440                                 addr = template_hdr->saved_state_array[
5441                                                 crbEntry->state_index_a];
5442                         } else {
5443                                 addr = entry_addr;
5444                         }
5445
5446                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
5447                         if (ret)
5448                                 return (0);
5449
5450                         template_hdr->saved_state_array[crbEntry->state_index_v]
5451                                         = read_value;
5452                         opcode &= ~QL_DBG_OPCODE_RDSTATE;
5453                 }
5454
5455                 if (opcode & QL_DBG_OPCODE_WRSTATE) {
5456                         /*
5457                          * decide which value to use.
5458                          */
5459                         if (crbEntry->state_index_v) {
5460                                 read_value = template_hdr->saved_state_array[
5461                                                 crbEntry->state_index_v];
5462                         } else {
5463                                 read_value = crbEntry->value_1;
5464                         }
5465                         /*
5466                          * decide which address to use.
5467                          */
5468                         if (crbEntry->state_index_a) {
5469                                 addr = template_hdr->saved_state_array[
5470                                                 crbEntry->state_index_a];
5471                         } else {
5472                                 addr = entry_addr;
5473                         }
5474
5475                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 0);
5476                         if (ret)
5477                                 return (0);
5478
5479                         opcode &= ~QL_DBG_OPCODE_WRSTATE;
5480                 }
5481
5482                 if (opcode & QL_DBG_OPCODE_MDSTATE) {
5483                         /*  Read value from saved state using index */
5484                         read_value = template_hdr->saved_state_array[
5485                                                 crbEntry->state_index_v];
5486
5487                         read_value <<= crbEntry->shl; /* shift left */
5488                         read_value >>= crbEntry->shr; /* shift right */
5489
5490                         if (crbEntry->value_2) {
5491                                 /* check if AND mask is provided */
5492                                 read_value &= crbEntry->value_2;
5493                         }
5494
5495                         read_value |= crbEntry->value_3; /* OR operation */
5496                         read_value += crbEntry->value_1; /* increment op */
5497
5498                         /* Write value back to state area. */
5499
5500                         template_hdr->saved_state_array[crbEntry->state_index_v]
5501                                         = read_value;
5502                         opcode &= ~QL_DBG_OPCODE_MDSTATE;
5503                 }
5504
5505                 entry_addr += crbEntry->addr_stride;
5506         }
5507
5508         return (0);
5509 }
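
/*
 * The MDSTATE transform above, written out (illustrative):
 *
 *	v = saved_state_array[state_index_v];
 *	v = (v << shl) >> shr;
 *	if (value_2)
 *		v &= value_2;
 *	v = (v | value_3) + value_1;
 *	saved_state_array[state_index_v] = v;
 *
 * i.e. a shift/mask/or/add pipeline applied in place to a saved
 * state word.
 */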
5510
5511 /*
5512  * Handling rd poll entry.
5513  */
5514
5515 static uint32_t 
5516 ql_pollrd(qla_host_t *ha, ql_minidump_entry_pollrd_t *entry,
5517         uint32_t *data_buff)
5518 {
5519         int ret;
5520         int loop_cnt;
5521         uint32_t op_count, select_addr, select_value_stride, select_value;
5522         uint32_t read_addr, poll, mask, data_size, data;
5523         uint32_t wait_count = 0;
5524
5525         select_addr            = entry->select_addr;
5526         read_addr              = entry->read_addr;
5527         select_value           = entry->select_value;
5528         select_value_stride    = entry->select_value_stride;
5529         op_count               = entry->op_count;
5530         poll                   = entry->poll;
5531         mask                   = entry->mask;
5532         data_size              = entry->data_size;
5533
5534         for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
5535
5536                 ret = ql_rdwr_indreg32(ha, select_addr, &select_value, 0);
5537                 if (ret)
5538                         return (0);
5539
5540                 wait_count = 0;
5541
5542                 while (wait_count < poll) {
5543
5544                         uint32_t temp;
5545
5546                         ret = ql_rdwr_indreg32(ha, select_addr, &temp, 1);
5547                         if (ret)
5548                                 return (0);
5549
5550                         if ((temp & mask) != 0) {
5551                                 break;
5552                         }
5553                         wait_count++;
5554                 }
5555
5556                 if (wait_count == poll) {
5557                         device_printf(ha->pci_dev,
5558                                 "%s: Error in processing entry\n", __func__);
5559                         device_printf(ha->pci_dev,
5560                                 "%s: wait_count <0x%x> poll <0x%x>\n",
5561                                 __func__, wait_count, poll);
5562                         return (0);
5563                 }
5564
5565                 ret = ql_rdwr_indreg32(ha, read_addr, &data, 1);
5566                 if (ret)
5567                         return (0);
5568
5569                 *data_buff++ = select_value;
5570                 *data_buff++ = data;
5571                 select_value = select_value + select_value_stride;
5572         }
5573
5574         /*
5575          * Return the number of bytes captured into the buffer.
5576          */
5577         return (loop_cnt * (2 * sizeof(uint32_t)));
5578 }
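
/*
 * Output layout (illustrative): like ql_rdmux(), each iteration stores
 * a (select_value, data) pair once the poll succeeds, so op_count
 * iterations capture op_count * 2 * sizeof(uint32_t) bytes.
 */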
5579
5580
5581 /*
5582  * Handling rd modify write poll entry.
5583  */
5584
5585 static uint32_t 
5586 ql_pollrd_modify_write(qla_host_t *ha,
5587         ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
5588         uint32_t *data_buff)
5589 {
5590         int ret;
5591         uint32_t addr_1, addr_2, value_1, value_2, data;
5592         uint32_t poll, mask, data_size, modify_mask;
5593         uint32_t wait_count = 0;
5594
5595         addr_1          = entry->addr_1;
5596         addr_2          = entry->addr_2;
5597         value_1         = entry->value_1;
5598         value_2         = entry->value_2;
5599
5600         poll            = entry->poll;
5601         mask            = entry->mask;
5602         modify_mask     = entry->modify_mask;
5603         data_size       = entry->data_size;
5604
5605
5606         ret = ql_rdwr_indreg32(ha, addr_1, &value_1, 0);
5607         if (ret)
5608                 return (0);
5609
5610         wait_count = 0;
5611         while (wait_count < poll) {
5612
5613                 uint32_t temp;
5614
5615                 ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
5616                 if (ret)
5617                         return (0);
5618
5619                 if ((temp & mask) != 0) {
5620                         break;
5621                 }
5622                 wait_count++;
5623         }
5624
5625         if (wait_count == poll) {
5626                 device_printf(ha->pci_dev, "%s: Error in processing entry\n",
5627                         __func__);
5628         } else {
5629
5630                 ret = ql_rdwr_indreg32(ha, addr_2, &data, 1);
5631                 if (ret)
5632                         return (0);
5633
5634                 data = (data & modify_mask);
5635
5636                 ret = ql_rdwr_indreg32(ha, addr_2, &data, 0);
5637                 if (ret)
5638                         return (0);
5639
5640                 ret = ql_rdwr_indreg32(ha, addr_1, &value_2, 0);
5641                 if (ret)
5642                         return (0);
5643
5644                 /* Poll again */
5645                 wait_count = 0;
5646                 while (wait_count < poll) {
5647
5648                         uint32_t temp;
5649
5650                         ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
5651                         if (ret)
5652                                 return (0);
5653
5654                         if ((temp & mask) != 0) {
5655                                 break;
5656                         }
5657                         wait_count++;
5658                 }
5659                 *data_buff++ = addr_2;
5660                 *data_buff++ = data;
5661         }
5662
5663         /*
5664          * Return the number of bytes captured; note that this count is
5665          * returned even on the poll-timeout path, where nothing is stored.
5666          */
5666         return (2 * sizeof(uint32_t));
5667 }
5668
5669