/*
 * Copyright (c) 2013-2016 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: ql_hw.c
 * Author: David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 * Content: Contains hardware-dependent functions.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "ql_os.h"
#include "ql_hw.h"
#include "ql_def.h"
#include "ql_inline.h"
#include "ql_ver.h"
#include "ql_glbl.h"
#include "ql_dbg.h"
#include "ql_minidump.h"

/*
 * Static Functions
 */

static void qla_del_rcv_cntxt(qla_host_t *ha);
static int qla_init_rcv_cntxt(qla_host_t *ha);
static void qla_del_xmt_cntxt(qla_host_t *ha);
static int qla_init_xmt_cntxt(qla_host_t *ha);
static int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
        uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause);
static int qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx,
        uint32_t num_intrs, uint32_t create);
static int qla_config_rss(qla_host_t *ha, uint16_t cntxt_id);
static int qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id,
        int tenable, int rcv);
static int qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode);
static int qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id);

static int qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd,
                uint8_t *hdr);
static int qla_hw_add_all_mcast(qla_host_t *ha);
static int qla_hw_del_all_mcast(qla_host_t *ha);
static int qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds);

static int qla_init_nic_func(qla_host_t *ha);
static int qla_stop_nic_func(qla_host_t *ha);
static int qla_query_fw_dcbx_caps(qla_host_t *ha);
static int qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits);
static int qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits);
static int qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode);
static int qla_get_cam_search_mode(qla_host_t *ha);

static void ql_minidump_free(qla_host_t *ha);

#ifdef QL_DBG

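/*
 * Debug-only aid: writing 1 to each protocol-engine (PEG) control
 * register via ql_rdwr_indreg32() (last argument 0 selects a write)
 * appears to halt all five PEG cores, forcing a firmware failure so
 * the recovery path can be exercised.
 */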
static void
qla_stop_pegs(qla_host_t *ha)
{
        uint32_t val = 1;

        ql_rdwr_indreg32(ha, Q8_CRB_PEG_0, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_1, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_2, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_3, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_4, &val, 0);
        device_printf(ha->pci_dev, "%s PEGS HALTED!!!!!\n", __func__);
}

static int
qla_sysctl_stop_pegs(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        if (ret == 1) {
                ha = (qla_host_t *)arg1;
                if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
                        qla_stop_pegs(ha);
                        QLA_UNLOCK(ha, __func__);
                }
        }

        return (err);
}
#endif /* #ifdef QL_DBG */

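/*
 * Validate a value written to the "port_cfg" sysctl. Per the help text
 * in ql_hw_add_sysctls(): bits 0-3 select DCBX (0/1), bits 4-7 select
 * the pause type (0 = none, 1 = standard, 2 = ppm), and bits 8-11
 * select the standard-pause direction (0 = xmt and rcv, 1 = xmt only,
 * 2 = rcv only).
 */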
static int
qla_validate_set_port_cfg_bit(uint32_t bits)
{
        if ((bits & 0xF) > 1)
                return (-1);

        if (((bits >> 4) & 0xF) > 2)
                return (-1);

        if (((bits >> 8) & 0xF) > 2)
                return (-1);

        return (0);
}

static int
qla_sysctl_port_cfg(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;
        uint32_t cfg_bits;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        ha = (qla_host_t *)arg1;

        if ((qla_validate_set_port_cfg_bit((uint32_t)ret) == 0)) {

                err = qla_get_port_config(ha, &cfg_bits);

                if (err)
                        goto qla_sysctl_set_port_cfg_exit;

                if (ret & 0x1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_DCBX_ENABLE;
                } else {
                        cfg_bits &= ~Q8_PORT_CFG_BITS_DCBX_ENABLE;
                }

                ret = ret >> 4;
                cfg_bits &= ~Q8_PORT_CFG_BITS_PAUSE_CFG_MASK;

                if ((ret & 0xF) == 0) {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_DISABLED;
                } else if ((ret & 0xF) == 1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_STD;
                } else {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_PPM;
                }

                ret = ret >> 4;
                cfg_bits &= ~Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK;

                if (ret == 0) {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT_RCV;
                } else if (ret == 1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT;
                } else {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_RCV;
                }

                if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
                        err = qla_set_port_config(ha, cfg_bits);
                        QLA_UNLOCK(ha, __func__);
                } else {
                        device_printf(ha->pci_dev, "%s: failed\n", __func__);
                }
        } else {
                if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
                        err = qla_get_port_config(ha, &cfg_bits);
                        QLA_UNLOCK(ha, __func__);
                } else {
                        device_printf(ha->pci_dev, "%s: failed\n", __func__);
                }
        }

qla_sysctl_set_port_cfg_exit:
        return (err);
}
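/*
 * Example (hypothetical device instance dev.ql.0): writing 17 (0x011)
 * requests DCBX enabled (bits 0-3 = 1), standard pause (bits 4-7 = 1)
 * and pause on both xmt and rcv (bits 8-11 = 0):
 *
 *      sysctl dev.ql.0.port_cfg=17
 *
 * A value that fails qla_validate_set_port_cfg_bit() instead reads
 * back the current port configuration.
 */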

static int
qla_sysctl_set_cam_search_mode(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        ha = (qla_host_t *)arg1;

        if ((ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_INTERNAL) ||
                (ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_AUTO)) {

                if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
                        err = qla_set_cam_search_mode(ha, (uint32_t)ret);
                        QLA_UNLOCK(ha, __func__);
                } else {
                        device_printf(ha->pci_dev, "%s: failed\n", __func__);
                }

        } else {
                device_printf(ha->pci_dev, "%s: ret = %d\n", __func__, ret);
        }

        return (err);
}
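/*
 * Example (hypothetical device instance dev.ql.0): select the internal
 * CAM search mode (1 = internal, 2 = auto, per the help text in
 * ql_hw_add_sysctls()):
 *
 *      sysctl dev.ql.0.set_cam_search_mode=1
 */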

static int
qla_sysctl_get_cam_search_mode(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        ha = (qla_host_t *)arg1;
        if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
                err = qla_get_cam_search_mode(ha);
                QLA_UNLOCK(ha, __func__);
        } else {
                device_printf(ha->pci_dev, "%s: failed\n", __func__);
        }

        return (err);
}

static void
qlnx_add_hw_mac_stats_sysctls(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid       *ctx_oid;

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_mac",
                        CTLFLAG_RD, NULL, "stats_hw_mac");
        children = SYSCTL_CHILDREN(ctx_oid);

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_frames",
                CTLFLAG_RD, &ha->hw.mac.xmt_frames,
                "xmt_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_bytes,
                "xmt_bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_mcast_pkts",
                CTLFLAG_RD, &ha->hw.mac.xmt_mcast_pkts,
                "xmt_mcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_bcast_pkts",
                CTLFLAG_RD, &ha->hw.mac.xmt_bcast_pkts,
                "xmt_bcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pause_frames",
                CTLFLAG_RD, &ha->hw.mac.xmt_pause_frames,
                "xmt_pause_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_cntrl_pkts",
                CTLFLAG_RD, &ha->hw.mac.xmt_cntrl_pkts,
                "xmt_cntrl_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_64bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_64bytes,
                "xmt_pkt_lt_64bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_127bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_127bytes,
                "xmt_pkt_lt_127bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_255bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_255bytes,
                "xmt_pkt_lt_255bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_511bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_511bytes,
                "xmt_pkt_lt_511bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_1023bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_1023bytes,
                "xmt_pkt_lt_1023bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_1518bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_1518bytes,
                "xmt_pkt_lt_1518bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_gt_1518bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_gt_1518bytes,
                "xmt_pkt_gt_1518bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_frames",
                CTLFLAG_RD, &ha->hw.mac.rcv_frames,
                "rcv_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_bytes,
                "rcv_bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_mcast_pkts",
                CTLFLAG_RD, &ha->hw.mac.rcv_mcast_pkts,
                "rcv_mcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_bcast_pkts",
                CTLFLAG_RD, &ha->hw.mac.rcv_bcast_pkts,
                "rcv_bcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pause_frames",
                CTLFLAG_RD, &ha->hw.mac.rcv_pause_frames,
                "rcv_pause_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_cntrl_pkts",
                CTLFLAG_RD, &ha->hw.mac.rcv_cntrl_pkts,
                "rcv_cntrl_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_64bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_64bytes,
                "rcv_pkt_lt_64bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_127bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_127bytes,
                "rcv_pkt_lt_127bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_255bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_255bytes,
                "rcv_pkt_lt_255bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_511bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_511bytes,
                "rcv_pkt_lt_511bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_1023bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_1023bytes,
                "rcv_pkt_lt_1023bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_1518bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_1518bytes,
                "rcv_pkt_lt_1518bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_gt_1518bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_gt_1518bytes,
                "rcv_pkt_gt_1518bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_len_error",
                CTLFLAG_RD, &ha->hw.mac.rcv_len_error,
                "rcv_len_error");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_len_small",
                CTLFLAG_RD, &ha->hw.mac.rcv_len_small,
                "rcv_len_small");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_len_large",
                CTLFLAG_RD, &ha->hw.mac.rcv_len_large,
                "rcv_len_large");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_jabber",
                CTLFLAG_RD, &ha->hw.mac.rcv_jabber,
                "rcv_jabber");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_dropped",
                CTLFLAG_RD, &ha->hw.mac.rcv_dropped,
                "rcv_dropped");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "fcs_error",
                CTLFLAG_RD, &ha->hw.mac.fcs_error,
                "fcs_error");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "align_error",
                CTLFLAG_RD, &ha->hw.mac.align_error,
                "align_error");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_frames",
                CTLFLAG_RD, &ha->hw.mac.eswitched_frames,
                "eswitched_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_bytes",
                CTLFLAG_RD, &ha->hw.mac.eswitched_bytes,
                "eswitched_bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_mcast_frames",
                CTLFLAG_RD, &ha->hw.mac.eswitched_mcast_frames,
                "eswitched_mcast_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_bcast_frames",
                CTLFLAG_RD, &ha->hw.mac.eswitched_bcast_frames,
                "eswitched_bcast_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_ucast_frames",
                CTLFLAG_RD, &ha->hw.mac.eswitched_ucast_frames,
                "eswitched_ucast_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_err_free_frames",
                CTLFLAG_RD, &ha->hw.mac.eswitched_err_free_frames,
                "eswitched_err_free_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_err_free_bytes",
                CTLFLAG_RD, &ha->hw.mac.eswitched_err_free_bytes,
                "eswitched_err_free_bytes");

        return;
}

static void
qlnx_add_hw_rcv_stats_sysctls(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid       *ctx_oid;

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_rcv",
                        CTLFLAG_RD, NULL, "stats_hw_rcv");
        children = SYSCTL_CHILDREN(ctx_oid);

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "total_bytes",
                CTLFLAG_RD, &ha->hw.rcv.total_bytes,
                "total_bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "total_pkts",
                CTLFLAG_RD, &ha->hw.rcv.total_pkts,
                "total_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "lro_pkt_count",
                CTLFLAG_RD, &ha->hw.rcv.lro_pkt_count,
                "lro_pkt_count");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "sw_pkt_count",
                CTLFLAG_RD, &ha->hw.rcv.sw_pkt_count,
                "sw_pkt_count");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "ip_chksum_err",
                CTLFLAG_RD, &ha->hw.rcv.ip_chksum_err,
                "ip_chksum_err");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_wo_acntxts",
                CTLFLAG_RD, &ha->hw.rcv.pkts_wo_acntxts,
                "pkts_wo_acntxts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_dropped_no_sds_card",
                CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_sds_card,
                "pkts_dropped_no_sds_card");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_dropped_no_sds_host",
                CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_sds_host,
                "pkts_dropped_no_sds_host");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "oversized_pkts",
                CTLFLAG_RD, &ha->hw.rcv.oversized_pkts,
                "oversized_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_dropped_no_rds",
                CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_rds,
                "pkts_dropped_no_rds");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "unxpctd_mcast_pkts",
                CTLFLAG_RD, &ha->hw.rcv.unxpctd_mcast_pkts,
                "unxpctd_mcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "re1_fbq_error",
                CTLFLAG_RD, &ha->hw.rcv.re1_fbq_error,
                "re1_fbq_error");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "invalid_mac_addr",
                CTLFLAG_RD, &ha->hw.rcv.invalid_mac_addr,
                "invalid_mac_addr");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rds_prime_trys",
                CTLFLAG_RD, &ha->hw.rcv.rds_prime_trys,
                "rds_prime_trys");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rds_prime_success",
                CTLFLAG_RD, &ha->hw.rcv.rds_prime_success,
                "rds_prime_success");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "lro_flows_added",
                CTLFLAG_RD, &ha->hw.rcv.lro_flows_added,
                "lro_flows_added");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "lro_flows_deleted",
                CTLFLAG_RD, &ha->hw.rcv.lro_flows_deleted,
                "lro_flows_deleted");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "lro_flows_active",
                CTLFLAG_RD, &ha->hw.rcv.lro_flows_active,
                "lro_flows_active");

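        /*
         * "droped" is kept as spelled: it matches the field name in the
         * receive-statistics structure and the sysctl name visible to
         * existing tooling.
         */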
        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_droped_unknown",
                CTLFLAG_RD, &ha->hw.rcv.pkts_droped_unknown,
                "pkts_droped_unknown");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_cnt_oversized",
                CTLFLAG_RD, &ha->hw.rcv.pkts_cnt_oversized,
                "pkts_cnt_oversized");

        return;
}

static void
qlnx_add_hw_xmt_stats_sysctls(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid_list  *node_children;
        struct sysctl_oid       *ctx_oid;
        int                     i;
        char                    name_str[16];

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_xmt",
                        CTLFLAG_RD, NULL, "stats_hw_xmt");
        children = SYSCTL_CHILDREN(ctx_oid);

        for (i = 0; i < ha->hw.num_tx_rings; i++) {

                bzero(name_str, sizeof(name_str));
                snprintf(name_str, sizeof(name_str), "%d", i);

                ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
                        CTLFLAG_RD, NULL, name_str);
                node_children = SYSCTL_CHILDREN(ctx_oid);

                /* Tx Related */

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "total_bytes",
                        CTLFLAG_RD, &ha->hw.xmt[i].total_bytes,
                        "total_bytes");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "total_pkts",
                        CTLFLAG_RD, &ha->hw.xmt[i].total_pkts,
                        "total_pkts");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "errors",
                        CTLFLAG_RD, &ha->hw.xmt[i].errors,
                        "errors");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "pkts_dropped",
                        CTLFLAG_RD, &ha->hw.xmt[i].pkts_dropped,
                        "pkts_dropped");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "switch_pkts",
                        CTLFLAG_RD, &ha->hw.xmt[i].switch_pkts,
                        "switch_pkts");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "num_buffers",
                        CTLFLAG_RD, &ha->hw.xmt[i].num_buffers,
                        "num_buffers");
        }

        return;
}

static void
qlnx_add_hw_stats_sysctls(qla_host_t *ha)
{
        qlnx_add_hw_mac_stats_sysctls(ha);
        qlnx_add_hw_rcv_stats_sysctls(ha);
        qlnx_add_hw_xmt_stats_sysctls(ha);

        return;
}

static void
qlnx_add_drvr_sds_stats(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid_list  *node_children;
        struct sysctl_oid       *ctx_oid;
        int                     i;
        char                    name_str[16];

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_sds",
                        CTLFLAG_RD, NULL, "stats_drvr_sds");
        children = SYSCTL_CHILDREN(ctx_oid);

        for (i = 0; i < ha->hw.num_sds_rings; i++) {

                bzero(name_str, sizeof(name_str));
                snprintf(name_str, sizeof(name_str), "%d", i);

                ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
                        CTLFLAG_RD, NULL, name_str);
                node_children = SYSCTL_CHILDREN(ctx_oid);

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "intr_count",
                        CTLFLAG_RD, &ha->hw.sds[i].intr_count,
                        "intr_count");

                SYSCTL_ADD_UINT(ctx, node_children,
                        OID_AUTO, "rx_free",
                        CTLFLAG_RD, &ha->hw.sds[i].rx_free,
                        ha->hw.sds[i].rx_free, "rx_free");
        }

        return;
}

static void
qlnx_add_drvr_rds_stats(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid_list  *node_children;
        struct sysctl_oid       *ctx_oid;
        int                     i;
        char                    name_str[16];

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_rds",
                        CTLFLAG_RD, NULL, "stats_drvr_rds");
        children = SYSCTL_CHILDREN(ctx_oid);

        for (i = 0; i < ha->hw.num_rds_rings; i++) {

                bzero(name_str, sizeof(name_str));
                snprintf(name_str, sizeof(name_str), "%d", i);

                ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
                        CTLFLAG_RD, NULL, name_str);
                node_children = SYSCTL_CHILDREN(ctx_oid);

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "count",
                        CTLFLAG_RD, &ha->hw.rds[i].count,
                        "count");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "lro_pkt_count",
                        CTLFLAG_RD, &ha->hw.rds[i].lro_pkt_count,
                        "lro_pkt_count");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "lro_bytes",
                        CTLFLAG_RD, &ha->hw.rds[i].lro_bytes,
                        "lro_bytes");
        }

        return;
}

static void
qlnx_add_drvr_tx_stats(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid_list  *node_children;
        struct sysctl_oid       *ctx_oid;
        int                     i;
        char                    name_str[16];

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_xmt",
                        CTLFLAG_RD, NULL, "stats_drvr_xmt");
        children = SYSCTL_CHILDREN(ctx_oid);

        for (i = 0; i < ha->hw.num_tx_rings; i++) {

                bzero(name_str, sizeof(name_str));
                snprintf(name_str, sizeof(name_str), "%d", i);

                ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
                        CTLFLAG_RD, NULL, name_str);
                node_children = SYSCTL_CHILDREN(ctx_oid);

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "count",
                        CTLFLAG_RD, &ha->tx_ring[i].count,
                        "count");

#ifdef QL_ENABLE_ISCSI_TLV
                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "iscsi_pkt_count",
                        CTLFLAG_RD, &ha->tx_ring[i].iscsi_pkt_count,
                        "iscsi_pkt_count");
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
        }

        return;
}

static void
qlnx_add_drvr_stats_sysctls(qla_host_t *ha)
{
        qlnx_add_drvr_sds_stats(ha);
        qlnx_add_drvr_rds_stats(ha);
        qlnx_add_drvr_tx_stats(ha);
        return;
}

/*
 * Name: ql_hw_add_sysctls
 * Function: Add P3Plus specific sysctls
 */
void
ql_hw_add_sysctls(qla_host_t *ha)
{
        device_t        dev;

        dev = ha->pci_dev;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_rds_rings", CTLFLAG_RD, &ha->hw.num_rds_rings,
                ha->hw.num_rds_rings, "Number of Rcv Descriptor Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_sds_rings", CTLFLAG_RD, &ha->hw.num_sds_rings,
                ha->hw.num_sds_rings, "Number of Status Descriptor Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->hw.num_tx_rings,
                ha->hw.num_tx_rings, "Number of Transmit Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "tx_ring_index", CTLFLAG_RW, &ha->txr_idx,
                ha->txr_idx, "Tx Ring Used");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "max_tx_segs", CTLFLAG_RD, &ha->hw.max_tx_segs,
                ha->hw.max_tx_segs, "Max # of Segments in a non-TSO pkt");

        ha->hw.sds_cidx_thres = 32;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "sds_cidx_thres", CTLFLAG_RW, &ha->hw.sds_cidx_thres,
                ha->hw.sds_cidx_thres,
                "Number of SDS entries to process before updating"
                " SDS Ring Consumer Index");

        ha->hw.rds_pidx_thres = 32;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "rds_pidx_thres", CTLFLAG_RW, &ha->hw.rds_pidx_thres,
                ha->hw.rds_pidx_thres,
                "Number of Rcv Rings Entries to post before updating"
                " RDS Ring Producer Index");

        ha->hw.rcv_intr_coalesce = (3 << 16) | 256;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "rcv_intr_coalesce", CTLFLAG_RW,
                &ha->hw.rcv_intr_coalesce,
                ha->hw.rcv_intr_coalesce,
                "Rcv Intr Coalescing Parameters\n"
                "\tbits 15:0 max packets\n"
                "\tbits 31:16 max micro-seconds to wait\n"
                "\tplease run\n"
                "\tifconfig <if> down && ifconfig <if> up\n"
                "\tto take effect\n");

        ha->hw.xmt_intr_coalesce = (64 << 16) | 64;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "xmt_intr_coalesce", CTLFLAG_RW,
                &ha->hw.xmt_intr_coalesce,
                ha->hw.xmt_intr_coalesce,
                "Xmt Intr Coalescing Parameters\n"
                "\tbits 15:0 max packets\n"
                "\tbits 31:16 max micro-seconds to wait\n"
                "\tplease run\n"
                "\tifconfig <if> down && ifconfig <if> up\n"
                "\tto take effect\n");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "port_cfg", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_port_cfg, "I",
                        "Set Port Configuration if the value matches the"
                        " bit layout below; otherwise Get Port Configuration\n"
                        "\tBits 0-3 : 1 = DCBX Enable; 0 = DCBX Disable\n"
                        "\tBits 4-7 : 0 = no pause; 1 = std; 2 = ppm\n"
                        "\tBits 8-11: std pause cfg; 0 = xmt and rcv;"
                        " 1 = xmt only; 2 = rcv only\n"
                );

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "set_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_set_cam_search_mode, "I",
                        "Set CAM Search Mode\n"
                        "\t 1 = search mode internal\n"
                        "\t 2 = search mode auto\n");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "get_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_get_cam_search_mode, "I",
                        "Get CAM Search Mode\n"
                        "\t 1 = search mode internal\n"
                        "\t 2 = search mode auto\n");

        ha->hw.enable_9kb = 1;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "enable_9kb", CTLFLAG_RW, &ha->hw.enable_9kb,
                ha->hw.enable_9kb, "Enable 9Kbyte Buffers when MTU = 9000");

        ha->hw.enable_hw_lro = 1;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "enable_hw_lro", CTLFLAG_RW, &ha->hw.enable_hw_lro,
                ha->hw.enable_hw_lro, "Enable Hardware LRO; Default is true\n"
                "\t 1 : Hardware LRO if LRO is enabled\n"
                "\t 0 : Software LRO if LRO is enabled\n"
                "\t Any change requires ifconfig down/up to take effect\n"
                "\t Note that LRO may be turned off/on via ifconfig\n");

        ha->hw.mdump_active = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "minidump_active", CTLFLAG_RW, &ha->hw.mdump_active,
                ha->hw.mdump_active,
                "Minidump retrieval is Active");

        ha->hw.mdump_done = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "mdump_done", CTLFLAG_RW,
                &ha->hw.mdump_done, ha->hw.mdump_done,
                "Minidump has been done and available for retrieval");

        ha->hw.mdump_capture_mask = 0xF;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "minidump_capture_mask", CTLFLAG_RW,
                &ha->hw.mdump_capture_mask, ha->hw.mdump_capture_mask,
                "Minidump capture mask");
#ifdef QL_DBG

        ha->err_inject = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "err_inject",
                CTLFLAG_RW, &ha->err_inject, ha->err_inject,
                "Error to be injected\n"
                "\t\t\t 0: No Errors\n"
                "\t\t\t 1: rcv: rxb struct invalid\n"
                "\t\t\t 2: rcv: mp == NULL\n"
                "\t\t\t 3: lro: rxb struct invalid\n"
                "\t\t\t 4: lro: mp == NULL\n"
                "\t\t\t 5: rcv: num handles invalid\n"
                "\t\t\t 6: reg: indirect reg rd_wr failure\n"
                "\t\t\t 7: ocm: offchip memory rd_wr failure\n"
                "\t\t\t 8: mbx: mailbox command failure\n"
                "\t\t\t 9: heartbeat failure\n"
                "\t\t\t A: temperature failure\n"
                "\t\t\t 11: m_getcl or m_getjcl failure\n");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "peg_stop", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_stop_pegs, "I", "Peg Stop");

#endif /* #ifdef QL_DBG */

        ha->hw.user_pri_nic = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "user_pri_nic", CTLFLAG_RW, &ha->hw.user_pri_nic,
                ha->hw.user_pri_nic,
                "VLAN Tag User Priority for Normal Ethernet Packets");

        ha->hw.user_pri_iscsi = 4;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "user_pri_iscsi", CTLFLAG_RW, &ha->hw.user_pri_iscsi,
                ha->hw.user_pri_iscsi,
                "VLAN Tag User Priority for iSCSI Packets");

        qlnx_add_hw_stats_sysctls(ha);
        qlnx_add_drvr_stats_sysctls(ha);

        return;
}

void
ql_hw_link_status(qla_host_t *ha)
{
        device_printf(ha->pci_dev, "cable_oui\t\t 0x%08x\n", ha->hw.cable_oui);

        if (ha->hw.link_up) {
                device_printf(ha->pci_dev, "link Up\n");
        } else {
                device_printf(ha->pci_dev, "link Down\n");
        }

        if (ha->hw.flags.fduplex) {
                device_printf(ha->pci_dev, "Full Duplex\n");
        } else {
                device_printf(ha->pci_dev, "Half Duplex\n");
        }

        if (ha->hw.flags.autoneg) {
                device_printf(ha->pci_dev, "Auto Negotiation Enabled\n");
        } else {
                device_printf(ha->pci_dev, "Auto Negotiation Disabled\n");
        }

        switch (ha->hw.link_speed) {
        case 0x710:
                device_printf(ha->pci_dev, "link speed\t\t 10Gbps\n");
                break;

        case 0x3E8:
                device_printf(ha->pci_dev, "link speed\t\t 1Gbps\n");
                break;

        case 0x64:
                device_printf(ha->pci_dev, "link speed\t\t 100Mbps\n");
                break;

        default:
                device_printf(ha->pci_dev, "link speed\t\t Unknown\n");
                break;
        }

        switch (ha->hw.module_type) {

        case 0x01:
                device_printf(ha->pci_dev, "Module Type 10GBase-LRM\n");
                break;

        case 0x02:
                device_printf(ha->pci_dev, "Module Type 10GBase-LR\n");
                break;

        case 0x03:
                device_printf(ha->pci_dev, "Module Type 10GBase-SR\n");
                break;

        case 0x04:
                device_printf(ha->pci_dev,
                        "Module Type 10GE Passive Copper(Compliant)[%d m]\n",
                        ha->hw.cable_length);
                break;

        case 0x05:
                device_printf(ha->pci_dev, "Module Type 10GE Active"
                        " Limiting Copper(Compliant)[%d m]\n",
                        ha->hw.cable_length);
                break;

        case 0x06:
                device_printf(ha->pci_dev,
                        "Module Type 10GE Passive Copper"
                        " (Legacy, Best Effort)[%d m]\n",
                        ha->hw.cable_length);
                break;

        case 0x07:
                device_printf(ha->pci_dev, "Module Type 1000Base-SX\n");
                break;

        case 0x08:
                device_printf(ha->pci_dev, "Module Type 1000Base-LX\n");
                break;

        case 0x09:
                device_printf(ha->pci_dev, "Module Type 1000Base-CX\n");
                break;

        case 0x0A:
                device_printf(ha->pci_dev, "Module Type 1000Base-T\n");
                break;

        case 0x0B:
                device_printf(ha->pci_dev, "Module Type 1GE Passive Copper"
                        " (Legacy, Best Effort)\n");
                break;

        default:
                device_printf(ha->pci_dev, "Unknown Module Type 0x%x\n",
                        ha->hw.module_type);
                break;
        }

        if (ha->hw.link_faults == 1)
                device_printf(ha->pci_dev, "SFP Power Fault\n");
}

/*
 * Name: ql_free_dma
 * Function: Frees the DMA'able memory allocated in ql_alloc_dma()
 */
void
ql_free_dma(qla_host_t *ha)
{
        uint32_t i;

        if (ha->hw.dma_buf.flags.sds_ring) {
                for (i = 0; i < ha->hw.num_sds_rings; i++) {
                        ql_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]);
                }
                ha->hw.dma_buf.flags.sds_ring = 0;
        }

        if (ha->hw.dma_buf.flags.rds_ring) {
                for (i = 0; i < ha->hw.num_rds_rings; i++) {
                        ql_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]);
                }
                ha->hw.dma_buf.flags.rds_ring = 0;
        }

        if (ha->hw.dma_buf.flags.tx_ring) {
                ql_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring);
                ha->hw.dma_buf.flags.tx_ring = 0;
        }
        ql_minidump_free(ha);
}

/*
 * Name: ql_alloc_dma
 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
 */
int
ql_alloc_dma(qla_host_t *ha)
{
        device_t                dev;
        uint32_t                i, j, size, tx_ring_size;
        qla_hw_t                *hw;
        qla_hw_tx_cntxt_t       *tx_cntxt;
        uint8_t                 *vaddr;
        bus_addr_t              paddr;

        dev = ha->pci_dev;

        QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

        hw = &ha->hw;
        /*
         * Allocate Transmit Ring
         */
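        /*
         * All transmit rings share a single DMA buffer: num_tx_rings
         * rings of NUM_TX_DESCRIPTORS commands each, followed by one
         * 32-bit consumer-index word per ring. The extra PAGE_SIZE
         * added to the allocation below holds those consumer indices.
         */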
        tx_ring_size = (sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS);
        size = (tx_ring_size * ha->hw.num_tx_rings);

        hw->dma_buf.tx_ring.alignment = 8;
        hw->dma_buf.tx_ring.size = size + PAGE_SIZE;

        if (ql_alloc_dmabuf(ha, &hw->dma_buf.tx_ring)) {
                device_printf(dev, "%s: tx ring alloc failed\n", __func__);
                goto ql_alloc_dma_exit;
        }

        vaddr = (uint8_t *)hw->dma_buf.tx_ring.dma_b;
        paddr = hw->dma_buf.tx_ring.dma_addr;

        for (i = 0; i < ha->hw.num_tx_rings; i++) {
                tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

                tx_cntxt->tx_ring_base = (q80_tx_cmd_t *)vaddr;
                tx_cntxt->tx_ring_paddr = paddr;

                vaddr += tx_ring_size;
                paddr += tx_ring_size;
        }

        for (i = 0; i < ha->hw.num_tx_rings; i++) {
                tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

                tx_cntxt->tx_cons = (uint32_t *)vaddr;
                tx_cntxt->tx_cons_paddr = paddr;

                vaddr += sizeof (uint32_t);
                paddr += sizeof (uint32_t);
        }

        ha->hw.dma_buf.flags.tx_ring = 1;

        QL_DPRINT2(ha, (dev, "%s: tx_ring phys %p virt %p\n",
                __func__, (void *)(hw->dma_buf.tx_ring.dma_addr),
                hw->dma_buf.tx_ring.dma_b));
        /*
         * Allocate Receive Descriptor Rings
         */

        for (i = 0; i < hw->num_rds_rings; i++) {

                hw->dma_buf.rds_ring[i].alignment = 8;
                hw->dma_buf.rds_ring[i].size =
                        (sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS;

                if (ql_alloc_dmabuf(ha, &hw->dma_buf.rds_ring[i])) {
                        device_printf(dev, "%s: rds ring[%d] alloc failed\n",
                                __func__, i);

                        for (j = 0; j < i; j++)
                                ql_free_dmabuf(ha, &hw->dma_buf.rds_ring[j]);

                        goto ql_alloc_dma_exit;
                }
                QL_DPRINT4(ha, (dev, "%s: rx_ring[%d] phys %p virt %p\n",
                        __func__, i, (void *)(hw->dma_buf.rds_ring[i].dma_addr),
                        hw->dma_buf.rds_ring[i].dma_b));
        }

        hw->dma_buf.flags.rds_ring = 1;

        /*
         * Allocate Status Descriptor Rings
         */

        for (i = 0; i < hw->num_sds_rings; i++) {
                hw->dma_buf.sds_ring[i].alignment = 8;
                hw->dma_buf.sds_ring[i].size =
                        (sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS;

                if (ql_alloc_dmabuf(ha, &hw->dma_buf.sds_ring[i])) {
                        device_printf(dev, "%s: sds ring alloc failed\n",
                                __func__);

                        for (j = 0; j < i; j++)
                                ql_free_dmabuf(ha, &hw->dma_buf.sds_ring[j]);

                        goto ql_alloc_dma_exit;
                }
                QL_DPRINT4(ha, (dev, "%s: sds_ring[%d] phys %p virt %p\n",
                        __func__, i,
                        (void *)(hw->dma_buf.sds_ring[i].dma_addr),
                        hw->dma_buf.sds_ring[i].dma_b));
        }
        for (i = 0; i < hw->num_sds_rings; i++) {
                hw->sds[i].sds_ring_base =
                        (q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b;
        }

        hw->dma_buf.flags.sds_ring = 1;

        return (0);

ql_alloc_dma_exit:
        ql_free_dma(ha);
        return (-1);
}

#define Q8_MBX_MSEC_DELAY       5000

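/*
 * Name: qla_mbx_cmd
 * Function: Issues a mailbox command and collects the response.
 *
 * Handshake as implemented below: poll Q8_HOST_MBOX_CNTRL until it
 * reads zero (firmware has consumed any prior command), write the
 * n_hmbox request words to the host mailbox registers, then set
 * Q8_HOST_MBOX_CNTRL to signal the firmware. Completion is polled via
 * Q8_FW_MBOX_CNTRL; a value in Q8_FW_MBOX0 whose top nibble is 0x8 is
 * skipped (seemingly an async notification rather than this command's
 * response). The n_fwmbox response words are then copied out and the
 * firmware mailbox is acked. Waits are bounded; no_pause selects
 * busy-waiting with DELAY() instead of qla_mdelay(). On timeout or
 * injected failure, qla_initiate_recovery is set.
 */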
static int
qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
        uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause)
{
        uint32_t i;
        uint32_t data;
        int ret = 0;

        if (QL_ERR_INJECT(ha, INJCT_MBX_CMD_FAILURE)) {
                ret = -3;
                ha->qla_initiate_recovery = 1;
                goto exit_qla_mbx_cmd;
        }

        if (no_pause)
                i = 1000;
        else
                i = Q8_MBX_MSEC_DELAY;

        while (i) {
                data = READ_REG32(ha, Q8_HOST_MBOX_CNTRL);
                if (data == 0)
                        break;
                if (no_pause) {
                        DELAY(1000);
                } else {
                        qla_mdelay(__func__, 1);
                }
                i--;
        }

        if (i == 0) {
                device_printf(ha->pci_dev, "%s: host_mbx_cntrl 0x%08x\n",
                        __func__, data);
                ret = -1;
                ha->qla_initiate_recovery = 1;
                goto exit_qla_mbx_cmd;
        }

        for (i = 0; i < n_hmbox; i++) {
                WRITE_REG32(ha, (Q8_HOST_MBOX0 + (i << 2)), *h_mbox);
                h_mbox++;
        }

        WRITE_REG32(ha, Q8_HOST_MBOX_CNTRL, 0x1);

        i = Q8_MBX_MSEC_DELAY;
        while (i) {
                data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);

                if ((data & 0x3) == 1) {
                        data = READ_REG32(ha, Q8_FW_MBOX0);
                        if ((data & 0xF000) != 0x8000)
                                break;
                }
                if (no_pause) {
                        DELAY(1000);
                } else {
                        qla_mdelay(__func__, 1);
                }
                i--;
        }
        if (i == 0) {
                device_printf(ha->pci_dev, "%s: fw_mbx_cntrl 0x%08x\n",
                        __func__, data);
                ret = -2;
                ha->qla_initiate_recovery = 1;
                goto exit_qla_mbx_cmd;
        }

        for (i = 0; i < n_fwmbox; i++) {
                *fw_mbox++ = READ_REG32(ha, (Q8_FW_MBOX0 + (i << 2)));
        }

        WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
        WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);

exit_qla_mbx_cmd:
        return (ret);
}

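/*
 * Name: qla_get_nic_partition
 * Function: Queries NIC-partition attributes from the firmware.
 * Bit 7 of response mailbox 16 reports 9KB receive-buffer support and
 * bits 16-31 of response mailbox 6 report the number of receive queues.
 */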
int
qla_get_nic_partition(qla_host_t *ha, uint32_t *supports_9kb,
        uint32_t *num_rcvq)
{
        uint32_t *mbox, err;
        device_t dev = ha->pci_dev;

        bzero(ha->hw.mbox, (sizeof (uint32_t) * Q8_NUM_MBOX));

        mbox = ha->hw.mbox;

        mbox[0] = Q8_MBX_GET_NIC_PARTITION | (0x2 << 16) | (0x2 << 29);

        if (qla_mbx_cmd(ha, mbox, 2, mbox, 19, 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }
        err = mbox[0] >> 25;

        if (supports_9kb != NULL) {
                if (mbox[16] & 0x80) /* bit 7 of mbox 16 */
                        *supports_9kb = 1;
                else
                        *supports_9kb = 0;
        }

        if (num_rcvq != NULL)
                *num_rcvq = ((mbox[6] >> 16) & 0xFFFF);

        if ((err != 1) && (err != 0)) {
                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
                return (-1);
        }
        return (0);
}

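/*
 * Name: qla_config_intr_cntxt
 * Function: Creates (create != 0) or deletes MSI-X interrupt mappings
 * for num_intrs interrupts starting at start_idx; on create, the
 * firmware-assigned interrupt ids and sources are recorded in
 * ha->hw.intr_id[] / ha->hw.intr_src[].
 */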
static int
qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx, uint32_t num_intrs,
        uint32_t create)
{
        uint32_t i, err;
        device_t dev = ha->pci_dev;
        q80_config_intr_t *c_intr;
        q80_config_intr_rsp_t *c_intr_rsp;

        c_intr = (q80_config_intr_t *)ha->hw.mbox;
        bzero(c_intr, (sizeof (q80_config_intr_t)));

        c_intr->opcode = Q8_MBX_CONFIG_INTR;

        c_intr->count_version = (sizeof (q80_config_intr_t) >> 2);
        c_intr->count_version |= Q8_MBX_CMD_VERSION;

        c_intr->nentries = num_intrs;

        for (i = 0; i < num_intrs; i++) {
                if (create) {
                        c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_CREATE;
                        c_intr->intr[i].msix_index = start_idx + 1 + i;
                } else {
                        c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_DELETE;
                        c_intr->intr[i].msix_index =
                                ha->hw.intr_id[(start_idx + i)];
                }

                c_intr->intr[i].cmd_type |= Q8_MBX_CONFIG_INTR_TYPE_MSI_X;
        }

        if (qla_mbx_cmd(ha, (uint32_t *)c_intr,
                (sizeof (q80_config_intr_t) >> 2),
                ha->hw.mbox, (sizeof (q80_config_intr_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }

        c_intr_rsp = (q80_config_intr_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(c_intr_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed1 [0x%08x, %d]\n", __func__, err,
                        c_intr_rsp->nentries);

                for (i = 0; i < c_intr_rsp->nentries; i++) {
                        device_printf(dev, "%s: [%d]:[0x%x 0x%x 0x%x]\n",
                                __func__, i,
                                c_intr_rsp->intr[i].status,
                                c_intr_rsp->intr[i].intr_id,
                                c_intr_rsp->intr[i].intr_src);
                }

                return (-1);
        }

        for (i = 0; ((i < num_intrs) && create); i++) {
                if (!c_intr_rsp->intr[i].status) {
                        ha->hw.intr_id[(start_idx + i)] =
                                c_intr_rsp->intr[i].intr_id;
                        ha->hw.intr_src[(start_idx + i)] =
                                c_intr_rsp->intr[i].intr_src;
                }
        }

        return (0);
}

/*
 * Name: qla_config_rss
 * Function: Configure RSS for the context/interface.
 */
static const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL,
                        0x8030f20c77cb2da3ULL, 0xae7b30b4d0ca2bcbULL,
                        0x43a38fb04167253dULL, 0x255b0ec26d5a56daULL };

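/*
 * The request below enables RSS with TCP-over-IPv4 and TCP-over-IPv6
 * hashing, selects indirection-table based distribution, and programs
 * the static 40-byte hash key above as five 64-bit words.
 */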
1449 static int
1450 qla_config_rss(qla_host_t *ha, uint16_t cntxt_id)
1451 {
1452         q80_config_rss_t        *c_rss;
1453         q80_config_rss_rsp_t    *c_rss_rsp;
1454         uint32_t                err, i;
1455         device_t                dev = ha->pci_dev;
1456
1457         c_rss = (q80_config_rss_t *)ha->hw.mbox;
1458         bzero(c_rss, (sizeof (q80_config_rss_t)));
1459
1460         c_rss->opcode = Q8_MBX_CONFIG_RSS;
1461
1462         c_rss->count_version = (sizeof (q80_config_rss_t) >> 2);
1463         c_rss->count_version |= Q8_MBX_CMD_VERSION;
1464
1465         c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP_IP |
1466                                 Q8_MBX_RSS_HASH_TYPE_IPV6_TCP_IP);
1467         //c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP |
1468         //                      Q8_MBX_RSS_HASH_TYPE_IPV6_TCP);
1469
1470         c_rss->flags = Q8_MBX_RSS_FLAGS_ENABLE_RSS;
1471         c_rss->flags |= Q8_MBX_RSS_FLAGS_USE_IND_TABLE;
1472
1473         c_rss->indtbl_mask = Q8_MBX_RSS_INDTBL_MASK;
1474
1475         c_rss->indtbl_mask |= Q8_MBX_RSS_FLAGS_MULTI_RSS_VALID;
1476         c_rss->flags |= Q8_MBX_RSS_FLAGS_TYPE_CRSS;
1477
1478         c_rss->cntxt_id = cntxt_id;
1479
1480         for (i = 0; i < 5; i++) {
1481                 c_rss->rss_key[i] = rss_key[i];
1482         }
1483
1484         if (qla_mbx_cmd(ha, (uint32_t *)c_rss,
1485                 (sizeof (q80_config_rss_t) >> 2),
1486                 ha->hw.mbox, (sizeof(q80_config_rss_rsp_t) >> 2), 0)) {
1487                 device_printf(dev, "%s: failed0\n", __func__);
1488                 return (-1);
1489         }
1490         c_rss_rsp = (q80_config_rss_rsp_t *)ha->hw.mbox;
1491
1492         err = Q8_MBX_RSP_STATUS(c_rss_rsp->regcnt_status);
1493
1494         if (err) {
1495                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1496                 return (-1);
1497         }
1498         return (0);
1499 }
1500
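/*
 * How the RSS pieces fit together (editorial sketch; the hash-to-ring
 * selection is an assumption based on the standard RSS model, not on
 * anything in this file): the firmware hashes each flow with the
 * 40-byte Toeplitz key programmed above, masks the hash with the
 * indirection-table mask, and the selected table entry (programmed
 * via qla_set_rss_ind_table() below) names the destination SDS ring:
 *
 *      sds_ring = ind_tbl[toeplitz_hash(flow, rss_key) & ind_tbl_mask];
 */
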
1501 static int
1502 qla_set_rss_ind_table(qla_host_t *ha, uint32_t start_idx, uint32_t count,
1503         uint16_t cntxt_id, uint8_t *ind_table)
1504 {
1505         q80_config_rss_ind_table_t      *c_rss_ind;
1506         q80_config_rss_ind_table_rsp_t  *c_rss_ind_rsp;
1507         uint32_t                        err;
1508         device_t                        dev = ha->pci_dev;
1509
1510         if ((count > Q8_RSS_IND_TBL_SIZE) ||
1511                 ((start_idx + count - 1) > Q8_RSS_IND_TBL_MAX_IDX)) {
1512                 device_printf(dev, "%s: illegal count [%d, %d]\n", __func__,
1513                         start_idx, count);
1514                 return (-1);
1515         }
1516
1517         c_rss_ind = (q80_config_rss_ind_table_t *)ha->hw.mbox;
1518         bzero(c_rss_ind, sizeof (q80_config_rss_ind_table_t));
1519
1520         c_rss_ind->opcode = Q8_MBX_CONFIG_RSS_TABLE;
1521         c_rss_ind->count_version = (sizeof (q80_config_rss_ind_table_t) >> 2);
1522         c_rss_ind->count_version |= Q8_MBX_CMD_VERSION;
1523
1524         c_rss_ind->start_idx = start_idx;
1525         c_rss_ind->end_idx = start_idx + count - 1;
1526         c_rss_ind->cntxt_id = cntxt_id;
1527         bcopy(ind_table, c_rss_ind->ind_table, count);
1528
1529         if (qla_mbx_cmd(ha, (uint32_t *)c_rss_ind,
1530                 (sizeof (q80_config_rss_ind_table_t) >> 2), ha->hw.mbox,
1531                 (sizeof(q80_config_rss_ind_table_rsp_t) >> 2), 0)) {
1532                 device_printf(dev, "%s: failed0\n", __func__);
1533                 return (-1);
1534         }
1535
1536         c_rss_ind_rsp = (q80_config_rss_ind_table_rsp_t *)ha->hw.mbox;
1537         err = Q8_MBX_RSP_STATUS(c_rss_ind_rsp->regcnt_status);
1538
1539         if (err) {
1540                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1541                 return (-1);
1542         }
1543         return (0);
1544 }
1545
1546 /*
1547  * Name: qla_config_intr_coalesce
1548  * Function: Configure Interrupt Coalescing.
1549  */
1550 static int
1551 qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable,
1552         int rcv)
1553 {
1554         q80_config_intr_coalesc_t       *intrc;
1555         q80_config_intr_coalesc_rsp_t   *intrc_rsp;
1556         uint32_t                        err, i;
1557         device_t                        dev = ha->pci_dev;
1558         
1559         intrc = (q80_config_intr_coalesc_t *)ha->hw.mbox;
1560         bzero(intrc, (sizeof (q80_config_intr_coalesc_t)));
1561
1562         intrc->opcode = Q8_MBX_CONFIG_INTR_COALESCE;
1563         intrc->count_version = (sizeof (q80_config_intr_coalesc_t) >> 2);
1564         intrc->count_version |= Q8_MBX_CMD_VERSION;
1565
1566         if (rcv) {
1567                 intrc->flags = Q8_MBX_INTRC_FLAGS_RCV;
1568                 intrc->max_pkts = ha->hw.rcv_intr_coalesce & 0xFFFF;
1569                 intrc->max_mswait = (ha->hw.rcv_intr_coalesce >> 16) & 0xFFFF;
1570         } else {
1571                 intrc->flags = Q8_MBX_INTRC_FLAGS_XMT;
1572                 intrc->max_pkts = ha->hw.xmt_intr_coalesce & 0xFFFF;
1573                 intrc->max_mswait = (ha->hw.xmt_intr_coalesce >> 16) & 0xFFFF;
1574         }
1575
1576         intrc->cntxt_id = cntxt_id;
1577
1578         if (tenable) {
1579                 intrc->flags |= Q8_MBX_INTRC_FLAGS_PERIODIC;
1580                 intrc->timer_type = Q8_MBX_INTRC_TIMER_PERIODIC;
1581
1582                 for (i = 0; i < ha->hw.num_sds_rings; i++) {
1583                         intrc->sds_ring_mask |= (1 << i);
1584                 }
1585                 intrc->ms_timeout = 1000;
1586         }
1587
1588         if (qla_mbx_cmd(ha, (uint32_t *)intrc,
1589                 (sizeof (q80_config_intr_coalesc_t) >> 2),
1590                 ha->hw.mbox, (sizeof(q80_config_intr_coalesc_rsp_t) >> 2), 0)) {
1591                 device_printf(dev, "%s: failed0\n", __func__);
1592                 return (-1);
1593         }
1594         intrc_rsp = (q80_config_intr_coalesc_rsp_t *)ha->hw.mbox;
1595
1596         err = Q8_MBX_RSP_STATUS(intrc_rsp->regcnt_status);
1597
1598         if (err) {
1599                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1600                 return (-1);
1601         }
1602         
1603         return (0);
1604 }
1605
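/*
 * Worked example (values hypothetical; the bit layout follows from
 * the unpacking above): a receive coalesce setting of 64 packets and
 * a wait time of 3 would be cached as
 *
 *      ha->hw.rcv_intr_coalesce = (3 << 16) | 64;
 *
 * so that max_pkts = 64 and max_mswait = 3, i.e. the low 16 bits hold
 * the packet threshold and the high 16 bits the wait time.
 */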
1606
1607 /*
1608  * Name: qla_config_mac_addr
1609  * Function: Binds a MAC address to the context/interface.
1610  *      Can be unicast, multicast or broadcast.
1611  */
1612 static int
1613 qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac,
1614         uint32_t num_mac)
1615 {
1616         q80_config_mac_addr_t           *cmac;
1617         q80_config_mac_addr_rsp_t       *cmac_rsp;
1618         uint32_t                        err;
1619         device_t                        dev = ha->pci_dev;
1620         int                             i;
1621         uint8_t                         *mac_cpy = mac_addr;
1622
1623         if (num_mac > Q8_MAX_MAC_ADDRS) {
1624                 device_printf(dev, "%s: %s num_mac [0x%x] > Q8_MAX_MAC_ADDRS\n",
1625                         __func__, (add_mac ? "Add" : "Del"), num_mac);
1626                 return (-1);
1627         }
1628
1629         cmac = (q80_config_mac_addr_t *)ha->hw.mbox;
1630         bzero(cmac, (sizeof (q80_config_mac_addr_t)));
1631
1632         cmac->opcode = Q8_MBX_CONFIG_MAC_ADDR;
1633         cmac->count_version = sizeof (q80_config_mac_addr_t) >> 2;
1634         cmac->count_version |= Q8_MBX_CMD_VERSION;
1635
1636         if (add_mac) 
1637                 cmac->cmd = Q8_MBX_CMAC_CMD_ADD_MAC_ADDR;
1638         else
1639                 cmac->cmd = Q8_MBX_CMAC_CMD_DEL_MAC_ADDR;
1640                 
1641         cmac->cmd |= Q8_MBX_CMAC_CMD_CAM_INGRESS;
1642
1643         cmac->nmac_entries = num_mac;
1644         cmac->cntxt_id = ha->hw.rcv_cntxt_id;
1645
1646         for (i = 0; i < num_mac; i++) {
1647                 bcopy(mac_addr, cmac->mac_addr[i].addr, Q8_ETHER_ADDR_LEN); 
1648                 mac_addr = mac_addr + ETHER_ADDR_LEN;
1649         }
1650
1651         if (qla_mbx_cmd(ha, (uint32_t *)cmac,
1652                 (sizeof (q80_config_mac_addr_t) >> 2),
1653                 ha->hw.mbox, (sizeof(q80_config_mac_addr_rsp_t) >> 2), 1)) {
1654                 device_printf(dev, "%s: %s failed0\n", __func__,
1655                         (add_mac ? "Add" : "Del"));
1656                 return (-1);
1657         }
1658         cmac_rsp = (q80_config_mac_addr_rsp_t *)ha->hw.mbox;
1659
1660         err = Q8_MBX_RSP_STATUS(cmac_rsp->regcnt_status);
1661
1662         if (err) {
1663                 device_printf(dev, "%s: %s failed1 [0x%08x]\n", __func__,
1664                         (add_mac ? "Add" : "Del"), err);
1665                 for (i = 0; i < num_mac; i++) {
1666                         device_printf(dev, "%s: %02x:%02x:%02x:%02x:%02x:%02x\n",
1667                                 __func__, mac_cpy[0], mac_cpy[1], mac_cpy[2],
1668                                 mac_cpy[3], mac_cpy[4], mac_cpy[5]);
1669                         mac_cpy += ETHER_ADDR_LEN;
1670                 }
1671                 return (-1);
1672         }
1673         
1674         return (0);
1675 }
1676
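/*
 * Usage sketch (addresses hypothetical): programming two unicast MAC
 * addresses with a single mailbox command.  The addresses are packed
 * back to back, ETHER_ADDR_LEN bytes each, and add_mac selects the
 * CAM add or delete command:
 *
 *      uint8_t macs[2 * ETHER_ADDR_LEN] = {
 *              0x00, 0x0e, 0x1e, 0x00, 0x00, 0x01,
 *              0x00, 0x0e, 0x1e, 0x00, 0x00, 0x02
 *      };
 *      ret = qla_config_mac_addr(ha, macs, 1, 2);   add_mac=1, num_mac=2
 */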
1677
1678 /*
1679  * Name: qla_set_mac_rcv_mode
1680  * Function: Enable/Disable AllMulticast and Promiscuous Modes.
1681  */
1682 static int
1683 qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode)
1684 {
1685         q80_config_mac_rcv_mode_t       *rcv_mode;
1686         uint32_t                        err;
1687         q80_config_mac_rcv_mode_rsp_t   *rcv_mode_rsp;
1688         device_t                        dev = ha->pci_dev;
1689
1690         rcv_mode = (q80_config_mac_rcv_mode_t *)ha->hw.mbox;
1691         bzero(rcv_mode, (sizeof (q80_config_mac_rcv_mode_t)));
1692
1693         rcv_mode->opcode = Q8_MBX_CONFIG_MAC_RX_MODE;
1694         rcv_mode->count_version = sizeof (q80_config_mac_rcv_mode_t) >> 2;
1695         rcv_mode->count_version |= Q8_MBX_CMD_VERSION;
1696
1697         rcv_mode->mode = mode;
1698
1699         rcv_mode->cntxt_id = ha->hw.rcv_cntxt_id;
1700
1701         if (qla_mbx_cmd(ha, (uint32_t *)rcv_mode,
1702                 (sizeof (q80_config_mac_rcv_mode_t) >> 2),
1703                 ha->hw.mbox, (sizeof(q80_config_mac_rcv_mode_rsp_t) >> 2), 1)) {
1704                 device_printf(dev, "%s: failed0\n", __func__);
1705                 return (-1);
1706         }
1707         rcv_mode_rsp = (q80_config_mac_rcv_mode_rsp_t *)ha->hw.mbox;
1708
1709         err = Q8_MBX_RSP_STATUS(rcv_mode_rsp->regcnt_status);
1710
1711         if (err) {
1712                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1713                 return (-1);
1714         }
1715         
1716         return (0);
1717 }
1718
1719 int
1720 ql_set_promisc(qla_host_t *ha)
1721 {
1722         int ret;
1723
1724         ha->hw.mac_rcv_mode |= Q8_MBX_MAC_RCV_PROMISC_ENABLE;
1725         ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1726         return (ret);
1727 }
1728
1729 void
1730 qla_reset_promisc(qla_host_t *ha)
1731 {
1732         ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_RCV_PROMISC_ENABLE;
1733         (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1734 }
1735
1736 int
1737 ql_set_allmulti(qla_host_t *ha)
1738 {
1739         int ret;
1740
1741         ha->hw.mac_rcv_mode |= Q8_MBX_MAC_ALL_MULTI_ENABLE;
1742         ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1743         return (ret);
1744 }
1745
1746 void
1747 qla_reset_allmulti(qla_host_t *ha)
1748 {
1749         ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_ALL_MULTI_ENABLE;
1750         (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1751 }
1752
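/*
 * Usage note: the four wrappers above edit the cached
 * ha->hw.mac_rcv_mode bit mask before pushing it to the firmware, so
 * promiscuous and all-multicast mode compose independently, e.g.
 * (illustrative):
 *
 *      ql_set_promisc(ha);         sets PROMISC, preserves ALL_MULTI
 *      qla_reset_allmulti(ha);     clears ALL_MULTI, preserves PROMISC
 */
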
1753 /*
1754  * Name: ql_set_max_mtu
1755  * Function:
1756  *      Sets the maximum transmission unit (MTU) for the specified rcv context.
1757  */
1758 int
1759 ql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id)
1760 {
1761         device_t                dev;
1762         q80_set_max_mtu_t       *max_mtu;
1763         q80_set_max_mtu_rsp_t   *max_mtu_rsp;
1764         uint32_t                err;
1765
1766         dev = ha->pci_dev;
1767
1768         max_mtu = (q80_set_max_mtu_t *)ha->hw.mbox;
1769         bzero(max_mtu, (sizeof (q80_set_max_mtu_t)));
1770
1771         max_mtu->opcode = Q8_MBX_SET_MAX_MTU;
1772         max_mtu->count_version = (sizeof (q80_set_max_mtu_t) >> 2);
1773         max_mtu->count_version |= Q8_MBX_CMD_VERSION;
1774
1775         max_mtu->cntxt_id = cntxt_id;
1776         max_mtu->mtu = mtu;
1777
1778         if (qla_mbx_cmd(ha, (uint32_t *)max_mtu,
1779                 (sizeof (q80_set_max_mtu_t) >> 2),
1780                 ha->hw.mbox, (sizeof (q80_set_max_mtu_rsp_t) >> 2), 1)) {
1781                 device_printf(dev, "%s: failed\n", __func__);
1782                 return (-1);
1783         }
1784
1785         max_mtu_rsp = (q80_set_max_mtu_rsp_t *)ha->hw.mbox;
1786
1787         err = Q8_MBX_RSP_STATUS(max_mtu_rsp->regcnt_status);
1788
1789         if (err) {
1790                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1791         }
1792
1793         return (0);
1794 }
1795
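/*
 * Usage note: ql_init_hw_if() below invokes this as
 *
 *      ql_set_max_mtu(ha, ha->max_frame_size, ha->hw.rcv_cntxt_id);
 *
 * A failed completion status is only logged here; the function still
 * returns 0, so such a failure is treated as non-fatal by callers.
 */
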
1796 static int
1797 qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id)
1798 {
1799         device_t                dev;
1800         q80_link_event_t        *lnk;
1801         q80_link_event_rsp_t    *lnk_rsp;
1802         uint32_t                err;
1803
1804         dev = ha->pci_dev;
1805
1806         lnk = (q80_link_event_t *)ha->hw.mbox;
1807         bzero(lnk, (sizeof (q80_link_event_t)));
1808
1809         lnk->opcode = Q8_MBX_LINK_EVENT_REQ;
1810         lnk->count_version = (sizeof (q80_link_event_t) >> 2);
1811         lnk->count_version |= Q8_MBX_CMD_VERSION;
1812
1813         lnk->cntxt_id = cntxt_id;
1814         lnk->cmd = Q8_LINK_EVENT_CMD_ENABLE_ASYNC;
1815
1816         if (qla_mbx_cmd(ha, (uint32_t *)lnk, (sizeof (q80_link_event_t) >> 2),
1817                 ha->hw.mbox, (sizeof (q80_link_event_rsp_t) >> 2), 0)) {
1818                 device_printf(dev, "%s: failed\n", __func__);
1819                 return (-1);
1820         }
1821
1822         lnk_rsp = (q80_link_event_rsp_t *)ha->hw.mbox;
1823
1824         err = Q8_MBX_RSP_STATUS(lnk_rsp->regcnt_status);
1825
1826         if (err) {
1827                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1828         }
1829
1830         return (0);
1831 }
1832
1833 static int
1834 qla_config_fw_lro(qla_host_t *ha, uint16_t cntxt_id)
1835 {
1836         device_t                dev;
1837         q80_config_fw_lro_t     *fw_lro;
1838         q80_config_fw_lro_rsp_t *fw_lro_rsp;
1839         uint32_t                err;
1840
1841         dev = ha->pci_dev;
1842
1843         fw_lro = (q80_config_fw_lro_t *)ha->hw.mbox;
1844         bzero(fw_lro, sizeof(q80_config_fw_lro_t));
1845
1846         fw_lro->opcode = Q8_MBX_CONFIG_FW_LRO;
1847         fw_lro->count_version = (sizeof (q80_config_fw_lro_t) >> 2);
1848         fw_lro->count_version |= Q8_MBX_CMD_VERSION;
1849
1850         fw_lro->flags |= Q8_MBX_FW_LRO_IPV4 | Q8_MBX_FW_LRO_IPV4_WO_DST_IP_CHK;
1851         fw_lro->flags |= Q8_MBX_FW_LRO_IPV6 | Q8_MBX_FW_LRO_IPV6_WO_DST_IP_CHK;
1852
1853         fw_lro->cntxt_id = cntxt_id;
1854
1855         if (qla_mbx_cmd(ha, (uint32_t *)fw_lro,
1856                 (sizeof (q80_config_fw_lro_t) >> 2),
1857                 ha->hw.mbox, (sizeof (q80_config_fw_lro_rsp_t) >> 2), 0)) {
1858                 device_printf(dev, "%s: failed\n", __func__);
1859                 return (-1);
1860         }
1861
1862         fw_lro_rsp = (q80_config_fw_lro_rsp_t *)ha->hw.mbox;
1863
1864         err = Q8_MBX_RSP_STATUS(fw_lro_rsp->regcnt_status);
1865
1866         if (err) {
1867                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1868         }
1869
1870         return (0);
1871 }
1872
1873 static int
1874 qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode)
1875 {
1876         device_t                dev;
1877         q80_hw_config_t         *hw_config;
1878         q80_hw_config_rsp_t     *hw_config_rsp;
1879         uint32_t                err;
1880
1881         dev = ha->pci_dev;
1882
1883         hw_config = (q80_hw_config_t *)ha->hw.mbox;
1884         bzero(hw_config, sizeof (q80_hw_config_t));
1885
1886         hw_config->opcode = Q8_MBX_HW_CONFIG;
1887         hw_config->count_version = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE_COUNT;
1888         hw_config->count_version |= Q8_MBX_CMD_VERSION;
1889
1890         hw_config->cmd = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE;
1891
1892         hw_config->u.set_cam_search_mode.mode = search_mode;
1893
1894         if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
1895                 (sizeof (q80_hw_config_t) >> 2),
1896                 ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
1897                 device_printf(dev, "%s: failed\n", __func__);
1898                 return (-1);
1899         }
1900         hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;
1901
1902         err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);
1903
1904         if (err) {
1905                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1906         }
1907
1908         return (0);
1909 }
1910
1911 static int
1912 qla_get_cam_search_mode(qla_host_t *ha)
1913 {
1914         device_t                dev;
1915         q80_hw_config_t         *hw_config;
1916         q80_hw_config_rsp_t     *hw_config_rsp;
1917         uint32_t                err;
1918
1919         dev = ha->pci_dev;
1920
1921         hw_config = (q80_hw_config_t *)ha->hw.mbox;
1922         bzero(hw_config, sizeof (q80_hw_config_t));
1923
1924         hw_config->opcode = Q8_MBX_HW_CONFIG;
1925         hw_config->count_version = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE_COUNT;
1926         hw_config->count_version |= Q8_MBX_CMD_VERSION;
1927
1928         hw_config->cmd = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE;
1929
1930         if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
1931                 (sizeof (q80_hw_config_t) >> 2),
1932                 ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
1933                 device_printf(dev, "%s: failed\n", __func__);
1934                 return (-1);
1935         }
1936         hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;
1937
1938         err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);
1939
1940         if (err) {
1941                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1942         } else {
1943                 device_printf(dev, "%s: cam search mode [0x%08x]\n", __func__,
1944                         hw_config_rsp->u.get_cam_search_mode.mode);
1945         }
1946
1947         return (0);
1948 }
1949
1950 static int
1951 qla_get_hw_stats(qla_host_t *ha, uint32_t cmd, uint32_t rsp_size)
1952 {
1953         device_t                dev;
1954         q80_get_stats_t         *stat;
1955         q80_get_stats_rsp_t     *stat_rsp;
1956         uint32_t                err;
1957
1958         dev = ha->pci_dev;
1959
1960         stat = (q80_get_stats_t *)ha->hw.mbox;
1961         bzero(stat, (sizeof (q80_get_stats_t)));
1962
1963         stat->opcode = Q8_MBX_GET_STATS;
1964         stat->count_version = 2;
1965         stat->count_version |= Q8_MBX_CMD_VERSION;
1966
1967         stat->cmd = cmd;
1968
1969         if (qla_mbx_cmd(ha, (uint32_t *)stat, 2,
1970                 ha->hw.mbox, (rsp_size >> 2), 0)) {
1971                 device_printf(dev, "%s: failed\n", __func__);
1972                 return (-1);
1973         }
1974
1975         stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
1976
1977         err = Q8_MBX_RSP_STATUS(stat_rsp->regcnt_status);
1978
1979         if (err) {
1980                 return (-1);
1981         }
1982
1983         return (0);
1984 }
1985
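/*
 * Note: the request above is only two 32-bit words long (opcode plus
 * cmd), hence count_version = 2 and the matching request length of 2
 * passed to qla_mbx_cmd(); the caller supplies the expected response
 * size via rsp_size.
 */
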
1986 void
1987 ql_get_stats(qla_host_t *ha)
1988 {
1989         q80_get_stats_rsp_t     *stat_rsp;
1990         q80_mac_stats_t         *mstat;
1991         q80_xmt_stats_t         *xstat;
1992         q80_rcv_stats_t         *rstat;
1993         uint32_t                cmd;
1994         int                     i;
1995         struct ifnet *ifp = ha->ifp;
1996
1997         if (ifp == NULL)
1998                 return;
1999
2000         if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) != 0) {
2001                 device_printf(ha->pci_dev, "%s: failed\n", __func__);
2002                 return;
2003         }
2004
2005         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2006                 QLA_UNLOCK(ha, __func__);
2007                 return;
2008         }
2009
2010         stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
2011         /*
2012          * Get MAC Statistics
2013          */
2014         cmd = Q8_GET_STATS_CMD_TYPE_MAC;
2015 //      cmd |= Q8_GET_STATS_CMD_CLEAR;
2016
2017         cmd |= ((ha->pci_func & 0x1) << 16);
2018
2019         if (ha->qla_watchdog_pause)
2020                 goto ql_get_stats_exit;
2021
2022         if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
2023                 mstat = (q80_mac_stats_t *)&stat_rsp->u.mac;
2024                 bcopy(mstat, &ha->hw.mac, sizeof(q80_mac_stats_t));
2025         } else {
2026                 device_printf(ha->pci_dev, "%s: mac failed [0x%08x]\n",
2027                         __func__, ha->hw.mbox[0]);
2028         }
2029         /*
2030          * Get RCV Statistics
2031          */
2032         cmd = Q8_GET_STATS_CMD_RCV | Q8_GET_STATS_CMD_TYPE_CNTXT;
2033 //      cmd |= Q8_GET_STATS_CMD_CLEAR;
2034         cmd |= (ha->hw.rcv_cntxt_id << 16);
2035
2036         if (ha->qla_watchdog_pause)
2037                 goto ql_get_stats_exit;
2038
2039         if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
2040                 rstat = (q80_rcv_stats_t *)&stat_rsp->u.rcv;
2041                 bcopy(rstat, &ha->hw.rcv, sizeof(q80_rcv_stats_t));
2042         } else {
2043                 device_printf(ha->pci_dev, "%s: rcv failed [0x%08x]\n",
2044                         __func__, ha->hw.mbox[0]);
2045         }
2046
2047         if (ha->qla_watchdog_pause)
2048                 goto ql_get_stats_exit;
2049         /*
2050          * Get XMT Statistics
2051          */
2052         for (i = 0 ; ((i < ha->hw.num_tx_rings) && (!ha->qla_watchdog_pause));
2053                 i++) {
2054                 cmd = Q8_GET_STATS_CMD_XMT | Q8_GET_STATS_CMD_TYPE_CNTXT;
2055 //              cmd |= Q8_GET_STATS_CMD_CLEAR;
2056                 cmd |= (ha->hw.tx_cntxt[i].tx_cntxt_id << 16);
2057
2058                 if (qla_get_hw_stats(ha, cmd, sizeof(q80_get_stats_rsp_t))
2059                         == 0) {
2060                         xstat = (q80_xmt_stats_t *)&stat_rsp->u.xmt;
2061                         bcopy(xstat, &ha->hw.xmt[i], sizeof(q80_xmt_stats_t));
2062                 } else {
2063                         device_printf(ha->pci_dev, "%s: xmt failed [0x%08x]\n",
2064                                 __func__, ha->hw.mbox[0]);
2065                 }
2066         }
2067
2068 ql_get_stats_exit:
2069         QLA_UNLOCK(ha, __func__);
2070
2071         return;
2072 }
2073
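/*
 * Command-word layout used above, as implied by the shifts: the low
 * 16 bits select the statistics type and the high 16 bits carry the
 * qualifier, i.e. the PCI function for MAC statistics or the context
 * id for per-context receive/transmit statistics:
 *
 *      cmd = Q8_GET_STATS_CMD_TYPE_MAC | ((ha->pci_func & 0x1) << 16);
 *      cmd = Q8_GET_STATS_CMD_RCV | Q8_GET_STATS_CMD_TYPE_CNTXT |
 *              (ha->hw.rcv_cntxt_id << 16);
 */
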
2074 /*
2075  * Name: qla_tx_tso
2076  * Function: Checks if the packet to be transmitted is a candidate for
2077  *      Large TCP Segment Offload. If yes, the appropriate fields in the Tx
2078  *      Ring Structure are plugged in.
2079  */
2080 static int
2081 qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr)
2082 {
2083         struct ether_vlan_header *eh;
2084         struct ip *ip = NULL;
2085         struct ip6_hdr *ip6 = NULL;
2086         struct tcphdr *th = NULL;
2087         uint32_t ehdrlen,  hdrlen, ip_hlen, tcp_hlen, tcp_opt_off;
2088         uint16_t etype, opcode, offload = 1;
2089         device_t dev;
2090
2091         dev = ha->pci_dev;
2092
2093
2094         eh = mtod(mp, struct ether_vlan_header *);
2095
2096         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2097                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2098                 etype = ntohs(eh->evl_proto);
2099         } else {
2100                 ehdrlen = ETHER_HDR_LEN;
2101                 etype = ntohs(eh->evl_encap_proto);
2102         }
2103
2104         hdrlen = 0;
2105
2106         switch (etype) {
2107                 case ETHERTYPE_IP:
2108
2109                         tcp_opt_off = ehdrlen + sizeof(struct ip) +
2110                                         sizeof(struct tcphdr);
2111
2112                         if (mp->m_len < tcp_opt_off) {
2113                                 m_copydata(mp, 0, tcp_opt_off, hdr);
2114                                 ip = (struct ip *)(hdr + ehdrlen);
2115                         } else {
2116                                 ip = (struct ip *)(mp->m_data + ehdrlen);
2117                         }
2118
2119                         ip_hlen = ip->ip_hl << 2;
2120                         opcode = Q8_TX_CMD_OP_XMT_TCP_LSO;
2121
2122                                 
2123                         if ((ip->ip_p != IPPROTO_TCP) ||
2124                                 (ip_hlen != sizeof (struct ip))){
2125                                 /* IP Options are not supported */
2126
2127                                 offload = 0;
2128                         } else
2129                                 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
2130
2131                 break;
2132
2133                 case ETHERTYPE_IPV6:
2134
2135                         tcp_opt_off = ehdrlen + sizeof(struct ip6_hdr) +
2136                                         sizeof (struct tcphdr);
2137
2138                         if (mp->m_len < tcp_opt_off) {
2139                                 m_copydata(mp, 0, tcp_opt_off, hdr);
2140                                 ip6 = (struct ip6_hdr *)(hdr + ehdrlen);
2141                         } else {
2142                                 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2143                         }
2144
2145                         ip_hlen = sizeof(struct ip6_hdr);
2146                         opcode = Q8_TX_CMD_OP_XMT_TCP_LSO_IPV6;
2147
2148                         if (ip6->ip6_nxt != IPPROTO_TCP) {
2149                                 //device_printf(dev, "%s: ipv6\n", __func__);
2150                                 offload = 0;
2151                         } else
2152                                 th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
2153                 break;
2154
2155                 default:
2156                         QL_DPRINT8(ha, (dev, "%s: type!=ip\n", __func__));
2157                         offload = 0;
2158                 break;
2159         }
2160
2161         if (!offload)
2162                 return (-1);
2163
2164         tcp_hlen = th->th_off << 2;
2165         hdrlen = ehdrlen + ip_hlen + tcp_hlen;
2166
2167         if (mp->m_len < hdrlen) {
2168                 if (mp->m_len < tcp_opt_off) {
2169                         if (tcp_hlen > sizeof(struct tcphdr)) {
2170                                 m_copydata(mp, tcp_opt_off,
2171                                         (tcp_hlen - sizeof(struct tcphdr)),
2172                                         &hdr[tcp_opt_off]);
2173                         }
2174                 } else {
2175                         m_copydata(mp, 0, hdrlen, hdr);
2176                 }
2177         }
2178
2179         tx_cmd->mss = mp->m_pkthdr.tso_segsz;
2180
2181         tx_cmd->flags_opcode = opcode ;
2182         tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;
2183         tx_cmd->total_hdr_len = hdrlen;
2184
2185         /* Multicast if least significant bit of first address byte is set */
2186         if (eh->evl_dhost[0] & 0x01) {
2187                 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST;
2188         }
2189
2190         if (mp->m_len < hdrlen) {
2191                 device_printf(dev, "%s: hdrlen %d\n", __func__, hdrlen);
2192                 return (1);
2193         }
2194
2195         return (0);
2196 }
2197
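/*
 * Worked example for qla_tx_tso() above (illustrative): for an
 * untagged IPv4/TCP frame carrying 12 bytes of TCP options,
 *
 *      ehdrlen     = ETHER_HDR_LEN                            = 14
 *      ip_hlen     = sizeof(struct ip)                        = 20
 *      tcp_hlen    = th->th_off << 2                          = 32
 *      tcp_opt_off = ehdrlen + sizeof(struct ip) +
 *                      sizeof(struct tcphdr)                  = 54
 *      hdrlen      = ehdrlen + ip_hlen + tcp_hlen             = 66
 *
 * If the first mbuf holds fewer than hdrlen bytes, the full header is
 * staged into 'hdr' so ql_hw_send() can copy it into the trailing tx
 * cmd descriptors.
 */
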
2198 /*
2199  * Name: qla_tx_chksum
2200  * Function: Checks if the packet to be transmitted is a candidate for
2201  *      TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx
2202  *      Ring Structure are plugged in.
2203  */
2204 static int
2205 qla_tx_chksum(qla_host_t *ha, struct mbuf *mp, uint32_t *op_code,
2206         uint32_t *tcp_hdr_off)
2207 {
2208         struct ether_vlan_header *eh;
2209         struct ip *ip;
2210         struct ip6_hdr *ip6;
2211         uint32_t ehdrlen, ip_hlen;
2212         uint16_t etype, opcode, offload = 1;
2213         device_t dev;
2214         uint8_t buf[sizeof(struct ip6_hdr)];
2215
2216         dev = ha->pci_dev;
2217
2218         *op_code = 0;
2219
2220         if ((mp->m_pkthdr.csum_flags &
2221                 (CSUM_TCP|CSUM_UDP|CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) == 0)
2222                 return (-1);
2223
2224         eh = mtod(mp, struct ether_vlan_header *);
2225
2226         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2227                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2228                 etype = ntohs(eh->evl_proto);
2229         } else {
2230                 ehdrlen = ETHER_HDR_LEN;
2231                 etype = ntohs(eh->evl_encap_proto);
2232         }
2233
2234                 
2235         switch (etype) {
2236                 case ETHERTYPE_IP:
2237                         ip = (struct ip *)(mp->m_data + ehdrlen);
2238
2239                         ip_hlen = sizeof (struct ip);
2240
2241                         if (mp->m_len < (ehdrlen + ip_hlen)) {
2242                                 m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
2243                                 ip = (struct ip *)buf;
2244                         }
2245
2246                         if (ip->ip_p == IPPROTO_TCP)
2247                                 opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM;
2248                         else if (ip->ip_p == IPPROTO_UDP)
2249                                 opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM;
2250                         else {
2251                                 //device_printf(dev, "%s: ipv4\n", __func__);
2252                                 offload = 0;
2253                         }
2254                 break;
2255
2256                 case ETHERTYPE_IPV6:
2257                         ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2258
2259                         ip_hlen = sizeof(struct ip6_hdr);
2260
2261                         if (mp->m_len < (ehdrlen + ip_hlen)) {
2262                                 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
2263                                         buf);
2264                                 ip6 = (struct ip6_hdr *)buf;
2265                         }
2266
2267                         if (ip6->ip6_nxt == IPPROTO_TCP)
2268                                 opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6;
2269                         else if (ip6->ip6_nxt == IPPROTO_UDP)
2270                                 opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6;
2271                         else {
2272                                 //device_printf(dev, "%s: ipv6\n", __func__);
2273                                 offload = 0;
2274                         }
2275                 break;
2276
2277                 default:
2278                         offload = 0;
2279                 break;
2280         }
2281         if (!offload)
2282                 return (-1);
2283
2284         *op_code = opcode;
2285         *tcp_hdr_off = (ip_hlen + ehdrlen);
2286
2287         return (0);
2288 }
2289
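/*
 * Worked example (illustrative): for an untagged IPv4/TCP frame,
 * qla_tx_chksum() above returns
 *
 *      *op_code     = Q8_TX_CMD_OP_XMT_TCP_CHKSUM;
 *      *tcp_hdr_off = sizeof(struct ip) + ETHER_HDR_LEN;    20 + 14 = 34
 *
 * and ql_hw_send() plugs both values into the tx cmd descriptor.
 */
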
2290 #define QLA_TX_MIN_FREE 2
2291 /*
2292  * Name: ql_hw_send
2293  * Function: Transmits a packet. It first checks if the packet is a
2294  *      candidate for Large TCP Segment Offload and then for UDP/TCP checksum
2295  *      offload. If neither criterion is met, it is transmitted
2296  *      as a regular Ethernet frame.
2297  */
2298 int
2299 ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
2300         uint32_t tx_idx, struct mbuf *mp, uint32_t txr_idx, uint32_t iscsi_pdu)
2301 {
2302         struct ether_vlan_header *eh;
2303         qla_hw_t *hw = &ha->hw;
2304         q80_tx_cmd_t *tx_cmd, tso_cmd;
2305         bus_dma_segment_t *c_seg;
2306         uint32_t num_tx_cmds, hdr_len = 0;
2307         uint32_t total_length = 0, bytes, tx_cmd_count = 0, txr_next;
2308         device_t dev;
2309         int i, ret;
2310         uint8_t *src = NULL, *dst = NULL;
2311         uint8_t frame_hdr[QL_FRAME_HDR_SIZE];
2312         uint32_t op_code = 0;
2313         uint32_t tcp_hdr_off = 0;
2314
2315         dev = ha->pci_dev;
2316
2317         /*
2318          * Always make sure there is at least one empty slot in the tx_ring;
2319          * the tx_ring is considered full when only one entry is available.
2320          */
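        /*
         * Each tx cmd descriptor holds up to Q8_TX_CMD_MAX_SEGMENTS (4)
         * DMA segments (the four buf fields below), hence the round-up
         * and divide by four.
         */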
2321         num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2;
2322
2323         total_length = mp->m_pkthdr.len;
2324         if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
2325                 device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
2326                         __func__, total_length);
2327                 return (EINVAL);
2328         }
2329         eh = mtod(mp, struct ether_vlan_header *);
2330
2331         if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
2332
2333                 bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t));
2334
2335                 src = frame_hdr;
2336                 ret = qla_tx_tso(ha, mp, &tso_cmd, src);
2337
2338                 if (!(ret & ~1)) {      /* ret is 0 or 1: do TSO */
2339                         /* find the additional tx_cmd descriptors required */
2340
2341                         if (mp->m_flags & M_VLANTAG)
2342                                 tso_cmd.total_hdr_len += ETHER_VLAN_ENCAP_LEN;
2343
2344                         hdr_len = tso_cmd.total_hdr_len;
2345
2346                         bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
2347                         bytes = QL_MIN(bytes, hdr_len);
2348
2349                         num_tx_cmds++;
2350                         hdr_len -= bytes;
2351
2352                         while (hdr_len) {
2353                                 bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
2354                                 hdr_len -= bytes;
2355                                 num_tx_cmds++;
2356                         }
2357                         hdr_len = tso_cmd.total_hdr_len;
2358
2359                         if (ret == 0)
2360                                 src = (uint8_t *)eh;
2361                 } else 
2362                         return (EINVAL);
2363         } else {
2364                 (void)qla_tx_chksum(ha, mp, &op_code, &tcp_hdr_off);
2365         }
2366
2367         if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
2368                 ql_hw_tx_done_locked(ha, txr_idx);
2369                 if (hw->tx_cntxt[txr_idx].txr_free <=
2370                                 (num_tx_cmds + QLA_TX_MIN_FREE)) {
2371                         QL_DPRINT8(ha, (dev, "%s: (hw->txr_free <= "
2372                                 "(num_tx_cmds + QLA_TX_MIN_FREE))\n",
2373                                 __func__));
2374                         return (-1);
2375                 }
2376         }
2377
2378         tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[tx_idx];
2379
2380         if (!(mp->m_pkthdr.csum_flags & CSUM_TSO)) {
2381
2382                 if (nsegs > ha->hw.max_tx_segs)
2383                         ha->hw.max_tx_segs = nsegs;
2384
2385                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2386
2387                 if (op_code) {
2388                         tx_cmd->flags_opcode = op_code;
2389                         tx_cmd->tcp_hdr_off = tcp_hdr_off;
2390
2391                 } else {
2392                         tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER;
2393                 }
2394         } else {
2395                 bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t));
2396                 ha->tx_tso_frames++;
2397         }
2398
2399         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2400                 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED;
2401
2402                 if (iscsi_pdu)
2403                         eh->evl_tag |= ha->hw.user_pri_iscsi << 13;
2404
2405         } else if (mp->m_flags & M_VLANTAG) {
2406
2407                 if (hdr_len) { /* TSO */
2408                         tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED |
2409                                                 Q8_TX_CMD_FLAGS_HW_VLAN_ID);
2410                         tx_cmd->tcp_hdr_off += ETHER_VLAN_ENCAP_LEN;
2411                 } else
2412                         tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_HW_VLAN_ID;
2413
2414                 ha->hw_vlan_tx_frames++;
2415                 tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag;
2416
2417                 if (iscsi_pdu) {
2418                         tx_cmd->vlan_tci |= ha->hw.user_pri_iscsi << 13;
2419                         mp->m_pkthdr.ether_vtag = tx_cmd->vlan_tci;
2420                 }
2421         }
2422
2423
2424         tx_cmd->n_bufs = (uint8_t)nsegs;
2425         tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF);
2426         tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8)));
2427         tx_cmd->cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func);
2428
2429         c_seg = segs;
2430
2431         while (1) {
2432                 for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) {
2433
2434                         switch (i) {
2435                         case 0:
2436                                 tx_cmd->buf1_addr = c_seg->ds_addr;
2437                                 tx_cmd->buf1_len = c_seg->ds_len;
2438                                 break;
2439
2440                         case 1:
2441                                 tx_cmd->buf2_addr = c_seg->ds_addr;
2442                                 tx_cmd->buf2_len = c_seg->ds_len;
2443                                 break;
2444
2445                         case 2:
2446                                 tx_cmd->buf3_addr = c_seg->ds_addr;
2447                                 tx_cmd->buf3_len = c_seg->ds_len;
2448                                 break;
2449
2450                         case 3:
2451                                 tx_cmd->buf4_addr = c_seg->ds_addr;
2452                                 tx_cmd->buf4_len = c_seg->ds_len;
2453                                 break;
2454                         }
2455
2456                         c_seg++;
2457                         nsegs--;
2458                 }
2459
2460                 txr_next = hw->tx_cntxt[txr_idx].txr_next =
2461                         (hw->tx_cntxt[txr_idx].txr_next + 1) &
2462                                 (NUM_TX_DESCRIPTORS - 1);
2463                 tx_cmd_count++;
2464
2465                 if (!nsegs)
2466                         break;
2467                 
2468                 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2469                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2470         }
2471
2472         if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
2473
2474                 /* TSO : Copy the header in the following tx cmd descriptors */
2475
2476                 txr_next = hw->tx_cntxt[txr_idx].txr_next;
2477
2478                 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2479                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2480
2481                 bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
2482                 bytes = QL_MIN(bytes, hdr_len);
2483
2484                 dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN;
2485
2486                 if (mp->m_flags & M_VLANTAG) {
2487                         /* first copy the src/dst MAC addresses */
2488                         bcopy(src, dst, (ETHER_ADDR_LEN * 2));
2489                         dst += (ETHER_ADDR_LEN * 2);
2490                         src += (ETHER_ADDR_LEN * 2);
2491                         
2492                         *((uint16_t *)dst) = htons(ETHERTYPE_VLAN);
2493                         dst += 2;
2494                         *((uint16_t *)dst) = htons(mp->m_pkthdr.ether_vtag);
2495                         dst += 2;
2496
2497                         /* bytes left in src header */
2498                         hdr_len -= ((ETHER_ADDR_LEN * 2) +
2499                                         ETHER_VLAN_ENCAP_LEN);
2500
2501                         /* bytes left in TxCmd Entry */
2502                         bytes -= ((ETHER_ADDR_LEN * 2) + ETHER_VLAN_ENCAP_LEN);
2503
2504
2505                         bcopy(src, dst, bytes);
2506                         src += bytes;
2507                         hdr_len -= bytes;
2508                 } else {
2509                         bcopy(src, dst, bytes);
2510                         src += bytes;
2511                         hdr_len -= bytes;
2512                 }
2513
2514                 txr_next = hw->tx_cntxt[txr_idx].txr_next =
2515                                 (hw->tx_cntxt[txr_idx].txr_next + 1) &
2516                                         (NUM_TX_DESCRIPTORS - 1);
2517                 tx_cmd_count++;
2518                 
2519                 while (hdr_len) {
2520                         tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2521                         bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2522
2523                         bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
2524
2525                         bcopy(src, tx_cmd, bytes);
2526                         src += bytes;
2527                         hdr_len -= bytes;
2528
2529                         txr_next = hw->tx_cntxt[txr_idx].txr_next =
2530                                 (hw->tx_cntxt[txr_idx].txr_next + 1) &
2531                                         (NUM_TX_DESCRIPTORS - 1);
2532                         tx_cmd_count++;
2533                 }
2534         }
2535
2536         hw->tx_cntxt[txr_idx].txr_free =
2537                 hw->tx_cntxt[txr_idx].txr_free - tx_cmd_count;
2538
2539         QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->tx_cntxt[txr_idx].txr_next,\
2540                 txr_idx);
2541         QL_DPRINT8(ha, (dev, "%s: return\n", __func__));
2542
2543         return (0);
2544 }
2545
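/*
 * Note on the producer-index arithmetic in ql_hw_send() above
 * (editorial): the ring index advances with
 *
 *      txr_next = (txr_next + 1) & (NUM_TX_DESCRIPTORS - 1);
 *
 * a branch-free modulo that is correct only because
 * NUM_TX_DESCRIPTORS is a power of two.
 */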
2546
2547
2548 #define Q8_CONFIG_IND_TBL_SIZE  32 /* < Q8_RSS_IND_TBL_SIZE and power of 2 */
2549 static int
2550 qla_config_rss_ind_table(qla_host_t *ha)
2551 {
2552         uint32_t i, count;
2553         uint8_t rss_ind_tbl[Q8_CONFIG_IND_TBL_SIZE];
2554
2555
2556         for (i = 0; i < Q8_CONFIG_IND_TBL_SIZE; i++) {
2557                 rss_ind_tbl[i] = i % ha->hw.num_sds_rings;
2558         }
2559
2560         for (i = 0; i <= Q8_RSS_IND_TBL_MAX_IDX ;
2561                 i = i + Q8_CONFIG_IND_TBL_SIZE) {
2562
2563                 if ((i + Q8_CONFIG_IND_TBL_SIZE) > Q8_RSS_IND_TBL_MAX_IDX) {
2564                         count = Q8_RSS_IND_TBL_MAX_IDX - i + 1;
2565                 } else {
2566                         count = Q8_CONFIG_IND_TBL_SIZE;
2567                 }
2568
2569                 if (qla_set_rss_ind_table(ha, i, count, ha->hw.rcv_cntxt_id,
2570                         rss_ind_tbl))
2571                         return (-1);
2572         }
2573
2574         return (0);
2575 }
2576
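/*
 * Worked example (illustrative): with num_sds_rings = 4, the loop
 * above fills every 32-entry chunk of the indirection table as
 *
 *      0 1 2 3 0 1 2 3 ... 0 1 2 3
 *
 * spreading hash values round-robin across the four SDS rings.
 */
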
2577 static int
2578 qla_config_soft_lro(qla_host_t *ha)
2579 {
2580         int i;
2581         qla_hw_t *hw = &ha->hw;
2582         struct lro_ctrl *lro;
2583
2584         for (i = 0; i < hw->num_sds_rings; i++) {
2585                 lro = &hw->sds[i].lro;
2586
2587                 bzero(lro, sizeof(struct lro_ctrl));
2588
2589 #if (__FreeBSD_version >= 1100101)
2590                 if (tcp_lro_init_args(lro, ha->ifp, 0, NUM_RX_DESCRIPTORS)) {
2591                         device_printf(ha->pci_dev,
2592                                 "%s: tcp_lro_init_args [%d] failed\n",
2593                                 __func__, i);
2594                         return (-1);
2595                 }
2596 #else
2597                 if (tcp_lro_init(lro)) {
2598                         device_printf(ha->pci_dev,
2599                                 "%s: tcp_lro_init [%d] failed\n",
2600                                 __func__, i);
2601                         return (-1);
2602                 }
2603 #endif /* #if (__FreeBSD_version >= 1100101) */
2604
2605                 lro->ifp = ha->ifp;
2606         }
2607
2608         QL_DPRINT2(ha, (ha->pci_dev, "%s: LRO initialized\n", __func__));
2609         return (0);
2610 }
2611
2612 static void
2613 qla_drain_soft_lro(qla_host_t *ha)
2614 {
2615         int i;
2616         qla_hw_t *hw = &ha->hw;
2617         struct lro_ctrl *lro;
2618
2619         for (i = 0; i < hw->num_sds_rings; i++) {
2620                 lro = &hw->sds[i].lro;
2621
2622 #if (__FreeBSD_version >= 1100101)
2623                 tcp_lro_flush_all(lro);
2624 #else
2625                 struct lro_entry *queued;
2626
2627                 while ((!SLIST_EMPTY(&lro->lro_active))) {
2628                         queued = SLIST_FIRST(&lro->lro_active);
2629                         SLIST_REMOVE_HEAD(&lro->lro_active, next);
2630                         tcp_lro_flush(lro, queued);
2631                 }
2632 #endif /* #if (__FreeBSD_version >= 1100101) */
2633         }
2634
2635         return;
2636 }
2637
2638 static void
2639 qla_free_soft_lro(qla_host_t *ha)
2640 {
2641         int i;
2642         qla_hw_t *hw = &ha->hw;
2643         struct lro_ctrl *lro;
2644
2645         for (i = 0; i < hw->num_sds_rings; i++) {
2646                 lro = &hw->sds[i].lro;
2647                 tcp_lro_free(lro);
2648         }
2649
2650         return;
2651 }
2652
2653
2654 /*
2655  * Name: ql_del_hw_if
2656  * Function: Destroys the hardware specific entities corresponding to an
2657  *      Ethernet Interface
2658  */
2659 void
2660 ql_del_hw_if(qla_host_t *ha)
2661 {
2662         uint32_t i;
2663         uint32_t num_msix;
2664
2665         (void)qla_stop_nic_func(ha);
2666
2667         qla_del_rcv_cntxt(ha);
2668
2669         qla_del_xmt_cntxt(ha);
2670
2671         if (ha->hw.flags.init_intr_cnxt) {
2672                 for (i = 0; i < ha->hw.num_sds_rings; ) {
2673
2674                         if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2675                                 num_msix = Q8_MAX_INTR_VECTORS;
2676                         else
2677                                 num_msix = ha->hw.num_sds_rings - i;
2678                         qla_config_intr_cntxt(ha, i, num_msix, 0);
2679
2680                         i += num_msix;
2681                 }
2682
2683                 ha->hw.flags.init_intr_cnxt = 0;
2684         }
2685
2686         if (ha->hw.enable_soft_lro) {
2687                 qla_drain_soft_lro(ha);
2688                 qla_free_soft_lro(ha);
2689         }
2690
2691         return;
2692 }
2693
2694 void
2695 qla_confirm_9kb_enable(qla_host_t *ha)
2696 {
2697         uint32_t supports_9kb = 0;
2698
2699         ha->hw.mbx_intr_mask_offset = READ_REG32(ha, Q8_MBOX_INT_MASK_MSIX);
2700
2701         /* Use MSI-X vector 0; Enable Firmware Mailbox Interrupt */
2702         WRITE_REG32(ha, Q8_MBOX_INT_ENABLE, BIT_2);
2703         WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
2704
2705         qla_get_nic_partition(ha, &supports_9kb, NULL);
2706
2707         if (!supports_9kb)
2708                 ha->hw.enable_9kb = 0;
2709
2710         return;
2711 }
2712
2713 /*
2714  * Name: ql_init_hw_if
2715  * Function: Creates the hardware specific entities corresponding to an
2716  *      Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
2717  *      corresponding to the interface. Enables LRO if allowed.
2718  */
2719 int
2720 ql_init_hw_if(qla_host_t *ha)
2721 {
2722         device_t        dev;
2723         uint32_t        i;
2724         uint8_t         bcast_mac[6];
2725         qla_rdesc_t     *rdesc;
2726         uint32_t        num_msix;
2727
2728         dev = ha->pci_dev;
2729
2730         for (i = 0; i < ha->hw.num_sds_rings; i++) {
2731                 bzero(ha->hw.dma_buf.sds_ring[i].dma_b,
2732                         ha->hw.dma_buf.sds_ring[i].size);
2733         }
2734
2735         for (i = 0; i < ha->hw.num_sds_rings; ) {
2736
2737                 if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2738                         num_msix = Q8_MAX_INTR_VECTORS;
2739                 else
2740                         num_msix = ha->hw.num_sds_rings - i;
2741
2742                 if (qla_config_intr_cntxt(ha, i, num_msix, 1)) {
2743
2744                         if (i > 0) {
2745
2746                                 num_msix = i;
2747
2748                                 for (i = 0; i < num_msix; ) {
2749                                         qla_config_intr_cntxt(ha, i,
2750                                                 Q8_MAX_INTR_VECTORS, 0);
2751                                         i += Q8_MAX_INTR_VECTORS;
2752                                 }
2753                         }
2754                         return (-1);
2755                 }
2756
2757                 i = i + num_msix;
2758         }
2759
2760         ha->hw.flags.init_intr_cnxt = 1;
2761
2762         /*
2763          * Create Receive Context
2764          */
2765         if (qla_init_rcv_cntxt(ha)) {
2766                 return (-1);
2767         }
2768
2769         for (i = 0; i < ha->hw.num_rds_rings; i++) {
2770                 rdesc = &ha->hw.rds[i];
2771                 rdesc->rx_next = NUM_RX_DESCRIPTORS - 2;
2772                 rdesc->rx_in = 0;
2773                 /* Update the RDS Producer Indices */
2774                 QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,\
2775                         rdesc->rx_next);
2776         }
2777
2778         /*
2779          * Create Transmit Context
2780          */
2781         if (qla_init_xmt_cntxt(ha)) {
2782                 qla_del_rcv_cntxt(ha);
2783                 return (-1);
2784         }
2785         ha->hw.max_tx_segs = 0;
2786
2787         if (qla_config_mac_addr(ha, ha->hw.mac_addr, 1, 1))
2788                 return (-1);
2789
2790         ha->hw.flags.unicast_mac = 1;
2791
2792         bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
2793         bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
2794
2795         if (qla_config_mac_addr(ha, bcast_mac, 1, 1))
2796                 return (-1);
2797
2798         ha->hw.flags.bcast_mac = 1;
2799
2800         /*
2801          * program any cached multicast addresses
2802          */
2803         if (qla_hw_add_all_mcast(ha))
2804                 return (-1);
2805
2806         if (ql_set_max_mtu(ha, ha->max_frame_size, ha->hw.rcv_cntxt_id))
2807                 return (-1);
2808
2809         if (qla_config_rss(ha, ha->hw.rcv_cntxt_id))
2810                 return (-1);
2811
2812         if (qla_config_rss_ind_table(ha))
2813                 return (-1);
2814
2815         if (qla_config_intr_coalesce(ha, ha->hw.rcv_cntxt_id, 0, 1))
2816                 return (-1);
2817
2818         if (qla_link_event_req(ha, ha->hw.rcv_cntxt_id))
2819                 return (-1);
2820
2821         if (ha->ifp->if_capenable & IFCAP_LRO) {
2822                 if (ha->hw.enable_hw_lro) {
2823                         ha->hw.enable_soft_lro = 0;
2824
2825                         if (qla_config_fw_lro(ha, ha->hw.rcv_cntxt_id))
2826                                 return (-1);
2827                 } else {
2828                         ha->hw.enable_soft_lro = 1;
2829
2830                         if (qla_config_soft_lro(ha))
2831                                 return (-1);
2832                 }
2833         }
2834
2835         if (qla_init_nic_func(ha))
2836                 return (-1);
2837
2838         if (qla_query_fw_dcbx_caps(ha))
2839                 return (-1);
2840
2841         for (i = 0; i < ha->hw.num_sds_rings; i++)
2842                 QL_ENABLE_INTERRUPTS(ha, i);
2843
2844         return (0);
2845 }
2846
2847 static int
2848 qla_map_sds_to_rds(qla_host_t *ha, uint32_t start_idx, uint32_t num_idx)
2849 {
2850         device_t                dev = ha->pci_dev;
2851         q80_rq_map_sds_to_rds_t *map_rings;
2852         q80_rsp_map_sds_to_rds_t *map_rings_rsp;
2853         uint32_t                i, err;
2854         qla_hw_t                *hw = &ha->hw;
2855
2856         map_rings = (q80_rq_map_sds_to_rds_t *)ha->hw.mbox;
2857         bzero(map_rings, sizeof(q80_rq_map_sds_to_rds_t));
2858
2859         map_rings->opcode = Q8_MBX_MAP_SDS_TO_RDS;
2860         map_rings->count_version = (sizeof (q80_rq_map_sds_to_rds_t) >> 2);
2861         map_rings->count_version |= Q8_MBX_CMD_VERSION;
2862
2863         map_rings->cntxt_id = hw->rcv_cntxt_id;
2864         map_rings->num_rings = num_idx;
2865
2866         for (i = 0; i < num_idx; i++) {
2867                 map_rings->sds_rds[i].sds_ring = i + start_idx;
2868                 map_rings->sds_rds[i].rds_ring = i + start_idx;
2869         }
2870
2871         if (qla_mbx_cmd(ha, (uint32_t *)map_rings,
2872                 (sizeof (q80_rq_map_sds_to_rds_t) >> 2),
2873                 ha->hw.mbox, (sizeof(q80_rsp_map_sds_to_rds_t) >> 2), 0)) {
2874                 device_printf(dev, "%s: failed0\n", __func__);
2875                 return (-1);
2876         }
2877
2878         map_rings_rsp = (q80_rsp_map_sds_to_rds_t *)ha->hw.mbox;
2879
2880         err = Q8_MBX_RSP_STATUS(map_rings_rsp->regcnt_status);
2881
2882         if (err) {
2883                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2884                 return (-1);
2885         }
2886
2887         return (0);
2888 }
2889
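/*
 * Note: the mapping programmed above is the identity, i.e. SDS ring i
 * is paired with RDS ring i, so completions for a receive ring are
 * posted to the status ring with the same index:
 *
 *      map_rings->sds_rds[i].sds_ring = i + start_idx;
 *      map_rings->sds_rds[i].rds_ring = i + start_idx;
 */
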
2890 /*
2891  * Name: qla_init_rcv_cntxt
2892  * Function: Creates the Receive Context.
2893  */
2894 static int
2895 qla_init_rcv_cntxt(qla_host_t *ha)
2896 {
2897         q80_rq_rcv_cntxt_t      *rcntxt;
2898         q80_rsp_rcv_cntxt_t     *rcntxt_rsp;
2899         q80_stat_desc_t         *sdesc;
2900         int                     i, j;
2901         qla_hw_t                *hw = &ha->hw;
2902         device_t                dev;
2903         uint32_t                err;
2904         uint32_t                rcntxt_sds_rings;
2905         uint32_t                rcntxt_rds_rings;
2906         uint32_t                max_idx;
2907
2908         dev = ha->pci_dev;
2909
2910         /*
2911          * Create Receive Context
2912          */
2913
2914         for (i = 0; i < hw->num_sds_rings; i++) {
2915                 sdesc = (q80_stat_desc_t *)&hw->sds[i].sds_ring_base[0];
2916
2917                 for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++, sdesc++) {
2918                         sdesc->data[0] = 1ULL;
2919                         sdesc->data[1] = 1ULL;
2920                 }
2921         }
2922
2923         rcntxt_sds_rings = hw->num_sds_rings;
2924         if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS)
2925                 rcntxt_sds_rings = MAX_RCNTXT_SDS_RINGS;
2926
2927         rcntxt_rds_rings = hw->num_rds_rings;
2928
2929         if (hw->num_rds_rings > MAX_RDS_RING_SETS)
2930                 rcntxt_rds_rings = MAX_RDS_RING_SETS;
2931
2932         rcntxt = (q80_rq_rcv_cntxt_t *)ha->hw.mbox;
2933         bzero(rcntxt, (sizeof (q80_rq_rcv_cntxt_t)));
2934
2935         rcntxt->opcode = Q8_MBX_CREATE_RX_CNTXT;
2936         rcntxt->count_version = (sizeof (q80_rq_rcv_cntxt_t) >> 2);
2937         rcntxt->count_version |= Q8_MBX_CMD_VERSION;
2938
2939         rcntxt->cap0 = Q8_RCV_CNTXT_CAP0_BASEFW |
2940                         Q8_RCV_CNTXT_CAP0_LRO |
2941                         Q8_RCV_CNTXT_CAP0_HW_LRO |
2942                         Q8_RCV_CNTXT_CAP0_RSS |
2943                         Q8_RCV_CNTXT_CAP0_SGL_LRO;
2944
2945         if (ha->hw.enable_9kb)
2946                 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SINGLE_JUMBO;
2947         else
2948                 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SGL_JUMBO;
2949
2950         if (ha->hw.num_rds_rings > 1) {
2951                 rcntxt->nrds_sets_rings = rcntxt_rds_rings | (1 << 5);
2952                 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_MULTI_RDS;
2953         } else
2954                 rcntxt->nrds_sets_rings = 0x1 | (1 << 5);
2955
2956         rcntxt->nsds_rings = rcntxt_sds_rings;
2957
2958         rcntxt->rds_producer_mode = Q8_RCV_CNTXT_RDS_PROD_MODE_UNIQUE;
2959
2960         rcntxt->rcv_vpid = 0;
2961
2962         for (i = 0; i <  rcntxt_sds_rings; i++) {
2963                 rcntxt->sds[i].paddr =
2964                         qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr);
2965                 rcntxt->sds[i].size =
2966                         qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
2967                 rcntxt->sds[i].intr_id = qla_host_to_le16(hw->intr_id[i]);
2968                 rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0);
2969         }
2970
2971         for (i = 0; i <  rcntxt_rds_rings; i++) {
2972                 rcntxt->rds[i].paddr_std =
2973                         qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr);
2974
2975                 if (ha->hw.enable_9kb)
2976                         rcntxt->rds[i].std_bsize =
2977                                 qla_host_to_le64(MJUM9BYTES);
2978                 else
2979                         rcntxt->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
2980
2981                 rcntxt->rds[i].std_nentries =
2982                         qla_host_to_le32(NUM_RX_DESCRIPTORS);
2983         }
2984
2985         if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
2986                 (sizeof (q80_rq_rcv_cntxt_t) >> 2),
2987                 ha->hw.mbox, (sizeof(q80_rsp_rcv_cntxt_t) >> 2), 0)) {
2988                 device_printf(dev, "%s: failed0\n", __func__);
2989                 return (-1);
2990         }
2991
2992         rcntxt_rsp = (q80_rsp_rcv_cntxt_t *)ha->hw.mbox;
2993
2994         err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
2995
2996         if (err) {
2997                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2998                 return (-1);
2999         }
3000
3001         for (i = 0; i <  rcntxt_sds_rings; i++) {
3002                 hw->sds[i].sds_consumer = rcntxt_rsp->sds_cons[i];
3003         }
3004
3005         for (i = 0; i <  rcntxt_rds_rings; i++) {
3006                 hw->rds[i].prod_std = rcntxt_rsp->rds[i].prod_std;
3007         }
3008
3009         hw->rcv_cntxt_id = rcntxt_rsp->cntxt_id;
3010
3011         ha->hw.flags.init_rx_cnxt = 1;
3012
3013         if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) {
3014
3015                 for (i = MAX_RCNTXT_SDS_RINGS; i < hw->num_sds_rings;) {
3016
3017                         if ((i + MAX_RCNTXT_SDS_RINGS) < hw->num_sds_rings)
3018                                 max_idx = MAX_RCNTXT_SDS_RINGS;
3019                         else
3020                                 max_idx = hw->num_sds_rings - i;
3021
3022                         err = qla_add_rcv_rings(ha, i, max_idx);
3023                         if (err)
3024                                 return -1;
3025
3026                         i += max_idx;
3027                 }
3028         }
3029
3030         if (hw->num_rds_rings > 1) {
3031
3032                 for (i = 0; i < hw->num_rds_rings; ) {
3033
3034                         if ((i + MAX_SDS_TO_RDS_MAP) < hw->num_rds_rings)
3035                                 max_idx = MAX_SDS_TO_RDS_MAP;
3036                         else
3037                                 max_idx = hw->num_rds_rings - i;
3038
3039                         err = qla_map_sds_to_rds(ha, i, max_idx);
3040                         if (err)
3041                                 return -1;
3042
3043                         i += max_idx;
3044                 }
3045         }
3046
3047         return (0);
3048 }
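
/*
 * Editor's sketch (not part of the driver): the two loops above hand the
 * remaining SDS/RDS rings to firmware in fixed-size batches
 * (MAX_RCNTXT_SDS_RINGS / MAX_SDS_TO_RDS_MAP at a time), clamping the last
 * batch to whatever is left.  The chunking arithmetic in isolation, with
 * hypothetical names (total, batch_max, process_batch):
 */
static int
qla_example_batched_walk(uint32_t total, uint32_t batch_max,
        int (*process_batch)(uint32_t start, uint32_t count))
{
        uint32_t i = 0, n;

        while (i < total) {
                /* a full batch while enough items remain, else the tail */
                n = ((i + batch_max) < total) ? batch_max : (total - i);

                if (process_batch(i, n))
                        return (-1);

                i += n;
        }
        return (0);
}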
3049
3050 static int
3051 qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds)
3052 {
3053         device_t                dev = ha->pci_dev;
3054         q80_rq_add_rcv_rings_t  *add_rcv;
3055         q80_rsp_add_rcv_rings_t *add_rcv_rsp;
3056         uint32_t                i, j, err;
3057         qla_hw_t                *hw = &ha->hw;
3058
3059         add_rcv = (q80_rq_add_rcv_rings_t *)ha->hw.mbox;
3060         bzero(add_rcv, sizeof (q80_rq_add_rcv_rings_t));
3061
3062         add_rcv->opcode = Q8_MBX_ADD_RX_RINGS;
3063         add_rcv->count_version = (sizeof (q80_rq_add_rcv_rings_t) >> 2);
3064         add_rcv->count_version |= Q8_MBX_CMD_VERSION;
3065
3066         add_rcv->nrds_sets_rings = nsds | (1 << 5);
3067         add_rcv->nsds_rings = nsds;
3068         add_rcv->cntxt_id = hw->rcv_cntxt_id;
3069
3070         for (i = 0; i <  nsds; i++) {
3071
3072                 j = i + sds_idx;
3073
3074                 add_rcv->sds[i].paddr =
3075                         qla_host_to_le64(hw->dma_buf.sds_ring[j].dma_addr);
3076
3077                 add_rcv->sds[i].size =
3078                         qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
3079
3080                 add_rcv->sds[i].intr_id = qla_host_to_le16(hw->intr_id[j]);
3081                 add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0);
3082
3083         }
3084
3085         for (i = 0; (i <  nsds); i++) {
3086                 j = i + sds_idx;
3087
3088                 add_rcv->rds[i].paddr_std =
3089                         qla_host_to_le64(hw->dma_buf.rds_ring[j].dma_addr);
3090
3091                 if (ha->hw.enable_9kb)
3092                         add_rcv->rds[i].std_bsize =
3093                                 qla_host_to_le64(MJUM9BYTES);
3094                 else
3095                         add_rcv->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
3096
3097                 add_rcv->rds[i].std_nentries =
3098                         qla_host_to_le32(NUM_RX_DESCRIPTORS);
3099         }
3100
3101
3102         if (qla_mbx_cmd(ha, (uint32_t *)add_rcv,
3103                 (sizeof (q80_rq_add_rcv_rings_t) >> 2),
3104                 ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
3105                 device_printf(dev, "%s: failed0\n", __func__);
3106                 return (-1);
3107         }
3108
3109         add_rcv_rsp = (q80_rsp_add_rcv_rings_t *)ha->hw.mbox;
3110
3111         err = Q8_MBX_RSP_STATUS(add_rcv_rsp->regcnt_status);
3112
3113         if (err) {
3114                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3115                 return (-1);
3116         }
3117
3118         for (i = 0; i < nsds; i++) {
3119                 hw->sds[(i + sds_idx)].sds_consumer = add_rcv_rsp->sds_cons[i];
3120         }
3121
3122         for (i = 0; i < nsds; i++) {
3123                 hw->rds[(i + sds_idx)].prod_std = add_rcv_rsp->rds[i].prod_std;
3124         }
3125
3126         return (0);
3127 }
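
/*
 * Editor's note (hedged sketch, inferred only from the requests built in
 * this file): every mailbox command follows the same framing -- the first
 * word carries the opcode, and count_version packs the request length in
 * 32-bit words with Q8_MBX_CMD_VERSION OR'ed in; Q8_MBX_RSP_STATUS() then
 * unpacks the completion status from the response.  The encoding step, as
 * a hypothetical helper:
 */
static uint32_t
qla_example_count_version(size_t req_len_bytes)
{
        /* request length in 32-bit words, tagged with the command version */
        return ((uint32_t)(req_len_bytes >> 2) | Q8_MBX_CMD_VERSION);
}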
3128
3129 /*
3130  * Name: qla_del_rcv_cntxt
3131  * Function: Destroys the Receive Context.
3132  */
3133 static void
3134 qla_del_rcv_cntxt(qla_host_t *ha)
3135 {
3136         device_t                        dev = ha->pci_dev;
3137         q80_rcv_cntxt_destroy_t         *rcntxt;
3138         q80_rcv_cntxt_destroy_rsp_t     *rcntxt_rsp;
3139         uint32_t                        err;
3140         uint8_t                         bcast_mac[6];
3141
3142         if (!ha->hw.flags.init_rx_cnxt)
3143                 return;
3144
3145         if (qla_hw_del_all_mcast(ha))
3146                 return;
3147
3148         if (ha->hw.flags.bcast_mac) {
3149
3150                 bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
3151                 bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
3152
3153                 if (qla_config_mac_addr(ha, bcast_mac, 0, 1))
3154                         return;
3155                 ha->hw.flags.bcast_mac = 0;
3156
3157         }
3158
3159         if (ha->hw.flags.unicast_mac) {
3160                 if (qla_config_mac_addr(ha, ha->hw.mac_addr, 0, 1))
3161                         return;
3162                 ha->hw.flags.unicast_mac = 0;
3163         }
3164
3165         rcntxt = (q80_rcv_cntxt_destroy_t *)ha->hw.mbox;
3166         bzero(rcntxt, (sizeof (q80_rcv_cntxt_destroy_t)));
3167
3168         rcntxt->opcode = Q8_MBX_DESTROY_RX_CNTXT;
3169         rcntxt->count_version = (sizeof (q80_rcv_cntxt_destroy_t) >> 2);
3170         rcntxt->count_version |= Q8_MBX_CMD_VERSION;
3171
3172         rcntxt->cntxt_id = ha->hw.rcv_cntxt_id;
3173
3174         if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
3175                 (sizeof (q80_rcv_cntxt_destroy_t) >> 2),
3176                 ha->hw.mbox, (sizeof(q80_rcv_cntxt_destroy_rsp_t) >> 2), 0)) {
3177                 device_printf(dev, "%s: failed0\n", __func__);
3178                 return;
3179         }
3180         rcntxt_rsp = (q80_rcv_cntxt_destroy_rsp_t *)ha->hw.mbox;
3181
3182         err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
3183
3184         if (err) {
3185                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3186         }
3187
3188         ha->hw.flags.init_rx_cnxt = 0;
3189         return;
3190 }
3191
3192 /*
3193  * Name: qla_init_xmt_cntxt_i
3194  * Function: Creates the Transmit Context for the given transmit ring.
3195  */
3196 static int
3197 qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
3198 {
3199         device_t                dev;
3200         qla_hw_t                *hw = &ha->hw;
3201         q80_rq_tx_cntxt_t       *tcntxt;
3202         q80_rsp_tx_cntxt_t      *tcntxt_rsp;
3203         uint32_t                err;
3204         qla_hw_tx_cntxt_t       *hw_tx_cntxt;
3205         uint32_t                intr_idx;
3206
3207         hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
3208
3209         dev = ha->pci_dev;
3210
3211         /*
3212          * Create Transmit Context
3213          */
3214         tcntxt = (q80_rq_tx_cntxt_t *)ha->hw.mbox;
3215         bzero(tcntxt, (sizeof (q80_rq_tx_cntxt_t)));
3216
3217         tcntxt->opcode = Q8_MBX_CREATE_TX_CNTXT;
3218         tcntxt->count_version = (sizeof (q80_rq_tx_cntxt_t) >> 2);
3219         tcntxt->count_version |= Q8_MBX_CMD_VERSION;
3220
3221         intr_idx = txr_idx;
3222
3223 #ifdef QL_ENABLE_ISCSI_TLV
3224
3225         tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO |
3226                                 Q8_TX_CNTXT_CAP0_TC;
3227
3228         if (txr_idx >= (ha->hw.num_tx_rings >> 1)) {
3229                 tcntxt->traffic_class = 1;
3230         }
3231
3232         intr_idx = txr_idx % (ha->hw.num_tx_rings >> 1);
3233
3234 #else
3235         tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO;
3236
3237 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
3238
3239         tcntxt->ntx_rings = 1;
3240
3241         tcntxt->tx_ring[0].paddr =
3242                 qla_host_to_le64(hw_tx_cntxt->tx_ring_paddr);
3243         tcntxt->tx_ring[0].tx_consumer =
3244                 qla_host_to_le64(hw_tx_cntxt->tx_cons_paddr);
3245         tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS);
3246
3247         tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[intr_idx]);
3248         tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0);
3249
3250         hw_tx_cntxt->txr_free = NUM_TX_DESCRIPTORS;
3251         hw_tx_cntxt->txr_next = hw_tx_cntxt->txr_comp = 0;
3252
3253         if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
3254                 (sizeof (q80_rq_tx_cntxt_t) >> 2),
3255                 ha->hw.mbox,
3256                 (sizeof(q80_rsp_tx_cntxt_t) >> 2), 0)) {
3257                 device_printf(dev, "%s: failed0\n", __func__);
3258                 return (-1);
3259         }
3260         tcntxt_rsp = (q80_rsp_tx_cntxt_t *)ha->hw.mbox;
3261
3262         err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
3263
3264         if (err) {
3265                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3266                 return -1;
3267         }
3268
3269         hw_tx_cntxt->tx_prod_reg = tcntxt_rsp->tx_ring[0].prod_index;
3270         hw_tx_cntxt->tx_cntxt_id = tcntxt_rsp->tx_ring[0].cntxt_id;
3271
3272         if (qla_config_intr_coalesce(ha, hw_tx_cntxt->tx_cntxt_id, 0, 0))
3273                 return (-1);
3274
3275         return (0);
3276 }
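
/*
 * Editor's sketch (assumes an even num_tx_rings >= 2, as the code above
 * implies): with QL_ENABLE_ISCSI_TLV, the upper half of the transmit
 * rings is placed in traffic class 1 while both halves share the lower
 * half's interrupt vectors.  For 8 rings: txr_idx 0-3 -> TC 0, intr 0-3;
 * txr_idx 4-7 -> TC 1, intr 0-3.  Hypothetical helper:
 */
static void
qla_example_tlv_map(uint32_t txr_idx, uint32_t num_tx_rings,
        uint32_t *tclass, uint32_t *intr_idx)
{
        uint32_t half = num_tx_rings >> 1;

        *tclass = (txr_idx >= half) ? 1 : 0;    /* upper half -> TC 1 */
        *intr_idx = txr_idx % half;             /* wrap onto lower half */
}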
3277
3278
3279 /*
3280  * Name: qla_del_xmt_cntxt_i
3281  * Function: Destroys the Transmit Context for the given transmit ring.
3282  */
3283 static int
3284 qla_del_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
3285 {
3286         device_t                        dev = ha->pci_dev;
3287         q80_tx_cntxt_destroy_t          *tcntxt;
3288         q80_tx_cntxt_destroy_rsp_t      *tcntxt_rsp;
3289         uint32_t                        err;
3290
3291         tcntxt = (q80_tx_cntxt_destroy_t *)ha->hw.mbox;
3292         bzero(tcntxt, (sizeof (q80_tx_cntxt_destroy_t)));
3293
3294         tcntxt->opcode = Q8_MBX_DESTROY_TX_CNTXT;
3295         tcntxt->count_version = (sizeof (q80_tx_cntxt_destroy_t) >> 2);
3296         tcntxt->count_version |= Q8_MBX_CMD_VERSION;
3297
3298         tcntxt->cntxt_id = ha->hw.tx_cntxt[txr_idx].tx_cntxt_id;
3299
3300         if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
3301                 (sizeof (q80_tx_cntxt_destroy_t) >> 2),
3302                 ha->hw.mbox, (sizeof (q80_tx_cntxt_destroy_rsp_t) >> 2), 0)) {
3303                 device_printf(dev, "%s: failed0\n", __func__);
3304                 return (-1);
3305         }
3306         tcntxt_rsp = (q80_tx_cntxt_destroy_rsp_t *)ha->hw.mbox;
3307
3308         err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
3309
3310         if (err) {
3311                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3312                 return (-1);
3313         }
3314
3315         return (0);
3316 }
3317 static void
3318 qla_del_xmt_cntxt(qla_host_t *ha)
3319 {
3320         uint32_t i;
3321
3322         if (!ha->hw.flags.init_tx_cnxt)
3323                 return;
3324
3325         for (i = 0; i < ha->hw.num_tx_rings; i++) {
3326                 if (qla_del_xmt_cntxt_i(ha, i))
3327                         break;
3328         }
3329         ha->hw.flags.init_tx_cnxt = 0;
3330 }
3331
3332 static int
3333 qla_init_xmt_cntxt(qla_host_t *ha)
3334 {
3335         uint32_t i, j;
3336
3337         for (i = 0; i < ha->hw.num_tx_rings; i++) {
3338                 if (qla_init_xmt_cntxt_i(ha, i) != 0) {
3339                         for (j = 0; j < i; j++)
3340                                 qla_del_xmt_cntxt_i(ha, j);
3341                         return (-1);
3342                 }
3343         }
3344         ha->hw.flags.init_tx_cnxt = 1;
3345         return (0);
3346 }
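
/*
 * Editor's note: qla_init_xmt_cntxt() uses a create-all-or-roll-back
 * pattern so a partial bring-up never leaks contexts.  A generic sketch
 * of that pattern with hypothetical create/destroy callbacks:
 */
static int
qla_example_create_all(qla_host_t *ha, uint32_t count,
        int (*create)(qla_host_t *, uint32_t),
        int (*destroy)(qla_host_t *, uint32_t))
{
        uint32_t i, j;

        for (i = 0; i < count; i++) {
                if (create(ha, i) != 0) {
                        /* unwind everything created before the failure */
                        for (j = 0; j < i; j++)
                                (void)destroy(ha, j);
                        return (-1);
                }
        }
        return (0);
}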
3347
3348 static int
3349 qla_hw_all_mcast(qla_host_t *ha, uint32_t add_mcast)
3350 {
3351         int i, nmcast;
3352         uint32_t count = 0;
3353         uint8_t *mcast;
3354
3355         nmcast = ha->hw.nmcast;
3356
3357         QL_DPRINT2(ha, (ha->pci_dev,
3358                 "%s:[0x%x] enter nmcast = %d \n", __func__, add_mcast, nmcast));
3359
3360         mcast = ha->hw.mac_addr_arr;
3361         memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3362
3363         for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
3364                 if ((ha->hw.mcast[i].addr[0] != 0) || 
3365                         (ha->hw.mcast[i].addr[1] != 0) ||
3366                         (ha->hw.mcast[i].addr[2] != 0) ||
3367                         (ha->hw.mcast[i].addr[3] != 0) ||
3368                         (ha->hw.mcast[i].addr[4] != 0) ||
3369                         (ha->hw.mcast[i].addr[5] != 0)) {
3370
3371                         bcopy(ha->hw.mcast[i].addr, mcast, ETHER_ADDR_LEN);
3372                         mcast = mcast + ETHER_ADDR_LEN;
3373                         count++;
3374                         
3375                         if (count == Q8_MAX_MAC_ADDRS) {
3376                                 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr,
3377                                         add_mcast, count)) {
3378                                         device_printf(ha->pci_dev,
3379                                                 "%s: failed\n", __func__);
3380                                         return (-1);
3381                                 }
3382
3383                                 count = 0;
3384                                 mcast = ha->hw.mac_addr_arr;
3385                                 memset(mcast, 0,
3386                                         (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3387                         }
3388
3389                         nmcast--;
3390                 }
3391         }
3392
3393         if (count) {
3394                 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mcast,
3395                         count)) {
3396                         device_printf(ha->pci_dev, "%s: failed\n", __func__);
3397                         return (-1);
3398                 }
3399         }
3400         QL_DPRINT2(ha, (ha->pci_dev,
3401                 "%s:[0x%x] exit nmcast = %d \n", __func__, add_mcast, nmcast));
3402
3403         return 0;
3404 }
3405
3406 static int
3407 qla_hw_add_all_mcast(qla_host_t *ha)
3408 {
3409         int ret;
3410
3411         ret = qla_hw_all_mcast(ha, 1);
3412
3413         return (ret);
3414 }
3415
3416 static int
3417 qla_hw_del_all_mcast(qla_host_t *ha)
3418 {
3419         int ret;
3420
3421         ret = qla_hw_all_mcast(ha, 0);
3422
3423         bzero(ha->hw.mcast, (sizeof (qla_mcast_t) * Q8_MAX_NUM_MULTICAST_ADDRS));
3424         ha->hw.nmcast = 0;
3425
3426         return (ret);
3427 }
3428
3429 static int
3430 qla_hw_mac_addr_present(qla_host_t *ha, uint8_t *mta)
3431 {
3432         int i;
3433
3434         for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3435                 if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0)
3436                         return (0); /* it has already been added */
3437         }
3438         return (-1);
3439 }
3440
3441 static int
3442 qla_hw_add_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
3443 {
3444         int i;
3445
3446         for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3447
3448                 if ((ha->hw.mcast[i].addr[0] == 0) && 
3449                         (ha->hw.mcast[i].addr[1] == 0) &&
3450                         (ha->hw.mcast[i].addr[2] == 0) &&
3451                         (ha->hw.mcast[i].addr[3] == 0) &&
3452                         (ha->hw.mcast[i].addr[4] == 0) &&
3453                         (ha->hw.mcast[i].addr[5] == 0)) {
3454
3455                         bcopy(mta, ha->hw.mcast[i].addr, Q8_MAC_ADDR_LEN);
3456                         ha->hw.nmcast++;        
3457
3458                         mta = mta + ETHER_ADDR_LEN;
3459                         nmcast--;
3460
3461                         if (nmcast == 0)
3462                                 break;
3463                 }
3464
3465         }
3466         return 0;
3467 }
3468
3469 static int
3470 qla_hw_del_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
3471 {
3472         int i;
3473
3474         for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3475                 if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) {
3476
3477                         ha->hw.mcast[i].addr[0] = 0;
3478                         ha->hw.mcast[i].addr[1] = 0;
3479                         ha->hw.mcast[i].addr[2] = 0;
3480                         ha->hw.mcast[i].addr[3] = 0;
3481                         ha->hw.mcast[i].addr[4] = 0;
3482                         ha->hw.mcast[i].addr[5] = 0;
3483
3484                         ha->hw.nmcast--;        
3485
3486                         mta = mta + ETHER_ADDR_LEN;
3487                         nmcast--;
3488
3489                         if (nmcast == 0)
3490                                 break;
3491                 }
3492         }
3493         return 0;
3494 }
3495
3496 /*
3497  * Name: ql_hw_set_multi
3498  * Function: Sets the Multicast Addresses provided by the host OS into the
3499  *      hardware (for the given interface).
3500  */
3501 int
3502 ql_hw_set_multi(qla_host_t *ha, uint8_t *mcast_addr, uint32_t mcnt,
3503         uint32_t add_mac)
3504 {
3505         uint8_t *mta = mcast_addr;
3506         int i;
3507         int ret = 0;
3508         uint32_t count = 0;
3509         uint8_t *mcast;
3510
3511         mcast = ha->hw.mac_addr_arr;
3512         memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3513
3514         for (i = 0; i < mcnt; i++) {
3515                 if (mta[0] || mta[1] || mta[2] || mta[3] || mta[4] || mta[5]) {
3516                         if (add_mac) {
3517                                 if (qla_hw_mac_addr_present(ha, mta) != 0) {
3518                                         bcopy(mta, mcast, ETHER_ADDR_LEN);
3519                                         mcast = mcast + ETHER_ADDR_LEN;
3520                                         count++;
3521                                 }
3522                         } else {
3523                                 if (qla_hw_mac_addr_present(ha, mta) == 0) {
3524                                         bcopy(mta, mcast, ETHER_ADDR_LEN);
3525                                         mcast = mcast + ETHER_ADDR_LEN;
3526                                         count++;
3527                                 }
3528                         }
3529                 }
3530                 if (count == Q8_MAX_MAC_ADDRS) {
3531                         if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr,
3532                                 add_mac, count)) {
3533                                 device_printf(ha->pci_dev, "%s: failed\n",
3534                                         __func__);
3535                                 return (-1);
3536                         }
3537
3538                         if (add_mac) {
3539                                 qla_hw_add_mcast(ha, ha->hw.mac_addr_arr,
3540                                         count);
3541                         } else {
3542                                 qla_hw_del_mcast(ha, ha->hw.mac_addr_arr,
3543                                         count);
3544                         }
3545
3546                         count = 0;
3547                         mcast = ha->hw.mac_addr_arr;
3548                         memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3549                 }
3550                         
3551                 mta += Q8_MAC_ADDR_LEN;
3552         }
3553
3554         if (count) {
3555                 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mac,
3556                         count)) {
3557                         device_printf(ha->pci_dev, "%s: failed\n", __func__);
3558                         return (-1);
3559                 }
3560                 if (add_mac) {
3561                         qla_hw_add_mcast(ha, ha->hw.mac_addr_arr, count);
3562                 } else {
3563                         qla_hw_del_mcast(ha, ha->hw.mac_addr_arr, count);
3564                 }
3565         }
3566
3567         return (ret);
3568 }
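
/*
 * Editor's sketch of the stage-and-flush pattern used above: addresses
 * accumulate in a staging array and are pushed to firmware whenever
 * Q8_MAX_MAC_ADDRS are pending, with one final flush for the remainder.
 * The names staged/cap/flush are hypothetical:
 */
static int
qla_example_stage_flush(uint8_t *staged, uint32_t cap, const uint8_t *src,
        uint32_t nsrc, int (*flush)(uint8_t *, uint32_t))
{
        uint32_t i, count = 0;

        for (i = 0; i < nsrc; i++, src += ETHER_ADDR_LEN) {
                bcopy(src, &staged[count * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
                if (++count == cap) {           /* staging array is full */
                        if (flush(staged, count))
                                return (-1);
                        count = 0;
                }
        }
        return (count ? flush(staged, count) : 0);
}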
3569
3570 /*
3571  * Name: ql_hw_tx_done_locked
3572  * Function: Handle Transmit Completions
3573  */
3574 void
3575 ql_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx)
3576 {
3577         qla_tx_buf_t *txb;
3578         qla_hw_t *hw = &ha->hw;
3579         uint32_t comp_idx, comp_count = 0;
3580         qla_hw_tx_cntxt_t *hw_tx_cntxt;
3581
3582         hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
3583
3584         /* retrieve index of last entry in tx ring completed */
3585         comp_idx = qla_le32_to_host(*(hw_tx_cntxt->tx_cons));
3586
3587         while (comp_idx != hw_tx_cntxt->txr_comp) {
3588
3589                 txb = &ha->tx_ring[txr_idx].tx_buf[hw_tx_cntxt->txr_comp];
3590
3591                 hw_tx_cntxt->txr_comp++;
3592                 if (hw_tx_cntxt->txr_comp == NUM_TX_DESCRIPTORS)
3593                         hw_tx_cntxt->txr_comp = 0;
3594
3595                 comp_count++;
3596
3597                 if (txb->m_head) {
3598                         ha->ifp->if_opackets++;
3599
3600                         bus_dmamap_sync(ha->tx_tag, txb->map,
3601                                 BUS_DMASYNC_POSTWRITE);
3602                         bus_dmamap_unload(ha->tx_tag, txb->map);
3603                         m_freem(txb->m_head);
3604
3605                         txb->m_head = NULL;
3606                 }
3607         }
3608
3609         hw_tx_cntxt->txr_free += comp_count;
3610         return;
3611 }
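
/*
 * Editor's note: the completion walk above is plain circular-buffer
 * arithmetic -- advance the software completion index until it meets the
 * hardware consumer index that firmware DMA'ed into *tx_cons, counting
 * the slots reclaimed.  A minimal sketch (hw_cons is assumed to already
 * be < nentries):
 */
static uint32_t
qla_example_ring_reclaim(uint32_t hw_cons, uint32_t *sw_comp,
        uint32_t nentries)
{
        uint32_t freed = 0;

        while (*sw_comp != hw_cons) {
                *sw_comp = (*sw_comp + 1) % nentries;   /* wrap at end */
                freed++;
        }
        return (freed);         /* caller adds this back to txr_free */
}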
3612
3613 void
3614 ql_update_link_state(qla_host_t *ha)
3615 {
3616         uint32_t link_state;
3617         uint32_t prev_link_state;
3618
3619         if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3620                 ha->hw.link_up = 0;
3621                 return;
3622         }
3623         link_state = READ_REG32(ha, Q8_LINK_STATE);
3624
3625         prev_link_state =  ha->hw.link_up;
3626
3627         if (ha->pci_func == 0) 
3628                 ha->hw.link_up = (((link_state & 0xF) == 1)? 1 : 0);
3629         else
3630                 ha->hw.link_up = ((((link_state >> 4)& 0xF) == 1)? 1 : 0);
3631
3632         if (prev_link_state !=  ha->hw.link_up) {
3633                 if (ha->hw.link_up) {
3634                         if_link_state_change(ha->ifp, LINK_STATE_UP);
3635                 } else {
3636                         if_link_state_change(ha->ifp, LINK_STATE_DOWN);
3637                 }
3638         }
3639         return;
3640 }
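
/*
 * Editor's sketch mirroring the decode above: Q8_LINK_STATE appears to
 * hold a 4-bit state per port -- bits 0-3 for PCI function 0, bits 4-7
 * otherwise -- with the value 1 meaning link up.  Hypothetical helper:
 */
static int
qla_example_link_up(uint32_t link_state, int pci_func)
{
        uint32_t shift = (pci_func == 0) ? 0 : 4;

        return (((link_state >> shift) & 0xF) == 1);
}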
3641
3642 int
3643 ql_hw_check_health(qla_host_t *ha)
3644 {
3645         uint32_t val;
3646
3647         ha->hw.health_count++;
3648
3649         if (ha->hw.health_count < 500)
3650                 return 0;
3651
3652         ha->hw.health_count = 0;
3653
3654         val = READ_REG32(ha, Q8_ASIC_TEMPERATURE);
3655
3656         if (((val & 0xFFFF) == 2) || ((val & 0xFFFF) == 3) ||
3657                 (QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE))) {
3658                 device_printf(ha->pci_dev, "%s: Temperature Alert [0x%08x]\n",
3659                         __func__, val);
3660                 return -1;
3661         }
3662
3663         val = READ_REG32(ha, Q8_FIRMWARE_HEARTBEAT);
3664
3665         if ((val != ha->hw.hbeat_value) &&
3666                 (!(QL_ERR_INJECT(ha, INJCT_HEARTBEAT_FAILURE)))) {
3667                 ha->hw.hbeat_value = val;
3668                 ha->hw.hbeat_failure = 0;
3669                 return 0;
3670         }
3671
3672         ha->hw.hbeat_failure++;
3673
3674         
3675         if ((ha->dbg_level & 0x8000) && (ha->hw.hbeat_failure == 1))
3676                 device_printf(ha->pci_dev, "%s: Heartbeat Failure 1 [0x%08x]\n",
3677                         __func__, val);
3678         if (ha->hw.hbeat_failure < 2) /* we ignore the first failure */
3679                 return 0;
3680         else 
3681                 device_printf(ha->pci_dev, "%s: Heartbeat Failure [0x%08x]\n",
3682                         __func__, val);
3683
3684         return -1;
3685 }
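
/*
 * Editor's note: the heartbeat test above distills to "the counter must
 * tick between polls, and the first miss is forgiven".  A hedged sketch
 * with hypothetical state pointers:
 */
static int
qla_example_hbeat_ok(uint32_t cur, uint32_t *last, uint32_t *misses)
{
        if (cur != *last) {
                *last = cur;            /* firmware ticked; reset misses */
                *misses = 0;
                return (1);
        }
        return (++(*misses) < 2);       /* fatal on the second miss */
}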
3686
3687 static int
3688 qla_init_nic_func(qla_host_t *ha)
3689 {
3690         device_t                dev;
3691         q80_init_nic_func_t     *init_nic;
3692         q80_init_nic_func_rsp_t *init_nic_rsp;
3693         uint32_t                err;
3694
3695         dev = ha->pci_dev;
3696
3697         init_nic = (q80_init_nic_func_t *)ha->hw.mbox;
3698         bzero(init_nic, sizeof(q80_init_nic_func_t));
3699
3700         init_nic->opcode = Q8_MBX_INIT_NIC_FUNC;
3701         init_nic->count_version = (sizeof (q80_init_nic_func_t) >> 2);
3702         init_nic->count_version |= Q8_MBX_CMD_VERSION;
3703
3704         init_nic->options = Q8_INIT_NIC_REG_DCBX_CHNG_AEN;
3705         init_nic->options |= Q8_INIT_NIC_REG_SFP_CHNG_AEN;
3706         init_nic->options |= Q8_INIT_NIC_REG_IDC_AEN;
3707
3708 //qla_dump_buf8(ha, __func__, init_nic, sizeof (q80_init_nic_func_t));
3709         if (qla_mbx_cmd(ha, (uint32_t *)init_nic,
3710                 (sizeof (q80_init_nic_func_t) >> 2),
3711                 ha->hw.mbox, (sizeof (q80_init_nic_func_rsp_t) >> 2), 0)) {
3712                 device_printf(dev, "%s: failed\n", __func__);
3713                 return -1;
3714         }
3715
3716         init_nic_rsp = (q80_init_nic_func_rsp_t *)ha->hw.mbox;
3717 // qla_dump_buf8(ha, __func__, init_nic_rsp, sizeof (q80_init_nic_func_rsp_t));
3718
3719         err = Q8_MBX_RSP_STATUS(init_nic_rsp->regcnt_status);
3720
3721         if (err) {
3722                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3723         }
3724
3725         return 0;
3726 }
3727
3728 static int
3729 qla_stop_nic_func(qla_host_t *ha)
3730 {
3731         device_t                dev;
3732         q80_stop_nic_func_t     *stop_nic;
3733         q80_stop_nic_func_rsp_t *stop_nic_rsp;
3734         uint32_t                err;
3735
3736         dev = ha->pci_dev;
3737
3738         stop_nic = (q80_stop_nic_func_t *)ha->hw.mbox;
3739         bzero(stop_nic, sizeof(q80_stop_nic_func_t));
3740
3741         stop_nic->opcode = Q8_MBX_STOP_NIC_FUNC;
3742         stop_nic->count_version = (sizeof (q80_stop_nic_func_t) >> 2);
3743         stop_nic->count_version |= Q8_MBX_CMD_VERSION;
3744
3745         stop_nic->options = Q8_STOP_NIC_DEREG_DCBX_CHNG_AEN;
3746         stop_nic->options |= Q8_STOP_NIC_DEREG_SFP_CHNG_AEN;
3747
3748 //qla_dump_buf8(ha, __func__, stop_nic, sizeof (q80_stop_nic_func_t));
3749         if (qla_mbx_cmd(ha, (uint32_t *)stop_nic,
3750                 (sizeof (q80_stop_nic_func_t) >> 2),
3751                 ha->hw.mbox, (sizeof (q80_stop_nic_func_rsp_t) >> 2), 0)) {
3752                 device_printf(dev, "%s: failed\n", __func__);
3753                 return -1;
3754         }
3755
3756         stop_nic_rsp = (q80_stop_nic_func_rsp_t *)ha->hw.mbox;
3757 //qla_dump_buf8(ha, __func__, stop_nic_rsp, sizeof (q80_stop_nic_func_rsp_t));
3758
3759         err = Q8_MBX_RSP_STATUS(stop_nic_rsp->regcnt_status);
3760
3761         if (err) {
3762                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3763         }
3764
3765         return 0;
3766 }
3767
3768 static int
3769 qla_query_fw_dcbx_caps(qla_host_t *ha)
3770 {
3771         device_t                        dev;
3772         q80_query_fw_dcbx_caps_t        *fw_dcbx;
3773         q80_query_fw_dcbx_caps_rsp_t    *fw_dcbx_rsp;
3774         uint32_t                        err;
3775
3776         dev = ha->pci_dev;
3777
3778         fw_dcbx = (q80_query_fw_dcbx_caps_t *)ha->hw.mbox;
3779         bzero(fw_dcbx, sizeof(q80_query_fw_dcbx_caps_t));
3780
3781         fw_dcbx->opcode = Q8_MBX_GET_FW_DCBX_CAPS;
3782         fw_dcbx->count_version = (sizeof (q80_query_fw_dcbx_caps_t) >> 2);
3783         fw_dcbx->count_version |= Q8_MBX_CMD_VERSION;
3784
3785         ql_dump_buf8(ha, __func__, fw_dcbx, sizeof (q80_query_fw_dcbx_caps_t));
3786         if (qla_mbx_cmd(ha, (uint32_t *)fw_dcbx,
3787                 (sizeof (q80_query_fw_dcbx_caps_t) >> 2),
3788                 ha->hw.mbox, (sizeof (q80_query_fw_dcbx_caps_rsp_t) >> 2), 0)) {
3789                 device_printf(dev, "%s: failed\n", __func__);
3790                 return -1;
3791         }
3792
3793         fw_dcbx_rsp = (q80_query_fw_dcbx_caps_rsp_t *)ha->hw.mbox;
3794         ql_dump_buf8(ha, __func__, fw_dcbx_rsp,
3795                 sizeof (q80_query_fw_dcbx_caps_rsp_t));
3796
3797         err = Q8_MBX_RSP_STATUS(fw_dcbx_rsp->regcnt_status);
3798
3799         if (err) {
3800                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3801         }
3802
3803         return 0;
3804 }
3805
3806 static int
3807 qla_idc_ack(qla_host_t *ha, uint32_t aen_mb1, uint32_t aen_mb2,
3808         uint32_t aen_mb3, uint32_t aen_mb4)
3809 {
3810         device_t                dev;
3811         q80_idc_ack_t           *idc_ack;
3812         q80_idc_ack_rsp_t       *idc_ack_rsp;
3813         uint32_t                err;
3814         int                     count = 300;
3815
3816         dev = ha->pci_dev;
3817
3818         idc_ack = (q80_idc_ack_t *)ha->hw.mbox;
3819         bzero(idc_ack, sizeof(q80_idc_ack_t));
3820
3821         idc_ack->opcode = Q8_MBX_IDC_ACK;
3822         idc_ack->count_version = (sizeof (q80_idc_ack_t) >> 2);
3823         idc_ack->count_version |= Q8_MBX_CMD_VERSION;
3824
3825         idc_ack->aen_mb1 = aen_mb1;
3826         idc_ack->aen_mb2 = aen_mb2;
3827         idc_ack->aen_mb3 = aen_mb3;
3828         idc_ack->aen_mb4 = aen_mb4;
3829
3830         ha->hw.imd_compl = 0;
3831
3832         if (qla_mbx_cmd(ha, (uint32_t *)idc_ack,
3833                 (sizeof (q80_idc_ack_t) >> 2),
3834                 ha->hw.mbox, (sizeof (q80_idc_ack_rsp_t) >> 2), 0)) {
3835                 device_printf(dev, "%s: failed\n", __func__);
3836                 return -1;
3837         }
3838
3839         idc_ack_rsp = (q80_idc_ack_rsp_t *)ha->hw.mbox;
3840
3841         err = Q8_MBX_RSP_STATUS(idc_ack_rsp->regcnt_status);
3842
3843         if (err) {
3844                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3845                 return(-1);
3846         }
3847
3848         while (count && !ha->hw.imd_compl) {
3849                 qla_mdelay(__func__, 100);
3850                 count--;
3851         }
3852
3853         if (!count)
3854                 return -1;
3855         else
3856                 device_printf(dev, "%s: count %d\n", __func__, count);
3857
3858         return (0);
3859 }
3860
3861 static int
3862 qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits)
3863 {
3864         device_t                dev;
3865         q80_set_port_cfg_t      *pcfg;
3866         q80_set_port_cfg_rsp_t  *pfg_rsp;
3867         uint32_t                err;
3868         int                     count = 300;
3869
3870         dev = ha->pci_dev;
3871
3872         pcfg = (q80_set_port_cfg_t *)ha->hw.mbox;
3873         bzero(pcfg, sizeof(q80_set_port_cfg_t));
3874
3875         pcfg->opcode = Q8_MBX_SET_PORT_CONFIG;
3876         pcfg->count_version = (sizeof (q80_set_port_cfg_t) >> 2);
3877         pcfg->count_version |= Q8_MBX_CMD_VERSION;
3878
3879         pcfg->cfg_bits = cfg_bits;
3880
3881         device_printf(dev, "%s: cfg_bits"
3882                 " [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
3883                 " [0x%x, 0x%x, 0x%x]\n", __func__,
3884                 ((cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
3885                 ((cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
3886                 ((cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0));
3887
3888         ha->hw.imd_compl = 0;
3889
3890         if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
3891                 (sizeof (q80_set_port_cfg_t) >> 2),
3892                 ha->hw.mbox, (sizeof (q80_set_port_cfg_rsp_t) >> 2), 0)) {
3893                 device_printf(dev, "%s: failed\n", __func__);
3894                 return -1;
3895         }
3896
3897         pfg_rsp = (q80_set_port_cfg_rsp_t *)ha->hw.mbox;
3898
3899         err = Q8_MBX_RSP_STATUS(pfg_rsp->regcnt_status);
3900
3901         if (err == Q8_MBX_RSP_IDC_INTRMD_RSP) {
3902                 while (count && !ha->hw.imd_compl) {
3903                         qla_mdelay(__func__, 100);
3904                         count--;
3905                 }
3906                 if (count) {
3907                         device_printf(dev, "%s: count %d\n", __func__, count);
3908
3909                         err = 0;
3910                 }
3911         }
3912
3913         if (err) {
3914                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3915                 return(-1);
3916         }
3917
3918         return (0);
3919 }
3920
3921
3922 static int
3923 qla_get_minidump_tmplt_size(qla_host_t *ha, uint32_t *size)
3924 {
3925         uint32_t                        err;
3926         device_t                        dev = ha->pci_dev;
3927         q80_config_md_templ_size_t      *md_size;
3928         q80_config_md_templ_size_rsp_t  *md_size_rsp;
3929
3930 #ifndef QL_LDFLASH_FW
3931
3932         ql_minidump_template_hdr_t *hdr;
3933
3934         hdr = (ql_minidump_template_hdr_t *)ql83xx_minidump;
3935         *size = hdr->size_of_template;
3936         return (0);
3937
3938 #endif /* #ifndef QL_LDFLASH_FW */
3939
3940         md_size = (q80_config_md_templ_size_t *) ha->hw.mbox;
3941         bzero(md_size, sizeof(q80_config_md_templ_size_t));
3942
3943         md_size->opcode = Q8_MBX_GET_MINIDUMP_TMPLT_SIZE;
3944         md_size->count_version = (sizeof (q80_config_md_templ_size_t) >> 2);
3945         md_size->count_version |= Q8_MBX_CMD_VERSION;
3946
3947         if (qla_mbx_cmd(ha, (uint32_t *) md_size,
3948                 (sizeof(q80_config_md_templ_size_t) >> 2), ha->hw.mbox,
3949                 (sizeof(q80_config_md_templ_size_rsp_t) >> 2), 0)) {
3950
3951                 device_printf(dev, "%s: failed\n", __func__);
3952
3953                 return (-1);
3954         }
3955
3956         md_size_rsp = (q80_config_md_templ_size_rsp_t *) ha->hw.mbox;
3957
3958         err = Q8_MBX_RSP_STATUS(md_size_rsp->regcnt_status);
3959
3960         if (err) {
3961                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3962                 return(-1);
3963         }
3964
3965         *size = md_size_rsp->templ_size;
3966
3967         return (0);
3968 }
3969
3970 static int
3971 qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits)
3972 {
3973         device_t                dev;
3974         q80_get_port_cfg_t      *pcfg;
3975         q80_get_port_cfg_rsp_t  *pcfg_rsp;
3976         uint32_t                err;
3977
3978         dev = ha->pci_dev;
3979
3980         pcfg = (q80_get_port_cfg_t *)ha->hw.mbox;
3981         bzero(pcfg, sizeof(q80_get_port_cfg_t));
3982
3983         pcfg->opcode = Q8_MBX_GET_PORT_CONFIG;
3984         pcfg->count_version = (sizeof (q80_get_port_cfg_t) >> 2);
3985         pcfg->count_version |= Q8_MBX_CMD_VERSION;
3986
3987         if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
3988                 (sizeof (q80_get_port_cfg_t) >> 2),
3989                 ha->hw.mbox, (sizeof (q80_get_port_cfg_rsp_t) >> 2), 0)) {
3990                 device_printf(dev, "%s: failed\n", __func__);
3991                 return -1;
3992         }
3993
3994         pcfg_rsp = (q80_get_port_cfg_rsp_t *)ha->hw.mbox;
3995
3996         err = Q8_MBX_RSP_STATUS(pcfg_rsp->regcnt_status);
3997
3998         if (err) {
3999                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4000                 return(-1);
4001         }
4002
4003         device_printf(dev, "%s: [cfg_bits, port type]"
4004                 " [0x%08x, 0x%02x] [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
4005                 " [0x%x, 0x%x, 0x%x]\n", __func__,
4006                 pcfg_rsp->cfg_bits, pcfg_rsp->phys_port_type,
4007                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
4008                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
4009                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0)
4010                 );
4011
4012         *cfg_bits = pcfg_rsp->cfg_bits;
4013
4014         return (0);
4015 }
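
/*
 * Editor's sketch of the cfg_bits decode printed above; the shift counts
 * (20 and 5) are taken directly from the device_printf() format
 * arguments, and the mask macros are the driver's own:
 */
static void
qla_example_decode_cfg_bits(uint32_t cfg_bits, uint32_t *stdpause_dir,
        uint32_t *pause_type, uint32_t *dcbx)
{
        *stdpause_dir = (cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK) >> 20;
        *pause_type = (cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5;
        *dcbx = (cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1 : 0;
}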
4016
4017 int
4018 ql_iscsi_pdu(qla_host_t *ha, struct mbuf *mp)
4019 {
4020         struct ether_vlan_header        *eh;
4021         uint16_t                        etype;
4022         struct ip                       *ip = NULL;
4023         struct ip6_hdr                  *ip6 = NULL;
4024         struct tcphdr                   *th = NULL;
4025         uint32_t                        hdrlen;
4026         uint32_t                        offset;
4027         uint8_t                         buf[sizeof(struct ip6_hdr)];
4028
4029         eh = mtod(mp, struct ether_vlan_header *);
4030
4031         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4032                 hdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4033                 etype = ntohs(eh->evl_proto);
4034         } else {
4035                 hdrlen = ETHER_HDR_LEN;
4036                 etype = ntohs(eh->evl_encap_proto);
4037         }
4038
4039         if (etype == ETHERTYPE_IP) {
4040
4041                 offset = (hdrlen + sizeof (struct ip));
4042
4043                 if (mp->m_len >= offset) {
4044                         ip = (struct ip *)(mp->m_data + hdrlen);
4045                 } else {
4046                         m_copydata(mp, hdrlen, sizeof (struct ip), buf);
4047                         ip = (struct ip *)buf;
4048                 }
4049
4050                 if (ip->ip_p == IPPROTO_TCP) {
4051
4052                         hdrlen += ip->ip_hl << 2;
4053                         offset = hdrlen + 4;
4054         
4055                         if (mp->m_len >= offset) {
4056                                 th = (struct tcphdr *)(mp->m_data + hdrlen);
4057                         } else {
4058                                 m_copydata(mp, hdrlen, 4, buf);
4059                                 th = (struct tcphdr *)buf;
4060                         }
4061                 }
4062
4063         } else if (etype == ETHERTYPE_IPV6) {
4064
4065                 offset = (hdrlen + sizeof (struct ip6_hdr));
4066
4067                 if (mp->m_len >= offset) {
4068                         ip6 = (struct ip6_hdr *)(mp->m_data + hdrlen);
4069                 } else {
4070                         m_copydata(mp, hdrlen, sizeof (struct ip6_hdr), buf);
4071                         ip6 = (struct ip6_hdr *)buf;
4072                 }
4073
4074                 if (ip6->ip6_nxt == IPPROTO_TCP) {
4075
4076                         hdrlen += sizeof(struct ip6_hdr);
4077                         offset = hdrlen + 4;
4078
4079                         if (mp->m_len >= offset) {
4080                                 th = (struct tcphdr *)(mp->m_data + hdrlen);
4081                         } else {
4082                                 m_copydata(mp, hdrlen, 4, buf);
4083                                 th = (struct tcphdr *)buf;
4084                         }
4085                 }
4086         }
4087
4088         if (th != NULL) {
4089                 if ((th->th_sport == htons(3260)) ||
4090                         (th->th_dport == htons(3260)))
4091                         return 0;
4092         }
4093         return (-1);
4094 }
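
/*
 * Editor's note: ql_iscsi_pdu() returns 0 when the frame is iSCSI (TCP
 * port 3260 in either direction) and -1 otherwise; only the first four
 * bytes of the TCP header (the port pair) are ever inspected.  The final
 * test in isolation, as a hypothetical helper:
 */
static int
qla_example_is_iscsi_port(const struct tcphdr *th)
{
        const uint16_t iscsi_port = htons(3260);  /* compare in net order */

        return ((th->th_sport == iscsi_port) || (th->th_dport == iscsi_port));
}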
4095
4096 void
4097 qla_hw_async_event(qla_host_t *ha)
4098 {
4099         switch (ha->hw.aen_mb0) {
4100         case 0x8101:
4101                 (void)qla_idc_ack(ha, ha->hw.aen_mb1, ha->hw.aen_mb2,
4102                         ha->hw.aen_mb3, ha->hw.aen_mb4);
4103
4104                 break;
4105
4106         default:
4107                 break;
4108         }
4109
4110         return;
4111 }
4112
4113 #ifdef QL_LDFLASH_FW
4114 static int
4115 ql_get_minidump_template(qla_host_t *ha)
4116 {
4117         uint32_t                        err;
4118         device_t                        dev = ha->pci_dev;
4119         q80_config_md_templ_cmd_t       *md_templ;
4120         q80_config_md_templ_cmd_rsp_t   *md_templ_rsp;
4121
4122         md_templ = (q80_config_md_templ_cmd_t *) ha->hw.mbox;
4123         bzero(md_templ, (sizeof (q80_config_md_templ_cmd_t)));
4124
4125         md_templ->opcode = Q8_MBX_GET_MINIDUMP_TMPLT;
4126         md_templ->count_version = ( sizeof(q80_config_md_templ_cmd_t) >> 2);
4127         md_templ->count_version |= Q8_MBX_CMD_VERSION;
4128
4129         md_templ->buf_addr = ha->hw.dma_buf.minidump.dma_addr;
4130         md_templ->buff_size = ha->hw.dma_buf.minidump.size;
4131
4132         if (qla_mbx_cmd(ha, (uint32_t *) md_templ,
4133                 (sizeof(q80_config_md_templ_cmd_t) >> 2),
4134                  ha->hw.mbox,
4135                 (sizeof(q80_config_md_templ_cmd_rsp_t) >> 2), 0)) {
4136
4137                 device_printf(dev, "%s: failed\n", __func__);
4138
4139                 return (-1);
4140         }
4141
4142         md_templ_rsp = (q80_config_md_templ_cmd_rsp_t *) ha->hw.mbox;
4143
4144         err = Q8_MBX_RSP_STATUS(md_templ_rsp->regcnt_status);
4145
4146         if (err) {
4147                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4148                 return (-1);
4149         }
4150
4151         return (0);
4152
4153 }
4154 #endif /* #ifdef QL_LDFLASH_FW */
4155
4156 /*
4157  * Minidump related functionality 
4158  */
4159
4160 static int ql_parse_template(qla_host_t *ha);
4161
4162 static uint32_t ql_rdcrb(qla_host_t *ha,
4163                         ql_minidump_entry_rdcrb_t *crb_entry,
4164                         uint32_t * data_buff);
4165
4166 static uint32_t ql_pollrd(qla_host_t *ha,
4167                         ql_minidump_entry_pollrd_t *entry,
4168                         uint32_t * data_buff);
4169
4170 static uint32_t ql_pollrd_modify_write(qla_host_t *ha,
4171                         ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
4172                         uint32_t *data_buff);
4173
4174 static uint32_t ql_L2Cache(qla_host_t *ha,
4175                         ql_minidump_entry_cache_t *cacheEntry,
4176                         uint32_t * data_buff);
4177
4178 static uint32_t ql_L1Cache(qla_host_t *ha,
4179                         ql_minidump_entry_cache_t *cacheEntry,
4180                         uint32_t *data_buff);
4181
4182 static uint32_t ql_rdocm(qla_host_t *ha,
4183                         ql_minidump_entry_rdocm_t *ocmEntry,
4184                         uint32_t *data_buff);
4185
4186 static uint32_t ql_rdmem(qla_host_t *ha,
4187                         ql_minidump_entry_rdmem_t *mem_entry,
4188                         uint32_t *data_buff);
4189
4190 static uint32_t ql_rdrom(qla_host_t *ha,
4191                         ql_minidump_entry_rdrom_t *romEntry,
4192                         uint32_t *data_buff);
4193
4194 static uint32_t ql_rdmux(qla_host_t *ha,
4195                         ql_minidump_entry_mux_t *muxEntry,
4196                         uint32_t *data_buff);
4197
4198 static uint32_t ql_rdmux2(qla_host_t *ha,
4199                         ql_minidump_entry_mux2_t *muxEntry,
4200                         uint32_t *data_buff);
4201
4202 static uint32_t ql_rdqueue(qla_host_t *ha,
4203                         ql_minidump_entry_queue_t *queueEntry,
4204                         uint32_t *data_buff);
4205
4206 static uint32_t ql_cntrl(qla_host_t *ha,
4207                         ql_minidump_template_hdr_t *template_hdr,
4208                         ql_minidump_entry_cntrl_t *crbEntry);
4209
4210
4211 static uint32_t
4212 ql_minidump_size(qla_host_t *ha)
4213 {
4214         uint32_t i, k;
4215         uint32_t size = 0;
4216         ql_minidump_template_hdr_t *hdr;
4217
4218         hdr = (ql_minidump_template_hdr_t *)ha->hw.dma_buf.minidump.dma_b;
4219
4220         i = 0x2;
4221
4222         for (k = 1; k < QL_DBG_CAP_SIZE_ARRAY_LEN; k++) {
4223                 if (i & ha->hw.mdump_capture_mask)
4224                         size += hdr->capture_size_array[k];
4225                 i = i << 1;
4226         }
4227         return (size);
4228 }
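
/*
 * Editor's sketch of the mask walk above: bit k of the capture mask
 * (starting from bit 1, i.e. 0x2) selects capture_size_array[k], and the
 * buffer size is the sum over the set bits.  For a mask of 0x6, entries
 * 1 and 2 are summed.  Hypothetical standalone form:
 */
static uint32_t
qla_example_capture_size(uint32_t mask, const uint32_t *size_array,
        uint32_t array_len)
{
        uint32_t k, bit, size = 0;

        for (k = 1, bit = 0x2; k < array_len; k++, bit <<= 1) {
                if (mask & bit)
                        size += size_array[k];
        }
        return (size);
}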
4229
4230 static void
4231 ql_free_minidump_buffer(qla_host_t *ha)
4232 {
4233         if (ha->hw.mdump_buffer != NULL) {
4234                 free(ha->hw.mdump_buffer, M_QLA83XXBUF);
4235                 ha->hw.mdump_buffer = NULL;
4236                 ha->hw.mdump_buffer_size = 0;
4237         }
4238         return;
4239 }
4240
4241 static int
4242 ql_alloc_minidump_buffer(qla_host_t *ha)
4243 {
4244         ha->hw.mdump_buffer_size = ql_minidump_size(ha);
4245
4246         if (!ha->hw.mdump_buffer_size)
4247                 return (-1);
4248
4249         ha->hw.mdump_buffer = malloc(ha->hw.mdump_buffer_size, M_QLA83XXBUF,
4250                                         M_NOWAIT);
4251
4252         if (ha->hw.mdump_buffer == NULL)
4253                 return (-1);
4254
4255         return (0);
4256 }
4257
4258 static void
4259 ql_free_minidump_template_buffer(qla_host_t *ha)
4260 {
4261         if (ha->hw.mdump_template != NULL) {
4262                 free(ha->hw.mdump_template, M_QLA83XXBUF);
4263                 ha->hw.mdump_template = NULL;
4264                 ha->hw.mdump_template_size = 0;
4265         }
4266         return;
4267 }
4268
4269 static int
4270 ql_alloc_minidump_template_buffer(qla_host_t *ha)
4271 {
4272         ha->hw.mdump_template_size = ha->hw.dma_buf.minidump.size;
4273
4274         ha->hw.mdump_template = malloc(ha->hw.mdump_template_size,
4275                                         M_QLA83XXBUF, M_NOWAIT);
4276
4277         if (ha->hw.mdump_template == NULL)
4278                 return (-1);
4279
4280         return (0);
4281 }
4282
4283 static int
4284 ql_alloc_minidump_buffers(qla_host_t *ha)
4285 {
4286         int ret;
4287
4288         ret = ql_alloc_minidump_template_buffer(ha);
4289
4290         if (ret)
4291                 return (ret);
4292
4293         ret = ql_alloc_minidump_buffer(ha);
4294
4295         if (ret)
4296                 ql_free_minidump_template_buffer(ha);
4297
4298         return (ret);
4299 }
4300
4301
4302 static uint32_t
4303 ql_validate_minidump_checksum(qla_host_t *ha)
4304 {
4305         uint64_t sum = 0;
4306         int count;
4307         uint32_t *template_buff;
4308
4309         count = ha->hw.dma_buf.minidump.size / sizeof (uint32_t);
4310         template_buff = ha->hw.dma_buf.minidump.dma_b;
4311
4312         while (count-- > 0) {
4313                 sum += *template_buff++;
4314         }
4315
4316         while (sum >> 32) {
4317                 sum = (sum & 0xFFFFFFFF) + (sum >> 32);
4318         }
4319
4320         return (~sum);
4321 }
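
/*
 * Editor's worked example of the fold above: for the two words 0xFFFFFFFE
 * and 0x00000001, sum == 0xFFFFFFFF, there is no carry to fold, and the
 * complement's low 32 bits are 0 -- so the template validates.  The
 * checksum word in a template is evidently chosen so the end-around-carry
 * sum of all words is 0xFFFFFFFF (ones-complement zero).
 */
static uint32_t
qla_example_oc_checksum(const uint32_t *words, int count)
{
        uint64_t sum = 0;

        while (count-- > 0)
                sum += *words++;

        while (sum >> 32)       /* fold carries back in (end-around carry) */
                sum = (sum & 0xFFFFFFFF) + (sum >> 32);

        return ((uint32_t)~sum);        /* 0 means valid */
}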
4322
4323 int
4324 ql_minidump_init(qla_host_t *ha)
4325 {
4326         int             ret = 0;
4327         uint32_t        template_size = 0;
4328         device_t        dev = ha->pci_dev;
4329
4330         /*
4331          * Get Minidump Template Size
4332          */
4333         ret = qla_get_minidump_tmplt_size(ha, &template_size);
4334
4335         if (ret || (template_size == 0)) {
4336                 device_printf(dev, "%s: failed [%d, %d]\n", __func__, ret,
4337                         template_size);
4338                 return (-1);
4339         }
4340
4341         /*
4342          * Allocate Memory for Minidump Template
4343          */
4344
4345         ha->hw.dma_buf.minidump.alignment = 8;
4346         ha->hw.dma_buf.minidump.size = template_size;
4347
4348 #ifdef QL_LDFLASH_FW
4349         if (ql_alloc_dmabuf(ha, &ha->hw.dma_buf.minidump)) {
4350
4351                 device_printf(dev, "%s: minidump dma alloc failed\n", __func__);
4352
4353                 return (-1);
4354         }
4355         ha->hw.dma_buf.flags.minidump = 1;
4356
4357         /*
4358          * Retrieve Minidump Template
4359          */
4360         ret = ql_get_minidump_template(ha);
4361 #else
4362         ha->hw.dma_buf.minidump.dma_b = ql83xx_minidump;
4363
4364 #endif /* #ifdef QL_LDFLASH_FW */
4365
4366         if (ret == 0) {
4367
4368                 ret = ql_validate_minidump_checksum(ha);
4369
4370                 if (ret == 0) {
4371
4372                         ret = ql_alloc_minidump_buffers(ha);
4373
4374                         if (ret == 0)
4375                                 ha->hw.mdump_init = 1;
4376                         else
4377                                 device_printf(dev,
4378                                         "%s: ql_alloc_minidump_buffers"
4379                                         " failed\n", __func__);
4380                 } else {
4381                         device_printf(dev, "%s: ql_validate_minidump_checksum"
4382                                 " failed\n", __func__);
4383                 }
4384         } else {
4385                 device_printf(dev, "%s: ql_get_minidump_template failed\n",
4386                          __func__);
4387         }
4388
4389         if (ret)
4390                 ql_minidump_free(ha);
4391
4392         return (ret);
4393 }
4394
4395 static void
4396 ql_minidump_free(qla_host_t *ha)
4397 {
4398         ha->hw.mdump_init = 0;
4399         if (ha->hw.dma_buf.flags.minidump) {
4400                 ha->hw.dma_buf.flags.minidump = 0;
4401                 ql_free_dmabuf(ha, &ha->hw.dma_buf.minidump);
4402         }
4403
4404         ql_free_minidump_template_buffer(ha);
4405         ql_free_minidump_buffer(ha);
4406
4407         return;
4408 }
4409
4410 void
4411 ql_minidump(qla_host_t *ha)
4412 {
4413         if (!ha->hw.mdump_init)
4414                 return;
4415
4416         if (ha->hw.mdump_done)
4417                 return;
4418
4419         ha->hw.mdump_start_seq_index = ql_stop_sequence(ha);
4420
4421         bzero(ha->hw.mdump_buffer, ha->hw.mdump_buffer_size);
4422         bzero(ha->hw.mdump_template, ha->hw.mdump_template_size);
4423
4424         bcopy(ha->hw.dma_buf.minidump.dma_b, ha->hw.mdump_template,
4425                 ha->hw.mdump_template_size);
4426
4427         ql_parse_template(ha);
4428  
4429         ql_start_sequence(ha, ha->hw.mdump_start_seq_index);
4430
4431         ha->hw.mdump_done = 1;
4432
4433         return;
4434 }
4435
4436
4437 /*
4438  * helper routines
4439  */
4440 static void 
4441 ql_entry_err_chk(ql_minidump_entry_t *entry, uint32_t esize)
4442 {
4443         if (esize != entry->hdr.entry_capture_size) {
4444                 entry->hdr.entry_capture_size = esize;
4445                 entry->hdr.driver_flags |= QL_DBG_SIZE_ERR_FLAG;
4446         }
4447         return;
4448 }
4449
4450
4451 static int 
4452 ql_parse_template(qla_host_t *ha)
4453 {
4454         uint32_t num_of_entries, buff_level, e_cnt, esize;
4455         uint32_t end_cnt, rv = 0;
4456         char *dump_buff, *dbuff;
4457         int sane_start = 0, sane_end = 0;
4458         ql_minidump_template_hdr_t *template_hdr;
4459         ql_minidump_entry_t *entry;
4460         uint32_t capture_mask; 
4461         uint32_t dump_size; 
4462
4463         /* Setup parameters */
4464         template_hdr = (ql_minidump_template_hdr_t *)ha->hw.mdump_template;
4465
4466         if (template_hdr->entry_type == TLHDR)
4467                 sane_start = 1;
4468         
4469         dump_buff = (char *) ha->hw.mdump_buffer;
4470
4471         num_of_entries = template_hdr->num_of_entries;
4472
4473         entry = (ql_minidump_entry_t *) ((char *)template_hdr 
4474                         + template_hdr->first_entry_offset );
4475
4476         template_hdr->saved_state_array[QL_OCM0_ADDR_INDX] =
4477                 template_hdr->ocm_window_array[ha->pci_func];
4478         template_hdr->saved_state_array[QL_PCIE_FUNC_INDX] = ha->pci_func;
4479
4480         capture_mask = ha->hw.mdump_capture_mask;
4481         dump_size = ha->hw.mdump_buffer_size;
4482
4483         template_hdr->driver_capture_mask = capture_mask;
4484
4485         QL_DPRINT80(ha, (ha->pci_dev,
4486                 "%s: sane_start = %d num_of_entries = %d "
4487                 "capture_mask = 0x%x dump_size = %d \n", 
4488                 __func__, sane_start, num_of_entries, capture_mask, dump_size));
4489
4490         for (buff_level = 0, e_cnt = 0; e_cnt < num_of_entries; e_cnt++) {
4491
4492                 /*
4493                  * If the entry's capture_mask does not match the driver's
4494                  * capture mask, skip it after marking the driver_flags indicator.
4495                  */
4496                 
4497                 if (!(entry->hdr.entry_capture_mask & capture_mask)) {
4498
4499                         entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4500                         entry = (ql_minidump_entry_t *) ((char *) entry
4501                                         + entry->hdr.entry_size);
4502                         continue;
4503                 }
4504
4505                 /*
4506                  * This is ONLY needed in implementations where
4507                  * the capture buffer allocated is too small to capture
4508                  * all of the required entries for a given capture mask.
4509                  * We need to empty the buffer contents to a file
4510                  * if possible, before processing the next entry
4511                  * If the buff_full_flag is set, no further capture will happen
4512                  * if possible, before processing the next entry.
4513                  */
4514                 if (entry->hdr.entry_capture_size != 0) {
4515                         if ((buff_level + entry->hdr.entry_capture_size) >
4516                                 dump_size) {
4517                                 /*  Try to recover by emptying buffer to file */
4518                                 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4519                                 entry = (ql_minidump_entry_t *) ((char *) entry
4520                                                 + entry->hdr.entry_size);
4521                                 continue;
4522                         }
4523                 }
4524
4525                 /*
4526                  * Decode the entry type and process it accordingly
4527                  */
4528
4529                 switch (entry->hdr.entry_type) {
4530                 case RDNOP:
4531                         break;
4532
4533                 case RDEND:
4534                         if (sane_end == 0) {
4535                                 end_cnt = e_cnt;
4536                         }
4537                         sane_end++;
4538                         break;
4539
4540                 case RDCRB:
4541                         dbuff = dump_buff + buff_level;
4542                         esize = ql_rdcrb(ha, (void *)entry, (void *)dbuff);
4543                         ql_entry_err_chk(entry, esize);
4544                         buff_level += esize;
4545                         break;
4546
4547                 case POLLRD:
4548                         dbuff = dump_buff + buff_level;
4549                         esize = ql_pollrd(ha, (void *)entry, (void *)dbuff);
4550                         ql_entry_err_chk(entry, esize);
4551                         buff_level += esize;
4552                         break;
4553
4554                 case POLLRDMWR:
4555                         dbuff = dump_buff + buff_level;
4556                         esize = ql_pollrd_modify_write(ha, (void *)entry,
4557                                         (void *)dbuff);
4558                         ql_entry_err_chk(entry, esize);
4559                         buff_level += esize;
4560                         break;
4561
4562                 case L2ITG:
4563                 case L2DTG:
4564                 case L2DAT:
4565                 case L2INS:
4566                         dbuff = dump_buff + buff_level;
4567                         esize = ql_L2Cache(ha, (void *)entry, (void *)dbuff);
4568                         if (esize == -1) {
4569                                 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4570                         } else {
4571                                 ql_entry_err_chk(entry, esize);
4572                                 buff_level += esize;
4573                         }
4574                         break;
4575
4576                 case L1DAT:
4577                 case L1INS:
4578                         dbuff = dump_buff + buff_level;
4579                         esize = ql_L1Cache(ha, (void *)entry, (void *)dbuff);
4580                         ql_entry_err_chk(entry, esize);
4581                         buff_level += esize;
4582                         break;
4583
4584                 case RDOCM:
4585                         dbuff = dump_buff + buff_level;
4586                         esize = ql_rdocm(ha, (void *)entry, (void *)dbuff);
4587                         ql_entry_err_chk(entry, esize);
4588                         buff_level += esize;
4589                         break;
4590
4591                 case RDMEM:
4592                         dbuff = dump_buff + buff_level;
4593                         esize = ql_rdmem(ha, (void *)entry, (void *)dbuff);
4594                         ql_entry_err_chk(entry, esize);
4595                         buff_level += esize;
4596                         break;
4597
4598                 case BOARD:
4599                 case RDROM:
4600                         dbuff = dump_buff + buff_level;
4601                         esize = ql_rdrom(ha, (void *)entry, (void *)dbuff);
4602                         ql_entry_err_chk(entry, esize);
4603                         buff_level += esize;
4604                         break;
4605
4606                 case RDMUX:
4607                         dbuff = dump_buff + buff_level;
4608                         esize = ql_rdmux(ha, (void *)entry, (void *)dbuff);
4609                         ql_entry_err_chk(entry, esize);
4610                         buff_level += esize;
4611                         break;
4612
4613                 case RDMUX2:
4614                         dbuff = dump_buff + buff_level;
4615                         esize = ql_rdmux2(ha, (void *)entry, (void *)dbuff);
4616                         ql_entry_err_chk(entry, esize);
4617                         buff_level += esize;
4618                         break;
4619
4620                 case QUEUE:
4621                         dbuff = dump_buff + buff_level;
4622                         esize = ql_rdqueue(ha, (void *)entry, (void *)dbuff);
4623                         ql_entry_err_chk(entry, esize);
4624                         buff_level += esize;
4625                         break;
4626
4627                 case CNTRL:
4628                         if ((rv = ql_cntrl(ha, template_hdr, (void *)entry))) {
4629                                 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4630                         }
4631                         break;
4632                 default:
4633                         entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4634                         break;
4635                 }
4636                 /*  next entry in the template */
4637                 entry = (ql_minidump_entry_t *) ((char *) entry
4638                                                 + entry->hdr.entry_size);
4639         }
4640
4641         if (!sane_start || (sane_end > 1)) {
4642                 device_printf(ha->pci_dev,
4643                         "\n%s: Template configuration error. Check Template\n",
4644                         __func__);
4645         }
4646         
4647         QL_DPRINT80(ha, (ha->pci_dev, "%s: Minidump num of entries = %d\n",
4648                 __func__, template_hdr->num_of_entries));
4649
4650         return 0;
4651 }
4652
4653 /*
4654  * Read CRB operation.
4655  */
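/*
 * ql_rdcrb:
 *	Capture op_count CRB registers starting at crb_entry->addr and
 *	stepping by addr_stride; each register is read through the
 *	indirect register interface and stored as an (address, value) pair.
 *	Returns the number of bytes captured, or 0 if a read fails.
 */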
4656 static uint32_t
4657 ql_rdcrb(qla_host_t *ha, ql_minidump_entry_rdcrb_t *crb_entry,
4658         uint32_t *data_buff)
4659 {
4660         int loop_cnt;
4661         int ret;
4662         uint32_t op_count, addr, stride, value = 0;
4663
4664         addr = crb_entry->addr;
4665         op_count = crb_entry->op_count;
4666         stride = crb_entry->addr_stride;
4667
4668         for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
4669
4670                 ret = ql_rdwr_indreg32(ha, addr, &value, 1);
4671
4672                 if (ret)
4673                         return (0);
4674
4675                 *data_buff++ = addr;
4676                 *data_buff++ = value;
4677                 addr = addr + stride;
4678         }
4679
4680         /*
4681          * Return the number of bytes written to the capture buffer.
4682          */
4683         return (op_count * (2 * sizeof(uint32_t)));
4684 }
4685
4686 /*
4687  * Handle L2 Cache.
4688  */
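/*
 * ql_L2Cache:
 *	For each of op_count tag values, write the tag register, issue the
 *	optional control write, and poll the control register (1 ms per
 *	tick, up to poll_wait ticks) until the poll_mask bits clear, then
 *	read read_addr_cnt words of cache data per tag.
 *	Returns the bytes captured, 0 on a register access error, or -1 if
 *	the control poll times out.
 */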
4689
4690 static uint32_t
4691 ql_L2Cache(qla_host_t *ha, ql_minidump_entry_cache_t *cacheEntry,
4692         uint32_t *data_buff)
4693 {
4694         int i, k;
4695         int loop_cnt;
4696         int ret;
4697
4698         uint32_t read_value;
4699         uint32_t addr, read_addr, cntrl_addr, tag_reg_addr, cntl_value_w;
4700         uint32_t tag_value, read_cnt;
4701         volatile uint8_t cntl_value_r;
4702         long timeout;
4703         uint32_t data;
4704
4705         loop_cnt = cacheEntry->op_count;
4706
4707         read_addr = cacheEntry->read_addr;
4708         cntrl_addr = cacheEntry->control_addr;
4709         cntl_value_w = (uint32_t) cacheEntry->write_value;
4710
4711         tag_reg_addr = cacheEntry->tag_reg_addr;
4712
4713         tag_value = cacheEntry->init_tag_value;
4714         read_cnt = cacheEntry->read_addr_cnt;
4715
4716         for (i = 0; i < loop_cnt; i++) {
4717
4718                 ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
4719                 if (ret)
4720                         return (0);
4721
4722                 if (cacheEntry->write_value != 0) { 
4723
4724                         ret = ql_rdwr_indreg32(ha, cntrl_addr,
4725                                         &cntl_value_w, 0);
4726                         if (ret)
4727                                 return (0);
4728                 }
4729
4730                 if (cacheEntry->poll_mask != 0) { 
4731
4732                         timeout = cacheEntry->poll_wait;
4733
4734                         ret = ql_rdwr_indreg32(ha, cntrl_addr, &data, 1);
4735                         if (ret)
4736                                 return (0);
4737
4738                         cntl_value_r = (uint8_t)data;
4739
4740                         while ((cntl_value_r & cacheEntry->poll_mask) != 0) {
4741
4742                                 if (timeout) {
4743                                         qla_mdelay(__func__, 1);
4744                                         timeout--;
4745                                 } else
4746                                         break;
4747
4748                                 ret = ql_rdwr_indreg32(ha, cntrl_addr,
4749                                                 &data, 1);
4750                                 if (ret)
4751                                         return (0);
4752
4753                                 cntl_value_r = (uint8_t)data;
4754                         }
4755                         if (!timeout) {
4756                                 /*
4757                                  * Poll timed out; the core dump capture
4758                                  * failed. Skip the remaining entries,
4759                                  * write the buffer out to a file and use
4760                                  * the driver specific fields in the
4761                                  * template header to report this error.
4762                                  */
4763                                 return (-1);
4764                         }
4765                 }
4766
4767                 addr = read_addr;
4768                 for (k = 0; k < read_cnt; k++) {
4769
4770                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4771                         if (ret)
4772                                 return (0);
4773
4774                         *data_buff++ = read_value;
4775                         addr += cacheEntry->read_addr_stride;
4776                 }
4777
4778                 tag_value += cacheEntry->tag_value_stride;
4779         }
4780
4781         return (read_cnt * loop_cnt * sizeof(uint32_t));
4782 }
4783
4784 /*
4785  * Handle L1 Cache.
4786  */
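/*
 * ql_L1Cache:
 *	Same walk as ql_L2Cache, but the control write is unconditional
 *	and no completion polling is performed.
 *	Returns the bytes captured, or 0 on a register access error.
 */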
4787
4788 static uint32_t 
4789 ql_L1Cache(qla_host_t *ha,
4790         ql_minidump_entry_cache_t *cacheEntry,
4791         uint32_t *data_buff)
4792 {
4793         int ret;
4794         int i, k;
4795         int loop_cnt;
4796
4797         uint32_t read_value;
4798         uint32_t addr, read_addr, cntrl_addr, tag_reg_addr;
4799         uint32_t tag_value, read_cnt;
4800         uint32_t cntl_value_w;
4801
4802         loop_cnt = cacheEntry->op_count;
4803
4804         read_addr = cacheEntry->read_addr;
4805         cntrl_addr = cacheEntry->control_addr;
4806         cntl_value_w = (uint32_t) cacheEntry->write_value;
4807
4808         tag_reg_addr = cacheEntry->tag_reg_addr;
4809
4810         tag_value = cacheEntry->init_tag_value;
4811         read_cnt = cacheEntry->read_addr_cnt;
4812
4813         for (i = 0; i < loop_cnt; i++) {
4814
4815                 ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
4816                 if (ret)
4817                         return (0);
4818
4819                 ret = ql_rdwr_indreg32(ha, cntrl_addr, &cntl_value_w, 0);
4820                 if (ret)
4821                         return (0);
4822
4823                 addr = read_addr;
4824                 for (k = 0; k < read_cnt; k++) {
4825
4826                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4827                         if (ret)
4828                                 return (0);
4829
4830                         *data_buff++ = read_value;
4831                         addr += cacheEntry->read_addr_stride;
4832                 }
4833
4834                 tag_value += cacheEntry->tag_value_stride;
4835         }
4836
4837         return (read_cnt * loop_cnt * sizeof(uint32_t));
4838 }
4839
4840 /*
4841  * Reading OCM memory
4842  */
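/*
 * ql_rdocm:
 *	Read op_count words of on-chip memory directly from the adapter's
 *	register space via READ_REG32(), stepping by read_addr_stride.
 *	There is no failure path; the return value is always the number of
 *	bytes captured.
 */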
4843
4844 static uint32_t 
4845 ql_rdocm(qla_host_t *ha,
4846         ql_minidump_entry_rdocm_t *ocmEntry,
4847         uint32_t *data_buff)
4848 {
4849         int i, loop_cnt;
4850         volatile uint32_t addr;
4851         volatile uint32_t value;
4852
4853         addr = ocmEntry->read_addr;
4854         loop_cnt = ocmEntry->op_count;
4855
4856         for (i = 0; i < loop_cnt; i++) {
4857                 value = READ_REG32(ha, addr);
4858                 *data_buff++ = value;
4859                 addr += ocmEntry->read_addr_stride;
4860         }
4861         return (loop_cnt * sizeof(value));
4862 }
4863
4864 /*
4865  * Read memory
4866  */
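/*
 * ql_rdmem:
 *	Capture off-chip memory in 16-byte units; read_data_size is given
 *	in bytes, so the loop count is size / 16, and each iteration
 *	stores the four 32-bit words of one q80_offchip_mem_val_t.
 *	Returns the bytes captured, or 0 if a read fails.
 */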
4867
4868 static uint32_t 
4869 ql_rdmem(qla_host_t *ha,
4870         ql_minidump_entry_rdmem_t *mem_entry,
4871         uint32_t *data_buff)
4872 {
4873         int ret;
4874         int i, loop_cnt;
4875         volatile uint32_t addr;
4876         q80_offchip_mem_val_t val;
4877
4878         addr = mem_entry->read_addr;
4879
4880         /* size in bytes / 16 */
4881         loop_cnt = mem_entry->read_data_size / (sizeof(uint32_t) * 4);
4882
4883         for (i = 0; i < loop_cnt; i++) {
4884
4885                 ret = ql_rdwr_offchip_mem(ha, (addr & 0x0ffffffff), &val, 1);
4886                 if (ret)
4887                         return (0);
4888
4889                 *data_buff++ = val.data_lo;
4890                 *data_buff++ = val.data_hi;
4891                 *data_buff++ = val.data_ulo;
4892                 *data_buff++ = val.data_uhi;
4893
4894                 addr += (sizeof(uint32_t) * 4);
4895         }
4896
4897         return (loop_cnt * (sizeof(uint32_t) * 4));
4898 }
4899
4900 /*
4901  * Read Rom
4902  */
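/*
 * ql_rdrom:
 *	Capture read_data_size bytes of flash, one 32-bit word per
 *	ql_rd_flash32() call.
 *	Returns the bytes captured, or 0 if a flash read fails.
 */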
4903
4904 static uint32_t 
4905 ql_rdrom(qla_host_t *ha,
4906         ql_minidump_entry_rdrom_t *romEntry,
4907         uint32_t *data_buff)
4908 {
4909         int ret;
4910         int i, loop_cnt;
4911         uint32_t addr;
4912         uint32_t value;
4913
4914         addr = romEntry->read_addr;
4915         loop_cnt = romEntry->read_data_size; /* This is size in bytes */
4916         loop_cnt /= sizeof(value);
4917
4918         for (i = 0; i < loop_cnt; i++) {
4919
4920                 ret = ql_rd_flash32(ha, addr, &value);
4921                 if (ret)
4922                         return (0);
4923
4924                 *data_buff++ = value;
4925                 addr += sizeof(value);
4926         }
4927
4928         return (loop_cnt * sizeof(value));
4929 }
4930
4931 /*
4932  * Read MUX data
4933  */
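/*
 * ql_rdmux:
 *	For each of op_count select values, write the select register,
 *	read back the muxed data register, and store the
 *	(select value, data) pair.
 *	Returns the bytes captured, or 0 on a register access error.
 */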
4934
4935 static uint32_t 
4936 ql_rdmux(qla_host_t *ha,
4937         ql_minidump_entry_mux_t *muxEntry,
4938         uint32_t *data_buff)
4939 {
4940         int ret;
4941         int loop_cnt;
4942         uint32_t read_value, sel_value;
4943         uint32_t read_addr, select_addr;
4944
4945         select_addr = muxEntry->select_addr;
4946         sel_value = muxEntry->select_value;
4947         read_addr = muxEntry->read_addr;
4948
4949         for (loop_cnt = 0; loop_cnt < muxEntry->op_count; loop_cnt++) {
4950
4951                 ret = ql_rdwr_indreg32(ha, select_addr, &sel_value, 0);
4952                 if (ret)
4953                         return (0);
4954
4955                 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
4956                 if (ret)
4957                         return (0);
4958
4959                 *data_buff++ = sel_value;
4960                 *data_buff++ = read_value;
4961
4962                 sel_value += muxEntry->select_value_stride;
4963         }
4964
4965         return (loop_cnt * (2 * sizeof(uint32_t)));
4966 }
4967
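/*
 * Read MUX2 data.
 *
 * ql_rdmux2:
 *	Like ql_rdmux, but each iteration performs two selections: the raw
 *	selector is written to select_addr_1 and its masked form to
 *	select_addr_2, the data register is read, and the process repeats
 *	with select_value_2, so four 32-bit words are captured per
 *	iteration.
 *	Returns the bytes captured, or 0 on a register access error.
 */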
4968 static uint32_t
4969 ql_rdmux2(qla_host_t *ha,
4970         ql_minidump_entry_mux2_t *muxEntry,
4971         uint32_t *data_buff)
4972 {
4973         int ret;
4974         int loop_cnt;
4975
4976         uint32_t select_addr_1, select_addr_2;
4977         uint32_t select_value_1, select_value_2;
4978         uint32_t select_value_count, select_value_mask;
4979         uint32_t read_addr, read_value;
4980
4981         select_addr_1 = muxEntry->select_addr_1;
4982         select_addr_2 = muxEntry->select_addr_2;
4983         select_value_1 = muxEntry->select_value_1;
4984         select_value_2 = muxEntry->select_value_2;
4985         select_value_count = muxEntry->select_value_count;
4986         select_value_mask  = muxEntry->select_value_mask;
4987
4988         read_addr = muxEntry->read_addr;
4989
4990         for (loop_cnt = 0; loop_cnt < select_value_count;
4991                 loop_cnt++) {
4992
4993                 uint32_t temp_sel_val;
4994
4995                 ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_1, 0);
4996                 if (ret)
4997                         return (0);
4998
4999                 temp_sel_val = select_value_1 & select_value_mask;
5000
5001                 ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
5002                 if (ret)
5003                         return (0);
5004
5005                 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
5006                 if (ret)
5007                         return (0);
5008
5009                 *data_buff++ = temp_sel_val;
5010                 *data_buff++ = read_value;
5011
5012                 ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_2, 0);
5013                 if (ret)
5014                         return (0);
5015
5016                 temp_sel_val = select_value_2 & select_value_mask;
5017
5018                 ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
5019                 if (ret)
5020                         return (0);
5021
5022                 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
5023                 if (ret)
5024                         return (0);
5025
5026                 *data_buff++ = temp_sel_val;
5027                 *data_buff++ = read_value;
5028
5029                 select_value_1 += muxEntry->select_value_stride;
5030                 select_value_2 += muxEntry->select_value_stride;
5031         }
5032
5033         return (loop_cnt * (4 * sizeof(uint32_t)));
5034 }
5035
5036 /*
5037  * Handling Queue State Reads.
5038  */
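/*
 * ql_rdqueue:
 *	For each of op_count queue ids (stepping by queue_id_stride),
 *	select the queue and read read_addr_cnt words of its state,
 *	stepping by read_addr_stride.
 *	Returns the bytes captured, or 0 on a register access error.
 */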
5039
5040 static uint32_t 
5041 ql_rdqueue(qla_host_t *ha,
5042         ql_minidump_entry_queue_t *queueEntry,
5043         uint32_t *data_buff)
5044 {
5045         int ret;
5046         int loop_cnt, k;
5047         uint32_t read_value;
5048         uint32_t read_addr, read_stride, select_addr;
5049         uint32_t queue_id, read_cnt;
5050
5051         read_cnt = queueEntry->read_addr_cnt;
5052         read_stride = queueEntry->read_addr_stride;
5053         select_addr = queueEntry->select_addr;
5054
5055         for (loop_cnt = 0, queue_id = 0; loop_cnt < queueEntry->op_count;
5056                 loop_cnt++) {
5057
5058                 ret = ql_rdwr_indreg32(ha, select_addr, &queue_id, 0);
5059                 if (ret)
5060                         return (0);
5061
5062                 read_addr = queueEntry->read_addr;
5063
5064                 for (k = 0; k < read_cnt; k++) {
5065
5066                         ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
5067                         if (ret)
5068                                 return (0);
5069
5070                         *data_buff++ = read_value;
5071                         read_addr += read_stride;
5072                 }
5073
5074                 queue_id += queueEntry->queue_id_stride;
5075         }
5076
5077         return (loop_cnt * (read_cnt * sizeof(uint32_t)));
5078 }
5079
5080 /*
5081  * Handling control entries.
5082  */
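/*
 * ql_cntrl:
 *	Execute a control entry: each opcode bit (WR, RW, AND, OR, POLL,
 *	RDSTATE, WRSTATE, MDSTATE) triggers the corresponding register
 *	operation at the entry address, with POLL waiting (1 ms per tick)
 *	for (value & value_2) == value_1. Control entries produce no
 *	capture data.
 *	Returns 0 on success; a poll timeout returns -1, which the caller
 *	uses to mark the entry skipped. Note that register access failures
 *	also return 0 here.
 */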
5083
5084 static uint32_t 
5085 ql_cntrl(qla_host_t *ha,
5086         ql_minidump_template_hdr_t *template_hdr,
5087         ql_minidump_entry_cntrl_t *crbEntry)
5088 {
5089         int ret;
5090         int count;
5091         uint32_t opcode, read_value, addr, entry_addr;
5092         long timeout;
5093
5094         entry_addr = crbEntry->addr;
5095
5096         for (count = 0; count < crbEntry->op_count; count++) {
5097                 opcode = crbEntry->opcode;
5098
5099                 if (opcode & QL_DBG_OPCODE_WR) {
5100
5101                         ret = ql_rdwr_indreg32(ha, entry_addr,
5102                                         &crbEntry->value_1, 0);
5103                         if (ret)
5104                                 return (0);
5105
5106                         opcode &= ~QL_DBG_OPCODE_WR;
5107                 }
5108
5109                 if (opcode & QL_DBG_OPCODE_RW) {
5110
5111                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
5112                         if (ret)
5113                                 return (0);
5114
5115                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
5116                         if (ret)
5117                                 return (0);
5118
5119                         opcode &= ~QL_DBG_OPCODE_RW;
5120                 }
5121
5122                 if (opcode & QL_DBG_OPCODE_AND) {
5123
5124                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
5125                         if (ret)
5126                                 return (0);
5127
5128                         read_value &= crbEntry->value_2;
5129                         opcode &= ~QL_DBG_OPCODE_AND;
5130
5131                         if (opcode & QL_DBG_OPCODE_OR) {
5132                                 read_value |= crbEntry->value_3;
5133                                 opcode &= ~QL_DBG_OPCODE_OR;
5134                         }
5135
5136                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
5137                         if (ret)
5138                                 return (0);
5139                 }
5140
5141                 if (opcode & QL_DBG_OPCODE_OR) {
5142
5143                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
5144                         if (ret)
5145                                 return (0);
5146
5147                         read_value |= crbEntry->value_3;
5148
5149                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
5150                         if (ret)
5151                                 return (0);
5152
5153                         opcode &= ~QL_DBG_OPCODE_OR;
5154                 }
5155
5156                 if (opcode & QL_DBG_OPCODE_POLL) {
5157
5158                         opcode &= ~QL_DBG_OPCODE_POLL;
5159                         timeout = crbEntry->poll_timeout;
5160                         addr = entry_addr;
5161
5162                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
5163                         if (ret)
5164                                 return (0);
5165
5166                         while ((read_value & crbEntry->value_2)
5167                                 != crbEntry->value_1) {
5168
5169                                 if (timeout) {
5170                                         qla_mdelay(__func__, 1);
5171                                         timeout--;
5172                                 } else
5173                                         break;
5174
5175                                 ret = ql_rdwr_indreg32(ha, addr,
5176                                                 &read_value, 1);
5177                                 if (ret)
5178                                         return (0);
5179                         }
5180
5181                         if (!timeout) {
5182                                 /*
5183                                  * Poll timed out; the core dump capture
5184                                  * failed. Skip the remaining entries,
5185                                  * write the buffer out to a file and
5186                                  * use the driver specific fields in
5187                                  * the template header to report this
5188                                  * error.
5189                                  */
5190                                 return (-1);
5191                         }
5192                 }
5193
5194                 if (opcode & QL_DBG_OPCODE_RDSTATE) {
5195                         /*
5196                          * decide which address to use.
5197                          */
5198                         if (crbEntry->state_index_a) {
5199                                 addr = template_hdr->saved_state_array[
5200                                                 crbEntry->state_index_a];
5201                         } else {
5202                                 addr = entry_addr;
5203                         }
5204
5205                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
5206                         if (ret)
5207                                 return (0);
5208
5209                         template_hdr->saved_state_array[crbEntry->state_index_v]
5210                                         = read_value;
5211                         opcode &= ~QL_DBG_OPCODE_RDSTATE;
5212                 }
5213
5214                 if (opcode & QL_DBG_OPCODE_WRSTATE) {
5215                         /*
5216                          * decide which value to use.
5217                          */
5218                         if (crbEntry->state_index_v) {
5219                                 read_value = template_hdr->saved_state_array[
5220                                                 crbEntry->state_index_v];
5221                         } else {
5222                                 read_value = crbEntry->value_1;
5223                         }
5224                         /*
5225                          * decide which address to use.
5226                          */
5227                         if (crbEntry->state_index_a) {
5228                                 addr = template_hdr->saved_state_array[
5229                                                 crbEntry->state_index_a];
5230                         } else {
5231                                 addr = entry_addr;
5232                         }
5233
5234                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 0);
5235                         if (ret)
5236                                 return (0);
5237
5238                         opcode &= ~QL_DBG_OPCODE_WRSTATE;
5239                 }
5240
5241                 if (opcode & QL_DBG_OPCODE_MDSTATE) {
5242                         /*  Read value from saved state using index */
5243                         read_value = template_hdr->saved_state_array[
5244                                                 crbEntry->state_index_v];
5245
5246                         read_value <<= crbEntry->shl; /* Shift left operation */
5247                         read_value >>= crbEntry->shr; /* Shift right operation */
5248
5249                         if (crbEntry->value_2) {
5250                                 /* check if AND mask is provided */
5251                                 read_value &= crbEntry->value_2;
5252                         }
5253
5254                         read_value |= crbEntry->value_3; /* OR operation */
5255                         read_value += crbEntry->value_1; /* increment op */
5256
5257                         /* Write value back to state area. */
5258
5259                         template_hdr->saved_state_array[crbEntry->state_index_v]
5260                                         = read_value;
5261                         opcode &= ~QL_DBG_OPCODE_MDSTATE;
5262                 }
5263
5264                 entry_addr += crbEntry->addr_stride;
5265         }
5266
5267         return (0);
5268 }
5269
5270 /*
5271  * Handling rd poll entry.
5272  */
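/*
 * ql_pollrd:
 *	For each of op_count select values, write the select register,
 *	poll it until any mask bit is set (up to poll reads), then read
 *	the data register and store the (select value, data) pair.
 *	Returns the bytes captured, or 0 on an access error or poll
 *	timeout.
 */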
5273
5274 static uint32_t 
5275 ql_pollrd(qla_host_t *ha, ql_minidump_entry_pollrd_t *entry,
5276         uint32_t *data_buff)
5277 {
5278         int ret;
5279         int loop_cnt;
5280         uint32_t op_count, select_addr, select_value_stride, select_value;
5281         uint32_t read_addr, poll, mask, data_size, data;
5282         uint32_t wait_count = 0;
5283
5284         select_addr            = entry->select_addr;
5285         read_addr              = entry->read_addr;
5286         select_value           = entry->select_value;
5287         select_value_stride    = entry->select_value_stride;
5288         op_count               = entry->op_count;
5289         poll                   = entry->poll;
5290         mask                   = entry->mask;
5291         data_size              = entry->data_size;
5292
5293         for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
5294
5295                 ret = ql_rdwr_indreg32(ha, select_addr, &select_value, 0);
5296                 if (ret)
5297                         return (0);
5298
5299                 wait_count = 0;
5300
5301                 while (wait_count < poll) {
5302
5303                         uint32_t temp;
5304
5305                         ret = ql_rdwr_indreg32(ha, select_addr, &temp, 1);
5306                         if (ret)
5307                                 return (0);
5308
5309                         if ((temp & mask) != 0) {
5310                                 break;
5311                         }
5312                         wait_count++;
5313                 }
5314
5315                 if (wait_count == poll) {
5316                         device_printf(ha->pci_dev,
5317                                 "%s: Error in processing entry\n", __func__);
5318                         device_printf(ha->pci_dev,
5319                                 "%s: wait_count <0x%x> poll <0x%x>\n",
5320                                 __func__, wait_count, poll);
5321                         return (0);
5322                 }
5323
5324                 ret = ql_rdwr_indreg32(ha, read_addr, &data, 1);
5325                 if (ret)
5326                         return (0);
5327
5328                 *data_buff++ = select_value;
5329                 *data_buff++ = data;
5330                 select_value = select_value + select_value_stride;
5331         }
5332
5333         /*
5334          * Return the number of bytes written to the capture buffer.
5335          */
5336         return (loop_cnt * (2 * sizeof(uint32_t)));
5337 }
5338
5339
5340 /*
5341  * Handling rd modify write poll entry.
5342  */
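/*
 * ql_pollrd_modify_write:
 *	Write value_1 to the first register and poll it until a mask bit
 *	is set, then read the second register, AND it with modify_mask,
 *	write the result back, write value_2 to the first register, and
 *	poll once more before storing the (addr_2, data) pair.
 *	Note that the fixed entry size of two 32-bit words is returned
 *	even when the initial poll times out and nothing is stored.
 */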
5343
5344 static uint32_t 
5345 ql_pollrd_modify_write(qla_host_t *ha,
5346         ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
5347         uint32_t *data_buff)
5348 {
5349         int ret;
5350         uint32_t addr_1, addr_2, value_1, value_2, data;
5351         uint32_t poll, mask, data_size, modify_mask;
5352         uint32_t wait_count = 0;
5353
5354         addr_1          = entry->addr_1;
5355         addr_2          = entry->addr_2;
5356         value_1         = entry->value_1;
5357         value_2         = entry->value_2;
5358
5359         poll            = entry->poll;
5360         mask            = entry->mask;
5361         modify_mask     = entry->modify_mask;
5362         data_size       = entry->data_size;
5363
5364
5365         ret = ql_rdwr_indreg32(ha, addr_1, &value_1, 0);
5366         if (ret)
5367                 return (0);
5368
5369         wait_count = 0;
5370         while (wait_count < poll) {
5371
5372                 uint32_t temp;
5373
5374                 ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
5375                 if (ret)
5376                         return (0);
5377
5378                 if ((temp & mask) != 0) {
5379                         break;
5380                 }
5381                 wait_count++;
5382         }
5383
5384         if (wait_count == poll) {
5385                 device_printf(ha->pci_dev, "%s Error in processing entry\n",
5386                         __func__);
5387         } else {
5388
5389                 ret = ql_rdwr_indreg32(ha, addr_2, &data, 1);
5390                 if (ret)
5391                         return (0);
5392
5393                 data = (data & modify_mask);
5394
5395                 ret = ql_rdwr_indreg32(ha, addr_2, &data, 0);
5396                 if (ret)
5397                         return (0);
5398
5399                 ret = ql_rdwr_indreg32(ha, addr_1, &value_2, 0);
5400                 if (ret)
5401                         return (0);
5402
5403                 /* Poll again */
5404                 wait_count = 0;
5405                 while (wait_count < poll) {
5406
5407                         uint32_t temp;
5408
5409                         ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
5410                         if (ret)
5411                                 return (0);
5412
5413                         if ((temp & mask) != 0) {
5414                                 break;
5415                         }
5416                         wait_count++;
5417                 }
5418                 *data_buff++ = addr_2;
5419                 *data_buff++ = data;
5420         }
5421
5422         /*
5423          * Return the number of bytes this entry occupies in the buffer.
5424          */
5425         return (2 * sizeof(uint32_t));
5426 }
5427
5428