/*
 * Copyright (c) 2013-2016 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: ql_hw.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 * Content: Contains hardware dependent functions
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "ql_os.h"
#include "ql_hw.h"
#include "ql_def.h"
#include "ql_inline.h"
#include "ql_ver.h"
#include "ql_glbl.h"
#include "ql_dbg.h"
#include "ql_minidump.h"

/*
 * Static Functions
 */

static void qla_del_rcv_cntxt(qla_host_t *ha);
static int qla_init_rcv_cntxt(qla_host_t *ha);
static int qla_del_xmt_cntxt(qla_host_t *ha);
static int qla_init_xmt_cntxt(qla_host_t *ha);
static int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
        uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause);
static int qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx,
        uint32_t num_intrs, uint32_t create);
static int qla_config_rss(qla_host_t *ha, uint16_t cntxt_id);
static int qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id,
        int tenable, int rcv);
static int qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode);
static int qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id);

static int qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd,
                uint8_t *hdr);
static int qla_hw_add_all_mcast(qla_host_t *ha);
static int qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds);

static int qla_init_nic_func(qla_host_t *ha);
static int qla_stop_nic_func(qla_host_t *ha);
static int qla_query_fw_dcbx_caps(qla_host_t *ha);
static int qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits);
static int qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits);
static int qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode);
static int qla_get_cam_search_mode(qla_host_t *ha);

static void ql_minidump_free(qla_host_t *ha);

#ifdef QL_DBG

static void
qla_stop_pegs(qla_host_t *ha)
{
        uint32_t val = 1;

        ql_rdwr_indreg32(ha, Q8_CRB_PEG_0, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_1, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_2, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_3, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_4, &val, 0);
        device_printf(ha->pci_dev, "%s PEGS HALTED!!!!!\n", __func__);
}

static int
qla_sysctl_stop_pegs(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        if (ret == 1) {
                ha = (qla_host_t *)arg1;
                if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
                        qla_stop_pegs(ha);
                        QLA_UNLOCK(ha, __func__);
                }
        }

        return (err);
}
#endif /* #ifdef QL_DBG */

static int
qla_validate_set_port_cfg_bit(uint32_t bits)
{
        if ((bits & 0xF) > 1)
                return (-1);

        if (((bits >> 4) & 0xF) > 2)
                return (-1);

        if (((bits >> 8) & 0xF) > 2)
                return (-1);

        return (0);
}
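
/*
 * The value packs three nibbles: bits[3:0] select DCBX (0 or 1), bits[7:4]
 * the pause type (0 = none, 1 = standard, 2 = per-priority), and bits[11:8]
 * the standard-pause direction (0 = xmt and rcv, 1 = xmt only, 2 = rcv only).
 * For example, 0x011 (DCBX on, standard pause, both directions) passes this
 * validation, while 0x031 fails because 3 is not a valid pause type.
 */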

static int
qla_sysctl_port_cfg(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;
        uint32_t cfg_bits;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        ha = (qla_host_t *)arg1;

        if (qla_validate_set_port_cfg_bit((uint32_t)ret) == 0) {
                err = qla_get_port_config(ha, &cfg_bits);

                if (err)
                        goto qla_sysctl_set_port_cfg_exit;

                if (ret & 0x1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_DCBX_ENABLE;
                } else {
                        cfg_bits &= ~Q8_PORT_CFG_BITS_DCBX_ENABLE;
                }

                ret = ret >> 4;
                cfg_bits &= ~Q8_PORT_CFG_BITS_PAUSE_CFG_MASK;

                if ((ret & 0xF) == 0) {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_DISABLED;
                } else if ((ret & 0xF) == 1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_STD;
                } else {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_PPM;
                }

                ret = ret >> 4;
                cfg_bits &= ~Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK;

                if (ret == 0) {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT_RCV;
                } else if (ret == 1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT;
                } else {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_RCV;
                }

                if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
                        err = qla_set_port_config(ha, cfg_bits);
                        QLA_UNLOCK(ha, __func__);
                } else {
                        device_printf(ha->pci_dev, "%s: failed\n", __func__);
                }
        } else {
                if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
                        err = qla_get_port_config(ha, &cfg_bits);
                        QLA_UNLOCK(ha, __func__);
                } else {
                        device_printf(ha->pci_dev, "%s: failed\n", __func__);
                }
        }

qla_sysctl_set_port_cfg_exit:
        return (err);
}
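
/*
 * Example usage (a sketch; the sysctl node name depends on how the device
 * attaches, e.g. "dev.ql.0" for unit 0):
 *
 *      sysctl dev.ql.0.port_cfg=0x011  # DCBX on, std pause, xmt and rcv
 *
 * Writing a value that fails qla_validate_set_port_cfg_bit() (e.g. 0xF)
 * takes the "get" path above and fetches the current port configuration
 * instead of setting it.
 */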

static int
qla_sysctl_set_cam_search_mode(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        ha = (qla_host_t *)arg1;

        if ((ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_INTERNAL) ||
                (ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_AUTO)) {
                if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
                        err = qla_set_cam_search_mode(ha, (uint32_t)ret);
                        QLA_UNLOCK(ha, __func__);
                } else {
                        device_printf(ha->pci_dev, "%s: failed\n", __func__);
                }
        } else {
                device_printf(ha->pci_dev, "%s: ret = %d\n", __func__, ret);
        }

        return (err);
}

static int
qla_sysctl_get_cam_search_mode(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        ha = (qla_host_t *)arg1;
        if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
                err = qla_get_cam_search_mode(ha);
                QLA_UNLOCK(ha, __func__);
        } else {
                device_printf(ha->pci_dev, "%s: failed\n", __func__);
        }

        return (err);
}

static void
qlnx_add_hw_mac_stats_sysctls(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid       *ctx_oid;

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_mac",
                        CTLFLAG_RD, NULL, "stats_hw_mac");
        children = SYSCTL_CHILDREN(ctx_oid);

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_frames",
                CTLFLAG_RD, &ha->hw.mac.xmt_frames,
                "xmt_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_bytes,
                "xmt_bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_mcast_pkts",
                CTLFLAG_RD, &ha->hw.mac.xmt_mcast_pkts,
                "xmt_mcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_bcast_pkts",
                CTLFLAG_RD, &ha->hw.mac.xmt_bcast_pkts,
                "xmt_bcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pause_frames",
                CTLFLAG_RD, &ha->hw.mac.xmt_pause_frames,
                "xmt_pause_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_cntrl_pkts",
                CTLFLAG_RD, &ha->hw.mac.xmt_cntrl_pkts,
                "xmt_cntrl_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_64bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_64bytes,
                "xmt_pkt_lt_64bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_127bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_127bytes,
                "xmt_pkt_lt_127bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_255bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_255bytes,
                "xmt_pkt_lt_255bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_511bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_511bytes,
                "xmt_pkt_lt_511bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_1023bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_1023bytes,
                "xmt_pkt_lt_1023bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_lt_1518bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_1518bytes,
                "xmt_pkt_lt_1518bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_pkt_gt_1518bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_gt_1518bytes,
                "xmt_pkt_gt_1518bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_frames",
                CTLFLAG_RD, &ha->hw.mac.rcv_frames,
                "rcv_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_bytes,
                "rcv_bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_mcast_pkts",
                CTLFLAG_RD, &ha->hw.mac.rcv_mcast_pkts,
                "rcv_mcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_bcast_pkts",
                CTLFLAG_RD, &ha->hw.mac.rcv_bcast_pkts,
                "rcv_bcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pause_frames",
                CTLFLAG_RD, &ha->hw.mac.rcv_pause_frames,
                "rcv_pause_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_cntrl_pkts",
                CTLFLAG_RD, &ha->hw.mac.rcv_cntrl_pkts,
                "rcv_cntrl_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_64bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_64bytes,
                "rcv_pkt_lt_64bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_127bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_127bytes,
                "rcv_pkt_lt_127bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_255bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_255bytes,
                "rcv_pkt_lt_255bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_511bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_511bytes,
                "rcv_pkt_lt_511bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_1023bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_1023bytes,
                "rcv_pkt_lt_1023bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_lt_1518bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_1518bytes,
                "rcv_pkt_lt_1518bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_pkt_gt_1518bytes",
                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_gt_1518bytes,
                "rcv_pkt_gt_1518bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_len_error",
                CTLFLAG_RD, &ha->hw.mac.rcv_len_error,
                "rcv_len_error");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_len_small",
                CTLFLAG_RD, &ha->hw.mac.rcv_len_small,
                "rcv_len_small");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_len_large",
                CTLFLAG_RD, &ha->hw.mac.rcv_len_large,
                "rcv_len_large");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_jabber",
                CTLFLAG_RD, &ha->hw.mac.rcv_jabber,
                "rcv_jabber");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rcv_dropped",
                CTLFLAG_RD, &ha->hw.mac.rcv_dropped,
                "rcv_dropped");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "fcs_error",
                CTLFLAG_RD, &ha->hw.mac.fcs_error,
                "fcs_error");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "align_error",
                CTLFLAG_RD, &ha->hw.mac.align_error,
                "align_error");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_frames",
                CTLFLAG_RD, &ha->hw.mac.eswitched_frames,
                "eswitched_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_bytes",
                CTLFLAG_RD, &ha->hw.mac.eswitched_bytes,
                "eswitched_bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_mcast_frames",
                CTLFLAG_RD, &ha->hw.mac.eswitched_mcast_frames,
                "eswitched_mcast_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_bcast_frames",
                CTLFLAG_RD, &ha->hw.mac.eswitched_bcast_frames,
                "eswitched_bcast_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_ucast_frames",
                CTLFLAG_RD, &ha->hw.mac.eswitched_ucast_frames,
                "eswitched_ucast_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_err_free_frames",
                CTLFLAG_RD, &ha->hw.mac.eswitched_err_free_frames,
                "eswitched_err_free_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "eswitched_err_free_bytes",
                CTLFLAG_RD, &ha->hw.mac.eswitched_err_free_bytes,
                "eswitched_err_free_bytes");

        return;
}

static void
qlnx_add_hw_rcv_stats_sysctls(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid       *ctx_oid;

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_rcv",
                        CTLFLAG_RD, NULL, "stats_hw_rcv");
        children = SYSCTL_CHILDREN(ctx_oid);

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "total_bytes",
                CTLFLAG_RD, &ha->hw.rcv.total_bytes,
                "total_bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "total_pkts",
                CTLFLAG_RD, &ha->hw.rcv.total_pkts,
                "total_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "lro_pkt_count",
                CTLFLAG_RD, &ha->hw.rcv.lro_pkt_count,
                "lro_pkt_count");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "sw_pkt_count",
                CTLFLAG_RD, &ha->hw.rcv.sw_pkt_count,
                "sw_pkt_count");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "ip_chksum_err",
                CTLFLAG_RD, &ha->hw.rcv.ip_chksum_err,
                "ip_chksum_err");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_wo_acntxts",
                CTLFLAG_RD, &ha->hw.rcv.pkts_wo_acntxts,
                "pkts_wo_acntxts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_dropped_no_sds_card",
                CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_sds_card,
                "pkts_dropped_no_sds_card");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_dropped_no_sds_host",
                CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_sds_host,
                "pkts_dropped_no_sds_host");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "oversized_pkts",
                CTLFLAG_RD, &ha->hw.rcv.oversized_pkts,
                "oversized_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_dropped_no_rds",
                CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_rds,
                "pkts_dropped_no_rds");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "unxpctd_mcast_pkts",
                CTLFLAG_RD, &ha->hw.rcv.unxpctd_mcast_pkts,
                "unxpctd_mcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "re1_fbq_error",
                CTLFLAG_RD, &ha->hw.rcv.re1_fbq_error,
                "re1_fbq_error");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "invalid_mac_addr",
                CTLFLAG_RD, &ha->hw.rcv.invalid_mac_addr,
                "invalid_mac_addr");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rds_prime_trys",
                CTLFLAG_RD, &ha->hw.rcv.rds_prime_trys,
                "rds_prime_trys");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rds_prime_success",
                CTLFLAG_RD, &ha->hw.rcv.rds_prime_success,
                "rds_prime_success");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "lro_flows_added",
                CTLFLAG_RD, &ha->hw.rcv.lro_flows_added,
                "lro_flows_added");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "lro_flows_deleted",
                CTLFLAG_RD, &ha->hw.rcv.lro_flows_deleted,
                "lro_flows_deleted");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "lro_flows_active",
                CTLFLAG_RD, &ha->hw.rcv.lro_flows_active,
                "lro_flows_active");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_droped_unknown",
                CTLFLAG_RD, &ha->hw.rcv.pkts_droped_unknown,
                "pkts_droped_unknown");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_cnt_oversized",
                CTLFLAG_RD, &ha->hw.rcv.pkts_cnt_oversized,
                "pkts_cnt_oversized");

        return;
}

static void
qlnx_add_hw_xmt_stats_sysctls(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid_list  *node_children;
        struct sysctl_oid       *ctx_oid;
        int                     i;
        uint8_t                 name_str[16];

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_xmt",
                        CTLFLAG_RD, NULL, "stats_hw_xmt");
        children = SYSCTL_CHILDREN(ctx_oid);

        for (i = 0; i < ha->hw.num_tx_rings; i++) {
                bzero(name_str, sizeof(name_str));
                snprintf(name_str, sizeof(name_str), "%d", i);

                ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
                        CTLFLAG_RD, NULL, name_str);
                node_children = SYSCTL_CHILDREN(ctx_oid);

                /* Tx Related */

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "total_bytes",
                        CTLFLAG_RD, &ha->hw.xmt[i].total_bytes,
                        "total_bytes");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "total_pkts",
                        CTLFLAG_RD, &ha->hw.xmt[i].total_pkts,
                        "total_pkts");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "errors",
                        CTLFLAG_RD, &ha->hw.xmt[i].errors,
                        "errors");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "pkts_dropped",
                        CTLFLAG_RD, &ha->hw.xmt[i].pkts_dropped,
                        "pkts_dropped");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "switch_pkts",
                        CTLFLAG_RD, &ha->hw.xmt[i].switch_pkts,
                        "switch_pkts");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "num_buffers",
                        CTLFLAG_RD, &ha->hw.xmt[i].num_buffers,
                        "num_buffers");
        }

        return;
}

static void
qlnx_add_hw_mbx_cmpl_stats_sysctls(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *node_children;

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        node_children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, "mbx_completion_time_lt_200ms",
                CTLFLAG_RD, &ha->hw.mbx_comp_msecs[0],
                "mbx_completion_time_lt_200ms");

        SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, "mbx_completion_time_200ms_400ms",
                CTLFLAG_RD, &ha->hw.mbx_comp_msecs[1],
                "mbx_completion_time_200ms_400ms");

        SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, "mbx_completion_time_400ms_600ms",
                CTLFLAG_RD, &ha->hw.mbx_comp_msecs[2],
                "mbx_completion_time_400ms_600ms");

        SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, "mbx_completion_time_600ms_800ms",
                CTLFLAG_RD, &ha->hw.mbx_comp_msecs[3],
                "mbx_completion_time_600ms_800ms");

        SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, "mbx_completion_time_800ms_1000ms",
                CTLFLAG_RD, &ha->hw.mbx_comp_msecs[4],
                "mbx_completion_time_800ms_1000ms");

        SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, "mbx_completion_time_1000ms_1200ms",
                CTLFLAG_RD, &ha->hw.mbx_comp_msecs[5],
                "mbx_completion_time_1000ms_1200ms");

        SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, "mbx_completion_time_1200ms_1400ms",
                CTLFLAG_RD, &ha->hw.mbx_comp_msecs[6],
                "mbx_completion_time_1200ms_1400ms");

        SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, "mbx_completion_time_1400ms_1600ms",
                CTLFLAG_RD, &ha->hw.mbx_comp_msecs[7],
                "mbx_completion_time_1400ms_1600ms");

        SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, "mbx_completion_time_1600ms_1800ms",
                CTLFLAG_RD, &ha->hw.mbx_comp_msecs[8],
                "mbx_completion_time_1600ms_1800ms");

        SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, "mbx_completion_time_1800ms_2000ms",
                CTLFLAG_RD, &ha->hw.mbx_comp_msecs[9],
                "mbx_completion_time_1800ms_2000ms");

        SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, "mbx_completion_time_2000ms_2200ms",
                CTLFLAG_RD, &ha->hw.mbx_comp_msecs[10],
                "mbx_completion_time_2000ms_2200ms");

        SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, "mbx_completion_time_2200ms_2400ms",
                CTLFLAG_RD, &ha->hw.mbx_comp_msecs[11],
                "mbx_completion_time_2200ms_2400ms");

        SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, "mbx_completion_time_2400ms_2600ms",
                CTLFLAG_RD, &ha->hw.mbx_comp_msecs[12],
                "mbx_completion_time_2400ms_2600ms");

        SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, "mbx_completion_time_2600ms_2800ms",
                CTLFLAG_RD, &ha->hw.mbx_comp_msecs[13],
                "mbx_completion_time_2600ms_2800ms");

        SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, "mbx_completion_time_2800ms_3000ms",
                CTLFLAG_RD, &ha->hw.mbx_comp_msecs[14],
                "mbx_completion_time_2800ms_3000ms");

        SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, "mbx_completion_time_3000ms_4000ms",
                CTLFLAG_RD, &ha->hw.mbx_comp_msecs[15],
                "mbx_completion_time_3000ms_4000ms");

        SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, "mbx_completion_time_4000ms_5000ms",
                CTLFLAG_RD, &ha->hw.mbx_comp_msecs[16],
                "mbx_completion_time_4000ms_5000ms");

        SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, "mbx_completion_host_mbx_cntrl_timeout",
                CTLFLAG_RD, &ha->hw.mbx_comp_msecs[17],
                "mbx_completion_host_mbx_cntrl_timeout");

        SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, "mbx_completion_fw_mbx_cntrl_timeout",
                CTLFLAG_RD, &ha->hw.mbx_comp_msecs[18],
                "mbx_completion_fw_mbx_cntrl_timeout");
        return;
}
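
/*
 * Bucket layout of hw.mbx_comp_msecs[], derived from qla_mbx_cmd() below:
 * entries [0..14] count completions in 200ms bins up to 3000ms, entry [15]
 * covers 3000-4000ms, entry [16] covers 4000-5000ms, and entries [17] and
 * [18] (Q8_MBX_COMP_MSECS - 2 and - 1) count host and firmware mailbox
 * control register timeouts respectively.
 */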

static void
qlnx_add_hw_stats_sysctls(qla_host_t *ha)
{
        qlnx_add_hw_mac_stats_sysctls(ha);
        qlnx_add_hw_rcv_stats_sysctls(ha);
        qlnx_add_hw_xmt_stats_sysctls(ha);
        qlnx_add_hw_mbx_cmpl_stats_sysctls(ha);

        return;
}
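
/*
 * All of the counters registered above are read-only and can be inspected
 * from userland once the driver attaches; a sketch, assuming the device
 * attaches as unit 0 under the "ql" name:
 *
 *      sysctl dev.ql.0.stats_hw_mac.xmt_frames
 *      sysctl dev.ql.0.stats_hw_xmt.0.total_pkts       # per-tx-ring node
 */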

static void
qlnx_add_drvr_sds_stats(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid_list  *node_children;
        struct sysctl_oid       *ctx_oid;
        int                     i;
        uint8_t                 name_str[16];

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_sds",
                        CTLFLAG_RD, NULL, "stats_drvr_sds");
        children = SYSCTL_CHILDREN(ctx_oid);

        for (i = 0; i < ha->hw.num_sds_rings; i++) {
                bzero(name_str, sizeof(name_str));
                snprintf(name_str, sizeof(name_str), "%d", i);

                ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
                        CTLFLAG_RD, NULL, name_str);
                node_children = SYSCTL_CHILDREN(ctx_oid);

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "intr_count",
                        CTLFLAG_RD, &ha->hw.sds[i].intr_count,
                        "intr_count");

                SYSCTL_ADD_UINT(ctx, node_children,
                        OID_AUTO, "rx_free",
                        CTLFLAG_RD, &ha->hw.sds[i].rx_free,
                        ha->hw.sds[i].rx_free, "rx_free");
        }

        return;
}

static void
qlnx_add_drvr_rds_stats(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid_list  *node_children;
        struct sysctl_oid       *ctx_oid;
        int                     i;
        uint8_t                 name_str[16];

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_rds",
                        CTLFLAG_RD, NULL, "stats_drvr_rds");
        children = SYSCTL_CHILDREN(ctx_oid);

        for (i = 0; i < ha->hw.num_rds_rings; i++) {
                bzero(name_str, sizeof(name_str));
                snprintf(name_str, sizeof(name_str), "%d", i);

                ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
                        CTLFLAG_RD, NULL, name_str);
                node_children = SYSCTL_CHILDREN(ctx_oid);

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "count",
                        CTLFLAG_RD, &ha->hw.rds[i].count,
                        "count");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "lro_pkt_count",
                        CTLFLAG_RD, &ha->hw.rds[i].lro_pkt_count,
                        "lro_pkt_count");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "lro_bytes",
                        CTLFLAG_RD, &ha->hw.rds[i].lro_bytes,
                        "lro_bytes");
        }

        return;
}

static void
qlnx_add_drvr_tx_stats(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid_list  *node_children;
        struct sysctl_oid       *ctx_oid;
        int                     i;
        uint8_t                 name_str[16];

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_xmt",
                        CTLFLAG_RD, NULL, "stats_drvr_xmt");
        children = SYSCTL_CHILDREN(ctx_oid);

        for (i = 0; i < ha->hw.num_tx_rings; i++) {
                bzero(name_str, sizeof(name_str));
                snprintf(name_str, sizeof(name_str), "%d", i);

                ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
                        CTLFLAG_RD, NULL, name_str);
                node_children = SYSCTL_CHILDREN(ctx_oid);

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "count",
                        CTLFLAG_RD, &ha->tx_ring[i].count,
                        "count");

#ifdef QL_ENABLE_ISCSI_TLV
                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "iscsi_pkt_count",
                        CTLFLAG_RD, &ha->tx_ring[i].iscsi_pkt_count,
                        "iscsi_pkt_count");
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
        }

        return;
}

static void
qlnx_add_drvr_stats_sysctls(qla_host_t *ha)
{
        qlnx_add_drvr_sds_stats(ha);
        qlnx_add_drvr_rds_stats(ha);
        qlnx_add_drvr_tx_stats(ha);
        return;
}

/*
 * Name: ql_hw_add_sysctls
 * Function: Add P3Plus specific sysctls
 */
void
ql_hw_add_sysctls(qla_host_t *ha)
{
        device_t        dev;

        dev = ha->pci_dev;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_rds_rings", CTLFLAG_RD, &ha->hw.num_rds_rings,
                ha->hw.num_rds_rings, "Number of Rcv Descriptor Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_sds_rings", CTLFLAG_RD, &ha->hw.num_sds_rings,
                ha->hw.num_sds_rings, "Number of Status Descriptor Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->hw.num_tx_rings,
                ha->hw.num_tx_rings, "Number of Transmit Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "tx_ring_index", CTLFLAG_RW, &ha->txr_idx,
                ha->txr_idx, "Tx Ring Used");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "max_tx_segs", CTLFLAG_RD, &ha->hw.max_tx_segs,
                ha->hw.max_tx_segs, "Max # of Segments in a non-TSO pkt");

        ha->hw.sds_cidx_thres = 32;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "sds_cidx_thres", CTLFLAG_RW, &ha->hw.sds_cidx_thres,
                ha->hw.sds_cidx_thres,
                "Number of SDS entries to process before updating"
                " SDS Ring Consumer Index");

        ha->hw.rds_pidx_thres = 32;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "rds_pidx_thres", CTLFLAG_RW, &ha->hw.rds_pidx_thres,
                ha->hw.rds_pidx_thres,
                "Number of Rcv Rings Entries to post before updating"
                " RDS Ring Producer Index");

        ha->hw.rcv_intr_coalesce = (3 << 16) | 256;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "rcv_intr_coalesce", CTLFLAG_RW,
                &ha->hw.rcv_intr_coalesce,
                ha->hw.rcv_intr_coalesce,
                "Rcv Intr Coalescing Parameters\n"
                "\tbits 15:0 max packets\n"
                "\tbits 31:16 max microseconds to wait\n"
                "\tplease run\n"
                "\tifconfig <if> down && ifconfig <if> up\n"
                "\tto take effect\n");
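
        /*
         * Worked example for the encoding above: the default of
         * (3 << 16) | 256 == 0x00030100 sets bits 15:0 to 256 (max
         * packets) and bits 31:16 to 3 (max microseconds to wait).
         */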

        ha->hw.xmt_intr_coalesce = (64 << 16) | 64;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "xmt_intr_coalesce", CTLFLAG_RW,
                &ha->hw.xmt_intr_coalesce,
                ha->hw.xmt_intr_coalesce,
                "Xmt Intr Coalescing Parameters\n"
                "\tbits 15:0 max packets\n"
                "\tbits 31:16 max microseconds to wait\n"
                "\tplease run\n"
                "\tifconfig <if> down && ifconfig <if> up\n"
                "\tto take effect\n");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "port_cfg", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_port_cfg, "I",
                        "Set Port Configuration if the value is valid per"
                        " the bits below; otherwise Get Port Configuration\n"
                        "\tBits 0-3 : 1 = DCBX Enable; 0 = DCBX Disable\n"
                        "\tBits 4-7 : 0 = no pause; 1 = std; 2 = ppm\n"
                        "\tBits 8-11: std pause cfg; 0 = xmt and rcv;"
                        " 1 = xmt only; 2 = rcv only\n"
                );

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "set_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_set_cam_search_mode, "I",
                        "Set CAM Search Mode\n"
                        "\t 1 = search mode internal\n"
                        "\t 2 = search mode auto\n");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "get_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_get_cam_search_mode, "I",
                        "Get CAM Search Mode\n"
                        "\t 1 = search mode internal\n"
                        "\t 2 = search mode auto\n");

        ha->hw.enable_9kb = 1;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "enable_9kb", CTLFLAG_RW, &ha->hw.enable_9kb,
                ha->hw.enable_9kb, "Enable 9Kbyte Buffers when MTU = 9000");

        ha->hw.enable_hw_lro = 1;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "enable_hw_lro", CTLFLAG_RW, &ha->hw.enable_hw_lro,
                ha->hw.enable_hw_lro, "Enable Hardware LRO; Default is true\n"
                "\t 1 : Hardware LRO if LRO is enabled\n"
                "\t 0 : Software LRO if LRO is enabled\n"
                "\t Any change requires ifconfig down/up to take effect\n"
                "\t Note that LRO may be turned off/on via ifconfig\n");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "sp_log_index", CTLFLAG_RW, &ha->hw.sp_log_index,
                ha->hw.sp_log_index, "sp_log_index");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "sp_log_stop", CTLFLAG_RW, &ha->hw.sp_log_stop,
                ha->hw.sp_log_stop, "sp_log_stop");

        ha->hw.sp_log_stop_events = 0;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "sp_log_stop_events", CTLFLAG_RW,
                &ha->hw.sp_log_stop_events,
                ha->hw.sp_log_stop_events, "Slow path event logging stops"
                " when any of the following events occur:\n"
                "\t 0x01 : Heartbeat Failure\n"
                "\t 0x02 : Temperature Failure\n"
                "\t 0x04 : HW Initialization Failure\n"
                "\t 0x08 : Interface Initialization Failure\n"
                "\t 0x10 : Error Recovery Failure\n");

        ha->hw.mdump_active = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "minidump_active", CTLFLAG_RW, &ha->hw.mdump_active,
                ha->hw.mdump_active,
                "Minidump retrieval is Active");

        ha->hw.mdump_done = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "mdump_done", CTLFLAG_RW,
                &ha->hw.mdump_done, ha->hw.mdump_done,
                "Minidump has been taken and is available for retrieval");

        ha->hw.mdump_capture_mask = 0xF;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "minidump_capture_mask", CTLFLAG_RW,
                &ha->hw.mdump_capture_mask, ha->hw.mdump_capture_mask,
                "Minidump capture mask");
#ifdef QL_DBG

        ha->err_inject = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "err_inject",
                CTLFLAG_RW, &ha->err_inject, ha->err_inject,
                "Error to be injected\n"
                "\t\t\t 0: No Errors\n"
                "\t\t\t 1: rcv: rxb struct invalid\n"
                "\t\t\t 2: rcv: mp == NULL\n"
                "\t\t\t 3: lro: rxb struct invalid\n"
                "\t\t\t 4: lro: mp == NULL\n"
                "\t\t\t 5: rcv: num handles invalid\n"
                "\t\t\t 6: reg: indirect reg rd_wr failure\n"
                "\t\t\t 7: ocm: offchip memory rd_wr failure\n"
                "\t\t\t 8: mbx: mailbox command failure\n"
                "\t\t\t 9: heartbeat failure\n"
                "\t\t\t A: temperature failure\n"
                "\t\t\t 11: m_getcl or m_getjcl failure\n"
                "\t\t\t 13: Invalid Descriptor Count in SGL Receive\n"
                "\t\t\t 14: Invalid Descriptor Count in LRO Receive\n"
                "\t\t\t 15: peer port error recovery failure\n"
                "\t\t\t 16: tx_buf[next_prod_index].mbuf != NULL\n");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "peg_stop", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_stop_pegs, "I", "Peg Stop");

#endif /* #ifdef QL_DBG */

        ha->hw.user_pri_nic = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "user_pri_nic", CTLFLAG_RW, &ha->hw.user_pri_nic,
                ha->hw.user_pri_nic,
                "VLAN Tag User Priority for Normal Ethernet Packets");

        ha->hw.user_pri_iscsi = 4;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "user_pri_iscsi", CTLFLAG_RW, &ha->hw.user_pri_iscsi,
                ha->hw.user_pri_iscsi,
                "VLAN Tag User Priority for iSCSI Packets");

        qlnx_add_hw_stats_sysctls(ha);
        qlnx_add_drvr_stats_sysctls(ha);

        return;
}

void
ql_hw_link_status(qla_host_t *ha)
{
        device_printf(ha->pci_dev, "cable_oui\t\t 0x%08x\n", ha->hw.cable_oui);

        if (ha->hw.link_up) {
                device_printf(ha->pci_dev, "link Up\n");
        } else {
                device_printf(ha->pci_dev, "link Down\n");
        }

        if (ha->hw.fduplex) {
                device_printf(ha->pci_dev, "Full Duplex\n");
        } else {
                device_printf(ha->pci_dev, "Half Duplex\n");
        }

        if (ha->hw.autoneg) {
                device_printf(ha->pci_dev, "Auto Negotiation Enabled\n");
        } else {
                device_printf(ha->pci_dev, "Auto Negotiation Disabled\n");
        }

        switch (ha->hw.link_speed) {
        case 0x710:
                device_printf(ha->pci_dev, "link speed\t\t 10Gbps\n");
                break;

        case 0x3E8:
                device_printf(ha->pci_dev, "link speed\t\t 1Gbps\n");
                break;

        case 0x64:
                device_printf(ha->pci_dev, "link speed\t\t 100Mbps\n");
                break;

        default:
                device_printf(ha->pci_dev, "link speed\t\t Unknown\n");
                break;
        }

        switch (ha->hw.module_type) {
        case 0x01:
                device_printf(ha->pci_dev, "Module Type 10GBase-LRM\n");
                break;

        case 0x02:
                device_printf(ha->pci_dev, "Module Type 10GBase-LR\n");
                break;

        case 0x03:
                device_printf(ha->pci_dev, "Module Type 10GBase-SR\n");
                break;

        case 0x04:
                device_printf(ha->pci_dev,
                        "Module Type 10GE Passive Copper (Compliant) [%d m]\n",
                        ha->hw.cable_length);
                break;

        case 0x05:
                device_printf(ha->pci_dev, "Module Type 10GE Active"
                        " Limiting Copper (Compliant) [%d m]\n",
                        ha->hw.cable_length);
                break;

        case 0x06:
                device_printf(ha->pci_dev,
                        "Module Type 10GE Passive Copper"
                        " (Legacy, Best Effort) [%d m]\n",
                        ha->hw.cable_length);
                break;

        case 0x07:
                device_printf(ha->pci_dev, "Module Type 1000Base-SX\n");
                break;

        case 0x08:
                device_printf(ha->pci_dev, "Module Type 1000Base-LX\n");
                break;

        case 0x09:
                device_printf(ha->pci_dev, "Module Type 1000Base-CX\n");
                break;

        case 0x0A:
                device_printf(ha->pci_dev, "Module Type 1000Base-T\n");
                break;

        case 0x0B:
                device_printf(ha->pci_dev, "Module Type 1GE Passive Copper"
                        " (Legacy, Best Effort)\n");
                break;

        default:
                device_printf(ha->pci_dev, "Unknown Module Type 0x%x\n",
                        ha->hw.module_type);
                break;
        }

        if (ha->hw.link_faults == 1)
                device_printf(ha->pci_dev, "SFP Power Fault\n");
}

/*
 * Name: ql_free_dma
 * Function: Frees the DMA'able memory allocated in ql_alloc_dma()
 */
void
ql_free_dma(qla_host_t *ha)
{
        uint32_t i;

        if (ha->hw.dma_buf.flags.sds_ring) {
                for (i = 0; i < ha->hw.num_sds_rings; i++) {
                        ql_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]);
                }
                ha->hw.dma_buf.flags.sds_ring = 0;
        }

        if (ha->hw.dma_buf.flags.rds_ring) {
                for (i = 0; i < ha->hw.num_rds_rings; i++) {
                        ql_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]);
                }
                ha->hw.dma_buf.flags.rds_ring = 0;
        }

        if (ha->hw.dma_buf.flags.tx_ring) {
                ql_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring);
                ha->hw.dma_buf.flags.tx_ring = 0;
        }
        ql_minidump_free(ha);
}

/*
 * Name: ql_alloc_dma
 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
 */
int
ql_alloc_dma(qla_host_t *ha)
{
        device_t                dev;
        uint32_t                i, j, size, tx_ring_size;
        qla_hw_t                *hw;
        qla_hw_tx_cntxt_t       *tx_cntxt;
        uint8_t                 *vaddr;
        bus_addr_t              paddr;

        dev = ha->pci_dev;

        QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

        hw = &ha->hw;
        /*
         * Allocate Transmit Ring
         */
        tx_ring_size = (sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS);
        size = (tx_ring_size * ha->hw.num_tx_rings);

        hw->dma_buf.tx_ring.alignment = 8;
        hw->dma_buf.tx_ring.size = size + PAGE_SIZE;
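        /*
         * The extra PAGE_SIZE provides room for the per-ring transmit
         * consumer-index words that are carved out right after the ring
         * bodies below.
         */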

        if (ql_alloc_dmabuf(ha, &hw->dma_buf.tx_ring)) {
                device_printf(dev, "%s: tx ring alloc failed\n", __func__);
                goto ql_alloc_dma_exit;
        }

        vaddr = (uint8_t *)hw->dma_buf.tx_ring.dma_b;
        paddr = hw->dma_buf.tx_ring.dma_addr;

        for (i = 0; i < ha->hw.num_tx_rings; i++) {
                tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

                tx_cntxt->tx_ring_base = (q80_tx_cmd_t *)vaddr;
                tx_cntxt->tx_ring_paddr = paddr;

                vaddr += tx_ring_size;
                paddr += tx_ring_size;
        }

        for (i = 0; i < ha->hw.num_tx_rings; i++) {
                tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

                tx_cntxt->tx_cons = (uint32_t *)vaddr;
                tx_cntxt->tx_cons_paddr = paddr;

                vaddr += sizeof (uint32_t);
                paddr += sizeof (uint32_t);
        }

        ha->hw.dma_buf.flags.tx_ring = 1;

        QL_DPRINT2(ha, (dev, "%s: tx_ring phys %p virt %p\n",
                __func__, (void *)(hw->dma_buf.tx_ring.dma_addr),
                hw->dma_buf.tx_ring.dma_b));
        /*
         * Allocate Receive Descriptor Rings
         */

        for (i = 0; i < hw->num_rds_rings; i++) {
                hw->dma_buf.rds_ring[i].alignment = 8;
                hw->dma_buf.rds_ring[i].size =
                        (sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS;

                if (ql_alloc_dmabuf(ha, &hw->dma_buf.rds_ring[i])) {
                        device_printf(dev, "%s: rds ring[%d] alloc failed\n",
                                __func__, i);

                        for (j = 0; j < i; j++)
                                ql_free_dmabuf(ha, &hw->dma_buf.rds_ring[j]);

                        goto ql_alloc_dma_exit;
                }
                QL_DPRINT4(ha, (dev, "%s: rx_ring[%d] phys %p virt %p\n",
                        __func__, i, (void *)(hw->dma_buf.rds_ring[i].dma_addr),
                        hw->dma_buf.rds_ring[i].dma_b));
        }

        hw->dma_buf.flags.rds_ring = 1;

        /*
         * Allocate Status Descriptor Rings
         */

        for (i = 0; i < hw->num_sds_rings; i++) {
                hw->dma_buf.sds_ring[i].alignment = 8;
                hw->dma_buf.sds_ring[i].size =
                        (sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS;

                if (ql_alloc_dmabuf(ha, &hw->dma_buf.sds_ring[i])) {
                        device_printf(dev, "%s: sds ring alloc failed\n",
                                __func__);

                        for (j = 0; j < i; j++)
                                ql_free_dmabuf(ha, &hw->dma_buf.sds_ring[j]);

                        goto ql_alloc_dma_exit;
                }
                QL_DPRINT4(ha, (dev, "%s: sds_ring[%d] phys %p virt %p\n",
                        __func__, i,
                        (void *)(hw->dma_buf.sds_ring[i].dma_addr),
                        hw->dma_buf.sds_ring[i].dma_b));
        }
        for (i = 0; i < hw->num_sds_rings; i++) {
                hw->sds[i].sds_ring_base =
                        (q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b;
        }

        hw->dma_buf.flags.sds_ring = 1;

        return (0);

ql_alloc_dma_exit:
        ql_free_dma(ha);
        return (-1);
}
1383
1384 #define Q8_MBX_MSEC_DELAY       5000
1385
1386 static int
1387 qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
1388         uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause)
1389 {
1390         uint32_t i;
1391         uint32_t data;
1392         int ret = 0;
1393         uint64_t start_usecs;
1394         uint64_t end_usecs;
1395         uint64_t msecs_200;
1396
1397         ql_sp_log(ha, 0, 5, no_pause, h_mbox[0], h_mbox[1], h_mbox[2], h_mbox[3]);
1398
1399         if (ha->offline || ha->qla_initiate_recovery) {
1400                 ql_sp_log(ha, 1, 2, ha->offline, ha->qla_initiate_recovery, 0, 0, 0);
1401                 goto exit_qla_mbx_cmd;
1402         }
1403
1404         if (((ha->err_inject & 0xFFFF) == INJCT_MBX_CMD_FAILURE) &&
1405                 (((ha->err_inject & ~0xFFFF) == ((h_mbox[0] & 0xFFFF) << 16))||
1406                 !(ha->err_inject & ~0xFFFF))) {
1407                 ret = -3;
1408                 QL_INITIATE_RECOVERY(ha);
1409                 goto exit_qla_mbx_cmd;
1410         }
1411
1412         start_usecs = qla_get_usec_timestamp();
1413
1414         if (no_pause)
1415                 i = 1000;
1416         else
1417                 i = Q8_MBX_MSEC_DELAY;
1418
1419         while (i) {
1420
1421                 if (ha->qla_initiate_recovery) {
1422                         ql_sp_log(ha, 2, 1, ha->qla_initiate_recovery, 0, 0, 0, 0);
1423                         return (-1);
1424                 }
1425
1426                 data = READ_REG32(ha, Q8_HOST_MBOX_CNTRL);
1427                 if (data == 0)
1428                         break;
1429                 if (no_pause) {
1430                         DELAY(1000);
1431                 } else {
1432                         qla_mdelay(__func__, 1);
1433                 }
1434                 i--;
1435         }
1436
1437         if (i == 0) {
1438                 device_printf(ha->pci_dev, "%s: host_mbx_cntrl 0x%08x\n",
1439                         __func__, data);
1440                 ql_sp_log(ha, 3, 1, data, 0, 0, 0, 0);
1441                 ret = -1;
1442                 ha->hw.mbx_comp_msecs[(Q8_MBX_COMP_MSECS - 2)]++;
1443                 QL_INITIATE_RECOVERY(ha);
1444                 goto exit_qla_mbx_cmd;
1445         }
1446
1447         for (i = 0; i < n_hmbox; i++) {
1448                 WRITE_REG32(ha, (Q8_HOST_MBOX0 + (i << 2)), *h_mbox);
1449                 h_mbox++;
1450         }
1451
1452         WRITE_REG32(ha, Q8_HOST_MBOX_CNTRL, 0x1);
1453
1454
1455         i = Q8_MBX_MSEC_DELAY;
1456         while (i) {
1457
1458                 if (ha->qla_initiate_recovery) {
1459                         ql_sp_log(ha, 4, 1, ha->qla_initiate_recovery, 0, 0, 0, 0);
1460                         return (-1);
1461                 }
1462
1463                 data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);
1464
1465                 if ((data & 0x3) == 1) {
1466                         data = READ_REG32(ha, Q8_FW_MBOX0);
1467                         if ((data & 0xF000) != 0x8000)
1468                                 break;
1469                 }
1470                 if (no_pause) {
1471                         DELAY(1000);
1472                 } else {
1473                         qla_mdelay(__func__, 1);
1474                 }
1475                 i--;
1476         }
1477         if (i == 0) {
1478                 device_printf(ha->pci_dev, "%s: fw_mbx_cntrl 0x%08x\n",
1479                         __func__, data);
1480                 ql_sp_log(ha, 5, 1, data, 0, 0, 0, 0);
1481                 ret = -2;
1482                 ha->hw.mbx_comp_msecs[(Q8_MBX_COMP_MSECS - 1)]++;
1483                 QL_INITIATE_RECOVERY(ha);
1484                 goto exit_qla_mbx_cmd;
1485         }
1486
1487         for (i = 0; i < n_fwmbox; i++) {
1488
1489                 if (ha->qla_initiate_recovery) {
1490                         ql_sp_log(ha, 6, 1, ha->qla_initiate_recovery, 0, 0, 0, 0);
1491                         return (-1);
1492                 }
1493
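                /*
                 * Index into fw_mbox instead of advancing the pointer so
                 * that the completion log after this loop sees the words
                 * that were actually returned.
                 */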
1494                 fw_mbox[i] = READ_REG32(ha, (Q8_FW_MBOX0 + (i << 2)));
1495         }
1496
1497         WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
1498         WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
1499
1500         end_usecs = qla_get_usec_timestamp();
1501
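        /*
         * Histogram of mailbox completion latency in 200 msec buckets:
         * buckets 0-14 cover completions under 3 seconds, bucket 15 covers
         * 3-4 seconds and bucket 16 anything slower. The last two entries
         * of mbx_comp_msecs[] count the two timeout paths above.
         */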
1502         if (end_usecs > start_usecs) {
1503                 msecs_200 = (end_usecs - start_usecs)/(1000 * 200);
1504
1505                 if (msecs_200 < 15) 
1506                         ha->hw.mbx_comp_msecs[msecs_200]++;
1507                 else if (msecs_200 < 20)
1508                         ha->hw.mbx_comp_msecs[15]++;
1509                 else {
1510                         device_printf(ha->pci_dev, "%s: [%ju, %ju] %ju\n", __func__,
1511                                 (uintmax_t)start_usecs, (uintmax_t)end_usecs, (uintmax_t)msecs_200);
1512                         ha->hw.mbx_comp_msecs[16]++;
1513                 }
1514         }
1515         ql_sp_log(ha, 7, 5, fw_mbox[0], fw_mbox[1], fw_mbox[2], fw_mbox[3], fw_mbox[4]);
1516
1517
1518 exit_qla_mbx_cmd:
1519         return (ret);
1520 }
1521
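/*
 * Name: qla_get_nic_partition
 * Function: Queries the NIC partition attributes. Optionally returns
 *      whether the partition supports 9KB receive buffers and the number
 *      of receive queues assigned to this function.
 */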
1522 int
1523 qla_get_nic_partition(qla_host_t *ha, uint32_t *supports_9kb,
1524         uint32_t *num_rcvq)
1525 {
1526         uint32_t *mbox, err;
1527         device_t dev = ha->pci_dev;
1528
1529         bzero(ha->hw.mbox, (sizeof (uint32_t) * Q8_NUM_MBOX));
1530
1531         mbox = ha->hw.mbox;
1532
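        /*
         * Raw encoding of the first mailbox word: opcode in the low 16 bits,
         * the request length in words (2, matching the count passed to
         * qla_mbx_cmd() below) at bit 16, and what appears to be the mailbox
         * command version in the top bits.
         */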
1533         mbox[0] = Q8_MBX_GET_NIC_PARTITION | (0x2 << 16) | (0x2 << 29); 
1534
1535         if (qla_mbx_cmd(ha, mbox, 2, mbox, 19, 0)) {
1536                 device_printf(dev, "%s: failed0\n", __func__);
1537                 return (-1);
1538         }
1539         err = mbox[0] >> 25; 
1540
1541         if (supports_9kb != NULL) {
1542                 if (mbox[16] & 0x80) /* bit 7 of mbox 16 */
1543                         *supports_9kb = 1;
1544                 else
1545                         *supports_9kb = 0;
1546         }
1547
1548         if (num_rcvq != NULL)
1549                 *num_rcvq = ((mbox[6] >> 16) & 0xFFFF);
1550
1551         if ((err != 1) && (err != 0)) {
1552                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1553                 return (-1);
1554         }
1555         return 0;
1556 }
1557
1558 static int
1559 qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx, uint32_t num_intrs,
1560         uint32_t create)
1561 {
1562         uint32_t i, err;
1563         device_t dev = ha->pci_dev;
1564         q80_config_intr_t *c_intr;
1565         q80_config_intr_rsp_t *c_intr_rsp;
1566
1567         c_intr = (q80_config_intr_t *)ha->hw.mbox;
1568         bzero(c_intr, (sizeof (q80_config_intr_t)));
1569
1570         c_intr->opcode = Q8_MBX_CONFIG_INTR;
1571
1572         c_intr->count_version = (sizeof (q80_config_intr_t) >> 2);
1573         c_intr->count_version |= Q8_MBX_CMD_VERSION;
1574
1575         c_intr->nentries = num_intrs;
1576
1577         for (i = 0; i < num_intrs; i++) {
1578                 if (create) {
1579                         c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_CREATE;
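                        /*
                         * MSI-X vector 0 is reserved for the firmware
                         * mailbox interrupt (see qla_confirm_9kb_enable()),
                         * so ring vectors start at index 1.
                         */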
1580                         c_intr->intr[i].msix_index = start_idx + 1 + i;
1581                 } else {
1582                         c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_DELETE;
1583                         c_intr->intr[i].msix_index =
1584                                 ha->hw.intr_id[(start_idx + i)];
1585                 }
1586
1587                 c_intr->intr[i].cmd_type |= Q8_MBX_CONFIG_INTR_TYPE_MSI_X;
1588         }
1589
1590         if (qla_mbx_cmd(ha, (uint32_t *)c_intr,
1591                 (sizeof (q80_config_intr_t) >> 2),
1592                 ha->hw.mbox, (sizeof (q80_config_intr_rsp_t) >> 2), 0)) {
1593                 device_printf(dev, "%s: %s failed0\n", __func__,
1594                         (create ? "create" : "delete"));
1595                 return (-1);
1596         }
1597
1598         c_intr_rsp = (q80_config_intr_rsp_t *)ha->hw.mbox;
1599
1600         err = Q8_MBX_RSP_STATUS(c_intr_rsp->regcnt_status);
1601
1602         if (err) {
1603                 device_printf(dev, "%s: %s failed1 [0x%08x, %d]\n", __func__,
1604                         (create ? "create" : "delete"), err, c_intr_rsp->nentries);
1605
1606                 for (i = 0; i < c_intr_rsp->nentries; i++) {
1607                         device_printf(dev, "%s: [%d]:[0x%x 0x%x 0x%x]\n",
1608                                 __func__, i, 
1609                                 c_intr_rsp->intr[i].status,
1610                                 c_intr_rsp->intr[i].intr_id,
1611                                 c_intr_rsp->intr[i].intr_src);
1612                 }
1613
1614                 return (-1);
1615         }
1616
1617         for (i = 0; ((i < num_intrs) && create); i++) {
1618                 if (!c_intr_rsp->intr[i].status) {
1619                         ha->hw.intr_id[(start_idx + i)] =
1620                                 c_intr_rsp->intr[i].intr_id;
1621                         ha->hw.intr_src[(start_idx + i)] =
1622                                 c_intr_rsp->intr[i].intr_src;
1623                 }
1624         }
1625
1626         return (0);
1627 }
1628
1629 /*
1630  * Name: qla_config_rss
1631  * Function: Configure RSS for the context/interface.
1632  */
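/*
 * This is the canonical 40-byte Toeplitz hash key used as the default by
 * many RSS implementations, stored as 64-bit words for the mailbox command.
 */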
1633 static const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL,
1634                         0x8030f20c77cb2da3ULL,
1635                         0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
1636                         0x255b0ec26d5a56daULL };
1637
1638 static int
1639 qla_config_rss(qla_host_t *ha, uint16_t cntxt_id)
1640 {
1641         q80_config_rss_t        *c_rss;
1642         q80_config_rss_rsp_t    *c_rss_rsp;
1643         uint32_t                err, i;
1644         device_t                dev = ha->pci_dev;
1645
1646         c_rss = (q80_config_rss_t *)ha->hw.mbox;
1647         bzero(c_rss, (sizeof (q80_config_rss_t)));
1648
1649         c_rss->opcode = Q8_MBX_CONFIG_RSS;
1650
1651         c_rss->count_version = (sizeof (q80_config_rss_t) >> 2);
1652         c_rss->count_version |= Q8_MBX_CMD_VERSION;
1653
1654         c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP_IP |
1655                                 Q8_MBX_RSS_HASH_TYPE_IPV6_TCP_IP);
1656         //c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP |
1657         //                      Q8_MBX_RSS_HASH_TYPE_IPV6_TCP);
1658
1659         c_rss->flags = Q8_MBX_RSS_FLAGS_ENABLE_RSS;
1660         c_rss->flags |= Q8_MBX_RSS_FLAGS_USE_IND_TABLE;
1661
1662         c_rss->indtbl_mask = Q8_MBX_RSS_INDTBL_MASK;
1663
1664         c_rss->indtbl_mask |= Q8_MBX_RSS_FLAGS_MULTI_RSS_VALID;
1665         c_rss->flags |= Q8_MBX_RSS_FLAGS_TYPE_CRSS;
1666
1667         c_rss->cntxt_id = cntxt_id;
1668
1669         for (i = 0; i < 5; i++) {
1670                 c_rss->rss_key[i] = rss_key[i];
1671         }
1672
1673         if (qla_mbx_cmd(ha, (uint32_t *)c_rss,
1674                 (sizeof (q80_config_rss_t) >> 2),
1675                 ha->hw.mbox, (sizeof(q80_config_rss_rsp_t) >> 2), 0)) {
1676                 device_printf(dev, "%s: failed0\n", __func__);
1677                 return (-1);
1678         }
1679         c_rss_rsp = (q80_config_rss_rsp_t *)ha->hw.mbox;
1680
1681         err = Q8_MBX_RSP_STATUS(c_rss_rsp->regcnt_status);
1682
1683         if (err) {
1684                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1685                 return (-1);
1686         }
1687         return 0;
1688 }
1689
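/*
 * Name: qla_set_rss_ind_table
 * Function: Programs entries [start_idx, start_idx + count - 1] of the RSS
 *      indirection table for the given receive context.
 */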
1690 static int
1691 qla_set_rss_ind_table(qla_host_t *ha, uint32_t start_idx, uint32_t count,
1692         uint16_t cntxt_id, uint8_t *ind_table)
1693 {
1694         q80_config_rss_ind_table_t      *c_rss_ind;
1695         q80_config_rss_ind_table_rsp_t  *c_rss_ind_rsp;
1696         uint32_t                        err;
1697         device_t                        dev = ha->pci_dev;
1698
1699         if ((count > Q8_RSS_IND_TBL_SIZE) ||
1700                 ((start_idx + count - 1) > Q8_RSS_IND_TBL_MAX_IDX)) {
1701                 device_printf(dev, "%s: illegal start_idx/count [%d, %d]\n",
1702                         __func__, start_idx, count);
1703                 return (-1);
1704         }
1705
1706         c_rss_ind = (q80_config_rss_ind_table_t *)ha->hw.mbox;
1707         bzero(c_rss_ind, sizeof (q80_config_rss_ind_table_t));
1708
1709         c_rss_ind->opcode = Q8_MBX_CONFIG_RSS_TABLE;
1710         c_rss_ind->count_version = (sizeof (q80_config_rss_ind_table_t) >> 2);
1711         c_rss_ind->count_version |= Q8_MBX_CMD_VERSION;
1712
1713         c_rss_ind->start_idx = start_idx;
1714         c_rss_ind->end_idx = start_idx + count - 1;
1715         c_rss_ind->cntxt_id = cntxt_id;
1716         bcopy(ind_table, c_rss_ind->ind_table, count);
1717
1718         if (qla_mbx_cmd(ha, (uint32_t *)c_rss_ind,
1719                 (sizeof (q80_config_rss_ind_table_t) >> 2), ha->hw.mbox,
1720                 (sizeof(q80_config_rss_ind_table_rsp_t) >> 2), 0)) {
1721                 device_printf(dev, "%s: failed0\n", __func__);
1722                 return (-1);
1723         }
1724
1725         c_rss_ind_rsp = (q80_config_rss_ind_table_rsp_t *)ha->hw.mbox;
1726         err = Q8_MBX_RSP_STATUS(c_rss_ind_rsp->regcnt_status);
1727
1728         if (err) {
1729                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1730                 return (-1);
1731         }
1732         return 0;
1733 }
1734
1735 /*
1736  * Name: qla_config_intr_coalesce
1737  * Function: Configure Interrupt Coalescing.
1738  */
1739 static int
1740 qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable,
1741         int rcv)
1742 {
1743         q80_config_intr_coalesc_t       *intrc;
1744         q80_config_intr_coalesc_rsp_t   *intrc_rsp;
1745         uint32_t                        err, i;
1746         device_t                        dev = ha->pci_dev;
1747         
1748         intrc = (q80_config_intr_coalesc_t *)ha->hw.mbox;
1749         bzero(intrc, (sizeof (q80_config_intr_coalesc_t)));
1750
1751         intrc->opcode = Q8_MBX_CONFIG_INTR_COALESCE;
1752         intrc->count_version = (sizeof (q80_config_intr_coalesc_t) >> 2);
1753         intrc->count_version |= Q8_MBX_CMD_VERSION;
1754
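        /*
         * rcv_intr_coalesce/xmt_intr_coalesce pack the packet threshold in
         * the low 16 bits and the wait threshold in the high 16 bits.
         */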
1755         if (rcv) {
1756                 intrc->flags = Q8_MBX_INTRC_FLAGS_RCV;
1757                 intrc->max_pkts = ha->hw.rcv_intr_coalesce & 0xFFFF;
1758                 intrc->max_mswait = (ha->hw.rcv_intr_coalesce >> 16) & 0xFFFF;
1759         } else {
1760                 intrc->flags = Q8_MBX_INTRC_FLAGS_XMT;
1761                 intrc->max_pkts = ha->hw.xmt_intr_coalesce & 0xFFFF;
1762                 intrc->max_mswait = (ha->hw.xmt_intr_coalesce >> 16) & 0xFFFF;
1763         }
1764
1765         intrc->cntxt_id = cntxt_id;
1766
1767         if (tenable) {
1768                 intrc->flags |= Q8_MBX_INTRC_FLAGS_PERIODIC;
1769                 intrc->timer_type = Q8_MBX_INTRC_TIMER_PERIODIC;
1770
1771                 for (i = 0; i < ha->hw.num_sds_rings; i++) {
1772                         intrc->sds_ring_mask |= (1 << i);
1773                 }
1774                 intrc->ms_timeout = 1000;
1775         }
1776
1777         if (qla_mbx_cmd(ha, (uint32_t *)intrc,
1778                 (sizeof (q80_config_intr_coalesc_t) >> 2),
1779                 ha->hw.mbox, (sizeof(q80_config_intr_coalesc_rsp_t) >> 2), 0)) {
1780                 device_printf(dev, "%s: failed0\n", __func__);
1781                 return (-1);
1782         }
1783         intrc_rsp = (q80_config_intr_coalesc_rsp_t *)ha->hw.mbox;
1784
1785         err = Q8_MBX_RSP_STATUS(intrc_rsp->regcnt_status);
1786
1787         if (err) {
1788                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1789                 return (-1);
1790         }
1791         
1792         return 0;
1793 }
1794
1795
1796 /*
1797  * Name: qla_config_mac_addr
1798  * Function: binds a MAC address to the context/interface.
1799  *      Can be unicast, multicast or broadcast.
1800  */
1801 static int
1802 qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac,
1803         uint32_t num_mac)
1804 {
1805         q80_config_mac_addr_t           *cmac;
1806         q80_config_mac_addr_rsp_t       *cmac_rsp;
1807         uint32_t                        err;
1808         device_t                        dev = ha->pci_dev;
1809         int                             i;
1810         uint8_t                         *mac_cpy = mac_addr;
1811
1812         if (num_mac > Q8_MAX_MAC_ADDRS) {
1813                 device_printf(dev, "%s: %s num_mac [0x%x] > Q8_MAX_MAC_ADDRS\n",
1814                         __func__, (add_mac ? "Add" : "Del"), num_mac);
1815                 return (-1);
1816         }
1817
1818         cmac = (q80_config_mac_addr_t *)ha->hw.mbox;
1819         bzero(cmac, (sizeof (q80_config_mac_addr_t)));
1820
1821         cmac->opcode = Q8_MBX_CONFIG_MAC_ADDR;
1822         cmac->count_version = sizeof (q80_config_mac_addr_t) >> 2;
1823         cmac->count_version |= Q8_MBX_CMD_VERSION;
1824
1825         if (add_mac) 
1826                 cmac->cmd = Q8_MBX_CMAC_CMD_ADD_MAC_ADDR;
1827         else
1828                 cmac->cmd = Q8_MBX_CMAC_CMD_DEL_MAC_ADDR;
1829                 
1830         cmac->cmd |= Q8_MBX_CMAC_CMD_CAM_INGRESS;
1831
1832         cmac->nmac_entries = num_mac;
1833         cmac->cntxt_id = ha->hw.rcv_cntxt_id;
1834
1835         for (i = 0; i < num_mac; i++) {
1836                 bcopy(mac_addr, cmac->mac_addr[i].addr, Q8_ETHER_ADDR_LEN); 
1837                 mac_addr = mac_addr + ETHER_ADDR_LEN;
1838         }
1839
1840         if (qla_mbx_cmd(ha, (uint32_t *)cmac,
1841                 (sizeof (q80_config_mac_addr_t) >> 2),
1842                 ha->hw.mbox, (sizeof(q80_config_mac_addr_rsp_t) >> 2), 1)) {
1843                 device_printf(dev, "%s: %s failed0\n", __func__,
1844                         (add_mac ? "Add" : "Del"));
1845                 return (-1);
1846         }
1847         cmac_rsp = (q80_config_mac_addr_rsp_t *)ha->hw.mbox;
1848
1849         err = Q8_MBX_RSP_STATUS(cmac_rsp->regcnt_status);
1850
1851         if (err) {
1852                 device_printf(dev, "%s: %s failed1 [0x%08x]\n", __func__,
1853                         (add_mac ? "Add" : "Del"), err);
1854                 for (i = 0; i < num_mac; i++) {
1855                         device_printf(dev, "%s: %02x:%02x:%02x:%02x:%02x:%02x\n",
1856                                 __func__, mac_cpy[0], mac_cpy[1], mac_cpy[2],
1857                                 mac_cpy[3], mac_cpy[4], mac_cpy[5]);
1858                         mac_cpy += ETHER_ADDR_LEN;
1859                 }
1860                 return (-1);
1861         }
1862         
1863         return 0;
1864 }
1865
1866
1867 /*
1868  * Name: qla_set_mac_rcv_mode
1869  * Function: Enable/Disable AllMulticast and Promiscuous Modes.
1870  */
1871 static int
1872 qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode)
1873 {
1874         q80_config_mac_rcv_mode_t       *rcv_mode;
1875         uint32_t                        err;
1876         q80_config_mac_rcv_mode_rsp_t   *rcv_mode_rsp;
1877         device_t                        dev = ha->pci_dev;
1878
1879         rcv_mode = (q80_config_mac_rcv_mode_t *)ha->hw.mbox;
1880         bzero(rcv_mode, (sizeof (q80_config_mac_rcv_mode_t)));
1881
1882         rcv_mode->opcode = Q8_MBX_CONFIG_MAC_RX_MODE;
1883         rcv_mode->count_version = sizeof (q80_config_mac_rcv_mode_t) >> 2;
1884         rcv_mode->count_version |= Q8_MBX_CMD_VERSION;
1885
1886         rcv_mode->mode = mode;
1887
1888         rcv_mode->cntxt_id = ha->hw.rcv_cntxt_id;
1889
1890         if (qla_mbx_cmd(ha, (uint32_t *)rcv_mode,
1891                 (sizeof (q80_config_mac_rcv_mode_t) >> 2),
1892                 ha->hw.mbox, (sizeof(q80_config_mac_rcv_mode_rsp_t) >> 2), 1)) {
1893                 device_printf(dev, "%s: failed0\n", __func__);
1894                 return (-1);
1895         }
1896         rcv_mode_rsp = (q80_config_mac_rcv_mode_rsp_t *)ha->hw.mbox;
1897
1898         err = Q8_MBX_RSP_STATUS(rcv_mode_rsp->regcnt_status);
1899
1900         if (err) {
1901                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1902                 return (-1);
1903         }
1904         
1905         return 0;
1906 }
1907
1908 int
1909 ql_set_promisc(qla_host_t *ha)
1910 {
1911         int ret;
1912
1913         ha->hw.mac_rcv_mode |= Q8_MBX_MAC_RCV_PROMISC_ENABLE;
1914         ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1915         return (ret);
1916 }
1917
1918 void
1919 qla_reset_promisc(qla_host_t *ha)
1920 {
1921         ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_RCV_PROMISC_ENABLE;
1922         (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1923 }
1924
1925 int
1926 ql_set_allmulti(qla_host_t *ha)
1927 {
1928         int ret;
1929
1930         ha->hw.mac_rcv_mode |= Q8_MBX_MAC_ALL_MULTI_ENABLE;
1931         ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1932         return (ret);
1933 }
1934
1935 void
1936 qla_reset_allmulti(qla_host_t *ha)
1937 {
1938         ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_ALL_MULTI_ENABLE;
1939         (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1940 }
1941
1942 /*
1943  * Name: ql_set_max_mtu
1944  * Function:
1945  *      Sets the maximum transmission unit (MTU) for the specified rcv context.
1946  */
1947 int
1948 ql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id)
1949 {
1950         device_t                dev;
1951         q80_set_max_mtu_t       *max_mtu;
1952         q80_set_max_mtu_rsp_t   *max_mtu_rsp;
1953         uint32_t                err;
1954
1955         dev = ha->pci_dev;
1956
1957         max_mtu = (q80_set_max_mtu_t *)ha->hw.mbox;
1958         bzero(max_mtu, (sizeof (q80_set_max_mtu_t)));
1959
1960         max_mtu->opcode = Q8_MBX_SET_MAX_MTU;
1961         max_mtu->count_version = (sizeof (q80_set_max_mtu_t) >> 2);
1962         max_mtu->count_version |= Q8_MBX_CMD_VERSION;
1963
1964         max_mtu->cntxt_id = cntxt_id;
1965         max_mtu->mtu = mtu;
1966
1967         if (qla_mbx_cmd(ha, (uint32_t *)max_mtu,
1968                 (sizeof (q80_set_max_mtu_t) >> 2),
1969                 ha->hw.mbox, (sizeof (q80_set_max_mtu_rsp_t) >> 2), 1)) {
1970                 device_printf(dev, "%s: failed\n", __func__);
1971                 return -1;
1972         }
1973
1974         max_mtu_rsp = (q80_set_max_mtu_rsp_t *)ha->hw.mbox;
1975
1976         err = Q8_MBX_RSP_STATUS(max_mtu_rsp->regcnt_status);
1977
1978         if (err) {
1979                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1980         }
1981
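        /* note: a non-zero mailbox status is only logged; the caller
         * still sees success */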
1982         return 0;
1983 }
1984
1985 static int
1986 qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id)
1987 {
1988         device_t                dev;
1989         q80_link_event_t        *lnk;
1990         q80_link_event_rsp_t    *lnk_rsp;
1991         uint32_t                err;
1992
1993         dev = ha->pci_dev;
1994
1995         lnk = (q80_link_event_t *)ha->hw.mbox;
1996         bzero(lnk, (sizeof (q80_link_event_t)));
1997
1998         lnk->opcode = Q8_MBX_LINK_EVENT_REQ;
1999         lnk->count_version = (sizeof (q80_link_event_t) >> 2);
2000         lnk->count_version |= Q8_MBX_CMD_VERSION;
2001
2002         lnk->cntxt_id = cntxt_id;
2003         lnk->cmd = Q8_LINK_EVENT_CMD_ENABLE_ASYNC;
2004
2005         if (qla_mbx_cmd(ha, (uint32_t *)lnk, (sizeof (q80_link_event_t) >> 2),
2006                 ha->hw.mbox, (sizeof (q80_link_event_rsp_t) >> 2), 0)) {
2007                 device_printf(dev, "%s: failed\n", __func__);
2008                 return -1;
2009         }
2010
2011         lnk_rsp = (q80_link_event_rsp_t *)ha->hw.mbox;
2012
2013         err = Q8_MBX_RSP_STATUS(lnk_rsp->regcnt_status);
2014
2015         if (err) {
2016                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
2017         }
2018
2019         return 0;
2020 }
2021
2022 static int
2023 qla_config_fw_lro(qla_host_t *ha, uint16_t cntxt_id)
2024 {
2025         device_t                dev;
2026         q80_config_fw_lro_t     *fw_lro;
2027         q80_config_fw_lro_rsp_t *fw_lro_rsp;
2028         uint32_t                err;
2029
2030         dev = ha->pci_dev;
2031
2032         fw_lro = (q80_config_fw_lro_t *)ha->hw.mbox;
2033         bzero(fw_lro, sizeof(q80_config_fw_lro_t));
2034
2035         fw_lro->opcode = Q8_MBX_CONFIG_FW_LRO;
2036         fw_lro->count_version = (sizeof (q80_config_fw_lro_t) >> 2);
2037         fw_lro->count_version |= Q8_MBX_CMD_VERSION;
2038
2039         fw_lro->flags |= Q8_MBX_FW_LRO_IPV4 | Q8_MBX_FW_LRO_IPV4_WO_DST_IP_CHK;
2040         fw_lro->flags |= Q8_MBX_FW_LRO_IPV6 | Q8_MBX_FW_LRO_IPV6_WO_DST_IP_CHK;
2041
2042         fw_lro->cntxt_id = cntxt_id;
2043
2044         if (qla_mbx_cmd(ha, (uint32_t *)fw_lro,
2045                 (sizeof (q80_config_fw_lro_t) >> 2),
2046                 ha->hw.mbox, (sizeof (q80_config_fw_lro_rsp_t) >> 2), 0)) {
2047                 device_printf(dev, "%s: failed\n", __func__);
2048                 return -1;
2049         }
2050
2051         fw_lro_rsp = (q80_config_fw_lro_rsp_t *)ha->hw.mbox;
2052
2053         err = Q8_MBX_RSP_STATUS(fw_lro_rsp->regcnt_status);
2054
2055         if (err) {
2056                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
2057         }
2058
2059         return 0;
2060 }
2061
2062 static int
2063 qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode)
2064 {
2065         device_t                dev;
2066         q80_hw_config_t         *hw_config;
2067         q80_hw_config_rsp_t     *hw_config_rsp;
2068         uint32_t                err;
2069
2070         dev = ha->pci_dev;
2071
2072         hw_config = (q80_hw_config_t *)ha->hw.mbox;
2073         bzero(hw_config, sizeof (q80_hw_config_t));
2074
2075         hw_config->opcode = Q8_MBX_HW_CONFIG;
2076         hw_config->count_version = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE_COUNT;
2077         hw_config->count_version |= Q8_MBX_CMD_VERSION;
2078
2079         hw_config->cmd = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE;
2080
2081         hw_config->u.set_cam_search_mode.mode = search_mode;
2082
2083         if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
2084                 (sizeof (q80_hw_config_t) >> 2),
2085                 ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
2086                 device_printf(dev, "%s: failed\n", __func__);
2087                 return -1;
2088         }
2089         hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;
2090
2091         err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);
2092
2093         if (err) {
2094                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
2095         }
2096
2097         return 0;
2098 }
2099
2100 static int
2101 qla_get_cam_search_mode(qla_host_t *ha)
2102 {
2103         device_t                dev;
2104         q80_hw_config_t         *hw_config;
2105         q80_hw_config_rsp_t     *hw_config_rsp;
2106         uint32_t                err;
2107
2108         dev = ha->pci_dev;
2109
2110         hw_config = (q80_hw_config_t *)ha->hw.mbox;
2111         bzero(hw_config, sizeof (q80_hw_config_t));
2112
2113         hw_config->opcode = Q8_MBX_HW_CONFIG;
2114         hw_config->count_version = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE_COUNT;
2115         hw_config->count_version |= Q8_MBX_CMD_VERSION;
2116
2117         hw_config->cmd = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE;
2118
2119         if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
2120                 (sizeof (q80_hw_config_t) >> 2),
2121                 ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
2122                 device_printf(dev, "%s: failed\n", __func__);
2123                 return -1;
2124         }
2125         hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;
2126
2127         err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);
2128
2129         if (err) {
2130                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
2131         } else {
2132                 device_printf(dev, "%s: cam search mode [0x%08x]\n", __func__,
2133                         hw_config_rsp->u.get_cam_search_mode.mode);
2134         }
2135
2136         return 0;
2137 }
2138
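/*
 * Name: qla_get_hw_stats
 * Function: Issues a Q8_MBX_GET_STATS mailbox command. On success the
 *      rsp_size byte response is left in ha->hw.mbox for the caller to
 *      copy out.
 */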
2139 static int
2140 qla_get_hw_stats(qla_host_t *ha, uint32_t cmd, uint32_t rsp_size)
2141 {
2142         device_t                dev;
2143         q80_get_stats_t         *stat;
2144         q80_get_stats_rsp_t     *stat_rsp;
2145         uint32_t                err;
2146
2147         dev = ha->pci_dev;
2148
2149         stat = (q80_get_stats_t *)ha->hw.mbox;
2150         bzero(stat, (sizeof (q80_get_stats_t)));
2151
2152         stat->opcode = Q8_MBX_GET_STATS;
2153         stat->count_version = 2;
2154         stat->count_version |= Q8_MBX_CMD_VERSION;
2155
2156         stat->cmd = cmd;
2157
2158         if (qla_mbx_cmd(ha, (uint32_t *)stat, 2,
2159                 ha->hw.mbox, (rsp_size >> 2), 0)) {
2160                 device_printf(dev, "%s: failed\n", __func__);
2161                 return -1;
2162         }
2163
2164         stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
2165
2166         err = Q8_MBX_RSP_STATUS(stat_rsp->regcnt_status);
2167
2168         if (err) {
2169                 return -1;
2170         }
2171
2172         return 0;
2173 }
2174
2175 void
2176 ql_get_stats(qla_host_t *ha)
2177 {
2178         q80_get_stats_rsp_t     *stat_rsp;
2179         q80_mac_stats_t         *mstat;
2180         q80_xmt_stats_t         *xstat;
2181         q80_rcv_stats_t         *rstat;
2182         uint32_t                cmd;
2183         int                     i;
2184         struct ifnet *ifp = ha->ifp;
2185
2186         if (ifp == NULL)
2187                 return;
2188
2189         if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) != 0) {
2190                 device_printf(ha->pci_dev, "%s: failed\n", __func__);
2191                 return;
2192         }
2193
2194         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2195                 QLA_UNLOCK(ha, __func__);
2196                 return;
2197         }
2198
2199         stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
2200         /*
2201          * Get MAC Statistics
2202          */
2203         cmd = Q8_GET_STATS_CMD_TYPE_MAC;
2204 //      cmd |= Q8_GET_STATS_CMD_CLEAR;
2205
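        /*
         * The low bit of the PCI function number selects the port whose
         * MAC statistics are returned.
         */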
2206         cmd |= ((ha->pci_func & 0x1) << 16);
2207
2208         if (ha->qla_watchdog_pause || (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) ||
2209                 ha->offline)
2210                 goto ql_get_stats_exit;
2211
2212         if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
2213                 mstat = (q80_mac_stats_t *)&stat_rsp->u.mac;
2214                 bcopy(mstat, &ha->hw.mac, sizeof(q80_mac_stats_t));
2215         } else {
2216                 device_printf(ha->pci_dev, "%s: mac failed [0x%08x]\n",
2217                         __func__, ha->hw.mbox[0]);
2218         }
2219         /*
2220          * Get RCV Statistics
2221          */
2222         cmd = Q8_GET_STATS_CMD_RCV | Q8_GET_STATS_CMD_TYPE_CNTXT;
2223 //      cmd |= Q8_GET_STATS_CMD_CLEAR;
2224         cmd |= (ha->hw.rcv_cntxt_id << 16);
2225
2226         if (ha->qla_watchdog_pause || (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) ||
2227                 ha->offline)
2228                 goto ql_get_stats_exit;
2229
2230         if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
2231                 rstat = (q80_rcv_stats_t *)&stat_rsp->u.rcv;
2232                 bcopy(rstat, &ha->hw.rcv, sizeof(q80_rcv_stats_t));
2233         } else {
2234                 device_printf(ha->pci_dev, "%s: rcv failed [0x%08x]\n",
2235                         __func__, ha->hw.mbox[0]);
2236         }
2237
2238         if (ha->qla_watchdog_pause || (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) ||
2239                 ha->offline)
2240                 goto ql_get_stats_exit;
2241         /*
2242          * Get XMT Statistics
2243          */
2244         for (i = 0 ; (i < ha->hw.num_tx_rings); i++) {
2245                 if (ha->qla_watchdog_pause ||
2246                         (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) ||
2247                         ha->offline)
2248                         goto ql_get_stats_exit;
2249
2250                 cmd = Q8_GET_STATS_CMD_XMT | Q8_GET_STATS_CMD_TYPE_CNTXT;
2251 //              cmd |= Q8_GET_STATS_CMD_CLEAR;
2252                 cmd |= (ha->hw.tx_cntxt[i].tx_cntxt_id << 16);
2253
2254                 if (qla_get_hw_stats(ha, cmd, sizeof(q80_get_stats_rsp_t))
2255                         == 0) {
2256                         xstat = (q80_xmt_stats_t *)&stat_rsp->u.xmt;
2257                         bcopy(xstat, &ha->hw.xmt[i], sizeof(q80_xmt_stats_t));
2258                 } else {
2259                         device_printf(ha->pci_dev, "%s: xmt failed [0x%08x]\n",
2260                                 __func__, ha->hw.mbox[0]);
2261                 }
2262         }
2263
2264 ql_get_stats_exit:
2265         QLA_UNLOCK(ha, __func__);
2266
2267         return;
2268 }
2269
2270 /*
2271  * Name: qla_tx_tso
2272  * Function: Checks if the packet to be transmitted is a candidate for
2273  *      Large TCP Segment Offload. If yes, the appropriate fields in the Tx
2274  *      Ring Structure are plugged in.
2275  */
2276 static int
2277 qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr)
2278 {
2279         struct ether_vlan_header *eh;
2280         struct ip *ip = NULL;
2281         struct ip6_hdr *ip6 = NULL;
2282         struct tcphdr *th = NULL;
2283         uint32_t ehdrlen,  hdrlen, ip_hlen, tcp_hlen, tcp_opt_off;
2284         uint16_t etype, opcode, offload = 1;
2285         device_t dev;
2286
2287         dev = ha->pci_dev;
2288
2289
2290         eh = mtod(mp, struct ether_vlan_header *);
2291
2292         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2293                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2294                 etype = ntohs(eh->evl_proto);
2295         } else {
2296                 ehdrlen = ETHER_HDR_LEN;
2297                 etype = ntohs(eh->evl_encap_proto);
2298         }
2299
2300         hdrlen = 0;
2301
2302         switch (etype) {
2303                 case ETHERTYPE_IP:
2304
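                        /*
                         * If the Ethernet/IP/TCP headers are not contiguous
                         * in the first mbuf, stage them in the caller
                         * supplied hdr buffer instead.
                         */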
2305                         tcp_opt_off = ehdrlen + sizeof(struct ip) +
2306                                         sizeof(struct tcphdr);
2307
2308                         if (mp->m_len < tcp_opt_off) {
2309                                 m_copydata(mp, 0, tcp_opt_off, hdr);
2310                                 ip = (struct ip *)(hdr + ehdrlen);
2311                         } else {
2312                                 ip = (struct ip *)(mp->m_data + ehdrlen);
2313                         }
2314
2315                         ip_hlen = ip->ip_hl << 2;
2316                         opcode = Q8_TX_CMD_OP_XMT_TCP_LSO;
2317
2318                                 
2319                         if ((ip->ip_p != IPPROTO_TCP) ||
2320                                 (ip_hlen != sizeof (struct ip))){
2321                                 /* IP Options are not supported */
2322
2323                                 offload = 0;
2324                         } else
2325                                 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
2326
2327                 break;
2328
2329                 case ETHERTYPE_IPV6:
2330
2331                         tcp_opt_off = ehdrlen + sizeof(struct ip6_hdr) +
2332                                         sizeof (struct tcphdr);
2333
2334                         if (mp->m_len < tcp_opt_off) {
2335                                 m_copydata(mp, 0, tcp_opt_off, hdr);
2336                                 ip6 = (struct ip6_hdr *)(hdr + ehdrlen);
2337                         } else {
2338                                 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2339                         }
2340
2341                         ip_hlen = sizeof(struct ip6_hdr);
2342                         opcode = Q8_TX_CMD_OP_XMT_TCP_LSO_IPV6;
2343
2344                         if (ip6->ip6_nxt != IPPROTO_TCP) {
2345                                 //device_printf(dev, "%s: ipv6\n", __func__);
2346                                 offload = 0;
2347                         } else
2348                                 th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
2349                 break;
2350
2351                 default:
2352                         QL_DPRINT8(ha, (dev, "%s: type!=ip\n", __func__));
2353                         offload = 0;
2354                 break;
2355         }
2356
2357         if (!offload)
2358                 return (-1);
2359
2360         tcp_hlen = th->th_off << 2;
2361         hdrlen = ehdrlen + ip_hlen + tcp_hlen;
2362
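        /*
         * Finish staging the header in hdr: if the base headers were already
         * copied above (m_len < tcp_opt_off), only the TCP options remain;
         * otherwise copy the entire header.
         */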
2363         if (mp->m_len < hdrlen) {
2364                 if (mp->m_len < tcp_opt_off) {
2365                         if (tcp_hlen > sizeof(struct tcphdr)) {
2366                                 m_copydata(mp, tcp_opt_off,
2367                                         (tcp_hlen - sizeof(struct tcphdr)),
2368                                         &hdr[tcp_opt_off]);
2369                         }
2370                 } else {
2371                         m_copydata(mp, 0, hdrlen, hdr);
2372                 }
2373         }
2374
2375         tx_cmd->mss = mp->m_pkthdr.tso_segsz;
2376
2377         tx_cmd->flags_opcode = opcode;
2378         tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;
2379         tx_cmd->total_hdr_len = hdrlen;
2380
2381         /* Multicast: least significant bit of the first destination address byte set */
2382         if (eh->evl_dhost[0] & 0x01) {
2383                 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST;
2384         }
2385
2386         if (mp->m_len < hdrlen) {
2387                 QL_DPRINT8(ha, (dev, "%s: hdrlen %d not contiguous\n", __func__, hdrlen));
2388                 return (1);
2389         }
2390
2391         return (0);
2392 }
2393
2394 /*
2395  * Name: qla_tx_chksum
2396  * Function: Checks if the packet to be transmitted is a candidate for
2397  *      TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx
2398  *      Ring Structure are plugged in.
2399  */
2400 static int
2401 qla_tx_chksum(qla_host_t *ha, struct mbuf *mp, uint32_t *op_code,
2402         uint32_t *tcp_hdr_off)
2403 {
2404         struct ether_vlan_header *eh;
2405         struct ip *ip;
2406         struct ip6_hdr *ip6;
2407         uint32_t ehdrlen, ip_hlen;
2408         uint16_t etype, opcode, offload = 1;
2409         device_t dev;
2410         uint8_t buf[sizeof(struct ip6_hdr)];
2411
2412         dev = ha->pci_dev;
2413
2414         *op_code = 0;
2415
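        /* nothing to do unless TCP/UDP checksum offload was requested */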
2416         if ((mp->m_pkthdr.csum_flags &
2417                 (CSUM_TCP|CSUM_UDP|CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) == 0)
2418                 return (-1);
2419
2420         eh = mtod(mp, struct ether_vlan_header *);
2421
2422         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2423                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2424                 etype = ntohs(eh->evl_proto);
2425         } else {
2426                 ehdrlen = ETHER_HDR_LEN;
2427                 etype = ntohs(eh->evl_encap_proto);
2428         }
2429
2430                 
2431         switch (etype) {
2432                 case ETHERTYPE_IP:
2433                         ip = (struct ip *)(mp->m_data + ehdrlen);
2434
2435                         ip_hlen = sizeof (struct ip);
2436
2437                         if (mp->m_len < (ehdrlen + ip_hlen)) {
2438                                 m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
2439                                 ip = (struct ip *)buf;
2440                         }
2441
2442                         if (ip->ip_p == IPPROTO_TCP)
2443                                 opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM;
2444                         else if (ip->ip_p == IPPROTO_UDP)
2445                                 opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM;
2446                         else {
2447                                 //device_printf(dev, "%s: ipv4\n", __func__);
2448                                 offload = 0;
2449                         }
2450                 break;
2451
2452                 case ETHERTYPE_IPV6:
2453                         ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2454
2455                         ip_hlen = sizeof(struct ip6_hdr);
2456
2457                         if (mp->m_len < (ehdrlen + ip_hlen)) {
2458                                 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
2459                                         buf);
2460                                 ip6 = (struct ip6_hdr *)buf;
2461                         }
2462
2463                         if (ip6->ip6_nxt == IPPROTO_TCP)
2464                                 opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6;
2465                         else if (ip6->ip6_nxt == IPPROTO_UDP)
2466                                 opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6;
2467                         else {
2468                                 //device_printf(dev, "%s: ipv6\n", __func__);
2469                                 offload = 0;
2470                         }
2471                 break;
2472
2473                 default:
2474                         offload = 0;
2475                 break;
2476         }
2477         if (!offload)
2478                 return (-1);
2479
2480         *op_code = opcode;
2481         *tcp_hdr_off = (ip_hlen + ehdrlen);
2482
2483         return (0);
2484 }
2485
2486 #define QLA_TX_MIN_FREE 2
2487 /*
2488  * Name: ql_hw_send
2489  * Function: Transmits a packet. It first checks if the packet is a
2490  *      candidate for Large TCP Segment Offload and then for UDP/TCP checksum
2491  *      offload. If neither criterion is met, it is transmitted
2492  *      as a regular ethernet frame.
2493  */
2494 int
2495 ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
2496         uint32_t tx_idx, struct mbuf *mp, uint32_t txr_idx, uint32_t iscsi_pdu)
2497 {
2498         struct ether_vlan_header *eh;
2499         qla_hw_t *hw = &ha->hw;
2500         q80_tx_cmd_t *tx_cmd, tso_cmd;
2501         bus_dma_segment_t *c_seg;
2502         uint32_t num_tx_cmds, hdr_len = 0;
2503         uint32_t total_length = 0, bytes, tx_cmd_count = 0, txr_next;
2504         device_t dev;
2505         int i, ret;
2506         uint8_t *src = NULL, *dst = NULL;
2507         uint8_t frame_hdr[QL_FRAME_HDR_SIZE];
2508         uint32_t op_code = 0;
2509         uint32_t tcp_hdr_off = 0;
2510
2511         dev = ha->pci_dev;
2512
2513         /*
2514          * Always make sure there is at least one empty slot in the tx_ring;
2515          * the tx_ring is considered full when only one entry is available.
2516          */
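        /* each tx cmd descriptor carries up to Q8_TX_CMD_MAX_SEGMENTS (4)
         * DMA segments, hence the divide by four below */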
2517         num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2;
2518
2519         total_length = mp->m_pkthdr.len;
2520         if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
2521                 device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
2522                         __func__, total_length);
2523                 return (EINVAL);
2524         }
2525         eh = mtod(mp, struct ether_vlan_header *);
2526
2527         if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
2528
2529                 bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t));
2530
2531                 src = frame_hdr;
2532                 ret = qla_tx_tso(ha, mp, &tso_cmd, src);
2533
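                /*
                 * qla_tx_tso() returns 0 (header contiguous in the mbuf) or
                 * 1 (header staged in frame_hdr); anything else means the
                 * frame cannot be TSO'd.
                 */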
2534                 if (!(ret & ~1)) {
2535                         /* find the additional tx_cmd descriptors required */
2536
2537                         if (mp->m_flags & M_VLANTAG)
2538                                 tso_cmd.total_hdr_len += ETHER_VLAN_ENCAP_LEN;
2539
2540                         hdr_len = tso_cmd.total_hdr_len;
2541
2542                         bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
2543                         bytes = QL_MIN(bytes, hdr_len);
2544
2545                         num_tx_cmds++;
2546                         hdr_len -= bytes;
2547
2548                         while (hdr_len) {
2549                                 bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
2550                                 hdr_len -= bytes;
2551                                 num_tx_cmds++;
2552                         }
2553                         hdr_len = tso_cmd.total_hdr_len;
2554
2555                         if (ret == 0)
2556                                 src = (uint8_t *)eh;
2557                 } else 
2558                         return (EINVAL);
2559         } else {
2560                 (void)qla_tx_chksum(ha, mp, &op_code, &tcp_hdr_off);
2561         }
2562
2563         if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
2564                 ql_hw_tx_done_locked(ha, txr_idx);
2565                 if (hw->tx_cntxt[txr_idx].txr_free <=
2566                                 (num_tx_cmds + QLA_TX_MIN_FREE)) {
2567                         QL_DPRINT8(ha, (dev, "%s: (hw->txr_free <= "
2568                                 "(num_tx_cmds + QLA_TX_MIN_FREE))\n",
2569                                 __func__));
2570                         return (-1);
2571                 }
2572         }
2573
2574         for (i = 0; i < num_tx_cmds; i++) {
2575                 int j;
2576
2577                 j = (tx_idx+i) & (NUM_TX_DESCRIPTORS - 1);
2578
2579                 if (NULL != ha->tx_ring[txr_idx].tx_buf[j].m_head) {
2580                         QL_ASSERT(ha, 0, \
2581                                 ("%s [%d]: txr_idx = %d tx_idx = %d mbuf = %p\n",\
2582                                 __func__, __LINE__, txr_idx, j,\
2583                                 ha->tx_ring[txr_idx].tx_buf[j].m_head));
2584                         return (EINVAL);
2585                 }
2586         }
2587
2588         tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[tx_idx];
2589
2590         if (!(mp->m_pkthdr.csum_flags & CSUM_TSO)) {
2591
2592                 if (nsegs > ha->hw.max_tx_segs)
2593                         ha->hw.max_tx_segs = nsegs;
2594
2595                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2596
2597                 if (op_code) {
2598                         tx_cmd->flags_opcode = op_code;
2599                         tx_cmd->tcp_hdr_off = tcp_hdr_off;
2600
2601                 } else {
2602                         tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER;
2603                 }
2604         } else {
2605                 bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t));
2606                 ha->tx_tso_frames++;
2607         }
2608
2609         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2610                 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED;
2611
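                /* the iSCSI user priority occupies the VLAN PCP field
                 * (TCI bits 15:13) */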
2612                 if (iscsi_pdu)
2613                         eh->evl_tag |= ha->hw.user_pri_iscsi << 13;
2614
2615         } else if (mp->m_flags & M_VLANTAG) {
2616
2617                 if (hdr_len) { /* TSO */
2618                         tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED |
2619                                                 Q8_TX_CMD_FLAGS_HW_VLAN_ID);
2620                         tx_cmd->tcp_hdr_off += ETHER_VLAN_ENCAP_LEN;
2621                 } else
2622                         tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_HW_VLAN_ID;
2623
2624                 ha->hw_vlan_tx_frames++;
2625                 tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag;
2626
2627                 if (iscsi_pdu) {
2628                         tx_cmd->vlan_tci |= ha->hw.user_pri_iscsi << 13;
2629                         mp->m_pkthdr.ether_vtag = tx_cmd->vlan_tci;
2630                 }
2631         }
2632
2633
2634         tx_cmd->n_bufs = (uint8_t)nsegs;
2635         tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF);
2636         tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8)));
2637         tx_cmd->cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func);
2638
2639         c_seg = segs;
2640
2641         while (1) {
2642                 for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) {
2643
2644                         switch (i) {
2645                         case 0:
2646                                 tx_cmd->buf1_addr = c_seg->ds_addr;
2647                                 tx_cmd->buf1_len = c_seg->ds_len;
2648                                 break;
2649
2650                         case 1:
2651                                 tx_cmd->buf2_addr = c_seg->ds_addr;
2652                                 tx_cmd->buf2_len = c_seg->ds_len;
2653                                 break;
2654
2655                         case 2:
2656                                 tx_cmd->buf3_addr = c_seg->ds_addr;
2657                                 tx_cmd->buf3_len = c_seg->ds_len;
2658                                 break;
2659
2660                         case 3:
2661                                 tx_cmd->buf4_addr = c_seg->ds_addr;
2662                                 tx_cmd->buf4_len = c_seg->ds_len;
2663                                 break;
2664                         }
2665
2666                         c_seg++;
2667                         nsegs--;
2668                 }
2669
2670                 txr_next = hw->tx_cntxt[txr_idx].txr_next =
2671                         (hw->tx_cntxt[txr_idx].txr_next + 1) &
2672                                 (NUM_TX_DESCRIPTORS - 1);
2673                 tx_cmd_count++;
2674
2675                 if (!nsegs)
2676                         break;
2677                 
2678                 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2679                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2680         }
2681
2682         if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
2683
2684                 /* TSO : Copy the header in the following tx cmd descriptors */
2685
2686                 txr_next = hw->tx_cntxt[txr_idx].txr_next;
2687
2688                 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2689                 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2690
2691                 bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
2692                 bytes = QL_MIN(bytes, hdr_len);
2693
2694                 dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN;
2695
2696                 if (mp->m_flags & M_VLANTAG) {
2697                         /* first copy the src/dst MAC addresses */
2698                         bcopy(src, dst, (ETHER_ADDR_LEN * 2));
2699                         dst += (ETHER_ADDR_LEN * 2);
2700                         src += (ETHER_ADDR_LEN * 2);
2701                         
2702                         *((uint16_t *)dst) = htons(ETHERTYPE_VLAN);
2703                         dst += 2;
2704                         *((uint16_t *)dst) = htons(mp->m_pkthdr.ether_vtag);
2705                         dst += 2;
2706
2707                         /* bytes left in src header */
2708                         hdr_len -= ((ETHER_ADDR_LEN * 2) +
2709                                         ETHER_VLAN_ENCAP_LEN);
2710
2711                         /* bytes left in TxCmd Entry */
2712                         bytes -= ((ETHER_ADDR_LEN * 2) + ETHER_VLAN_ENCAP_LEN);
2713
2714
2715                         bcopy(src, dst, bytes);
2716                         src += bytes;
2717                         hdr_len -= bytes;
2718                 } else {
2719                         bcopy(src, dst, bytes);
2720                         src += bytes;
2721                         hdr_len -= bytes;
2722                 }
2723
2724                 txr_next = hw->tx_cntxt[txr_idx].txr_next =
2725                                 (hw->tx_cntxt[txr_idx].txr_next + 1) &
2726                                         (NUM_TX_DESCRIPTORS - 1);
2727                 tx_cmd_count++;
2728                 
2729                 while (hdr_len) {
2730                         tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2731                         bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2732
2733                         bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
2734
2735                         bcopy(src, tx_cmd, bytes);
2736                         src += bytes;
2737                         hdr_len -= bytes;
2738
2739                         txr_next = hw->tx_cntxt[txr_idx].txr_next =
2740                                 (hw->tx_cntxt[txr_idx].txr_next + 1) &
2741                                         (NUM_TX_DESCRIPTORS - 1);
2742                         tx_cmd_count++;
2743                 }
2744         }
2745
2746         hw->tx_cntxt[txr_idx].txr_free =
2747                 hw->tx_cntxt[txr_idx].txr_free - tx_cmd_count;
2748
2749         QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->tx_cntxt[txr_idx].txr_next,\
2750                 txr_idx);
2751         QL_DPRINT8(ha, (dev, "%s: return\n", __func__));
2752
2753         return (0);
2754 }
2755
2756
2757
2758 #define Q8_CONFIG_IND_TBL_SIZE  32 /* < Q8_RSS_IND_TBL_SIZE and power of 2 */
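/*
 * Name: qla_config_rss_ind_table
 * Function: Fills the RSS indirection table round-robin across the SDS
 *      rings and programs it Q8_CONFIG_IND_TBL_SIZE entries at a time.
 */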
2759 static int
2760 qla_config_rss_ind_table(qla_host_t *ha)
2761 {
2762         uint32_t i, count;
2763         uint8_t rss_ind_tbl[Q8_CONFIG_IND_TBL_SIZE];
2764
2765
2766         for (i = 0; i < Q8_CONFIG_IND_TBL_SIZE; i++) {
2767                 rss_ind_tbl[i] = i % ha->hw.num_sds_rings;
2768         }
2769
2770         for (i = 0; i <= Q8_RSS_IND_TBL_MAX_IDX ;
2771                 i = i + Q8_CONFIG_IND_TBL_SIZE) {
2772
2773                 if ((i + Q8_CONFIG_IND_TBL_SIZE) > Q8_RSS_IND_TBL_MAX_IDX) {
2774                         count = Q8_RSS_IND_TBL_MAX_IDX - i + 1;
2775                 } else {
2776                         count = Q8_CONFIG_IND_TBL_SIZE;
2777                 }
2778
2779                 if (qla_set_rss_ind_table(ha, i, count, ha->hw.rcv_cntxt_id,
2780                         rss_ind_tbl))
2781                         return (-1);
2782         }
2783
2784         return (0);
2785 }
2786
2787 static int
2788 qla_config_soft_lro(qla_host_t *ha)
2789 {
2790         int i;
2791         qla_hw_t *hw = &ha->hw;
2792         struct lro_ctrl *lro;
2793
2794         for (i = 0; i < hw->num_sds_rings; i++) {
2795                 lro = &hw->sds[i].lro;
2796
2797                 bzero(lro, sizeof(struct lro_ctrl));
2798
2799 #if (__FreeBSD_version >= 1100101)
2800                 if (tcp_lro_init_args(lro, ha->ifp, 0, NUM_RX_DESCRIPTORS)) {
2801                         device_printf(ha->pci_dev,
2802                                 "%s: tcp_lro_init_args [%d] failed\n",
2803                                 __func__, i);
2804                         return (-1);
2805                 }
2806 #else
2807                 if (tcp_lro_init(lro)) {
2808                         device_printf(ha->pci_dev,
2809                                 "%s: tcp_lro_init [%d] failed\n",
2810                                 __func__, i);
2811                         return (-1);
2812                 }
2813 #endif /* #if (__FreeBSD_version >= 1100101) */
2814
2815                 lro->ifp = ha->ifp;
2816         }
2817
2818         QL_DPRINT2(ha, (ha->pci_dev, "%s: LRO initialized\n", __func__));
2819         return (0);
2820 }
2821
2822 static void
2823 qla_drain_soft_lro(qla_host_t *ha)
2824 {
2825         int i;
2826         qla_hw_t *hw = &ha->hw;
2827         struct lro_ctrl *lro;
2828
2829         for (i = 0; i < hw->num_sds_rings; i++) {
2830                 lro = &hw->sds[i].lro;
2831
2832 #if (__FreeBSD_version >= 1100101)
2833                 tcp_lro_flush_all(lro);
2834 #else
2835                 struct lro_entry *queued;
2836
2837                 while ((!SLIST_EMPTY(&lro->lro_active))) {
2838                         queued = SLIST_FIRST(&lro->lro_active);
2839                         SLIST_REMOVE_HEAD(&lro->lro_active, next);
2840                         tcp_lro_flush(lro, queued);
2841                 }
2842 #endif /* #if (__FreeBSD_version >= 1100101) */
2843         }
2844
2845         return;
2846 }
2847
2848 static void
2849 qla_free_soft_lro(qla_host_t *ha)
2850 {
2851         int i;
2852         qla_hw_t *hw = &ha->hw;
2853         struct lro_ctrl *lro;
2854
2855         for (i = 0; i < hw->num_sds_rings; i++) {
2856                 lro = &hw->sds[i].lro;
2857                 tcp_lro_free(lro);
2858         }
2859
2860         return;
2861 }
2862
2863
2864 /*
2865  * Name: ql_del_hw_if
2866  * Function: Destroys the hardware specific entities corresponding to an
2867  *      Ethernet Interface
2868  */
2869 void
2870 ql_del_hw_if(qla_host_t *ha)
2871 {
2872         uint32_t i;
2873         uint32_t num_msix;
2874
2875         (void)qla_stop_nic_func(ha);
2876
2877         qla_del_rcv_cntxt(ha);
2878
2879         if(qla_del_xmt_cntxt(ha))
2880                 goto ql_del_hw_if_exit;
2881
2882         if (ha->hw.flags.init_intr_cnxt) {
2883                 for (i = 0; i < ha->hw.num_sds_rings; ) {
2884
2885                         if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2886                                 num_msix = Q8_MAX_INTR_VECTORS;
2887                         else
2888                                 num_msix = ha->hw.num_sds_rings - i;
2889
2890                         if (qla_config_intr_cntxt(ha, i, num_msix, 0))
2891                                 break;
2892
2893                         i += num_msix;
2894                 }
2895
2896                 ha->hw.flags.init_intr_cnxt = 0;
2897         }
2898
2899 ql_del_hw_if_exit:
2900         if (ha->hw.enable_soft_lro) {
2901                 qla_drain_soft_lro(ha);
2902                 qla_free_soft_lro(ha);
2903         }
2904
2905         return;
2906 }
2907
2908 void
2909 qla_confirm_9kb_enable(qla_host_t *ha)
2910 {
2911 //      uint32_t supports_9kb = 0;
2912
2913         ha->hw.mbx_intr_mask_offset = READ_REG32(ha, Q8_MBOX_INT_MASK_MSIX);
2914
2915         /* Use MSI-X vector 0; Enable Firmware Mailbox Interrupt */
2916         WRITE_REG32(ha, Q8_MBOX_INT_ENABLE, BIT_2);
2917         WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
2918
2919 #if 0
2920         qla_get_nic_partition(ha, &supports_9kb, NULL);
2921
2922         if (!supports_9kb)
2923 #endif
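        /* with the firmware capability query compiled out above, 9KB
         * receive buffers are unconditionally disabled */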
2924         ha->hw.enable_9kb = 0;
2925
2926         return;
2927 }
2928
2929 /*
2930  * Name: ql_init_hw_if
2931  * Function: Creates the hardware specific entities corresponding to an
2932  *      Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
2933  *      corresponding to the interface. Enables LRO if allowed.
2934  */
2935 int
2936 ql_init_hw_if(qla_host_t *ha)
2937 {
2938         device_t        dev;
2939         uint32_t        i;
2940         uint8_t         bcast_mac[6];
2941         qla_rdesc_t     *rdesc;
2942         uint32_t        num_msix;
2943
2944         dev = ha->pci_dev;
2945
2946         for (i = 0; i < ha->hw.num_sds_rings; i++) {
2947                 bzero(ha->hw.dma_buf.sds_ring[i].dma_b,
2948                         ha->hw.dma_buf.sds_ring[i].size);
2949         }
2950
2951         for (i = 0; i < ha->hw.num_sds_rings; ) {
2952
2953                 if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2954                         num_msix = Q8_MAX_INTR_VECTORS;
2955                 else
2956                         num_msix = ha->hw.num_sds_rings - i;
2957
2958                 if (qla_config_intr_cntxt(ha, i, num_msix, 1)) {
2959
2960                         if (i > 0) {
2961
2962                                 num_msix = i;
2963
2964                                 for (i = 0; i < num_msix; ) {
2965                                         qla_config_intr_cntxt(ha, i,
2966                                                 Q8_MAX_INTR_VECTORS, 0);
2967                                         i += Q8_MAX_INTR_VECTORS;
2968                                 }
2969                         }
2970                         return (-1);
2971                 }
2972
2973                 i = i + num_msix;
2974         }
2975
2976         ha->hw.flags.init_intr_cnxt = 1;
2977
2978         /*
2979          * Create Receive Context
2980          */
2981         if (qla_init_rcv_cntxt(ha)) {
2982                 return (-1);
2983         }
2984
2985         for (i = 0; i < ha->hw.num_rds_rings; i++) {
2986                 rdesc = &ha->hw.rds[i];
2987                 rdesc->rx_next = NUM_RX_DESCRIPTORS - 2;
2988                 rdesc->rx_in = 0;
2989                 /* Update the RDS Producer Indices */
2990                 QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,
2991                         rdesc->rx_next);
2992         }
2993
2994         /*
2995          * Create Transmit Context
2996          */
2997         if (qla_init_xmt_cntxt(ha)) {
2998                 qla_del_rcv_cntxt(ha);
2999                 return (-1);
3000         }
3001         ha->hw.max_tx_segs = 0;
3002
3003         if (qla_config_mac_addr(ha, ha->hw.mac_addr, 1, 1))
3004                 return(-1);
3005
3006         ha->hw.flags.unicast_mac = 1;
3007
3008         bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
3009         bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
3010
3011         if (qla_config_mac_addr(ha, bcast_mac, 1, 1))
3012                 return (-1);
3013
3014         ha->hw.flags.bcast_mac = 1;
3015
3016         /*
3017          * program any cached multicast addresses
3018          */
3019         if (qla_hw_add_all_mcast(ha))
3020                 return (-1);
3021
3022         if (ql_set_max_mtu(ha, ha->max_frame_size, ha->hw.rcv_cntxt_id))
3023                 return (-1);
3024
3025         if (qla_config_rss(ha, ha->hw.rcv_cntxt_id))
3026                 return (-1);
3027
3028         if (qla_config_rss_ind_table(ha))
3029                 return (-1);
3030
3031         if (qla_config_intr_coalesce(ha, ha->hw.rcv_cntxt_id, 0, 1))
3032                 return (-1);
3033
3034         if (qla_link_event_req(ha, ha->hw.rcv_cntxt_id))
3035                 return (-1);
3036
3037         if (ha->ifp->if_capenable & IFCAP_LRO) {
3038                 if (ha->hw.enable_hw_lro) {
3039                         ha->hw.enable_soft_lro = 0;
3040
3041                         if (qla_config_fw_lro(ha, ha->hw.rcv_cntxt_id))
3042                                 return (-1);
3043                 } else {
3044                         ha->hw.enable_soft_lro = 1;
3045
3046                         if (qla_config_soft_lro(ha))
3047                                 return (-1);
3048                 }
3049         }
3050
3051         if (qla_init_nic_func(ha))
3052                 return (-1);
3053
3054         if (qla_query_fw_dcbx_caps(ha))
3055                 return (-1);
3056
3057         for (i = 0; i < ha->hw.num_sds_rings; i++)
3058                 QL_ENABLE_INTERRUPTS(ha, i);
3059
3060         return (0);
3061 }
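
/*
 * Note the unwind above: if qla_config_intr_cntxt() fails partway through,
 * every interrupt context created so far is destroyed before returning, so
 * the hardware is never left half-configured.  qla_init_xmt_cntxt() applies
 * the same idiom to transmit contexts; in sketch form (create()/destroy()
 * are hypothetical stand-ins):
 */
#if 0	/* illustrative only; compiled out */
	for (i = 0; i < total; i++) {
		if (create(i) != 0) {
			while (i--)		/* roll back what succeeded */
				destroy(i);
			return (-1);
		}
	}
#endif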
3062
3063 static int
3064 qla_map_sds_to_rds(qla_host_t *ha, uint32_t start_idx, uint32_t num_idx)
3065 {
3066         device_t                dev = ha->pci_dev;
3067         q80_rq_map_sds_to_rds_t *map_rings;
3068         q80_rsp_map_sds_to_rds_t *map_rings_rsp;
3069         uint32_t                i, err;
3070         qla_hw_t                *hw = &ha->hw;
3071
3072         map_rings = (q80_rq_map_sds_to_rds_t *)ha->hw.mbox;
3073         bzero(map_rings, sizeof(q80_rq_map_sds_to_rds_t));
3074
3075         map_rings->opcode = Q8_MBX_MAP_SDS_TO_RDS;
3076         map_rings->count_version = (sizeof (q80_rq_map_sds_to_rds_t) >> 2);
3077         map_rings->count_version |= Q8_MBX_CMD_VERSION;
3078
3079         map_rings->cntxt_id = hw->rcv_cntxt_id;
3080         map_rings->num_rings = num_idx;
3081
3082         for (i = 0; i < num_idx; i++) {
3083                 map_rings->sds_rds[i].sds_ring = i + start_idx;
3084                 map_rings->sds_rds[i].rds_ring = i + start_idx;
3085         }
3086
3087         if (qla_mbx_cmd(ha, (uint32_t *)map_rings,
3088                 (sizeof (q80_rq_map_sds_to_rds_t) >> 2),
3089                 ha->hw.mbox, (sizeof(q80_rsp_map_sds_to_rds_t) >> 2), 0)) {
3090                 device_printf(dev, "%s: failed0\n", __func__);
3091                 return (-1);
3092         }
3093
3094         map_rings_rsp = (q80_rsp_map_sds_to_rds_t *)ha->hw.mbox;
3095
3096         err = Q8_MBX_RSP_STATUS(map_rings_rsp->regcnt_status);
3097
3098         if (err) {
3099                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3100                 return (-1);
3101         }
3102
3103         return (0);
3104 }
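
/*
 * qla_map_sds_to_rds() above follows the mailbox convention used throughout
 * this file: build the request in ha->hw.mbox, issue it with qla_mbx_cmd()
 * (sizes are in 32-bit words, hence the ">> 2"), then re-interpret the same
 * buffer as the response and check Q8_MBX_RSP_STATUS().  Skeleton of the
 * idiom, with q80_rq_foo_t/q80_rsp_foo_t/Q8_MBX_FOO as placeholder names:
 */
#if 0	/* illustrative only; compiled out */
	q80_rq_foo_t	*req;
	q80_rsp_foo_t	*rsp;

	req = (q80_rq_foo_t *)ha->hw.mbox;
	bzero(req, sizeof(q80_rq_foo_t));
	req->opcode = Q8_MBX_FOO;
	req->count_version = (sizeof(q80_rq_foo_t) >> 2) | Q8_MBX_CMD_VERSION;

	if (qla_mbx_cmd(ha, (uint32_t *)req, (sizeof(q80_rq_foo_t) >> 2),
		ha->hw.mbox, (sizeof(q80_rsp_foo_t) >> 2), 0))
		return (-1);		/* transport-level failure */

	rsp = (q80_rsp_foo_t *)ha->hw.mbox;
	if (Q8_MBX_RSP_STATUS(rsp->regcnt_status))
		return (-1);		/* firmware rejected the command */
#endif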
3105
3106 /*
3107  * Name: qla_init_rcv_cntxt
3108  * Function: Creates the Receive Context.
3109  */
3110 static int
3111 qla_init_rcv_cntxt(qla_host_t *ha)
3112 {
3113         q80_rq_rcv_cntxt_t      *rcntxt;
3114         q80_rsp_rcv_cntxt_t     *rcntxt_rsp;
3115         q80_stat_desc_t         *sdesc;
3116         int                     i, j;
3117         qla_hw_t                *hw = &ha->hw;
3118         device_t                dev;
3119         uint32_t                err;
3120         uint32_t                rcntxt_sds_rings;
3121         uint32_t                rcntxt_rds_rings;
3122         uint32_t                max_idx;
3123
3124         dev = ha->pci_dev;
3125
3126         /*
3127          * Create Receive Context
3128          */
3129
3130         for (i = 0; i < hw->num_sds_rings; i++) {
3131                 for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++) {
3132                         /* initialize every descriptor, not just base[0] */
3133                         sdesc = (q80_stat_desc_t *)&hw->sds[i].sds_ring_base[j];
3134                         sdesc->data[0] = 1ULL;
3135                         sdesc->data[1] = 1ULL;
3136                 }
3137         }
3138
3139         rcntxt_sds_rings = hw->num_sds_rings;
3140         if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS)
3141                 rcntxt_sds_rings = MAX_RCNTXT_SDS_RINGS;
3142
3143         rcntxt_rds_rings = hw->num_rds_rings;
3144
3145         if (hw->num_rds_rings > MAX_RDS_RING_SETS)
3146                 rcntxt_rds_rings = MAX_RDS_RING_SETS;
3147
3148         rcntxt = (q80_rq_rcv_cntxt_t *)ha->hw.mbox;
3149         bzero(rcntxt, (sizeof (q80_rq_rcv_cntxt_t)));
3150
3151         rcntxt->opcode = Q8_MBX_CREATE_RX_CNTXT;
3152         rcntxt->count_version = (sizeof (q80_rq_rcv_cntxt_t) >> 2);
3153         rcntxt->count_version |= Q8_MBX_CMD_VERSION;
3154
3155         rcntxt->cap0 = Q8_RCV_CNTXT_CAP0_BASEFW |
3156                         Q8_RCV_CNTXT_CAP0_LRO |
3157                         Q8_RCV_CNTXT_CAP0_HW_LRO |
3158                         Q8_RCV_CNTXT_CAP0_RSS |
3159                         Q8_RCV_CNTXT_CAP0_SGL_LRO;
3160
3161         if (ha->hw.enable_9kb)
3162                 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SINGLE_JUMBO;
3163         else
3164                 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SGL_JUMBO;
3165
3166         if (ha->hw.num_rds_rings > 1) {
3167                 rcntxt->nrds_sets_rings = rcntxt_rds_rings | (1 << 5);
3168                 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_MULTI_RDS;
3169         } else
3170                 rcntxt->nrds_sets_rings = 0x1 | (1 << 5);
3171
3172         rcntxt->nsds_rings = rcntxt_sds_rings;
3173
3174         rcntxt->rds_producer_mode = Q8_RCV_CNTXT_RDS_PROD_MODE_UNIQUE;
3175
3176         rcntxt->rcv_vpid = 0;
3177
3178         for (i = 0; i <  rcntxt_sds_rings; i++) {
3179                 rcntxt->sds[i].paddr =
3180                         qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr);
3181                 rcntxt->sds[i].size =
3182                         qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
3183                 rcntxt->sds[i].intr_id = qla_host_to_le16(hw->intr_id[i]);
3184                 rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0);
3185         }
3186
3187         for (i = 0; i <  rcntxt_rds_rings; i++) {
3188                 rcntxt->rds[i].paddr_std =
3189                         qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr);
3190
3191                 if (ha->hw.enable_9kb)
3192                         rcntxt->rds[i].std_bsize =
3193                                 qla_host_to_le64(MJUM9BYTES);
3194                 else
3195                         rcntxt->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
3196
3197                 rcntxt->rds[i].std_nentries =
3198                         qla_host_to_le32(NUM_RX_DESCRIPTORS);
3199         }
3200
3201         if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
3202                 (sizeof (q80_rq_rcv_cntxt_t) >> 2),
3203                 ha->hw.mbox, (sizeof(q80_rsp_rcv_cntxt_t) >> 2), 0)) {
3204                 device_printf(dev, "%s: failed0\n", __func__);
3205                 return (-1);
3206         }
3207
3208         rcntxt_rsp = (q80_rsp_rcv_cntxt_t *)ha->hw.mbox;
3209
3210         err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
3211
3212         if (err) {
3213                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3214                 return (-1);
3215         }
3216
3217         for (i = 0; i <  rcntxt_sds_rings; i++) {
3218                 hw->sds[i].sds_consumer = rcntxt_rsp->sds_cons[i];
3219         }
3220
3221         for (i = 0; i <  rcntxt_rds_rings; i++) {
3222                 hw->rds[i].prod_std = rcntxt_rsp->rds[i].prod_std;
3223         }
3224
3225         hw->rcv_cntxt_id = rcntxt_rsp->cntxt_id;
3226
3227         ha->hw.flags.init_rx_cnxt = 1;
3228
3229         if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) {
3230
3231                 for (i = MAX_RCNTXT_SDS_RINGS; i < hw->num_sds_rings;) {
3232
3233                         if ((i + MAX_RCNTXT_SDS_RINGS) < hw->num_sds_rings)
3234                                 max_idx = MAX_RCNTXT_SDS_RINGS;
3235                         else
3236                                 max_idx = hw->num_sds_rings - i;
3237
3238                         err = qla_add_rcv_rings(ha, i, max_idx);
3239                         if (err)
3240                                 return -1;
3241
3242                         i += max_idx;
3243                 }
3244         }
3245
3246         if (hw->num_rds_rings > 1) {
3247
3248                 for (i = 0; i < hw->num_rds_rings; ) {
3249
3250                         if ((i + MAX_SDS_TO_RDS_MAP) < hw->num_rds_rings)
3251                                 max_idx = MAX_SDS_TO_RDS_MAP;
3252                         else
3253                                 max_idx = hw->num_rds_rings - i;
3254
3255                         err = qla_map_sds_to_rds(ha, i, max_idx);
3256                         if (err)
3257                                 return -1;
3258
3259                         i += max_idx;
3260                 }
3261         }
3262
3263         return (0);
3264 }
3265
3266 static int
3267 qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds)
3268 {
3269         device_t                dev = ha->pci_dev;
3270         q80_rq_add_rcv_rings_t  *add_rcv;
3271         q80_rsp_add_rcv_rings_t *add_rcv_rsp;
3272         uint32_t                i,j, err;
3273         qla_hw_t                *hw = &ha->hw;
3274
3275         add_rcv = (q80_rq_add_rcv_rings_t *)ha->hw.mbox;
3276         bzero(add_rcv, sizeof (q80_rq_add_rcv_rings_t));
3277
3278         add_rcv->opcode = Q8_MBX_ADD_RX_RINGS;
3279         add_rcv->count_version = (sizeof (q80_rq_add_rcv_rings_t) >> 2);
3280         add_rcv->count_version |= Q8_MBX_CMD_VERSION;
3281
3282         add_rcv->nrds_sets_rings = nsds | (1 << 5);
3283         add_rcv->nsds_rings = nsds;
3284         add_rcv->cntxt_id = hw->rcv_cntxt_id;
3285
3286         for (i = 0; i <  nsds; i++) {
3287
3288                 j = i + sds_idx;
3289
3290                 add_rcv->sds[i].paddr =
3291                         qla_host_to_le64(hw->dma_buf.sds_ring[j].dma_addr);
3292
3293                 add_rcv->sds[i].size =
3294                         qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
3295
3296                 add_rcv->sds[i].intr_id = qla_host_to_le16(hw->intr_id[j]);
3297                 add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0);
3298
3299         }
3300
3301         for (i = 0; (i <  nsds); i++) {
3302                 j = i + sds_idx;
3303
3304                 add_rcv->rds[i].paddr_std =
3305                         qla_host_to_le64(hw->dma_buf.rds_ring[j].dma_addr);
3306
3307                 if (ha->hw.enable_9kb)
3308                         add_rcv->rds[i].std_bsize =
3309                                 qla_host_to_le64(MJUM9BYTES);
3310                 else
3311                         add_rcv->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
3312
3313                 add_rcv->rds[i].std_nentries =
3314                         qla_host_to_le32(NUM_RX_DESCRIPTORS);
3315         }
3316
3317
3318         if (qla_mbx_cmd(ha, (uint32_t *)add_rcv,
3319                 (sizeof (q80_rq_add_rcv_rings_t) >> 2),
3320                 ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
3321                 device_printf(dev, "%s: failed0\n", __func__);
3322                 return (-1);
3323         }
3324
3325         add_rcv_rsp = (q80_rsp_add_rcv_rings_t *)ha->hw.mbox;
3326
3327         err = Q8_MBX_RSP_STATUS(add_rcv_rsp->regcnt_status);
3328
3329         if (err) {
3330                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3331                 return (-1);
3332         }
3333
3334         for (i = 0; i < nsds; i++) {
3335                 hw->sds[(i + sds_idx)].sds_consumer = add_rcv_rsp->sds_cons[i];
3336         }
3337
3338         for (i = 0; i < nsds; i++) {
3339                 hw->rds[(i + sds_idx)].prod_std = add_rcv_rsp->rds[i].prod_std;
3340         }
3341
3342         return (0);
3343 }
3344
3345 /*
3346  * Name: qla_del_rcv_cntxt
3347  * Function: Destroys the Receive Context.
3348  */
3349 static void
3350 qla_del_rcv_cntxt(qla_host_t *ha)
3351 {
3352         device_t                        dev = ha->pci_dev;
3353         q80_rcv_cntxt_destroy_t         *rcntxt;
3354         q80_rcv_cntxt_destroy_rsp_t     *rcntxt_rsp;
3355         uint32_t                        err;
3356         uint8_t                         bcast_mac[6];
3357
3358         if (!ha->hw.flags.init_rx_cnxt)
3359                 return;
3360
3361         if (qla_hw_del_all_mcast(ha))
3362                 return;
3363
3364         if (ha->hw.flags.bcast_mac) {
3365
3366                 bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
3367                 bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
3368
3369                 if (qla_config_mac_addr(ha, bcast_mac, 0, 1))
3370                         return;
3371                 ha->hw.flags.bcast_mac = 0;
3372
3373         }
3374
3375         if (ha->hw.flags.unicast_mac) {
3376                 if (qla_config_mac_addr(ha, ha->hw.mac_addr, 0, 1))
3377                         return;
3378                 ha->hw.flags.unicast_mac = 0;
3379         }
3380
3381         rcntxt = (q80_rcv_cntxt_destroy_t *)ha->hw.mbox;
3382         bzero(rcntxt, (sizeof (q80_rcv_cntxt_destroy_t)));
3383
3384         rcntxt->opcode = Q8_MBX_DESTROY_RX_CNTXT;
3385         rcntxt->count_version = (sizeof (q80_rcv_cntxt_destroy_t) >> 2);
3386         rcntxt->count_version |= Q8_MBX_CMD_VERSION;
3387
3388         rcntxt->cntxt_id = ha->hw.rcv_cntxt_id;
3389
3390         if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
3391                 (sizeof (q80_rcv_cntxt_destroy_t) >> 2),
3392                 ha->hw.mbox, (sizeof(q80_rcv_cntxt_destroy_rsp_t) >> 2), 0)) {
3393                 device_printf(dev, "%s: failed0\n", __func__);
3394                 return;
3395         }
3396         rcntxt_rsp = (q80_rcv_cntxt_destroy_rsp_t *)ha->hw.mbox;
3397
3398         err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
3399
3400         if (err) {
3401                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3402         }
3403
3404         ha->hw.flags.init_rx_cnxt = 0;
3405         return;
3406 }
3407
3408 /*
3409  * Name: qla_init_xmt_cntxt_i
3410  * Function: Creates the Transmit Context for the given Tx ring.
3411  */
3412 static int
3413 qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
3414 {
3415         device_t                dev;
3416         qla_hw_t                *hw = &ha->hw;
3417         q80_rq_tx_cntxt_t       *tcntxt;
3418         q80_rsp_tx_cntxt_t      *tcntxt_rsp;
3419         uint32_t                err;
3420         qla_hw_tx_cntxt_t       *hw_tx_cntxt;
3421         uint32_t                intr_idx;
3422
3423         hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
3424
3425         dev = ha->pci_dev;
3426
3427         /*
3428          * Create Transmit Context
3429          */
3430         tcntxt = (q80_rq_tx_cntxt_t *)ha->hw.mbox;
3431         bzero(tcntxt, (sizeof (q80_rq_tx_cntxt_t)));
3432
3433         tcntxt->opcode = Q8_MBX_CREATE_TX_CNTXT;
3434         tcntxt->count_version = (sizeof (q80_rq_tx_cntxt_t) >> 2);
3435         tcntxt->count_version |= Q8_MBX_CMD_VERSION;
3436
3437         intr_idx = txr_idx;
3438
3439 #ifdef QL_ENABLE_ISCSI_TLV
3440
3441         tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO |
3442                                 Q8_TX_CNTXT_CAP0_TC;
3443
3444         if (txr_idx >= (ha->hw.num_tx_rings >> 1)) {
3445                 tcntxt->traffic_class = 1;
3446         }
3447
3448         intr_idx = txr_idx % (ha->hw.num_tx_rings >> 1);
3449
3450 #else
3451         tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO;
3452
3453 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
3454
3455         tcntxt->ntx_rings = 1;
3456
3457         tcntxt->tx_ring[0].paddr =
3458                 qla_host_to_le64(hw_tx_cntxt->tx_ring_paddr);
3459         tcntxt->tx_ring[0].tx_consumer =
3460                 qla_host_to_le64(hw_tx_cntxt->tx_cons_paddr);
3461         tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS);
3462
3463         tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[intr_idx]);
3464         tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0);
3465
3466         hw_tx_cntxt->txr_free = NUM_TX_DESCRIPTORS;
3467         hw_tx_cntxt->txr_next = hw_tx_cntxt->txr_comp = 0;
3468         *hw_tx_cntxt->tx_cons = 0;
3469
3470         if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
3471                 (sizeof (q80_rq_tx_cntxt_t) >> 2),
3472                 ha->hw.mbox,
3473                 (sizeof(q80_rsp_tx_cntxt_t) >> 2), 0)) {
3474                 device_printf(dev, "%s: failed0\n", __func__);
3475                 return (-1);
3476         }
3477         tcntxt_rsp = (q80_rsp_tx_cntxt_t *)ha->hw.mbox;
3478
3479         err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
3480
3481         if (err) {
3482                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3483                 return -1;
3484         }
3485
3486         hw_tx_cntxt->tx_prod_reg = tcntxt_rsp->tx_ring[0].prod_index;
3487         hw_tx_cntxt->tx_cntxt_id = tcntxt_rsp->tx_ring[0].cntxt_id;
3488
3489         if (qla_config_intr_coalesce(ha, hw_tx_cntxt->tx_cntxt_id, 0, 0))
3490                 return (-1);
3491
3492         return (0);
3493 }
3494
3495
3496 /*
3497  * Name: qla_del_xmt_cntxt_i
3498  * Function: Destroys the Transmit Context for the given Tx ring.
3499  */
3500 static int
3501 qla_del_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
3502 {
3503         device_t                        dev = ha->pci_dev;
3504         q80_tx_cntxt_destroy_t          *tcntxt;
3505         q80_tx_cntxt_destroy_rsp_t      *tcntxt_rsp;
3506         uint32_t                        err;
3507
3508         tcntxt = (q80_tx_cntxt_destroy_t *)ha->hw.mbox;
3509         bzero(tcntxt, (sizeof (q80_tx_cntxt_destroy_t)));
3510
3511         tcntxt->opcode = Q8_MBX_DESTROY_TX_CNTXT;
3512         tcntxt->count_version = (sizeof (q80_tx_cntxt_destroy_t) >> 2);
3513         tcntxt->count_version |= Q8_MBX_CMD_VERSION;
3514
3515         tcntxt->cntxt_id = ha->hw.tx_cntxt[txr_idx].tx_cntxt_id;
3516
3517         if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
3518                 (sizeof (q80_tx_cntxt_destroy_t) >> 2),
3519                 ha->hw.mbox, (sizeof (q80_tx_cntxt_destroy_rsp_t) >> 2), 0)) {
3520                 device_printf(dev, "%s: failed0\n", __func__);
3521                 return (-1);
3522         }
3523         tcntxt_rsp = (q80_tx_cntxt_destroy_rsp_t *)ha->hw.mbox;
3524
3525         err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
3526
3527         if (err) {
3528                 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3529                 return (-1);
3530         }
3531
3532         return (0);
3533 }
3534 static int
3535 qla_del_xmt_cntxt(qla_host_t *ha)
3536 {
3537         uint32_t i;
3538         int ret = 0;
3539
3540         if (!ha->hw.flags.init_tx_cnxt)
3541                 return (ret);
3542
3543         for (i = 0; i < ha->hw.num_tx_rings; i++) {
3544                 if ((ret = qla_del_xmt_cntxt_i(ha, i)) != 0)
3545                         break;
3546         }
3547         ha->hw.flags.init_tx_cnxt = 0;
3548
3549         return (ret);
3550 }
3551
3552 static int
3553 qla_init_xmt_cntxt(qla_host_t *ha)
3554 {
3555         uint32_t i, j;
3556
3557         for (i = 0; i < ha->hw.num_tx_rings; i++) {
3558                 if (qla_init_xmt_cntxt_i(ha, i) != 0) {
3559                         for (j = 0; j < i; j++) {
3560                                 if (qla_del_xmt_cntxt_i(ha, j))
3561                                         break;
3562                         }
3563                         return (-1);
3564                 }
3565         }
3566         ha->hw.flags.init_tx_cnxt = 1;
3567         return (0);
3568 }
3569
3570 static int
3571 qla_hw_all_mcast(qla_host_t *ha, uint32_t add_mcast)
3572 {
3573         int i, nmcast;
3574         uint32_t count = 0;
3575         uint8_t *mcast;
3576
3577         nmcast = ha->hw.nmcast;
3578
3579         QL_DPRINT2(ha, (ha->pci_dev,
3580                 "%s:[0x%x] enter nmcast = %d \n", __func__, add_mcast, nmcast));
3581
3582         mcast = ha->hw.mac_addr_arr;
3583         memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3584
3585         for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
3586                 if ((ha->hw.mcast[i].addr[0] != 0) || 
3587                         (ha->hw.mcast[i].addr[1] != 0) ||
3588                         (ha->hw.mcast[i].addr[2] != 0) ||
3589                         (ha->hw.mcast[i].addr[3] != 0) ||
3590                         (ha->hw.mcast[i].addr[4] != 0) ||
3591                         (ha->hw.mcast[i].addr[5] != 0)) {
3592
3593                         bcopy(ha->hw.mcast[i].addr, mcast, ETHER_ADDR_LEN);
3594                         mcast = mcast + ETHER_ADDR_LEN;
3595                         count++;
3596
3597                         device_printf(ha->pci_dev,
3598                                 "%s: %x:%x:%x:%x:%x:%x \n",
3599                                 __func__, ha->hw.mcast[i].addr[0],
3600                                 ha->hw.mcast[i].addr[1], ha->hw.mcast[i].addr[2],
3601                                 ha->hw.mcast[i].addr[3], ha->hw.mcast[i].addr[4],
3602                                 ha->hw.mcast[i].addr[5]);
3603                         
3604                         if (count == Q8_MAX_MAC_ADDRS) {
3605                                 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr,
3606                                         add_mcast, count)) {
3607                                         device_printf(ha->pci_dev,
3608                                                 "%s: failed\n", __func__);
3609                                         return (-1);
3610                                 }
3611
3612                                 count = 0;
3613                                 mcast = ha->hw.mac_addr_arr;
3614                                 memset(mcast, 0,
3615                                         (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3616                         }
3617
3618                         nmcast--;
3619                 }
3620         }
3621
3622         if (count) {
3623                 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mcast,
3624                         count)) {
3625                         device_printf(ha->pci_dev, "%s: failed\n", __func__);
3626                         return (-1);
3627                 }
3628         }
3629         QL_DPRINT2(ha, (ha->pci_dev,
3630                 "%s:[0x%x] exit nmcast = %d \n", __func__, add_mcast, nmcast));
3631
3632         return 0;
3633 }
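
/*
 * qla_hw_all_mcast() above (and ql_hw_set_multi() below) batch addresses
 * into ha->hw.mac_addr_arr and flush them to the firmware every time
 * Q8_MAX_MAC_ADDRS entries accumulate, with a final flush for any remainder.
 * A sketch of the idiom for a caller-supplied flat array of naddrs
 * addresses; qla_flush_macs_in_batches() is a hypothetical helper name:
 */
#if 0	/* illustrative only; compiled out */
static int
qla_flush_macs_in_batches(qla_host_t *ha, uint8_t *addrs, int naddrs,
	uint32_t add)
{
	uint8_t		*batch = ha->hw.mac_addr_arr;
	uint32_t	count = 0;
	int		i;

	for (i = 0; i < naddrs; i++) {
		bcopy(addrs + (i * ETHER_ADDR_LEN),
			batch + (count * ETHER_ADDR_LEN), ETHER_ADDR_LEN);
		if (++count == Q8_MAX_MAC_ADDRS) {
			if (qla_config_mac_addr(ha, batch, add, count))
				return (-1);
			count = 0;	/* start the next batch */
		}
	}
	if (count && qla_config_mac_addr(ha, batch, add, count))
		return (-1);		/* flush the partial final batch */
	return (0);
}
#endif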
3634
3635 static int
3636 qla_hw_add_all_mcast(qla_host_t *ha)
3637 {
3638         int ret;
3639
3640         ret = qla_hw_all_mcast(ha, 1);
3641
3642         return (ret);
3643 }
3644
3645 int
3646 qla_hw_del_all_mcast(qla_host_t *ha)
3647 {
3648         int ret;
3649
3650         ret = qla_hw_all_mcast(ha, 0);
3651
3652         bzero(ha->hw.mcast, (sizeof (qla_mcast_t) * Q8_MAX_NUM_MULTICAST_ADDRS));
3653         ha->hw.nmcast = 0;
3654
3655         return (ret);
3656 }
3657
3658 static int
3659 qla_hw_mac_addr_present(qla_host_t *ha, uint8_t *mta)
3660 {
3661         int i;
3662
3663         for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3664                 if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0)
3665                         return (0); /* it has already been added */
3666         }
3667         return (-1);
3668 }
3669
3670 static int
3671 qla_hw_add_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
3672 {
3673         int i;
3674
3675         for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3676
3677                 if ((ha->hw.mcast[i].addr[0] == 0) && 
3678                         (ha->hw.mcast[i].addr[1] == 0) &&
3679                         (ha->hw.mcast[i].addr[2] == 0) &&
3680                         (ha->hw.mcast[i].addr[3] == 0) &&
3681                         (ha->hw.mcast[i].addr[4] == 0) &&
3682                         (ha->hw.mcast[i].addr[5] == 0)) {
3683
3684                         bcopy(mta, ha->hw.mcast[i].addr, Q8_MAC_ADDR_LEN);
3685                         ha->hw.nmcast++;        
3686
3687                         mta = mta + ETHER_ADDR_LEN;
3688                         nmcast--;
3689
3690                         if (nmcast == 0)
3691                                 break;
3692                 }
3693
3694         }
3695         return 0;
3696 }
3697
3698 static int
3699 qla_hw_del_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
3700 {
3701         int i;
3702
3703         for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3704                 if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) {
3705
3706                         ha->hw.mcast[i].addr[0] = 0;
3707                         ha->hw.mcast[i].addr[1] = 0;
3708                         ha->hw.mcast[i].addr[2] = 0;
3709                         ha->hw.mcast[i].addr[3] = 0;
3710                         ha->hw.mcast[i].addr[4] = 0;
3711                         ha->hw.mcast[i].addr[5] = 0;
3712
3713                         ha->hw.nmcast--;        
3714
3715                         mta = mta + ETHER_ADDR_LEN;
3716                         nmcast--;
3717
3718                         if (nmcast == 0)
3719                                 break;
3720                 }
3721         }
3722         return 0;
3723 }
3724
3725 /*
3726  * Name: ql_hw_set_multi
3727  * Function: Sets the Multicast Addresses provided by the host O.S into the
3728  *      hardware (for the given interface)
3729  */
3730 int
3731 ql_hw_set_multi(qla_host_t *ha, uint8_t *mcast_addr, uint32_t mcnt,
3732         uint32_t add_mac)
3733 {
3734         uint8_t *mta = mcast_addr;
3735         int i;
3736         int ret = 0;
3737         uint32_t count = 0;
3738         uint8_t *mcast;
3739
3740         mcast = ha->hw.mac_addr_arr;
3741         memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3742
3743         for (i = 0; i < mcnt; i++) {
3744                 if (mta[0] || mta[1] || mta[2] || mta[3] || mta[4] || mta[5]) {
3745                         if (add_mac) {
3746                                 if (qla_hw_mac_addr_present(ha, mta) != 0) {
3747                                         bcopy(mta, mcast, ETHER_ADDR_LEN);
3748                                         mcast = mcast + ETHER_ADDR_LEN;
3749                                         count++;
3750                                 }
3751                         } else {
3752                                 if (qla_hw_mac_addr_present(ha, mta) == 0) {
3753                                         bcopy(mta, mcast, ETHER_ADDR_LEN);
3754                                         mcast = mcast + ETHER_ADDR_LEN;
3755                                         count++;
3756                                 }
3757                         }
3758                 }
3759                 if (count == Q8_MAX_MAC_ADDRS) {
3760                         if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr,
3761                                 add_mac, count)) {
3762                                 device_printf(ha->pci_dev, "%s: failed\n",
3763                                         __func__);
3764                                 return (-1);
3765                         }
3766
3767                         if (add_mac) {
3768                                 qla_hw_add_mcast(ha, ha->hw.mac_addr_arr,
3769                                         count);
3770                         } else {
3771                                 qla_hw_del_mcast(ha, ha->hw.mac_addr_arr,
3772                                         count);
3773                         }
3774
3775                         count = 0;
3776                         mcast = ha->hw.mac_addr_arr;
3777                         memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3778                 }
3779                         
3780                 mta += Q8_MAC_ADDR_LEN;
3781         }
3782
3783         if (count) {
3784                 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mac,
3785                         count)) {
3786                         device_printf(ha->pci_dev, "%s: failed\n", __func__);
3787                         return (-1);
3788                 }
3789                 if (add_mac) {
3790                         qla_hw_add_mcast(ha, ha->hw.mac_addr_arr, count);
3791                 } else {
3792                         qla_hw_del_mcast(ha, ha->hw.mac_addr_arr, count);
3793                 }
3794         }
3795
3796         return (ret);
3797 }
3798
3799 /*
3800  * Name: ql_hw_tx_done_locked
3801  * Function: Handle Transmit Completions
3802  */
3803 void
3804 ql_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx)
3805 {
3806         qla_tx_buf_t *txb;
3807         qla_hw_t *hw = &ha->hw;
3808         uint32_t comp_idx, comp_count = 0;
3809         qla_hw_tx_cntxt_t *hw_tx_cntxt;
3810
3811         hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
3812
3813         /* retrieve index of last entry in tx ring completed */
3814         comp_idx = qla_le32_to_host(*(hw_tx_cntxt->tx_cons));
3815
3816         while (comp_idx != hw_tx_cntxt->txr_comp) {
3817
3818                 txb = &ha->tx_ring[txr_idx].tx_buf[hw_tx_cntxt->txr_comp];
3819
3820                 hw_tx_cntxt->txr_comp++;
3821                 if (hw_tx_cntxt->txr_comp == NUM_TX_DESCRIPTORS)
3822                         hw_tx_cntxt->txr_comp = 0;
3823
3824                 comp_count++;
3825
3826                 if (txb->m_head) {
3827                         ha->ifp->if_opackets++;
3828
3829                         bus_dmamap_sync(ha->tx_tag, txb->map,
3830                                 BUS_DMASYNC_POSTWRITE);
3831                         bus_dmamap_unload(ha->tx_tag, txb->map);
3832                         m_freem(txb->m_head);
3833
3834                         txb->m_head = NULL;
3835                 }
3836         }
3837
3838         hw_tx_cntxt->txr_free += comp_count;
3839
3840         if (hw_tx_cntxt->txr_free > NUM_TX_DESCRIPTORS)
3841                 device_printf(ha->pci_dev, "%s [%d]: txr_idx = %d txr_free = %d "
3842                         "txr_next = %d txr_comp = %d\n", __func__, __LINE__,
3843                         txr_idx, hw_tx_cntxt->txr_free,
3844                         hw_tx_cntxt->txr_next, hw_tx_cntxt->txr_comp);
3845
3846         QL_ASSERT(ha, (hw_tx_cntxt->txr_free <= NUM_TX_DESCRIPTORS), \
3847                 ("%s [%d]: txr_idx = %d txr_free = %d txr_next = %d txr_comp = %d\n",\
3848                 __func__, __LINE__, txr_idx, hw_tx_cntxt->txr_free, \
3849                 hw_tx_cntxt->txr_next, hw_tx_cntxt->txr_comp));
3850         
3851         return;
3852 }
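
/*
 * ql_hw_tx_done_locked() treats the Tx ring as a circular buffer: the
 * firmware writes the last-completed index to *tx_cons, and the driver
 * advances its own txr_comp toward it, reclaiming one descriptor per step
 * and wrapping at NUM_TX_DESCRIPTORS.  The core of that walk, in sketch
 * form (the increment-and-test above is equivalent to the modulo here):
 */
#if 0	/* illustrative only; compiled out */
	while (comp_idx != hw_tx_cntxt->txr_comp) {
		/* unmap and free the mbuf tied to slot txr_comp, then: */
		hw_tx_cntxt->txr_comp =
			(hw_tx_cntxt->txr_comp + 1) % NUM_TX_DESCRIPTORS;
		hw_tx_cntxt->txr_free++;	/* one more slot available */
	}
#endif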
3853
3854 void
3855 ql_update_link_state(qla_host_t *ha)
3856 {
3857         uint32_t link_state = 0;
3858         uint32_t prev_link_state;
3859
3860         prev_link_state =  ha->hw.link_up;
3861
3862         if (ha->ifp->if_drv_flags & IFF_DRV_RUNNING) {
3863                 link_state = READ_REG32(ha, Q8_LINK_STATE);
3864
3865                 if (ha->pci_func == 0) {
3866                         link_state = (((link_state & 0xF) == 1) ? 1 : 0);
3867                 } else {
3868                         link_state = ((((link_state >> 4) & 0xF) == 1) ? 1 : 0);
3869                 }
3870         }
3871
3872         atomic_store_rel_8(&ha->hw.link_up, (uint8_t)link_state);
3873
3874         if (prev_link_state !=  ha->hw.link_up) {
3875                 if (ha->hw.link_up) {
3876                         if_link_state_change(ha->ifp, LINK_STATE_UP);
3877                 } else {
3878                         if_link_state_change(ha->ifp, LINK_STATE_DOWN);
3879                 }
3880         }
3881         return;
3882 }
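
/*
 * Q8_LINK_STATE packs a 4-bit link-state field per PCI function, with the
 * value 1 meaning link up.  The two branches above are the pci_func 0 and 1
 * cases of the general form (assuming the register keeps one nibble per
 * function):
 */
#if 0	/* illustrative only; compiled out */
	link_state = ((((link_state >> (ha->pci_func * 4)) & 0xF) == 1) ? 1 : 0);
#endif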
3883
3884 int
3885 ql_hw_check_health(qla_host_t *ha)
3886 {
3887         uint32_t val;
3888
3889         ha->hw.health_count++;
3890
3891         if (ha->hw.health_count < 500)
3892                 return 0;
3893
3894         ha->hw.health_count = 0;
3895
3896         val = READ_REG32(ha, Q8_ASIC_TEMPERATURE);
3897
3898         if (((val & 0xFFFF) == 2) || ((val & 0xFFFF) == 3) ||
3899                 (QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE))) {
3900                 device_printf(ha->pci_dev, "%s: Temperature Alert"
3901                         " at ts_usecs %ld ts_reg = 0x%08x\n",
3902                         __func__, qla_get_usec_timestamp(), val);
3903
3904                 if (ha->hw.sp_log_stop_events & Q8_SP_LOG_STOP_TEMP_FAILURE)
3905                         ha->hw.sp_log_stop = -1;
3906
3907                 QL_INITIATE_RECOVERY(ha);
3908                 return -1;
3909         }
3910
3911         val = READ_REG32(ha, Q8_FIRMWARE_HEARTBEAT);
3912
3913         if ((val != ha->hw.hbeat_value) &&
3914                 (!(QL_ERR_INJECT(ha, INJCT_HEARTBEAT_FAILURE)))) {
3915                 ha->hw.hbeat_value = val;
3916                 ha->hw.hbeat_failure = 0;
3917                 return 0;
3918         }
3919
3920         ha->hw.hbeat_failure++;
3921
3922         
3923         if ((ha->dbg_level & 0x8000) && (ha->hw.hbeat_failure == 1))
3924                 device_printf(ha->pci_dev, "%s: Heartbeat Failure 1 [0x%08x]\n",
3925                         __func__, val);
3926         if (ha->hw.hbeat_failure < 2) /* we ignore the first failure */
3927                 return 0;
3928         else {
3929                 uint32_t peg_halt_status1;
3930                 uint32_t peg_halt_status2;
3931
3932                 peg_halt_status1 = READ_REG32(ha, Q8_PEG_HALT_STATUS1);
3933                 peg_halt_status2 = READ_REG32(ha, Q8_PEG_HALT_STATUS2);
3934
3935                 device_printf(ha->pci_dev,
3936                         "%s: Heartbeat Failure at ts_usecs = %ld "
3937                         "fw_heart_beat = 0x%08x "
3938                         "peg_halt_status1 = 0x%08x "
3939                         "peg_halt_status2 = 0x%08x\n",
3940                         __func__, qla_get_usec_timestamp(), val,
3941                         peg_halt_status1, peg_halt_status2);
3942
3943                 if (ha->hw.sp_log_stop_events & Q8_SP_LOG_STOP_HBEAT_FAILURE)
3944                         ha->hw.sp_log_stop = -1;
3945         }
3946         QL_INITIATE_RECOVERY(ha);
3947
3948         return -1;
3949 }
3950
3951 static int
3952 qla_init_nic_func(qla_host_t *ha)
3953 {
3954         device_t                dev;
3955         q80_init_nic_func_t     *init_nic;
3956         q80_init_nic_func_rsp_t *init_nic_rsp;
3957         uint32_t                err;
3958
3959         dev = ha->pci_dev;
3960
3961         init_nic = (q80_init_nic_func_t *)ha->hw.mbox;
3962         bzero(init_nic, sizeof(q80_init_nic_func_t));
3963
3964         init_nic->opcode = Q8_MBX_INIT_NIC_FUNC;
3965         init_nic->count_version = (sizeof (q80_init_nic_func_t) >> 2);
3966         init_nic->count_version |= Q8_MBX_CMD_VERSION;
3967
3968         init_nic->options = Q8_INIT_NIC_REG_DCBX_CHNG_AEN;
3969         init_nic->options |= Q8_INIT_NIC_REG_SFP_CHNG_AEN;
3970         init_nic->options |= Q8_INIT_NIC_REG_IDC_AEN;
3971
3972 //qla_dump_buf8(ha, __func__, init_nic, sizeof (q80_init_nic_func_t));
3973         if (qla_mbx_cmd(ha, (uint32_t *)init_nic,
3974                 (sizeof (q80_init_nic_func_t) >> 2),
3975                 ha->hw.mbox, (sizeof (q80_init_nic_func_rsp_t) >> 2), 0)) {
3976                 device_printf(dev, "%s: failed\n", __func__);
3977                 return -1;
3978         }
3979
3980         init_nic_rsp = (q80_init_nic_func_rsp_t *)ha->hw.mbox;
3981 // qla_dump_buf8(ha, __func__, init_nic_rsp, sizeof (q80_init_nic_func_rsp_t));
3982
3983         err = Q8_MBX_RSP_STATUS(init_nic_rsp->regcnt_status);
3984
3985         if (err) {
3986                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3987         } else {
3988                 device_printf(dev, "%s: successful\n", __func__);
3989         }
3990
3991         return 0;
3992 }
3993
3994 static int
3995 qla_stop_nic_func(qla_host_t *ha)
3996 {
3997         device_t                dev;
3998         q80_stop_nic_func_t     *stop_nic;
3999         q80_stop_nic_func_rsp_t *stop_nic_rsp;
4000         uint32_t                err;
4001
4002         dev = ha->pci_dev;
4003
4004         stop_nic = (q80_stop_nic_func_t *)ha->hw.mbox;
4005         bzero(stop_nic, sizeof(q80_stop_nic_func_t));
4006
4007         stop_nic->opcode = Q8_MBX_STOP_NIC_FUNC;
4008         stop_nic->count_version = (sizeof (q80_stop_nic_func_t) >> 2);
4009         stop_nic->count_version |= Q8_MBX_CMD_VERSION;
4010
4011         stop_nic->options = Q8_STOP_NIC_DEREG_DCBX_CHNG_AEN;
4012         stop_nic->options |= Q8_STOP_NIC_DEREG_SFP_CHNG_AEN;
4013
4014 //qla_dump_buf8(ha, __func__, stop_nic, sizeof (q80_stop_nic_func_t));
4015         if (qla_mbx_cmd(ha, (uint32_t *)stop_nic,
4016                 (sizeof (q80_stop_nic_func_t) >> 2),
4017                 ha->hw.mbox, (sizeof (q80_stop_nic_func_rsp_t) >> 2), 0)) {
4018                 device_printf(dev, "%s: failed\n", __func__);
4019                 return -1;
4020         }
4021
4022         stop_nic_rsp = (q80_stop_nic_func_rsp_t *)ha->hw.mbox;
4023 //qla_dump_buf8(ha, __func__, stop_nic_rsp, sizeof (q80_stop_nic_func_rsp_t));
4024
4025         err = Q8_MBX_RSP_STATUS(stop_nic_rsp->regcnt_status);
4026
4027         if (err) {
4028                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4029         }
4030
4031         return 0;
4032 }
4033
4034 static int
4035 qla_query_fw_dcbx_caps(qla_host_t *ha)
4036 {
4037         device_t                        dev;
4038         q80_query_fw_dcbx_caps_t        *fw_dcbx;
4039         q80_query_fw_dcbx_caps_rsp_t    *fw_dcbx_rsp;
4040         uint32_t                        err;
4041
4042         dev = ha->pci_dev;
4043
4044         fw_dcbx = (q80_query_fw_dcbx_caps_t *)ha->hw.mbox;
4045         bzero(fw_dcbx, sizeof(q80_query_fw_dcbx_caps_t));
4046
4047         fw_dcbx->opcode = Q8_MBX_GET_FW_DCBX_CAPS;
4048         fw_dcbx->count_version = (sizeof (q80_query_fw_dcbx_caps_t) >> 2);
4049         fw_dcbx->count_version |= Q8_MBX_CMD_VERSION;
4050
4051         ql_dump_buf8(ha, __func__, fw_dcbx, sizeof (q80_query_fw_dcbx_caps_t));
4052         if (qla_mbx_cmd(ha, (uint32_t *)fw_dcbx,
4053                 (sizeof (q80_query_fw_dcbx_caps_t) >> 2),
4054                 ha->hw.mbox, (sizeof (q80_query_fw_dcbx_caps_rsp_t) >> 2), 0)) {
4055                 device_printf(dev, "%s: failed\n", __func__);
4056                 return -1;
4057         }
4058
4059         fw_dcbx_rsp = (q80_query_fw_dcbx_caps_rsp_t *)ha->hw.mbox;
4060         ql_dump_buf8(ha, __func__, fw_dcbx_rsp,
4061                 sizeof (q80_query_fw_dcbx_caps_rsp_t));
4062
4063         err = Q8_MBX_RSP_STATUS(fw_dcbx_rsp->regcnt_status);
4064
4065         if (err) {
4066                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4067         }
4068
4069         return 0;
4070 }
4071
4072 static int
4073 qla_idc_ack(qla_host_t *ha, uint32_t aen_mb1, uint32_t aen_mb2,
4074         uint32_t aen_mb3, uint32_t aen_mb4)
4075 {
4076         device_t                dev;
4077         q80_idc_ack_t           *idc_ack;
4078         q80_idc_ack_rsp_t       *idc_ack_rsp;
4079         uint32_t                err;
4080         int                     count = 300;
4081
4082         dev = ha->pci_dev;
4083
4084         idc_ack = (q80_idc_ack_t *)ha->hw.mbox;
4085         bzero(idc_ack, sizeof(q80_idc_ack_t));
4086
4087         idc_ack->opcode = Q8_MBX_IDC_ACK;
4088         idc_ack->count_version = (sizeof (q80_idc_ack_t) >> 2);
4089         idc_ack->count_version |= Q8_MBX_CMD_VERSION;
4090
4091         idc_ack->aen_mb1 = aen_mb1;
4092         idc_ack->aen_mb2 = aen_mb2;
4093         idc_ack->aen_mb3 = aen_mb3;
4094         idc_ack->aen_mb4 = aen_mb4;
4095
4096         ha->hw.imd_compl = 0;
4097
4098         if (qla_mbx_cmd(ha, (uint32_t *)idc_ack,
4099                 (sizeof (q80_idc_ack_t) >> 2),
4100                 ha->hw.mbox, (sizeof (q80_idc_ack_rsp_t) >> 2), 0)) {
4101                 device_printf(dev, "%s: failed\n", __func__);
4102                 return -1;
4103         }
4104
4105         idc_ack_rsp = (q80_idc_ack_rsp_t *)ha->hw.mbox;
4106
4107         err = Q8_MBX_RSP_STATUS(idc_ack_rsp->regcnt_status);
4108
4109         if (err) {
4110                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4111                 return(-1);
4112         }
4113
4114         while (count && !ha->hw.imd_compl) {
4115                 qla_mdelay(__func__, 100);
4116                 count--;
4117         }
4118
4119         if (!count)
4120                 return -1;
4121         else
4122                 device_printf(dev, "%s: count %d\n", __func__, count);
4123
4124         return (0);
4125 }
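
/*
 * qla_idc_ack() above (and qla_set_port_config() below) wait for an
 * asynchronous completion by polling ha->hw.imd_compl, which the interrupt
 * path sets: at most 300 iterations of a 100ms delay, i.e. a 30 second
 * ceiling.  The waiting idiom (issue_command() is a hypothetical stand-in
 * for the mailbox command actually issued):
 */
#if 0	/* illustrative only; compiled out */
	int count = 300;

	ha->hw.imd_compl = 0;
	issue_command();		/* completion path sets imd_compl */

	while (count && !ha->hw.imd_compl) {
		qla_mdelay(__func__, 100);
		count--;
	}
	if (!count)
		return (-1);		/* timed out after ~30 seconds */
#endif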
4126
4127 static int
4128 qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits)
4129 {
4130         device_t                dev;
4131         q80_set_port_cfg_t      *pcfg;
4132         q80_set_port_cfg_rsp_t  *pfg_rsp;
4133         uint32_t                err;
4134         int                     count = 300;
4135
4136         dev = ha->pci_dev;
4137
4138         pcfg = (q80_set_port_cfg_t *)ha->hw.mbox;
4139         bzero(pcfg, sizeof(q80_set_port_cfg_t));
4140
4141         pcfg->opcode = Q8_MBX_SET_PORT_CONFIG;
4142         pcfg->count_version = (sizeof (q80_set_port_cfg_t) >> 2);
4143         pcfg->count_version |= Q8_MBX_CMD_VERSION;
4144
4145         pcfg->cfg_bits = cfg_bits;
4146
4147         device_printf(dev, "%s: cfg_bits"
4148                 " [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
4149                 " [0x%x, 0x%x, 0x%x]\n", __func__,
4150                 ((cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
4151                 ((cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
4152                 ((cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0));
4153
4154         ha->hw.imd_compl = 0;
4155
4156         if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
4157                 (sizeof (q80_set_port_cfg_t) >> 2),
4158                 ha->hw.mbox, (sizeof (q80_set_port_cfg_rsp_t) >> 2), 0)) {
4159                 device_printf(dev, "%s: failed\n", __func__);
4160                 return -1;
4161         }
4162
4163         pfg_rsp = (q80_set_port_cfg_rsp_t *)ha->hw.mbox;
4164
4165         err = Q8_MBX_RSP_STATUS(pfg_rsp->regcnt_status);
4166
4167         if (err == Q8_MBX_RSP_IDC_INTRMD_RSP) {
4168                 while (count && !ha->hw.imd_compl) {
4169                         qla_mdelay(__func__, 100);
4170                         count--;
4171                 }
4172                 if (count) {
4173                         device_printf(dev, "%s: count %d\n", __func__, count);
4174
4175                         err = 0;
4176                 }
4177         }
4178
4179         if (err) {
4180                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4181                 return(-1);
4182         }
4183
4184         return (0);
4185 }
4186
4187
4188 static int
4189 qla_get_minidump_tmplt_size(qla_host_t *ha, uint32_t *size)
4190 {
4191         uint32_t                        err;
4192         device_t                        dev = ha->pci_dev;
4193         q80_config_md_templ_size_t      *md_size;
4194         q80_config_md_templ_size_rsp_t  *md_size_rsp;
4195
4196 #ifndef QL_LDFLASH_FW
4197
4198         ql_minidump_template_hdr_t *hdr;
4199
4200         hdr = (ql_minidump_template_hdr_t *)ql83xx_minidump;
4201         *size = hdr->size_of_template;
4202         return (0);
4203
4204 #endif /* #ifndef QL_LDFLASH_FW */
4205
4206         md_size = (q80_config_md_templ_size_t *) ha->hw.mbox;
4207         bzero(md_size, sizeof(q80_config_md_templ_size_t));
4208
4209         md_size->opcode = Q8_MBX_GET_MINIDUMP_TMPLT_SIZE;
4210         md_size->count_version = (sizeof (q80_config_md_templ_size_t) >> 2);
4211         md_size->count_version |= Q8_MBX_CMD_VERSION;
4212
4213         if (qla_mbx_cmd(ha, (uint32_t *) md_size,
4214                 (sizeof(q80_config_md_templ_size_t) >> 2), ha->hw.mbox,
4215                 (sizeof(q80_config_md_templ_size_rsp_t) >> 2), 0)) {
4216
4217                 device_printf(dev, "%s: failed\n", __func__);
4218
4219                 return (-1);
4220         }
4221
4222         md_size_rsp = (q80_config_md_templ_size_rsp_t *) ha->hw.mbox;
4223
4224         err = Q8_MBX_RSP_STATUS(md_size_rsp->regcnt_status);
4225
4226         if (err) {
4227                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4228                 return(-1);
4229         }
4230
4231         *size = md_size_rsp->templ_size;
4232
4233         return (0);
4234 }
4235
4236 static int
4237 qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits)
4238 {
4239         device_t                dev;
4240         q80_get_port_cfg_t      *pcfg;
4241         q80_get_port_cfg_rsp_t  *pcfg_rsp;
4242         uint32_t                err;
4243
4244         dev = ha->pci_dev;
4245
4246         pcfg = (q80_get_port_cfg_t *)ha->hw.mbox;
4247         bzero(pcfg, sizeof(q80_get_port_cfg_t));
4248
4249         pcfg->opcode = Q8_MBX_GET_PORT_CONFIG;
4250         pcfg->count_version = (sizeof (q80_get_port_cfg_t) >> 2);
4251         pcfg->count_version |= Q8_MBX_CMD_VERSION;
4252
4253         if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
4254                 (sizeof (q80_get_port_cfg_t) >> 2),
4255                 ha->hw.mbox, (sizeof (q80_get_port_cfg_rsp_t) >> 2), 0)) {
4256                 device_printf(dev, "%s: failed\n", __func__);
4257                 return -1;
4258         }
4259
4260         pcfg_rsp = (q80_get_port_cfg_rsp_t *)ha->hw.mbox;
4261
4262         err = Q8_MBX_RSP_STATUS(pcfg_rsp->regcnt_status);
4263
4264         if (err) {
4265                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4266                 return(-1);
4267         }
4268
4269         device_printf(dev, "%s: [cfg_bits, port type]"
4270                 " [0x%08x, 0x%02x] [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
4271                 " [0x%x, 0x%x, 0x%x]\n", __func__,
4272                 pcfg_rsp->cfg_bits, pcfg_rsp->phys_port_type,
4273                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
4274                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
4275                 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0)
4276                 );
4277
4278         *cfg_bits = pcfg_rsp->cfg_bits;
4279
4280         return (0);
4281 }
4282
4283 int
4284 ql_iscsi_pdu(qla_host_t *ha, struct mbuf *mp)
4285 {
4286         struct ether_vlan_header        *eh;
4287         uint16_t                        etype;
4288         struct ip                       *ip = NULL;
4289         struct ip6_hdr                  *ip6 = NULL;
4290         struct tcphdr                   *th = NULL;
4291         uint32_t                        hdrlen;
4292         uint32_t                        offset;
4293         uint8_t                         buf[sizeof(struct ip6_hdr)];
4294
4295         eh = mtod(mp, struct ether_vlan_header *);
4296
4297         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4298                 hdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4299                 etype = ntohs(eh->evl_proto);
4300         } else {
4301                 hdrlen = ETHER_HDR_LEN;
4302                 etype = ntohs(eh->evl_encap_proto);
4303         }
4304
4305         if (etype == ETHERTYPE_IP) {
4306
4307                 offset = (hdrlen + sizeof (struct ip));
4308
4309                 if (mp->m_len >= offset) {
4310                         ip = (struct ip *)(mp->m_data + hdrlen);
4311                 } else {
4312                         m_copydata(mp, hdrlen, sizeof (struct ip), buf);
4313                         ip = (struct ip *)buf;
4314                 }
4315
4316                 if (ip->ip_p == IPPROTO_TCP) {
4317
4318                         hdrlen += ip->ip_hl << 2;
4319                         offset = hdrlen + 4;
4320         
4321                         if (mp->m_len >= offset) {
4322                                 th = (struct tcphdr *)(mp->m_data + hdrlen);
4323                         } else {
4324                                 m_copydata(mp, hdrlen, 4, buf);
4325                                 th = (struct tcphdr *)buf;
4326                         }
4327                 }
4328
4329         } else if (etype == ETHERTYPE_IPV6) {
4330
4331                 offset = (hdrlen + sizeof (struct ip6_hdr));
4332
4333                 if (mp->m_len >= offset) {
4334                         ip6 = (struct ip6_hdr *)(mp->m_data + hdrlen);
4335                 } else {
4336                         m_copydata(mp, hdrlen, sizeof (struct ip6_hdr), buf);
4337                         ip6 = (struct ip6_hdr *)buf;
4338                 }
4339
4340                 if (ip6->ip6_nxt == IPPROTO_TCP) {
4341
4342                         hdrlen += sizeof(struct ip6_hdr);
4343                         offset = hdrlen + 4;
4344
4345                         if (mp->m_len >= offset) {
4346                                 th = (struct tcphdr *)(mp->m_data + hdrlen);
4347                         } else {
4348                                 m_copydata(mp, hdrlen, 4, buf);
4349                                 th = (struct tcphdr *)buf;
4350                         }
4351                 }
4352         }
4353
4354         if (th != NULL) {
4355                 if ((th->th_sport == htons(3260)) ||
4356                         (th->th_dport == htons(3260)))
4357                         return 0;
4358         }
4359         return (-1);
4360 }
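
/*
 * ql_iscsi_pdu() classifies a frame as iSCSI by walking Ethernet (with an
 * optional VLAN tag) -> IPv4/IPv6 -> TCP and matching the well-known iSCSI
 * port 3260.  Each header is read in place when the first mbuf is long
 * enough and otherwise assembled with m_copydata(); the access idiom, for a
 * placeholder header type hdr_t:
 */
#if 0	/* illustrative only; compiled out */
	if (mp->m_len >= (hdrlen + sizeof(hdr_t))) {
		hdr = (hdr_t *)(mp->m_data + hdrlen);	/* contiguous case */
	} else {
		m_copydata(mp, hdrlen, sizeof(hdr_t), buf);
		hdr = (hdr_t *)buf;		/* header spanned mbufs */
	}
#endif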
4361
4362 void
4363 qla_hw_async_event(qla_host_t *ha)
4364 {
4365         switch (ha->hw.aen_mb0) {
4366         case 0x8101:
4367                 (void)qla_idc_ack(ha, ha->hw.aen_mb1, ha->hw.aen_mb2,
4368                         ha->hw.aen_mb3, ha->hw.aen_mb4);
4369
4370                 break;
4371
4372         default:
4373                 break;
4374         }
4375
4376         return;
4377 }
4378
4379 #ifdef QL_LDFLASH_FW
4380 static int
4381 ql_get_minidump_template(qla_host_t *ha)
4382 {
4383         uint32_t                        err;
4384         device_t                        dev = ha->pci_dev;
4385         q80_config_md_templ_cmd_t       *md_templ;
4386         q80_config_md_templ_cmd_rsp_t   *md_templ_rsp;
4387
4388         md_templ = (q80_config_md_templ_cmd_t *) ha->hw.mbox;
4389         bzero(md_templ, (sizeof (q80_config_md_templ_cmd_t)));
4390
4391         md_templ->opcode = Q8_MBX_GET_MINIDUMP_TMPLT;
4392         md_templ->count_version = (sizeof(q80_config_md_templ_cmd_t) >> 2);
4393         md_templ->count_version |= Q8_MBX_CMD_VERSION;
4394
4395         md_templ->buf_addr = ha->hw.dma_buf.minidump.dma_addr;
4396         md_templ->buff_size = ha->hw.dma_buf.minidump.size;
4397
4398         if (qla_mbx_cmd(ha, (uint32_t *) md_templ,
4399                 (sizeof(q80_config_md_templ_cmd_t) >> 2),
4400                  ha->hw.mbox,
4401                 (sizeof(q80_config_md_templ_cmd_rsp_t) >> 2), 0)) {
4402
4403                 device_printf(dev, "%s: failed\n", __func__);
4404
4405                 return (-1);
4406         }
4407
4408         md_templ_rsp = (q80_config_md_templ_cmd_rsp_t *) ha->hw.mbox;
4409
4410         err = Q8_MBX_RSP_STATUS(md_templ_rsp->regcnt_status);
4411
4412         if (err) {
4413                 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4414                 return (-1);
4415         }
4416
4417         return (0);
4418
4419 }
4420 #endif /* #ifdef QL_LDFLASH_FW */
4421
4422 /*
4423  * Minidump related functionality 
4424  */
4425
4426 static int ql_parse_template(qla_host_t *ha);
4427
4428 static uint32_t ql_rdcrb(qla_host_t *ha,
4429                         ql_minidump_entry_rdcrb_t *crb_entry,
4430                         uint32_t * data_buff);
4431
4432 static uint32_t ql_pollrd(qla_host_t *ha,
4433                         ql_minidump_entry_pollrd_t *entry,
4434                         uint32_t * data_buff);
4435
4436 static uint32_t ql_pollrd_modify_write(qla_host_t *ha,
4437                         ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
4438                         uint32_t *data_buff);
4439
4440 static uint32_t ql_L2Cache(qla_host_t *ha,
4441                         ql_minidump_entry_cache_t *cacheEntry,
4442                         uint32_t * data_buff);
4443
4444 static uint32_t ql_L1Cache(qla_host_t *ha,
4445                         ql_minidump_entry_cache_t *cacheEntry,
4446                         uint32_t *data_buff);
4447
4448 static uint32_t ql_rdocm(qla_host_t *ha,
4449                         ql_minidump_entry_rdocm_t *ocmEntry,
4450                         uint32_t *data_buff);
4451
4452 static uint32_t ql_rdmem(qla_host_t *ha,
4453                         ql_minidump_entry_rdmem_t *mem_entry,
4454                         uint32_t *data_buff);
4455
4456 static uint32_t ql_rdrom(qla_host_t *ha,
4457                         ql_minidump_entry_rdrom_t *romEntry,
4458                         uint32_t *data_buff);
4459
4460 static uint32_t ql_rdmux(qla_host_t *ha,
4461                         ql_minidump_entry_mux_t *muxEntry,
4462                         uint32_t *data_buff);
4463
4464 static uint32_t ql_rdmux2(qla_host_t *ha,
4465                         ql_minidump_entry_mux2_t *muxEntry,
4466                         uint32_t *data_buff);
4467
4468 static uint32_t ql_rdqueue(qla_host_t *ha,
4469                         ql_minidump_entry_queue_t *queueEntry,
4470                         uint32_t *data_buff);
4471
4472 static uint32_t ql_cntrl(qla_host_t *ha,
4473                         ql_minidump_template_hdr_t *template_hdr,
4474                         ql_minidump_entry_cntrl_t *crbEntry);
4475
4476
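/*
 * Compute the capture buffer size from the template header: bit k of
 * the capture mask (k >= 1) selects capture_size_array[k]. For example,
 * a capture mask of 0x6 (bits 1 and 2 set) would require
 * capture_size_array[1] + capture_size_array[2] bytes.
 */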
4477 static uint32_t
4478 ql_minidump_size(qla_host_t *ha)
4479 {
4480         uint32_t i, k;
4481         uint32_t size = 0;
4482         ql_minidump_template_hdr_t *hdr;
4483
4484         hdr = (ql_minidump_template_hdr_t *)ha->hw.dma_buf.minidump.dma_b;
4485
4486         i = 0x2;
4487
4488         for (k = 1; k < QL_DBG_CAP_SIZE_ARRAY_LEN; k++) {
4489                 if (i & ha->hw.mdump_capture_mask)
4490                         size += hdr->capture_size_array[k];
4491                 i = i << 1;
4492         }
4493         return (size);
4494 }
4495
4496 static void
4497 ql_free_minidump_buffer(qla_host_t *ha)
4498 {
4499         if (ha->hw.mdump_buffer != NULL) {
4500                 free(ha->hw.mdump_buffer, M_QLA83XXBUF);
4501                 ha->hw.mdump_buffer = NULL;
4502                 ha->hw.mdump_buffer_size = 0;
4503         }
4504         return;
4505 }
4506
4507 static int
4508 ql_alloc_minidump_buffer(qla_host_t *ha)
4509 {
4510         ha->hw.mdump_buffer_size = ql_minidump_size(ha);
4511
4512         if (!ha->hw.mdump_buffer_size)
4513                 return (-1);
4514
4515         ha->hw.mdump_buffer = malloc(ha->hw.mdump_buffer_size, M_QLA83XXBUF,
4516                                         M_NOWAIT);
4517
4518         if (ha->hw.mdump_buffer == NULL)
4519                 return (-1);
4520
4521         return (0);
4522 }
4523
4524 static void
4525 ql_free_minidump_template_buffer(qla_host_t *ha)
4526 {
4527         if (ha->hw.mdump_template != NULL) {
4528                 free(ha->hw.mdump_template, M_QLA83XXBUF);
4529                 ha->hw.mdump_template = NULL;
4530                 ha->hw.mdump_template_size = 0;
4531         }
4532         return;
4533 }
4534
4535 static int
4536 ql_alloc_minidump_template_buffer(qla_host_t *ha)
4537 {
4538         ha->hw.mdump_template_size = ha->hw.dma_buf.minidump.size;
4539
4540         ha->hw.mdump_template = malloc(ha->hw.mdump_template_size,
4541                                         M_QLA83XXBUF, M_NOWAIT);
4542
4543         if (ha->hw.mdump_template == NULL)
4544                 return (-1);
4545
4546         return (0);
4547 }
4548
4549 static int
4550 ql_alloc_minidump_buffers(qla_host_t *ha)
4551 {
4552         int ret;
4553
4554         ret = ql_alloc_minidump_template_buffer(ha);
4555
4556         if (ret)
4557                 return (ret);
4558
4559         ret = ql_alloc_minidump_buffer(ha);
4560
4561         if (ret)
4562                 ql_free_minidump_template_buffer(ha);
4563
4564         return (ret);
4565 }
4566
4567
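/*
 * Ones'-complement style checksum over the template: sum every 32-bit
 * word into a 64-bit accumulator, fold the carries back into the low
 * 32 bits, and return the complement; a valid template folds to
 * 0xFFFFFFFF and therefore yields 0. For example, the two words
 * 0xFFFFFFFE and 0x00000001 sum to 0xFFFFFFFF, whose complement
 * (truncated to 32 bits) is 0.
 */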
4568 static uint32_t
4569 ql_validate_minidump_checksum(qla_host_t *ha)
4570 {
4571         uint64_t sum = 0;
4572         int count;
4573         uint32_t *template_buff;
4574
4575         count = ha->hw.dma_buf.minidump.size / sizeof (uint32_t);
4576         template_buff = ha->hw.dma_buf.minidump.dma_b;
4577
4578         while (count-- > 0) {
4579                 sum += *template_buff++;
4580         }
4581
4582         while (sum >> 32) {
4583                 sum = (sum & 0xFFFFFFFF) + (sum >> 32);
4584         }
4585
4586         return (~sum);
4587 }
4588
4589 int
4590 ql_minidump_init(qla_host_t *ha)
4591 {
4592         int             ret = 0;
4593         uint32_t        template_size = 0;
4594         device_t        dev = ha->pci_dev;
4595
4596         /*
4597          * Get Minidump Template Size
4598          */
4599         ret = qla_get_minidump_tmplt_size(ha, &template_size);
4600
4601         if (ret || (template_size == 0)) {
4602                 device_printf(dev, "%s: failed [%d, %d]\n", __func__, ret,
4603                         template_size);
4604                 return (-1);
4605         }
4606
4607         /*
4608          * Allocate Memory for Minidump Template
4609          */
4610
4611         ha->hw.dma_buf.minidump.alignment = 8;
4612         ha->hw.dma_buf.minidump.size = template_size;
4613
4614 #ifdef QL_LDFLASH_FW
4615         if (ql_alloc_dmabuf(ha, &ha->hw.dma_buf.minidump)) {
4616
4617                 device_printf(dev, "%s: minidump dma alloc failed\n", __func__);
4618
4619                 return (-1);
4620         }
4621         ha->hw.dma_buf.flags.minidump = 1;
4622
4623         /*
4624          * Retrieve Minidump Template
4625          */
4626         ret = ql_get_minidump_template(ha);
4627 #else
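        /*
         * Without QL_LDFLASH_FW the template is not fetched from the
         * firmware; dma_b simply points at the template that is
         * compiled into the driver (ql83xx_minidump).
         */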
4628         ha->hw.dma_buf.minidump.dma_b = ql83xx_minidump;
4629
4630 #endif /* #ifdef QL_LDFLASH_FW */
4631
4632         if (ret == 0) {
4633
4634                 ret = ql_validate_minidump_checksum(ha);
4635
4636                 if (ret == 0) {
4637
4638                         ret = ql_alloc_minidump_buffers(ha);
4639
4640                         if (ret == 0)
4641                                 ha->hw.mdump_init = 1;
4642                         else
4643                                 device_printf(dev,
4644                                         "%s: ql_alloc_minidump_buffers"
4645                                         " failed\n", __func__);
4646                 } else {
4647                         device_printf(dev, "%s: ql_validate_minidump_checksum"
4648                                 " failed\n", __func__);
4649                 }
4650         } else {
4651                 device_printf(dev, "%s: ql_get_minidump_template failed\n",
4652                          __func__);
4653         }
4654
4655         if (ret)
4656                 ql_minidump_free(ha);
4657
4658         return (ret);
4659 }
4660
4661 static void
4662 ql_minidump_free(qla_host_t *ha)
4663 {
4664         ha->hw.mdump_init = 0;
4665         if (ha->hw.dma_buf.flags.minidump) {
4666                 ha->hw.dma_buf.flags.minidump = 0;
4667                 ql_free_dmabuf(ha, &ha->hw.dma_buf.minidump);
4668         }
4669
4670         ql_free_minidump_template_buffer(ha);
4671         ql_free_minidump_buffer(ha);
4672
4673         return;
4674 }
4675
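/*
 * Capture a minidump (one-shot; gated by mdump_init/mdump_done):
 * quiesce the hardware via ql_stop_sequence(), copy the pristine
 * template into the scratch template buffer, let ql_parse_template()
 * fill the capture buffer, then restart the hardware.
 */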
4676 void
4677 ql_minidump(qla_host_t *ha)
4678 {
4679         if (!ha->hw.mdump_init)
4680                 return;
4681
4682         if (ha->hw.mdump_done)
4683                 return;
4684         ha->hw.mdump_usec_ts = qla_get_usec_timestamp();
4685         ha->hw.mdump_start_seq_index = ql_stop_sequence(ha);
4686
4687         bzero(ha->hw.mdump_buffer, ha->hw.mdump_buffer_size);
4688         bzero(ha->hw.mdump_template, ha->hw.mdump_template_size);
4689
4690         bcopy(ha->hw.dma_buf.minidump.dma_b, ha->hw.mdump_template,
4691                 ha->hw.mdump_template_size);
4692
4693         ql_parse_template(ha);
4694  
4695         ql_start_sequence(ha, ha->hw.mdump_start_seq_index);
4696
4697         ha->hw.mdump_done = 1;
4698
4699         return;
4700 }
4701
4702
4703 /*
4704  * helper routines
4705  */
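
/*
 * If the number of bytes actually captured differs from the entry's
 * expected entry_capture_size, record the actual size and set
 * QL_DBG_SIZE_ERR_FLAG so the mismatch is visible in the dump.
 */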
4706 static void 
4707 ql_entry_err_chk(ql_minidump_entry_t *entry, uint32_t esize)
4708 {
4709         if (esize != entry->hdr.entry_capture_size) {
4710                 entry->hdr.entry_capture_size = esize;
4711                 entry->hdr.driver_flags |= QL_DBG_SIZE_ERR_FLAG;
4712         }
4713         return;
4714 }
4715
4716
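/*
 * Walk the template entry list. Each entry header carries a type, a
 * size and a capture mask: entries that do not match the configured
 * capture mask (or that would overflow the capture buffer) are marked
 * QL_DBG_SKIPPED_FLAG; the rest are dispatched to the type-specific
 * readers below, which append their data at dump_buff + buff_level.
 */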
4717 static int 
4718 ql_parse_template(qla_host_t *ha)
4719 {
4720         uint32_t num_of_entries, buff_level, e_cnt, esize;
4721         uint32_t end_cnt, rv = 0;
4722         char *dump_buff, *dbuff;
4723         int sane_start = 0, sane_end = 0;
4724         ql_minidump_template_hdr_t *template_hdr;
4725         ql_minidump_entry_t *entry;
4726         uint32_t capture_mask; 
4727         uint32_t dump_size; 
4728
4729         /* Setup parameters */
4730         template_hdr = (ql_minidump_template_hdr_t *)ha->hw.mdump_template;
4731
4732         if (template_hdr->entry_type == TLHDR)
4733                 sane_start = 1;
4734         
4735         dump_buff = (char *) ha->hw.mdump_buffer;
4736
4737         num_of_entries = template_hdr->num_of_entries;
4738
4739         entry = (ql_minidump_entry_t *) ((char *)template_hdr
4740                         + template_hdr->first_entry_offset);
4741
4742         template_hdr->saved_state_array[QL_OCM0_ADDR_INDX] =
4743                 template_hdr->ocm_window_array[ha->pci_func];
4744         template_hdr->saved_state_array[QL_PCIE_FUNC_INDX] = ha->pci_func;
4745
4746         capture_mask = ha->hw.mdump_capture_mask;
4747         dump_size = ha->hw.mdump_buffer_size;
4748
4749         template_hdr->driver_capture_mask = capture_mask;
4750
4751         QL_DPRINT80(ha, (ha->pci_dev,
4752                 "%s: sane_start = %d num_of_entries = %d "
4753                 "capture_mask = 0x%x dump_size = %d \n", 
4754                 __func__, sane_start, num_of_entries, capture_mask, dump_size));
4755
4756         for (buff_level = 0, e_cnt = 0; e_cnt < num_of_entries; e_cnt++) {
4757
4758                 /*
4759                  * If the entry's capture mask does not match the requested
4760                  * capture mask, flag the entry skipped in driver_flags.
4761                  */
4762                 
4763                 if (!(entry->hdr.entry_capture_mask & capture_mask)) {
4764
4765                         entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4766                         entry = (ql_minidump_entry_t *) ((char *) entry
4767                                         + entry->hdr.entry_size);
4768                         continue;
4769                 }
4770
4771                 /*
4772                  * This is ONLY needed when the allocated capture buffer
4773                  * is too small to hold all of the entries required for a
4774                  * given capture mask. Ideally the buffer contents would
4775                  * be emptied to a file before processing the next entry.
4776                  * If the buff_full_flag is set, no further capture will
4777                  * happen and all remaining non-control entries will be
4778                  * skipped.
4779                  */
4780                 if (entry->hdr.entry_capture_size != 0) {
4781                         if ((buff_level + entry->hdr.entry_capture_size) >
4782                                 dump_size) {
4783                                 /*  Try to recover by emptying buffer to file */
4784                                 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4785                                 entry = (ql_minidump_entry_t *) ((char *) entry
4786                                                 + entry->hdr.entry_size);
4787                                 continue;
4788                         }
4789                 }
4790
4791                 /*
4792                  * Decode the entry type and process it accordingly
4793                  */
4794
4795                 switch (entry->hdr.entry_type) {
4796                 case RDNOP:
4797                         break;
4798
4799                 case RDEND:
4800                         if (sane_end == 0) {
4801                                 end_cnt = e_cnt;
4802                         }
4803                         sane_end++;
4804                         break;
4805
4806                 case RDCRB:
4807                         dbuff = dump_buff + buff_level;
4808                         esize = ql_rdcrb(ha, (void *)entry, (void *)dbuff);
4809                         ql_entry_err_chk(entry, esize);
4810                         buff_level += esize;
4811                         break;
4812
4813                 case POLLRD:
4814                         dbuff = dump_buff + buff_level;
4815                         esize = ql_pollrd(ha, (void *)entry, (void *)dbuff);
4816                         ql_entry_err_chk(entry, esize);
4817                         buff_level += esize;
4818                         break;
4819
4820                 case POLLRDMWR:
4821                         dbuff = dump_buff + buff_level;
4822                         esize = ql_pollrd_modify_write(ha, (void *)entry,
4823                                         (void *)dbuff);
4824                         ql_entry_err_chk(entry, esize);
4825                         buff_level += esize;
4826                         break;
4827
4828                 case L2ITG:
4829                 case L2DTG:
4830                 case L2DAT:
4831                 case L2INS:
4832                         dbuff = dump_buff + buff_level;
4833                         esize = ql_L2Cache(ha, (void *)entry, (void *)dbuff);
4834                         if (esize == (uint32_t)-1) {
4835                                 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4836                         } else {
4837                                 ql_entry_err_chk(entry, esize);
4838                                 buff_level += esize;
4839                         }
4840                         break;
4841
4842                 case L1DAT:
4843                 case L1INS:
4844                         dbuff = dump_buff + buff_level;
4845                         esize = ql_L1Cache(ha, (void *)entry, (void *)dbuff);
4846                         ql_entry_err_chk(entry, esize);
4847                         buff_level += esize;
4848                         break;
4849
4850                 case RDOCM:
4851                         dbuff = dump_buff + buff_level;
4852                         esize = ql_rdocm(ha, (void *)entry, (void *)dbuff);
4853                         ql_entry_err_chk(entry, esize);
4854                         buff_level += esize;
4855                         break;
4856
4857                 case RDMEM:
4858                         dbuff = dump_buff + buff_level;
4859                         esize = ql_rdmem(ha, (void *)entry, (void *)dbuff);
4860                         ql_entry_err_chk(entry, esize);
4861                         buff_level += esize;
4862                         break;
4863
4864                 case BOARD:
4865                 case RDROM:
4866                         dbuff = dump_buff + buff_level;
4867                         esize = ql_rdrom(ha, (void *)entry, (void *)dbuff);
4868                         ql_entry_err_chk(entry, esize);
4869                         buff_level += esize;
4870                         break;
4871
4872                 case RDMUX:
4873                         dbuff = dump_buff + buff_level;
4874                         esize = ql_rdmux(ha, (void *)entry, (void *)dbuff);
4875                         ql_entry_err_chk(entry, esize);
4876                         buff_level += esize;
4877                         break;
4878
4879                 case RDMUX2:
4880                         dbuff = dump_buff + buff_level;
4881                         esize = ql_rdmux2(ha, (void *)entry, (void *)dbuff);
4882                         ql_entry_err_chk(entry, esize);
4883                         buff_level += esize;
4884                         break;
4885
4886                 case QUEUE:
4887                         dbuff = dump_buff + buff_level;
4888                         esize = ql_rdqueue(ha, (void *)entry, (void *)dbuff);
4889                         ql_entry_err_chk(entry, esize);
4890                         buff_level += esize;
4891                         break;
4892
4893                 case CNTRL:
4894                         if ((rv = ql_cntrl(ha, template_hdr, (void *)entry))) {
4895                                 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4896                         }
4897                         break;
4898                 default:
4899                         entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4900                         break;
4901                 }
4902                 /*  next entry in the template */
4903                 entry = (ql_minidump_entry_t *) ((char *) entry
4904                                                 + entry->hdr.entry_size);
4905         }
4906
4907         if (!sane_start || (sane_end > 1)) {
4908                 device_printf(ha->pci_dev,
4909                         "\n%s: Template configuration error. Check Template\n",
4910                         __func__);
4911         }
4912         
4913         QL_DPRINT80(ha, (ha->pci_dev, "%s: Minidump num of entries = %d\n",
4914                 __func__, template_hdr->num_of_entries));
4915
4916         return (0);
4917 }
4918
4919 /*
4920  * Read CRB operation.
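 * Each of the op_count reads emits an {address, value} pair into the
 * capture buffer (addresses step by addr_stride), so a completed entry
 * contributes op_count * 8 bytes.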
4921  */
4922 static uint32_t
4923 ql_rdcrb(qla_host_t *ha, ql_minidump_entry_rdcrb_t * crb_entry,
4924         uint32_t * data_buff)
4925 {
4926         int loop_cnt;
4927         int ret;
4928         uint32_t op_count, addr, stride, value = 0;
4929
4930         addr = crb_entry->addr;
4931         op_count = crb_entry->op_count;
4932         stride = crb_entry->addr_stride;
4933
4934         for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
4935
4936                 ret = ql_rdwr_indreg32(ha, addr, &value, 1);
4937
4938                 if (ret)
4939                         return (0);
4940
4941                 *data_buff++ = addr;
4942                 *data_buff++ = value;
4943                 addr = addr + stride;
4944         }
4945
4946         /*
4947          * Return the number of bytes written to the capture buffer.
4948          */
4949         return (op_count * (2 * sizeof(uint32_t)));
4950 }
4951
4952 /*
4953  * Handle L2 Cache.
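 * For each of op_count tags: program the tag register, optionally
 * write the control register, and (if poll_mask is set) poll the
 * control register until the poll_mask bits clear before reading
 * read_addr_cnt words. A poll timeout aborts the whole entry with -1.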
4954  */
4955
4956 static uint32_t 
4957 ql_L2Cache(qla_host_t *ha, ql_minidump_entry_cache_t *cacheEntry,
4958         uint32_t * data_buff)
4959 {
4960         int i, k;
4961         int loop_cnt;
4962         int ret;
4963
4964         uint32_t read_value;
4965         uint32_t addr, read_addr, cntrl_addr, tag_reg_addr, cntl_value_w;
4966         uint32_t tag_value, read_cnt;
4967         volatile uint8_t cntl_value_r;
4968         long timeout;
4969         uint32_t data;
4970
4971         loop_cnt = cacheEntry->op_count;
4972
4973         read_addr = cacheEntry->read_addr;
4974         cntrl_addr = cacheEntry->control_addr;
4975         cntl_value_w = (uint32_t) cacheEntry->write_value;
4976
4977         tag_reg_addr = cacheEntry->tag_reg_addr;
4978
4979         tag_value = cacheEntry->init_tag_value;
4980         read_cnt = cacheEntry->read_addr_cnt;
4981
4982         for (i = 0; i < loop_cnt; i++) {
4983
4984                 ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
4985                 if (ret)
4986                         return (0);
4987
4988                 if (cacheEntry->write_value != 0) { 
4989
4990                         ret = ql_rdwr_indreg32(ha, cntrl_addr,
4991                                         &cntl_value_w, 0);
4992                         if (ret)
4993                                 return (0);
4994                 }
4995
4996                 if (cacheEntry->poll_mask != 0) { 
4997
4998                         timeout = cacheEntry->poll_wait;
4999
5000                         ret = ql_rdwr_indreg32(ha, cntrl_addr, &data, 1);
5001                         if (ret)
5002                                 return (0);
5003
5004                         cntl_value_r = (uint8_t)data;
5005
5006                         while ((cntl_value_r & cacheEntry->poll_mask) != 0) {
5007
5008                                 if (timeout) {
5009                                         qla_mdelay(__func__, 1);
5010                                         timeout--;
5011                                 } else
5012                                         break;
5013
5014                                 ret = ql_rdwr_indreg32(ha, cntrl_addr,
5015                                                 &data, 1);
5016                                 if (ret)
5017                                         return (0);
5018
5019                                 cntl_value_r = (uint8_t)data;
5020                         }
5021                         if (!timeout) {
5022                                 /*
5023                                  * Timeout: the core dump capture failed.
5024                                  * Skip the remaining entries, write the
5025                                  * buffer out to a file, and use the
5026                                  * driver-specific fields in the template
5027                                  * header to report this error.
5028                                  */
5029                                 return (-1);
5030                         }
5031                 }
5032
5033                 addr = read_addr;
5034                 for (k = 0; k < read_cnt; k++) {
5035
5036                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
5037                         if (ret)
5038                                 return (0);
5039
5040                         *data_buff++ = read_value;
5041                         addr += cacheEntry->read_addr_stride;
5042                 }
5043
5044                 tag_value += cacheEntry->tag_value_stride;
5045         }
5046
5047         return (read_cnt * loop_cnt * sizeof(uint32_t));
5048 }
5049
5050 /*
5051  * Handle L1 Cache.
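 * Same tag/control/read sequence as ql_L2Cache() above, but without
 * the control-register polling.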
5052  */
5053
5054 static uint32_t 
5055 ql_L1Cache(qla_host_t *ha,
5056         ql_minidump_entry_cache_t *cacheEntry,
5057         uint32_t *data_buff)
5058 {
5059         int ret;
5060         int i, k;
5061         int loop_cnt;
5062
5063         uint32_t read_value;
5064         uint32_t addr, read_addr, cntrl_addr, tag_reg_addr;
5065         uint32_t tag_value, read_cnt;
5066         uint32_t cntl_value_w;
5067
5068         loop_cnt = cacheEntry->op_count;
5069
5070         read_addr = cacheEntry->read_addr;
5071         cntrl_addr = cacheEntry->control_addr;
5072         cntl_value_w = (uint32_t) cacheEntry->write_value;
5073
5074         tag_reg_addr = cacheEntry->tag_reg_addr;
5075
5076         tag_value = cacheEntry->init_tag_value;
5077         read_cnt = cacheEntry->read_addr_cnt;
5078
5079         for (i = 0; i < loop_cnt; i++) {
5080
5081                 ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
5082                 if (ret)
5083                         return (0);
5084
5085                 ret = ql_rdwr_indreg32(ha, cntrl_addr, &cntl_value_w, 0);
5086                 if (ret)
5087                         return (0);
5088
5089                 addr = read_addr;
5090                 for (k = 0; k < read_cnt; k++) {
5091
5092                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
5093                         if (ret)
5094                                 return (0);
5095
5096                         *data_buff++ = read_value;
5097                         addr += cacheEntry->read_addr_stride;
5098                 }
5099
5100                 tag_value += cacheEntry->tag_value_stride;
5101         }
5102
5103         return (read_cnt * loop_cnt * sizeof(uint32_t));
5104 }
5105
5106 /*
5107  * Reading OCM memory
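 * OCM (on-chip memory) appears to be directly addressable through the
 * register window, so it is read with plain READ_REG32() calls rather
 * than the indirect register interface used elsewhere.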
5108  */
5109
5110 static uint32_t 
5111 ql_rdocm(qla_host_t *ha,
5112         ql_minidump_entry_rdocm_t *ocmEntry,
5113         uint32_t *data_buff)
5114 {
5115         int i, loop_cnt;
5116         volatile uint32_t addr;
5117         volatile uint32_t value;
5118
5119         addr = ocmEntry->read_addr;
5120         loop_cnt = ocmEntry->op_count;
5121
5122         for (i = 0; i < loop_cnt; i++) {
5123                 value = READ_REG32(ha, addr);
5124                 *data_buff++ = value;
5125                 addr += ocmEntry->read_addr_stride;
5126         }
5127         return (loop_cnt * sizeof(value));
5128 }
5129
5130 /*
5131  * Read memory
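 * Off-chip memory is read through ql_rdwr_offchip_mem() in 16-byte
 * units (four 32-bit words per transaction); any remainder of
 * read_data_size modulo 16 is dropped by the integer division below.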
5132  */
5133
5134 static uint32_t 
5135 ql_rdmem(qla_host_t *ha,
5136         ql_minidump_entry_rdmem_t *mem_entry,
5137         uint32_t *data_buff)
5138 {
5139         int ret;
5140         int i, loop_cnt;
5141         volatile uint32_t addr;
5142         q80_offchip_mem_val_t val;
5143
5144         addr = mem_entry->read_addr;
5145
5146         /* size in bytes / 16 */
5147         loop_cnt = mem_entry->read_data_size / (sizeof(uint32_t) * 4);
5148
5149         for (i = 0; i < loop_cnt; i++) {
5150
5151                 ret = ql_rdwr_offchip_mem(ha, (addr & 0x0ffffffff), &val, 1);
5152                 if (ret)
5153                         return (0);
5154
5155                 *data_buff++ = val.data_lo;
5156                 *data_buff++ = val.data_hi;
5157                 *data_buff++ = val.data_ulo;
5158                 *data_buff++ = val.data_uhi;
5159
5160                 addr += (sizeof(uint32_t) * 4);
5161         }
5162
5163         return (loop_cnt * (sizeof(uint32_t) * 4));
5164 }
5165
5166 /*
5167  * Read Rom
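 * Flash/ROM contents are read one 32-bit word at a time via
 * ql_rd_flash32(), read_data_size bytes in total.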
5168  */
5169
5170 static uint32_t 
5171 ql_rdrom(qla_host_t *ha,
5172         ql_minidump_entry_rdrom_t *romEntry,
5173         uint32_t *data_buff)
5174 {
5175         int ret;
5176         int i, loop_cnt;
5177         uint32_t addr;
5178         uint32_t value;
5179
5180         addr = romEntry->read_addr;
5181         loop_cnt = romEntry->read_data_size; /* This is size in bytes */
5182         loop_cnt /= sizeof(value);
5183
5184         for (i = 0; i < loop_cnt; i++) {
5185
5186                 ret = ql_rd_flash32(ha, addr, &value);
5187                 if (ret)
5188                         return (0);
5189
5190                 *data_buff++ = value;
5191                 addr += sizeof(value);
5192         }
5193
5194         return (loop_cnt * sizeof(value));
5195 }
5196
5197 /*
5198  * Read MUX data
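 * Select/read pattern: write select_value to select_addr, read the
 * muxed datum at read_addr, emit the {select, data} pair, then advance
 * select_value by select_value_stride; repeated op_count times.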
5199  */
5200
5201 static uint32_t 
5202 ql_rdmux(qla_host_t *ha,
5203         ql_minidump_entry_mux_t *muxEntry,
5204         uint32_t *data_buff)
5205 {
5206         int ret;
5207         int loop_cnt;
5208         uint32_t read_value, sel_value;
5209         uint32_t read_addr, select_addr;
5210
5211         select_addr = muxEntry->select_addr;
5212         sel_value = muxEntry->select_value;
5213         read_addr = muxEntry->read_addr;
5214
5215         for (loop_cnt = 0; loop_cnt < muxEntry->op_count; loop_cnt++) {
5216
5217                 ret = ql_rdwr_indreg32(ha, select_addr, &sel_value, 0);
5218                 if (ret)
5219                         return (0);
5220
5221                 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
5222                 if (ret)
5223                         return (0);
5224
5225                 *data_buff++ = sel_value;
5226                 *data_buff++ = read_value;
5227
5228                 sel_value += muxEntry->select_value_stride;
5229         }
5230
5231         return (loop_cnt * (2 * sizeof(uint32_t)));
5232 }
5233
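/*
 * Two-stage MUX read: each iteration stages select_value_1 and then
 * select_value_2 through select_addr_1, writes the masked value to
 * select_addr_2, and reads read_addr after each selection, emitting
 * two {select, data} pairs (four words) per loop.
 */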
5234 static uint32_t
5235 ql_rdmux2(qla_host_t *ha,
5236         ql_minidump_entry_mux2_t *muxEntry,
5237         uint32_t *data_buff)
5238 {
5239         int ret;
5240         int loop_cnt;
5241
5242         uint32_t select_addr_1, select_addr_2;
5243         uint32_t select_value_1, select_value_2;
5244         uint32_t select_value_count, select_value_mask;
5245         uint32_t read_addr, read_value;
5246
5247         select_addr_1 = muxEntry->select_addr_1;
5248         select_addr_2 = muxEntry->select_addr_2;
5249         select_value_1 = muxEntry->select_value_1;
5250         select_value_2 = muxEntry->select_value_2;
5251         select_value_count = muxEntry->select_value_count;
5252         select_value_mask  = muxEntry->select_value_mask;
5253
5254         read_addr = muxEntry->read_addr;
5255
5256         for (loop_cnt = 0; loop_cnt < select_value_count;
5257                 loop_cnt++) {
5258
5259                 uint32_t temp_sel_val;
5260
5261                 ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_1, 0);
5262                 if (ret)
5263                         return (0);
5264
5265                 temp_sel_val = select_value_1 & select_value_mask;
5266
5267                 ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
5268                 if (ret)
5269                         return (0);
5270
5271                 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
5272                 if (ret)
5273                         return (0);
5274
5275                 *data_buff++ = temp_sel_val;
5276                 *data_buff++ = read_value;
5277
5278                 ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_2, 0);
5279                 if (ret)
5280                         return (0);
5281
5282                 temp_sel_val = select_value_2 & select_value_mask;
5283
5284                 ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
5285                 if (ret)
5286                         return (0);
5287
5288                 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
5289                 if (ret)
5290                         return (0);
5291
5292                 *data_buff++ = temp_sel_val;
5293                 *data_buff++ = read_value;
5294
5295                 select_value_1 += muxEntry->select_value_stride;
5296                 select_value_2 += muxEntry->select_value_stride;
5297         }
5298
5299         return (loop_cnt * (4 * sizeof(uint32_t)));
5300 }
5301
5302 /*
5303  * Handling Queue State Reads.
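 * For each of op_count queues (queue_id stepped by queue_id_stride):
 * select the queue through select_addr, then read read_addr_cnt
 * registers starting at read_addr with stride read_addr_stride.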
5304  */
5305
5306 static uint32_t 
5307 ql_rdqueue(qla_host_t *ha,
5308         ql_minidump_entry_queue_t *queueEntry,
5309         uint32_t *data_buff)
5310 {
5311         int ret;
5312         int loop_cnt, k;
5313         uint32_t read_value;
5314         uint32_t read_addr, read_stride, select_addr;
5315         uint32_t queue_id, read_cnt;
5316
5317         read_cnt = queueEntry->read_addr_cnt;
5318         read_stride = queueEntry->read_addr_stride;
5319         select_addr = queueEntry->select_addr;
5320
5321         for (loop_cnt = 0, queue_id = 0; loop_cnt < queueEntry->op_count;
5322                 loop_cnt++) {
5323
5324                 ret = ql_rdwr_indreg32(ha, select_addr, &queue_id, 0);
5325                 if (ret)
5326                         return (0);
5327
5328                 read_addr = queueEntry->read_addr;
5329
5330                 for (k = 0; k < read_cnt; k++) {
5331
5332                         ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
5333                         if (ret)
5334                                 return (0);
5335
5336                         *data_buff++ = read_value;
5337                         read_addr += read_stride;
5338                 }
5339
5340                 queue_id += queueEntry->queue_id_stride;
5341         }
5342
5343         return (loop_cnt * (read_cnt * sizeof(uint32_t)));
5344 }
5345
5346 /*
5347  * Handling control entries.
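 * A control entry is a small opcode interpreter applied to op_count
 * addresses (stepped by addr_stride): WR writes value_1; RW reads a
 * register and writes it back; AND/OR fold value_2/value_3 into the
 * register; POLL waits until (reg & value_2) == value_1; RDSTATE/
 * WRSTATE/MDSTATE move values through the template's saved_state_array.
 * Control entries modify hardware state but capture no data.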
5348  */
5349
5350 static uint32_t 
5351 ql_cntrl(qla_host_t *ha,
5352         ql_minidump_template_hdr_t *template_hdr,
5353         ql_minidump_entry_cntrl_t *crbEntry)
5354 {
5355         int ret;
5356         int count;
5357         uint32_t opcode, read_value, addr, entry_addr;
5358         long timeout;
5359
5360         entry_addr = crbEntry->addr;
5361
5362         for (count = 0; count < crbEntry->op_count; count++) {
5363                 opcode = crbEntry->opcode;
5364
5365                 if (opcode & QL_DBG_OPCODE_WR) {
5366
5367                         ret = ql_rdwr_indreg32(ha, entry_addr,
5368                                         &crbEntry->value_1, 0);
5369                         if (ret)
5370                                 return (0);
5371
5372                         opcode &= ~QL_DBG_OPCODE_WR;
5373                 }
5374
5375                 if (opcode & QL_DBG_OPCODE_RW) {
5376
5377                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
5378                         if (ret)
5379                                 return (0);
5380
5381                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
5382                         if (ret)
5383                                 return (0);
5384
5385                         opcode &= ~QL_DBG_OPCODE_RW;
5386                 }
5387
5388                 if (opcode & QL_DBG_OPCODE_AND) {
5389
5390                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
5391                         if (ret)
5392                                 return (0);
5393
5394                         read_value &= crbEntry->value_2;
5395                         opcode &= ~QL_DBG_OPCODE_AND;
5396
5397                         if (opcode & QL_DBG_OPCODE_OR) {
5398                                 read_value |= crbEntry->value_3;
5399                                 opcode &= ~QL_DBG_OPCODE_OR;
5400                         }
5401
5402                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
5403                         if (ret)
5404                                 return (0);
5405                 }
5406
5407                 if (opcode & QL_DBG_OPCODE_OR) {
5408
5409                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
5410                         if (ret)
5411                                 return (0);
5412
5413                         read_value |= crbEntry->value_3;
5414
5415                         ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
5416                         if (ret)
5417                                 return (0);
5418
5419                         opcode &= ~QL_DBG_OPCODE_OR;
5420                 }
5421
5422                 if (opcode & QL_DBG_OPCODE_POLL) {
5423
5424                         opcode &= ~QL_DBG_OPCODE_POLL;
5425                         timeout = crbEntry->poll_timeout;
5426                         addr = entry_addr;
5427
5428                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
5429                         if (ret)
5430                                 return (0);
5431
5432                         while ((read_value & crbEntry->value_2)
5433                                 != crbEntry->value_1) {
5434
5435                                 if (timeout) {
5436                                         qla_mdelay(__func__, 1);
5437                                         timeout--;
5438                                 } else
5439                                         break;
5440
5441                                 ret = ql_rdwr_indreg32(ha, addr,
5442                                                 &read_value, 1);
5443                                 if (ret)
5444                                         return (0);
5445                         }
5446
5447                         if (!timeout) {
5448                                 /*
5449                                  * Report a timeout error: the core dump
5450                                  * capture failed. Skip the remaining
5451                                  * entries, write the buffer out to a
5452                                  * file, and use the driver-specific
5453                                  * fields in the template header to
5454                                  * report this error.
5455                                  */
5456                                 return (-1);
5457                         }
5458                 }
5459
5460                 if (opcode & QL_DBG_OPCODE_RDSTATE) {
5461                         /*
5462                          * decide which address to use.
5463                          */
5464                         if (crbEntry->state_index_a) {
5465                                 addr = template_hdr->saved_state_array[
5466                                                 crbEntry->state_index_a];
5467                         } else {
5468                                 addr = entry_addr;
5469                         }
5470
5471                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
5472                         if (ret)
5473                                 return (0);
5474
5475                         template_hdr->saved_state_array[crbEntry->state_index_v]
5476                                         = read_value;
5477                         opcode &= ~QL_DBG_OPCODE_RDSTATE;
5478                 }
5479
5480                 if (opcode & QL_DBG_OPCODE_WRSTATE) {
5481                         /*
5482                          * decide which value to use.
5483                          */
5484                         if (crbEntry->state_index_v) {
5485                                 read_value = template_hdr->saved_state_array[
5486                                                 crbEntry->state_index_v];
5487                         } else {
5488                                 read_value = crbEntry->value_1;
5489                         }
5490                         /*
5491                          * decide which address to use.
5492                          */
5493                         if (crbEntry->state_index_a) {
5494                                 addr = template_hdr->saved_state_array[
5495                                                 crbEntry->state_index_a];
5496                         } else {
5497                                 addr = entry_addr;
5498                         }
5499
5500                         ret = ql_rdwr_indreg32(ha, addr, &read_value, 0);
5501                         if (ret)
5502                                 return (0);
5503
5504                         opcode &= ~QL_DBG_OPCODE_WRSTATE;
5505                 }
5506
5507                 if (opcode & QL_DBG_OPCODE_MDSTATE) {
5508                         /*  Read value from saved state using index */
5509                         read_value = template_hdr->saved_state_array[
5510                                                 crbEntry->state_index_v];
5511
5512                         read_value <<= crbEntry->shl; /* Shift left */
5513                         read_value >>= crbEntry->shr; /* Shift right */
5514
5515                         if (crbEntry->value_2) {
5516                                 /* check if AND mask is provided */
5517                                 read_value &= crbEntry->value_2;
5518                         }
5519
5520                         read_value |= crbEntry->value_3; /* OR operation */
5521                         read_value += crbEntry->value_1; /* increment op */
5522
5523                         /* Write value back to state area. */
5524
5525                         template_hdr->saved_state_array[crbEntry->state_index_v]
5526                                         = read_value;
5527                         opcode &= ~QL_DBG_OPCODE_MDSTATE;
5528                 }
5529
5530                 entry_addr += crbEntry->addr_stride;
5531         }
5532
5533         return (0);
5534 }
5535
5536 /*
5537  * Handling rd poll entry.
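 * Like ql_rdmux(), but after each select the select register is polled
 * (up to 'poll' attempts) until one of the mask bits is set before the
 * datum at read_addr is read; a poll timeout aborts the entry.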
5538  */
5539
5540 static uint32_t 
5541 ql_pollrd(qla_host_t *ha, ql_minidump_entry_pollrd_t *entry,
5542         uint32_t *data_buff)
5543 {
5544         int ret;
5545         int loop_cnt;
5546         uint32_t op_count, select_addr, select_value_stride, select_value;
5547         uint32_t read_addr, poll, mask, data_size, data;
5548         uint32_t wait_count = 0;
5549
5550         select_addr            = entry->select_addr;
5551         read_addr              = entry->read_addr;
5552         select_value           = entry->select_value;
5553         select_value_stride    = entry->select_value_stride;
5554         op_count               = entry->op_count;
5555         poll                   = entry->poll;
5556         mask                   = entry->mask;
5557         data_size              = entry->data_size;
5558
5559         for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
5560
5561                 ret = ql_rdwr_indreg32(ha, select_addr, &select_value, 0);
5562                 if (ret)
5563                         return (0);
5564
5565                 wait_count = 0;
5566
5567                 while (wait_count < poll) {
5568
5569                         uint32_t temp;
5570
5571                         ret = ql_rdwr_indreg32(ha, select_addr, &temp, 1);
5572                         if (ret)
5573                                 return (0);
5574
5575                         if ((temp & mask) != 0) {
5576                                 break;
5577                         }
5578                         wait_count++;
5579                 }
5580
5581                 if (wait_count == poll) {
5582                         device_printf(ha->pci_dev,
5583                                 "%s: Error in processing entry\n", __func__);
5584                         device_printf(ha->pci_dev,
5585                                 "%s: wait_count <0x%x> poll <0x%x>\n",
5586                                 __func__, wait_count, poll);
5587                         return (0);
5588                 }
5589
5590                 ret = ql_rdwr_indreg32(ha, read_addr, &data, 1);
5591                 if (ret)
5592                         return (0);
5593
5594                 *data_buff++ = select_value;
5595                 *data_buff++ = data;
5596                 select_value = select_value + select_value_stride;
5597         }
5598
5599         /*
5600          * Return the number of bytes written to the capture buffer.
5601          */
5602         return (loop_cnt * (2 * sizeof(uint32_t)));
5603 }
5604
5605
5606 /*
5607  * Handling rd modify write poll entry.
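 * Sequence: write value_1 to addr_1 and poll addr_1 for the mask bits;
 * read addr_2, AND it with modify_mask, and write it back; write
 * value_2 to addr_1 and poll again; finally emit the {addr_2, data}
 * pair.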
5608  */
5609
5610 static uint32_t 
5611 ql_pollrd_modify_write(qla_host_t *ha,
5612         ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
5613         uint32_t *data_buff)
5614 {
5615         int ret;
5616         uint32_t addr_1, addr_2, value_1, value_2, data;
5617         uint32_t poll, mask, data_size, modify_mask;
5618         uint32_t wait_count = 0;
5619
5620         addr_1          = entry->addr_1;
5621         addr_2          = entry->addr_2;
5622         value_1         = entry->value_1;
5623         value_2         = entry->value_2;
5624
5625         poll            = entry->poll;
5626         mask            = entry->mask;
5627         modify_mask     = entry->modify_mask;
5628         data_size       = entry->data_size;
5629
5630
5631         ret = ql_rdwr_indreg32(ha, addr_1, &value_1, 0);
5632         if (ret)
5633                 return (0);
5634
5635         wait_count = 0;
5636         while (wait_count < poll) {
5637
5638                 uint32_t temp;
5639
5640                 ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
5641                 if (ret)
5642                         return (0);
5643
5644                 if ((temp & mask) != 0) {
5645                         break;
5646                 }
5647                 wait_count++;
5648         }
5649
5650         if (wait_count == poll) {
5651                 device_printf(ha->pci_dev, "%s Error in processing entry\n",
5652                         __func__);
5653         } else {
5654
5655                 ret = ql_rdwr_indreg32(ha, addr_2, &data, 1);
5656                 if (ret)
5657                         return (0);
5658
5659                 data = (data & modify_mask);
5660
5661                 ret = ql_rdwr_indreg32(ha, addr_2, &data, 0);
5662                 if (ret)
5663                         return (0);
5664
5665                 ret = ql_rdwr_indreg32(ha, addr_1, &value_2, 0);
5666                 if (ret)
5667                         return (0);
5668
5669                 /* Poll again */
5670                 wait_count = 0;
5671                 while (wait_count < poll) {
5672
5673                         uint32_t temp;
5674
5675                         ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
5676                         if (ret)
5677                                 return (0);
5678
5679                         if ((temp & mask) != 0) {
5680                                 break;
5681                         }
5682                         wait_count++;
5683                 }
5684                 *data_buff++ = addr_2;
5685                 *data_buff++ = data;
5686         }
5687
5688         /*
5689          * Return the amount of data written (one {address, data} pair).
5690          */
5691         return (2 * sizeof(uint32_t));
5692 }
5693
5694