2 * Copyright (c) 2013-2016 Qlogic Corporation
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
30 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
31 * Content: Contains Hardware dependent functions
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
40 #include "ql_inline.h"
44 #include "ql_minidump.h"
50 static void qla_del_rcv_cntxt(qla_host_t *ha);
51 static int qla_init_rcv_cntxt(qla_host_t *ha);
52 static void qla_del_xmt_cntxt(qla_host_t *ha);
53 static int qla_init_xmt_cntxt(qla_host_t *ha);
54 static int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
55 uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause);
56 static int qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx,
57 uint32_t num_intrs, uint32_t create);
58 static int qla_config_rss(qla_host_t *ha, uint16_t cntxt_id);
59 static int qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id,
60 int tenable, int rcv);
61 static int qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode);
62 static int qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id);
64 static int qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd,
65 uint8_t *hdr);
66 static int qla_hw_add_all_mcast(qla_host_t *ha);
67 static int qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds);
69 static int qla_init_nic_func(qla_host_t *ha);
70 static int qla_stop_nic_func(qla_host_t *ha);
71 static int qla_query_fw_dcbx_caps(qla_host_t *ha);
72 static int qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits);
73 static int qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits);
74 static int qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode);
75 static int qla_get_cam_search_mode(qla_host_t *ha);
77 static void ql_minidump_free(qla_host_t *ha);
82 qla_stop_pegs(qla_host_t *ha)
86 ql_rdwr_indreg32(ha, Q8_CRB_PEG_0, &val, 0);
87 ql_rdwr_indreg32(ha, Q8_CRB_PEG_1, &val, 0);
88 ql_rdwr_indreg32(ha, Q8_CRB_PEG_2, &val, 0);
89 ql_rdwr_indreg32(ha, Q8_CRB_PEG_3, &val, 0);
90 ql_rdwr_indreg32(ha, Q8_CRB_PEG_4, &val, 0);
91 device_printf(ha->pci_dev, "%s PEGS HALTED!!!!!\n", __func__);
95 qla_sysctl_stop_pegs(SYSCTL_HANDLER_ARGS)
100 err = sysctl_handle_int(oidp, &ret, 0, req);
103 if (err || !req->newptr)
107 ha = (qla_host_t *)arg1;
108 if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
110 QLA_UNLOCK(ha, __func__);
116 #endif /* #ifdef QL_DBG */
119 qla_validate_set_port_cfg_bit(uint32_t bits)
121 if ((bits & 0xF) > 1)
124 if (((bits >> 4) & 0xF) > 2)
127 if (((bits >> 8) & 0xF) > 2)
134 qla_sysctl_port_cfg(SYSCTL_HANDLER_ARGS)
140 err = sysctl_handle_int(oidp, &ret, 0, req);
142 if (err || !req->newptr)
145 ha = (qla_host_t *)arg1;
147 if ((qla_validate_set_port_cfg_bit((uint32_t)ret) == 0)) {
149 err = qla_get_port_config(ha, &cfg_bits);
152 goto qla_sysctl_set_port_cfg_exit;
155 cfg_bits |= Q8_PORT_CFG_BITS_DCBX_ENABLE;
157 cfg_bits &= ~Q8_PORT_CFG_BITS_DCBX_ENABLE;
161 cfg_bits &= ~Q8_PORT_CFG_BITS_PAUSE_CFG_MASK;
163 if ((ret & 0xF) == 0) {
164 cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_DISABLED;
165 } else if ((ret & 0xF) == 1) {
166 cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_STD;
168 cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_PPM;
172 cfg_bits &= ~Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK;
175 cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT_RCV;
176 } else if (ret == 1) {
177 cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT;
179 cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_RCV;
182 if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
183 err = qla_set_port_config(ha, cfg_bits);
184 QLA_UNLOCK(ha, __func__);
186 device_printf(ha->pci_dev, "%s: failed\n", __func__);
189 if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
190 err = qla_get_port_config(ha, &cfg_bits);
191 QLA_UNLOCK(ha, __func__);
193 device_printf(ha->pci_dev, "%s: failed\n", __func__);
197 qla_sysctl_set_port_cfg_exit:
202 qla_sysctl_set_cam_search_mode(SYSCTL_HANDLER_ARGS)
207 err = sysctl_handle_int(oidp, &ret, 0, req);
209 if (err || !req->newptr)
212 ha = (qla_host_t *)arg1;
214 if ((ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_INTERNAL) ||
215 (ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_AUTO)) {
217 if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
218 err = qla_set_cam_search_mode(ha, (uint32_t)ret);
219 QLA_UNLOCK(ha, __func__);
221 device_printf(ha->pci_dev, "%s: failed\n", __func__);
225 device_printf(ha->pci_dev, "%s: ret = %d\n", __func__, ret);
232 qla_sysctl_get_cam_search_mode(SYSCTL_HANDLER_ARGS)
237 err = sysctl_handle_int(oidp, &ret, 0, req);
239 if (err || !req->newptr)
242 ha = (qla_host_t *)arg1;
243 if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
244 err = qla_get_cam_search_mode(ha);
245 QLA_UNLOCK(ha, __func__);
247 device_printf(ha->pci_dev, "%s: failed\n", __func__);
254 qlnx_add_hw_mac_stats_sysctls(qla_host_t *ha)
256 struct sysctl_ctx_list *ctx;
257 struct sysctl_oid_list *children;
258 struct sysctl_oid *ctx_oid;
260 ctx = device_get_sysctl_ctx(ha->pci_dev);
261 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
263 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_mac",
264 CTLFLAG_RD, NULL, "stats_hw_mac");
265 children = SYSCTL_CHILDREN(ctx_oid);
267 SYSCTL_ADD_QUAD(ctx, children,
268 OID_AUTO, "xmt_frames",
269 CTLFLAG_RD, &ha->hw.mac.xmt_frames,
272 SYSCTL_ADD_QUAD(ctx, children,
273 OID_AUTO, "xmt_bytes",
274 CTLFLAG_RD, &ha->hw.mac.xmt_bytes,
277 SYSCTL_ADD_QUAD(ctx, children,
278 OID_AUTO, "xmt_mcast_pkts",
279 CTLFLAG_RD, &ha->hw.mac.xmt_mcast_pkts,
282 SYSCTL_ADD_QUAD(ctx, children,
283 OID_AUTO, "xmt_bcast_pkts",
284 CTLFLAG_RD, &ha->hw.mac.xmt_bcast_pkts,
287 SYSCTL_ADD_QUAD(ctx, children,
288 OID_AUTO, "xmt_pause_frames",
289 CTLFLAG_RD, &ha->hw.mac.xmt_pause_frames,
292 SYSCTL_ADD_QUAD(ctx, children,
293 OID_AUTO, "xmt_cntrl_pkts",
294 CTLFLAG_RD, &ha->hw.mac.xmt_cntrl_pkts,
297 SYSCTL_ADD_QUAD(ctx, children,
298 OID_AUTO, "xmt_pkt_lt_64bytes",
299 CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_64bytes,
300 "xmt_pkt_lt_64bytes");
302 SYSCTL_ADD_QUAD(ctx, children,
303 OID_AUTO, "xmt_pkt_lt_127bytes",
304 CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_127bytes,
305 "xmt_pkt_lt_127bytes");
307 SYSCTL_ADD_QUAD(ctx, children,
308 OID_AUTO, "xmt_pkt_lt_255bytes",
309 CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_255bytes,
310 "xmt_pkt_lt_255bytes");
312 SYSCTL_ADD_QUAD(ctx, children,
313 OID_AUTO, "xmt_pkt_lt_511bytes",
314 CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_511bytes,
315 "xmt_pkt_lt_511bytes");
317 SYSCTL_ADD_QUAD(ctx, children,
318 OID_AUTO, "xmt_pkt_lt_1023bytes",
319 CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_1023bytes,
320 "xmt_pkt_lt_1023bytes");
322 SYSCTL_ADD_QUAD(ctx, children,
323 OID_AUTO, "xmt_pkt_lt_1518bytes",
324 CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_1518bytes,
325 "xmt_pkt_lt_1518bytes");
327 SYSCTL_ADD_QUAD(ctx, children,
328 OID_AUTO, "xmt_pkt_gt_1518bytes",
329 CTLFLAG_RD, &ha->hw.mac.xmt_pkt_gt_1518bytes,
330 "xmt_pkt_gt_1518bytes");
332 SYSCTL_ADD_QUAD(ctx, children,
333 OID_AUTO, "rcv_frames",
334 CTLFLAG_RD, &ha->hw.mac.rcv_frames,
337 SYSCTL_ADD_QUAD(ctx, children,
338 OID_AUTO, "rcv_bytes",
339 CTLFLAG_RD, &ha->hw.mac.rcv_bytes,
342 SYSCTL_ADD_QUAD(ctx, children,
343 OID_AUTO, "rcv_mcast_pkts",
344 CTLFLAG_RD, &ha->hw.mac.rcv_mcast_pkts,
347 SYSCTL_ADD_QUAD(ctx, children,
348 OID_AUTO, "rcv_bcast_pkts",
349 CTLFLAG_RD, &ha->hw.mac.rcv_bcast_pkts,
352 SYSCTL_ADD_QUAD(ctx, children,
353 OID_AUTO, "rcv_pause_frames",
354 CTLFLAG_RD, &ha->hw.mac.rcv_pause_frames,
357 SYSCTL_ADD_QUAD(ctx, children,
358 OID_AUTO, "rcv_cntrl_pkts",
359 CTLFLAG_RD, &ha->hw.mac.rcv_cntrl_pkts,
362 SYSCTL_ADD_QUAD(ctx, children,
363 OID_AUTO, "rcv_pkt_lt_64bytes",
364 CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_64bytes,
365 "rcv_pkt_lt_64bytes");
367 SYSCTL_ADD_QUAD(ctx, children,
368 OID_AUTO, "rcv_pkt_lt_127bytes",
369 CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_127bytes,
370 "rcv_pkt_lt_127bytes");
372 SYSCTL_ADD_QUAD(ctx, children,
373 OID_AUTO, "rcv_pkt_lt_255bytes",
374 CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_255bytes,
375 "rcv_pkt_lt_255bytes");
377 SYSCTL_ADD_QUAD(ctx, children,
378 OID_AUTO, "rcv_pkt_lt_511bytes",
379 CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_511bytes,
380 "rcv_pkt_lt_511bytes");
382 SYSCTL_ADD_QUAD(ctx, children,
383 OID_AUTO, "rcv_pkt_lt_1023bytes",
384 CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_1023bytes,
385 "rcv_pkt_lt_1023bytes");
387 SYSCTL_ADD_QUAD(ctx, children,
388 OID_AUTO, "rcv_pkt_lt_1518bytes",
389 CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_1518bytes,
390 "rcv_pkt_lt_1518bytes");
392 SYSCTL_ADD_QUAD(ctx, children,
393 OID_AUTO, "rcv_pkt_gt_1518bytes",
394 CTLFLAG_RD, &ha->hw.mac.rcv_pkt_gt_1518bytes,
395 "rcv_pkt_gt_1518bytes");
397 SYSCTL_ADD_QUAD(ctx, children,
398 OID_AUTO, "rcv_len_error",
399 CTLFLAG_RD, &ha->hw.mac.rcv_len_error,
402 SYSCTL_ADD_QUAD(ctx, children,
403 OID_AUTO, "rcv_len_small",
404 CTLFLAG_RD, &ha->hw.mac.rcv_len_small,
407 SYSCTL_ADD_QUAD(ctx, children,
408 OID_AUTO, "rcv_len_large",
409 CTLFLAG_RD, &ha->hw.mac.rcv_len_large,
412 SYSCTL_ADD_QUAD(ctx, children,
413 OID_AUTO, "rcv_jabber",
414 CTLFLAG_RD, &ha->hw.mac.rcv_jabber,
417 SYSCTL_ADD_QUAD(ctx, children,
418 OID_AUTO, "rcv_dropped",
419 CTLFLAG_RD, &ha->hw.mac.rcv_dropped,
422 SYSCTL_ADD_QUAD(ctx, children,
423 OID_AUTO, "fcs_error",
424 CTLFLAG_RD, &ha->hw.mac.fcs_error,
427 SYSCTL_ADD_QUAD(ctx, children,
428 OID_AUTO, "align_error",
429 CTLFLAG_RD, &ha->hw.mac.align_error,
432 SYSCTL_ADD_QUAD(ctx, children,
433 OID_AUTO, "eswitched_frames",
434 CTLFLAG_RD, &ha->hw.mac.eswitched_frames,
437 SYSCTL_ADD_QUAD(ctx, children,
438 OID_AUTO, "eswitched_bytes",
439 CTLFLAG_RD, &ha->hw.mac.eswitched_bytes,
442 SYSCTL_ADD_QUAD(ctx, children,
443 OID_AUTO, "eswitched_mcast_frames",
444 CTLFLAG_RD, &ha->hw.mac.eswitched_mcast_frames,
445 "eswitched_mcast_frames");
447 SYSCTL_ADD_QUAD(ctx, children,
448 OID_AUTO, "eswitched_bcast_frames",
449 CTLFLAG_RD, &ha->hw.mac.eswitched_bcast_frames,
450 "eswitched_bcast_frames");
452 SYSCTL_ADD_QUAD(ctx, children,
453 OID_AUTO, "eswitched_ucast_frames",
454 CTLFLAG_RD, &ha->hw.mac.eswitched_ucast_frames,
455 "eswitched_ucast_frames");
457 SYSCTL_ADD_QUAD(ctx, children,
458 OID_AUTO, "eswitched_err_free_frames",
459 CTLFLAG_RD, &ha->hw.mac.eswitched_err_free_frames,
460 "eswitched_err_free_frames");
462 SYSCTL_ADD_QUAD(ctx, children,
463 OID_AUTO, "eswitched_err_free_bytes",
464 CTLFLAG_RD, &ha->hw.mac.eswitched_err_free_bytes,
465 "eswitched_err_free_bytes");
471 qlnx_add_hw_rcv_stats_sysctls(qla_host_t *ha)
473 struct sysctl_ctx_list *ctx;
474 struct sysctl_oid_list *children;
475 struct sysctl_oid *ctx_oid;
477 ctx = device_get_sysctl_ctx(ha->pci_dev);
478 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
480 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_rcv",
481 CTLFLAG_RD, NULL, "stats_hw_rcv");
482 children = SYSCTL_CHILDREN(ctx_oid);
484 SYSCTL_ADD_QUAD(ctx, children,
485 OID_AUTO, "total_bytes",
486 CTLFLAG_RD, &ha->hw.rcv.total_bytes,
489 SYSCTL_ADD_QUAD(ctx, children,
490 OID_AUTO, "total_pkts",
491 CTLFLAG_RD, &ha->hw.rcv.total_pkts,
494 SYSCTL_ADD_QUAD(ctx, children,
495 OID_AUTO, "lro_pkt_count",
496 CTLFLAG_RD, &ha->hw.rcv.lro_pkt_count,
499 SYSCTL_ADD_QUAD(ctx, children,
500 OID_AUTO, "sw_pkt_count",
501 CTLFLAG_RD, &ha->hw.rcv.sw_pkt_count,
504 SYSCTL_ADD_QUAD(ctx, children,
505 OID_AUTO, "ip_chksum_err",
506 CTLFLAG_RD, &ha->hw.rcv.ip_chksum_err,
509 SYSCTL_ADD_QUAD(ctx, children,
510 OID_AUTO, "pkts_wo_acntxts",
511 CTLFLAG_RD, &ha->hw.rcv.pkts_wo_acntxts,
514 SYSCTL_ADD_QUAD(ctx, children,
515 OID_AUTO, "pkts_dropped_no_sds_card",
516 CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_sds_card,
517 "pkts_dropped_no_sds_card");
519 SYSCTL_ADD_QUAD(ctx, children,
520 OID_AUTO, "pkts_dropped_no_sds_host",
521 CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_sds_host,
522 "pkts_dropped_no_sds_host");
524 SYSCTL_ADD_QUAD(ctx, children,
525 OID_AUTO, "oversized_pkts",
526 CTLFLAG_RD, &ha->hw.rcv.oversized_pkts,
529 SYSCTL_ADD_QUAD(ctx, children,
530 OID_AUTO, "pkts_dropped_no_rds",
531 CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_rds,
532 "pkts_dropped_no_rds");
534 SYSCTL_ADD_QUAD(ctx, children,
535 OID_AUTO, "unxpctd_mcast_pkts",
536 CTLFLAG_RD, &ha->hw.rcv.unxpctd_mcast_pkts,
537 "unxpctd_mcast_pkts");
539 SYSCTL_ADD_QUAD(ctx, children,
540 OID_AUTO, "re1_fbq_error",
541 CTLFLAG_RD, &ha->hw.rcv.re1_fbq_error,
544 SYSCTL_ADD_QUAD(ctx, children,
545 OID_AUTO, "invalid_mac_addr",
546 CTLFLAG_RD, &ha->hw.rcv.invalid_mac_addr,
549 SYSCTL_ADD_QUAD(ctx, children,
550 OID_AUTO, "rds_prime_trys",
551 CTLFLAG_RD, &ha->hw.rcv.rds_prime_trys,
554 SYSCTL_ADD_QUAD(ctx, children,
555 OID_AUTO, "rds_prime_success",
556 CTLFLAG_RD, &ha->hw.rcv.rds_prime_success,
557 "rds_prime_success");
559 SYSCTL_ADD_QUAD(ctx, children,
560 OID_AUTO, "lro_flows_added",
561 CTLFLAG_RD, &ha->hw.rcv.lro_flows_added,
564 SYSCTL_ADD_QUAD(ctx, children,
565 OID_AUTO, "lro_flows_deleted",
566 CTLFLAG_RD, &ha->hw.rcv.lro_flows_deleted,
567 "lro_flows_deleted");
569 SYSCTL_ADD_QUAD(ctx, children,
570 OID_AUTO, "lro_flows_active",
571 CTLFLAG_RD, &ha->hw.rcv.lro_flows_active,
574 SYSCTL_ADD_QUAD(ctx, children,
575 OID_AUTO, "pkts_droped_unknown",
576 CTLFLAG_RD, &ha->hw.rcv.pkts_droped_unknown,
577 "pkts_droped_unknown");
579 SYSCTL_ADD_QUAD(ctx, children,
580 OID_AUTO, "pkts_cnt_oversized",
581 CTLFLAG_RD, &ha->hw.rcv.pkts_cnt_oversized,
582 "pkts_cnt_oversized");
588 qlnx_add_hw_xmt_stats_sysctls(qla_host_t *ha)
590 struct sysctl_ctx_list *ctx;
591 struct sysctl_oid_list *children;
592 struct sysctl_oid_list *node_children;
593 struct sysctl_oid *ctx_oid;
595 uint8_t name_str[16];
597 ctx = device_get_sysctl_ctx(ha->pci_dev);
598 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
600 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_xmt",
601 CTLFLAG_RD, NULL, "stats_hw_xmt");
602 children = SYSCTL_CHILDREN(ctx_oid);
604 for (i = 0; i < ha->hw.num_tx_rings; i++) {
606 bzero(name_str, sizeof(name_str));
607 snprintf(name_str, sizeof(name_str), "%d", i);
609 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
610 CTLFLAG_RD, NULL, name_str);
611 node_children = SYSCTL_CHILDREN(ctx_oid);
615 SYSCTL_ADD_QUAD(ctx, node_children,
616 OID_AUTO, "total_bytes",
617 CTLFLAG_RD, &ha->hw.xmt[i].total_bytes,
620 SYSCTL_ADD_QUAD(ctx, node_children,
621 OID_AUTO, "total_pkts",
622 CTLFLAG_RD, &ha->hw.xmt[i].total_pkts,
625 SYSCTL_ADD_QUAD(ctx, node_children,
626 OID_AUTO, "errors",
627 CTLFLAG_RD, &ha->hw.xmt[i].errors,
630 SYSCTL_ADD_QUAD(ctx, node_children,
631 OID_AUTO, "pkts_dropped",
632 CTLFLAG_RD, &ha->hw.xmt[i].pkts_dropped,
635 SYSCTL_ADD_QUAD(ctx, node_children,
636 OID_AUTO, "switch_pkts",
637 CTLFLAG_RD, &ha->hw.xmt[i].switch_pkts,
640 SYSCTL_ADD_QUAD(ctx, node_children,
641 OID_AUTO, "num_buffers",
642 CTLFLAG_RD, &ha->hw.xmt[i].num_buffers,
650 qlnx_add_hw_stats_sysctls(qla_host_t *ha)
652 qlnx_add_hw_mac_stats_sysctls(ha);
653 qlnx_add_hw_rcv_stats_sysctls(ha);
654 qlnx_add_hw_xmt_stats_sysctls(ha);
660 qlnx_add_drvr_sds_stats(qla_host_t *ha)
662 struct sysctl_ctx_list *ctx;
663 struct sysctl_oid_list *children;
664 struct sysctl_oid_list *node_children;
665 struct sysctl_oid *ctx_oid;
667 uint8_t name_str[16];
669 ctx = device_get_sysctl_ctx(ha->pci_dev);
670 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
672 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_sds",
673 CTLFLAG_RD, NULL, "stats_drvr_sds");
674 children = SYSCTL_CHILDREN(ctx_oid);
676 for (i = 0; i < ha->hw.num_sds_rings; i++) {
678 bzero(name_str, sizeof(name_str));
679 snprintf(name_str, sizeof(name_str), "%d", i);
681 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
682 CTLFLAG_RD, NULL, name_str);
683 node_children = SYSCTL_CHILDREN(ctx_oid);
685 SYSCTL_ADD_QUAD(ctx, node_children,
686 OID_AUTO, "intr_count",
687 CTLFLAG_RD, &ha->hw.sds[i].intr_count,
690 SYSCTL_ADD_UINT(ctx, node_children,
691 OID_AUTO, "rx_free",
692 CTLFLAG_RD, &ha->hw.sds[i].rx_free,
693 ha->hw.sds[i].rx_free, "rx_free");
699 qlnx_add_drvr_rds_stats(qla_host_t *ha)
701 struct sysctl_ctx_list *ctx;
702 struct sysctl_oid_list *children;
703 struct sysctl_oid_list *node_children;
704 struct sysctl_oid *ctx_oid;
706 uint8_t name_str[16];
708 ctx = device_get_sysctl_ctx(ha->pci_dev);
709 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
711 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_rds",
712 CTLFLAG_RD, NULL, "stats_drvr_rds");
713 children = SYSCTL_CHILDREN(ctx_oid);
715 for (i = 0; i < ha->hw.num_rds_rings; i++) {
717 bzero(name_str, sizeof(name_str));
718 snprintf(name_str, sizeof(name_str), "%d", i);
720 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
721 CTLFLAG_RD, NULL, name_str);
722 node_children = SYSCTL_CHILDREN(ctx_oid);
724 SYSCTL_ADD_QUAD(ctx, node_children,
725 OID_AUTO, "count",
726 CTLFLAG_RD, &ha->hw.rds[i].count,
729 SYSCTL_ADD_QUAD(ctx, node_children,
730 OID_AUTO, "lro_pkt_count",
731 CTLFLAG_RD, &ha->hw.rds[i].lro_pkt_count,
734 SYSCTL_ADD_QUAD(ctx, node_children,
735 OID_AUTO, "lro_bytes",
736 CTLFLAG_RD, &ha->hw.rds[i].lro_bytes,
744 qlnx_add_drvr_tx_stats(qla_host_t *ha)
746 struct sysctl_ctx_list *ctx;
747 struct sysctl_oid_list *children;
748 struct sysctl_oid_list *node_children;
749 struct sysctl_oid *ctx_oid;
751 uint8_t name_str[16];
753 ctx = device_get_sysctl_ctx(ha->pci_dev);
754 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
756 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_xmt",
757 CTLFLAG_RD, NULL, "stats_drvr_xmt");
758 children = SYSCTL_CHILDREN(ctx_oid);
760 for (i = 0; i < ha->hw.num_tx_rings; i++) {
762 bzero(name_str, sizeof(name_str));
763 snprintf(name_str, sizeof(name_str), "%d", i);
765 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
766 CTLFLAG_RD, NULL, name_str);
767 node_children = SYSCTL_CHILDREN(ctx_oid);
769 SYSCTL_ADD_QUAD(ctx, node_children,
770 OID_AUTO, "count",
771 CTLFLAG_RD, &ha->tx_ring[i].count,
774 #ifdef QL_ENABLE_ISCSI_TLV
775 SYSCTL_ADD_QUAD(ctx, node_children,
776 OID_AUTO, "iscsi_pkt_count",
777 CTLFLAG_RD, &ha->tx_ring[i].iscsi_pkt_count,
779 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
786 qlnx_add_drvr_stats_sysctls(qla_host_t *ha)
788 qlnx_add_drvr_sds_stats(ha);
789 qlnx_add_drvr_rds_stats(ha);
790 qlnx_add_drvr_tx_stats(ha);
795 * Name: ql_hw_add_sysctls
796 * Function: Add P3Plus specific sysctls
799 ql_hw_add_sysctls(qla_host_t *ha)
805 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
806 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
807 OID_AUTO, "num_rds_rings", CTLFLAG_RD, &ha->hw.num_rds_rings,
808 ha->hw.num_rds_rings, "Number of Rcv Descriptor Rings");
810 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
811 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
812 OID_AUTO, "num_sds_rings", CTLFLAG_RD, &ha->hw.num_sds_rings,
813 ha->hw.num_sds_rings, "Number of Status Descriptor Rings");
815 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
816 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
817 OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->hw.num_tx_rings,
818 ha->hw.num_tx_rings, "Number of Transmit Rings");
820 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
821 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
822 OID_AUTO, "tx_ring_index", CTLFLAG_RW, &ha->txr_idx,
823 ha->txr_idx, "Tx Ring Used");
825 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
826 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
827 OID_AUTO, "max_tx_segs", CTLFLAG_RD, &ha->hw.max_tx_segs,
828 ha->hw.max_tx_segs, "Max # of Segments in a non-TSO pkt");
830 ha->hw.sds_cidx_thres = 32;
831 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
832 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
833 OID_AUTO, "sds_cidx_thres", CTLFLAG_RW, &ha->hw.sds_cidx_thres,
834 ha->hw.sds_cidx_thres,
835 "Number of SDS entries to process before updating"
836 " SDS Ring Consumer Index");
838 ha->hw.rds_pidx_thres = 32;
839 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
840 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
841 OID_AUTO, "rds_pidx_thres", CTLFLAG_RW, &ha->hw.rds_pidx_thres,
842 ha->hw.rds_pidx_thres,
843 "Number of Rcv Rings Entries to post before updating"
844 " RDS Ring Producer Index");
846 ha->hw.rcv_intr_coalesce = (3 << 16) | 256;
847 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
848 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
849 OID_AUTO, "rcv_intr_coalesce", CTLFLAG_RW,
850 &ha->hw.rcv_intr_coalesce,
851 ha->hw.rcv_intr_coalesce,
852 "Rcv Intr Coalescing Parameters\n"
853 "\tbits 15:0 max packets\n"
854 "\tbits 31:16 max micro-seconds to wait\n"
856 "\tifconfig <if> down && ifconfig <if> up\n"
857 "\tto take effect \n");
859 ha->hw.xmt_intr_coalesce = (64 << 16) | 64;
860 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
861 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
862 OID_AUTO, "xmt_intr_coalesce", CTLFLAG_RW,
863 &ha->hw.xmt_intr_coalesce,
864 ha->hw.xmt_intr_coalesce,
865 "Xmt Intr Coalescing Parameters\n"
866 "\tbits 15:0 max packets\n"
867 "\tbits 31:16 max micro-seconds to wait\n"
869 "\tifconfig <if> down && ifconfig <if> up\n"
870 "\tto take effect \n");
872 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
873 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
874 OID_AUTO, "port_cfg", CTLTYPE_INT | CTLFLAG_RW,
876 qla_sysctl_port_cfg, "I",
877 "Set Port Configuration if values below "
878 "otherwise Get Port Configuration\n"
879 "\tBits 0-3 ; 1 = DCBX Enable; 0 = DCBX Disable\n"
880 "\tBits 4-7 : 0 = no pause; 1 = std ; 2 = ppm \n"
881 "\tBits 8-11: std pause cfg; 0 = xmt and rcv;"
882 " 1 = xmt only; 2 = rcv only;\n"
885 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
886 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
887 OID_AUTO, "set_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW,
889 qla_sysctl_set_cam_search_mode, "I",
890 "Set CAM Search Mode"
891 "\t 1 = search mode internal\n"
892 "\t 2 = search mode auto\n");
894 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
895 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
896 OID_AUTO, "get_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW,
898 qla_sysctl_get_cam_search_mode, "I",
899 "Get CAM Search Mode"
900 "\t 1 = search mode internal\n"
901 "\t 2 = search mode auto\n");
903 ha->hw.enable_9kb = 1;
905 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
906 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
907 OID_AUTO, "enable_9kb", CTLFLAG_RW, &ha->hw.enable_9kb,
908 ha->hw.enable_9kb, "Enable 9Kbyte Buffers when MTU = 9000");
910 ha->hw.enable_hw_lro = 1;
912 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
913 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
914 OID_AUTO, "enable_hw_lro", CTLFLAG_RW, &ha->hw.enable_hw_lro,
915 ha->hw.enable_hw_lro, "Enable Hardware LRO; Default is true\n"
916 "\t 1 : Hardware LRO if LRO is enabled\n"
917 "\t 0 : Software LRO if LRO is enabled\n"
918 "\t Any change requires ifconfig down/up to take effect\n"
919 "\t Note that LRO may be turned off/on via ifconfig\n");
921 ha->hw.mdump_active = 0;
922 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
923 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
924 OID_AUTO, "minidump_active", CTLFLAG_RW, &ha->hw.mdump_active,
926 "Minidump retrieval is Active");
928 ha->hw.mdump_done = 0;
929 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
930 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
931 OID_AUTO, "mdump_done", CTLFLAG_RW,
932 &ha->hw.mdump_done, ha->hw.mdump_done,
933 "Minidump has been done and available for retrieval");
935 ha->hw.mdump_capture_mask = 0xF;
936 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
937 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
938 OID_AUTO, "minidump_capture_mask", CTLFLAG_RW,
939 &ha->hw.mdump_capture_mask, ha->hw.mdump_capture_mask,
940 "Minidump capture mask");
944 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
945 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
946 OID_AUTO, "err_inject",
947 CTLFLAG_RW, &ha->err_inject, ha->err_inject,
948 "Error to be injected\n"
949 "\t\t\t 0: No Errors\n"
950 "\t\t\t 1: rcv: rxb struct invalid\n"
951 "\t\t\t 2: rcv: mp == NULL\n"
952 "\t\t\t 3: lro: rxb struct invalid\n"
953 "\t\t\t 4: lro: mp == NULL\n"
954 "\t\t\t 5: rcv: num handles invalid\n"
955 "\t\t\t 6: reg: indirect reg rd_wr failure\n"
956 "\t\t\t 7: ocm: offchip memory rd_wr failure\n"
957 "\t\t\t 8: mbx: mailbox command failure\n"
958 "\t\t\t 9: heartbeat failure\n"
959 "\t\t\t A: temperature failure\n"
960 "\t\t\t 11: m_getcl or m_getjcl failure\n" );
962 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
963 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
964 OID_AUTO, "peg_stop", CTLTYPE_INT | CTLFLAG_RW,
966 qla_sysctl_stop_pegs, "I", "Peg Stop");
968 #endif /* #ifdef QL_DBG */
970 ha->hw.user_pri_nic = 0;
971 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
972 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
973 OID_AUTO, "user_pri_nic", CTLFLAG_RW, &ha->hw.user_pri_nic,
975 "VLAN Tag User Priority for Normal Ethernet Packets");
977 ha->hw.user_pri_iscsi = 4;
978 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
979 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
980 OID_AUTO, "user_pri_iscsi", CTLFLAG_RW, &ha->hw.user_pri_iscsi,
981 ha->hw.user_pri_iscsi,
982 "VLAN Tag User Priority for iSCSI Packets");
984 qlnx_add_hw_stats_sysctls(ha);
985 qlnx_add_drvr_stats_sysctls(ha);
991 ql_hw_link_status(qla_host_t *ha)
993 device_printf(ha->pci_dev, "cable_oui\t\t 0x%08x\n", ha->hw.cable_oui);
995 if (ha->hw.link_up) {
996 device_printf(ha->pci_dev, "link Up\n");
998 device_printf(ha->pci_dev, "link Down\n");
1001 if (ha->hw.flags.fduplex) {
1002 device_printf(ha->pci_dev, "Full Duplex\n");
1004 device_printf(ha->pci_dev, "Half Duplex\n");
1007 if (ha->hw.flags.autoneg) {
1008 device_printf(ha->pci_dev, "Auto Negotiation Enabled\n");
1010 device_printf(ha->pci_dev, "Auto Negotiation Disabled\n");
1013 switch (ha->hw.link_speed) {
1015 device_printf(ha->pci_dev, "link speed\t\t 10Gbps\n");
1019 device_printf(ha->pci_dev, "link speed\t\t 1Gbps\n");
1023 device_printf(ha->pci_dev, "link speed\t\t 100Mbps\n");
1027 device_printf(ha->pci_dev, "link speed\t\t Unknown\n");
1031 switch (ha->hw.module_type) {
1034 device_printf(ha->pci_dev, "Module Type 10GBase-LRM\n");
1038 device_printf(ha->pci_dev, "Module Type 10GBase-LR\n");
1042 device_printf(ha->pci_dev, "Module Type 10GBase-SR\n");
1046 device_printf(ha->pci_dev,
1047 "Module Type 10GE Passive Copper(Compliant)[%d m]\n",
1048 ha->hw.cable_length);
1052 device_printf(ha->pci_dev, "Module Type 10GE Active"
1053 " Limiting Copper(Compliant)[%d m]\n",
1054 ha->hw.cable_length);
1058 device_printf(ha->pci_dev,
1059 "Module Type 10GE Passive Copper"
1060 " (Legacy, Best Effort)[%d m]\n",
1061 ha->hw.cable_length);
1065 device_printf(ha->pci_dev, "Module Type 1000Base-SX\n");
1069 device_printf(ha->pci_dev, "Module Type 1000Base-LX\n");
1073 device_printf(ha->pci_dev, "Module Type 1000Base-CX\n");
1077 device_printf(ha->pci_dev, "Module Type 1000Base-T\n");
1081 device_printf(ha->pci_dev, "Module Type 1GE Passive Copper"
1082 " (Legacy, Best Effort)\n");
1086 device_printf(ha->pci_dev, "Unknown Module Type 0x%x\n",
1087 ha->hw.module_type);
1091 if (ha->hw.link_faults == 1)
1092 device_printf(ha->pci_dev, "SFP Power Fault\n");
1097 * Function: Frees the DMA'able memory allocated in ql_alloc_dma()
1100 ql_free_dma(qla_host_t *ha)
1104 if (ha->hw.dma_buf.flags.sds_ring) {
1105 for (i = 0; i < ha->hw.num_sds_rings; i++) {
1106 ql_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]);
1108 ha->hw.dma_buf.flags.sds_ring = 0;
1111 if (ha->hw.dma_buf.flags.rds_ring) {
1112 for (i = 0; i < ha->hw.num_rds_rings; i++) {
1113 ql_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]);
1115 ha->hw.dma_buf.flags.rds_ring = 0;
1118 if (ha->hw.dma_buf.flags.tx_ring) {
1119 ql_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring);
1120 ha->hw.dma_buf.flags.tx_ring = 0;
1122 ql_minidump_free(ha);
1126 * Name: ql_alloc_dma
1127 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
1130 ql_alloc_dma(qla_host_t *ha)
1133 uint32_t i, j, size, tx_ring_size;
1135 qla_hw_tx_cntxt_t *tx_cntxt;
1141 QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));
1145 * Allocate Transmit Ring
1147 tx_ring_size = (sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS);
1148 size = (tx_ring_size * ha->hw.num_tx_rings);
1150 hw->dma_buf.tx_ring.alignment = 8;
1151 hw->dma_buf.tx_ring.size = size + PAGE_SIZE;
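/*
 * The extra page appended above leaves room for the per-ring Tx
 * consumer index words that are carved out of this same allocation
 * further down.
 */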
1153 if (ql_alloc_dmabuf(ha, &hw->dma_buf.tx_ring)) {
1154 device_printf(dev, "%s: tx ring alloc failed\n", __func__);
1155 goto ql_alloc_dma_exit;
1158 vaddr = (uint8_t *)hw->dma_buf.tx_ring.dma_b;
1159 paddr = hw->dma_buf.tx_ring.dma_addr;
1161 for (i = 0; i < ha->hw.num_tx_rings; i++) {
1162 tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];
1164 tx_cntxt->tx_ring_base = (q80_tx_cmd_t *)vaddr;
1165 tx_cntxt->tx_ring_paddr = paddr;
1167 vaddr += tx_ring_size;
1168 paddr += tx_ring_size;
1171 for (i = 0; i < ha->hw.num_tx_rings; i++) {
1172 tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];
1174 tx_cntxt->tx_cons = (uint32_t *)vaddr;
1175 tx_cntxt->tx_cons_paddr = paddr;
1177 vaddr += sizeof (uint32_t);
1178 paddr += sizeof (uint32_t);
1181 ha->hw.dma_buf.flags.tx_ring = 1;
1183 QL_DPRINT2(ha, (dev, "%s: tx_ring phys %p virt %p\n",
1184 __func__, (void *)(hw->dma_buf.tx_ring.dma_addr),
1185 hw->dma_buf.tx_ring.dma_b));
1187 * Allocate Receive Descriptor Rings
1190 for (i = 0; i < hw->num_rds_rings; i++) {
1192 hw->dma_buf.rds_ring[i].alignment = 8;
1193 hw->dma_buf.rds_ring[i].size =
1194 (sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS;
1196 if (ql_alloc_dmabuf(ha, &hw->dma_buf.rds_ring[i])) {
1197 device_printf(dev, "%s: rds ring[%d] alloc failed\n",
1200 for (j = 0; j < i; j++)
1201 ql_free_dmabuf(ha, &hw->dma_buf.rds_ring[j]);
1203 goto ql_alloc_dma_exit;
1205 QL_DPRINT4(ha, (dev, "%s: rx_ring[%d] phys %p virt %p\n",
1206 __func__, i, (void *)(hw->dma_buf.rds_ring[i].dma_addr),
1207 hw->dma_buf.rds_ring[i].dma_b));
1210 hw->dma_buf.flags.rds_ring = 1;
1213 * Allocate Status Descriptor Rings
1216 for (i = 0; i < hw->num_sds_rings; i++) {
1217 hw->dma_buf.sds_ring[i].alignment = 8;
1218 hw->dma_buf.sds_ring[i].size =
1219 (sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS;
1221 if (ql_alloc_dmabuf(ha, &hw->dma_buf.sds_ring[i])) {
1222 device_printf(dev, "%s: sds ring alloc failed\n",
1225 for (j = 0; j < i; j++)
1226 ql_free_dmabuf(ha, &hw->dma_buf.sds_ring[j]);
1228 goto ql_alloc_dma_exit;
1230 QL_DPRINT4(ha, (dev, "%s: sds_ring[%d] phys %p virt %p\n",
1232 (void *)(hw->dma_buf.sds_ring[i].dma_addr),
1233 hw->dma_buf.sds_ring[i].dma_b));
1235 for (i = 0; i < hw->num_sds_rings; i++) {
1236 hw->sds[i].sds_ring_base =
1237 (q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b;
1240 hw->dma_buf.flags.sds_ring = 1;
1249 #define Q8_MBX_MSEC_DELAY 5000
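/*
 * Mailbox handshake, as implemented below: wait for Q8_HOST_MBOX_CNTRL
 * to clear, write the command words into the Q8_HOST_MBOX registers,
 * set Q8_HOST_MBOX_CNTRL to hand the command to the firmware, poll
 * Q8_FW_MBOX_CNTRL (up to Q8_MBX_MSEC_DELAY milliseconds) for the
 * response, copy it out of the Q8_FW_MBOX registers, then clear the
 * firmware mailbox control and unmask the mailbox interrupt.
 */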
1252 qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
1253 uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause)
1259 if (QL_ERR_INJECT(ha, INJCT_MBX_CMD_FAILURE)) {
1261 ha->qla_initiate_recovery = 1;
1262 goto exit_qla_mbx_cmd;
1268 i = Q8_MBX_MSEC_DELAY;
1271 data = READ_REG32(ha, Q8_HOST_MBOX_CNTRL);
1277 qla_mdelay(__func__, 1);
1283 device_printf(ha->pci_dev, "%s: host_mbx_cntrl 0x%08x\n",
1286 ha->qla_initiate_recovery = 1;
1287 goto exit_qla_mbx_cmd;
1290 for (i = 0; i < n_hmbox; i++) {
1291 WRITE_REG32(ha, (Q8_HOST_MBOX0 + (i << 2)), *h_mbox);
1295 WRITE_REG32(ha, Q8_HOST_MBOX_CNTRL, 0x1);
1298 i = Q8_MBX_MSEC_DELAY;
1300 data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);
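/*
 * A value of 0x8 in bits 15:12 of FW_MBOX0 appears to flag an
 * async event rather than this command's completion; keep polling.
 */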
1302 if ((data & 0x3) == 1) {
1303 data = READ_REG32(ha, Q8_FW_MBOX0);
1304 if ((data & 0xF000) != 0x8000)
1310 qla_mdelay(__func__, 1);
1315 device_printf(ha->pci_dev, "%s: fw_mbx_cntrl 0x%08x\n",
1318 ha->qla_initiate_recovery = 1;
1319 goto exit_qla_mbx_cmd;
1322 for (i = 0; i < n_fwmbox; i++) {
1323 *fw_mbox++ = READ_REG32(ha, (Q8_FW_MBOX0 + (i << 2)));
1326 WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
1327 WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
1334 qla_get_nic_partition(qla_host_t *ha, uint32_t *supports_9kb,
1337 uint32_t *mbox, err;
1338 device_t dev = ha->pci_dev;
1340 bzero(ha->hw.mbox, (sizeof (uint32_t) * Q8_NUM_MBOX));
1344 mbox[0] = Q8_MBX_GET_NIC_PARTITION | (0x2 << 16) | (0x2 << 29);
1346 if (qla_mbx_cmd(ha, mbox, 2, mbox, 19, 0)) {
1347 device_printf(dev, "%s: failed0\n", __func__);
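/* the firmware returns the completion status in the top bits of mbox[0] */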
1350 err = mbox[0] >> 25;
1352 if (supports_9kb != NULL) {
1353 if (mbox[16] & 0x80) /* bit 7 of mbox 16 */
1359 if (num_rcvq != NULL)
1360 *num_rcvq = ((mbox[6] >> 16) & 0xFFFF);
1362 if ((err != 1) && (err != 0)) {
1363 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1370 qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx, uint32_t num_intrs,
1374 device_t dev = ha->pci_dev;
1375 q80_config_intr_t *c_intr;
1376 q80_config_intr_rsp_t *c_intr_rsp;
1378 c_intr = (q80_config_intr_t *)ha->hw.mbox;
1379 bzero(c_intr, (sizeof (q80_config_intr_t)));
1381 c_intr->opcode = Q8_MBX_CONFIG_INTR;
1383 c_intr->count_version = (sizeof (q80_config_intr_t) >> 2);
1384 c_intr->count_version |= Q8_MBX_CMD_VERSION;
1386 c_intr->nentries = num_intrs;
1388 for (i = 0; i < num_intrs; i++) {
1390 c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_CREATE;
1391 c_intr->intr[i].msix_index = start_idx + 1 + i;
1393 c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_DELETE;
1394 c_intr->intr[i].msix_index =
1395 ha->hw.intr_id[(start_idx + i)];
1398 c_intr->intr[i].cmd_type |= Q8_MBX_CONFIG_INTR_TYPE_MSI_X;
1401 if (qla_mbx_cmd(ha, (uint32_t *)c_intr,
1402 (sizeof (q80_config_intr_t) >> 2),
1403 ha->hw.mbox, (sizeof (q80_config_intr_rsp_t) >> 2), 0)) {
1404 device_printf(dev, "%s: failed0\n", __func__);
1408 c_intr_rsp = (q80_config_intr_rsp_t *)ha->hw.mbox;
1410 err = Q8_MBX_RSP_STATUS(c_intr_rsp->regcnt_status);
1413 device_printf(dev, "%s: failed1 [0x%08x, %d]\n", __func__, err,
1414 c_intr_rsp->nentries);
1416 for (i = 0; i < c_intr_rsp->nentries; i++) {
1417 device_printf(dev, "%s: [%d]:[0x%x 0x%x 0x%x]\n",
1419 c_intr_rsp->intr[i].status,
1420 c_intr_rsp->intr[i].intr_id,
1421 c_intr_rsp->intr[i].intr_src);
1427 for (i = 0; ((i < num_intrs) && create); i++) {
1428 if (!c_intr_rsp->intr[i].status) {
1429 ha->hw.intr_id[(start_idx + i)] =
1430 c_intr_rsp->intr[i].intr_id;
1431 ha->hw.intr_src[(start_idx + i)] =
1432 c_intr_rsp->intr[i].intr_src;
1440 * Name: qla_config_rss
1441 * Function: Configure RSS for the context/interface.
1443 static const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL,
1444 0x8030f20c77cb2da3ULL,
1445 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
1446 0x255b0ec26d5a56daULL };
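/*
 * This appears to be the widely published default Toeplitz RSS key
 * (the 40-byte key from Microsoft's RSS verification documentation),
 * stored as byte-swapped 64-bit words.
 */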
1449 qla_config_rss(qla_host_t *ha, uint16_t cntxt_id)
1451 q80_config_rss_t *c_rss;
1452 q80_config_rss_rsp_t *c_rss_rsp;
1454 device_t dev = ha->pci_dev;
1456 c_rss = (q80_config_rss_t *)ha->hw.mbox;
1457 bzero(c_rss, (sizeof (q80_config_rss_t)));
1459 c_rss->opcode = Q8_MBX_CONFIG_RSS;
1461 c_rss->count_version = (sizeof (q80_config_rss_t) >> 2);
1462 c_rss->count_version |= Q8_MBX_CMD_VERSION;
1464 c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP_IP |
1465 Q8_MBX_RSS_HASH_TYPE_IPV6_TCP_IP);
1466 //c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP |
1467 // Q8_MBX_RSS_HASH_TYPE_IPV6_TCP);
1469 c_rss->flags = Q8_MBX_RSS_FLAGS_ENABLE_RSS;
1470 c_rss->flags |= Q8_MBX_RSS_FLAGS_USE_IND_TABLE;
1472 c_rss->indtbl_mask = Q8_MBX_RSS_INDTBL_MASK;
1474 c_rss->indtbl_mask |= Q8_MBX_RSS_FLAGS_MULTI_RSS_VALID;
1475 c_rss->flags |= Q8_MBX_RSS_FLAGS_TYPE_CRSS;
1477 c_rss->cntxt_id = cntxt_id;
1479 for (i = 0; i < 5; i++) {
1480 c_rss->rss_key[i] = rss_key[i];
1483 if (qla_mbx_cmd(ha, (uint32_t *)c_rss,
1484 (sizeof (q80_config_rss_t) >> 2),
1485 ha->hw.mbox, (sizeof(q80_config_rss_rsp_t) >> 2), 0)) {
1486 device_printf(dev, "%s: failed0\n", __func__);
1489 c_rss_rsp = (q80_config_rss_rsp_t *)ha->hw.mbox;
1491 err = Q8_MBX_RSP_STATUS(c_rss_rsp->regcnt_status);
1494 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1501 qla_set_rss_ind_table(qla_host_t *ha, uint32_t start_idx, uint32_t count,
1502 uint16_t cntxt_id, uint8_t *ind_table)
1504 q80_config_rss_ind_table_t *c_rss_ind;
1505 q80_config_rss_ind_table_rsp_t *c_rss_ind_rsp;
1507 device_t dev = ha->pci_dev;
1509 if ((count > Q8_RSS_IND_TBL_SIZE) ||
1510 ((start_idx + count - 1) > Q8_RSS_IND_TBL_MAX_IDX)) {
1511 device_printf(dev, "%s: illegal count [%d, %d]\n", __func__,
1516 c_rss_ind = (q80_config_rss_ind_table_t *)ha->hw.mbox;
1517 bzero(c_rss_ind, sizeof (q80_config_rss_ind_table_t));
1519 c_rss_ind->opcode = Q8_MBX_CONFIG_RSS_TABLE;
1520 c_rss_ind->count_version = (sizeof (q80_config_rss_ind_table_t) >> 2);
1521 c_rss_ind->count_version |= Q8_MBX_CMD_VERSION;
1523 c_rss_ind->start_idx = start_idx;
1524 c_rss_ind->end_idx = start_idx + count - 1;
1525 c_rss_ind->cntxt_id = cntxt_id;
1526 bcopy(ind_table, c_rss_ind->ind_table, count);
1528 if (qla_mbx_cmd(ha, (uint32_t *)c_rss_ind,
1529 (sizeof (q80_config_rss_ind_table_t) >> 2), ha->hw.mbox,
1530 (sizeof(q80_config_rss_ind_table_rsp_t) >> 2), 0)) {
1531 device_printf(dev, "%s: failed0\n", __func__);
1535 c_rss_ind_rsp = (q80_config_rss_ind_table_rsp_t *)ha->hw.mbox;
1536 err = Q8_MBX_RSP_STATUS(c_rss_ind_rsp->regcnt_status);
1539 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1546 * Name: qla_config_intr_coalesce
1547 * Function: Configure Interrupt Coalescing.
1550 qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable,
1553 q80_config_intr_coalesc_t *intrc;
1554 q80_config_intr_coalesc_rsp_t *intrc_rsp;
1556 device_t dev = ha->pci_dev;
1558 intrc = (q80_config_intr_coalesc_t *)ha->hw.mbox;
1559 bzero(intrc, (sizeof (q80_config_intr_coalesc_t)));
1561 intrc->opcode = Q8_MBX_CONFIG_INTR_COALESCE;
1562 intrc->count_version = (sizeof (q80_config_intr_coalesc_t) >> 2);
1563 intrc->count_version |= Q8_MBX_CMD_VERSION;
1566 intrc->flags = Q8_MBX_INTRC_FLAGS_RCV;
1567 intrc->max_pkts = ha->hw.rcv_intr_coalesce & 0xFFFF;
1568 intrc->max_mswait = (ha->hw.rcv_intr_coalesce >> 16) & 0xFFFF;
1570 intrc->flags = Q8_MBX_INTRC_FLAGS_XMT;
1571 intrc->max_pkts = ha->hw.xmt_intr_coalesce & 0xFFFF;
1572 intrc->max_mswait = (ha->hw.xmt_intr_coalesce >> 16) & 0xFFFF;
1575 intrc->cntxt_id = cntxt_id;
1578 intrc->flags |= Q8_MBX_INTRC_FLAGS_PERIODIC;
1579 intrc->timer_type = Q8_MBX_INTRC_TIMER_PERIODIC;
1581 for (i = 0; i < ha->hw.num_sds_rings; i++) {
1582 intrc->sds_ring_mask |= (1 << i);
1584 intrc->ms_timeout = 1000;
1587 if (qla_mbx_cmd(ha, (uint32_t *)intrc,
1588 (sizeof (q80_config_intr_coalesc_t) >> 2),
1589 ha->hw.mbox, (sizeof(q80_config_intr_coalesc_rsp_t) >> 2), 0)) {
1590 device_printf(dev, "%s: failed0\n", __func__);
1593 intrc_rsp = (q80_config_intr_coalesc_rsp_t *)ha->hw.mbox;
1595 err = Q8_MBX_RSP_STATUS(intrc_rsp->regcnt_status);
1598 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1607 * Name: qla_config_mac_addr
1608 * Function: binds a MAC address to the context/interface.
1609 * Can be unicast, multicast or broadcast.
1612 qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac,
1615 q80_config_mac_addr_t *cmac;
1616 q80_config_mac_addr_rsp_t *cmac_rsp;
1618 device_t dev = ha->pci_dev;
1620 uint8_t *mac_cpy = mac_addr;
1622 if (num_mac > Q8_MAX_MAC_ADDRS) {
1623 device_printf(dev, "%s: %s num_mac [0x%x] > Q8_MAX_MAC_ADDRS\n",
1624 __func__, (add_mac ? "Add" : "Del"), num_mac);
1628 cmac = (q80_config_mac_addr_t *)ha->hw.mbox;
1629 bzero(cmac, (sizeof (q80_config_mac_addr_t)));
1631 cmac->opcode = Q8_MBX_CONFIG_MAC_ADDR;
1632 cmac->count_version = sizeof (q80_config_mac_addr_t) >> 2;
1633 cmac->count_version |= Q8_MBX_CMD_VERSION;
1636 cmac->cmd = Q8_MBX_CMAC_CMD_ADD_MAC_ADDR;
1638 cmac->cmd = Q8_MBX_CMAC_CMD_DEL_MAC_ADDR;
1640 cmac->cmd |= Q8_MBX_CMAC_CMD_CAM_INGRESS;
1642 cmac->nmac_entries = num_mac;
1643 cmac->cntxt_id = ha->hw.rcv_cntxt_id;
1645 for (i = 0; i < num_mac; i++) {
1646 bcopy(mac_addr, cmac->mac_addr[i].addr, Q8_ETHER_ADDR_LEN);
1647 mac_addr = mac_addr + ETHER_ADDR_LEN;
1650 if (qla_mbx_cmd(ha, (uint32_t *)cmac,
1651 (sizeof (q80_config_mac_addr_t) >> 2),
1652 ha->hw.mbox, (sizeof(q80_config_mac_addr_rsp_t) >> 2), 1)) {
1653 device_printf(dev, "%s: %s failed0\n", __func__,
1654 (add_mac ? "Add" : "Del"));
1657 cmac_rsp = (q80_config_mac_addr_rsp_t *)ha->hw.mbox;
1659 err = Q8_MBX_RSP_STATUS(cmac_rsp->regcnt_status);
1662 device_printf(dev, "%s: %s failed1 [0x%08x]\n", __func__,
1663 (add_mac ? "Add" : "Del"), err);
1664 for (i = 0; i < num_mac; i++) {
1665 device_printf(dev, "%s: %02x:%02x:%02x:%02x:%02x:%02x\n",
1666 __func__, mac_cpy[0], mac_cpy[1], mac_cpy[2],
1667 mac_cpy[3], mac_cpy[4], mac_cpy[5]);
1668 mac_cpy += ETHER_ADDR_LEN;
1678 * Name: qla_set_mac_rcv_mode
1679 * Function: Enable/Disable AllMulticast and Promiscuous Modes.
1682 qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode)
1684 q80_config_mac_rcv_mode_t *rcv_mode;
1686 q80_config_mac_rcv_mode_rsp_t *rcv_mode_rsp;
1687 device_t dev = ha->pci_dev;
1689 rcv_mode = (q80_config_mac_rcv_mode_t *)ha->hw.mbox;
1690 bzero(rcv_mode, (sizeof (q80_config_mac_rcv_mode_t)));
1692 rcv_mode->opcode = Q8_MBX_CONFIG_MAC_RX_MODE;
1693 rcv_mode->count_version = sizeof (q80_config_mac_rcv_mode_t) >> 2;
1694 rcv_mode->count_version |= Q8_MBX_CMD_VERSION;
1696 rcv_mode->mode = mode;
1698 rcv_mode->cntxt_id = ha->hw.rcv_cntxt_id;
1700 if (qla_mbx_cmd(ha, (uint32_t *)rcv_mode,
1701 (sizeof (q80_config_mac_rcv_mode_t) >> 2),
1702 ha->hw.mbox, (sizeof(q80_config_mac_rcv_mode_rsp_t) >> 2), 1)) {
1703 device_printf(dev, "%s: failed0\n", __func__);
1706 rcv_mode_rsp = (q80_config_mac_rcv_mode_rsp_t *)ha->hw.mbox;
1708 err = Q8_MBX_RSP_STATUS(rcv_mode_rsp->regcnt_status);
1711 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1719 ql_set_promisc(qla_host_t *ha)
1723 ha->hw.mac_rcv_mode |= Q8_MBX_MAC_RCV_PROMISC_ENABLE;
1724 ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1729 qla_reset_promisc(qla_host_t *ha)
1731 ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_RCV_PROMISC_ENABLE;
1732 (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1736 ql_set_allmulti(qla_host_t *ha)
1740 ha->hw.mac_rcv_mode |= Q8_MBX_MAC_ALL_MULTI_ENABLE;
1741 ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1746 qla_reset_allmulti(qla_host_t *ha)
1748 ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_ALL_MULTI_ENABLE;
1749 (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1753 * Name: ql_set_max_mtu
1755 * Sets the maximum transmission unit size for the specified rcv context.
1758 ql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id)
1761 q80_set_max_mtu_t *max_mtu;
1762 q80_set_max_mtu_rsp_t *max_mtu_rsp;
1767 max_mtu = (q80_set_max_mtu_t *)ha->hw.mbox;
1768 bzero(max_mtu, (sizeof (q80_set_max_mtu_t)));
1770 max_mtu->opcode = Q8_MBX_SET_MAX_MTU;
1771 max_mtu->count_version = (sizeof (q80_set_max_mtu_t) >> 2);
1772 max_mtu->count_version |= Q8_MBX_CMD_VERSION;
1774 max_mtu->cntxt_id = cntxt_id;
1777 if (qla_mbx_cmd(ha, (uint32_t *)max_mtu,
1778 (sizeof (q80_set_max_mtu_t) >> 2),
1779 ha->hw.mbox, (sizeof (q80_set_max_mtu_rsp_t) >> 2), 1)) {
1780 device_printf(dev, "%s: failed\n", __func__);
1784 max_mtu_rsp = (q80_set_max_mtu_rsp_t *)ha->hw.mbox;
1786 err = Q8_MBX_RSP_STATUS(max_mtu_rsp->regcnt_status);
1789 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1796 qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id)
1799 q80_link_event_t *lnk;
1800 q80_link_event_rsp_t *lnk_rsp;
1805 lnk = (q80_link_event_t *)ha->hw.mbox;
1806 bzero(lnk, (sizeof (q80_link_event_t)));
1808 lnk->opcode = Q8_MBX_LINK_EVENT_REQ;
1809 lnk->count_version = (sizeof (q80_link_event_t) >> 2);
1810 lnk->count_version |= Q8_MBX_CMD_VERSION;
1812 lnk->cntxt_id = cntxt_id;
1813 lnk->cmd = Q8_LINK_EVENT_CMD_ENABLE_ASYNC;
1815 if (qla_mbx_cmd(ha, (uint32_t *)lnk, (sizeof (q80_link_event_t) >> 2),
1816 ha->hw.mbox, (sizeof (q80_link_event_rsp_t) >> 2), 0)) {
1817 device_printf(dev, "%s: failed\n", __func__);
1821 lnk_rsp = (q80_link_event_rsp_t *)ha->hw.mbox;
1823 err = Q8_MBX_RSP_STATUS(lnk_rsp->regcnt_status);
1826 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1833 qla_config_fw_lro(qla_host_t *ha, uint16_t cntxt_id)
1836 q80_config_fw_lro_t *fw_lro;
1837 q80_config_fw_lro_rsp_t *fw_lro_rsp;
1842 fw_lro = (q80_config_fw_lro_t *)ha->hw.mbox;
1843 bzero(fw_lro, sizeof(q80_config_fw_lro_t));
1845 fw_lro->opcode = Q8_MBX_CONFIG_FW_LRO;
1846 fw_lro->count_version = (sizeof (q80_config_fw_lro_t) >> 2);
1847 fw_lro->count_version |= Q8_MBX_CMD_VERSION;
1849 fw_lro->flags |= Q8_MBX_FW_LRO_IPV4 | Q8_MBX_FW_LRO_IPV4_WO_DST_IP_CHK;
1850 fw_lro->flags |= Q8_MBX_FW_LRO_IPV6 | Q8_MBX_FW_LRO_IPV6_WO_DST_IP_CHK;
1852 fw_lro->cntxt_id = cntxt_id;
1854 if (qla_mbx_cmd(ha, (uint32_t *)fw_lro,
1855 (sizeof (q80_config_fw_lro_t) >> 2),
1856 ha->hw.mbox, (sizeof (q80_config_fw_lro_rsp_t) >> 2), 0)) {
1857 device_printf(dev, "%s: failed\n", __func__);
1861 fw_lro_rsp = (q80_config_fw_lro_rsp_t *)ha->hw.mbox;
1863 err = Q8_MBX_RSP_STATUS(fw_lro_rsp->regcnt_status);
1866 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1873 qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode)
1876 q80_hw_config_t *hw_config;
1877 q80_hw_config_rsp_t *hw_config_rsp;
1882 hw_config = (q80_hw_config_t *)ha->hw.mbox;
1883 bzero(hw_config, sizeof (q80_hw_config_t));
1885 hw_config->opcode = Q8_MBX_HW_CONFIG;
1886 hw_config->count_version = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE_COUNT;
1887 hw_config->count_version |= Q8_MBX_CMD_VERSION;
1889 hw_config->cmd = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE;
1891 hw_config->u.set_cam_search_mode.mode = search_mode;
1893 if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
1894 (sizeof (q80_hw_config_t) >> 2),
1895 ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
1896 device_printf(dev, "%s: failed\n", __func__);
1899 hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;
1901 err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);
1904 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1911 qla_get_cam_search_mode(qla_host_t *ha)
1914 q80_hw_config_t *hw_config;
1915 q80_hw_config_rsp_t *hw_config_rsp;
1920 hw_config = (q80_hw_config_t *)ha->hw.mbox;
1921 bzero(hw_config, sizeof (q80_hw_config_t));
1923 hw_config->opcode = Q8_MBX_HW_CONFIG;
1924 hw_config->count_version = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE_COUNT;
1925 hw_config->count_version |= Q8_MBX_CMD_VERSION;
1927 hw_config->cmd = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE;
1929 if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
1930 (sizeof (q80_hw_config_t) >> 2),
1931 ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
1932 device_printf(dev, "%s: failed\n", __func__);
1935 hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;
1937 err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);
1940 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1942 device_printf(dev, "%s: cam search mode [0x%08x]\n", __func__,
1943 hw_config_rsp->u.get_cam_search_mode.mode);
1950 qla_get_hw_stats(qla_host_t *ha, uint32_t cmd, uint32_t rsp_size)
1953 q80_get_stats_t *stat;
1954 q80_get_stats_rsp_t *stat_rsp;
1959 stat = (q80_get_stats_t *)ha->hw.mbox;
1960 bzero(stat, (sizeof (q80_get_stats_t)));
1962 stat->opcode = Q8_MBX_GET_STATS;
1963 stat->count_version = 2;
1964 stat->count_version |= Q8_MBX_CMD_VERSION;
1968 if (qla_mbx_cmd(ha, (uint32_t *)stat, 2,
1969 ha->hw.mbox, (rsp_size >> 2), 0)) {
1970 device_printf(dev, "%s: failed\n", __func__);
1974 stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
1976 err = Q8_MBX_RSP_STATUS(stat_rsp->regcnt_status);
1986 ql_get_stats(qla_host_t *ha)
1988 q80_get_stats_rsp_t *stat_rsp;
1989 q80_mac_stats_t *mstat;
1990 q80_xmt_stats_t *xstat;
1991 q80_rcv_stats_t *rstat;
1994 struct ifnet *ifp = ha->ifp;
1999 if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) != 0) {
2000 device_printf(ha->pci_dev, "%s: failed\n", __func__);
2004 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2005 QLA_UNLOCK(ha, __func__);
2009 stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
2011 * Get MAC Statistics
2013 cmd = Q8_GET_STATS_CMD_TYPE_MAC;
2014 // cmd |= Q8_GET_STATS_CMD_CLEAR;
2016 cmd |= ((ha->pci_func & 0x1) << 16);
2018 if (ha->qla_watchdog_pause)
2019 goto ql_get_stats_exit;
2021 if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
2022 mstat = (q80_mac_stats_t *)&stat_rsp->u.mac;
2023 bcopy(mstat, &ha->hw.mac, sizeof(q80_mac_stats_t));
2025 device_printf(ha->pci_dev, "%s: mac failed [0x%08x]\n",
2026 __func__, ha->hw.mbox[0]);
2029 * Get RCV Statistics
2031 cmd = Q8_GET_STATS_CMD_RCV | Q8_GET_STATS_CMD_TYPE_CNTXT;
2032 // cmd |= Q8_GET_STATS_CMD_CLEAR;
2033 cmd |= (ha->hw.rcv_cntxt_id << 16);
2035 if (ha->qla_watchdog_pause)
2036 goto ql_get_stats_exit;
2038 if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
2039 rstat = (q80_rcv_stats_t *)&stat_rsp->u.rcv;
2040 bcopy(rstat, &ha->hw.rcv, sizeof(q80_rcv_stats_t));
2042 device_printf(ha->pci_dev, "%s: rcv failed [0x%08x]\n",
2043 __func__, ha->hw.mbox[0]);
2046 if (ha->qla_watchdog_pause)
2047 goto ql_get_stats_exit;
2049 * Get XMT Statistics
2051 for (i = 0 ; ((i < ha->hw.num_tx_rings) && (!ha->qla_watchdog_pause));
2053 cmd = Q8_GET_STATS_CMD_XMT | Q8_GET_STATS_CMD_TYPE_CNTXT;
2054 // cmd |= Q8_GET_STATS_CMD_CLEAR;
2055 cmd |= (ha->hw.tx_cntxt[i].tx_cntxt_id << 16);
2057 if (qla_get_hw_stats(ha, cmd, sizeof(q80_get_stats_rsp_t))
2059 xstat = (q80_xmt_stats_t *)&stat_rsp->u.xmt;
2060 bcopy(xstat, &ha->hw.xmt[i], sizeof(q80_xmt_stats_t));
2062 device_printf(ha->pci_dev, "%s: xmt failed [0x%08x]\n",
2063 __func__, ha->hw.mbox[0]);
2068 QLA_UNLOCK(ha, __func__);
2075 * Function: Checks if the packet to be transmitted is a candidate for
2076 * Large TCP Segment Offload. If yes, the appropriate fields in the Tx
2077 * Ring Structure are plugged in.
2080 qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr)
2082 struct ether_vlan_header *eh;
2083 struct ip *ip = NULL;
2084 struct ip6_hdr *ip6 = NULL;
2085 struct tcphdr *th = NULL;
2086 uint32_t ehdrlen, hdrlen, ip_hlen, tcp_hlen, tcp_opt_off;
2087 uint16_t etype, opcode, offload = 1;
2093 eh = mtod(mp, struct ether_vlan_header *);
2095 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2096 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2097 etype = ntohs(eh->evl_proto);
2099 ehdrlen = ETHER_HDR_LEN;
2100 etype = ntohs(eh->evl_encap_proto);
2108 tcp_opt_off = ehdrlen + sizeof(struct ip) +
2109 sizeof(struct tcphdr);
2111 if (mp->m_len < tcp_opt_off) {
2112 m_copydata(mp, 0, tcp_opt_off, hdr);
2113 ip = (struct ip *)(hdr + ehdrlen);
2115 ip = (struct ip *)(mp->m_data + ehdrlen);
2118 ip_hlen = ip->ip_hl << 2;
2119 opcode = Q8_TX_CMD_OP_XMT_TCP_LSO;
2122 if ((ip->ip_p != IPPROTO_TCP) ||
2123 (ip_hlen != sizeof (struct ip))){
2124 /* IP Options are not supported */
2128 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
2132 case ETHERTYPE_IPV6:
2134 tcp_opt_off = ehdrlen + sizeof(struct ip6_hdr) +
2135 sizeof (struct tcphdr);
2137 if (mp->m_len < tcp_opt_off) {
2138 m_copydata(mp, 0, tcp_opt_off, hdr);
2139 ip6 = (struct ip6_hdr *)(hdr + ehdrlen);
2141 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2144 ip_hlen = sizeof(struct ip6_hdr);
2145 opcode = Q8_TX_CMD_OP_XMT_TCP_LSO_IPV6;
2147 if (ip6->ip6_nxt != IPPROTO_TCP) {
2148 //device_printf(dev, "%s: ipv6\n", __func__);
2151 th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
2155 QL_DPRINT8(ha, (dev, "%s: type!=ip\n", __func__));
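/* th_off is the TCP data offset in units of 32-bit words */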
2163 tcp_hlen = th->th_off << 2;
2164 hdrlen = ehdrlen + ip_hlen + tcp_hlen;
2166 if (mp->m_len < hdrlen) {
2167 if (mp->m_len < tcp_opt_off) {
2168 if (tcp_hlen > sizeof(struct tcphdr)) {
2169 m_copydata(mp, tcp_opt_off,
2170 (tcp_hlen - sizeof(struct tcphdr)),
2174 m_copydata(mp, 0, hdrlen, hdr);
2178 tx_cmd->mss = mp->m_pkthdr.tso_segsz;
2180 tx_cmd->flags_opcode = opcode ;
2181 tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;
2182 tx_cmd->total_hdr_len = hdrlen;
2184 /* Check for Multicast least significant bit of MSB == 1 */
2185 if (eh->evl_dhost[0] & 0x01) {
2186 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST;
2189 if (mp->m_len < hdrlen) {
2190 printf("%s: mbuf too short for %d byte header\n", __func__, hdrlen);
2198 * Name: qla_tx_chksum
2199 * Function: Checks if the packet to be transmitted is a candidate for
2200 * TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx
2201 * Ring Structure are plugged in.
2204 qla_tx_chksum(qla_host_t *ha, struct mbuf *mp, uint32_t *op_code,
2205 uint32_t *tcp_hdr_off)
2207 struct ether_vlan_header *eh;
2209 struct ip6_hdr *ip6;
2210 uint32_t ehdrlen, ip_hlen;
2211 uint16_t etype, opcode, offload = 1;
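/* scratch space for pulling up an IPv4/IPv6 header that straddles mbufs */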
2213 uint8_t buf[sizeof(struct ip6_hdr)];
2219 if ((mp->m_pkthdr.csum_flags &
2220 (CSUM_TCP|CSUM_UDP|CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) == 0)
2223 eh = mtod(mp, struct ether_vlan_header *);
2225 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2226 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2227 etype = ntohs(eh->evl_proto);
2229 ehdrlen = ETHER_HDR_LEN;
2230 etype = ntohs(eh->evl_encap_proto);
2236 ip = (struct ip *)(mp->m_data + ehdrlen);
2238 ip_hlen = sizeof (struct ip);
2240 if (mp->m_len < (ehdrlen + ip_hlen)) {
2241 m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
2242 ip = (struct ip *)buf;
2245 if (ip->ip_p == IPPROTO_TCP)
2246 opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM;
2247 else if (ip->ip_p == IPPROTO_UDP)
2248 opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM;
2250 //device_printf(dev, "%s: ipv4\n", __func__);
2255 case ETHERTYPE_IPV6:
2256 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2258 ip_hlen = sizeof(struct ip6_hdr);
2260 if (mp->m_len < (ehdrlen + ip_hlen)) {
2261 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
2263 ip6 = (struct ip6_hdr *)buf;
2266 if (ip6->ip6_nxt == IPPROTO_TCP)
2267 opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6;
2268 else if (ip6->ip6_nxt == IPPROTO_UDP)
2269 opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6;
2271 //device_printf(dev, "%s: ipv6\n", __func__);
2284 *tcp_hdr_off = (ip_hlen + ehdrlen);
2289 #define QLA_TX_MIN_FREE 2
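/* Tx descriptors held in reserve so the producer never catches the consumer */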
2292 * Function: Transmits a packet. It first checks if the packet is a
2293 * candidate for Large TCP Segment Offload and then for UDP/TCP checksum
2294 * offload. If neither criterion is met, the packet is transmitted
2295 * as a regular ethernet frame.
2298 ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
2299 uint32_t tx_idx, struct mbuf *mp, uint32_t txr_idx, uint32_t iscsi_pdu)
2301 struct ether_vlan_header *eh;
2302 qla_hw_t *hw = &ha->hw;
2303 q80_tx_cmd_t *tx_cmd, tso_cmd;
2304 bus_dma_segment_t *c_seg;
2305 uint32_t num_tx_cmds, hdr_len = 0;
2306 uint32_t total_length = 0, bytes, tx_cmd_count = 0, txr_next;
2309 uint8_t *src = NULL, *dst = NULL;
2310 uint8_t frame_hdr[QL_FRAME_HDR_SIZE];
2311 uint32_t op_code = 0;
2312 uint32_t tcp_hdr_off = 0;
2317 * Always make sure there is at least one empty slot in the tx_ring;
2318 * the tx_ring is considered full when only one entry is available.
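* Each Tx command descriptor carries at most Q8_TX_CMD_MAX_SEGMENTS DMA
* segments (four, hence the round-up and shift by 2 just below).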
2320 num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2;
2322 total_length = mp->m_pkthdr.len;
2323 if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
2324 device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
2325 __func__, total_length);
2328 eh = mtod(mp, struct ether_vlan_header *);
2330 if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
2332 bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t));
2335 ret = qla_tx_tso(ha, mp, &tso_cmd, src);
2338 /* find the additional tx_cmd descriptors required */
2340 if (mp->m_flags & M_VLANTAG)
2341 tso_cmd.total_hdr_len += ETHER_VLAN_ENCAP_LEN;
2343 hdr_len = tso_cmd.total_hdr_len;
2345 bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
2346 bytes = QL_MIN(bytes, hdr_len);
2352 bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
2356 hdr_len = tso_cmd.total_hdr_len;
2359 src = (uint8_t *)eh;
2363 (void)qla_tx_chksum(ha, mp, &op_code, &tcp_hdr_off);
2366 if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
2367 ql_hw_tx_done_locked(ha, txr_idx);
2368 if (hw->tx_cntxt[txr_idx].txr_free <=
2369 (num_tx_cmds + QLA_TX_MIN_FREE)) {
2370 QL_DPRINT8(ha, (dev, "%s: (hw->txr_free <= "
2371 "(num_tx_cmds + QLA_TX_MIN_FREE))\n",
2377 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[tx_idx];
2379 if (!(mp->m_pkthdr.csum_flags & CSUM_TSO)) {
2381 if (nsegs > ha->hw.max_tx_segs)
2382 ha->hw.max_tx_segs = nsegs;
2384 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2387 tx_cmd->flags_opcode = op_code;
2388 tx_cmd->tcp_hdr_off = tcp_hdr_off;
2391 tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER;
2394 bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t));
2395 ha->tx_tso_frames++;
2398 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2399 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED;
2402 eh->evl_tag |= ha->hw.user_pri_iscsi << 13;
2404 } else if (mp->m_flags & M_VLANTAG) {
2406 if (hdr_len) { /* TSO */
2407 tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED |
2408 Q8_TX_CMD_FLAGS_HW_VLAN_ID);
2409 tx_cmd->tcp_hdr_off += ETHER_VLAN_ENCAP_LEN;
2411 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_HW_VLAN_ID;
2413 ha->hw_vlan_tx_frames++;
2414 tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag;
2417 tx_cmd->vlan_tci |= ha->hw.user_pri_iscsi << 13;
2418 mp->m_pkthdr.ether_vtag = tx_cmd->vlan_tci;
2423 tx_cmd->n_bufs = (uint8_t)nsegs;
2424 tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF);
2425 tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8)));
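	/*
	 * The 24-bit frame length is split across the descriptor: the low
	 * 8 bits go in data_len_lo and the upper 16 bits in data_len_hi.
	 */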
2426 tx_cmd->cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func);
2431 for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) {
2435 tx_cmd->buf1_addr = c_seg->ds_addr;
2436 tx_cmd->buf1_len = c_seg->ds_len;
2440 tx_cmd->buf2_addr = c_seg->ds_addr;
2441 tx_cmd->buf2_len = c_seg->ds_len;
2445 tx_cmd->buf3_addr = c_seg->ds_addr;
2446 tx_cmd->buf3_len = c_seg->ds_len;
2450 tx_cmd->buf4_addr = c_seg->ds_addr;
2451 tx_cmd->buf4_len = c_seg->ds_len;
2459 txr_next = hw->tx_cntxt[txr_idx].txr_next =
2460 (hw->tx_cntxt[txr_idx].txr_next + 1) &
2461 (NUM_TX_DESCRIPTORS - 1);
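	/* the mask-based wrap above assumes NUM_TX_DESCRIPTORS is a power of two */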
2467 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2468 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2471 if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
2473 /* TSO: copy the header into the following tx cmd descriptors */
2475 txr_next = hw->tx_cntxt[txr_idx].txr_next;
2477 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2478 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2480 bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
2481 bytes = QL_MIN(bytes, hdr_len);
2483 dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN;
2485 if (mp->m_flags & M_VLANTAG) {
2486 /* first copy the src/dst MAC addresses */
2487 bcopy(src, dst, (ETHER_ADDR_LEN * 2));
2488 dst += (ETHER_ADDR_LEN * 2);
2489 src += (ETHER_ADDR_LEN * 2);
2491 *((uint16_t *)dst) = htons(ETHERTYPE_VLAN);
2493 *((uint16_t *)dst) = htons(mp->m_pkthdr.ether_vtag);
2496 /* bytes left in src header */
2497 hdr_len -= ((ETHER_ADDR_LEN * 2) +
2498 ETHER_VLAN_ENCAP_LEN);
2500 /* bytes left in TxCmd Entry */
2501 bytes -= ((ETHER_ADDR_LEN * 2) + ETHER_VLAN_ENCAP_LEN);
2504 bcopy(src, dst, bytes);
2508 bcopy(src, dst, bytes);
2513 txr_next = hw->tx_cntxt[txr_idx].txr_next =
2514 (hw->tx_cntxt[txr_idx].txr_next + 1) &
2515 (NUM_TX_DESCRIPTORS - 1);
2519 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2520 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2522 bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
2524 bcopy(src, tx_cmd, bytes);
2528 txr_next = hw->tx_cntxt[txr_idx].txr_next =
2529 (hw->tx_cntxt[txr_idx].txr_next + 1) &
2530 (NUM_TX_DESCRIPTORS - 1);
2535 hw->tx_cntxt[txr_idx].txr_free =
2536 hw->tx_cntxt[txr_idx].txr_free - tx_cmd_count;
2538 QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->tx_cntxt[txr_idx].txr_next,\
2540 QL_DPRINT8(ha, (dev, "%s: return\n", __func__));
2547 #define Q8_CONFIG_IND_TBL_SIZE 32 /* < Q8_RSS_IND_TBL_SIZE and power of 2 */
2549 qla_config_rss_ind_table(qla_host_t *ha)
2552 uint8_t rss_ind_tbl[Q8_CONFIG_IND_TBL_SIZE];
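	/* distribute the indirection-table slots round-robin across the SDS rings */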
2555 for (i = 0; i < Q8_CONFIG_IND_TBL_SIZE; i++) {
2556 rss_ind_tbl[i] = i % ha->hw.num_sds_rings;
2559 for (i = 0; i <= Q8_RSS_IND_TBL_MAX_IDX ;
2560 i = i + Q8_CONFIG_IND_TBL_SIZE) {
2562 if ((i + Q8_CONFIG_IND_TBL_SIZE) > Q8_RSS_IND_TBL_MAX_IDX) {
2563 count = Q8_RSS_IND_TBL_MAX_IDX - i + 1;
2565 count = Q8_CONFIG_IND_TBL_SIZE;
2568 if (qla_set_rss_ind_table(ha, i, count, ha->hw.rcv_cntxt_id,
2577 qla_config_soft_lro(qla_host_t *ha)
2580 qla_hw_t *hw = &ha->hw;
2581 struct lro_ctrl *lro;
2583 for (i = 0; i < hw->num_sds_rings; i++) {
2584 lro = &hw->sds[i].lro;
2586 bzero(lro, sizeof(struct lro_ctrl));
2588 #if (__FreeBSD_version >= 1100101)
2589 if (tcp_lro_init_args(lro, ha->ifp, 0, NUM_RX_DESCRIPTORS)) {
2590 device_printf(ha->pci_dev,
2591 "%s: tcp_lro_init_args [%d] failed\n",
2596 if (tcp_lro_init(lro)) {
2597 device_printf(ha->pci_dev,
2598 "%s: tcp_lro_init [%d] failed\n",
2602 #endif /* #if (__FreeBSD_version >= 1100101) */
2607 QL_DPRINT2(ha, (ha->pci_dev, "%s: LRO initialized\n", __func__));
2612 qla_drain_soft_lro(qla_host_t *ha)
2615 qla_hw_t *hw = &ha->hw;
2616 struct lro_ctrl *lro;
2618 for (i = 0; i < hw->num_sds_rings; i++) {
2619 lro = &hw->sds[i].lro;
2621 #if (__FreeBSD_version >= 1100101)
2622 tcp_lro_flush_all(lro);
2624 struct lro_entry *queued;
2626 while ((!SLIST_EMPTY(&lro->lro_active))) {
2627 queued = SLIST_FIRST(&lro->lro_active);
2628 SLIST_REMOVE_HEAD(&lro->lro_active, next);
2629 tcp_lro_flush(lro, queued);
2631 #endif /* #if (__FreeBSD_version >= 1100101) */
2638 qla_free_soft_lro(qla_host_t *ha)
2641 qla_hw_t *hw = &ha->hw;
2642 struct lro_ctrl *lro;
2644 for (i = 0; i < hw->num_sds_rings; i++) {
2645 lro = &hw->sds[i].lro;
2654 * Name: ql_del_hw_if
2655 * Function: Destroys the hardware specific entities corresponding to an
2656 * Ethernet Interface
2659 ql_del_hw_if(qla_host_t *ha)
2664 (void)qla_stop_nic_func(ha);
2666 qla_del_rcv_cntxt(ha);
2668 qla_del_xmt_cntxt(ha);
2670 if (ha->hw.flags.init_intr_cnxt) {
2671 for (i = 0; i < ha->hw.num_sds_rings; ) {
2673 if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2674 num_msix = Q8_MAX_INTR_VECTORS;
2676 num_msix = ha->hw.num_sds_rings - i;
2677 qla_config_intr_cntxt(ha, i, num_msix, 0);
2682 ha->hw.flags.init_intr_cnxt = 0;
2685 if (ha->hw.enable_soft_lro) {
2686 qla_drain_soft_lro(ha);
2687 qla_free_soft_lro(ha);
2694 qla_confirm_9kb_enable(qla_host_t *ha)
2696 uint32_t supports_9kb = 0;
2698 ha->hw.mbx_intr_mask_offset = READ_REG32(ha, Q8_MBOX_INT_MASK_MSIX);
2700 /* Use MSI-X vector 0; Enable Firmware Mailbox Interrupt */
2701 WRITE_REG32(ha, Q8_MBOX_INT_ENABLE, BIT_2);
2702 WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
2704 qla_get_nic_partition(ha, &supports_9kb, NULL);
2707 ha->hw.enable_9kb = 0;
2713 * Name: ql_init_hw_if
2714 * Function: Creates the hardware specific entities corresponding to an
2715 * Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
2716 * corresponding to the interface. Enables LRO if allowed.
2719 ql_init_hw_if(qla_host_t *ha)
2723 uint8_t bcast_mac[6];
2729 for (i = 0; i < ha->hw.num_sds_rings; i++) {
2730 bzero(ha->hw.dma_buf.sds_ring[i].dma_b,
2731 ha->hw.dma_buf.sds_ring[i].size);
2734 for (i = 0; i < ha->hw.num_sds_rings; ) {
2736 if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2737 num_msix = Q8_MAX_INTR_VECTORS;
2739 num_msix = ha->hw.num_sds_rings - i;
2741 if (qla_config_intr_cntxt(ha, i, num_msix, 1)) {
2747 for (i = 0; i < num_msix; ) {
2748 qla_config_intr_cntxt(ha, i,
2749 Q8_MAX_INTR_VECTORS, 0);
2750 i += Q8_MAX_INTR_VECTORS;
2759 ha->hw.flags.init_intr_cnxt = 1;
2762 * Create Receive Context
2764 if (qla_init_rcv_cntxt(ha)) {
2768 for (i = 0; i < ha->hw.num_rds_rings; i++) {
2769 rdesc = &ha->hw.rds[i];
2770 rdesc->rx_next = NUM_RX_DESCRIPTORS - 2;
2772 /* Update the RDS Producer Indices */
2773 QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,\
2778 * Create Transmit Context
2780 if (qla_init_xmt_cntxt(ha)) {
2781 qla_del_rcv_cntxt(ha);
2784 ha->hw.max_tx_segs = 0;
2786 if (qla_config_mac_addr(ha, ha->hw.mac_addr, 1, 1))
2789 ha->hw.flags.unicast_mac = 1;
2791 bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
2792 bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
2794 if (qla_config_mac_addr(ha, bcast_mac, 1, 1))
2797 ha->hw.flags.bcast_mac = 1;
2800 * program any cached multicast addresses
2802 if (qla_hw_add_all_mcast(ha))
2805 if (ql_set_max_mtu(ha, ha->max_frame_size, ha->hw.rcv_cntxt_id))
2808 if (qla_config_rss(ha, ha->hw.rcv_cntxt_id))
2811 if (qla_config_rss_ind_table(ha))
2814 if (qla_config_intr_coalesce(ha, ha->hw.rcv_cntxt_id, 0, 1))
2817 if (qla_link_event_req(ha, ha->hw.rcv_cntxt_id))
2820 if (ha->ifp->if_capenable & IFCAP_LRO) {
2821 if (ha->hw.enable_hw_lro) {
2822 ha->hw.enable_soft_lro = 0;
2824 if (qla_config_fw_lro(ha, ha->hw.rcv_cntxt_id))
2827 ha->hw.enable_soft_lro = 1;
2829 if (qla_config_soft_lro(ha))
2834 if (qla_init_nic_func(ha))
2837 if (qla_query_fw_dcbx_caps(ha))
2840 for (i = 0; i < ha->hw.num_sds_rings; i++)
2841 QL_ENABLE_INTERRUPTS(ha, i);
2847 qla_map_sds_to_rds(qla_host_t *ha, uint32_t start_idx, uint32_t num_idx)
2849 device_t dev = ha->pci_dev;
2850 q80_rq_map_sds_to_rds_t *map_rings;
2851 q80_rsp_map_sds_to_rds_t *map_rings_rsp;
2853 qla_hw_t *hw = &ha->hw;
2855 map_rings = (q80_rq_map_sds_to_rds_t *)ha->hw.mbox;
2856 bzero(map_rings, sizeof(q80_rq_map_sds_to_rds_t));
2858 map_rings->opcode = Q8_MBX_MAP_SDS_TO_RDS;
2859 map_rings->count_version = (sizeof (q80_rq_map_sds_to_rds_t) >> 2);
2860 map_rings->count_version |= Q8_MBX_CMD_VERSION;
2862 map_rings->cntxt_id = hw->rcv_cntxt_id;
2863 map_rings->num_rings = num_idx;
2865 for (i = 0; i < num_idx; i++) {
2866 map_rings->sds_rds[i].sds_ring = i + start_idx;
2867 map_rings->sds_rds[i].rds_ring = i + start_idx;
2870 if (qla_mbx_cmd(ha, (uint32_t *)map_rings,
2871 (sizeof (q80_rq_map_sds_to_rds_t) >> 2),
2872 ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
2873 device_printf(dev, "%s: failed0\n", __func__);
2877 map_rings_rsp = (q80_rsp_map_sds_to_rds_t *)ha->hw.mbox;
2879 err = Q8_MBX_RSP_STATUS(map_rings_rsp->regcnt_status);
2882 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2890 * Name: qla_init_rcv_cntxt
2891 * Function: Creates the Receive Context.
2894 qla_init_rcv_cntxt(qla_host_t *ha)
2896 q80_rq_rcv_cntxt_t *rcntxt;
2897 q80_rsp_rcv_cntxt_t *rcntxt_rsp;
2898 q80_stat_desc_t *sdesc;
2900 qla_hw_t *hw = &ha->hw;
2903 uint32_t rcntxt_sds_rings;
2904 uint32_t rcntxt_rds_rings;
2910 * Create Receive Context
2913 for (i = 0; i < hw->num_sds_rings; i++) {
2914 sdesc = (q80_stat_desc_t *)&hw->sds[i].sds_ring_base[0];
2916 for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++) {
2917 sdesc->data[0] = 1ULL;
2918 sdesc->data[1] = 1ULL;
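			/*
			 * Pre-fill the status descriptors with a non-zero
			 * pattern, presumably so that entries written back
			 * by the firmware can be distinguished from ones
			 * that were never used.
			 */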
2922 rcntxt_sds_rings = hw->num_sds_rings;
2923 if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS)
2924 rcntxt_sds_rings = MAX_RCNTXT_SDS_RINGS;
2926 rcntxt_rds_rings = hw->num_rds_rings;
2928 if (hw->num_rds_rings > MAX_RDS_RING_SETS)
2929 rcntxt_rds_rings = MAX_RDS_RING_SETS;
2931 rcntxt = (q80_rq_rcv_cntxt_t *)ha->hw.mbox;
2932 bzero(rcntxt, (sizeof (q80_rq_rcv_cntxt_t)));
2934 rcntxt->opcode = Q8_MBX_CREATE_RX_CNTXT;
2935 rcntxt->count_version = (sizeof (q80_rq_rcv_cntxt_t) >> 2);
2936 rcntxt->count_version |= Q8_MBX_CMD_VERSION;
2938 rcntxt->cap0 = Q8_RCV_CNTXT_CAP0_BASEFW |
2939 Q8_RCV_CNTXT_CAP0_LRO |
2940 Q8_RCV_CNTXT_CAP0_HW_LRO |
2941 Q8_RCV_CNTXT_CAP0_RSS |
2942 Q8_RCV_CNTXT_CAP0_SGL_LRO;
2944 if (ha->hw.enable_9kb)
2945 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SINGLE_JUMBO;
2947 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SGL_JUMBO;
2949 if (ha->hw.num_rds_rings > 1) {
2950 rcntxt->nrds_sets_rings = rcntxt_rds_rings | (1 << 5);
2951 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_MULTI_RDS;
2953 rcntxt->nrds_sets_rings = 0x1 | (1 << 5);
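	/*
	 * nrds_sets_rings appears to pack the number of RDS sets in the
	 * low bits with a rings-per-set count starting at bit 5 (one ring
	 * per set in both branches above) -- an assumption; the field
	 * layout is not spelled out here.
	 */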
2955 rcntxt->nsds_rings = rcntxt_sds_rings;
2957 rcntxt->rds_producer_mode = Q8_RCV_CNTXT_RDS_PROD_MODE_UNIQUE;
2959 rcntxt->rcv_vpid = 0;
2961 for (i = 0; i < rcntxt_sds_rings; i++) {
2962 rcntxt->sds[i].paddr =
2963 qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr);
2964 rcntxt->sds[i].size =
2965 qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
2966 rcntxt->sds[i].intr_id = qla_host_to_le16(hw->intr_id[i]);
2967 rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0);
2970 for (i = 0; i < rcntxt_rds_rings; i++) {
2971 rcntxt->rds[i].paddr_std =
2972 qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr);
2974 if (ha->hw.enable_9kb)
2975 rcntxt->rds[i].std_bsize =
2976 qla_host_to_le64(MJUM9BYTES);
2978 rcntxt->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
2980 rcntxt->rds[i].std_nentries =
2981 qla_host_to_le32(NUM_RX_DESCRIPTORS);
2984 if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
2985 (sizeof (q80_rq_rcv_cntxt_t) >> 2),
2986 ha->hw.mbox, (sizeof(q80_rsp_rcv_cntxt_t) >> 2), 0)) {
2987 device_printf(dev, "%s: failed0\n", __func__);
2991 rcntxt_rsp = (q80_rsp_rcv_cntxt_t *)ha->hw.mbox;
2993 err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
2996 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3000 for (i = 0; i < rcntxt_sds_rings; i++) {
3001 hw->sds[i].sds_consumer = rcntxt_rsp->sds_cons[i];
3004 for (i = 0; i < rcntxt_rds_rings; i++) {
3005 hw->rds[i].prod_std = rcntxt_rsp->rds[i].prod_std;
3008 hw->rcv_cntxt_id = rcntxt_rsp->cntxt_id;
3010 ha->hw.flags.init_rx_cnxt = 1;
3012 if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) {
3014 for (i = MAX_RCNTXT_SDS_RINGS; i < hw->num_sds_rings;) {
3016 if ((i + MAX_RCNTXT_SDS_RINGS) < hw->num_sds_rings)
3017 max_idx = MAX_RCNTXT_SDS_RINGS;
3019 max_idx = hw->num_sds_rings - i;
3021 err = qla_add_rcv_rings(ha, i, max_idx);
3029 if (hw->num_rds_rings > 1) {
3031 for (i = 0; i < hw->num_rds_rings; ) {
3033 if ((i + MAX_SDS_TO_RDS_MAP) < hw->num_rds_rings)
3034 max_idx = MAX_SDS_TO_RDS_MAP;
3036 max_idx = hw->num_rds_rings - i;
3038 err = qla_map_sds_to_rds(ha, i, max_idx);
3050 qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds)
3052 device_t dev = ha->pci_dev;
3053 q80_rq_add_rcv_rings_t *add_rcv;
3054 q80_rsp_add_rcv_rings_t *add_rcv_rsp;
3056 qla_hw_t *hw = &ha->hw;
3058 add_rcv = (q80_rq_add_rcv_rings_t *)ha->hw.mbox;
3059 bzero(add_rcv, sizeof (q80_rq_add_rcv_rings_t));
3061 add_rcv->opcode = Q8_MBX_ADD_RX_RINGS;
3062 add_rcv->count_version = (sizeof (q80_rq_add_rcv_rings_t) >> 2);
3063 add_rcv->count_version |= Q8_MBX_CMD_VERSION;
3065 add_rcv->nrds_sets_rings = nsds | (1 << 5);
3066 add_rcv->nsds_rings = nsds;
3067 add_rcv->cntxt_id = hw->rcv_cntxt_id;
3069 for (i = 0; i < nsds; i++) {
3073 add_rcv->sds[i].paddr =
3074 qla_host_to_le64(hw->dma_buf.sds_ring[j].dma_addr);
3076 add_rcv->sds[i].size =
3077 qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
3079 add_rcv->sds[i].intr_id = qla_host_to_le16(hw->intr_id[j]);
3080 add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0);
3084 for (i = 0; (i < nsds); i++) {
3087 add_rcv->rds[i].paddr_std =
3088 qla_host_to_le64(hw->dma_buf.rds_ring[j].dma_addr);
3090 if (ha->hw.enable_9kb)
3091 add_rcv->rds[i].std_bsize =
3092 qla_host_to_le64(MJUM9BYTES);
3094 add_rcv->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
3096 add_rcv->rds[i].std_nentries =
3097 qla_host_to_le32(NUM_RX_DESCRIPTORS);
3101 if (qla_mbx_cmd(ha, (uint32_t *)add_rcv,
3102 (sizeof (q80_rq_add_rcv_rings_t) >> 2),
3103 ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
3104 device_printf(dev, "%s: failed0\n", __func__);
3108 add_rcv_rsp = (q80_rsp_add_rcv_rings_t *)ha->hw.mbox;
3110 err = Q8_MBX_RSP_STATUS(add_rcv_rsp->regcnt_status);
3113 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3117 for (i = 0; i < nsds; i++) {
3118 hw->sds[(i + sds_idx)].sds_consumer = add_rcv_rsp->sds_cons[i];
3121 for (i = 0; i < nsds; i++) {
3122 hw->rds[(i + sds_idx)].prod_std = add_rcv_rsp->rds[i].prod_std;
3129 * Name: qla_del_rcv_cntxt
3130 * Function: Destroys the Receive Context.
3133 qla_del_rcv_cntxt(qla_host_t *ha)
3135 device_t dev = ha->pci_dev;
3136 q80_rcv_cntxt_destroy_t *rcntxt;
3137 q80_rcv_cntxt_destroy_rsp_t *rcntxt_rsp;
3139 uint8_t bcast_mac[6];
3141 if (!ha->hw.flags.init_rx_cnxt)
3144 if (qla_hw_del_all_mcast(ha))
3147 if (ha->hw.flags.bcast_mac) {
3149 bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
3150 bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
3152 if (qla_config_mac_addr(ha, bcast_mac, 0, 1))
3154 ha->hw.flags.bcast_mac = 0;
3158 if (ha->hw.flags.unicast_mac) {
3159 if (qla_config_mac_addr(ha, ha->hw.mac_addr, 0, 1))
3161 ha->hw.flags.unicast_mac = 0;
3164 rcntxt = (q80_rcv_cntxt_destroy_t *)ha->hw.mbox;
3165 bzero(rcntxt, (sizeof (q80_rcv_cntxt_destroy_t)));
3167 rcntxt->opcode = Q8_MBX_DESTROY_RX_CNTXT;
3168 rcntxt->count_version = (sizeof (q80_rcv_cntxt_destroy_t) >> 2);
3169 rcntxt->count_version |= Q8_MBX_CMD_VERSION;
3171 rcntxt->cntxt_id = ha->hw.rcv_cntxt_id;
3173 if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
3174 (sizeof (q80_rcv_cntxt_destroy_t) >> 2),
3175 ha->hw.mbox, (sizeof(q80_rcv_cntxt_destroy_rsp_t) >> 2), 0)) {
3176 device_printf(dev, "%s: failed0\n", __func__);
3179 rcntxt_rsp = (q80_rcv_cntxt_destroy_rsp_t *)ha->hw.mbox;
3181 err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
3184 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3187 ha->hw.flags.init_rx_cnxt = 0;
3192 * Name: qla_init_xmt_cntxt
3193 * Function: Creates the Transmit Context.
3196 qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
3199 qla_hw_t *hw = &ha->hw;
3200 q80_rq_tx_cntxt_t *tcntxt;
3201 q80_rsp_tx_cntxt_t *tcntxt_rsp;
3203 qla_hw_tx_cntxt_t *hw_tx_cntxt;
3206 hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
3211 * Create Transmit Context
3213 tcntxt = (q80_rq_tx_cntxt_t *)ha->hw.mbox;
3214 bzero(tcntxt, (sizeof (q80_rq_tx_cntxt_t)));
3216 tcntxt->opcode = Q8_MBX_CREATE_TX_CNTXT;
3217 tcntxt->count_version = (sizeof (q80_rq_tx_cntxt_t) >> 2);
3218 tcntxt->count_version |= Q8_MBX_CMD_VERSION;
3222 #ifdef QL_ENABLE_ISCSI_TLV
3224 tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO |
3225 Q8_TX_CNTXT_CAP0_TC;
3227 if (txr_idx >= (ha->hw.num_tx_rings >> 1)) {
3228 tcntxt->traffic_class = 1;
3231 intr_idx = txr_idx % (ha->hw.num_tx_rings >> 1);
3234 tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO;
3236 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
3238 tcntxt->ntx_rings = 1;
3240 tcntxt->tx_ring[0].paddr =
3241 qla_host_to_le64(hw_tx_cntxt->tx_ring_paddr);
3242 tcntxt->tx_ring[0].tx_consumer =
3243 qla_host_to_le64(hw_tx_cntxt->tx_cons_paddr);
3244 tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS);
3246 tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[intr_idx]);
3247 tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0);
3249 hw_tx_cntxt->txr_free = NUM_TX_DESCRIPTORS;
3250 hw_tx_cntxt->txr_next = hw_tx_cntxt->txr_comp = 0;
3251 *hw_tx_cntxt->tx_cons = 0;
3253 if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
3254 (sizeof (q80_rq_tx_cntxt_t) >> 2),
3256 (sizeof(q80_rsp_tx_cntxt_t) >> 2), 0)) {
3257 device_printf(dev, "%s: failed0\n", __func__);
3260 tcntxt_rsp = (q80_rsp_tx_cntxt_t *)ha->hw.mbox;
3262 err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
3265 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3269 hw_tx_cntxt->tx_prod_reg = tcntxt_rsp->tx_ring[0].prod_index;
3270 hw_tx_cntxt->tx_cntxt_id = tcntxt_rsp->tx_ring[0].cntxt_id;
3272 if (qla_config_intr_coalesce(ha, hw_tx_cntxt->tx_cntxt_id, 0, 0))
3280 * Name: qla_del_xmt_cntxt
3281 * Function: Destroys the Transmit Context.
3284 qla_del_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
3286 device_t dev = ha->pci_dev;
3287 q80_tx_cntxt_destroy_t *tcntxt;
3288 q80_tx_cntxt_destroy_rsp_t *tcntxt_rsp;
3291 tcntxt = (q80_tx_cntxt_destroy_t *)ha->hw.mbox;
3292 bzero(tcntxt, (sizeof (q80_tx_cntxt_destroy_t)));
3294 tcntxt->opcode = Q8_MBX_DESTROY_TX_CNTXT;
3295 tcntxt->count_version = (sizeof (q80_tx_cntxt_destroy_t) >> 2);
3296 tcntxt->count_version |= Q8_MBX_CMD_VERSION;
3298 tcntxt->cntxt_id = ha->hw.tx_cntxt[txr_idx].tx_cntxt_id;
3300 if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
3301 (sizeof (q80_tx_cntxt_destroy_t) >> 2),
3302 ha->hw.mbox, (sizeof (q80_tx_cntxt_destroy_rsp_t) >> 2), 0)) {
3303 device_printf(dev, "%s: failed0\n", __func__);
3306 tcntxt_rsp = (q80_tx_cntxt_destroy_rsp_t *)ha->hw.mbox;
3308 err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
3311 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3318 qla_del_xmt_cntxt(qla_host_t *ha)
3322 if (!ha->hw.flags.init_tx_cnxt)
3325 for (i = 0; i < ha->hw.num_tx_rings; i++) {
3326 if (qla_del_xmt_cntxt_i(ha, i))
3329 ha->hw.flags.init_tx_cnxt = 0;
3333 qla_init_xmt_cntxt(qla_host_t *ha)
3337 for (i = 0; i < ha->hw.num_tx_rings; i++) {
3338 if (qla_init_xmt_cntxt_i(ha, i) != 0) {
3339 for (j = 0; j < i; j++)
3340 qla_del_xmt_cntxt_i(ha, j);
3344 ha->hw.flags.init_tx_cnxt = 1;
3349 qla_hw_all_mcast(qla_host_t *ha, uint32_t add_mcast)
3355 nmcast = ha->hw.nmcast;
3357 QL_DPRINT2(ha, (ha->pci_dev,
3358 "%s:[0x%x] enter nmcast = %d \n", __func__, add_mcast, nmcast));
3360 mcast = ha->hw.mac_addr_arr;
3361 memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
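	/*
	 * Walk the cached multicast table, batching up to Q8_MAX_MAC_ADDRS
	 * addresses per mailbox command and flushing whenever a batch fills.
	 */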
3363 for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
3364 if ((ha->hw.mcast[i].addr[0] != 0) ||
3365 (ha->hw.mcast[i].addr[1] != 0) ||
3366 (ha->hw.mcast[i].addr[2] != 0) ||
3367 (ha->hw.mcast[i].addr[3] != 0) ||
3368 (ha->hw.mcast[i].addr[4] != 0) ||
3369 (ha->hw.mcast[i].addr[5] != 0)) {
3371 bcopy(ha->hw.mcast[i].addr, mcast, ETHER_ADDR_LEN);
3372 mcast = mcast + ETHER_ADDR_LEN;
3375 if (count == Q8_MAX_MAC_ADDRS) {
3376 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr,
3377 add_mcast, count)) {
3378 device_printf(ha->pci_dev,
3379 "%s: failed\n", __func__);
3384 mcast = ha->hw.mac_addr_arr;
3386 (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3394 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mcast,
3396 device_printf(ha->pci_dev, "%s: failed\n", __func__);
3400 QL_DPRINT2(ha, (ha->pci_dev,
3401 "%s:[0x%x] exit nmcast = %d \n", __func__, add_mcast, nmcast));
3407 qla_hw_add_all_mcast(qla_host_t *ha)
3411 ret = qla_hw_all_mcast(ha, 1);
3417 qla_hw_del_all_mcast(qla_host_t *ha)
3421 ret = qla_hw_all_mcast(ha, 0);
3423 bzero(ha->hw.mcast, (sizeof (qla_mcast_t) * Q8_MAX_NUM_MULTICAST_ADDRS));
3430 qla_hw_mac_addr_present(qla_host_t *ha, uint8_t *mta)
3434 for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3435 if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0)
3436 return (0); /* it has already been added */
3442 qla_hw_add_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
3446 for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3448 if ((ha->hw.mcast[i].addr[0] == 0) &&
3449 (ha->hw.mcast[i].addr[1] == 0) &&
3450 (ha->hw.mcast[i].addr[2] == 0) &&
3451 (ha->hw.mcast[i].addr[3] == 0) &&
3452 (ha->hw.mcast[i].addr[4] == 0) &&
3453 (ha->hw.mcast[i].addr[5] == 0)) {
3455 bcopy(mta, ha->hw.mcast[i].addr, Q8_MAC_ADDR_LEN);
3458 mta = mta + ETHER_ADDR_LEN;
3470 qla_hw_del_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
3474 for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3475 if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) {
3477 ha->hw.mcast[i].addr[0] = 0;
3478 ha->hw.mcast[i].addr[1] = 0;
3479 ha->hw.mcast[i].addr[2] = 0;
3480 ha->hw.mcast[i].addr[3] = 0;
3481 ha->hw.mcast[i].addr[4] = 0;
3482 ha->hw.mcast[i].addr[5] = 0;
3486 mta = mta + ETHER_ADDR_LEN;
3497 * Name: ql_hw_set_multi
3498 * Function: Sets the Multicast Addresses provided by the host O.S into the
3499 * hardware (for the given interface)
3502 ql_hw_set_multi(qla_host_t *ha, uint8_t *mcast_addr, uint32_t mcnt,
3505 uint8_t *mta = mcast_addr;
3511 mcast = ha->hw.mac_addr_arr;
3512 memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3514 for (i = 0; i < mcnt; i++) {
3515 if (mta[0] || mta[1] || mta[2] || mta[3] || mta[4] || mta[5]) {
3517 if (qla_hw_mac_addr_present(ha, mta) != 0) {
3518 bcopy(mta, mcast, ETHER_ADDR_LEN);
3519 mcast = mcast + ETHER_ADDR_LEN;
3523 if (qla_hw_mac_addr_present(ha, mta) == 0) {
3524 bcopy(mta, mcast, ETHER_ADDR_LEN);
3525 mcast = mcast + ETHER_ADDR_LEN;
3530 if (count == Q8_MAX_MAC_ADDRS) {
3531 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr,
3533 device_printf(ha->pci_dev, "%s: failed\n",
3539 qla_hw_add_mcast(ha, ha->hw.mac_addr_arr,
3542 qla_hw_del_mcast(ha, ha->hw.mac_addr_arr,
3547 mcast = ha->hw.mac_addr_arr;
3548 memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3551 mta += Q8_MAC_ADDR_LEN;
3555 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mac,
3557 device_printf(ha->pci_dev, "%s: failed\n", __func__);
3561 qla_hw_add_mcast(ha, ha->hw.mac_addr_arr, count);
3563 qla_hw_del_mcast(ha, ha->hw.mac_addr_arr, count);
3571 * Name: ql_hw_tx_done_locked
3572 * Function: Handle Transmit Completions
3575 ql_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx)
3578 qla_hw_t *hw = &ha->hw;
3579 uint32_t comp_idx, comp_count = 0;
3580 qla_hw_tx_cntxt_t *hw_tx_cntxt;
3582 hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
3584 /* retrieve the index of the last completed entry in the tx ring */
3585 comp_idx = qla_le32_to_host(*(hw_tx_cntxt->tx_cons));
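	/*
	 * tx_cons points into host memory that the firmware updates with
	 * its consumer index (its physical address was handed over at
	 * transmit context creation), so no register read is needed here.
	 */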
3587 while (comp_idx != hw_tx_cntxt->txr_comp) {
3589 txb = &ha->tx_ring[txr_idx].tx_buf[hw_tx_cntxt->txr_comp];
3591 hw_tx_cntxt->txr_comp++;
3592 if (hw_tx_cntxt->txr_comp == NUM_TX_DESCRIPTORS)
3593 hw_tx_cntxt->txr_comp = 0;
3598 ha->ifp->if_opackets++;
3600 bus_dmamap_sync(ha->tx_tag, txb->map,
3601 BUS_DMASYNC_POSTWRITE);
3602 bus_dmamap_unload(ha->tx_tag, txb->map);
3603 m_freem(txb->m_head);
3609 hw_tx_cntxt->txr_free += comp_count;
3614 ql_update_link_state(qla_host_t *ha)
3616 uint32_t link_state;
3617 uint32_t prev_link_state;
3619 if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3623 link_state = READ_REG32(ha, Q8_LINK_STATE);
3625 prev_link_state = ha->hw.link_up;
3627 if (ha->pci_func == 0)
3628 ha->hw.link_up = (((link_state & 0xF) == 1) ? 1 : 0);
3630 ha->hw.link_up = ((((link_state >> 4) & 0xF) == 1) ? 1 : 0);
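	/*
	 * Function 0 reads the low 4-bit field of Q8_LINK_STATE and any
	 * other function reads the next one; a value of 1 means link up.
	 */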
3632 if (prev_link_state != ha->hw.link_up) {
3633 if (ha->hw.link_up) {
3634 if_link_state_change(ha->ifp, LINK_STATE_UP);
3636 if_link_state_change(ha->ifp, LINK_STATE_DOWN);
3643 ql_hw_check_health(qla_host_t *ha)
3647 ha->hw.health_count++;
3649 if (ha->hw.health_count < 500)
3652 ha->hw.health_count = 0;
3654 val = READ_REG32(ha, Q8_ASIC_TEMPERATURE);
3656 if (((val & 0xFFFF) == 2) || ((val & 0xFFFF) == 3) ||
3657 (QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE))) {
3658 device_printf(ha->pci_dev, "%s: Temperature Alert [0x%08x]\n",
3663 val = READ_REG32(ha, Q8_FIRMWARE_HEARTBEAT);
3665 if ((val != ha->hw.hbeat_value) &&
3666 (!(QL_ERR_INJECT(ha, INJCT_HEARTBEAT_FAILURE)))) {
3667 ha->hw.hbeat_value = val;
3668 ha->hw.hbeat_failure = 0;
3672 ha->hw.hbeat_failure++;
3675 if ((ha->dbg_level & 0x8000) && (ha->hw.hbeat_failure == 1))
3676 device_printf(ha->pci_dev, "%s: Heartbeat Failure 1 [0x%08x]\n",
3678 if (ha->hw.hbeat_failure < 2) /* we ignore the first failure */
3681 device_printf(ha->pci_dev, "%s: Heartbeat Failure [0x%08x]\n",
3688 qla_init_nic_func(qla_host_t *ha)
3691 q80_init_nic_func_t *init_nic;
3692 q80_init_nic_func_rsp_t *init_nic_rsp;
3697 init_nic = (q80_init_nic_func_t *)ha->hw.mbox;
3698 bzero(init_nic, sizeof(q80_init_nic_func_t));
3700 init_nic->opcode = Q8_MBX_INIT_NIC_FUNC;
3701 init_nic->count_version = (sizeof (q80_init_nic_func_t) >> 2);
3702 init_nic->count_version |= Q8_MBX_CMD_VERSION;
3704 init_nic->options = Q8_INIT_NIC_REG_DCBX_CHNG_AEN;
3705 init_nic->options |= Q8_INIT_NIC_REG_SFP_CHNG_AEN;
3706 init_nic->options |= Q8_INIT_NIC_REG_IDC_AEN;
3708 //qla_dump_buf8(ha, __func__, init_nic, sizeof (q80_init_nic_func_t));
3709 if (qla_mbx_cmd(ha, (uint32_t *)init_nic,
3710 (sizeof (q80_init_nic_func_t) >> 2),
3711 ha->hw.mbox, (sizeof (q80_init_nic_func_rsp_t) >> 2), 0)) {
3712 device_printf(dev, "%s: failed\n", __func__);
3716 init_nic_rsp = (q80_init_nic_func_rsp_t *)ha->hw.mbox;
3717 // qla_dump_buf8(ha, __func__, init_nic_rsp, sizeof (q80_init_nic_func_rsp_t));
3719 err = Q8_MBX_RSP_STATUS(init_nic_rsp->regcnt_status);
3722 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3729 qla_stop_nic_func(qla_host_t *ha)
3732 q80_stop_nic_func_t *stop_nic;
3733 q80_stop_nic_func_rsp_t *stop_nic_rsp;
3738 stop_nic = (q80_stop_nic_func_t *)ha->hw.mbox;
3739 bzero(stop_nic, sizeof(q80_stop_nic_func_t));
3741 stop_nic->opcode = Q8_MBX_STOP_NIC_FUNC;
3742 stop_nic->count_version = (sizeof (q80_stop_nic_func_t) >> 2);
3743 stop_nic->count_version |= Q8_MBX_CMD_VERSION;
3745 stop_nic->options = Q8_STOP_NIC_DEREG_DCBX_CHNG_AEN;
3746 stop_nic->options |= Q8_STOP_NIC_DEREG_SFP_CHNG_AEN;
3748 //qla_dump_buf8(ha, __func__, stop_nic, sizeof (q80_stop_nic_func_t));
3749 if (qla_mbx_cmd(ha, (uint32_t *)stop_nic,
3750 (sizeof (q80_stop_nic_func_t) >> 2),
3751 ha->hw.mbox, (sizeof (q80_stop_nic_func_rsp_t) >> 2), 0)) {
3752 device_printf(dev, "%s: failed\n", __func__);
3756 stop_nic_rsp = (q80_stop_nic_func_rsp_t *)ha->hw.mbox;
3757 //qla_dump_buf8(ha, __func__, stop_nic_rsp, sizeof (q80_stop_nic_func_rsp_t));
3759 err = Q8_MBX_RSP_STATUS(stop_nic_rsp->regcnt_status);
3762 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3769 qla_query_fw_dcbx_caps(qla_host_t *ha)
3772 q80_query_fw_dcbx_caps_t *fw_dcbx;
3773 q80_query_fw_dcbx_caps_rsp_t *fw_dcbx_rsp;
3778 fw_dcbx = (q80_query_fw_dcbx_caps_t *)ha->hw.mbox;
3779 bzero(fw_dcbx, sizeof(q80_query_fw_dcbx_caps_t));
3781 fw_dcbx->opcode = Q8_MBX_GET_FW_DCBX_CAPS;
3782 fw_dcbx->count_version = (sizeof (q80_query_fw_dcbx_caps_t) >> 2);
3783 fw_dcbx->count_version |= Q8_MBX_CMD_VERSION;
3785 ql_dump_buf8(ha, __func__, fw_dcbx, sizeof (q80_query_fw_dcbx_caps_t));
3786 if (qla_mbx_cmd(ha, (uint32_t *)fw_dcbx,
3787 (sizeof (q80_query_fw_dcbx_caps_t) >> 2),
3788 ha->hw.mbox, (sizeof (q80_query_fw_dcbx_caps_rsp_t) >> 2), 0)) {
3789 device_printf(dev, "%s: failed\n", __func__);
3793 fw_dcbx_rsp = (q80_query_fw_dcbx_caps_rsp_t *)ha->hw.mbox;
3794 ql_dump_buf8(ha, __func__, fw_dcbx_rsp,
3795 sizeof (q80_query_fw_dcbx_caps_rsp_t));
3797 err = Q8_MBX_RSP_STATUS(fw_dcbx_rsp->regcnt_status);
3800 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3807 qla_idc_ack(qla_host_t *ha, uint32_t aen_mb1, uint32_t aen_mb2,
3808 uint32_t aen_mb3, uint32_t aen_mb4)
3811 q80_idc_ack_t *idc_ack;
3812 q80_idc_ack_rsp_t *idc_ack_rsp;
3818 idc_ack = (q80_idc_ack_t *)ha->hw.mbox;
3819 bzero(idc_ack, sizeof(q80_idc_ack_t));
3821 idc_ack->opcode = Q8_MBX_IDC_ACK;
3822 idc_ack->count_version = (sizeof (q80_idc_ack_t) >> 2);
3823 idc_ack->count_version |= Q8_MBX_CMD_VERSION;
3825 idc_ack->aen_mb1 = aen_mb1;
3826 idc_ack->aen_mb2 = aen_mb2;
3827 idc_ack->aen_mb3 = aen_mb3;
3828 idc_ack->aen_mb4 = aen_mb4;
3830 ha->hw.imd_compl = 0;
3832 if (qla_mbx_cmd(ha, (uint32_t *)idc_ack,
3833 (sizeof (q80_idc_ack_t) >> 2),
3834 ha->hw.mbox, (sizeof (q80_idc_ack_rsp_t) >> 2), 0)) {
3835 device_printf(dev, "%s: failed\n", __func__);
3839 idc_ack_rsp = (q80_idc_ack_rsp_t *)ha->hw.mbox;
3841 err = Q8_MBX_RSP_STATUS(idc_ack_rsp->regcnt_status);
3844 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3848 while (count && !ha->hw.imd_compl) {
3849 qla_mdelay(__func__, 100);
3856 device_printf(dev, "%s: count %d\n", __func__, count);
3862 qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits)
3865 q80_set_port_cfg_t *pcfg;
3866 q80_set_port_cfg_rsp_t *pfg_rsp;
3872 pcfg = (q80_set_port_cfg_t *)ha->hw.mbox;
3873 bzero(pcfg, sizeof(q80_set_port_cfg_t));
3875 pcfg->opcode = Q8_MBX_SET_PORT_CONFIG;
3876 pcfg->count_version = (sizeof (q80_set_port_cfg_t) >> 2);
3877 pcfg->count_version |= Q8_MBX_CMD_VERSION;
3879 pcfg->cfg_bits = cfg_bits;
3881 device_printf(dev, "%s: cfg_bits"
3882 " [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
3883 " [0x%x, 0x%x, 0x%x]\n", __func__,
3884 ((cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
3885 ((cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
3886 ((cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0));
3888 ha->hw.imd_compl = 0;
3890 if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
3891 (sizeof (q80_set_port_cfg_t) >> 2),
3892 ha->hw.mbox, (sizeof (q80_set_port_cfg_rsp_t) >> 2), 0)) {
3893 device_printf(dev, "%s: failed\n", __func__);
3897 pfg_rsp = (q80_set_port_cfg_rsp_t *)ha->hw.mbox;
3899 err = Q8_MBX_RSP_STATUS(pfg_rsp->regcnt_status);
3901 if (err == Q8_MBX_RSP_IDC_INTRMD_RSP) {
3902 while (count && !ha->hw.imd_compl) {
3903 qla_mdelay(__func__, 100);
3907 device_printf(dev, "%s: count %d\n", __func__, count);
3914 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3923 qla_get_minidump_tmplt_size(qla_host_t *ha, uint32_t *size)
3926 device_t dev = ha->pci_dev;
3927 q80_config_md_templ_size_t *md_size;
3928 q80_config_md_templ_size_rsp_t *md_size_rsp;
3930 #ifndef QL_LDFLASH_FW
3932 ql_minidump_template_hdr_t *hdr;
3934 hdr = (ql_minidump_template_hdr_t *)ql83xx_minidump;
3935 *size = hdr->size_of_template;
3938 #endif /* #ifndef QL_LDFLASH_FW */
3940 md_size = (q80_config_md_templ_size_t *) ha->hw.mbox;
3941 bzero(md_size, sizeof(q80_config_md_templ_size_t));
3943 md_size->opcode = Q8_MBX_GET_MINIDUMP_TMPLT_SIZE;
3944 md_size->count_version = (sizeof (q80_config_md_templ_size_t) >> 2);
3945 md_size->count_version |= Q8_MBX_CMD_VERSION;
3947 if (qla_mbx_cmd(ha, (uint32_t *) md_size,
3948 (sizeof(q80_config_md_templ_size_t) >> 2), ha->hw.mbox,
3949 (sizeof(q80_config_md_templ_size_rsp_t) >> 2), 0)) {
3951 device_printf(dev, "%s: failed\n", __func__);
3956 md_size_rsp = (q80_config_md_templ_size_rsp_t *) ha->hw.mbox;
3958 err = Q8_MBX_RSP_STATUS(md_size_rsp->regcnt_status);
3961 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3965 *size = md_size_rsp->templ_size;
3971 qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits)
3974 q80_get_port_cfg_t *pcfg;
3975 q80_get_port_cfg_rsp_t *pcfg_rsp;
3980 pcfg = (q80_get_port_cfg_t *)ha->hw.mbox;
3981 bzero(pcfg, sizeof(q80_get_port_cfg_t));
3983 pcfg->opcode = Q8_MBX_GET_PORT_CONFIG;
3984 pcfg->count_version = (sizeof (q80_get_port_cfg_t) >> 2);
3985 pcfg->count_version |= Q8_MBX_CMD_VERSION;
3987 if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
3988 (sizeof (q80_get_port_cfg_t) >> 2),
3989 ha->hw.mbox, (sizeof (q80_get_port_cfg_rsp_t) >> 2), 0)) {
3990 device_printf(dev, "%s: failed\n", __func__);
3994 pcfg_rsp = (q80_get_port_cfg_rsp_t *)ha->hw.mbox;
3996 err = Q8_MBX_RSP_STATUS(pcfg_rsp->regcnt_status);
3999 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4003 device_printf(dev, "%s: [cfg_bits, port type]"
4004 " [0x%08x, 0x%02x] [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
4005 " [0x%x, 0x%x, 0x%x]\n", __func__,
4006 pcfg_rsp->cfg_bits, pcfg_rsp->phys_port_type,
4007 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
4008 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
4009 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0)
4012 *cfg_bits = pcfg_rsp->cfg_bits;
4018 ql_iscsi_pdu(qla_host_t *ha, struct mbuf *mp)
4020 struct ether_vlan_header *eh;
4022 struct ip *ip = NULL;
4023 struct ip6_hdr *ip6 = NULL;
4024 struct tcphdr *th = NULL;
4027 uint8_t buf[sizeof(struct ip6_hdr)];
4029 eh = mtod(mp, struct ether_vlan_header *);
4031 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4032 hdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4033 etype = ntohs(eh->evl_proto);
4035 hdrlen = ETHER_HDR_LEN;
4036 etype = ntohs(eh->evl_encap_proto);
4039 if (etype == ETHERTYPE_IP) {
4041 offset = (hdrlen + sizeof (struct ip));
4043 if (mp->m_len >= offset) {
4044 ip = (struct ip *)(mp->m_data + hdrlen);
4046 m_copydata(mp, hdrlen, sizeof (struct ip), buf);
4047 ip = (struct ip *)buf;
4050 if (ip->ip_p == IPPROTO_TCP) {
4052 hdrlen += ip->ip_hl << 2;
4053 offset = hdrlen + 4;
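			/*
			 * Only the first 4 bytes of the TCP header (the
			 * source and destination ports) are needed to
			 * identify an iSCSI PDU.
			 */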
4055 if (mp->m_len >= offset) {
4056 th = (struct tcphdr *)(mp->m_data + hdrlen);
4058 m_copydata(mp, hdrlen, 4, buf);
4059 th = (struct tcphdr *)buf;
4063 } else if (etype == ETHERTYPE_IPV6) {
4065 offset = (hdrlen + sizeof (struct ip6_hdr));
4067 if (mp->m_len >= offset) {
4068 ip6 = (struct ip6_hdr *)(mp->m_data + hdrlen);
4070 m_copydata(mp, hdrlen, sizeof (struct ip6_hdr), buf);
4071 ip6 = (struct ip6_hdr *)buf;
4074 if (ip6->ip6_nxt == IPPROTO_TCP) {
4076 hdrlen += sizeof(struct ip6_hdr);
4077 offset = hdrlen + 4;
4079 if (mp->m_len >= offset) {
4080 th = (struct tcphdr *)(mp->m_data + hdrlen);
4082 m_copydata(mp, hdrlen, 4, buf);
4083 th = (struct tcphdr *)buf;
4089 if ((th->th_sport == htons(3260)) ||
4090 (th->th_dport == htons(3260)))
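		/* 3260 is the IANA-registered iSCSI port */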
4097 qla_hw_async_event(qla_host_t *ha)
4099 switch (ha->hw.aen_mb0) {
4101 (void)qla_idc_ack(ha, ha->hw.aen_mb1, ha->hw.aen_mb2,
4102 ha->hw.aen_mb3, ha->hw.aen_mb4);
4113 #ifdef QL_LDFLASH_FW
4115 ql_get_minidump_template(qla_host_t *ha)
4118 device_t dev = ha->pci_dev;
4119 q80_config_md_templ_cmd_t *md_templ;
4120 q80_config_md_templ_cmd_rsp_t *md_templ_rsp;
4122 md_templ = (q80_config_md_templ_cmd_t *) ha->hw.mbox;
4123 bzero(md_templ, (sizeof (q80_config_md_templ_cmd_t)));
4125 md_templ->opcode = Q8_MBX_GET_MINIDUMP_TMPLT;
4126 md_templ->count_version = (sizeof(q80_config_md_templ_cmd_t) >> 2);
4127 md_templ->count_version |= Q8_MBX_CMD_VERSION;
4129 md_templ->buf_addr = ha->hw.dma_buf.minidump.dma_addr;
4130 md_templ->buff_size = ha->hw.dma_buf.minidump.size;
4132 if (qla_mbx_cmd(ha, (uint32_t *) md_templ,
4133 (sizeof(q80_config_md_templ_cmd_t) >> 2),
4135 (sizeof(q80_config_md_templ_cmd_rsp_t) >> 2), 0)) {
4137 device_printf(dev, "%s: failed\n", __func__);
4142 md_templ_rsp = (q80_config_md_templ_cmd_rsp_t *) ha->hw.mbox;
4144 err = Q8_MBX_RSP_STATUS(md_templ_rsp->regcnt_status);
4147 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4154 #endif /* #ifdef QL_LDFLASH_FW */
4157 * Minidump related functionality
4160 static int ql_parse_template(qla_host_t *ha);
4162 static uint32_t ql_rdcrb(qla_host_t *ha,
4163 ql_minidump_entry_rdcrb_t *crb_entry,
4164 uint32_t * data_buff);
4166 static uint32_t ql_pollrd(qla_host_t *ha,
4167 ql_minidump_entry_pollrd_t *entry,
4168 uint32_t * data_buff);
4170 static uint32_t ql_pollrd_modify_write(qla_host_t *ha,
4171 ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
4172 uint32_t *data_buff);
4174 static uint32_t ql_L2Cache(qla_host_t *ha,
4175 ql_minidump_entry_cache_t *cacheEntry,
4176 uint32_t * data_buff);
4178 static uint32_t ql_L1Cache(qla_host_t *ha,
4179 ql_minidump_entry_cache_t *cacheEntry,
4180 uint32_t *data_buff);
4182 static uint32_t ql_rdocm(qla_host_t *ha,
4183 ql_minidump_entry_rdocm_t *ocmEntry,
4184 uint32_t *data_buff);
4186 static uint32_t ql_rdmem(qla_host_t *ha,
4187 ql_minidump_entry_rdmem_t *mem_entry,
4188 uint32_t *data_buff);
4190 static uint32_t ql_rdrom(qla_host_t *ha,
4191 ql_minidump_entry_rdrom_t *romEntry,
4192 uint32_t *data_buff);
4194 static uint32_t ql_rdmux(qla_host_t *ha,
4195 ql_minidump_entry_mux_t *muxEntry,
4196 uint32_t *data_buff);
4198 static uint32_t ql_rdmux2(qla_host_t *ha,
4199 ql_minidump_entry_mux2_t *muxEntry,
4200 uint32_t *data_buff);
4202 static uint32_t ql_rdqueue(qla_host_t *ha,
4203 ql_minidump_entry_queue_t *queueEntry,
4204 uint32_t *data_buff);
4206 static uint32_t ql_cntrl(qla_host_t *ha,
4207 ql_minidump_template_hdr_t *template_hdr,
4208 ql_minidump_entry_cntrl_t *crbEntry);
4212 ql_minidump_size(qla_host_t *ha)
4216 ql_minidump_template_hdr_t *hdr;
4218 hdr = (ql_minidump_template_hdr_t *)ha->hw.dma_buf.minidump.dma_b;
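	/*
	 * Accumulate the capture size of every level selected by
	 * mdump_capture_mask; one mask bit per capture level.
	 */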
4222 for (k = 1; k < QL_DBG_CAP_SIZE_ARRAY_LEN; k++) {
4223 if (i & ha->hw.mdump_capture_mask)
4224 size += hdr->capture_size_array[k];
4231 ql_free_minidump_buffer(qla_host_t *ha)
4233 if (ha->hw.mdump_buffer != NULL) {
4234 free(ha->hw.mdump_buffer, M_QLA83XXBUF);
4235 ha->hw.mdump_buffer = NULL;
4236 ha->hw.mdump_buffer_size = 0;
4242 ql_alloc_minidump_buffer(qla_host_t *ha)
4244 ha->hw.mdump_buffer_size = ql_minidump_size(ha);
4246 if (!ha->hw.mdump_buffer_size)
4249 ha->hw.mdump_buffer = malloc(ha->hw.mdump_buffer_size, M_QLA83XXBUF,
4252 if (ha->hw.mdump_buffer == NULL)
4259 ql_free_minidump_template_buffer(qla_host_t *ha)
4261 if (ha->hw.mdump_template != NULL) {
4262 free(ha->hw.mdump_template, M_QLA83XXBUF);
4263 ha->hw.mdump_template = NULL;
4264 ha->hw.mdump_template_size = 0;
4270 ql_alloc_minidump_template_buffer(qla_host_t *ha)
4272 ha->hw.mdump_template_size = ha->hw.dma_buf.minidump.size;
4274 ha->hw.mdump_template = malloc(ha->hw.mdump_template_size,
4275 M_QLA83XXBUF, M_NOWAIT);
4277 if (ha->hw.mdump_template == NULL)
4284 ql_alloc_minidump_buffers(qla_host_t *ha)
4288 ret = ql_alloc_minidump_template_buffer(ha);
4293 ret = ql_alloc_minidump_buffer(ha);
4296 ql_free_minidump_template_buffer(ha);
4303 ql_validate_minidump_checksum(qla_host_t *ha)
4307 uint32_t *template_buff;
4309 count = ha->hw.dma_buf.minidump.size / sizeof (uint32_t);
4310 template_buff = ha->hw.dma_buf.minidump.dma_b;
4312 while (count-- > 0) {
4313 sum += *template_buff++;
4317 sum = (sum & 0xFFFFFFFF) + (sum >> 32);
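	/*
	 * Fold carries from the upper 32 bits back into the low 32 bits;
	 * a template whose 32-bit words sum (with end-around carry) to
	 * zero is taken to be intact.
	 */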
4324 ql_minidump_init(qla_host_t *ha)
4327 uint32_t template_size = 0;
4328 device_t dev = ha->pci_dev;
4331 * Get Minidump Template Size
4333 ret = qla_get_minidump_tmplt_size(ha, &template_size);
4335 if (ret || (template_size == 0)) {
4336 device_printf(dev, "%s: failed [%d, %d]\n", __func__, ret,
4342 * Allocate Memory for Minidump Template
4345 ha->hw.dma_buf.minidump.alignment = 8;
4346 ha->hw.dma_buf.minidump.size = template_size;
4348 #ifdef QL_LDFLASH_FW
4349 if (ql_alloc_dmabuf(ha, &ha->hw.dma_buf.minidump)) {
4351 device_printf(dev, "%s: minidump dma alloc failed\n", __func__);
4355 ha->hw.dma_buf.flags.minidump = 1;
4358 * Retrieve Minidump Template
4360 ret = ql_get_minidump_template(ha);
4362 ha->hw.dma_buf.minidump.dma_b = ql83xx_minidump;
4364 #endif /* #ifdef QL_LDFLASH_FW */
4368 ret = ql_validate_minidump_checksum(ha);
4372 ret = ql_alloc_minidump_buffers(ha);
4375 ha->hw.mdump_init = 1;
4378 "%s: ql_alloc_minidump_buffers"
4379 " failed\n", __func__);
4381 device_printf(dev, "%s: ql_validate_minidump_checksum"
4382 " failed\n", __func__);
4385 device_printf(dev, "%s: ql_get_minidump_template failed\n",
4390 ql_minidump_free(ha);
4396 ql_minidump_free(qla_host_t *ha)
4398 ha->hw.mdump_init = 0;
4399 if (ha->hw.dma_buf.flags.minidump) {
4400 ha->hw.dma_buf.flags.minidump = 0;
4401 ql_free_dmabuf(ha, &ha->hw.dma_buf.minidump);
4404 ql_free_minidump_template_buffer(ha);
4405 ql_free_minidump_buffer(ha);
4411 ql_minidump(qla_host_t *ha)
4413 if (!ha->hw.mdump_init)
4416 if (ha->hw.mdump_done)
4419 ha->hw.mdump_start_seq_index = ql_stop_sequence(ha);
4421 bzero(ha->hw.mdump_buffer, ha->hw.mdump_buffer_size);
4422 bzero(ha->hw.mdump_template, ha->hw.mdump_template_size);
4424 bcopy(ha->hw.dma_buf.minidump.dma_b, ha->hw.mdump_template,
4425 ha->hw.mdump_template_size);
4427 ql_parse_template(ha);
4429 ql_start_sequence(ha, ha->hw.mdump_start_seq_index);
4431 ha->hw.mdump_done = 1;
4441 ql_entry_err_chk(ql_minidump_entry_t *entry, uint32_t esize)
4443 if (esize != entry->hdr.entry_capture_size) {
4444 entry->hdr.entry_capture_size = esize;
4445 entry->hdr.driver_flags |= QL_DBG_SIZE_ERR_FLAG;
4452 ql_parse_template(qla_host_t *ha)
4454 uint32_t num_of_entries, buff_level, e_cnt, esize;
4455 uint32_t end_cnt, rv = 0;
4456 char *dump_buff, *dbuff;
4457 int sane_start = 0, sane_end = 0;
4458 ql_minidump_template_hdr_t *template_hdr;
4459 ql_minidump_entry_t *entry;
4460 uint32_t capture_mask;
4463 /* Setup parameters */
4464 template_hdr = (ql_minidump_template_hdr_t *)ha->hw.mdump_template;
4466 if (template_hdr->entry_type == TLHDR)
4469 dump_buff = (char *) ha->hw.mdump_buffer;
4471 num_of_entries = template_hdr->num_of_entries;
4473 entry = (ql_minidump_entry_t *) ((char *)template_hdr
4474 + template_hdr->first_entry_offset );
4476 template_hdr->saved_state_array[QL_OCM0_ADDR_INDX] =
4477 template_hdr->ocm_window_array[ha->pci_func];
4478 template_hdr->saved_state_array[QL_PCIE_FUNC_INDX] = ha->pci_func;
4480 capture_mask = ha->hw.mdump_capture_mask;
4481 dump_size = ha->hw.mdump_buffer_size;
4483 template_hdr->driver_capture_mask = capture_mask;
4485 QL_DPRINT80(ha, (ha->pci_dev,
4486 "%s: sane_start = %d num_of_entries = %d "
4487 "capture_mask = 0x%x dump_size = %d \n",
4488 __func__, sane_start, num_of_entries, capture_mask, dump_size));
4490 for (buff_level = 0, e_cnt = 0; e_cnt < num_of_entries; e_cnt++) {
4493 * If the entry's capture_mask does not match the capture mask,
4494 * skip the entry after marking the driver_flags indicator.
4497 if (!(entry->hdr.entry_capture_mask & capture_mask)) {
4499 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4500 entry = (ql_minidump_entry_t *) ((char *) entry
4501 + entry->hdr.entry_size);
4506 * This is ONLY needed in implementations where
4507 * the allocated capture buffer is too small to hold
4508 * all of the required entries for a given capture mask.
4509 * We need to empty the buffer contents to a file,
4510 * if possible, before processing the next entry.
4511 * If the buff_full_flag is set, no further capture will happen
4512 * and all remaining non-control entries will be skipped.
4514 if (entry->hdr.entry_capture_size != 0) {
4515 if ((buff_level + entry->hdr.entry_capture_size) >
4517 /* Try to recover by emptying buffer to file */
4518 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4519 entry = (ql_minidump_entry_t *) ((char *) entry
4520 + entry->hdr.entry_size);
4526 * Decode the entry type and process it accordingly
4529 switch (entry->hdr.entry_type) {
4534 if (sane_end == 0) {
4541 dbuff = dump_buff + buff_level;
4542 esize = ql_rdcrb(ha, (void *)entry, (void *)dbuff);
4543 ql_entry_err_chk(entry, esize);
4544 buff_level += esize;
4548 dbuff = dump_buff + buff_level;
4549 esize = ql_pollrd(ha, (void *)entry, (void *)dbuff);
4550 ql_entry_err_chk(entry, esize);
4551 buff_level += esize;
4555 dbuff = dump_buff + buff_level;
4556 esize = ql_pollrd_modify_write(ha, (void *)entry,
4558 ql_entry_err_chk(entry, esize);
4559 buff_level += esize;
4566 dbuff = dump_buff + buff_level;
4567 esize = ql_L2Cache(ha, (void *)entry, (void *)dbuff);
4569 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4571 ql_entry_err_chk(entry, esize);
4572 buff_level += esize;
4578 dbuff = dump_buff + buff_level;
4579 esize = ql_L1Cache(ha, (void *)entry, (void *)dbuff);
4580 ql_entry_err_chk(entry, esize);
4581 buff_level += esize;
4585 dbuff = dump_buff + buff_level;
4586 esize = ql_rdocm(ha, (void *)entry, (void *)dbuff);
4587 ql_entry_err_chk(entry, esize);
4588 buff_level += esize;
4592 dbuff = dump_buff + buff_level;
4593 esize = ql_rdmem(ha, (void *)entry, (void *)dbuff);
4594 ql_entry_err_chk(entry, esize);
4595 buff_level += esize;
4600 dbuff = dump_buff + buff_level;
4601 esize = ql_rdrom(ha, (void *)entry, (void *)dbuff);
4602 ql_entry_err_chk(entry, esize);
4603 buff_level += esize;
4607 dbuff = dump_buff + buff_level;
4608 esize = ql_rdmux(ha, (void *)entry, (void *)dbuff);
4609 ql_entry_err_chk(entry, esize);
4610 buff_level += esize;
4614 dbuff = dump_buff + buff_level;
4615 esize = ql_rdmux2(ha, (void *)entry, (void *)dbuff);
4616 ql_entry_err_chk(entry, esize);
4617 buff_level += esize;
4621 dbuff = dump_buff + buff_level;
4622 esize = ql_rdqueue(ha, (void *)entry, (void *)dbuff);
4623 ql_entry_err_chk(entry, esize);
4624 buff_level += esize;
4628 if ((rv = ql_cntrl(ha, template_hdr, (void *)entry))) {
4629 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4633 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4636 /* next entry in the template */
4637 entry = (ql_minidump_entry_t *) ((char *) entry
4638 + entry->hdr.entry_size);
4641 if (!sane_start || (sane_end > 1)) {
4642 device_printf(ha->pci_dev,
4643 "\n%s: Template configuration error. Check Template\n",
4647 QL_DPRINT80(ha, (ha->pci_dev, "%s: Minidump num of entries = %d\n",
4648 __func__, template_hdr->num_of_entries));
4654 * Read CRB operation.
4657 ql_rdcrb(qla_host_t *ha, ql_minidump_entry_rdcrb_t * crb_entry,
4658 uint32_t * data_buff)
4662 uint32_t op_count, addr, stride, value = 0;
4664 addr = crb_entry->addr;
4665 op_count = crb_entry->op_count;
4666 stride = crb_entry->addr_stride;
4668 for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
4670 ret = ql_rdwr_indreg32(ha, addr, &value, 1);
4675 *data_buff++ = addr;
4676 *data_buff++ = value;
4677 addr = addr + stride;
4681 * For testing purposes, return the amount of data written.
4683 return (op_count * (2 * sizeof(uint32_t)));
4691 ql_L2Cache(qla_host_t *ha, ql_minidump_entry_cache_t *cacheEntry,
4692 uint32_t * data_buff)
4698 uint32_t read_value;
4699 uint32_t addr, read_addr, cntrl_addr, tag_reg_addr, cntl_value_w;
4700 uint32_t tag_value, read_cnt;
4701 volatile uint8_t cntl_value_r;
4705 loop_cnt = cacheEntry->op_count;
4707 read_addr = cacheEntry->read_addr;
4708 cntrl_addr = cacheEntry->control_addr;
4709 cntl_value_w = (uint32_t) cacheEntry->write_value;
4711 tag_reg_addr = cacheEntry->tag_reg_addr;
4713 tag_value = cacheEntry->init_tag_value;
4714 read_cnt = cacheEntry->read_addr_cnt;
4716 for (i = 0; i < loop_cnt; i++) {
4718 ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
4722 if (cacheEntry->write_value != 0) {
4724 ret = ql_rdwr_indreg32(ha, cntrl_addr,
4730 if (cacheEntry->poll_mask != 0) {
4732 timeout = cacheEntry->poll_wait;
4734 ret = ql_rdwr_indreg32(ha, cntrl_addr, &data, 1);
4738 cntl_value_r = (uint8_t)data;
4740 while ((cntl_value_r & cacheEntry->poll_mask) != 0) {
4743 qla_mdelay(__func__, 1);
4748 ret = ql_rdwr_indreg32(ha, cntrl_addr,
4753 cntl_value_r = (uint8_t)data;
4756 /* Report a timeout error: the core dump capture failed.
4757 * Skip the remaining entries.
4758 * Write the buffer out to a file.
4759 * Use the driver-specific fields in the template header
4760 * to report this error.
4768 for (k = 0; k < read_cnt; k++) {
4770 ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4774 *data_buff++ = read_value;
4775 addr += cacheEntry->read_addr_stride;
4778 tag_value += cacheEntry->tag_value_stride;
4781 return (read_cnt * loop_cnt * sizeof(uint32_t));
4789 ql_L1Cache(qla_host_t *ha,
4790 ql_minidump_entry_cache_t *cacheEntry,
4791 uint32_t *data_buff)
4797 uint32_t read_value;
4798 uint32_t addr, read_addr, cntrl_addr, tag_reg_addr;
4799 uint32_t tag_value, read_cnt;
4800 uint32_t cntl_value_w;
4802 loop_cnt = cacheEntry->op_count;
4804 read_addr = cacheEntry->read_addr;
4805 cntrl_addr = cacheEntry->control_addr;
4806 cntl_value_w = (uint32_t) cacheEntry->write_value;
4808 tag_reg_addr = cacheEntry->tag_reg_addr;
4810 tag_value = cacheEntry->init_tag_value;
4811 read_cnt = cacheEntry->read_addr_cnt;
4813 for (i = 0; i < loop_cnt; i++) {
4815 ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
4819 ret = ql_rdwr_indreg32(ha, cntrl_addr, &cntl_value_w, 0);
4824 for (k = 0; k < read_cnt; k++) {
4826 ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4830 *data_buff++ = read_value;
4831 addr += cacheEntry->read_addr_stride;
4834 tag_value += cacheEntry->tag_value_stride;
4837 return (read_cnt * loop_cnt * sizeof(uint32_t));
4841 * Reading OCM memory
4845 ql_rdocm(qla_host_t *ha,
4846 ql_minidump_entry_rdocm_t *ocmEntry,
4847 uint32_t *data_buff)
4850 volatile uint32_t addr;
4851 volatile uint32_t value;
4853 addr = ocmEntry->read_addr;
4854 loop_cnt = ocmEntry->op_count;
4856 for (i = 0; i < loop_cnt; i++) {
4857 value = READ_REG32(ha, addr);
4858 *data_buff++ = value;
4859 addr += ocmEntry->read_addr_stride;
4861 return (loop_cnt * sizeof(value));
4869 ql_rdmem(qla_host_t *ha,
4870 ql_minidump_entry_rdmem_t *mem_entry,
4871 uint32_t *data_buff)
4875 volatile uint32_t addr;
4876 q80_offchip_mem_val_t val;
4878 addr = mem_entry->read_addr;
4880 /* size in bytes / 16 */
4881 loop_cnt = mem_entry->read_data_size / (sizeof(uint32_t) * 4);
4883 for (i = 0; i < loop_cnt; i++) {
4885 ret = ql_rdwr_offchip_mem(ha, (addr & 0x0ffffffff), &val, 1);
4889 *data_buff++ = val.data_lo;
4890 *data_buff++ = val.data_hi;
4891 *data_buff++ = val.data_ulo;
4892 *data_buff++ = val.data_uhi;
4894 addr += (sizeof(uint32_t) * 4);
4897 return (loop_cnt * (sizeof(uint32_t) * 4));
4905 ql_rdrom(qla_host_t *ha,
4906 ql_minidump_entry_rdrom_t *romEntry,
4907 uint32_t *data_buff)
4914 addr = romEntry->read_addr;
4915 loop_cnt = romEntry->read_data_size; /* This is size in bytes */
4916 loop_cnt /= sizeof(value);
4918 for (i = 0; i < loop_cnt; i++) {
4920 ret = ql_rd_flash32(ha, addr, &value);
4924 *data_buff++ = value;
4925 addr += sizeof(value);
4928 return (loop_cnt * sizeof(value));
4936 ql_rdmux(qla_host_t *ha,
4937 ql_minidump_entry_mux_t *muxEntry,
4938 uint32_t *data_buff)
4942 uint32_t read_value, sel_value;
4943 uint32_t read_addr, select_addr;
4945 select_addr = muxEntry->select_addr;
4946 sel_value = muxEntry->select_value;
4947 read_addr = muxEntry->read_addr;
4949 for (loop_cnt = 0; loop_cnt < muxEntry->op_count; loop_cnt++) {
4951 ret = ql_rdwr_indreg32(ha, select_addr, &sel_value, 0);
4955 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
4959 *data_buff++ = sel_value;
4960 *data_buff++ = read_value;
4962 sel_value += muxEntry->select_value_stride;
4965 return (loop_cnt * (2 * sizeof(uint32_t)));
4969 ql_rdmux2(qla_host_t *ha,
4970 ql_minidump_entry_mux2_t *muxEntry,
4971 uint32_t *data_buff)
4976 uint32_t select_addr_1, select_addr_2;
4977 uint32_t select_value_1, select_value_2;
4978 uint32_t select_value_count, select_value_mask;
4979 uint32_t read_addr, read_value;
4981 select_addr_1 = muxEntry->select_addr_1;
4982 select_addr_2 = muxEntry->select_addr_2;
4983 select_value_1 = muxEntry->select_value_1;
4984 select_value_2 = muxEntry->select_value_2;
4985 select_value_count = muxEntry->select_value_count;
4986 select_value_mask = muxEntry->select_value_mask;
4988 read_addr = muxEntry->read_addr;
4990 for (loop_cnt = 0; loop_cnt < muxEntry->select_value_count;
4993 uint32_t temp_sel_val;
4995 ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_1, 0);
4999 temp_sel_val = select_value_1 & select_value_mask;
5001 ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
5005 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
5009 *data_buff++ = temp_sel_val;
5010 *data_buff++ = read_value;
5012 ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_2, 0);
5016 temp_sel_val = select_value_2 & select_value_mask;
5018 ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
5022 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
5026 *data_buff++ = temp_sel_val;
5027 *data_buff++ = read_value;
5029 select_value_1 += muxEntry->select_value_stride;
5030 select_value_2 += muxEntry->select_value_stride;
5033 return (loop_cnt * (4 * sizeof(uint32_t)));
5037 * Handling Queue State Reads.
5041 ql_rdqueue(qla_host_t *ha,
5042 ql_minidump_entry_queue_t *queueEntry,
5043 uint32_t *data_buff)
5047 uint32_t read_value;
5048 uint32_t read_addr, read_stride, select_addr;
5049 uint32_t queue_id, read_cnt;
5051 read_cnt = queueEntry->read_addr_cnt;
5052 read_stride = queueEntry->read_addr_stride;
5053 select_addr = queueEntry->select_addr;
5055 for (loop_cnt = 0, queue_id = 0; loop_cnt < queueEntry->op_count;
5058 ret = ql_rdwr_indreg32(ha, select_addr, &queue_id, 0);
5062 read_addr = queueEntry->read_addr;
5064 for (k = 0; k < read_cnt; k++) {
5066 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
5070 *data_buff++ = read_value;
5071 read_addr += read_stride;
5074 queue_id += queueEntry->queue_id_stride;
5077 return (loop_cnt * (read_cnt * sizeof(uint32_t)));
5081 * Handling control entries.
5085 ql_cntrl(qla_host_t *ha,
5086 ql_minidump_template_hdr_t *template_hdr,
5087 ql_minidump_entry_cntrl_t *crbEntry)
5091 uint32_t opcode, read_value, addr, entry_addr;
5094 entry_addr = crbEntry->addr;
5096 for (count = 0; count < crbEntry->op_count; count++) {
5097 opcode = crbEntry->opcode;
5099 if (opcode & QL_DBG_OPCODE_WR) {
5101 ret = ql_rdwr_indreg32(ha, entry_addr,
5102 &crbEntry->value_1, 0);
5106 opcode &= ~QL_DBG_OPCODE_WR;
5109 if (opcode & QL_DBG_OPCODE_RW) {
5111 ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
5115 ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
5119 opcode &= ~QL_DBG_OPCODE_RW;
5122 if (opcode & QL_DBG_OPCODE_AND) {
5124 ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
5128 read_value &= crbEntry->value_2;
5129 opcode &= ~QL_DBG_OPCODE_AND;
5131 if (opcode & QL_DBG_OPCODE_OR) {
5132 read_value |= crbEntry->value_3;
5133 opcode &= ~QL_DBG_OPCODE_OR;
5136 ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
5141 if (opcode & QL_DBG_OPCODE_OR) {
5143 ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
5147 read_value |= crbEntry->value_3;
5149 ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
5153 opcode &= ~QL_DBG_OPCODE_OR;
5156 if (opcode & QL_DBG_OPCODE_POLL) {
5158 opcode &= ~QL_DBG_OPCODE_POLL;
5159 timeout = crbEntry->poll_timeout;
5162 ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
5166 while ((read_value & crbEntry->value_2)
5167 != crbEntry->value_1) {
5170 qla_mdelay(__func__, 1);
5175 ret = ql_rdwr_indreg32(ha, addr,
5183 * Report a timeout error: the core dump capture failed.
5184 * Skip the remaining entries.
5185 * Write the buffer out to a file.
5186 * Use the driver-specific fields in the template header
5187 * to report this error.
5194 if (opcode & QL_DBG_OPCODE_RDSTATE) {
5196 * decide which address to use.
5198 if (crbEntry->state_index_a) {
5199 addr = template_hdr->saved_state_array[
5200 crbEntry->state_index_a];
5205 ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
5209 template_hdr->saved_state_array[crbEntry->state_index_v]
5211 opcode &= ~QL_DBG_OPCODE_RDSTATE;
5214 if (opcode & QL_DBG_OPCODE_WRSTATE) {
5216 * decide which value to use.
5218 if (crbEntry->state_index_v) {
5219 read_value = template_hdr->saved_state_array[
5220 crbEntry->state_index_v];
5222 read_value = crbEntry->value_1;
5225 * decide which address to use.
5227 if (crbEntry->state_index_a) {
5228 addr = template_hdr->saved_state_array[
5229 crbEntry->state_index_a];
5234 ret = ql_rdwr_indreg32(ha, addr, &read_value, 0);
5238 opcode &= ~QL_DBG_OPCODE_WRSTATE;
5241 if (opcode & QL_DBG_OPCODE_MDSTATE) {
5242 /* Read value from saved state using index */
5243 read_value = template_hdr->saved_state_array[
5244 crbEntry->state_index_v];
5246 read_value <<= crbEntry->shl; /* shift left operation */
5247 read_value >>= crbEntry->shr; /* shift right operation */
5249 if (crbEntry->value_2) {
5250 /* check if AND mask is provided */
5251 read_value &= crbEntry->value_2;
5254 read_value |= crbEntry->value_3; /* OR operation */
5255 read_value += crbEntry->value_1; /* increment op */
5257 /* Write value back to state area. */
5259 template_hdr->saved_state_array[crbEntry->state_index_v]
5261 opcode &= ~QL_DBG_OPCODE_MDSTATE;
5264 entry_addr += crbEntry->addr_stride;
5271 * Handling rd poll entry.
5275 ql_pollrd(qla_host_t *ha, ql_minidump_entry_pollrd_t *entry,
5276 uint32_t *data_buff)
5280 uint32_t op_count, select_addr, select_value_stride, select_value;
5281 uint32_t read_addr, poll, mask, data_size, data;
5282 uint32_t wait_count = 0;
5284 select_addr = entry->select_addr;
5285 read_addr = entry->read_addr;
5286 select_value = entry->select_value;
5287 select_value_stride = entry->select_value_stride;
5288 op_count = entry->op_count;
5291 data_size = entry->data_size;
5293 for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
5295 ret = ql_rdwr_indreg32(ha, select_addr, &select_value, 0);
5301 while (wait_count < poll) {
5305 ret = ql_rdwr_indreg32(ha, select_addr, &temp, 1);
5309 if ( (temp & mask) != 0 ) {
5315 if (wait_count == poll) {
5316 device_printf(ha->pci_dev,
5317 "%s: Error in processing entry\n", __func__);
5318 device_printf(ha->pci_dev,
5319 "%s: wait_count <0x%x> poll <0x%x>\n",
5320 __func__, wait_count, poll);
5324 ret = ql_rdwr_indreg32(ha, read_addr, &data, 1);
5328 *data_buff++ = select_value;
5329 *data_buff++ = data;
5330 select_value = select_value + select_value_stride;
5334 * For testing purposes, return the amount of data written.
5336 return (loop_cnt * (2 * sizeof(uint32_t)));
5341 * Handling rd modify write poll entry.
5345 ql_pollrd_modify_write(qla_host_t *ha,
5346 ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
5347 uint32_t *data_buff)
5350 uint32_t addr_1, addr_2, value_1, value_2, data;
5351 uint32_t poll, mask, data_size, modify_mask;
5352 uint32_t wait_count = 0;
5354 addr_1 = entry->addr_1;
5355 addr_2 = entry->addr_2;
5356 value_1 = entry->value_1;
5357 value_2 = entry->value_2;
5361 modify_mask = entry->modify_mask;
5362 data_size = entry->data_size;
5365 ret = ql_rdwr_indreg32(ha, addr_1, &value_1, 0);
5370 while (wait_count < poll) {
5374 ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
5378 if ( (temp & mask) != 0 ) {
5384 if (wait_count == poll) {
5385 device_printf(ha->pci_dev, "%s Error in processing entry\n",
5389 ret = ql_rdwr_indreg32(ha, addr_2, &data, 1);
5393 data = (data & modify_mask);
5395 ret = ql_rdwr_indreg32(ha, addr_2, &data, 0);
5399 ret = ql_rdwr_indreg32(ha, addr_1, &value_2, 0);
5405 while (wait_count < poll) {
5409 ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
5413 if ( (temp & mask) != 0 ) {
5418 *data_buff++ = addr_2;
5419 *data_buff++ = data;
5423 * For testing purposes, return the amount of data written.
5425 return (2 * sizeof(uint32_t));