/*
 * Copyright (c) 2013-2016 QLogic Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Author : David C Somayajulu, QLogic Corporation, Aliso Viejo, CA 92656.
 * Content: Contains Hardware dependent functions
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "ql_inline.h"
#include "ql_minidump.h"
static void qla_del_rcv_cntxt(qla_host_t *ha);
static int qla_init_rcv_cntxt(qla_host_t *ha);
static void qla_del_xmt_cntxt(qla_host_t *ha);
static int qla_init_xmt_cntxt(qla_host_t *ha);
static void qla_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx);
static int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
    uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause);
static int qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx,
    uint32_t num_intrs, uint32_t create);
static int qla_config_rss(qla_host_t *ha, uint16_t cntxt_id);
static int qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id,
    int tenable, int rcv);
static int qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode);
static int qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id);

static int qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd,
    uint8_t *hdr);
static int qla_hw_add_all_mcast(qla_host_t *ha);
static int qla_hw_del_all_mcast(qla_host_t *ha);
static int qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds);

static int qla_init_nic_func(qla_host_t *ha);
static int qla_stop_nic_func(qla_host_t *ha);
static int qla_query_fw_dcbx_caps(qla_host_t *ha);
static int qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits);
static int qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits);
static void qla_get_quick_stats(qla_host_t *ha);
static int qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode);
static int qla_get_cam_search_mode(qla_host_t *ha);

static void ql_minidump_free(qla_host_t *ha);
qla_sysctl_get_drvr_stats(SYSCTL_HANDLER_ARGS)

    err = sysctl_handle_int(oidp, &ret, 0, req);

    if (err || !req->newptr)
        return (err);

    ha = (qla_host_t *)arg1;

    for (i = 0; i < ha->hw.num_sds_rings; i++) {

        device_printf(ha->pci_dev,
            "%s: sds_ring[%d] = %p\n", __func__, i,
            (void *)ha->hw.sds[i].intr_count);

        device_printf(ha->pci_dev,
            "%s: sds_ring[%d].spurious_intr_count = %p\n", __func__,
            i, (void *)ha->hw.sds[i].spurious_intr_count);

        device_printf(ha->pci_dev,
            "%s: sds_ring[%d].rx_free = %d\n", __func__, i,
            ha->hw.sds[i].rx_free);
    }

    for (i = 0; i < ha->hw.num_tx_rings; i++)
        device_printf(ha->pci_dev,
            "%s: tx[%d] = %p\n", __func__, i,
            (void *)ha->tx_ring[i].count);

    for (i = 0; i < ha->hw.num_rds_rings; i++)
        device_printf(ha->pci_dev,
            "%s: rds_ring[%d] = %p\n", __func__, i,
            (void *)ha->hw.rds[i].count);

    device_printf(ha->pci_dev, "%s: lro_pkt_count = %p\n", __func__,
        (void *)ha->lro_pkt_count);

    device_printf(ha->pci_dev, "%s: lro_bytes = %p\n", __func__,
        (void *)ha->lro_bytes);

#ifdef QL_ENABLE_ISCSI_TLV
    device_printf(ha->pci_dev, "%s: iscsi_pkts = %p\n", __func__,
        (void *)ha->hw.iscsi_pkt_count);
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */

    return (err);
qla_sysctl_get_quick_stats(SYSCTL_HANDLER_ARGS)

    err = sysctl_handle_int(oidp, &ret, 0, req);

    if (err || !req->newptr)
        return (err);

    ha = (qla_host_t *)arg1;
    qla_get_quick_stats(ha);

    return (err);
qla_stop_pegs(qla_host_t *ha)

    uint32_t val = 1;

    ql_rdwr_indreg32(ha, Q8_CRB_PEG_0, &val, 0);
    ql_rdwr_indreg32(ha, Q8_CRB_PEG_1, &val, 0);
    ql_rdwr_indreg32(ha, Q8_CRB_PEG_2, &val, 0);
    ql_rdwr_indreg32(ha, Q8_CRB_PEG_3, &val, 0);
    ql_rdwr_indreg32(ha, Q8_CRB_PEG_4, &val, 0);
    device_printf(ha->pci_dev, "%s PEGS HALTED!!!!!\n", __func__);

qla_sysctl_stop_pegs(SYSCTL_HANDLER_ARGS)

    err = sysctl_handle_int(oidp, &ret, 0, req);

    if (err || !req->newptr)
        return (err);

    ha = (qla_host_t *)arg1;
    (void)QLA_LOCK(ha, __func__, 0);
    qla_stop_pegs(ha);
    QLA_UNLOCK(ha, __func__);

#endif /* #ifdef QL_DBG */
qla_validate_set_port_cfg_bit(uint32_t bits)

    if ((bits & 0xF) > 1)
        return (1);

    if (((bits >> 4) & 0xF) > 2)
        return (1);

    if (((bits >> 8) & 0xF) > 2)
        return (1);

    return (0);
qla_sysctl_port_cfg(SYSCTL_HANDLER_ARGS)

    err = sysctl_handle_int(oidp, &ret, 0, req);

    if (err || !req->newptr)
        return (err);

    if ((qla_validate_set_port_cfg_bit((uint32_t)ret) == 0)) {

        ha = (qla_host_t *)arg1;

        err = qla_get_port_config(ha, &cfg_bits);
        if (err)
            goto qla_sysctl_set_port_cfg_exit;

        if ((ret & 0xF) == 1)
            cfg_bits |= Q8_PORT_CFG_BITS_DCBX_ENABLE;
        else
            cfg_bits &= ~Q8_PORT_CFG_BITS_DCBX_ENABLE;

        ret = ret >> 4;
        cfg_bits &= ~Q8_PORT_CFG_BITS_PAUSE_CFG_MASK;

        if ((ret & 0xF) == 0) {
            cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_DISABLED;
        } else if ((ret & 0xF) == 1) {
            cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_STD;
        } else {
            cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_PPM;
        }

        ret = ret >> 4;
        cfg_bits &= ~Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK;

        if (ret == 0) {
            cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT_RCV;
        } else if (ret == 1) {
            cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT;
        } else {
            cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_RCV;
        }

        err = qla_set_port_config(ha, cfg_bits);
    } else {
        ha = (qla_host_t *)arg1;

        err = qla_get_port_config(ha, &cfg_bits);
    }

qla_sysctl_set_port_cfg_exit:
    return (err);
qla_sysctl_set_cam_search_mode(SYSCTL_HANDLER_ARGS)

    err = sysctl_handle_int(oidp, &ret, 0, req);

    if (err || !req->newptr)
        return (err);

    ha = (qla_host_t *)arg1;

    if ((ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_INTERNAL) ||
        (ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_AUTO)) {
        err = qla_set_cam_search_mode(ha, (uint32_t)ret);
    } else {
        device_printf(ha->pci_dev, "%s: ret = %d\n", __func__, ret);
    }

    return (err);
qla_sysctl_get_cam_search_mode(SYSCTL_HANDLER_ARGS)

    err = sysctl_handle_int(oidp, &ret, 0, req);

    if (err || !req->newptr)
        return (err);

    ha = (qla_host_t *)arg1;
    err = qla_get_cam_search_mode(ha);

    return (err);
/*
 * Name: ql_hw_add_sysctls
 * Function: Add P3Plus specific sysctls
 */
ql_hw_add_sysctls(qla_host_t *ha)
    ha->hw.num_sds_rings = MAX_SDS_RINGS;
    ha->hw.num_rds_rings = MAX_RDS_RINGS;
    ha->hw.num_tx_rings = NUM_TX_RINGS;

    SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "num_rds_rings", CTLFLAG_RD, &ha->hw.num_rds_rings,
        ha->hw.num_rds_rings, "Number of Rcv Descriptor Rings");

    SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "num_sds_rings", CTLFLAG_RD, &ha->hw.num_sds_rings,
        ha->hw.num_sds_rings, "Number of Status Descriptor Rings");

    SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->hw.num_tx_rings,
        ha->hw.num_tx_rings, "Number of Transmit Rings");

    SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "tx_ring_index", CTLFLAG_RW, &ha->txr_idx,
        ha->txr_idx, "Tx Ring Used");

    SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "drvr_stats", CTLTYPE_INT | CTLFLAG_RW,
        (void *)ha, 0,
        qla_sysctl_get_drvr_stats, "I", "Driver Maintained Statistics");

    SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "quick_stats", CTLTYPE_INT | CTLFLAG_RW,
        (void *)ha, 0,
        qla_sysctl_get_quick_stats, "I", "Quick Statistics");

    SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "max_tx_segs", CTLFLAG_RD, &ha->hw.max_tx_segs,
        ha->hw.max_tx_segs, "Max # of Segments in a non-TSO pkt");

    ha->hw.sds_cidx_thres = 32;
    SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "sds_cidx_thres", CTLFLAG_RW, &ha->hw.sds_cidx_thres,
        ha->hw.sds_cidx_thres,
        "Number of SDS entries to process before updating"
        " SDS Ring Consumer Index");

    ha->hw.rds_pidx_thres = 32;
    SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "rds_pidx_thres", CTLFLAG_RW, &ha->hw.rds_pidx_thres,
        ha->hw.rds_pidx_thres,
        "Number of Rcv Rings Entries to post before updating"
        " RDS Ring Producer Index");

    ha->hw.rcv_intr_coalesce = (3 << 16) | 256;
    SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "rcv_intr_coalesce", CTLFLAG_RW,
        &ha->hw.rcv_intr_coalesce,
        ha->hw.rcv_intr_coalesce,
        "Rcv Intr Coalescing Parameters\n"
        "\tbits 15:0 max packets\n"
        "\tbits 31:16 max micro-seconds to wait\n"
        "\tifconfig <if> down && ifconfig <if> up\n"
        "\tto take effect\n");

    ha->hw.xmt_intr_coalesce = (64 << 16) | 64;
    SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "xmt_intr_coalesce", CTLFLAG_RW,
        &ha->hw.xmt_intr_coalesce,
        ha->hw.xmt_intr_coalesce,
        "Xmt Intr Coalescing Parameters\n"
        "\tbits 15:0 max packets\n"
        "\tbits 31:16 max micro-seconds to wait\n"
        "\tifconfig <if> down && ifconfig <if> up\n"
        "\tto take effect\n");
    SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "port_cfg", CTLTYPE_INT | CTLFLAG_RW,
        (void *)ha, 0,
        qla_sysctl_port_cfg, "I",
        "Set Port Configuration if values below "
        "otherwise Get Port Configuration\n"
        "\tBits 0-3 : 1 = DCBX Enable; 0 = DCBX Disable\n"
        "\tBits 4-7 : 0 = no pause; 1 = std ; 2 = ppm\n"
        "\tBits 8-11: std pause cfg; 0 = xmt and rcv;"
        " 1 = xmt only; 2 = rcv only;\n");
    SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "set_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW,
        (void *)ha, 0,
        qla_sysctl_set_cam_search_mode, "I",
        "Set CAM Search Mode\n"
        "\t 1 = search mode internal\n"
        "\t 2 = search mode auto\n");

    SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "get_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW,
        (void *)ha, 0,
        qla_sysctl_get_cam_search_mode, "I",
        "Get CAM Search Mode\n"
        "\t 1 = search mode internal\n"
        "\t 2 = search mode auto\n");
    ha->hw.enable_9kb = 1;

    SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "enable_9kb", CTLFLAG_RW, &ha->hw.enable_9kb,
        ha->hw.enable_9kb, "Enable 9Kbyte Buffers when MTU = 9000");

    ha->hw.mdump_active = 0;
    SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "minidump_active", CTLFLAG_RW, &ha->hw.mdump_active,
        ha->hw.mdump_active,
        "Minidump retrieval is Active");

    ha->hw.mdump_done = 0;
    SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "mdump_done", CTLFLAG_RW,
        &ha->hw.mdump_done, ha->hw.mdump_done,
        "Minidump has been done and available for retrieval");

    ha->hw.mdump_capture_mask = 0xF;
    SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "minidump_capture_mask", CTLFLAG_RW,
        &ha->hw.mdump_capture_mask, ha->hw.mdump_capture_mask,
        "Minidump capture mask");
#ifdef QL_DBG
    SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "err_inject",
        CTLFLAG_RW, &ha->err_inject, ha->err_inject,
        "Error to be injected\n"
        "\t\t\t 0: No Errors\n"
        "\t\t\t 1: rcv: rxb struct invalid\n"
        "\t\t\t 2: rcv: mp == NULL\n"
        "\t\t\t 3: lro: rxb struct invalid\n"
        "\t\t\t 4: lro: mp == NULL\n"
        "\t\t\t 5: rcv: num handles invalid\n"
        "\t\t\t 6: reg: indirect reg rd_wr failure\n"
        "\t\t\t 7: ocm: offchip memory rd_wr failure\n"
        "\t\t\t 8: mbx: mailbox command failure\n"
        "\t\t\t 9: heartbeat failure\n"
        "\t\t\t A: temperature failure\n"
        "\t\t\t 11: m_getcl or m_getjcl failure\n");
    SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "peg_stop", CTLTYPE_INT | CTLFLAG_RW,
        (void *)ha, 0,
        qla_sysctl_stop_pegs, "I", "Peg Stop");

#endif /* #ifdef QL_DBG */
    ha->hw.user_pri_nic = 0;
    SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "user_pri_nic", CTLFLAG_RW, &ha->hw.user_pri_nic,
        ha->hw.user_pri_nic,
        "VLAN Tag User Priority for Normal Ethernet Packets");

    ha->hw.user_pri_iscsi = 4;
    SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "user_pri_iscsi", CTLFLAG_RW, &ha->hw.user_pri_iscsi,
        ha->hw.user_pri_iscsi,
        "VLAN Tag User Priority for iSCSI Packets");
ql_hw_link_status(qla_host_t *ha)

    device_printf(ha->pci_dev, "cable_oui\t\t 0x%08x\n", ha->hw.cable_oui);

    if (ha->hw.link_up) {
        device_printf(ha->pci_dev, "link Up\n");
    } else {
        device_printf(ha->pci_dev, "link Down\n");
    }

    if (ha->hw.flags.fduplex) {
        device_printf(ha->pci_dev, "Full Duplex\n");
    } else {
        device_printf(ha->pci_dev, "Half Duplex\n");
    }

    if (ha->hw.flags.autoneg) {
        device_printf(ha->pci_dev, "Auto Negotiation Enabled\n");
    } else {
        device_printf(ha->pci_dev, "Auto Negotiation Disabled\n");
    }

    switch (ha->hw.link_speed) {

        device_printf(ha->pci_dev, "link speed\t\t 10Gbps\n");

        device_printf(ha->pci_dev, "link speed\t\t 1Gbps\n");

        device_printf(ha->pci_dev, "link speed\t\t 100Mbps\n");

        device_printf(ha->pci_dev, "link speed\t\t Unknown\n");

    switch (ha->hw.module_type) {

        device_printf(ha->pci_dev, "Module Type 10GBase-LRM\n");

        device_printf(ha->pci_dev, "Module Type 10GBase-LR\n");

        device_printf(ha->pci_dev, "Module Type 10GBase-SR\n");

        device_printf(ha->pci_dev,
            "Module Type 10GE Passive Copper(Compliant)[%d m]\n",
            ha->hw.cable_length);

        device_printf(ha->pci_dev, "Module Type 10GE Active"
            " Limiting Copper(Compliant)[%d m]\n",
            ha->hw.cable_length);

        device_printf(ha->pci_dev,
            "Module Type 10GE Passive Copper"
            " (Legacy, Best Effort)[%d m]\n",
            ha->hw.cable_length);

        device_printf(ha->pci_dev, "Module Type 1000Base-SX\n");

        device_printf(ha->pci_dev, "Module Type 1000Base-LX\n");

        device_printf(ha->pci_dev, "Module Type 1000Base-CX\n");

        device_printf(ha->pci_dev, "Module Type 1000Base-T\n");

        device_printf(ha->pci_dev, "Module Type 1GE Passive Copper"
            " (Legacy, Best Effort)\n");

        device_printf(ha->pci_dev, "Unknown Module Type 0x%x\n",
            ha->hw.module_type);

    if (ha->hw.link_faults == 1)
        device_printf(ha->pci_dev, "SFP Power Fault\n");
/*
 * Name: ql_free_dma
 * Function: Frees the DMA'able memory allocated in ql_alloc_dma()
 */
ql_free_dma(qla_host_t *ha)

    if (ha->hw.dma_buf.flags.sds_ring) {
        for (i = 0; i < ha->hw.num_sds_rings; i++) {
            ql_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]);
        }
        ha->hw.dma_buf.flags.sds_ring = 0;
    }

    if (ha->hw.dma_buf.flags.rds_ring) {
        for (i = 0; i < ha->hw.num_rds_rings; i++) {
            ql_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]);
        }
        ha->hw.dma_buf.flags.rds_ring = 0;
    }

    if (ha->hw.dma_buf.flags.tx_ring) {
        ql_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring);
        ha->hw.dma_buf.flags.tx_ring = 0;
    }
    ql_minidump_free(ha);
/*
 * Name: ql_alloc_dma
 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
 */
ql_alloc_dma(qla_host_t *ha)

    uint32_t i, j, size, tx_ring_size;
    qla_hw_tx_cntxt_t *tx_cntxt;

    QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

    /*
     * Allocate Transmit Ring
     */
    tx_ring_size = (sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS);
    size = (tx_ring_size * ha->hw.num_tx_rings);

    hw->dma_buf.tx_ring.alignment = 8;
    hw->dma_buf.tx_ring.size = size + PAGE_SIZE;

    if (ql_alloc_dmabuf(ha, &hw->dma_buf.tx_ring)) {
        device_printf(dev, "%s: tx ring alloc failed\n", __func__);
        goto ql_alloc_dma_exit;
    }

    vaddr = (uint8_t *)hw->dma_buf.tx_ring.dma_b;
    paddr = hw->dma_buf.tx_ring.dma_addr;

    for (i = 0; i < ha->hw.num_tx_rings; i++) {
        tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

        tx_cntxt->tx_ring_base = (q80_tx_cmd_t *)vaddr;
        tx_cntxt->tx_ring_paddr = paddr;

        vaddr += tx_ring_size;
        paddr += tx_ring_size;
    }

    for (i = 0; i < ha->hw.num_tx_rings; i++) {
        tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

        tx_cntxt->tx_cons = (uint32_t *)vaddr;
        tx_cntxt->tx_cons_paddr = paddr;

        vaddr += sizeof (uint32_t);
        paddr += sizeof (uint32_t);
    }

    ha->hw.dma_buf.flags.tx_ring = 1;

    QL_DPRINT2(ha, (dev, "%s: tx_ring phys %p virt %p\n",
        __func__, (void *)(hw->dma_buf.tx_ring.dma_addr),
        hw->dma_buf.tx_ring.dma_b));

    /*
     * Allocate Receive Descriptor Rings
     */

    for (i = 0; i < hw->num_rds_rings; i++) {

        hw->dma_buf.rds_ring[i].alignment = 8;
        hw->dma_buf.rds_ring[i].size =
            (sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS;

        if (ql_alloc_dmabuf(ha, &hw->dma_buf.rds_ring[i])) {
            device_printf(dev, "%s: rds ring[%d] alloc failed\n",
                __func__, i);

            for (j = 0; j < i; j++)
                ql_free_dmabuf(ha, &hw->dma_buf.rds_ring[j]);

            goto ql_alloc_dma_exit;
        }

        QL_DPRINT4(ha, (dev, "%s: rx_ring[%d] phys %p virt %p\n",
            __func__, i, (void *)(hw->dma_buf.rds_ring[i].dma_addr),
            hw->dma_buf.rds_ring[i].dma_b));
    }

    hw->dma_buf.flags.rds_ring = 1;

    /*
     * Allocate Status Descriptor Rings
     */

    for (i = 0; i < hw->num_sds_rings; i++) {
        hw->dma_buf.sds_ring[i].alignment = 8;
        hw->dma_buf.sds_ring[i].size =
            (sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS;

        if (ql_alloc_dmabuf(ha, &hw->dma_buf.sds_ring[i])) {
            device_printf(dev, "%s: sds ring alloc failed\n",
                __func__);

            for (j = 0; j < i; j++)
                ql_free_dmabuf(ha, &hw->dma_buf.sds_ring[j]);

            goto ql_alloc_dma_exit;
        }

        QL_DPRINT4(ha, (dev, "%s: sds_ring[%d] phys %p virt %p\n",
            __func__, i,
            (void *)(hw->dma_buf.sds_ring[i].dma_addr),
            hw->dma_buf.sds_ring[i].dma_b));
    }

    for (i = 0; i < hw->num_sds_rings; i++) {
        hw->sds[i].sds_ring_base =
            (q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b;
    }

    hw->dma_buf.flags.sds_ring = 1;
#define Q8_MBX_MSEC_DELAY	5000
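/*
 * Poll budget for the mailbox handshake below: each wait loop spins up to
 * Q8_MBX_MSEC_DELAY iterations with a 1 ms qla_mdelay() per pass, i.e.
 * roughly a 5 second timeout in each direction.
 */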
qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
    uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause)

    if (QL_ERR_INJECT(ha, INJCT_MBX_CMD_FAILURE)) {
        ha->qla_initiate_recovery = 1;
        goto exit_qla_mbx_cmd;
    }

    i = Q8_MBX_MSEC_DELAY;
    while (i) {
        data = READ_REG32(ha, Q8_HOST_MBOX_CNTRL);
        if (data == 0)
            break;
        qla_mdelay(__func__, 1);
        i--;
    }

    if (i == 0) {
        device_printf(ha->pci_dev, "%s: host_mbx_cntrl 0x%08x\n",
            __func__, data);
        ha->qla_initiate_recovery = 1;
        goto exit_qla_mbx_cmd;
    }

    for (i = 0; i < n_hmbox; i++) {
        WRITE_REG32(ha, (Q8_HOST_MBOX0 + (i << 2)), *h_mbox);
        h_mbox++;
    }

    WRITE_REG32(ha, Q8_HOST_MBOX_CNTRL, 0x1);

    i = Q8_MBX_MSEC_DELAY;
    while (i) {
        data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);

        if ((data & 0x3) == 1) {
            data = READ_REG32(ha, Q8_FW_MBOX0);
            if ((data & 0xF000) != 0x8000)
                break;
        }
        qla_mdelay(__func__, 1);
        i--;
    }

    if (i == 0) {
        device_printf(ha->pci_dev, "%s: fw_mbx_cntrl 0x%08x\n",
            __func__, data);
        ha->qla_initiate_recovery = 1;
        goto exit_qla_mbx_cmd;
    }

    for (i = 0; i < n_fwmbox; i++) {
        *fw_mbox++ = READ_REG32(ha, (Q8_FW_MBOX0 + (i << 2)));
    }

    WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
    WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
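/*
 * Illustrative sketch (not part of the original driver): every firmware
 * command in this file follows the same calling convention around
 * qla_mbx_cmd() -- build the request in ha->hw.mbox, issue it, then check
 * the response status. The request/response types below are just one pair
 * reused from later in this file; only the pattern is the point here.
 */
#if 0	/* example only */
static int
qla_mbx_example(qla_host_t *ha)
{
	uint32_t err;
	q80_hw_config_t *req;
	q80_hw_config_rsp_t *rsp;

	req = (q80_hw_config_t *)ha->hw.mbox;
	bzero(req, sizeof(q80_hw_config_t));

	req->opcode = Q8_MBX_HW_CONFIG;			/* command opcode */
	req->count_version = (sizeof(q80_hw_config_t) >> 2);
	req->count_version |= Q8_MBX_CMD_VERSION;	/* dword count + version */

	if (qla_mbx_cmd(ha, (uint32_t *)req, (sizeof(q80_hw_config_t) >> 2),
	    ha->hw.mbox, (sizeof(q80_hw_config_rsp_t) >> 2), 0))
		return (-1);				/* handshake timed out */

	rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;
	err = Q8_MBX_RSP_STATUS(rsp->regcnt_status);	/* nonzero on failure */
	return (err ? -1 : 0);
}
#endif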
qla_get_nic_partition(qla_host_t *ha, uint32_t *supports_9kb,
    uint32_t *num_rcvq)

    device_t dev = ha->pci_dev;

    bzero(ha->hw.mbox, (sizeof (uint32_t) * Q8_NUM_MBOX));

    mbox[0] = Q8_MBX_GET_NIC_PARTITION | (0x2 << 16) | (0x2 << 29);

    if (qla_mbx_cmd(ha, mbox, 2, mbox, 19, 0)) {
        device_printf(dev, "%s: failed0\n", __func__);
        return (-1);
    }

    if (supports_9kb != NULL) {
        if (mbox[16] & 0x80) /* bit 7 of mbox 16 */
            *supports_9kb = 1;
        else
            *supports_9kb = 0;
    }

    if (num_rcvq != NULL)
        *num_rcvq = ((mbox[6] >> 16) & 0xFFFF);

    if ((err != 1) && (err != 0)) {
        device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
        return (-1);
    }

    return (0);
qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx, uint32_t num_intrs,
    uint32_t create)

    device_t dev = ha->pci_dev;
    q80_config_intr_t *c_intr;
    q80_config_intr_rsp_t *c_intr_rsp;

    c_intr = (q80_config_intr_t *)ha->hw.mbox;
    bzero(c_intr, (sizeof (q80_config_intr_t)));

    c_intr->opcode = Q8_MBX_CONFIG_INTR;

    c_intr->count_version = (sizeof (q80_config_intr_t) >> 2);
    c_intr->count_version |= Q8_MBX_CMD_VERSION;

    c_intr->nentries = num_intrs;

    for (i = 0; i < num_intrs; i++) {
        if (create) {
            c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_CREATE;
            c_intr->intr[i].msix_index = start_idx + 1 + i;
        } else {
            c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_DELETE;
            c_intr->intr[i].msix_index =
                ha->hw.intr_id[(start_idx + i)];
        }

        c_intr->intr[i].cmd_type |= Q8_MBX_CONFIG_INTR_TYPE_MSI_X;
    }

    if (qla_mbx_cmd(ha, (uint32_t *)c_intr,
        (sizeof (q80_config_intr_t) >> 2),
        ha->hw.mbox, (sizeof (q80_config_intr_rsp_t) >> 2), 0)) {
        device_printf(dev, "%s: failed0\n", __func__);
        return (-1);
    }

    c_intr_rsp = (q80_config_intr_rsp_t *)ha->hw.mbox;

    err = Q8_MBX_RSP_STATUS(c_intr_rsp->regcnt_status);

    if (err) {
        device_printf(dev, "%s: failed1 [0x%08x, %d]\n", __func__, err,
            c_intr_rsp->nentries);

        for (i = 0; i < c_intr_rsp->nentries; i++) {
            device_printf(dev, "%s: [%d]:[0x%x 0x%x 0x%x]\n",
                __func__, i,
                c_intr_rsp->intr[i].status,
                c_intr_rsp->intr[i].intr_id,
                c_intr_rsp->intr[i].intr_src);
        }

        return (-1);
    }

    for (i = 0; ((i < num_intrs) && create); i++) {
        if (!c_intr_rsp->intr[i].status) {
            ha->hw.intr_id[(start_idx + i)] =
                c_intr_rsp->intr[i].intr_id;
            ha->hw.intr_src[(start_idx + i)] =
                c_intr_rsp->intr[i].intr_src;
        }
    }
/*
 * Name: qla_config_rss
 * Function: Configure RSS for the context/interface.
 */
static const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL,
    0x8030f20c77cb2da3ULL,
    0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
    0x255b0ec26d5a56daULL };
qla_config_rss(qla_host_t *ha, uint16_t cntxt_id)

    q80_config_rss_t *c_rss;
    q80_config_rss_rsp_t *c_rss_rsp;
    device_t dev = ha->pci_dev;

    c_rss = (q80_config_rss_t *)ha->hw.mbox;
    bzero(c_rss, (sizeof (q80_config_rss_t)));

    c_rss->opcode = Q8_MBX_CONFIG_RSS;

    c_rss->count_version = (sizeof (q80_config_rss_t) >> 2);
    c_rss->count_version |= Q8_MBX_CMD_VERSION;

    c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP_IP |
        Q8_MBX_RSS_HASH_TYPE_IPV6_TCP_IP);
    //c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP |
    //        Q8_MBX_RSS_HASH_TYPE_IPV6_TCP);

    c_rss->flags = Q8_MBX_RSS_FLAGS_ENABLE_RSS;
    c_rss->flags |= Q8_MBX_RSS_FLAGS_USE_IND_TABLE;

    c_rss->indtbl_mask = Q8_MBX_RSS_INDTBL_MASK;

    c_rss->indtbl_mask |= Q8_MBX_RSS_FLAGS_MULTI_RSS_VALID;
    c_rss->flags |= Q8_MBX_RSS_FLAGS_TYPE_CRSS;

    c_rss->cntxt_id = cntxt_id;

    for (i = 0; i < 5; i++) {
        c_rss->rss_key[i] = rss_key[i];
    }

    if (qla_mbx_cmd(ha, (uint32_t *)c_rss,
        (sizeof (q80_config_rss_t) >> 2),
        ha->hw.mbox, (sizeof(q80_config_rss_rsp_t) >> 2), 0)) {
        device_printf(dev, "%s: failed0\n", __func__);
        return (-1);
    }

    c_rss_rsp = (q80_config_rss_rsp_t *)ha->hw.mbox;

    err = Q8_MBX_RSP_STATUS(c_rss_rsp->regcnt_status);

    if (err) {
        device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
        return (-1);
    }

    return (0);
qla_set_rss_ind_table(qla_host_t *ha, uint32_t start_idx, uint32_t count,
    uint16_t cntxt_id, uint8_t *ind_table)

    q80_config_rss_ind_table_t *c_rss_ind;
    q80_config_rss_ind_table_rsp_t *c_rss_ind_rsp;
    device_t dev = ha->pci_dev;

    if ((count > Q8_RSS_IND_TBL_SIZE) ||
        ((start_idx + count - 1) > Q8_RSS_IND_TBL_MAX_IDX)) {
        device_printf(dev, "%s: illegal count [%d, %d]\n", __func__,
            count, start_idx);
        return (-1);
    }

    c_rss_ind = (q80_config_rss_ind_table_t *)ha->hw.mbox;
    bzero(c_rss_ind, sizeof (q80_config_rss_ind_table_t));

    c_rss_ind->opcode = Q8_MBX_CONFIG_RSS_TABLE;
    c_rss_ind->count_version = (sizeof (q80_config_rss_ind_table_t) >> 2);
    c_rss_ind->count_version |= Q8_MBX_CMD_VERSION;

    c_rss_ind->start_idx = start_idx;
    c_rss_ind->end_idx = start_idx + count - 1;
    c_rss_ind->cntxt_id = cntxt_id;
    bcopy(ind_table, c_rss_ind->ind_table, count);

    if (qla_mbx_cmd(ha, (uint32_t *)c_rss_ind,
        (sizeof (q80_config_rss_ind_table_t) >> 2), ha->hw.mbox,
        (sizeof(q80_config_rss_ind_table_rsp_t) >> 2), 0)) {
        device_printf(dev, "%s: failed0\n", __func__);
        return (-1);
    }

    c_rss_ind_rsp = (q80_config_rss_ind_table_rsp_t *)ha->hw.mbox;
    err = Q8_MBX_RSP_STATUS(c_rss_ind_rsp->regcnt_status);

    if (err) {
        device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
        return (-1);
    }

    return (0);
/*
 * Name: qla_config_intr_coalesce
 * Function: Configure Interrupt Coalescing.
 */
qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable,
    int rcv)

    q80_config_intr_coalesc_t *intrc;
    q80_config_intr_coalesc_rsp_t *intrc_rsp;
    device_t dev = ha->pci_dev;

    intrc = (q80_config_intr_coalesc_t *)ha->hw.mbox;
    bzero(intrc, (sizeof (q80_config_intr_coalesc_t)));

    intrc->opcode = Q8_MBX_CONFIG_INTR_COALESCE;
    intrc->count_version = (sizeof (q80_config_intr_coalesc_t) >> 2);
    intrc->count_version |= Q8_MBX_CMD_VERSION;

    if (rcv) {
        intrc->flags = Q8_MBX_INTRC_FLAGS_RCV;
        intrc->max_pkts = ha->hw.rcv_intr_coalesce & 0xFFFF;
        intrc->max_mswait = (ha->hw.rcv_intr_coalesce >> 16) & 0xFFFF;
    } else {
        intrc->flags = Q8_MBX_INTRC_FLAGS_XMT;
        intrc->max_pkts = ha->hw.xmt_intr_coalesce & 0xFFFF;
        intrc->max_mswait = (ha->hw.xmt_intr_coalesce >> 16) & 0xFFFF;
    }

    intrc->cntxt_id = cntxt_id;

    if (tenable) {
        intrc->flags |= Q8_MBX_INTRC_FLAGS_PERIODIC;
        intrc->timer_type = Q8_MBX_INTRC_TIMER_PERIODIC;

        for (i = 0; i < ha->hw.num_sds_rings; i++) {
            intrc->sds_ring_mask |= (1 << i);
        }
        intrc->ms_timeout = 1000;
    }

    if (qla_mbx_cmd(ha, (uint32_t *)intrc,
        (sizeof (q80_config_intr_coalesc_t) >> 2),
        ha->hw.mbox, (sizeof(q80_config_intr_coalesc_rsp_t) >> 2), 0)) {
        device_printf(dev, "%s: failed0\n", __func__);
        return (-1);
    }

    intrc_rsp = (q80_config_intr_coalesc_rsp_t *)ha->hw.mbox;

    err = Q8_MBX_RSP_STATUS(intrc_rsp->regcnt_status);

    if (err) {
        device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
        return (-1);
    }

    return (0);
/*
 * Name: qla_config_mac_addr
 * Function: binds a MAC address to the context/interface.
 *	Can be unicast, multicast or broadcast.
 */
qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac,
    uint32_t num_mac)

    q80_config_mac_addr_t *cmac;
    q80_config_mac_addr_rsp_t *cmac_rsp;
    device_t dev = ha->pci_dev;
    uint8_t *mac_cpy = mac_addr;

    if (num_mac > Q8_MAX_MAC_ADDRS) {
        device_printf(dev, "%s: %s num_mac [0x%x] > Q8_MAX_MAC_ADDRS\n",
            __func__, (add_mac ? "Add" : "Del"), num_mac);
        return (-1);
    }

    cmac = (q80_config_mac_addr_t *)ha->hw.mbox;
    bzero(cmac, (sizeof (q80_config_mac_addr_t)));

    cmac->opcode = Q8_MBX_CONFIG_MAC_ADDR;
    cmac->count_version = sizeof (q80_config_mac_addr_t) >> 2;
    cmac->count_version |= Q8_MBX_CMD_VERSION;

    if (add_mac)
        cmac->cmd = Q8_MBX_CMAC_CMD_ADD_MAC_ADDR;
    else
        cmac->cmd = Q8_MBX_CMAC_CMD_DEL_MAC_ADDR;

    cmac->cmd |= Q8_MBX_CMAC_CMD_CAM_INGRESS;

    cmac->nmac_entries = num_mac;
    cmac->cntxt_id = ha->hw.rcv_cntxt_id;

    for (i = 0; i < num_mac; i++) {
        bcopy(mac_addr, cmac->mac_addr[i].addr, Q8_ETHER_ADDR_LEN);
        mac_addr = mac_addr + ETHER_ADDR_LEN;
    }

    if (qla_mbx_cmd(ha, (uint32_t *)cmac,
        (sizeof (q80_config_mac_addr_t) >> 2),
        ha->hw.mbox, (sizeof(q80_config_mac_addr_rsp_t) >> 2), 1)) {
        device_printf(dev, "%s: %s failed0\n", __func__,
            (add_mac ? "Add" : "Del"));
        return (-1);
    }

    cmac_rsp = (q80_config_mac_addr_rsp_t *)ha->hw.mbox;

    err = Q8_MBX_RSP_STATUS(cmac_rsp->regcnt_status);

    if (err) {
        device_printf(dev, "%s: %s failed1 [0x%08x]\n", __func__,
            (add_mac ? "Add" : "Del"), err);
        for (i = 0; i < num_mac; i++) {
            device_printf(dev, "%s: %02x:%02x:%02x:%02x:%02x:%02x\n",
                __func__, mac_cpy[0], mac_cpy[1], mac_cpy[2],
                mac_cpy[3], mac_cpy[4], mac_cpy[5]);
            mac_cpy += ETHER_ADDR_LEN;
        }
        return (-1);
    }

    return (0);
/*
 * Name: qla_set_mac_rcv_mode
 * Function: Enable/Disable AllMulticast and Promiscuous Modes.
 */
qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode)

    q80_config_mac_rcv_mode_t *rcv_mode;
    q80_config_mac_rcv_mode_rsp_t *rcv_mode_rsp;
    device_t dev = ha->pci_dev;

    rcv_mode = (q80_config_mac_rcv_mode_t *)ha->hw.mbox;
    bzero(rcv_mode, (sizeof (q80_config_mac_rcv_mode_t)));

    rcv_mode->opcode = Q8_MBX_CONFIG_MAC_RX_MODE;
    rcv_mode->count_version = sizeof (q80_config_mac_rcv_mode_t) >> 2;
    rcv_mode->count_version |= Q8_MBX_CMD_VERSION;

    rcv_mode->mode = mode;

    rcv_mode->cntxt_id = ha->hw.rcv_cntxt_id;

    if (qla_mbx_cmd(ha, (uint32_t *)rcv_mode,
        (sizeof (q80_config_mac_rcv_mode_t) >> 2),
        ha->hw.mbox, (sizeof(q80_config_mac_rcv_mode_rsp_t) >> 2), 1)) {
        device_printf(dev, "%s: failed0\n", __func__);
        return (-1);
    }

    rcv_mode_rsp = (q80_config_mac_rcv_mode_rsp_t *)ha->hw.mbox;

    err = Q8_MBX_RSP_STATUS(rcv_mode_rsp->regcnt_status);

    if (err) {
        device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
        return (-1);
    }

    return (0);
ql_set_promisc(qla_host_t *ha)

    ha->hw.mac_rcv_mode |= Q8_MBX_MAC_RCV_PROMISC_ENABLE;
    ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
    return (ret);

qla_reset_promisc(qla_host_t *ha)

    ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_RCV_PROMISC_ENABLE;
    (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);

ql_set_allmulti(qla_host_t *ha)

    ha->hw.mac_rcv_mode |= Q8_MBX_MAC_ALL_MULTI_ENABLE;
    ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
    return (ret);

qla_reset_allmulti(qla_host_t *ha)

    ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_ALL_MULTI_ENABLE;
    (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
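/*
 * Illustrative note (not from the original source): the four helpers above
 * are the natural hooks for IFF_PROMISC/IFF_ALLMULTI transitions in the
 * interface ioctl path, e.g.:
 *
 *	if (ifp->if_flags & IFF_PROMISC)
 *		ret = ql_set_promisc(ha);
 *	else
 *		qla_reset_promisc(ha);
 */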
/*
 * Name: ql_set_max_mtu
 *
 * Sets the maximum transfer unit size for the specified rcv context.
 */
ql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id)

    q80_set_max_mtu_t *max_mtu;
    q80_set_max_mtu_rsp_t *max_mtu_rsp;

    max_mtu = (q80_set_max_mtu_t *)ha->hw.mbox;
    bzero(max_mtu, (sizeof (q80_set_max_mtu_t)));

    max_mtu->opcode = Q8_MBX_SET_MAX_MTU;
    max_mtu->count_version = (sizeof (q80_set_max_mtu_t) >> 2);
    max_mtu->count_version |= Q8_MBX_CMD_VERSION;

    max_mtu->cntxt_id = cntxt_id;
    max_mtu->mtu = mtu;

    if (qla_mbx_cmd(ha, (uint32_t *)max_mtu,
        (sizeof (q80_set_max_mtu_t) >> 2),
        ha->hw.mbox, (sizeof (q80_set_max_mtu_rsp_t) >> 2), 1)) {
        device_printf(dev, "%s: failed\n", __func__);
        return (-1);
    }

    max_mtu_rsp = (q80_set_max_mtu_rsp_t *)ha->hw.mbox;

    err = Q8_MBX_RSP_STATUS(max_mtu_rsp->regcnt_status);

    if (err) {
        device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
        return (-1);
    }

    return (0);
qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id)

    q80_link_event_t *lnk;
    q80_link_event_rsp_t *lnk_rsp;

    lnk = (q80_link_event_t *)ha->hw.mbox;
    bzero(lnk, (sizeof (q80_link_event_t)));

    lnk->opcode = Q8_MBX_LINK_EVENT_REQ;
    lnk->count_version = (sizeof (q80_link_event_t) >> 2);
    lnk->count_version |= Q8_MBX_CMD_VERSION;

    lnk->cntxt_id = cntxt_id;
    lnk->cmd = Q8_LINK_EVENT_CMD_ENABLE_ASYNC;

    if (qla_mbx_cmd(ha, (uint32_t *)lnk, (sizeof (q80_link_event_t) >> 2),
        ha->hw.mbox, (sizeof (q80_link_event_rsp_t) >> 2), 0)) {
        device_printf(dev, "%s: failed\n", __func__);
        return (-1);
    }

    lnk_rsp = (q80_link_event_rsp_t *)ha->hw.mbox;

    err = Q8_MBX_RSP_STATUS(lnk_rsp->regcnt_status);

    if (err) {
        device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
        return (-1);
    }

    return (0);
qla_config_fw_lro(qla_host_t *ha, uint16_t cntxt_id)

    q80_config_fw_lro_t *fw_lro;
    q80_config_fw_lro_rsp_t *fw_lro_rsp;

    fw_lro = (q80_config_fw_lro_t *)ha->hw.mbox;
    bzero(fw_lro, sizeof(q80_config_fw_lro_t));

    fw_lro->opcode = Q8_MBX_CONFIG_FW_LRO;
    fw_lro->count_version = (sizeof (q80_config_fw_lro_t) >> 2);
    fw_lro->count_version |= Q8_MBX_CMD_VERSION;

    fw_lro->flags |= Q8_MBX_FW_LRO_IPV4 | Q8_MBX_FW_LRO_IPV4_WO_DST_IP_CHK;
    fw_lro->flags |= Q8_MBX_FW_LRO_IPV6 | Q8_MBX_FW_LRO_IPV6_WO_DST_IP_CHK;

    fw_lro->cntxt_id = cntxt_id;

    if (qla_mbx_cmd(ha, (uint32_t *)fw_lro,
        (sizeof (q80_config_fw_lro_t) >> 2),
        ha->hw.mbox, (sizeof (q80_config_fw_lro_rsp_t) >> 2), 0)) {
        device_printf(dev, "%s: failed\n", __func__);
        return (-1);
    }

    fw_lro_rsp = (q80_config_fw_lro_rsp_t *)ha->hw.mbox;

    err = Q8_MBX_RSP_STATUS(fw_lro_rsp->regcnt_status);

    if (err) {
        device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
        return (-1);
    }

    return (0);
qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode)

    q80_hw_config_t *hw_config;
    q80_hw_config_rsp_t *hw_config_rsp;

    hw_config = (q80_hw_config_t *)ha->hw.mbox;
    bzero(hw_config, sizeof (q80_hw_config_t));

    hw_config->opcode = Q8_MBX_HW_CONFIG;
    hw_config->count_version = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE_COUNT;
    hw_config->count_version |= Q8_MBX_CMD_VERSION;

    hw_config->cmd = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE;

    hw_config->u.set_cam_search_mode.mode = search_mode;

    if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
        (sizeof (q80_hw_config_t) >> 2),
        ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
        device_printf(dev, "%s: failed\n", __func__);
        return (-1);
    }

    hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;

    err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);

    if (err) {
        device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
        return (-1);
    }

    return (0);
qla_get_cam_search_mode(qla_host_t *ha)

    q80_hw_config_t *hw_config;
    q80_hw_config_rsp_t *hw_config_rsp;

    hw_config = (q80_hw_config_t *)ha->hw.mbox;
    bzero(hw_config, sizeof (q80_hw_config_t));

    hw_config->opcode = Q8_MBX_HW_CONFIG;
    hw_config->count_version = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE_COUNT;
    hw_config->count_version |= Q8_MBX_CMD_VERSION;

    hw_config->cmd = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE;

    if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
        (sizeof (q80_hw_config_t) >> 2),
        ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
        device_printf(dev, "%s: failed\n", __func__);
        return (-1);
    }

    hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;

    err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);

    if (err) {
        device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
        return (-1);
    }

    device_printf(dev, "%s: cam search mode [0x%08x]\n", __func__,
        hw_config_rsp->u.get_cam_search_mode.mode);

    return (0);
qla_xmt_stats(qla_host_t *ha, q80_xmt_stats_t *xstat, int i)

    device_t dev = ha->pci_dev;

    if (i < ha->hw.num_tx_rings) {
        device_printf(dev, "%s[%d]: total_bytes\t\t%" PRIu64 "\n",
            __func__, i, xstat->total_bytes);
        device_printf(dev, "%s[%d]: total_pkts\t\t%" PRIu64 "\n",
            __func__, i, xstat->total_pkts);
        device_printf(dev, "%s[%d]: errors\t\t%" PRIu64 "\n",
            __func__, i, xstat->errors);
        device_printf(dev, "%s[%d]: pkts_dropped\t%" PRIu64 "\n",
            __func__, i, xstat->pkts_dropped);
        device_printf(dev, "%s[%d]: switch_pkts\t\t%" PRIu64 "\n",
            __func__, i, xstat->switch_pkts);
        device_printf(dev, "%s[%d]: num_buffers\t\t%" PRIu64 "\n",
            __func__, i, xstat->num_buffers);
    } else {
        device_printf(dev, "%s: total_bytes\t\t\t%" PRIu64 "\n",
            __func__, xstat->total_bytes);
        device_printf(dev, "%s: total_pkts\t\t\t%" PRIu64 "\n",
            __func__, xstat->total_pkts);
        device_printf(dev, "%s: errors\t\t\t%" PRIu64 "\n",
            __func__, xstat->errors);
        device_printf(dev, "%s: pkts_dropped\t\t\t%" PRIu64 "\n",
            __func__, xstat->pkts_dropped);
        device_printf(dev, "%s: switch_pkts\t\t\t%" PRIu64 "\n",
            __func__, xstat->switch_pkts);
        device_printf(dev, "%s: num_buffers\t\t\t%" PRIu64 "\n",
            __func__, xstat->num_buffers);
    }
qla_rcv_stats(qla_host_t *ha, q80_rcv_stats_t *rstat)

    device_t dev = ha->pci_dev;

    device_printf(dev, "%s: total_bytes\t\t\t%" PRIu64 "\n", __func__,
        rstat->total_bytes);
    device_printf(dev, "%s: total_pkts\t\t\t%" PRIu64 "\n", __func__,
        rstat->total_pkts);
    device_printf(dev, "%s: lro_pkt_count\t\t%" PRIu64 "\n", __func__,
        rstat->lro_pkt_count);
    device_printf(dev, "%s: sw_pkt_count\t\t\t%" PRIu64 "\n", __func__,
        rstat->sw_pkt_count);
    device_printf(dev, "%s: ip_chksum_err\t\t%" PRIu64 "\n", __func__,
        rstat->ip_chksum_err);
    device_printf(dev, "%s: pkts_wo_acntxts\t\t%" PRIu64 "\n", __func__,
        rstat->pkts_wo_acntxts);
    device_printf(dev, "%s: pkts_dropped_no_sds_card\t%" PRIu64 "\n",
        __func__, rstat->pkts_dropped_no_sds_card);
    device_printf(dev, "%s: pkts_dropped_no_sds_host\t%" PRIu64 "\n",
        __func__, rstat->pkts_dropped_no_sds_host);
    device_printf(dev, "%s: oversized_pkts\t\t%" PRIu64 "\n", __func__,
        rstat->oversized_pkts);
    device_printf(dev, "%s: pkts_dropped_no_rds\t\t%" PRIu64 "\n",
        __func__, rstat->pkts_dropped_no_rds);
    device_printf(dev, "%s: unxpctd_mcast_pkts\t\t%" PRIu64 "\n",
        __func__, rstat->unxpctd_mcast_pkts);
    device_printf(dev, "%s: re1_fbq_error\t\t%" PRIu64 "\n", __func__,
        rstat->re1_fbq_error);
    device_printf(dev, "%s: invalid_mac_addr\t\t%" PRIu64 "\n", __func__,
        rstat->invalid_mac_addr);
    device_printf(dev, "%s: rds_prime_trys\t\t%" PRIu64 "\n", __func__,
        rstat->rds_prime_trys);
    device_printf(dev, "%s: rds_prime_success\t\t%" PRIu64 "\n", __func__,
        rstat->rds_prime_success);
    device_printf(dev, "%s: lro_flows_added\t\t%" PRIu64 "\n", __func__,
        rstat->lro_flows_added);
    device_printf(dev, "%s: lro_flows_deleted\t\t%" PRIu64 "\n", __func__,
        rstat->lro_flows_deleted);
    device_printf(dev, "%s: lro_flows_active\t\t%" PRIu64 "\n", __func__,
        rstat->lro_flows_active);
    device_printf(dev, "%s: pkts_droped_unknown\t\t%" PRIu64 "\n",
        __func__, rstat->pkts_droped_unknown);
qla_mac_stats(qla_host_t *ha, q80_mac_stats_t *mstat)

    device_t dev = ha->pci_dev;

    device_printf(dev, "%s: xmt_frames\t\t\t%" PRIu64 "\n", __func__,
        mstat->xmt_frames);
    device_printf(dev, "%s: xmt_bytes\t\t\t%" PRIu64 "\n", __func__,
        mstat->xmt_bytes);
    device_printf(dev, "%s: xmt_mcast_pkts\t\t%" PRIu64 "\n", __func__,
        mstat->xmt_mcast_pkts);
    device_printf(dev, "%s: xmt_bcast_pkts\t\t%" PRIu64 "\n", __func__,
        mstat->xmt_bcast_pkts);
    device_printf(dev, "%s: xmt_pause_frames\t\t%" PRIu64 "\n", __func__,
        mstat->xmt_pause_frames);
    device_printf(dev, "%s: xmt_cntrl_pkts\t\t%" PRIu64 "\n", __func__,
        mstat->xmt_cntrl_pkts);
    device_printf(dev, "%s: xmt_pkt_lt_64bytes\t\t%" PRIu64 "\n",
        __func__, mstat->xmt_pkt_lt_64bytes);
    device_printf(dev, "%s: xmt_pkt_lt_127bytes\t\t%" PRIu64 "\n",
        __func__, mstat->xmt_pkt_lt_127bytes);
    device_printf(dev, "%s: xmt_pkt_lt_255bytes\t\t%" PRIu64 "\n",
        __func__, mstat->xmt_pkt_lt_255bytes);
    device_printf(dev, "%s: xmt_pkt_lt_511bytes\t\t%" PRIu64 "\n",
        __func__, mstat->xmt_pkt_lt_511bytes);
    device_printf(dev, "%s: xmt_pkt_lt_1023bytes\t\t%" PRIu64 "\n",
        __func__, mstat->xmt_pkt_lt_1023bytes);
    device_printf(dev, "%s: xmt_pkt_lt_1518bytes\t\t%" PRIu64 "\n",
        __func__, mstat->xmt_pkt_lt_1518bytes);
    device_printf(dev, "%s: xmt_pkt_gt_1518bytes\t\t%" PRIu64 "\n",
        __func__, mstat->xmt_pkt_gt_1518bytes);

    device_printf(dev, "%s: rcv_frames\t\t\t%" PRIu64 "\n", __func__,
        mstat->rcv_frames);
    device_printf(dev, "%s: rcv_bytes\t\t\t%" PRIu64 "\n", __func__,
        mstat->rcv_bytes);
    device_printf(dev, "%s: rcv_mcast_pkts\t\t%" PRIu64 "\n", __func__,
        mstat->rcv_mcast_pkts);
    device_printf(dev, "%s: rcv_bcast_pkts\t\t%" PRIu64 "\n", __func__,
        mstat->rcv_bcast_pkts);
    device_printf(dev, "%s: rcv_pause_frames\t\t%" PRIu64 "\n", __func__,
        mstat->rcv_pause_frames);
    device_printf(dev, "%s: rcv_cntrl_pkts\t\t%" PRIu64 "\n", __func__,
        mstat->rcv_cntrl_pkts);
    device_printf(dev, "%s: rcv_pkt_lt_64bytes\t\t%" PRIu64 "\n",
        __func__, mstat->rcv_pkt_lt_64bytes);
    device_printf(dev, "%s: rcv_pkt_lt_127bytes\t\t%" PRIu64 "\n",
        __func__, mstat->rcv_pkt_lt_127bytes);
    device_printf(dev, "%s: rcv_pkt_lt_255bytes\t\t%" PRIu64 "\n",
        __func__, mstat->rcv_pkt_lt_255bytes);
    device_printf(dev, "%s: rcv_pkt_lt_511bytes\t\t%" PRIu64 "\n",
        __func__, mstat->rcv_pkt_lt_511bytes);
    device_printf(dev, "%s: rcv_pkt_lt_1023bytes\t\t%" PRIu64 "\n",
        __func__, mstat->rcv_pkt_lt_1023bytes);
    device_printf(dev, "%s: rcv_pkt_lt_1518bytes\t\t%" PRIu64 "\n",
        __func__, mstat->rcv_pkt_lt_1518bytes);
    device_printf(dev, "%s: rcv_pkt_gt_1518bytes\t\t%" PRIu64 "\n",
        __func__, mstat->rcv_pkt_gt_1518bytes);

    device_printf(dev, "%s: rcv_len_error\t\t%" PRIu64 "\n", __func__,
        mstat->rcv_len_error);
    device_printf(dev, "%s: rcv_len_small\t\t%" PRIu64 "\n", __func__,
        mstat->rcv_len_small);
    device_printf(dev, "%s: rcv_len_large\t\t%" PRIu64 "\n", __func__,
        mstat->rcv_len_large);
    device_printf(dev, "%s: rcv_jabber\t\t\t%" PRIu64 "\n", __func__,
        mstat->rcv_jabber);
    device_printf(dev, "%s: rcv_dropped\t\t\t%" PRIu64 "\n", __func__,
        mstat->rcv_dropped);
    device_printf(dev, "%s: fcs_error\t\t\t%" PRIu64 "\n", __func__,
        mstat->fcs_error);
    device_printf(dev, "%s: align_error\t\t\t%" PRIu64 "\n", __func__,
        mstat->align_error);
qla_get_hw_stats(qla_host_t *ha, uint32_t cmd, uint32_t rsp_size)

    q80_get_stats_t *stat;
    q80_get_stats_rsp_t *stat_rsp;

    stat = (q80_get_stats_t *)ha->hw.mbox;
    bzero(stat, (sizeof (q80_get_stats_t)));

    stat->opcode = Q8_MBX_GET_STATS;
    stat->count_version = 2;
    stat->count_version |= Q8_MBX_CMD_VERSION;

    stat->cmd = cmd;

    if (qla_mbx_cmd(ha, (uint32_t *)stat, 2,
        ha->hw.mbox, (rsp_size >> 2), 0)) {
        device_printf(dev, "%s: failed\n", __func__);
        return (-1);
    }

    stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;

    err = Q8_MBX_RSP_STATUS(stat_rsp->regcnt_status);
ql_get_stats(qla_host_t *ha)

    q80_get_stats_rsp_t *stat_rsp;
    q80_mac_stats_t *mstat;
    q80_xmt_stats_t *xstat;
    q80_rcv_stats_t *rstat;

    stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;

    /*
     * Get MAC Statistics
     */
    cmd = Q8_GET_STATS_CMD_TYPE_MAC;
    // cmd |= Q8_GET_STATS_CMD_CLEAR;

    cmd |= ((ha->pci_func & 0x1) << 16);

    if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
        mstat = (q80_mac_stats_t *)&stat_rsp->u.mac;
        qla_mac_stats(ha, mstat);
    } else {
        device_printf(ha->pci_dev, "%s: mac failed [0x%08x]\n",
            __func__, ha->hw.mbox[0]);
    }

    /*
     * Get RCV Statistics
     */
    cmd = Q8_GET_STATS_CMD_RCV | Q8_GET_STATS_CMD_TYPE_CNTXT;
    // cmd |= Q8_GET_STATS_CMD_CLEAR;
    cmd |= (ha->hw.rcv_cntxt_id << 16);

    if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
        rstat = (q80_rcv_stats_t *)&stat_rsp->u.rcv;
        qla_rcv_stats(ha, rstat);
    } else {
        device_printf(ha->pci_dev, "%s: rcv failed [0x%08x]\n",
            __func__, ha->hw.mbox[0]);
    }

    /*
     * Get XMT Statistics
     */
    for (i = 0; i < ha->hw.num_tx_rings; i++) {
        cmd = Q8_GET_STATS_CMD_XMT | Q8_GET_STATS_CMD_TYPE_CNTXT;
        // cmd |= Q8_GET_STATS_CMD_CLEAR;
        cmd |= (ha->hw.tx_cntxt[i].tx_cntxt_id << 16);

        if (qla_get_hw_stats(ha, cmd, sizeof(q80_get_stats_rsp_t))
            == 0) {
            xstat = (q80_xmt_stats_t *)&stat_rsp->u.xmt;
            qla_xmt_stats(ha, xstat, i);
        } else {
            device_printf(ha->pci_dev, "%s: xmt failed [0x%08x]\n",
                __func__, ha->hw.mbox[0]);
        }
    }
qla_get_quick_stats(qla_host_t *ha)

    q80_get_mac_rcv_xmt_stats_rsp_t *stat_rsp;
    q80_mac_stats_t *mstat;
    q80_xmt_stats_t *xstat;
    q80_rcv_stats_t *rstat;

    stat_rsp = (q80_get_mac_rcv_xmt_stats_rsp_t *)ha->hw.mbox;

    cmd = Q8_GET_STATS_CMD_TYPE_ALL;
    // cmd |= Q8_GET_STATS_CMD_CLEAR;

    // cmd |= ((ha->pci_func & 0x3) << 16);
    cmd |= (0xFFFF << 16);

    if (qla_get_hw_stats(ha, cmd,
        sizeof (q80_get_mac_rcv_xmt_stats_rsp_t)) == 0) {

        mstat = (q80_mac_stats_t *)&stat_rsp->mac;
        rstat = (q80_rcv_stats_t *)&stat_rsp->rcv;
        xstat = (q80_xmt_stats_t *)&stat_rsp->xmt;
        qla_mac_stats(ha, mstat);
        qla_rcv_stats(ha, rstat);
        qla_xmt_stats(ha, xstat, ha->hw.num_tx_rings);
    } else {
        device_printf(ha->pci_dev, "%s: failed [0x%08x]\n",
            __func__, ha->hw.mbox[0]);
    }
/*
 * Name: qla_tx_tso
 * Function: Checks if the packet to be transmitted is a candidate for
 *	Large TCP Segment Offload. If yes, the appropriate fields in the Tx
 *	Ring Structure are plugged in.
 */
qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr)

    struct ether_vlan_header *eh;
    struct ip *ip = NULL;
    struct ip6_hdr *ip6 = NULL;
    struct tcphdr *th = NULL;
    uint32_t ehdrlen, hdrlen, ip_hlen, tcp_hlen, tcp_opt_off;
    uint16_t etype, opcode, offload = 1;

    eh = mtod(mp, struct ether_vlan_header *);

    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
        ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
        etype = ntohs(eh->evl_proto);
    } else {
        ehdrlen = ETHER_HDR_LEN;
        etype = ntohs(eh->evl_encap_proto);
    }

    switch (etype) {
    case ETHERTYPE_IP:

        tcp_opt_off = ehdrlen + sizeof(struct ip) +
            sizeof(struct tcphdr);

        if (mp->m_len < tcp_opt_off) {
            m_copydata(mp, 0, tcp_opt_off, hdr);
            ip = (struct ip *)(hdr + ehdrlen);
        } else {
            ip = (struct ip *)(mp->m_data + ehdrlen);
        }

        ip_hlen = ip->ip_hl << 2;
        opcode = Q8_TX_CMD_OP_XMT_TCP_LSO;

        if ((ip->ip_p != IPPROTO_TCP) ||
            (ip_hlen != sizeof (struct ip))) {
            /* IP Options are not supported */
            offload = 0;
        } else
            th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
        break;

    case ETHERTYPE_IPV6:

        tcp_opt_off = ehdrlen + sizeof(struct ip6_hdr) +
            sizeof (struct tcphdr);

        if (mp->m_len < tcp_opt_off) {
            m_copydata(mp, 0, tcp_opt_off, hdr);
            ip6 = (struct ip6_hdr *)(hdr + ehdrlen);
        } else {
            ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
        }

        ip_hlen = sizeof(struct ip6_hdr);
        opcode = Q8_TX_CMD_OP_XMT_TCP_LSO_IPV6;

        if (ip6->ip6_nxt != IPPROTO_TCP) {
            //device_printf(dev, "%s: ipv6\n", __func__);
            offload = 0;
        } else
            th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
        break;

    default:
        QL_DPRINT8(ha, (dev, "%s: type!=ip\n", __func__));
        offload = 0;
        break;
    }

    if (!offload)
        return (-1);

    tcp_hlen = th->th_off << 2;
    hdrlen = ehdrlen + ip_hlen + tcp_hlen;

    if (mp->m_len < hdrlen) {
        if (mp->m_len < tcp_opt_off) {
            if (tcp_hlen > sizeof(struct tcphdr)) {
                m_copydata(mp, tcp_opt_off,
                    (tcp_hlen - sizeof(struct tcphdr)),
                    &hdr[tcp_opt_off]);
            }
        } else {
            m_copydata(mp, 0, hdrlen, hdr);
        }
    }

    tx_cmd->mss = mp->m_pkthdr.tso_segsz;

    tx_cmd->flags_opcode = opcode;
    tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;
    tx_cmd->total_hdr_len = hdrlen;

    /* Check for Multicast: least significant bit of MSB == 1 */
    if (eh->evl_dhost[0] & 0x01) {
        tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST;
    }

    if (mp->m_len < hdrlen) {
        printf("%d\n", hdrlen);
        return (-1);
    }

    return (0);
/*
 * Name: qla_tx_chksum
 * Function: Checks if the packet to be transmitted is a candidate for
 *	TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx
 *	Ring Structure are plugged in.
 */
qla_tx_chksum(qla_host_t *ha, struct mbuf *mp, uint32_t *op_code,
    uint32_t *tcp_hdr_off)

    struct ether_vlan_header *eh;
    struct ip *ip;
    struct ip6_hdr *ip6;
    uint32_t ehdrlen, ip_hlen;
    uint16_t etype, opcode, offload = 1;

    uint8_t buf[sizeof(struct ip6_hdr)];

    if ((mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) == 0)
        return (-1);

    eh = mtod(mp, struct ether_vlan_header *);

    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
        ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
        etype = ntohs(eh->evl_proto);
    } else {
        ehdrlen = ETHER_HDR_LEN;
        etype = ntohs(eh->evl_encap_proto);
    }

    switch (etype) {
    case ETHERTYPE_IP:
        ip = (struct ip *)(mp->m_data + ehdrlen);

        ip_hlen = sizeof (struct ip);

        if (mp->m_len < (ehdrlen + ip_hlen)) {
            m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
            ip = (struct ip *)buf;
        }

        if (ip->ip_p == IPPROTO_TCP)
            opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM;
        else if (ip->ip_p == IPPROTO_UDP)
            opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM;
        else {
            //device_printf(dev, "%s: ipv4\n", __func__);
            offload = 0;
        }
        break;

    case ETHERTYPE_IPV6:
        ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);

        ip_hlen = sizeof(struct ip6_hdr);

        if (mp->m_len < (ehdrlen + ip_hlen)) {
            m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
                buf);
            ip6 = (struct ip6_hdr *)buf;
        }

        if (ip6->ip6_nxt == IPPROTO_TCP)
            opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6;
        else if (ip6->ip6_nxt == IPPROTO_UDP)
            opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6;
        else {
            //device_printf(dev, "%s: ipv6\n", __func__);
            offload = 0;
        }
        break;

    default:
        offload = 0;
        break;
    }

    if (!offload)
        return (-1);

    *op_code = opcode;
    *tcp_hdr_off = (ip_hlen + ehdrlen);

    return (0);
#define QLA_TX_MIN_FREE	2

/*
 * Name: ql_hw_send
 * Function: Transmits a packet. It first checks if the packet is a
 *	candidate for Large TCP Segment Offload and then for UDP/TCP checksum
 *	offload. If either of these criteria are not met, it is transmitted
 *	as a regular ethernet frame.
 */
ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
    uint32_t tx_idx, struct mbuf *mp, uint32_t txr_idx, uint32_t iscsi_pdu)

    struct ether_vlan_header *eh;
    qla_hw_t *hw = &ha->hw;
    q80_tx_cmd_t *tx_cmd, tso_cmd;
    bus_dma_segment_t *c_seg;
    uint32_t num_tx_cmds, hdr_len = 0;
    uint32_t total_length = 0, bytes, tx_cmd_count = 0, txr_next;
    uint8_t *src = NULL, *dst = NULL;
    uint8_t frame_hdr[QL_FRAME_HDR_SIZE];
    uint32_t op_code = 0;
    uint32_t tcp_hdr_off = 0;

    /*
     * Always make sure there is at least one empty slot in the tx_ring;
     * the tx_ring is considered full when only one entry is available.
     */
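    /*
     * Each tx cmd descriptor carries up to Q8_TX_CMD_MAX_SEGMENTS DMA
     * segments (4, which is what the ">> 2" below assumes), so the segment
     * count is rounded up to whole descriptors.
     */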
    num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2;

    total_length = mp->m_pkthdr.len;
    if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
        device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
            __func__, total_length);
        return (-1);
    }

    eh = mtod(mp, struct ether_vlan_header *);

    if (mp->m_pkthdr.csum_flags & CSUM_TSO) {

        bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t));

        src = frame_hdr;
        ret = qla_tx_tso(ha, mp, &tso_cmd, src);

        if (ret == 0) {
            /* find the additional tx_cmd descriptors required */

            if (mp->m_flags & M_VLANTAG)
                tso_cmd.total_hdr_len += ETHER_VLAN_ENCAP_LEN;

            hdr_len = tso_cmd.total_hdr_len;

            bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
            bytes = QL_MIN(bytes, hdr_len);

            num_tx_cmds++;
            hdr_len -= bytes;

            while (hdr_len) {
                bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
                hdr_len -= bytes;
                num_tx_cmds++;
            }
            hdr_len = tso_cmd.total_hdr_len;

            src = (uint8_t *)eh;
        }
    } else {
        (void)qla_tx_chksum(ha, mp, &op_code, &tcp_hdr_off);
    }

    if (iscsi_pdu)
        ha->hw.iscsi_pkt_count++;
    if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
        qla_hw_tx_done_locked(ha, txr_idx);
        if (hw->tx_cntxt[txr_idx].txr_free <=
            (num_tx_cmds + QLA_TX_MIN_FREE)) {
            QL_DPRINT8(ha, (dev, "%s: (hw->txr_free <= "
                "(num_tx_cmds + QLA_TX_MIN_FREE))\n",
                __func__));
            return (-1);
        }
    }

    tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[tx_idx];

    if (!(mp->m_pkthdr.csum_flags & CSUM_TSO)) {

        if (nsegs > ha->hw.max_tx_segs)
            ha->hw.max_tx_segs = nsegs;

        bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));

        if (op_code) {
            tx_cmd->flags_opcode = op_code;
            tx_cmd->tcp_hdr_off = tcp_hdr_off;
        } else {
            tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER;
        }
    } else {
        bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t));
        ha->tx_tso_frames++;
    }

    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
        tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED;

        if (iscsi_pdu)
            eh->evl_tag |= ha->hw.user_pri_iscsi << 13;

    } else if (mp->m_flags & M_VLANTAG) {

        if (hdr_len) { /* TSO */
            tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED |
                Q8_TX_CMD_FLAGS_HW_VLAN_ID);
            tx_cmd->tcp_hdr_off += ETHER_VLAN_ENCAP_LEN;
        } else
            tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_HW_VLAN_ID;

        ha->hw_vlan_tx_frames++;
        tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag;

        if (iscsi_pdu) {
            tx_cmd->vlan_tci |= ha->hw.user_pri_iscsi << 13;
            mp->m_pkthdr.ether_vtag = tx_cmd->vlan_tci;
        }
    }

    tx_cmd->n_bufs = (uint8_t)nsegs;
    tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF);
    tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8)));
    tx_cmd->cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func);
    c_seg = segs;

    while (1) {
        for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) {
            switch (i) {
            case 0:
                tx_cmd->buf1_addr = c_seg->ds_addr;
                tx_cmd->buf1_len = c_seg->ds_len;
                break;
            case 1:
                tx_cmd->buf2_addr = c_seg->ds_addr;
                tx_cmd->buf2_len = c_seg->ds_len;
                break;
            case 2:
                tx_cmd->buf3_addr = c_seg->ds_addr;
                tx_cmd->buf3_len = c_seg->ds_len;
                break;
            case 3:
                tx_cmd->buf4_addr = c_seg->ds_addr;
                tx_cmd->buf4_len = c_seg->ds_len;
                break;
            }

            c_seg++;
            nsegs--;
        }
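        /*
         * Advance the ring's producer index; NUM_TX_DESCRIPTORS must be
         * a power of two for the mask below to wrap correctly.
         */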
        txr_next = hw->tx_cntxt[txr_idx].txr_next =
            (hw->tx_cntxt[txr_idx].txr_next + 1) &
            (NUM_TX_DESCRIPTORS - 1);
        tx_cmd_count++;

        if (!nsegs)
            break;

        tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
        bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
    }

    if (mp->m_pkthdr.csum_flags & CSUM_TSO) {

        /* TSO : Copy the header in the following tx cmd descriptors */

        txr_next = hw->tx_cntxt[txr_idx].txr_next;

        tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
        bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));

        bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
        bytes = QL_MIN(bytes, hdr_len);

        dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN;

        if (mp->m_flags & M_VLANTAG) {
            /* first copy the src/dst MAC addresses */
            bcopy(src, dst, (ETHER_ADDR_LEN * 2));
            dst += (ETHER_ADDR_LEN * 2);
            src += (ETHER_ADDR_LEN * 2);

            *((uint16_t *)dst) = htons(ETHERTYPE_VLAN);
            dst += 2;
            *((uint16_t *)dst) = htons(mp->m_pkthdr.ether_vtag);
            dst += 2;

            /* bytes left in src header */
            hdr_len -= ((ETHER_ADDR_LEN * 2) +
                ETHER_VLAN_ENCAP_LEN);

            /* bytes left in TxCmd Entry */
            bytes -= ((ETHER_ADDR_LEN * 2) + ETHER_VLAN_ENCAP_LEN);

            bcopy(src, dst, bytes);
        } else {
            bcopy(src, dst, bytes);
        }

        txr_next = hw->tx_cntxt[txr_idx].txr_next =
            (hw->tx_cntxt[txr_idx].txr_next + 1) &
            (NUM_TX_DESCRIPTORS - 1);
        tx_cmd_count++;

        while (hdr_len) {
            tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
            bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));

            bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);

            bcopy(src, tx_cmd, bytes);

            txr_next = hw->tx_cntxt[txr_idx].txr_next =
                (hw->tx_cntxt[txr_idx].txr_next + 1) &
                (NUM_TX_DESCRIPTORS - 1);
            tx_cmd_count++;
        }
    }

    hw->tx_cntxt[txr_idx].txr_free =
        hw->tx_cntxt[txr_idx].txr_free - tx_cmd_count;

    QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->tx_cntxt[txr_idx].txr_next,
        txr_idx);

    QL_DPRINT8(ha, (dev, "%s: return\n", __func__));
#define Q8_CONFIG_IND_TBL_SIZE	32 /* < Q8_RSS_IND_TBL_SIZE and power of 2 */

qla_config_rss_ind_table(qla_host_t *ha)

    uint8_t rss_ind_tbl[Q8_CONFIG_IND_TBL_SIZE];

    for (i = 0; i < Q8_CONFIG_IND_TBL_SIZE; i++) {
        rss_ind_tbl[i] = i % ha->hw.num_sds_rings;
    }
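    /*
     * Worked example: with 4 SDS rings the 32-entry block above becomes
     * 0,1,2,3,0,1,2,3,... so receive flows are spread round-robin across
     * the rings; the block is then replicated below to cover all
     * Q8_RSS_IND_TBL_MAX_IDX + 1 indirection-table entries.
     */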
    for (i = 0; i <= Q8_RSS_IND_TBL_MAX_IDX;
        i = i + Q8_CONFIG_IND_TBL_SIZE) {

        if ((i + Q8_CONFIG_IND_TBL_SIZE) > Q8_RSS_IND_TBL_MAX_IDX) {
            count = Q8_RSS_IND_TBL_MAX_IDX - i + 1;
        } else {
            count = Q8_CONFIG_IND_TBL_SIZE;
        }

        if (qla_set_rss_ind_table(ha, i, count, ha->hw.rcv_cntxt_id,
            rss_ind_tbl))
            return (-1);
    }

    return (0);
/*
 * Name: ql_del_hw_if
 * Function: Destroys the hardware specific entities corresponding to an
 *	Ethernet Interface
 */
ql_del_hw_if(qla_host_t *ha)

    (void)qla_stop_nic_func(ha);

    qla_del_rcv_cntxt(ha);

    qla_del_xmt_cntxt(ha);

    if (ha->hw.flags.init_intr_cnxt) {
        for (i = 0; i < ha->hw.num_sds_rings; ) {

            if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
                num_msix = Q8_MAX_INTR_VECTORS;
            else
                num_msix = ha->hw.num_sds_rings - i;

            qla_config_intr_cntxt(ha, i, num_msix, 0);

            i += num_msix;
        }

        ha->hw.flags.init_intr_cnxt = 0;
    }
qla_confirm_9kb_enable(qla_host_t *ha)

    uint32_t supports_9kb = 0;

    ha->hw.mbx_intr_mask_offset = READ_REG32(ha, Q8_MBOX_INT_MASK_MSIX);

    /* Use MSI-X vector 0; Enable Firmware Mailbox Interrupt */
    WRITE_REG32(ha, Q8_MBOX_INT_ENABLE, BIT_2);
    WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);

    qla_get_nic_partition(ha, &supports_9kb, NULL);

    if (!supports_9kb)
        ha->hw.enable_9kb = 0;
2315 * Name: ql_init_hw_if
2316 * Function: Creates the hardware specific entities corresponding to an
2317 * Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
2318 * corresponding to the interface. Enables LRO if allowed.
2321 ql_init_hw_if(qla_host_t *ha)
2325 uint8_t bcast_mac[6];
2331 for (i = 0; i < ha->hw.num_sds_rings; i++) {
2332 bzero(ha->hw.dma_buf.sds_ring[i].dma_b,
2333 ha->hw.dma_buf.sds_ring[i].size);
2336 for (i = 0; i < ha->hw.num_sds_rings; ) {
2338 if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2339 num_msix = Q8_MAX_INTR_VECTORS;
2341 num_msix = ha->hw.num_sds_rings - i;
2343 if (qla_config_intr_cntxt(ha, i, num_msix, 1)) {
2349 for (i = 0; i < num_msix; ) {
2350 qla_config_intr_cntxt(ha, i,
2351 Q8_MAX_INTR_VECTORS, 0);
2352 i += Q8_MAX_INTR_VECTORS;
2361 ha->hw.flags.init_intr_cnxt = 1;
2364 * Create Receive Context
2366 if (qla_init_rcv_cntxt(ha)) {
2370 for (i = 0; i < ha->hw.num_rds_rings; i++) {
2371 rdesc = &ha->hw.rds[i];
2372 rdesc->rx_next = NUM_RX_DESCRIPTORS - 2;
2374 /* Update the RDS Producer Indices */
2375 QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,\
2381 * Create Transmit Context
2383 if (qla_init_xmt_cntxt(ha)) {
2384 qla_del_rcv_cntxt(ha);
2387 ha->hw.max_tx_segs = 0;
2389 if (qla_config_mac_addr(ha, ha->hw.mac_addr, 1, 1))
2392 ha->hw.flags.unicast_mac = 1;
2394 bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
2395 bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
2397 if (qla_config_mac_addr(ha, bcast_mac, 1, 1))
2400 ha->hw.flags.bcast_mac = 1;
2403 * program any cached multicast addresses
2405 if (qla_hw_add_all_mcast(ha))
2408 if (qla_config_rss(ha, ha->hw.rcv_cntxt_id))
2411 if (qla_config_rss_ind_table(ha))
2414 if (qla_config_intr_coalesce(ha, ha->hw.rcv_cntxt_id, 0, 1))
2417 if (qla_link_event_req(ha, ha->hw.rcv_cntxt_id))
2420 if (qla_config_fw_lro(ha, ha->hw.rcv_cntxt_id))
2423 if (qla_init_nic_func(ha))
2426 if (qla_query_fw_dcbx_caps(ha))
2429 for (i = 0; i < ha->hw.num_sds_rings; i++)
2430 QL_ENABLE_INTERRUPTS(ha, i);
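/*
 * Mailbox command pattern used throughout this file: a q80_rq_* request is
 * built in ha->hw.mbox, count_version carries the request length in 32-bit
 * words (hence the ">> 2") together with Q8_MBX_CMD_VERSION, the command is
 * issued via qla_mbx_cmd(), and the q80_rsp_* reply is read back from the
 * same mailbox area and checked with Q8_MBX_RSP_STATUS().
 */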
2436 qla_map_sds_to_rds(qla_host_t *ha, uint32_t start_idx, uint32_t num_idx)
2438 device_t dev = ha->pci_dev;
2439 q80_rq_map_sds_to_rds_t *map_rings;
2440 q80_rsp_map_sds_to_rds_t *map_rings_rsp;
2442 qla_hw_t *hw = &ha->hw;
2444 map_rings = (q80_rq_map_sds_to_rds_t *)ha->hw.mbox;
2445 bzero(map_rings, sizeof(q80_rq_map_sds_to_rds_t));
2447 map_rings->opcode = Q8_MBX_MAP_SDS_TO_RDS;
2448 map_rings->count_version = (sizeof (q80_rq_map_sds_to_rds_t) >> 2);
2449 map_rings->count_version |= Q8_MBX_CMD_VERSION;
2451 map_rings->cntxt_id = hw->rcv_cntxt_id;
2452 map_rings->num_rings = num_idx;
2454 for (i = 0; i < num_idx; i++) {
2455 map_rings->sds_rds[i].sds_ring = i + start_idx;
2456 map_rings->sds_rds[i].rds_ring = i + start_idx;
2459 if (qla_mbx_cmd(ha, (uint32_t *)map_rings,
2460 (sizeof (q80_rq_map_sds_to_rds_t) >> 2),
2461 ha->hw.mbox, (sizeof(q80_rsp_map_sds_to_rds_t) >> 2), 0)) {
2462 device_printf(dev, "%s: failed0\n", __func__);
2466 map_rings_rsp = (q80_rsp_map_sds_to_rds_t *)ha->hw.mbox;
2468 err = Q8_MBX_RSP_STATUS(map_rings_rsp->regcnt_status);
2471 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2479 * Name: qla_init_rcv_cntxt
2480 * Function: Creates the Receive Context.
2483 qla_init_rcv_cntxt(qla_host_t *ha)
2485 q80_rq_rcv_cntxt_t *rcntxt;
2486 q80_rsp_rcv_cntxt_t *rcntxt_rsp;
2487 q80_stat_desc_t *sdesc;
2489 qla_hw_t *hw = &ha->hw;
2492 uint32_t rcntxt_sds_rings;
2493 uint32_t rcntxt_rds_rings;
2499 * Create Receive Context
2502 for (i = 0; i < hw->num_sds_rings; i++) {
2503 sdesc = (q80_stat_desc_t *)&hw->sds[i].sds_ring_base[0];
2505 for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++) {
2506 sdesc->data[0] = 1ULL;
2507 sdesc->data[1] = 1ULL;
2511 rcntxt_sds_rings = hw->num_sds_rings;
2512 if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS)
2513 rcntxt_sds_rings = MAX_RCNTXT_SDS_RINGS;
2515 rcntxt_rds_rings = hw->num_rds_rings;
2517 if (hw->num_rds_rings > MAX_RDS_RING_SETS)
2518 rcntxt_rds_rings = MAX_RDS_RING_SETS;
2520 rcntxt = (q80_rq_rcv_cntxt_t *)ha->hw.mbox;
2521 bzero(rcntxt, (sizeof (q80_rq_rcv_cntxt_t)));
2523 rcntxt->opcode = Q8_MBX_CREATE_RX_CNTXT;
2524 rcntxt->count_version = (sizeof (q80_rq_rcv_cntxt_t) >> 2);
2525 rcntxt->count_version |= Q8_MBX_CMD_VERSION;
2527 rcntxt->cap0 = Q8_RCV_CNTXT_CAP0_BASEFW |
2528 Q8_RCV_CNTXT_CAP0_LRO |
2529 Q8_RCV_CNTXT_CAP0_HW_LRO |
2530 Q8_RCV_CNTXT_CAP0_RSS |
2531 Q8_RCV_CNTXT_CAP0_SGL_LRO;
2533 if (ha->hw.enable_9kb)
2534 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SINGLE_JUMBO;
2536 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SGL_JUMBO;
2538 if (ha->hw.num_rds_rings > 1) {
2539 rcntxt->nrds_sets_rings = rcntxt_rds_rings | (1 << 5);
2540 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_MULTI_RDS;
2542 rcntxt->nrds_sets_rings = 0x1 | (1 << 5);
2544 rcntxt->nsds_rings = rcntxt_sds_rings;
2546 rcntxt->rds_producer_mode = Q8_RCV_CNTXT_RDS_PROD_MODE_UNIQUE;
2548 rcntxt->rcv_vpid = 0;
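/*
 * Interrupt assignment for the status rings: with only two MSI-X vectors
 * every SDS ring shares intr_id[0] and is distinguished by its interrupt
 * source bit; otherwise each ring gets its own vector and the source bit
 * is left at zero.
 */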
2550 for (i = 0; i < rcntxt_sds_rings; i++) {
2551 rcntxt->sds[i].paddr =
2552 qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr);
2553 rcntxt->sds[i].size =
2554 qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
2555 if (ha->msix_count == 2) {
2556 rcntxt->sds[i].intr_id =
2557 qla_host_to_le16(hw->intr_id[0]);
2558 rcntxt->sds[i].intr_src_bit = qla_host_to_le16((i));
2560 rcntxt->sds[i].intr_id =
2561 qla_host_to_le16(hw->intr_id[i]);
2562 rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0);
2566 for (i = 0; i < rcntxt_rds_rings; i++) {
2567 rcntxt->rds[i].paddr_std =
2568 qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr);
2570 if (ha->hw.enable_9kb)
2571 rcntxt->rds[i].std_bsize =
2572 qla_host_to_le64(MJUM9BYTES);
2574 rcntxt->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
2576 rcntxt->rds[i].std_nentries =
2577 qla_host_to_le32(NUM_RX_DESCRIPTORS);
2580 if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
2581 (sizeof (q80_rq_rcv_cntxt_t) >> 2),
2582 ha->hw.mbox, (sizeof(q80_rsp_rcv_cntxt_t) >> 2), 0)) {
2583 device_printf(dev, "%s: failed0\n", __func__);
2587 rcntxt_rsp = (q80_rsp_rcv_cntxt_t *)ha->hw.mbox;
2589 err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
2592 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2596 for (i = 0; i < rcntxt_sds_rings; i++) {
2597 hw->sds[i].sds_consumer = rcntxt_rsp->sds_cons[i];
2600 for (i = 0; i < rcntxt_rds_rings; i++) {
2601 hw->rds[i].prod_std = rcntxt_rsp->rds[i].prod_std;
2604 hw->rcv_cntxt_id = rcntxt_rsp->cntxt_id;
2606 ha->hw.flags.init_rx_cnxt = 1;
2608 if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) {
2610 for (i = MAX_RCNTXT_SDS_RINGS; i < hw->num_sds_rings;) {
2612 if ((i + MAX_RCNTXT_SDS_RINGS) < hw->num_sds_rings)
2613 max_idx = MAX_RCNTXT_SDS_RINGS;
2615 max_idx = hw->num_sds_rings - i;
2617 err = qla_add_rcv_rings(ha, i, max_idx);
2625 if (hw->num_rds_rings > 1) {
2627 for (i = 0; i < hw->num_rds_rings; ) {
2629 if ((i + MAX_SDS_TO_RDS_MAP) < hw->num_rds_rings)
2630 max_idx = MAX_SDS_TO_RDS_MAP;
2632 max_idx = hw->num_rds_rings - i;
2634 err = qla_map_sds_to_rds(ha, i, max_idx);
2646 qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds)
2648 device_t dev = ha->pci_dev;
2649 q80_rq_add_rcv_rings_t *add_rcv;
2650 q80_rsp_add_rcv_rings_t *add_rcv_rsp;
2652 qla_hw_t *hw = &ha->hw;
2654 add_rcv = (q80_rq_add_rcv_rings_t *)ha->hw.mbox;
2655 bzero(add_rcv, sizeof (q80_rq_add_rcv_rings_t));
2657 add_rcv->opcode = Q8_MBX_ADD_RX_RINGS;
2658 add_rcv->count_version = (sizeof (q80_rq_add_rcv_rings_t) >> 2);
2659 add_rcv->count_version |= Q8_MBX_CMD_VERSION;
2661 add_rcv->nrds_sets_rings = nsds | (1 << 5);
2662 add_rcv->nsds_rings = nsds;
2663 add_rcv->cntxt_id = hw->rcv_cntxt_id;
2665 for (i = 0; i < nsds; i++) {
2669 add_rcv->sds[i].paddr =
2670 qla_host_to_le64(hw->dma_buf.sds_ring[j].dma_addr);
2672 add_rcv->sds[i].size =
2673 qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
2675 if (ha->msix_count == 2) {
2676 add_rcv->sds[i].intr_id =
2677 qla_host_to_le16(hw->intr_id[0]);
2678 add_rcv->sds[i].intr_src_bit = qla_host_to_le16(j);
2680 add_rcv->sds[i].intr_id =
2681 qla_host_to_le16(hw->intr_id[j]);
2682 add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0);
2686 for (i = 0; (i < nsds); i++) {
2689 add_rcv->rds[i].paddr_std =
2690 qla_host_to_le64(hw->dma_buf.rds_ring[j].dma_addr);
2692 if (ha->hw.enable_9kb)
2693 add_rcv->rds[i].std_bsize =
2694 qla_host_to_le64(MJUM9BYTES);
2696 add_rcv->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
2698 add_rcv->rds[i].std_nentries =
2699 qla_host_to_le32(NUM_RX_DESCRIPTORS);
2703 if (qla_mbx_cmd(ha, (uint32_t *)add_rcv,
2704 (sizeof (q80_rq_add_rcv_rings_t) >> 2),
2705 ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
2706 device_printf(dev, "%s: failed0\n", __func__);
2710 add_rcv_rsp = (q80_rsp_add_rcv_rings_t *)ha->hw.mbox;
2712 err = Q8_MBX_RSP_STATUS(add_rcv_rsp->regcnt_status);
2715 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2719 for (i = 0; i < nsds; i++) {
2720 hw->sds[(i + sds_idx)].sds_consumer = add_rcv_rsp->sds_cons[i];
2723 for (i = 0; i < nsds; i++) {
2724 hw->rds[(i + sds_idx)].prod_std = add_rcv_rsp->rds[i].prod_std;
2731 * Name: qla_del_rcv_cntxt
2732 * Function: Destroys the Receive Context.
2735 qla_del_rcv_cntxt(qla_host_t *ha)
2737 device_t dev = ha->pci_dev;
2738 q80_rcv_cntxt_destroy_t *rcntxt;
2739 q80_rcv_cntxt_destroy_rsp_t *rcntxt_rsp;
2741 uint8_t bcast_mac[6];
2743 if (!ha->hw.flags.init_rx_cnxt)
2746 if (qla_hw_del_all_mcast(ha))
2749 if (ha->hw.flags.bcast_mac) {
2751 bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
2752 bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
2754 if (qla_config_mac_addr(ha, bcast_mac, 0, 1))
2756 ha->hw.flags.bcast_mac = 0;
2760 if (ha->hw.flags.unicast_mac) {
2761 if (qla_config_mac_addr(ha, ha->hw.mac_addr, 0, 1))
2763 ha->hw.flags.unicast_mac = 0;
2766 rcntxt = (q80_rcv_cntxt_destroy_t *)ha->hw.mbox;
2767 bzero(rcntxt, (sizeof (q80_rcv_cntxt_destroy_t)));
2769 rcntxt->opcode = Q8_MBX_DESTROY_RX_CNTXT;
2770 rcntxt->count_version = (sizeof (q80_rcv_cntxt_destroy_t) >> 2);
2771 rcntxt->count_version |= Q8_MBX_CMD_VERSION;
2773 rcntxt->cntxt_id = ha->hw.rcv_cntxt_id;
2775 if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
2776 (sizeof (q80_rcv_cntxt_destroy_t) >> 2),
2777 ha->hw.mbox, (sizeof(q80_rcv_cntxt_destroy_rsp_t) >> 2), 0)) {
2778 device_printf(dev, "%s: failed0\n", __func__);
2781 rcntxt_rsp = (q80_rcv_cntxt_destroy_rsp_t *)ha->hw.mbox;
2783 err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
2786 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2789 ha->hw.flags.init_rx_cnxt = 0;
2794 * Name: qla_init_xmt_cntxt
2795 * Function: Creates the Transmit Context.
2798 qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
2801 qla_hw_t *hw = &ha->hw;
2802 q80_rq_tx_cntxt_t *tcntxt;
2803 q80_rsp_tx_cntxt_t *tcntxt_rsp;
2805 qla_hw_tx_cntxt_t *hw_tx_cntxt;
2807 hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
2812 * Create Transmit Context
2814 tcntxt = (q80_rq_tx_cntxt_t *)ha->hw.mbox;
2815 bzero(tcntxt, (sizeof (q80_rq_tx_cntxt_t)));
2817 tcntxt->opcode = Q8_MBX_CREATE_TX_CNTXT;
2818 tcntxt->count_version = (sizeof (q80_rq_tx_cntxt_t) >> 2);
2819 tcntxt->count_version |= Q8_MBX_CMD_VERSION;
2821 #ifdef QL_ENABLE_ISCSI_TLV
2823 tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO |
2824 Q8_TX_CNTXT_CAP0_TC;
2826 if (txr_idx >= (ha->hw.num_tx_rings >> 1)) {
2827 tcntxt->traffic_class = 1;
2832 tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO;
2834 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
2836 tcntxt->ntx_rings = 1;
2838 tcntxt->tx_ring[0].paddr =
2839 qla_host_to_le64(hw_tx_cntxt->tx_ring_paddr);
2840 tcntxt->tx_ring[0].tx_consumer =
2841 qla_host_to_le64(hw_tx_cntxt->tx_cons_paddr);
2842 tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS);
2844 tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[0]);
2845 tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0);
2848 hw_tx_cntxt->txr_free = NUM_TX_DESCRIPTORS;
2849 hw_tx_cntxt->txr_next = hw_tx_cntxt->txr_comp = 0;
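/*
 * Transmit ring model: the host is the producer (txr_next, advertised to
 * the chip through the tx_prod_reg returned below), while the firmware
 * reports its consumer index by DMA into the buffer at tx_cons_paddr;
 * qla_hw_tx_done_locked() reads that index to reclaim completed buffers.
 */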
2851 if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
2852 (sizeof (q80_rq_tx_cntxt_t) >> 2),
2854 (sizeof(q80_rsp_tx_cntxt_t) >> 2), 0)) {
2855 device_printf(dev, "%s: failed0\n", __func__);
2858 tcntxt_rsp = (q80_rsp_tx_cntxt_t *)ha->hw.mbox;
2860 err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
2863 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2867 hw_tx_cntxt->tx_prod_reg = tcntxt_rsp->tx_ring[0].prod_index;
2868 hw_tx_cntxt->tx_cntxt_id = tcntxt_rsp->tx_ring[0].cntxt_id;
2870 if (qla_config_intr_coalesce(ha, hw_tx_cntxt->tx_cntxt_id, 0, 0))
2878 * Name: qla_del_xmt_cntxt
2879 * Function: Destroys the Transmit Context.
2882 qla_del_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
2884 device_t dev = ha->pci_dev;
2885 q80_tx_cntxt_destroy_t *tcntxt;
2886 q80_tx_cntxt_destroy_rsp_t *tcntxt_rsp;
2889 tcntxt = (q80_tx_cntxt_destroy_t *)ha->hw.mbox;
2890 bzero(tcntxt, (sizeof (q80_tx_cntxt_destroy_t)));
2892 tcntxt->opcode = Q8_MBX_DESTROY_TX_CNTXT;
2893 tcntxt->count_version = (sizeof (q80_tx_cntxt_destroy_t) >> 2);
2894 tcntxt->count_version |= Q8_MBX_CMD_VERSION;
2896 tcntxt->cntxt_id = ha->hw.tx_cntxt[txr_idx].tx_cntxt_id;
2898 if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
2899 (sizeof (q80_tx_cntxt_destroy_t) >> 2),
2900 ha->hw.mbox, (sizeof (q80_tx_cntxt_destroy_rsp_t) >> 2), 0)) {
2901 device_printf(dev, "%s: failed0\n", __func__);
2904 tcntxt_rsp = (q80_tx_cntxt_destroy_rsp_t *)ha->hw.mbox;
2906 err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
2909 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2916 qla_del_xmt_cntxt(qla_host_t *ha)
2920 if (!ha->hw.flags.init_tx_cnxt)
2923 for (i = 0; i < ha->hw.num_tx_rings; i++) {
2924 if (qla_del_xmt_cntxt_i(ha, i))
2927 ha->hw.flags.init_tx_cnxt = 0;
2931 qla_init_xmt_cntxt(qla_host_t *ha)
2935 for (i = 0; i < ha->hw.num_tx_rings; i++) {
2936 if (qla_init_xmt_cntxt_i(ha, i) != 0) {
2937 for (j = 0; j < i; j++)
2938 qla_del_xmt_cntxt_i(ha, j);
2942 ha->hw.flags.init_tx_cnxt = 1;
2947 qla_hw_all_mcast(qla_host_t *ha, uint32_t add_mcast)
2953 nmcast = ha->hw.nmcast;
2955 QL_DPRINT2(ha, (ha->pci_dev,
2956 "%s:[0x%x] enter nmcast = %d \n", __func__, add_mcast, nmcast));
2958 mcast = ha->hw.mac_addr_arr;
2959 memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
2961 for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
2962 if ((ha->hw.mcast[i].addr[0] != 0) ||
2963 (ha->hw.mcast[i].addr[1] != 0) ||
2964 (ha->hw.mcast[i].addr[2] != 0) ||
2965 (ha->hw.mcast[i].addr[3] != 0) ||
2966 (ha->hw.mcast[i].addr[4] != 0) ||
2967 (ha->hw.mcast[i].addr[5] != 0)) {
2969 bcopy(ha->hw.mcast[i].addr, mcast, ETHER_ADDR_LEN);
2970 mcast = mcast + ETHER_ADDR_LEN;
2973 if (count == Q8_MAX_MAC_ADDRS) {
2974 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr,
2975 add_mcast, count)) {
2976 device_printf(ha->pci_dev,
2977 "%s: failed\n", __func__);
2982 mcast = ha->hw.mac_addr_arr;
2984 (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
2992 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mcast,
2994 device_printf(ha->pci_dev, "%s: failed\n", __func__);
2998 QL_DPRINT2(ha, (ha->pci_dev,
2999 "%s:[0x%x] exit nmcast = %d \n", __func__, add_mcast, nmcast));
3005 qla_hw_add_all_mcast(qla_host_t *ha)
3009 ret = qla_hw_all_mcast(ha, 1);
3015 qla_hw_del_all_mcast(qla_host_t *ha)
3019 ret = qla_hw_all_mcast(ha, 0);
3021 bzero(ha->hw.mcast, (sizeof (qla_mcast_t) * Q8_MAX_NUM_MULTICAST_ADDRS));
3028 qla_hw_mac_addr_present(qla_host_t *ha, uint8_t *mta)
3032 for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3033 if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0)
3034 return (0); /* it has already been added */
3040 qla_hw_add_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
3044 for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3046 if ((ha->hw.mcast[i].addr[0] == 0) &&
3047 (ha->hw.mcast[i].addr[1] == 0) &&
3048 (ha->hw.mcast[i].addr[2] == 0) &&
3049 (ha->hw.mcast[i].addr[3] == 0) &&
3050 (ha->hw.mcast[i].addr[4] == 0) &&
3051 (ha->hw.mcast[i].addr[5] == 0)) {
3053 bcopy(mta, ha->hw.mcast[i].addr, Q8_MAC_ADDR_LEN);
3056 mta = mta + ETHER_ADDR_LEN;
3068 qla_hw_del_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
3072 for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3073 if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) {
3075 ha->hw.mcast[i].addr[0] = 0;
3076 ha->hw.mcast[i].addr[1] = 0;
3077 ha->hw.mcast[i].addr[2] = 0;
3078 ha->hw.mcast[i].addr[3] = 0;
3079 ha->hw.mcast[i].addr[4] = 0;
3080 ha->hw.mcast[i].addr[5] = 0;
3084 mta = mta + ETHER_ADDR_LEN;
3095 * Name: ql_hw_set_multi
3096 * Function: Sets the Multicast Addresses provided by the host O.S. into the
3097 * hardware (for the given interface)
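* Addresses are staged in hw.mac_addr_arr and flushed to the firmware in
* Q8_MAX_MAC_ADDRS-sized batches through qla_config_mac_addr(); after each
* successful batch the driver's cached mcast[] table is updated with
* qla_hw_add_mcast() or qla_hw_del_mcast().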
3100 ql_hw_set_multi(qla_host_t *ha, uint8_t *mcast_addr, uint32_t mcnt,
3103 uint8_t *mta = mcast_addr;
3109 mcast = ha->hw.mac_addr_arr;
3110 memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3112 for (i = 0; i < mcnt; i++) {
3113 if (mta[0] || mta[1] || mta[2] || mta[3] || mta[4] || mta[5]) {
3115 if (qla_hw_mac_addr_present(ha, mta) != 0) {
3116 bcopy(mta, mcast, ETHER_ADDR_LEN);
3117 mcast = mcast + ETHER_ADDR_LEN;
3121 if (qla_hw_mac_addr_present(ha, mta) == 0) {
3122 bcopy(mta, mcast, ETHER_ADDR_LEN);
3123 mcast = mcast + ETHER_ADDR_LEN;
3128 if (count == Q8_MAX_MAC_ADDRS) {
3129 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr,
3131 device_printf(ha->pci_dev, "%s: failed\n",
3137 qla_hw_add_mcast(ha, ha->hw.mac_addr_arr,
3140 qla_hw_del_mcast(ha, ha->hw.mac_addr_arr,
3145 mcast = ha->hw.mac_addr_arr;
3146 memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3149 mta += Q8_MAC_ADDR_LEN;
3153 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mac,
3155 device_printf(ha->pci_dev, "%s: failed\n", __func__);
3159 qla_hw_add_mcast(ha, ha->hw.mac_addr_arr, count);
3161 qla_hw_del_mcast(ha, ha->hw.mac_addr_arr, count);
3169 * Name: qla_hw_tx_done_locked
3170 * Function: Handle Transmit Completions
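* Walks the ring from txr_comp up to the consumer index the firmware wrote
* into shared memory (*tx_cons), unmapping and freeing each completed mbuf
* and crediting the freed slots back to txr_free.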
3173 qla_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx)
3176 qla_hw_t *hw = &ha->hw;
3177 uint32_t comp_idx, comp_count = 0;
3178 qla_hw_tx_cntxt_t *hw_tx_cntxt;
3180 hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
3182 /* retrieve index of last entry in tx ring completed */
3183 comp_idx = qla_le32_to_host(*(hw_tx_cntxt->tx_cons));
3185 while (comp_idx != hw_tx_cntxt->txr_comp) {
3187 txb = &ha->tx_ring[txr_idx].tx_buf[hw_tx_cntxt->txr_comp];
3189 hw_tx_cntxt->txr_comp++;
3190 if (hw_tx_cntxt->txr_comp == NUM_TX_DESCRIPTORS)
3191 hw_tx_cntxt->txr_comp = 0;
3196 ha->ifp->if_opackets++;
3198 bus_dmamap_sync(ha->tx_tag, txb->map,
3199 BUS_DMASYNC_POSTWRITE);
3200 bus_dmamap_unload(ha->tx_tag, txb->map);
3201 m_freem(txb->m_head);
3207 hw_tx_cntxt->txr_free += comp_count;
3212 * Name: ql_hw_tx_done
3213 * Function: Handle Transmit Completions
3216 ql_hw_tx_done(qla_host_t *ha)
3221 if (!mtx_trylock(&ha->tx_lock)) {
3222 QL_DPRINT8(ha, (ha->pci_dev,
3223 "%s: !mtx_trylock(&ha->tx_lock)\n", __func__));
3226 for (i = 0; i < ha->hw.num_tx_rings; i++) {
3227 qla_hw_tx_done_locked(ha, i);
3228 if (ha->hw.tx_cntxt[i].txr_free <= (NUM_TX_DESCRIPTORS >> 1))
3233 ha->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3240 ql_update_link_state(qla_host_t *ha)
3242 uint32_t link_state;
3243 uint32_t prev_link_state;
3245 if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3249 link_state = READ_REG32(ha, Q8_LINK_STATE);
3251 prev_link_state = ha->hw.link_up;
3253 if (ha->pci_func == 0)
3254 ha->hw.link_up = (((link_state & 0xF) == 1) ? 1 : 0);
3256 ha->hw.link_up = ((((link_state >> 4) & 0xF) == 1) ? 1 : 0);
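/*
 * Each PCI function appears to own a 4-bit field in Q8_LINK_STATE
 * (function 0 in bits 3:0, function 1 in bits 7:4); a value of 1 in that
 * nibble indicates link up.
 */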
3258 if (prev_link_state != ha->hw.link_up) {
3259 if (ha->hw.link_up) {
3260 if_link_state_change(ha->ifp, LINK_STATE_UP);
3262 if_link_state_change(ha->ifp, LINK_STATE_DOWN);
3269 ql_hw_stop_rcv(qla_host_t *ha)
3271 int i, done, count = 100;
3273 ha->flags.stop_rcv = 1;
3277 for (i = 0; i < ha->hw.num_sds_rings; i++) {
3278 if (ha->hw.sds[i].rcv_active)
3284 qla_mdelay(__func__, 10);
3288 device_printf(ha->pci_dev, "%s: Counter expired.\n", __func__);
3294 ql_hw_check_health(qla_host_t *ha)
3298 ha->hw.health_count++;
3300 if (ha->hw.health_count < 1000)
3303 ha->hw.health_count = 0;
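/*
 * The temperature and heartbeat registers below are sampled only on every
 * 1000th invocation of this routine; the counter was just reset above.
 */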
3305 val = READ_REG32(ha, Q8_ASIC_TEMPERATURE);
3307 if (((val & 0xFFFF) == 2) || ((val & 0xFFFF) == 3) ||
3308 (QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE))) {
3309 device_printf(ha->pci_dev, "%s: Temperature Alert [0x%08x]\n",
3314 val = READ_REG32(ha, Q8_FIRMWARE_HEARTBEAT);
3316 if ((val != ha->hw.hbeat_value) &&
3317 (!(QL_ERR_INJECT(ha, INJCT_HEARTBEAT_FAILURE)))) {
3318 ha->hw.hbeat_value = val;
3321 device_printf(ha->pci_dev, "%s: Heartbeat Failure [0x%08x]\n",
3328 qla_init_nic_func(qla_host_t *ha)
3331 q80_init_nic_func_t *init_nic;
3332 q80_init_nic_func_rsp_t *init_nic_rsp;
3337 init_nic = (q80_init_nic_func_t *)ha->hw.mbox;
3338 bzero(init_nic, sizeof(q80_init_nic_func_t));
3340 init_nic->opcode = Q8_MBX_INIT_NIC_FUNC;
3341 init_nic->count_version = (sizeof (q80_init_nic_func_t) >> 2);
3342 init_nic->count_version |= Q8_MBX_CMD_VERSION;
3344 init_nic->options = Q8_INIT_NIC_REG_DCBX_CHNG_AEN;
3345 init_nic->options |= Q8_INIT_NIC_REG_SFP_CHNG_AEN;
3346 init_nic->options |= Q8_INIT_NIC_REG_IDC_AEN;
3348 //qla_dump_buf8(ha, __func__, init_nic, sizeof (q80_init_nic_func_t));
3349 if (qla_mbx_cmd(ha, (uint32_t *)init_nic,
3350 (sizeof (q80_init_nic_func_t) >> 2),
3351 ha->hw.mbox, (sizeof (q80_init_nic_func_rsp_t) >> 2), 0)) {
3352 device_printf(dev, "%s: failed\n", __func__);
3356 init_nic_rsp = (q80_init_nic_func_rsp_t *)ha->hw.mbox;
3357 // qla_dump_buf8(ha, __func__, init_nic_rsp, sizeof (q80_init_nic_func_rsp_t));
3359 err = Q8_MBX_RSP_STATUS(init_nic_rsp->regcnt_status);
3362 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3369 qla_stop_nic_func(qla_host_t *ha)
3372 q80_stop_nic_func_t *stop_nic;
3373 q80_stop_nic_func_rsp_t *stop_nic_rsp;
3378 stop_nic = (q80_stop_nic_func_t *)ha->hw.mbox;
3379 bzero(stop_nic, sizeof(q80_stop_nic_func_t));
3381 stop_nic->opcode = Q8_MBX_STOP_NIC_FUNC;
3382 stop_nic->count_version = (sizeof (q80_stop_nic_func_t) >> 2);
3383 stop_nic->count_version |= Q8_MBX_CMD_VERSION;
3385 stop_nic->options = Q8_STOP_NIC_DEREG_DCBX_CHNG_AEN;
3386 stop_nic->options |= Q8_STOP_NIC_DEREG_SFP_CHNG_AEN;
3388 //qla_dump_buf8(ha, __func__, stop_nic, sizeof (q80_stop_nic_func_t));
3389 if (qla_mbx_cmd(ha, (uint32_t *)stop_nic,
3390 (sizeof (q80_stop_nic_func_t) >> 2),
3391 ha->hw.mbox, (sizeof (q80_stop_nic_func_rsp_t) >> 2), 0)) {
3392 device_printf(dev, "%s: failed\n", __func__);
3396 stop_nic_rsp = (q80_stop_nic_func_rsp_t *)ha->hw.mbox;
3397 //qla_dump_buf8(ha, __func__, stop_nic_rsp, sizeof (q80_stop_nic_func_rsp_t));
3399 err = Q8_MBX_RSP_STATUS(stop_nic_rsp->regcnt_status);
3402 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3409 qla_query_fw_dcbx_caps(qla_host_t *ha)
3412 q80_query_fw_dcbx_caps_t *fw_dcbx;
3413 q80_query_fw_dcbx_caps_rsp_t *fw_dcbx_rsp;
3418 fw_dcbx = (q80_query_fw_dcbx_caps_t *)ha->hw.mbox;
3419 bzero(fw_dcbx, sizeof(q80_query_fw_dcbx_caps_t));
3421 fw_dcbx->opcode = Q8_MBX_GET_FW_DCBX_CAPS;
3422 fw_dcbx->count_version = (sizeof (q80_query_fw_dcbx_caps_t) >> 2);
3423 fw_dcbx->count_version |= Q8_MBX_CMD_VERSION;
3425 ql_dump_buf8(ha, __func__, fw_dcbx, sizeof (q80_query_fw_dcbx_caps_t));
3426 if (qla_mbx_cmd(ha, (uint32_t *)fw_dcbx,
3427 (sizeof (q80_query_fw_dcbx_caps_t) >> 2),
3428 ha->hw.mbox, (sizeof (q80_query_fw_dcbx_caps_rsp_t) >> 2), 0)) {
3429 device_printf(dev, "%s: failed\n", __func__);
3433 fw_dcbx_rsp = (q80_query_fw_dcbx_caps_rsp_t *)ha->hw.mbox;
3434 ql_dump_buf8(ha, __func__, fw_dcbx_rsp,
3435 sizeof (q80_query_fw_dcbx_caps_rsp_t));
3437 err = Q8_MBX_RSP_STATUS(fw_dcbx_rsp->regcnt_status);
3440 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3447 qla_idc_ack(qla_host_t *ha, uint32_t aen_mb1, uint32_t aen_mb2,
3448 uint32_t aen_mb3, uint32_t aen_mb4)
3451 q80_idc_ack_t *idc_ack;
3452 q80_idc_ack_rsp_t *idc_ack_rsp;
3458 idc_ack = (q80_idc_ack_t *)ha->hw.mbox;
3459 bzero(idc_ack, sizeof(q80_idc_ack_t));
3461 idc_ack->opcode = Q8_MBX_IDC_ACK;
3462 idc_ack->count_version = (sizeof (q80_idc_ack_t) >> 2);
3463 idc_ack->count_version |= Q8_MBX_CMD_VERSION;
3465 idc_ack->aen_mb1 = aen_mb1;
3466 idc_ack->aen_mb2 = aen_mb2;
3467 idc_ack->aen_mb3 = aen_mb3;
3468 idc_ack->aen_mb4 = aen_mb4;
3470 ha->hw.imd_compl = 0;
3472 if (qla_mbx_cmd(ha, (uint32_t *)idc_ack,
3473 (sizeof (q80_idc_ack_t) >> 2),
3474 ha->hw.mbox, (sizeof (q80_idc_ack_rsp_t) >> 2), 0)) {
3475 device_printf(dev, "%s: failed\n", __func__);
3479 idc_ack_rsp = (q80_idc_ack_rsp_t *)ha->hw.mbox;
3481 err = Q8_MBX_RSP_STATUS(idc_ack_rsp->regcnt_status);
3484 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3488 while (count && !ha->hw.imd_compl) {
3489 qla_mdelay(__func__, 100);
3496 device_printf(dev, "%s: count %d\n", __func__, count);
3502 qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits)
3505 q80_set_port_cfg_t *pcfg;
3506 q80_set_port_cfg_rsp_t *pfg_rsp;
3512 pcfg = (q80_set_port_cfg_t *)ha->hw.mbox;
3513 bzero(pcfg, sizeof(q80_set_port_cfg_t));
3515 pcfg->opcode = Q8_MBX_SET_PORT_CONFIG;
3516 pcfg->count_version = (sizeof (q80_set_port_cfg_t) >> 2);
3517 pcfg->count_version |= Q8_MBX_CMD_VERSION;
3519 pcfg->cfg_bits = cfg_bits;
3521 device_printf(dev, "%s: cfg_bits"
3522 " [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
3523 " [0x%x, 0x%x, 0x%x]\n", __func__,
3524 ((cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
3525 ((cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
3526 ((cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0));
3528 ha->hw.imd_compl = 0;
3530 if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
3531 (sizeof (q80_set_port_cfg_t) >> 2),
3532 ha->hw.mbox, (sizeof (q80_set_port_cfg_rsp_t) >> 2), 0)) {
3533 device_printf(dev, "%s: failed\n", __func__);
3537 pfg_rsp = (q80_set_port_cfg_rsp_t *)ha->hw.mbox;
3539 err = Q8_MBX_RSP_STATUS(pfg_rsp->regcnt_status);
3541 if (err == Q8_MBX_RSP_IDC_INTRMD_RSP) {
3542 while (count && !ha->hw.imd_compl) {
3543 qla_mdelay(__func__, 100);
3547 device_printf(dev, "%s: count %d\n", __func__, count);
3554 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3563 qla_get_minidump_tmplt_size(qla_host_t *ha, uint32_t *size)
3566 device_t dev = ha->pci_dev;
3567 q80_config_md_templ_size_t *md_size;
3568 q80_config_md_templ_size_rsp_t *md_size_rsp;
3570 #ifndef QL_LDFLASH_FW
3572 ql_minidump_template_hdr_t *hdr;
3574 hdr = (ql_minidump_template_hdr_t *)ql83xx_minidump;
3575 *size = hdr->size_of_template;
3578 #endif /* #ifdef QL_LDFLASH_FW */
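/*
 * When the minidump template is linked into the driver (QL_LDFLASH_FW not
 * defined), the template size is taken from the built-in ql83xx_minidump
 * header above; otherwise it is queried from the firmware with the mailbox
 * command below.
 */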
3580 md_size = (q80_config_md_templ_size_t *) ha->hw.mbox;
3581 bzero(md_size, sizeof(q80_config_md_templ_size_t));
3583 md_size->opcode = Q8_MBX_GET_MINIDUMP_TMPLT_SIZE;
3584 md_size->count_version = (sizeof (q80_config_md_templ_size_t) >> 2);
3585 md_size->count_version |= Q8_MBX_CMD_VERSION;
3587 if (qla_mbx_cmd(ha, (uint32_t *) md_size,
3588 (sizeof(q80_config_md_templ_size_t) >> 2), ha->hw.mbox,
3589 (sizeof(q80_config_md_templ_size_rsp_t) >> 2), 0)) {
3591 device_printf(dev, "%s: failed\n", __func__);
3596 md_size_rsp = (q80_config_md_templ_size_rsp_t *) ha->hw.mbox;
3598 err = Q8_MBX_RSP_STATUS(md_size_rsp->regcnt_status);
3601 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3605 *size = md_size_rsp->templ_size;
3611 qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits)
3614 q80_get_port_cfg_t *pcfg;
3615 q80_get_port_cfg_rsp_t *pcfg_rsp;
3620 pcfg = (q80_get_port_cfg_t *)ha->hw.mbox;
3621 bzero(pcfg, sizeof(q80_get_port_cfg_t));
3623 pcfg->opcode = Q8_MBX_GET_PORT_CONFIG;
3624 pcfg->count_version = (sizeof (q80_get_port_cfg_t) >> 2);
3625 pcfg->count_version |= Q8_MBX_CMD_VERSION;
3627 if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
3628 (sizeof (q80_get_port_cfg_t) >> 2),
3629 ha->hw.mbox, (sizeof (q80_get_port_cfg_rsp_t) >> 2), 0)) {
3630 device_printf(dev, "%s: failed\n", __func__);
3634 pcfg_rsp = (q80_get_port_cfg_rsp_t *)ha->hw.mbox;
3636 err = Q8_MBX_RSP_STATUS(pcfg_rsp->regcnt_status);
3639 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3643 device_printf(dev, "%s: [cfg_bits, port type]"
3644 " [0x%08x, 0x%02x] [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
3645 " [0x%x, 0x%x, 0x%x]\n", __func__,
3646 pcfg_rsp->cfg_bits, pcfg_rsp->phys_port_type,
3647 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
3648 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
3649 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0)
3652 *cfg_bits = pcfg_rsp->cfg_bits;
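/*
 * qla_iscsi_pdu() below matches TCP segments whose source or destination
 * port is 3260, the well-known iSCSI port; under QL_ENABLE_ISCSI_TLV the
 * transmit path presumably uses this to steer such traffic to the
 * TLV-capable transmit contexts created in qla_init_xmt_cntxt_i().
 */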
3658 qla_iscsi_pdu(qla_host_t *ha, struct mbuf *mp)
3660 struct ether_vlan_header *eh;
3662 struct ip *ip = NULL;
3663 struct ip6_hdr *ip6 = NULL;
3664 struct tcphdr *th = NULL;
3667 uint8_t buf[sizeof(struct ip6_hdr)];
3669 eh = mtod(mp, struct ether_vlan_header *);
3671 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3672 hdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3673 etype = ntohs(eh->evl_proto);
3675 hdrlen = ETHER_HDR_LEN;
3676 etype = ntohs(eh->evl_encap_proto);
3679 if (etype == ETHERTYPE_IP) {
3681 offset = (hdrlen + sizeof (struct ip));
3683 if (mp->m_len >= offset) {
3684 ip = (struct ip *)(mp->m_data + hdrlen);
3686 m_copydata(mp, hdrlen, sizeof (struct ip), buf);
3687 ip = (struct ip *)buf;
3690 if (ip->ip_p == IPPROTO_TCP) {
3692 hdrlen += ip->ip_hl << 2;
3693 offset = hdrlen + 4;
3695 if (mp->m_len >= offset) {
3696 th = (struct tcphdr *)(mp->m_data + hdrlen);
3698 m_copydata(mp, hdrlen, 4, buf);
3699 th = (struct tcphdr *)buf;
3703 } else if (etype == ETHERTYPE_IPV6) {
3705 offset = (hdrlen + sizeof (struct ip6_hdr));
3707 if (mp->m_len >= offset) {
3708 ip6 = (struct ip6_hdr *)(mp->m_data + hdrlen);
3710 m_copydata(mp, hdrlen, sizeof (struct ip6_hdr), buf);
3711 ip6 = (struct ip6_hdr *)buf;
3714 if (ip6->ip6_nxt == IPPROTO_TCP) {
3716 hdrlen += sizeof(struct ip6_hdr);
3717 offset = hdrlen + 4;
3719 if (mp->m_len >= offset) {
3720 th = (struct tcphdr *)(mp->m_data + hdrlen);
3722 m_copydata(mp, hdrlen, 4, buf);
3723 th = (struct tcphdr *)buf;
3729 if ((th->th_sport == htons(3260)) ||
3730 (th->th_dport == htons(3260)))
3737 qla_hw_async_event(qla_host_t *ha)
3739 switch (ha->hw.aen_mb0) {
3741 (void)qla_idc_ack(ha, ha->hw.aen_mb1, ha->hw.aen_mb2,
3742 ha->hw.aen_mb3, ha->hw.aen_mb4);
3753 #ifdef QL_LDFLASH_FW
3755 ql_get_minidump_template(qla_host_t *ha)
3758 device_t dev = ha->pci_dev;
3759 q80_config_md_templ_cmd_t *md_templ;
3760 q80_config_md_templ_cmd_rsp_t *md_templ_rsp;
3762 md_templ = (q80_config_md_templ_cmd_t *) ha->hw.mbox;
3763 bzero(md_templ, (sizeof (q80_config_md_templ_cmd_t)));
3765 md_templ->opcode = Q8_MBX_GET_MINIDUMP_TMPLT;
3766 md_templ->count_version = (sizeof(q80_config_md_templ_cmd_t) >> 2);
3767 md_templ->count_version |= Q8_MBX_CMD_VERSION;
3769 md_templ->buf_addr = ha->hw.dma_buf.minidump.dma_addr;
3770 md_templ->buff_size = ha->hw.dma_buf.minidump.size;
3772 if (qla_mbx_cmd(ha, (uint32_t *) md_templ,
3773 (sizeof(q80_config_md_templ_cmd_t) >> 2),
3775 (sizeof(q80_config_md_templ_cmd_rsp_t) >> 2), 0)) {
3777 device_printf(dev, "%s: failed\n", __func__);
3782 md_templ_rsp = (q80_config_md_templ_cmd_rsp_t *) ha->hw.mbox;
3784 err = Q8_MBX_RSP_STATUS(md_templ_rsp->regcnt_status);
3787 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3794 #endif /* #ifdef QL_LDFLASH_FW */
3797 * Minidump related functionality
3800 static int ql_parse_template(qla_host_t *ha);
3802 static uint32_t ql_rdcrb(qla_host_t *ha,
3803 ql_minidump_entry_rdcrb_t *crb_entry,
3804 uint32_t * data_buff);
3806 static uint32_t ql_pollrd(qla_host_t *ha,
3807 ql_minidump_entry_pollrd_t *entry,
3808 uint32_t * data_buff);
3810 static uint32_t ql_pollrd_modify_write(qla_host_t *ha,
3811 ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
3812 uint32_t *data_buff);
3814 static uint32_t ql_L2Cache(qla_host_t *ha,
3815 ql_minidump_entry_cache_t *cacheEntry,
3816 uint32_t * data_buff);
3818 static uint32_t ql_L1Cache(qla_host_t *ha,
3819 ql_minidump_entry_cache_t *cacheEntry,
3820 uint32_t *data_buff);
3822 static uint32_t ql_rdocm(qla_host_t *ha,
3823 ql_minidump_entry_rdocm_t *ocmEntry,
3824 uint32_t *data_buff);
3826 static uint32_t ql_rdmem(qla_host_t *ha,
3827 ql_minidump_entry_rdmem_t *mem_entry,
3828 uint32_t *data_buff);
3830 static uint32_t ql_rdrom(qla_host_t *ha,
3831 ql_minidump_entry_rdrom_t *romEntry,
3832 uint32_t *data_buff);
3834 static uint32_t ql_rdmux(qla_host_t *ha,
3835 ql_minidump_entry_mux_t *muxEntry,
3836 uint32_t *data_buff);
3838 static uint32_t ql_rdmux2(qla_host_t *ha,
3839 ql_minidump_entry_mux2_t *muxEntry,
3840 uint32_t *data_buff);
3842 static uint32_t ql_rdqueue(qla_host_t *ha,
3843 ql_minidump_entry_queue_t *queueEntry,
3844 uint32_t *data_buff);
3846 static uint32_t ql_cntrl(qla_host_t *ha,
3847 ql_minidump_template_hdr_t *template_hdr,
3848 ql_minidump_entry_cntrl_t *crbEntry);
3852 ql_minidump_size(qla_host_t *ha)
3856 ql_minidump_template_hdr_t *hdr;
3858 hdr = (ql_minidump_template_hdr_t *)ha->hw.dma_buf.minidump.dma_b;
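/*
 * Total capture size: sum the capture_size_array entries whose bit is set
 * in mdump_capture_mask (the bit-walk over the mask in the loop below is
 * elided in this listing).
 */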
3862 for (k = 1; k < QL_DBG_CAP_SIZE_ARRAY_LEN; k++) {
3863 if (i & ha->hw.mdump_capture_mask)
3864 size += hdr->capture_size_array[k];
3871 ql_free_minidump_buffer(qla_host_t *ha)
3873 if (ha->hw.mdump_buffer != NULL) {
3874 free(ha->hw.mdump_buffer, M_QLA83XXBUF);
3875 ha->hw.mdump_buffer = NULL;
3876 ha->hw.mdump_buffer_size = 0;
3882 ql_alloc_minidump_buffer(qla_host_t *ha)
3884 ha->hw.mdump_buffer_size = ql_minidump_size(ha);
3886 if (!ha->hw.mdump_buffer_size)
3889 ha->hw.mdump_buffer = malloc(ha->hw.mdump_buffer_size, M_QLA83XXBUF,
3892 if (ha->hw.mdump_buffer == NULL)
3899 ql_free_minidump_template_buffer(qla_host_t *ha)
3901 if (ha->hw.mdump_template != NULL) {
3902 free(ha->hw.mdump_template, M_QLA83XXBUF);
3903 ha->hw.mdump_template = NULL;
3904 ha->hw.mdump_template_size = 0;
3910 ql_alloc_minidump_template_buffer(qla_host_t *ha)
3912 ha->hw.mdump_template_size = ha->hw.dma_buf.minidump.size;
3914 ha->hw.mdump_template = malloc(ha->hw.mdump_template_size,
3915 M_QLA83XXBUF, M_NOWAIT);
3917 if (ha->hw.mdump_template == NULL)
3924 ql_alloc_minidump_buffers(qla_host_t *ha)
3928 ret = ql_alloc_minidump_template_buffer(ha);
3933 ret = ql_alloc_minidump_buffer(ha);
3936 ql_free_minidump_template_buffer(ha);
3943 ql_validate_minidump_checksum(qla_host_t *ha)
3947 uint32_t *template_buff;
3949 count = ha->hw.dma_buf.minidump.size / sizeof (uint32_t);
3950 template_buff = ha->hw.dma_buf.minidump.dma_b;
3952 while (count-- > 0) {
3953 sum += *template_buff++;
3957 sum = (sum & 0xFFFFFFFF) + (sum >> 32);
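/*
 * The fold above collapses the 64-bit sum of all 32-bit template words
 * into 32 bits; the template is presumably accepted only when the folded
 * sum comes out to zero.
 */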
3964 ql_minidump_init(qla_host_t *ha)
3967 uint32_t template_size = 0;
3968 device_t dev = ha->pci_dev;
3971 * Get Minidump Template Size
3973 ret = qla_get_minidump_tmplt_size(ha, &template_size);
3975 if (ret || (template_size == 0)) {
3976 device_printf(dev, "%s: failed [%d, %d]\n", __func__, ret,
3982 * Allocate Memory for Minidump Template
3985 ha->hw.dma_buf.minidump.alignment = 8;
3986 ha->hw.dma_buf.minidump.size = template_size;
3988 #ifdef QL_LDFLASH_FW
3989 if (ql_alloc_dmabuf(ha, &ha->hw.dma_buf.minidump)) {
3991 device_printf(dev, "%s: minidump dma alloc failed\n", __func__);
3995 ha->hw.dma_buf.flags.minidump = 1;
3998 * Retrieve Minidump Template
4000 ret = ql_get_minidump_template(ha);
4002 ha->hw.dma_buf.minidump.dma_b = ql83xx_minidump;
4004 #endif /* #ifdef QL_LDFLASH_FW */
4008 ret = ql_validate_minidump_checksum(ha);
4012 ret = ql_alloc_minidump_buffers(ha);
4015 ha->hw.mdump_init = 1;
4018 "%s: ql_alloc_minidump_buffers"
4019 " failed\n", __func__);
4021 device_printf(dev, "%s: ql_validate_minidump_checksum"
4022 " failed\n", __func__);
4025 device_printf(dev, "%s: ql_get_minidump_template failed\n",
4030 ql_minidump_free(ha);
4036 ql_minidump_free(qla_host_t *ha)
4038 ha->hw.mdump_init = 0;
4039 if (ha->hw.dma_buf.flags.minidump) {
4040 ha->hw.dma_buf.flags.minidump = 0;
4041 ql_free_dmabuf(ha, &ha->hw.dma_buf.minidump);
4044 ql_free_minidump_template_buffer(ha);
4045 ql_free_minidump_buffer(ha);
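/*
 * ql_minidump() below: stop the firmware capture sequence, snapshot the
 * DMA'd template into mdump_template, execute it with ql_parse_template()
 * (which fills mdump_buffer), restart the sequence, and mark the dump done
 * so it is not retaken until the flag is cleared.
 */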
4051 ql_minidump(qla_host_t *ha)
4053 if (!ha->hw.mdump_init)
4056 if (ha->hw.mdump_done)
4059 ha->hw.mdump_start_seq_index = ql_stop_sequence(ha);
4061 bzero(ha->hw.mdump_buffer, ha->hw.mdump_buffer_size);
4062 bzero(ha->hw.mdump_template, ha->hw.mdump_template_size);
4064 bcopy(ha->hw.dma_buf.minidump.dma_b, ha->hw.mdump_template,
4065 ha->hw.mdump_template_size);
4067 ql_parse_template(ha);
4069 ql_start_sequence(ha, ha->hw.mdump_start_seq_index);
4071 ha->hw.mdump_done = 1;
4081 ql_entry_err_chk(ql_minidump_entry_t *entry, uint32_t esize)
4083 if (esize != entry->hdr.entry_capture_size) {
4084 entry->hdr.entry_capture_size = esize;
4085 entry->hdr.driver_flags |= QL_DBG_SIZE_ERR_FLAG;
4092 ql_parse_template(qla_host_t *ha)
4094 uint32_t num_of_entries, buff_level, e_cnt, esize;
4095 uint32_t end_cnt, rv = 0;
4096 char *dump_buff, *dbuff;
4097 int sane_start = 0, sane_end = 0;
4098 ql_minidump_template_hdr_t *template_hdr;
4099 ql_minidump_entry_t *entry;
4100 uint32_t capture_mask;
4103 /* Setup parameters */
4104 template_hdr = (ql_minidump_template_hdr_t *)ha->hw.mdump_template;
4106 if (template_hdr->entry_type == TLHDR)
4109 dump_buff = (char *) ha->hw.mdump_buffer;
4111 num_of_entries = template_hdr->num_of_entries;
4113 entry = (ql_minidump_entry_t *) ((char *)template_hdr
4114 + template_hdr->first_entry_offset );
4116 template_hdr->saved_state_array[QL_OCM0_ADDR_INDX] =
4117 template_hdr->ocm_window_array[ha->pci_func];
4118 template_hdr->saved_state_array[QL_PCIE_FUNC_INDX] = ha->pci_func;
4120 capture_mask = ha->hw.mdump_capture_mask;
4121 dump_size = ha->hw.mdump_buffer_size;
4123 template_hdr->driver_capture_mask = capture_mask;
4125 QL_DPRINT80(ha, (ha->pci_dev,
4126 "%s: sane_start = %d num_of_entries = %d "
4127 "capture_mask = 0x%x dump_size = %d \n",
4128 __func__, sane_start, num_of_entries, capture_mask, dump_size));
4130 for (buff_level = 0, e_cnt = 0; e_cnt < num_of_entries; e_cnt++) {
4133 * If the capture_mask of the entry does not match the capture mask,
4134 * skip the entry after marking the driver_flags indicator.
4137 if (!(entry->hdr.entry_capture_mask & capture_mask)) {
4139 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4140 entry = (ql_minidump_entry_t *) ((char *) entry
4141 + entry->hdr.entry_size);
4146 * This is ONLY needed in implementations where
4147 * the capture buffer allocated is too small to capture
4148 * all of the required entries for a given capture mask.
4149 * We need to empty the buffer contents to a file,
4150 * if possible, before processing the next entry.
4151 * If the buff_full_flag is set, no further capture will happen
4152 * and all remaining non-control entries will be skipped.
4154 if (entry->hdr.entry_capture_size != 0) {
4155 if ((buff_level + entry->hdr.entry_capture_size) >
4157 /* Try to recover by emptying buffer to file */
4158 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4159 entry = (ql_minidump_entry_t *) ((char *) entry
4160 + entry->hdr.entry_size);
4166 * Decode the entry type and process it accordingly
4169 switch (entry->hdr.entry_type) {
4174 if (sane_end == 0) {
4181 dbuff = dump_buff + buff_level;
4182 esize = ql_rdcrb(ha, (void *)entry, (void *)dbuff);
4183 ql_entry_err_chk(entry, esize);
4184 buff_level += esize;
4188 dbuff = dump_buff + buff_level;
4189 esize = ql_pollrd(ha, (void *)entry, (void *)dbuff);
4190 ql_entry_err_chk(entry, esize);
4191 buff_level += esize;
4195 dbuff = dump_buff + buff_level;
4196 esize = ql_pollrd_modify_write(ha, (void *)entry,
4198 ql_entry_err_chk(entry, esize);
4199 buff_level += esize;
4206 dbuff = dump_buff + buff_level;
4207 esize = ql_L2Cache(ha, (void *)entry, (void *)dbuff);
4209 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4211 ql_entry_err_chk(entry, esize);
4212 buff_level += esize;
4218 dbuff = dump_buff + buff_level;
4219 esize = ql_L1Cache(ha, (void *)entry, (void *)dbuff);
4220 ql_entry_err_chk(entry, esize);
4221 buff_level += esize;
4225 dbuff = dump_buff + buff_level;
4226 esize = ql_rdocm(ha, (void *)entry, (void *)dbuff);
4227 ql_entry_err_chk(entry, esize);
4228 buff_level += esize;
4232 dbuff = dump_buff + buff_level;
4233 esize = ql_rdmem(ha, (void *)entry, (void *)dbuff);
4234 ql_entry_err_chk(entry, esize);
4235 buff_level += esize;
4240 dbuff = dump_buff + buff_level;
4241 esize = ql_rdrom(ha, (void *)entry, (void *)dbuff);
4242 ql_entry_err_chk(entry, esize);
4243 buff_level += esize;
4247 dbuff = dump_buff + buff_level;
4248 esize = ql_rdmux(ha, (void *)entry, (void *)dbuff);
4249 ql_entry_err_chk(entry, esize);
4250 buff_level += esize;
4254 dbuff = dump_buff + buff_level;
4255 esize = ql_rdmux2(ha, (void *)entry, (void *)dbuff);
4256 ql_entry_err_chk(entry, esize);
4257 buff_level += esize;
4261 dbuff = dump_buff + buff_level;
4262 esize = ql_rdqueue(ha, (void *)entry, (void *)dbuff);
4263 ql_entry_err_chk(entry, esize);
4264 buff_level += esize;
4268 if ((rv = ql_cntrl(ha, template_hdr, (void *)entry))) {
4269 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4273 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4276 /* next entry in the template */
4277 entry = (ql_minidump_entry_t *) ((char *) entry
4278 + entry->hdr.entry_size);
4281 if (!sane_start || (sane_end > 1)) {
4282 device_printf(ha->pci_dev,
4283 "\n%s: Template configuration error. Check Template\n",
4287 QL_DPRINT80(ha, (ha->pci_dev, "%s: Minidump num of entries = %d\n",
4288 __func__, template_hdr->num_of_entries));
4294 * Read CRB operation.
4297 ql_rdcrb(qla_host_t *ha, ql_minidump_entry_rdcrb_t * crb_entry,
4298 uint32_t * data_buff)
4302 uint32_t op_count, addr, stride, value = 0;
4304 addr = crb_entry->addr;
4305 op_count = crb_entry->op_count;
4306 stride = crb_entry->addr_stride;
4308 for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
4310 ret = ql_rdwr_indreg32(ha, addr, &value, 1);
4315 *data_buff++ = addr;
4316 *data_buff++ = value;
4317 addr = addr + stride;
4321 * for testing purposes we return the amount of data written
4323 return (op_count * (2 * sizeof(uint32_t)));
4331 ql_L2Cache(qla_host_t *ha, ql_minidump_entry_cache_t *cacheEntry,
4332 uint32_t * data_buff)
4338 uint32_t read_value;
4339 uint32_t addr, read_addr, cntrl_addr, tag_reg_addr, cntl_value_w;
4340 uint32_t tag_value, read_cnt;
4341 volatile uint8_t cntl_value_r;
4345 loop_cnt = cacheEntry->op_count;
4347 read_addr = cacheEntry->read_addr;
4348 cntrl_addr = cacheEntry->control_addr;
4349 cntl_value_w = (uint32_t) cacheEntry->write_value;
4351 tag_reg_addr = cacheEntry->tag_reg_addr;
4353 tag_value = cacheEntry->init_tag_value;
4354 read_cnt = cacheEntry->read_addr_cnt;
4356 for (i = 0; i < loop_cnt; i++) {
4358 ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
4362 if (cacheEntry->write_value != 0) {
4364 ret = ql_rdwr_indreg32(ha, cntrl_addr,
4370 if (cacheEntry->poll_mask != 0) {
4372 timeout = cacheEntry->poll_wait;
4374 ret = ql_rdwr_indreg32(ha, cntrl_addr, &data, 1);
4378 cntl_value_r = (uint8_t)data;
4380 while ((cntl_value_r & cacheEntry->poll_mask) != 0) {
4383 qla_mdelay(__func__, 1);
4388 ret = ql_rdwr_indreg32(ha, cntrl_addr,
4393 cntl_value_r = (uint8_t)data;
4396 /* Report a timeout error:
4397 * the core dump capture failed.
4398 * Skip the remaining entries,
4399 * write the buffer out to a file, and
4400 * use the driver-specific fields in the template header
4401 * to report this error.
4408 for (k = 0; k < read_cnt; k++) {
4410 ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4414 *data_buff++ = read_value;
4415 addr += cacheEntry->read_addr_stride;
4418 tag_value += cacheEntry->tag_value_stride;
4421 return (read_cnt * loop_cnt * sizeof(uint32_t));
4429 ql_L1Cache(qla_host_t *ha,
4430 ql_minidump_entry_cache_t *cacheEntry,
4431 uint32_t *data_buff)
4437 uint32_t read_value;
4438 uint32_t addr, read_addr, cntrl_addr, tag_reg_addr;
4439 uint32_t tag_value, read_cnt;
4440 uint32_t cntl_value_w;
4442 loop_cnt = cacheEntry->op_count;
4444 read_addr = cacheEntry->read_addr;
4445 cntrl_addr = cacheEntry->control_addr;
4446 cntl_value_w = (uint32_t) cacheEntry->write_value;
4448 tag_reg_addr = cacheEntry->tag_reg_addr;
4450 tag_value = cacheEntry->init_tag_value;
4451 read_cnt = cacheEntry->read_addr_cnt;
4453 for (i = 0; i < loop_cnt; i++) {
4455 ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
4459 ret = ql_rdwr_indreg32(ha, cntrl_addr, &cntl_value_w, 0);
4464 for (k = 0; k < read_cnt; k++) {
4466 ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4470 *data_buff++ = read_value;
4471 addr += cacheEntry->read_addr_stride;
4474 tag_value += cacheEntry->tag_value_stride;
4477 return (read_cnt * loop_cnt * sizeof(uint32_t));
4481 * Reading OCM memory
4485 ql_rdocm(qla_host_t *ha,
4486 ql_minidump_entry_rdocm_t *ocmEntry,
4487 uint32_t *data_buff)
4490 volatile uint32_t addr;
4491 volatile uint32_t value;
4493 addr = ocmEntry->read_addr;
4494 loop_cnt = ocmEntry->op_count;
4496 for (i = 0; i < loop_cnt; i++) {
4497 value = READ_REG32(ha, addr);
4498 *data_buff++ = value;
4499 addr += ocmEntry->read_addr_stride;
4501 return (loop_cnt * sizeof(value));
4509 ql_rdmem(qla_host_t *ha,
4510 ql_minidump_entry_rdmem_t *mem_entry,
4511 uint32_t *data_buff)
4515 volatile uint32_t addr;
4516 q80_offchip_mem_val_t val;
4518 addr = mem_entry->read_addr;
4520 /* size in bytes / 16 */
4521 loop_cnt = mem_entry->read_data_size / (sizeof(uint32_t) * 4);
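/*
 * Each ql_rdwr_offchip_mem() call transfers one 16-byte quantity (the four
 * 32-bit words of q80_offchip_mem_val_t), hence the byte count is divided
 * by 16 above.
 */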
4523 for (i = 0; i < loop_cnt; i++) {
4525 ret = ql_rdwr_offchip_mem(ha, (addr & 0x0ffffffff), &val, 1);
4529 *data_buff++ = val.data_lo;
4530 *data_buff++ = val.data_hi;
4531 *data_buff++ = val.data_ulo;
4532 *data_buff++ = val.data_uhi;
4534 addr += (sizeof(uint32_t) * 4);
4537 return (loop_cnt * (sizeof(uint32_t) * 4));
4545 ql_rdrom(qla_host_t *ha,
4546 ql_minidump_entry_rdrom_t *romEntry,
4547 uint32_t *data_buff)
4554 addr = romEntry->read_addr;
4555 loop_cnt = romEntry->read_data_size; /* This is size in bytes */
4556 loop_cnt /= sizeof(value);
4558 for (i = 0; i < loop_cnt; i++) {
4560 ret = ql_rd_flash32(ha, addr, &value);
4564 *data_buff++ = value;
4565 addr += sizeof(value);
4568 return (loop_cnt * sizeof(value));
4576 ql_rdmux(qla_host_t *ha,
4577 ql_minidump_entry_mux_t *muxEntry,
4578 uint32_t *data_buff)
4582 uint32_t read_value, sel_value;
4583 uint32_t read_addr, select_addr;
4585 select_addr = muxEntry->select_addr;
4586 sel_value = muxEntry->select_value;
4587 read_addr = muxEntry->read_addr;
4589 for (loop_cnt = 0; loop_cnt < muxEntry->op_count; loop_cnt++) {
4591 ret = ql_rdwr_indreg32(ha, select_addr, &sel_value, 0);
4595 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
4599 *data_buff++ = sel_value;
4600 *data_buff++ = read_value;
4602 sel_value += muxEntry->select_value_stride;
4605 return (loop_cnt * (2 * sizeof(uint32_t)));
4609 ql_rdmux2(qla_host_t *ha,
4610 ql_minidump_entry_mux2_t *muxEntry,
4611 uint32_t *data_buff)
4616 uint32_t select_addr_1, select_addr_2;
4617 uint32_t select_value_1, select_value_2;
4618 uint32_t select_value_count, select_value_mask;
4619 uint32_t read_addr, read_value;
4621 select_addr_1 = muxEntry->select_addr_1;
4622 select_addr_2 = muxEntry->select_addr_2;
4623 select_value_1 = muxEntry->select_value_1;
4624 select_value_2 = muxEntry->select_value_2;
4625 select_value_count = muxEntry->select_value_count;
4626 select_value_mask = muxEntry->select_value_mask;
4628 read_addr = muxEntry->read_addr;
4630 for (loop_cnt = 0; loop_cnt < muxEntry->select_value_count;
4633 uint32_t temp_sel_val;
4635 ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_1, 0);
4639 temp_sel_val = select_value_1 & select_value_mask;
4641 ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
4645 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
4649 *data_buff++ = temp_sel_val;
4650 *data_buff++ = read_value;
4652 ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_2, 0);
4656 temp_sel_val = select_value_2 & select_value_mask;
4658 ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
4662 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
4666 *data_buff++ = temp_sel_val;
4667 *data_buff++ = read_value;
4669 select_value_1 += muxEntry->select_value_stride;
4670 select_value_2 += muxEntry->select_value_stride;
4673 return (loop_cnt * (4 * sizeof(uint32_t)));
4677 * Handling Queue State Reads.
4681 ql_rdqueue(qla_host_t *ha,
4682 ql_minidump_entry_queue_t *queueEntry,
4683 uint32_t *data_buff)
4687 uint32_t read_value;
4688 uint32_t read_addr, read_stride, select_addr;
4689 uint32_t queue_id, read_cnt;
4691 read_cnt = queueEntry->read_addr_cnt;
4692 read_stride = queueEntry->read_addr_stride;
4693 select_addr = queueEntry->select_addr;
4695 for (loop_cnt = 0, queue_id = 0; loop_cnt < queueEntry->op_count;
4698 ret = ql_rdwr_indreg32(ha, select_addr, &queue_id, 0);
4702 read_addr = queueEntry->read_addr;
4704 for (k = 0; k < read_cnt; k++) {
4706 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
4710 *data_buff++ = read_value;
4711 read_addr += read_stride;
4714 queue_id += queueEntry->queue_id_stride;
4717 return (loop_cnt * (read_cnt * sizeof(uint32_t)));
4721 * Handling control entries.
4725 ql_cntrl(qla_host_t *ha,
4726 ql_minidump_template_hdr_t *template_hdr,
4727 ql_minidump_entry_cntrl_t *crbEntry)
4731 uint32_t opcode, read_value, addr, entry_addr;
4734 entry_addr = crbEntry->addr;
4736 for (count = 0; count < crbEntry->op_count; count++) {
4737 opcode = crbEntry->opcode;
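/*
 * The opcode is a bitmask of operations applied in a fixed order
 * (WR, RW, AND/OR, POLL, RDSTATE, WRSTATE, MDSTATE); each bit is
 * cleared as its operation is handled.
 */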
4739 if (opcode & QL_DBG_OPCODE_WR) {
4741 ret = ql_rdwr_indreg32(ha, entry_addr,
4742 &crbEntry->value_1, 0);
4746 opcode &= ~QL_DBG_OPCODE_WR;
4749 if (opcode & QL_DBG_OPCODE_RW) {
4751 ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
4755 ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
4759 opcode &= ~QL_DBG_OPCODE_RW;
4762 if (opcode & QL_DBG_OPCODE_AND) {
4764 ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
4768 read_value &= crbEntry->value_2;
4769 opcode &= ~QL_DBG_OPCODE_AND;
4771 if (opcode & QL_DBG_OPCODE_OR) {
4772 read_value |= crbEntry->value_3;
4773 opcode &= ~QL_DBG_OPCODE_OR;
4776 ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
4781 if (opcode & QL_DBG_OPCODE_OR) {
4783 ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
4787 read_value |= crbEntry->value_3;
4789 ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
4793 opcode &= ~QL_DBG_OPCODE_OR;
4796 if (opcode & QL_DBG_OPCODE_POLL) {
4798 opcode &= ~QL_DBG_OPCODE_POLL;
4799 timeout = crbEntry->poll_timeout;
4802 ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4806 while ((read_value & crbEntry->value_2)
4807 != crbEntry->value_1) {
4810 qla_mdelay(__func__, 1);
4815 ret = ql_rdwr_indreg32(ha, addr,
4823 * Report a timeout error:
4824 * the core dump capture failed.
4825 * Skip the remaining entries,
4826 * write the buffer out to a file, and
4827 * use the driver-specific fields in the template header
4828 * to report this error.
4834 if (opcode & QL_DBG_OPCODE_RDSTATE) {
4836 * decide which address to use.
4838 if (crbEntry->state_index_a) {
4839 addr = template_hdr->saved_state_array[
4840 crbEntry-> state_index_a];
4845 ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4849 template_hdr->saved_state_array[crbEntry->state_index_v]
4851 opcode &= ~QL_DBG_OPCODE_RDSTATE;
4854 if (opcode & QL_DBG_OPCODE_WRSTATE) {
4856 * decide which value to use.
4858 if (crbEntry->state_index_v) {
4859 read_value = template_hdr->saved_state_array[
4860 crbEntry->state_index_v];
4862 read_value = crbEntry->value_1;
4865 * decide which address to use.
4867 if (crbEntry->state_index_a) {
4868 addr = template_hdr->saved_state_array[
4869 crbEntry-> state_index_a];
4874 ret = ql_rdwr_indreg32(ha, addr, &read_value, 0);
4878 opcode &= ~QL_DBG_OPCODE_WRSTATE;
4881 if (opcode & QL_DBG_OPCODE_MDSTATE) {
4882 /* Read value from saved state using index */
4883 read_value = template_hdr->saved_state_array[
4884 crbEntry->state_index_v];
4886 read_value <<= crbEntry->shl; /* Shift left operation */
4887 read_value >>= crbEntry->shr; /* Shift right operation */
4889 if (crbEntry->value_2) {
4890 /* check if AND mask is provided */
4891 read_value &= crbEntry->value_2;
4894 read_value |= crbEntry->value_3; /* OR operation */
4895 read_value += crbEntry->value_1; /* increment op */
4897 /* Write value back to state area. */
4899 template_hdr->saved_state_array[crbEntry->state_index_v]
4901 opcode &= ~QL_DBG_OPCODE_MDSTATE;
4904 entry_addr += crbEntry->addr_stride;
4911 * Handling rd poll entry.
4915 ql_pollrd(qla_host_t *ha, ql_minidump_entry_pollrd_t *entry,
4916 uint32_t *data_buff)
4920 uint32_t op_count, select_addr, select_value_stride, select_value;
4921 uint32_t read_addr, poll, mask, data_size, data;
4922 uint32_t wait_count = 0;
4924 select_addr = entry->select_addr;
4925 read_addr = entry->read_addr;
4926 select_value = entry->select_value;
4927 select_value_stride = entry->select_value_stride;
4928 op_count = entry->op_count;
4931 data_size = entry->data_size;
4933 for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
4935 ret = ql_rdwr_indreg32(ha, select_addr, &select_value, 0);
4941 while (wait_count < poll) {
4945 ret = ql_rdwr_indreg32(ha, select_addr, &temp, 1);
4949 if ((temp & mask) != 0) {
4955 if (wait_count == poll) {
4956 device_printf(ha->pci_dev,
4957 "%s: Error in processing entry\n", __func__);
4958 device_printf(ha->pci_dev,
4959 "%s: wait_count <0x%x> poll <0x%x>\n",
4960 __func__, wait_count, poll);
4964 ret = ql_rdwr_indreg32(ha, read_addr, &data, 1);
4968 *data_buff++ = select_value;
4969 *data_buff++ = data;
4970 select_value = select_value + select_value_stride;
4974 * for testing purposes we return the amount of data written
4976 return (loop_cnt * (2 * sizeof(uint32_t)));
4981 * Handling rd modify write poll entry.
4985 ql_pollrd_modify_write(qla_host_t *ha,
4986 ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
4987 uint32_t *data_buff)
4990 uint32_t addr_1, addr_2, value_1, value_2, data;
4991 uint32_t poll, mask, data_size, modify_mask;
4992 uint32_t wait_count = 0;
4994 addr_1 = entry->addr_1;
4995 addr_2 = entry->addr_2;
4996 value_1 = entry->value_1;
4997 value_2 = entry->value_2;
5001 modify_mask = entry->modify_mask;
5002 data_size = entry->data_size;
5005 ret = ql_rdwr_indreg32(ha, addr_1, &value_1, 0);
5010 while (wait_count < poll) {
5014 ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
5018 if ((temp & mask) != 0) {
5024 if (wait_count == poll) {
5025 device_printf(ha->pci_dev, "%s: Error in processing entry\n",
5029 ret = ql_rdwr_indreg32(ha, addr_2, &data, 1);
5033 data = (data & modify_mask);
5035 ret = ql_rdwr_indreg32(ha, addr_2, &data, 0);
5039 ret = ql_rdwr_indreg32(ha, addr_1, &value_2, 0);
5045 while (wait_count < poll) {
5049 ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
5053 if ((temp & mask) != 0) {
5058 *data_buff++ = addr_2;
5059 *data_buff++ = data;
5063 * for testing purposes we return the amount of data written
5065 return (2 * sizeof(uint32_t));