/*
 * Copyright (c) 2013-2014 Qlogic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: ql_hw.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 * Content: Contains hardware-dependent functions
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "ql_os.h"
#include "ql_hw.h"
#include "ql_def.h"
#include "ql_inline.h"
#include "ql_glbl.h"
#include "ql_dbg.h"
static void qla_del_rcv_cntxt(qla_host_t *ha);
static int qla_init_rcv_cntxt(qla_host_t *ha);
static void qla_del_xmt_cntxt(qla_host_t *ha);
static int qla_init_xmt_cntxt(qla_host_t *ha);
static void qla_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx);
static int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
	uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause);
static int qla_config_intr_cntxt(qla_host_t *ha, uint32_t num_intrs,
	uint32_t create);
static int qla_get_nic_partition(qla_host_t *ha);
static int qla_config_rss(qla_host_t *ha, uint16_t cntxt_id);
static int qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id,
	int tenable);
static int qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode);
static int qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id);
static int qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd,
	uint8_t *hdr);
static int qla_hw_add_all_mcast(qla_host_t *ha);
static int qla_hw_del_all_mcast(qla_host_t *ha);
static int qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx);

static int qla_minidump_init(qla_host_t *ha);
static void qla_minidump_free(qla_host_t *ha);
static int
qla_sysctl_get_drvr_stats(SYSCTL_HANDLER_ARGS)
{
	int err, ret = 0;
	qla_host_t *ha;
	uint32_t i;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	if (err || !req->newptr)
		return (err);

	if (ret == 1) {
		ha = (qla_host_t *)arg1;

		for (i = 0; i < ha->hw.num_sds_rings; i++)
			device_printf(ha->pci_dev,
				"%s: sds_ring[%d] = %p\n", __func__, i,
				(void *)ha->hw.sds[i].intr_count);

		for (i = 0; i < ha->hw.num_tx_rings; i++)
			device_printf(ha->pci_dev,
				"%s: tx[%d] = %p\n", __func__, i,
				(void *)ha->tx_ring[i].count);

		for (i = 0; i < ha->hw.num_rds_rings; i++)
			device_printf(ha->pci_dev,
				"%s: rds_ring[%d] = %p\n", __func__, i,
				(void *)ha->hw.rds[i].count);

		device_printf(ha->pci_dev, "%s: lro_pkt_count = %p\n", __func__,
			(void *)ha->lro_pkt_count);

		device_printf(ha->pci_dev, "%s: lro_bytes = %p\n", __func__,
			(void *)ha->lro_bytes);
	}

	return (err);
}
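/*
 * Usage sketch (hypothetical sysctl path, assuming the device attaches as
 * unit 0 under the "ql" driver so the node lives at dev.ql.0):
 *
 *	sysctl dev.ql.0.drvr_stats=1
 *
 * Writing 1 dumps the per-SDS-ring interrupt counts and the per-TX/RDS-ring
 * packet counts above to the console.
 */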
#ifdef QL_DBG

static void
qla_stop_pegs(qla_host_t *ha)
{
	uint32_t val = 1;

	ql_rdwr_indreg32(ha, Q8_CRB_PEG_0, &val, 0);
	ql_rdwr_indreg32(ha, Q8_CRB_PEG_1, &val, 0);
	ql_rdwr_indreg32(ha, Q8_CRB_PEG_2, &val, 0);
	ql_rdwr_indreg32(ha, Q8_CRB_PEG_3, &val, 0);
	ql_rdwr_indreg32(ha, Q8_CRB_PEG_4, &val, 0);
	device_printf(ha->pci_dev, "%s PEGS HALTED!!!!!\n", __func__);
}

static int
qla_sysctl_stop_pegs(SYSCTL_HANDLER_ARGS)
{
	int err, ret = 0;
	qla_host_t *ha;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	if (err || !req->newptr)
		return (err);

	if (ret == 1) {
		ha = (qla_host_t *)arg1;
		(void)QLA_LOCK(ha, __func__, 0);
		qla_stop_pegs(ha);
		QLA_UNLOCK(ha, __func__);
	}

	return (err);
}

#endif /* #ifdef QL_DBG */
/*
 * Name: ql_hw_add_sysctls
 * Function: Add P3Plus specific sysctls
 */
void
ql_hw_add_sysctls(qla_host_t *ha)
{
	device_t dev;

	dev = ha->pci_dev;

	ha->hw.num_sds_rings = MAX_SDS_RINGS;
	ha->hw.num_rds_rings = MAX_RDS_RINGS;
	ha->hw.num_tx_rings = NUM_TX_RINGS;

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "num_rds_rings", CTLFLAG_RD, &ha->hw.num_rds_rings,
		ha->hw.num_rds_rings, "Number of Rcv Descriptor Rings");

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "num_sds_rings", CTLFLAG_RD, &ha->hw.num_sds_rings,
		ha->hw.num_sds_rings, "Number of Status Descriptor Rings");

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->hw.num_tx_rings,
		ha->hw.num_tx_rings, "Number of Transmit Rings");

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "tx_ring_index", CTLFLAG_RW, &ha->txr_idx,
		ha->txr_idx, "Tx Ring Used");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "drvr_stats", CTLTYPE_INT | CTLFLAG_RW,
		(void *)ha, 0,
		qla_sysctl_get_drvr_stats, "I", "Driver Maintained Statistics");

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "max_tx_segs", CTLFLAG_RD, &ha->hw.max_tx_segs,
		ha->hw.max_tx_segs, "Max # of Segments in a non-TSO pkt");

	ha->hw.sds_cidx_thres = 32;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "sds_cidx_thres", CTLFLAG_RW, &ha->hw.sds_cidx_thres,
		ha->hw.sds_cidx_thres,
		"Number of SDS entries to process before updating"
		" SDS Ring Consumer Index");

	ha->hw.rds_pidx_thres = 32;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "rds_pidx_thres", CTLFLAG_RW, &ha->hw.rds_pidx_thres,
		ha->hw.rds_pidx_thres,
		"Number of Rcv Rings Entries to post before updating"
		" RDS Ring Producer Index");

	ha->hw.min_lro_pkt_size = 512;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "min_lro_pkt_size", CTLFLAG_RD, &ha->hw.min_lro_pkt_size,
		ha->hw.min_lro_pkt_size, "minimum packet size to trigger LRO");

	ha->hw.mdump_active = 0;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "minidump_active", CTLFLAG_RW, &ha->hw.mdump_active,
		ha->hw.mdump_active,
		"Minidump Utility is Active \n"
		"\t 0 = Minidump Utility is not active\n"
		"\t 1 = Minidump Utility is retrieved on this port\n"
		"\t 2 = Minidump Utility is retrieved on the other port\n");

	ha->hw.mdump_start = 0;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "minidump_start", CTLFLAG_RW,
		&ha->hw.mdump_start, ha->hw.mdump_start,
		"Minidump Utility can start minidump process");

#ifdef QL_DBG
	ha->err_inject = 0;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "err_inject",
		CTLFLAG_RW, &ha->err_inject, ha->err_inject,
		"Error to be injected\n"
		"\t\t\t 0: No Errors\n"
		"\t\t\t 1: rcv: rxb struct invalid\n"
		"\t\t\t 2: rcv: mp == NULL\n"
		"\t\t\t 3: lro: rxb struct invalid\n"
		"\t\t\t 4: lro: mp == NULL\n"
		"\t\t\t 5: rcv: num handles invalid\n"
		"\t\t\t 6: reg: indirect reg rd_wr failure\n"
		"\t\t\t 7: ocm: offchip memory rd_wr failure\n"
		"\t\t\t 8: mbx: mailbox command failure\n"
		"\t\t\t 9: heartbeat failure\n"
		"\t\t\t A: temperature failure\n");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "peg_stop", CTLTYPE_INT | CTLFLAG_RW,
		(void *)ha, 0,
		qla_sysctl_stop_pegs, "I", "Peg Stop");

#endif /* #ifdef QL_DBG */

	return;
}
void
ql_hw_link_status(qla_host_t *ha)
{
	device_printf(ha->pci_dev, "cable_oui\t\t 0x%08x\n", ha->hw.cable_oui);

	if (ha->hw.link_up) {
		device_printf(ha->pci_dev, "link Up\n");
	} else {
		device_printf(ha->pci_dev, "link Down\n");
	}

	if (ha->hw.flags.fduplex) {
		device_printf(ha->pci_dev, "Full Duplex\n");
	} else {
		device_printf(ha->pci_dev, "Half Duplex\n");
	}

	if (ha->hw.flags.autoneg) {
		device_printf(ha->pci_dev, "Auto Negotiation Enabled\n");
	} else {
		device_printf(ha->pci_dev, "Auto Negotiation Disabled\n");
	}

	switch (ha->hw.link_speed) {
	case 0x2710: /* 10000 Mbps */
		device_printf(ha->pci_dev, "link speed\t\t 10Gbps\n");
		break;

	case 0x3E8: /* 1000 Mbps */
		device_printf(ha->pci_dev, "link speed\t\t 1Gbps\n");
		break;

	case 0x64: /* 100 Mbps */
		device_printf(ha->pci_dev, "link speed\t\t 100Mbps\n");
		break;

	default:
		device_printf(ha->pci_dev, "link speed\t\t Unknown\n");
		break;
	}

	switch (ha->hw.module_type) {
	case 0x01:
		device_printf(ha->pci_dev, "Module Type 10GBase-LRM\n");
		break;

	case 0x02:
		device_printf(ha->pci_dev, "Module Type 10GBase-LR\n");
		break;

	case 0x03:
		device_printf(ha->pci_dev, "Module Type 10GBase-SR\n");
		break;

	case 0x04:
		device_printf(ha->pci_dev,
			"Module Type 10GE Passive Copper(Compliant)[%d m]\n",
			ha->hw.cable_length);
		break;

	case 0x05:
		device_printf(ha->pci_dev, "Module Type 10GE Active"
			" Limiting Copper(Compliant)[%d m]\n",
			ha->hw.cable_length);
		break;

	case 0x06:
		device_printf(ha->pci_dev,
			"Module Type 10GE Passive Copper"
			" (Legacy, Best Effort)[%d m]\n",
			ha->hw.cable_length);
		break;

	case 0x07:
		device_printf(ha->pci_dev, "Module Type 1000Base-SX\n");
		break;

	case 0x08:
		device_printf(ha->pci_dev, "Module Type 1000Base-LX\n");
		break;

	case 0x09:
		device_printf(ha->pci_dev, "Module Type 1000Base-CX\n");
		break;

	case 0x0A:
		device_printf(ha->pci_dev, "Module Type 1000Base-T\n");
		break;

	case 0x0B:
		device_printf(ha->pci_dev, "Module Type 1GE Passive Copper"
			" (Legacy, Best Effort)\n");
		break;

	default:
		device_printf(ha->pci_dev, "Unknown Module Type 0x%x\n",
			ha->hw.module_type);
		break;
	}

	if (ha->hw.link_faults == 1)
		device_printf(ha->pci_dev, "SFP Power Fault\n");
}
/*
 * Name: ql_free_dma
 * Function: Frees the DMA'able memory allocated in ql_alloc_dma()
 */
void
ql_free_dma(qla_host_t *ha)
{
	uint32_t i;

	if (ha->hw.dma_buf.flags.sds_ring) {
		for (i = 0; i < ha->hw.num_sds_rings; i++) {
			ql_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]);
		}
		ha->hw.dma_buf.flags.sds_ring = 0;
	}

	if (ha->hw.dma_buf.flags.rds_ring) {
		for (i = 0; i < ha->hw.num_rds_rings; i++) {
			ql_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]);
		}
		ha->hw.dma_buf.flags.rds_ring = 0;
	}

	if (ha->hw.dma_buf.flags.tx_ring) {
		ql_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring);
		ha->hw.dma_buf.flags.tx_ring = 0;
	}
	qla_minidump_free(ha);
}
/*
 * Name: ql_alloc_dma
 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
 */
int
ql_alloc_dma(qla_host_t *ha)
{
	device_t dev;
	uint32_t i, j, size, tx_ring_size;
	qla_hw_t *hw;
	qla_hw_tx_cntxt_t *tx_cntxt;
	uint8_t *vaddr;
	bus_addr_t paddr;

	dev = ha->pci_dev;
	hw = &ha->hw;

	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

	/*
	 * Allocate Transmit Ring
	 */
	tx_ring_size = (sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS);
	size = (tx_ring_size * ha->hw.num_tx_rings);

	hw->dma_buf.tx_ring.alignment = 8;
	hw->dma_buf.tx_ring.size = size + PAGE_SIZE;

	if (ql_alloc_dmabuf(ha, &hw->dma_buf.tx_ring)) {
		device_printf(dev, "%s: tx ring alloc failed\n", __func__);
		goto ql_alloc_dma_exit;
	}

	vaddr = (uint8_t *)hw->dma_buf.tx_ring.dma_b;
	paddr = hw->dma_buf.tx_ring.dma_addr;

	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

		tx_cntxt->tx_ring_base = (q80_tx_cmd_t *)vaddr;
		tx_cntxt->tx_ring_paddr = paddr;

		vaddr += tx_ring_size;
		paddr += tx_ring_size;
	}

	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

		tx_cntxt->tx_cons = (uint32_t *)vaddr;
		tx_cntxt->tx_cons_paddr = paddr;

		vaddr += sizeof (uint32_t);
		paddr += sizeof (uint32_t);
	}

	ha->hw.dma_buf.flags.tx_ring = 1;

	QL_DPRINT2(ha, (dev, "%s: tx_ring phys %p virt %p\n",
		__func__, (void *)(hw->dma_buf.tx_ring.dma_addr),
		hw->dma_buf.tx_ring.dma_b));

	/*
	 * Allocate Receive Descriptor Rings
	 */
	for (i = 0; i < hw->num_rds_rings; i++) {

		hw->dma_buf.rds_ring[i].alignment = 8;
		hw->dma_buf.rds_ring[i].size =
			(sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS;

		if (ql_alloc_dmabuf(ha, &hw->dma_buf.rds_ring[i])) {
			device_printf(dev, "%s: rds ring[%d] alloc failed\n",
				__func__, i);

			for (j = 0; j < i; j++)
				ql_free_dmabuf(ha, &hw->dma_buf.rds_ring[j]);

			goto ql_alloc_dma_exit;
		}
		QL_DPRINT4(ha, (dev, "%s: rx_ring[%d] phys %p virt %p\n",
			__func__, i, (void *)(hw->dma_buf.rds_ring[i].dma_addr),
			hw->dma_buf.rds_ring[i].dma_b));
	}

	hw->dma_buf.flags.rds_ring = 1;

	/*
	 * Allocate Status Descriptor Rings
	 */
	for (i = 0; i < hw->num_sds_rings; i++) {
		hw->dma_buf.sds_ring[i].alignment = 8;
		hw->dma_buf.sds_ring[i].size =
			(sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS;

		if (ql_alloc_dmabuf(ha, &hw->dma_buf.sds_ring[i])) {
			device_printf(dev, "%s: sds ring alloc failed\n",
				__func__);

			for (j = 0; j < i; j++)
				ql_free_dmabuf(ha, &hw->dma_buf.sds_ring[j]);

			goto ql_alloc_dma_exit;
		}
		QL_DPRINT4(ha, (dev, "%s: sds_ring[%d] phys %p virt %p\n",
			__func__, i,
			(void *)(hw->dma_buf.sds_ring[i].dma_addr),
			hw->dma_buf.sds_ring[i].dma_b));
	}
	for (i = 0; i < hw->num_sds_rings; i++) {
		hw->sds[i].sds_ring_base =
			(q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b;
	}

	hw->dma_buf.flags.sds_ring = 1;

	return 0;

ql_alloc_dma_exit:
	ql_free_dma(ha);
	return -1;
}
#define Q8_MBX_MSEC_DELAY	5000
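/*
 * Name: qla_mbx_cmd
 * Function: Issues a mailbox command to the firmware. The n_hmbox request
 * words are written into the host mailbox registers and the host doorbell
 * (Q8_HOST_MBOX_CNTRL) is rung; the routine then polls, for up to
 * Q8_MBX_MSEC_DELAY milliseconds, for the firmware to post a completion and
 * copies n_fwmbox response words back out of the firmware mailboxes. With
 * no_pause set it busy-waits (DELAY) instead of sleeping, so it can be used
 * from contexts that must not block.
 */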
static int
qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
	uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause)
{
	uint32_t i;
	uint32_t data;
	int ret = 0;

	if (QL_ERR_INJECT(ha, INJCT_MBX_CMD_FAILURE)) {
		ret = -3;
		ha->qla_initiate_recovery = 1;
		goto exit_qla_mbx_cmd;
	}

	if (no_pause)
		i = 1000;
	else
		i = Q8_MBX_MSEC_DELAY;

	while (i) {
		data = READ_REG32(ha, Q8_HOST_MBOX_CNTRL);
		if (data == 0)
			break;
		if (no_pause) {
			DELAY(1000);
		} else {
			qla_mdelay(__func__, 1);
		}
		i--;
	}

	if (i == 0) {
		device_printf(ha->pci_dev, "%s: host_mbx_cntrl 0x%08x\n",
			__func__, data);
		ret = -1;
		ha->qla_initiate_recovery = 1;
		goto exit_qla_mbx_cmd;
	}

	for (i = 0; i < n_hmbox; i++) {
		WRITE_REG32(ha, (Q8_HOST_MBOX0 + (i << 2)), *h_mbox);
		h_mbox++;
	}

	WRITE_REG32(ha, Q8_HOST_MBOX_CNTRL, 0x1);

	i = Q8_MBX_MSEC_DELAY;
	while (i) {
		data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);

		if ((data & 0x3) == 1) {
			data = READ_REG32(ha, Q8_FW_MBOX0);
			if ((data & 0xF000) != 0x8000)
				break;
		}
		if (no_pause) {
			DELAY(1000);
		} else {
			qla_mdelay(__func__, 1);
		}
		i--;
	}
	if (i == 0) {
		device_printf(ha->pci_dev, "%s: fw_mbx_cntrl 0x%08x\n",
			__func__, data);
		ret = -2;
		ha->qla_initiate_recovery = 1;
		goto exit_qla_mbx_cmd;
	}

	for (i = 0; i < n_fwmbox; i++) {
		*fw_mbox++ = READ_REG32(ha, (Q8_FW_MBOX0 + (i << 2)));
	}

	WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
	WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);

exit_qla_mbx_cmd:
	return (ret);
}
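/*
 * All of the firmware configuration helpers below follow the same pattern:
 * build a q80_* request structure in the ha->hw.mbox scratch area, issue it
 * via qla_mbx_cmd() with the request/response sizes expressed in 32-bit
 * words, then check Q8_MBX_RSP_STATUS() on the response's regcnt_status
 * word and return -1 on any failure.
 */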
static int
qla_get_nic_partition(qla_host_t *ha)
{
	uint32_t *mbox, err;
	device_t dev = ha->pci_dev;

	bzero(ha->hw.mbox, (sizeof (uint32_t) * Q8_NUM_MBOX));

	mbox = ha->hw.mbox;

	mbox[0] = Q8_MBX_GET_NIC_PARTITION | (0x2 << 16) | (0x2 << 29);

	if (qla_mbx_cmd(ha, mbox, 2, mbox, 19, 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}

	err = Q8_MBX_RSP_STATUS(mbox[0]);

	if ((err != 1) && (err != 0)) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return (-1);
	}
	return (0);
}
static int
qla_config_intr_cntxt(qla_host_t *ha, uint32_t num_intrs, uint32_t create)
{
	uint32_t i, err;
	device_t dev = ha->pci_dev;
	q80_config_intr_t *c_intr;
	q80_config_intr_rsp_t *c_intr_rsp;

	c_intr = (q80_config_intr_t *)ha->hw.mbox;
	bzero(c_intr, (sizeof (q80_config_intr_t)));

	c_intr->opcode = Q8_MBX_CONFIG_INTR;

	c_intr->count_version = (sizeof (q80_config_intr_t) >> 2);
	c_intr->count_version |= Q8_MBX_CMD_VERSION;

	c_intr->nentries = num_intrs;

	for (i = 0; i < num_intrs; i++) {
		if (create) {
			c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_CREATE;
			c_intr->intr[i].msix_index = i + 1;
		} else {
			c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_DELETE;
			c_intr->intr[i].msix_index = ha->hw.intr_id[i];
		}

		c_intr->intr[i].cmd_type |= Q8_MBX_CONFIG_INTR_TYPE_MSI_X;
	}

	if (qla_mbx_cmd(ha, (uint32_t *)c_intr,
		(sizeof (q80_config_intr_t) >> 2),
		ha->hw.mbox, (sizeof (q80_config_intr_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}

	c_intr_rsp = (q80_config_intr_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(c_intr_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x, %d]\n", __func__, err,
			c_intr_rsp->nentries);

		for (i = 0; i < c_intr_rsp->nentries; i++) {
			device_printf(dev, "%s: [%d]:[0x%x 0x%x 0x%x]\n",
				__func__, i,
				c_intr_rsp->intr[i].status,
				c_intr_rsp->intr[i].intr_id,
				c_intr_rsp->intr[i].intr_src);
		}

		return (-1);
	}

	for (i = 0; ((i < num_intrs) && create); i++) {
		if (!c_intr_rsp->intr[i].status) {
			ha->hw.intr_id[i] = c_intr_rsp->intr[i].intr_id;
			ha->hw.intr_src[i] = c_intr_rsp->intr[i].intr_src;
		}
	}

	return (0);
}
/*
 * Name: qla_config_rss
 * Function: Configure RSS for the context/interface.
 */
static const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL,
			0x8030f20c77cb2da3ULL,
			0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
			0x255b0ec26d5a56daULL };
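/*
 * Note: this appears to be the well-known default RSS key from Microsoft's
 * RSS verification suite, packed into little-endian 64-bit words (i.e.
 * byte-reversed relative to the canonical byte listing of that key).
 */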
static int
qla_config_rss(qla_host_t *ha, uint16_t cntxt_id)
{
	q80_config_rss_t *c_rss;
	q80_config_rss_rsp_t *c_rss_rsp;
	uint32_t err, i;
	device_t dev = ha->pci_dev;

	c_rss = (q80_config_rss_t *)ha->hw.mbox;
	bzero(c_rss, (sizeof (q80_config_rss_t)));

	c_rss->opcode = Q8_MBX_CONFIG_RSS;

	c_rss->count_version = (sizeof (q80_config_rss_t) >> 2);
	c_rss->count_version |= Q8_MBX_CMD_VERSION;

	c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP_IP |
				Q8_MBX_RSS_HASH_TYPE_IPV6_TCP_IP);

	c_rss->flags = Q8_MBX_RSS_FLAGS_ENABLE_RSS;
	c_rss->flags |= Q8_MBX_RSS_FLAGS_USE_IND_TABLE;

	c_rss->indtbl_mask = Q8_MBX_RSS_INDTBL_MASK;

	c_rss->indtbl_mask |= Q8_MBX_RSS_FLAGS_MULTI_RSS_VALID;
	c_rss->flags |= Q8_MBX_RSS_FLAGS_TYPE_CRSS;

	c_rss->cntxt_id = cntxt_id;

	for (i = 0; i < 5; i++) {
		c_rss->rss_key[i] = rss_key[i];
	}

	if (qla_mbx_cmd(ha, (uint32_t *)c_rss,
		(sizeof (q80_config_rss_t) >> 2),
		ha->hw.mbox, (sizeof(q80_config_rss_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}
	c_rss_rsp = (q80_config_rss_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(c_rss_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return (-1);
	}
	return (0);
}
static uint8_t rss_ind_default_table[Q8_RSS_IND_TBL_SIZE];

static int
qla_set_rss_ind_table(qla_host_t *ha, uint32_t start_idx, uint32_t count,
	uint16_t cntxt_id, uint8_t *ind_table)
{
	q80_config_rss_ind_table_t *c_rss_ind;
	q80_config_rss_ind_table_rsp_t *c_rss_ind_rsp;
	uint32_t err;
	device_t dev = ha->pci_dev;

	if ((count > Q8_RSS_IND_TBL_SIZE) ||
		((start_idx + count - 1) > Q8_RSS_IND_TBL_MAX_IDX)) {
		device_printf(dev, "%s: illegal count [%d, %d]\n", __func__,
			count, start_idx);
		return (-1);
	}

	c_rss_ind = (q80_config_rss_ind_table_t *)ha->hw.mbox;
	bzero(c_rss_ind, sizeof (q80_config_rss_ind_table_t));

	c_rss_ind->opcode = Q8_MBX_CONFIG_RSS_TABLE;
	c_rss_ind->count_version = (sizeof (q80_config_rss_ind_table_t) >> 2);
	c_rss_ind->count_version |= Q8_MBX_CMD_VERSION;

	c_rss_ind->start_idx = start_idx;
	c_rss_ind->end_idx = start_idx + count - 1;
	c_rss_ind->cntxt_id = cntxt_id;
	bcopy(ind_table, c_rss_ind->ind_table, count);

	if (qla_mbx_cmd(ha, (uint32_t *)c_rss_ind,
		(sizeof (q80_config_rss_ind_table_t) >> 2), ha->hw.mbox,
		(sizeof(q80_config_rss_ind_table_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}

	c_rss_ind_rsp = (q80_config_rss_ind_table_rsp_t *)ha->hw.mbox;
	err = Q8_MBX_RSP_STATUS(c_rss_ind_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return (-1);
	}
	return (0);
}
/*
 * Name: qla_config_intr_coalesce
 * Function: Configure Interrupt Coalescing.
 */
static int
qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable)
{
	q80_config_intr_coalesc_t *intrc;
	q80_config_intr_coalesc_rsp_t *intrc_rsp;
	uint32_t err, i;
	device_t dev = ha->pci_dev;

	intrc = (q80_config_intr_coalesc_t *)ha->hw.mbox;
	bzero(intrc, (sizeof (q80_config_intr_coalesc_t)));

	intrc->opcode = Q8_MBX_CONFIG_INTR_COALESCE;
	intrc->count_version = (sizeof (q80_config_intr_coalesc_t) >> 2);
	intrc->count_version |= Q8_MBX_CMD_VERSION;

	intrc->flags = Q8_MBX_INTRC_FLAGS_RCV;
	intrc->cntxt_id = cntxt_id;

	intrc->max_pkts = 256;
	intrc->max_mswait = 3;

	if (tenable) {
		intrc->flags |= Q8_MBX_INTRC_FLAGS_PERIODIC;
		intrc->timer_type = Q8_MBX_INTRC_TIMER_PERIODIC;

		for (i = 0; i < ha->hw.num_sds_rings; i++) {
			intrc->sds_ring_mask |= (1 << i);
		}
		intrc->ms_timeout = 1000;
	}

	if (qla_mbx_cmd(ha, (uint32_t *)intrc,
		(sizeof (q80_config_intr_coalesc_t) >> 2),
		ha->hw.mbox, (sizeof(q80_config_intr_coalesc_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}
	intrc_rsp = (q80_config_intr_coalesc_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(intrc_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return (-1);
	}
	return (0);
}
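/*
 * With the values above the firmware coalesces receive interrupts until
 * either 256 packets (max_pkts) have accumulated or 3 ms (max_mswait) have
 * elapsed; when tenable is non-zero a periodic timer (ms_timeout = 1000) is
 * additionally armed across all SDS rings.
 */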
/*
 * Name: qla_config_mac_addr
 * Function: binds a MAC address to the context/interface.
 *	Can be unicast, multicast or broadcast.
 */
static int
qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac)
{
	q80_config_mac_addr_t *cmac;
	q80_config_mac_addr_rsp_t *cmac_rsp;
	uint32_t err;
	device_t dev = ha->pci_dev;

	cmac = (q80_config_mac_addr_t *)ha->hw.mbox;
	bzero(cmac, (sizeof (q80_config_mac_addr_t)));

	cmac->opcode = Q8_MBX_CONFIG_MAC_ADDR;
	cmac->count_version = sizeof (q80_config_mac_addr_t) >> 2;
	cmac->count_version |= Q8_MBX_CMD_VERSION;

	if (add_mac)
		cmac->cmd = Q8_MBX_CMAC_CMD_ADD_MAC_ADDR;
	else
		cmac->cmd = Q8_MBX_CMAC_CMD_DEL_MAC_ADDR;

	cmac->cmd |= Q8_MBX_CMAC_CMD_CAM_INGRESS;

	cmac->nmac_entries = 1;
	cmac->cntxt_id = ha->hw.rcv_cntxt_id;
	bcopy(mac_addr, cmac->mac_addr[0].addr, 6);

	if (qla_mbx_cmd(ha, (uint32_t *)cmac,
		(sizeof (q80_config_mac_addr_t) >> 2),
		ha->hw.mbox, (sizeof(q80_config_mac_addr_rsp_t) >> 2), 1)) {
		device_printf(dev, "%s: %s failed0\n", __func__,
			(add_mac ? "Add" : "Del"));
		return (-1);
	}
	cmac_rsp = (q80_config_mac_addr_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(cmac_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: %s "
			"%02x:%02x:%02x:%02x:%02x:%02x failed1 [0x%08x]\n",
			__func__, (add_mac ? "Add" : "Del"),
			mac_addr[0], mac_addr[1], mac_addr[2],
			mac_addr[3], mac_addr[4], mac_addr[5], err);
		return (-1);
	}
	return (0);
}
/*
 * Name: qla_set_mac_rcv_mode
 * Function: Enable/Disable AllMulticast and Promiscuous Modes.
 */
static int
qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode)
{
	q80_config_mac_rcv_mode_t *rcv_mode;
	uint32_t err;
	q80_config_mac_rcv_mode_rsp_t *rcv_mode_rsp;
	device_t dev = ha->pci_dev;

	rcv_mode = (q80_config_mac_rcv_mode_t *)ha->hw.mbox;
	bzero(rcv_mode, (sizeof (q80_config_mac_rcv_mode_t)));

	rcv_mode->opcode = Q8_MBX_CONFIG_MAC_RX_MODE;
	rcv_mode->count_version = sizeof (q80_config_mac_rcv_mode_t) >> 2;
	rcv_mode->count_version |= Q8_MBX_CMD_VERSION;

	rcv_mode->mode = mode;

	rcv_mode->cntxt_id = ha->hw.rcv_cntxt_id;

	if (qla_mbx_cmd(ha, (uint32_t *)rcv_mode,
		(sizeof (q80_config_mac_rcv_mode_t) >> 2),
		ha->hw.mbox, (sizeof(q80_config_mac_rcv_mode_rsp_t) >> 2), 1)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}
	rcv_mode_rsp = (q80_config_mac_rcv_mode_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(rcv_mode_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return (-1);
	}
	return (0);
}
int
ql_set_promisc(qla_host_t *ha)
{
	int ret;

	ha->hw.mac_rcv_mode |= Q8_MBX_MAC_RCV_PROMISC_ENABLE;
	ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
	return (ret);
}

int
ql_set_allmulti(qla_host_t *ha)
{
	int ret;

	ha->hw.mac_rcv_mode |= Q8_MBX_MAC_ALL_MULTI_ENABLE;
	ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
	return (ret);
}
/*
 * Name: ql_set_max_mtu
 * Function:
 *	Sets the maximum transfer unit size for the specified rcv context.
 */
int
ql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id)
{
	device_t dev;
	q80_set_max_mtu_t *max_mtu;
	q80_set_max_mtu_rsp_t *max_mtu_rsp;
	uint32_t err;

	dev = ha->pci_dev;

	max_mtu = (q80_set_max_mtu_t *)ha->hw.mbox;
	bzero(max_mtu, (sizeof (q80_set_max_mtu_t)));

	max_mtu->opcode = Q8_MBX_SET_MAX_MTU;
	max_mtu->count_version = (sizeof (q80_set_max_mtu_t) >> 2);
	max_mtu->count_version |= Q8_MBX_CMD_VERSION;

	max_mtu->cntxt_id = cntxt_id;
	max_mtu->mtu = mtu;

	if (qla_mbx_cmd(ha, (uint32_t *)max_mtu,
		(sizeof (q80_set_max_mtu_t) >> 2),
		ha->hw.mbox, (sizeof (q80_set_max_mtu_rsp_t) >> 2), 1)) {
		device_printf(dev, "%s: failed\n", __func__);
		return (-1);
	}

	max_mtu_rsp = (q80_set_max_mtu_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(max_mtu_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
		return (-1);
	}

	return (0);
}
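/*
 * Usage sketch (hypothetical value): after the O.S. raises the interface
 * MTU to 9000 for jumbo frames, the caller would re-program the receive
 * context with something like
 *
 *	ql_set_max_mtu(ha, 9000, ha->hw.rcv_cntxt_id);
 *
 * Whether link-layer header bytes are included in the value is the caller's
 * convention; the firmware simply enforces the limit it is given.
 */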
static int
qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id)
{
	uint32_t err;
	q80_link_event_t *lnk;
	q80_link_event_rsp_t *lnk_rsp;
	device_t dev = ha->pci_dev;

	lnk = (q80_link_event_t *)ha->hw.mbox;
	bzero(lnk, (sizeof (q80_link_event_t)));

	lnk->opcode = Q8_MBX_LINK_EVENT_REQ;
	lnk->count_version = (sizeof (q80_link_event_t) >> 2);
	lnk->count_version |= Q8_MBX_CMD_VERSION;

	lnk->cntxt_id = cntxt_id;
	lnk->cmd = Q8_LINK_EVENT_CMD_ENABLE_ASYNC;

	if (qla_mbx_cmd(ha, (uint32_t *)lnk, (sizeof (q80_link_event_t) >> 2),
		ha->hw.mbox, (sizeof (q80_link_event_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed\n", __func__);
		return (-1);
	}

	lnk_rsp = (q80_link_event_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(lnk_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
		return (-1);
	}

	return (0);
}
static int
qla_config_fw_lro(qla_host_t *ha, uint16_t cntxt_id)
{
	uint32_t err;
	q80_config_fw_lro_t *fw_lro;
	q80_config_fw_lro_rsp_t *fw_lro_rsp;
	device_t dev = ha->pci_dev;

	fw_lro = (q80_config_fw_lro_t *)ha->hw.mbox;
	bzero(fw_lro, sizeof(q80_config_fw_lro_t));

	fw_lro->opcode = Q8_MBX_CONFIG_FW_LRO;
	fw_lro->count_version = (sizeof (q80_config_fw_lro_t) >> 2);
	fw_lro->count_version |= Q8_MBX_CMD_VERSION;

	fw_lro->flags |= Q8_MBX_FW_LRO_IPV4 | Q8_MBX_FW_LRO_IPV4_WO_DST_IP_CHK;

	fw_lro->cntxt_id = cntxt_id;

	if (ha->hw.min_lro_pkt_size) {
		fw_lro->flags |= Q8_MBX_FW_LRO_LOW_THRESHOLD;
		fw_lro->low_threshold = ha->hw.min_lro_pkt_size;
	}

	if (qla_mbx_cmd(ha, (uint32_t *)fw_lro,
		(sizeof (q80_config_fw_lro_t) >> 2),
		ha->hw.mbox, (sizeof (q80_config_fw_lro_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed\n", __func__);
		return (-1);
	}

	fw_lro_rsp = (q80_config_fw_lro_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(fw_lro_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
		return (-1);
	}

	return (0);
}
static void
qla_xmt_stats(qla_host_t *ha, q80_xmt_stats_t *xstat)
{
	device_t dev = ha->pci_dev;

	device_printf(dev, "%s: total_bytes\t\t%" PRIu64 "\n", __func__,
		xstat->total_bytes);
	device_printf(dev, "%s: total_pkts\t\t%" PRIu64 "\n", __func__,
		xstat->total_pkts);
	device_printf(dev, "%s: errors\t\t%" PRIu64 "\n", __func__,
		xstat->errors);
	device_printf(dev, "%s: pkts_dropped\t%" PRIu64 "\n", __func__,
		xstat->pkts_dropped);
	device_printf(dev, "%s: switch_pkts\t\t%" PRIu64 "\n", __func__,
		xstat->switch_pkts);
	device_printf(dev, "%s: num_buffers\t\t%" PRIu64 "\n", __func__,
		xstat->num_buffers);
}
static void
qla_rcv_stats(qla_host_t *ha, q80_rcv_stats_t *rstat)
{
	device_t dev = ha->pci_dev;

	device_printf(dev, "%s: total_bytes\t\t\t%" PRIu64 "\n", __func__,
		rstat->total_bytes);
	device_printf(dev, "%s: total_pkts\t\t\t%" PRIu64 "\n", __func__,
		rstat->total_pkts);
	device_printf(dev, "%s: lro_pkt_count\t\t%" PRIu64 "\n", __func__,
		rstat->lro_pkt_count);
	device_printf(dev, "%s: sw_pkt_count\t\t%" PRIu64 "\n", __func__,
		rstat->sw_pkt_count);
	device_printf(dev, "%s: ip_chksum_err\t\t%" PRIu64 "\n", __func__,
		rstat->ip_chksum_err);
	device_printf(dev, "%s: pkts_wo_acntxts\t\t%" PRIu64 "\n", __func__,
		rstat->pkts_wo_acntxts);
	device_printf(dev, "%s: pkts_dropped_no_sds_card\t%" PRIu64 "\n",
		__func__, rstat->pkts_dropped_no_sds_card);
	device_printf(dev, "%s: pkts_dropped_no_sds_host\t%" PRIu64 "\n",
		__func__, rstat->pkts_dropped_no_sds_host);
	device_printf(dev, "%s: oversized_pkts\t\t%" PRIu64 "\n", __func__,
		rstat->oversized_pkts);
	device_printf(dev, "%s: pkts_dropped_no_rds\t\t%" PRIu64 "\n",
		__func__, rstat->pkts_dropped_no_rds);
	device_printf(dev, "%s: unxpctd_mcast_pkts\t\t%" PRIu64 "\n",
		__func__, rstat->unxpctd_mcast_pkts);
	device_printf(dev, "%s: re1_fbq_error\t\t%" PRIu64 "\n", __func__,
		rstat->re1_fbq_error);
	device_printf(dev, "%s: invalid_mac_addr\t\t%" PRIu64 "\n", __func__,
		rstat->invalid_mac_addr);
	device_printf(dev, "%s: rds_prime_trys\t\t%" PRIu64 "\n", __func__,
		rstat->rds_prime_trys);
	device_printf(dev, "%s: rds_prime_success\t\t%" PRIu64 "\n", __func__,
		rstat->rds_prime_success);
	device_printf(dev, "%s: lro_flows_added\t\t%" PRIu64 "\n", __func__,
		rstat->lro_flows_added);
	device_printf(dev, "%s: lro_flows_deleted\t\t%" PRIu64 "\n", __func__,
		rstat->lro_flows_deleted);
	device_printf(dev, "%s: lro_flows_active\t\t%" PRIu64 "\n", __func__,
		rstat->lro_flows_active);
	device_printf(dev, "%s: pkts_droped_unknown\t\t%" PRIu64 "\n",
		__func__, rstat->pkts_droped_unknown);
}
static void
qla_mac_stats(qla_host_t *ha, q80_mac_stats_t *mstat)
{
	device_t dev = ha->pci_dev;

	device_printf(dev, "%s: xmt_frames\t\t\t%" PRIu64 "\n", __func__,
		mstat->xmt_frames);
	device_printf(dev, "%s: xmt_bytes\t\t\t%" PRIu64 "\n", __func__,
		mstat->xmt_bytes);
	device_printf(dev, "%s: xmt_mcast_pkts\t\t%" PRIu64 "\n", __func__,
		mstat->xmt_mcast_pkts);
	device_printf(dev, "%s: xmt_bcast_pkts\t\t%" PRIu64 "\n", __func__,
		mstat->xmt_bcast_pkts);
	device_printf(dev, "%s: xmt_pause_frames\t\t%" PRIu64 "\n", __func__,
		mstat->xmt_pause_frames);
	device_printf(dev, "%s: xmt_cntrl_pkts\t\t%" PRIu64 "\n", __func__,
		mstat->xmt_cntrl_pkts);
	device_printf(dev, "%s: xmt_pkt_lt_64bytes\t\t%" PRIu64 "\n",
		__func__, mstat->xmt_pkt_lt_64bytes);
	device_printf(dev, "%s: xmt_pkt_lt_127bytes\t\t%" PRIu64 "\n",
		__func__, mstat->xmt_pkt_lt_127bytes);
	device_printf(dev, "%s: xmt_pkt_lt_255bytes\t\t%" PRIu64 "\n",
		__func__, mstat->xmt_pkt_lt_255bytes);
	device_printf(dev, "%s: xmt_pkt_lt_511bytes\t\t%" PRIu64 "\n",
		__func__, mstat->xmt_pkt_lt_511bytes);
	device_printf(dev, "%s: xmt_pkt_lt_1023bytes\t%" PRIu64 "\n",
		__func__, mstat->xmt_pkt_lt_1023bytes);
	device_printf(dev, "%s: xmt_pkt_lt_1518bytes\t%" PRIu64 "\n",
		__func__, mstat->xmt_pkt_lt_1518bytes);
	device_printf(dev, "%s: xmt_pkt_gt_1518bytes\t%" PRIu64 "\n",
		__func__, mstat->xmt_pkt_gt_1518bytes);

	device_printf(dev, "%s: rcv_frames\t\t\t%" PRIu64 "\n", __func__,
		mstat->rcv_frames);
	device_printf(dev, "%s: rcv_bytes\t\t\t%" PRIu64 "\n", __func__,
		mstat->rcv_bytes);
	device_printf(dev, "%s: rcv_mcast_pkts\t\t%" PRIu64 "\n", __func__,
		mstat->rcv_mcast_pkts);
	device_printf(dev, "%s: rcv_bcast_pkts\t\t%" PRIu64 "\n", __func__,
		mstat->rcv_bcast_pkts);
	device_printf(dev, "%s: rcv_pause_frames\t\t%" PRIu64 "\n", __func__,
		mstat->rcv_pause_frames);
	device_printf(dev, "%s: rcv_cntrl_pkts\t\t%" PRIu64 "\n", __func__,
		mstat->rcv_cntrl_pkts);
	device_printf(dev, "%s: rcv_pkt_lt_64bytes\t\t%" PRIu64 "\n",
		__func__, mstat->rcv_pkt_lt_64bytes);
	device_printf(dev, "%s: rcv_pkt_lt_127bytes\t\t%" PRIu64 "\n",
		__func__, mstat->rcv_pkt_lt_127bytes);
	device_printf(dev, "%s: rcv_pkt_lt_255bytes\t\t%" PRIu64 "\n",
		__func__, mstat->rcv_pkt_lt_255bytes);
	device_printf(dev, "%s: rcv_pkt_lt_511bytes\t\t%" PRIu64 "\n",
		__func__, mstat->rcv_pkt_lt_511bytes);
	device_printf(dev, "%s: rcv_pkt_lt_1023bytes\t%" PRIu64 "\n",
		__func__, mstat->rcv_pkt_lt_1023bytes);
	device_printf(dev, "%s: rcv_pkt_lt_1518bytes\t%" PRIu64 "\n",
		__func__, mstat->rcv_pkt_lt_1518bytes);
	device_printf(dev, "%s: rcv_pkt_gt_1518bytes\t%" PRIu64 "\n",
		__func__, mstat->rcv_pkt_gt_1518bytes);

	device_printf(dev, "%s: rcv_len_error\t\t%" PRIu64 "\n", __func__,
		mstat->rcv_len_error);
	device_printf(dev, "%s: rcv_len_small\t\t%" PRIu64 "\n", __func__,
		mstat->rcv_len_small);
	device_printf(dev, "%s: rcv_len_large\t\t%" PRIu64 "\n", __func__,
		mstat->rcv_len_large);
	device_printf(dev, "%s: rcv_jabber\t\t\t%" PRIu64 "\n", __func__,
		mstat->rcv_jabber);
	device_printf(dev, "%s: rcv_dropped\t\t\t%" PRIu64 "\n", __func__,
		mstat->rcv_dropped);
	device_printf(dev, "%s: fcs_error\t\t\t%" PRIu64 "\n", __func__,
		mstat->fcs_error);
	device_printf(dev, "%s: align_error\t\t\t%" PRIu64 "\n", __func__,
		mstat->align_error);
}
static int
qla_get_hw_stats(qla_host_t *ha, uint32_t cmd)
{
	device_t dev;
	q80_get_stats_t *stat;
	q80_get_stats_rsp_t *stat_rsp;
	uint32_t err;

	dev = ha->pci_dev;

	stat = (q80_get_stats_t *)ha->hw.mbox;
	bzero(stat, (sizeof (q80_get_stats_t)));

	stat->opcode = Q8_MBX_GET_STATS;
	stat->count_version = 2;
	stat->count_version |= Q8_MBX_CMD_VERSION;

	stat->cmd = cmd;

	if (qla_mbx_cmd(ha, (uint32_t *)stat, 2,
		ha->hw.mbox, (sizeof (q80_get_stats_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed\n", __func__);
		return (-1);
	}

	stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(stat_rsp->regcnt_status);

	if (err) {
		return (-1);
	}

	return (0);
}
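/*
 * The statistics command word encodes the request type in its low bits
 * (Q8_GET_STATS_CMD_TYPE_MAC / _RCV / _XMT plus a scope flag) and the
 * qualifier - PCI function or context id - in bits 16 and up, as can be
 * seen in ql_get_stats() below.
 */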
void
ql_get_stats(qla_host_t *ha)
{
	q80_get_stats_rsp_t *stat_rsp;
	q80_mac_stats_t *mstat;
	q80_xmt_stats_t *xstat;
	q80_rcv_stats_t *rstat;
	uint32_t cmd;

	stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
	/*
	 * Get MAC Statistics
	 */
	cmd = Q8_GET_STATS_CMD_TYPE_MAC;

	cmd |= ((ha->pci_func & 0x1) << 16);

	if (qla_get_hw_stats(ha, cmd) == 0) {
		mstat = (q80_mac_stats_t *)&stat_rsp->u.mac;
		qla_mac_stats(ha, mstat);
	} else {
		device_printf(ha->pci_dev, "%s: mac failed [0x%08x]\n",
			__func__, ha->hw.mbox[0]);
	}
	/*
	 * Get RCV Statistics
	 */
	cmd = Q8_GET_STATS_CMD_RCV | Q8_GET_STATS_CMD_TYPE_CNTXT;
	cmd |= (ha->hw.rcv_cntxt_id << 16);

	if (qla_get_hw_stats(ha, cmd) == 0) {
		rstat = (q80_rcv_stats_t *)&stat_rsp->u.rcv;
		qla_rcv_stats(ha, rstat);
	} else {
		device_printf(ha->pci_dev, "%s: rcv failed [0x%08x]\n",
			__func__, ha->hw.mbox[0]);
	}
	/*
	 * Get XMT Statistics
	 */
	cmd = Q8_GET_STATS_CMD_XMT | Q8_GET_STATS_CMD_TYPE_CNTXT;
	cmd |= (ha->hw.tx_cntxt[ha->txr_idx].tx_cntxt_id << 16);

	if (qla_get_hw_stats(ha, cmd) == 0) {
		xstat = (q80_xmt_stats_t *)&stat_rsp->u.xmt;
		qla_xmt_stats(ha, xstat);
	} else {
		device_printf(ha->pci_dev, "%s: xmt failed [0x%08x]\n",
			__func__, ha->hw.mbox[0]);
	}
}
/*
 * Name: qla_tx_tso
 * Function: Checks if the packet to be transmitted is a candidate for
 *	Large TCP Segment Offload. If yes, the appropriate fields in the Tx
 *	Ring Structure are plugged in.
 */
static int
qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr)
{
	struct ether_vlan_header *eh;
	struct ip *ip = NULL;
	struct ip6_hdr *ip6 = NULL;
	struct tcphdr *th = NULL;
	uint32_t ehdrlen, hdrlen, ip_hlen, tcp_hlen, tcp_opt_off;
	uint16_t etype, opcode, offload = 1;
	device_t dev;

	dev = ha->pci_dev;

	eh = mtod(mp, struct ether_vlan_header *);

	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		etype = ntohs(eh->evl_proto);
	} else {
		ehdrlen = ETHER_HDR_LEN;
		etype = ntohs(eh->evl_encap_proto);
	}

	switch (etype) {
	case ETHERTYPE_IP:

		tcp_opt_off = ehdrlen + sizeof(struct ip) +
				sizeof(struct tcphdr);

		if (mp->m_len < tcp_opt_off) {
			m_copydata(mp, 0, tcp_opt_off, hdr);
			ip = (struct ip *)(hdr + ehdrlen);
		} else {
			ip = (struct ip *)(mp->m_data + ehdrlen);
		}

		ip_hlen = ip->ip_hl << 2;
		opcode = Q8_TX_CMD_OP_XMT_TCP_LSO;

		if ((ip->ip_p != IPPROTO_TCP) ||
			(ip_hlen != sizeof (struct ip))) {
			/* IP Options are not supported */
			offload = 0;
		} else
			th = (struct tcphdr *)((caddr_t)ip + ip_hlen);

		break;

	case ETHERTYPE_IPV6:

		tcp_opt_off = ehdrlen + sizeof(struct ip6_hdr) +
				sizeof (struct tcphdr);

		if (mp->m_len < tcp_opt_off) {
			m_copydata(mp, 0, tcp_opt_off, hdr);
			ip6 = (struct ip6_hdr *)(hdr + ehdrlen);
		} else {
			ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
		}

		ip_hlen = sizeof(struct ip6_hdr);
		opcode = Q8_TX_CMD_OP_XMT_TCP_LSO_IPV6;

		if (ip6->ip6_nxt != IPPROTO_TCP) {
			//device_printf(dev, "%s: ipv6\n", __func__);
			offload = 0;
		} else
			th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
		break;

	default:
		QL_DPRINT8(ha, (dev, "%s: type!=ip\n", __func__));
		offload = 0;
		break;
	}

	if (!offload)
		return (-1);

	tcp_hlen = th->th_off << 2;
	hdrlen = ehdrlen + ip_hlen + tcp_hlen;

	if (mp->m_len < hdrlen) {
		if (mp->m_len < tcp_opt_off) {
			if (tcp_hlen > sizeof(struct tcphdr)) {
				m_copydata(mp, tcp_opt_off,
					(tcp_hlen - sizeof(struct tcphdr)),
					&hdr[tcp_opt_off]);
			}
		} else {
			m_copydata(mp, 0, hdrlen, hdr);
		}
	}

	tx_cmd->mss = mp->m_pkthdr.tso_segsz;

	tx_cmd->flags_opcode = opcode;
	tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;
	tx_cmd->total_hdr_len = hdrlen;

	/* Check for Multicast least significant bit of MSB == 1 */
	if (eh->evl_dhost[0] & 0x01) {
		tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST;
	}

	if (mp->m_len < hdrlen) {
		printf("%d\n", hdrlen);
		return (1);
	}

	return (0);
}
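/*
 * Reading of the return convention above: qla_tx_tso() returns 0 when the
 * complete ethernet/IP/TCP header is contiguous in the first mbuf (the
 * caller can copy it straight out of the frame), 1 when the header had to
 * be reassembled into the hdr scratch buffer, and -1 when the frame is not
 * a TSO candidate. ql_hw_send() relies on this to pick the header source
 * for the extra TSO descriptors.
 */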
/*
 * Name: qla_tx_chksum
 * Function: Checks if the packet to be transmitted is a candidate for
 *	TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx
 *	Ring Structure are plugged in.
 */
static int
qla_tx_chksum(qla_host_t *ha, struct mbuf *mp, uint32_t *op_code,
	uint32_t *tcp_hdr_off)
{
	struct ether_vlan_header *eh;
	struct ip *ip;
	struct ip6_hdr *ip6;
	uint32_t ehdrlen, ip_hlen;
	uint16_t etype, opcode, offload = 1;
	device_t dev;
	uint8_t buf[sizeof(struct ip6_hdr)];

	dev = ha->pci_dev;

	if ((mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) == 0)
		return (-1);

	eh = mtod(mp, struct ether_vlan_header *);

	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		etype = ntohs(eh->evl_proto);
	} else {
		ehdrlen = ETHER_HDR_LEN;
		etype = ntohs(eh->evl_encap_proto);
	}

	switch (etype) {
	case ETHERTYPE_IP:
		ip = (struct ip *)(mp->m_data + ehdrlen);

		ip_hlen = sizeof (struct ip);

		if (mp->m_len < (ehdrlen + ip_hlen)) {
			m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
			ip = (struct ip *)buf;
		}

		if (ip->ip_p == IPPROTO_TCP)
			opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM;
		else if (ip->ip_p == IPPROTO_UDP)
			opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM;
		else {
			//device_printf(dev, "%s: ipv4\n", __func__);
			offload = 0;
		}
		break;

	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);

		ip_hlen = sizeof(struct ip6_hdr);

		if (mp->m_len < (ehdrlen + ip_hlen)) {
			m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
				buf);
			ip6 = (struct ip6_hdr *)buf;
		}

		if (ip6->ip6_nxt == IPPROTO_TCP)
			opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6;
		else if (ip6->ip6_nxt == IPPROTO_UDP)
			opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6;
		else {
			//device_printf(dev, "%s: ipv6\n", __func__);
			offload = 0;
		}
		break;

	default:
		offload = 0;
		break;
	}

	if (!offload)
		return (-1);

	*op_code = opcode;
	*tcp_hdr_off = (ip_hlen + ehdrlen);

	return (0);
}
#define QLA_TX_MIN_FREE	2

/*
 * Name: ql_hw_send
 * Function: Transmits a packet. It first checks if the packet is a
 *	candidate for Large TCP Segment Offload and then for UDP/TCP checksum
 *	offload. If either of these criteria are not met, it is transmitted
 *	as a regular ethernet frame.
 */
int
ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
	uint32_t tx_idx, struct mbuf *mp, uint32_t txr_idx)
{
	struct ether_vlan_header *eh;
	qla_hw_t *hw = &ha->hw;
	q80_tx_cmd_t *tx_cmd, tso_cmd;
	bus_dma_segment_t *c_seg;
	uint32_t num_tx_cmds, hdr_len = 0;
	uint32_t total_length = 0, bytes, tx_cmd_count = 0, txr_next;
	device_t dev;
	int i, ret = 0;
	uint8_t *src = NULL, *dst = NULL;
	uint8_t frame_hdr[QL_FRAME_HDR_SIZE];
	uint32_t op_code = 0;
	uint32_t tcp_hdr_off = 0;

	dev = ha->pci_dev;

	/*
	 * Always make sure there is at least one empty slot in the tx_ring;
	 * tx_ring is considered full when there is only one entry available.
	 */
	num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2;

	total_length = mp->m_pkthdr.len;
	if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
		device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
			__func__, total_length);
		return (-1);
	}
	eh = mtod(mp, struct ether_vlan_header *);

	if (mp->m_pkthdr.csum_flags & CSUM_TSO) {

		bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t));

		src = frame_hdr;
		ret = qla_tx_tso(ha, mp, &tso_cmd, src);

		if (!(ret & ~1)) {
			/* find the additional tx_cmd descriptors required */

			if (mp->m_flags & M_VLANTAG)
				tso_cmd.total_hdr_len += ETHER_VLAN_ENCAP_LEN;

			hdr_len = tso_cmd.total_hdr_len;

			bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
			bytes = QL_MIN(bytes, hdr_len);

			num_tx_cmds++;
			hdr_len -= bytes;

			while (hdr_len) {
				bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
				hdr_len -= bytes;
				num_tx_cmds++;
			}
			hdr_len = tso_cmd.total_hdr_len;

			if (ret == 0)
				src = (uint8_t *)eh;
		} else
			return (EINVAL);
	} else {
		(void)qla_tx_chksum(ha, mp, &op_code, &tcp_hdr_off);
	}

	if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
		qla_hw_tx_done_locked(ha, txr_idx);
		if (hw->tx_cntxt[txr_idx].txr_free <=
			(num_tx_cmds + QLA_TX_MIN_FREE)) {
			QL_DPRINT8(ha, (dev, "%s: (hw->txr_free <= "
				"(num_tx_cmds + QLA_TX_MIN_FREE))\n",
				__func__));
			return (-1);
		}
	}

	tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[tx_idx];

	if (!(mp->m_pkthdr.csum_flags & CSUM_TSO)) {

		if (nsegs > ha->hw.max_tx_segs)
			ha->hw.max_tx_segs = nsegs;

		bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));

		if (op_code) {
			tx_cmd->flags_opcode = op_code;
			tx_cmd->tcp_hdr_off = tcp_hdr_off;
		} else {
			tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER;
		}
	} else {
		bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t));
		ha->tx_tso_frames++;
	}

	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED;
	} else if (mp->m_flags & M_VLANTAG) {

		if (hdr_len) { /* TSO */
			tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED |
				Q8_TX_CMD_FLAGS_HW_VLAN_ID);
			tx_cmd->tcp_hdr_off += ETHER_VLAN_ENCAP_LEN;
		} else
			tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_HW_VLAN_ID;

		ha->hw_vlan_tx_frames++;
		tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag;
	}

	tx_cmd->n_bufs = (uint8_t)nsegs;
	tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF);
	tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8)));
	tx_cmd->cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func);

	c_seg = segs;

	while (1) {
		for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) {

			switch (i) {
			case 0:
				tx_cmd->buf1_addr = c_seg->ds_addr;
				tx_cmd->buf1_len = c_seg->ds_len;
				break;

			case 1:
				tx_cmd->buf2_addr = c_seg->ds_addr;
				tx_cmd->buf2_len = c_seg->ds_len;
				break;

			case 2:
				tx_cmd->buf3_addr = c_seg->ds_addr;
				tx_cmd->buf3_len = c_seg->ds_len;
				break;

			case 3:
				tx_cmd->buf4_addr = c_seg->ds_addr;
				tx_cmd->buf4_len = c_seg->ds_len;
				break;
			}

			c_seg++;
			nsegs--;
		}

		txr_next = hw->tx_cntxt[txr_idx].txr_next =
			(hw->tx_cntxt[txr_idx].txr_next + 1) &
				(NUM_TX_DESCRIPTORS - 1);
		tx_cmd_count++;

		if (!nsegs)
			break;

		tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
		bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
	}

	if (mp->m_pkthdr.csum_flags & CSUM_TSO) {

		/* TSO : Copy the header in the following tx cmd descriptors */

		txr_next = hw->tx_cntxt[txr_idx].txr_next;

		tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
		bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));

		bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
		bytes = QL_MIN(bytes, hdr_len);

		dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN;

		if (mp->m_flags & M_VLANTAG) {
			/* first copy the src/dst MAC addresses */
			bcopy(src, dst, (ETHER_ADDR_LEN * 2));
			dst += (ETHER_ADDR_LEN * 2);
			src += (ETHER_ADDR_LEN * 2);

			*((uint16_t *)dst) = htons(ETHERTYPE_VLAN);
			dst += 2;
			*((uint16_t *)dst) = htons(mp->m_pkthdr.ether_vtag);
			dst += 2;

			/* bytes left in src header */
			hdr_len -= ((ETHER_ADDR_LEN * 2) +
					ETHER_VLAN_ENCAP_LEN);

			/* bytes left in TxCmd Entry */
			bytes -= ((ETHER_ADDR_LEN * 2) + ETHER_VLAN_ENCAP_LEN);

			bcopy(src, dst, bytes);
			src += bytes;
			hdr_len -= bytes;
		} else {
			bcopy(src, dst, bytes);
			src += bytes;
			hdr_len -= bytes;
		}

		txr_next = hw->tx_cntxt[txr_idx].txr_next =
			(hw->tx_cntxt[txr_idx].txr_next + 1) &
				(NUM_TX_DESCRIPTORS - 1);
		tx_cmd_count++;

		while (hdr_len) {
			tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
			bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));

			bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);

			bcopy(src, tx_cmd, bytes);
			src += bytes;
			hdr_len -= bytes;

			txr_next = hw->tx_cntxt[txr_idx].txr_next =
				(hw->tx_cntxt[txr_idx].txr_next + 1) &
					(NUM_TX_DESCRIPTORS - 1);
			tx_cmd_count++;
		}
	}

	hw->tx_cntxt[txr_idx].txr_free =
		hw->tx_cntxt[txr_idx].txr_free - tx_cmd_count;

	QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->tx_cntxt[txr_idx].txr_next,\
		txr_idx);
	QL_DPRINT8(ha, (dev, "%s: return\n", __func__));

	return (0);
}
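/*
 * Descriptor accounting example: each q80_tx_cmd_t holds up to
 * Q8_TX_CMD_MAX_SEGMENTS (4) buffer pointers, so a 7-segment mbuf chain
 * needs (7 + 3) >> 2 = 2 command descriptors. For TSO, the header bytes are
 * appended in additional descriptors (the first offset by
 * Q8_TX_CMD_TSO_ALIGN), which is what the num_tx_cmds adjustment in
 * ql_hw_send() accounts for.
 */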
static int
qla_config_rss_ind_table(qla_host_t *ha)
{
	uint32_t i, count;
	uint8_t rss_ind_tbl[16];

	bzero(rss_ind_default_table, sizeof(rss_ind_default_table));

	for (i = 0; i < 16; i++) {
		rss_ind_tbl[i] = i % ha->hw.num_sds_rings;
	}

	for (i = 0; i <= Q8_RSS_IND_TBL_MAX_IDX ; i = i + 16) {

		if ((i + 16) > Q8_RSS_IND_TBL_MAX_IDX) {
			count = Q8_RSS_IND_TBL_MAX_IDX - i + 1;
		} else {
			count = 16;
		}

		if (qla_set_rss_ind_table(ha, i, count, ha->hw.rcv_cntxt_id,
			rss_ind_tbl))
			return (-1);
	}

	return (0);
}
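/*
 * Example: with 4 SDS rings the 16-entry pattern written above is
 * 0,1,2,3,0,1,2,3,... repeated across the whole indirection table, so RSS
 * hash values spread received packets round-robin over the rings.
 */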
/*
 * Name: ql_del_hw_if
 * Function: Destroys the hardware specific entities corresponding to an
 *	Ethernet Interface
 */
void
ql_del_hw_if(qla_host_t *ha)
{

	qla_del_rcv_cntxt(ha);
	qla_del_xmt_cntxt(ha);

	if (ha->hw.flags.init_intr_cnxt) {
		qla_config_intr_cntxt(ha, ha->hw.num_sds_rings, 0);
		ha->hw.flags.init_intr_cnxt = 0;
	}
}
/*
 * Name: ql_init_hw_if
 * Function: Creates the hardware specific entities corresponding to an
 *	Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
 *	corresponding to the interface. Enables LRO if allowed.
 */
int
ql_init_hw_if(qla_host_t *ha)
{
	uint32_t i;
	uint8_t bcast_mac[6];
	qla_rdesc_t *rdesc;

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		bzero(ha->hw.dma_buf.sds_ring[i].dma_b,
			ha->hw.dma_buf.sds_ring[i].size);
	}
	ha->hw.mbx_intr_mask_offset = READ_REG32(ha, Q8_MBOX_INT_MASK_MSIX);

	/* Use MSI-X vector 0; Enable Firmware Mailbox Interrupt */
	WRITE_REG32(ha, Q8_MBOX_INT_ENABLE, BIT_2);
	WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);

	qla_get_nic_partition(ha);

	if (qla_config_intr_cntxt(ha, ha->hw.num_sds_rings, 1) == 0) {
		ha->hw.flags.init_intr_cnxt = 1;
	} else
		return (-1);

	if (ha->hw.mdump_init == 0) {
		qla_minidump_init(ha);
	}

	/*
	 * Create Receive Context
	 */
	if (qla_init_rcv_cntxt(ha)) {
		return (-1);
	}

	for (i = 0; i < ha->hw.num_rds_rings; i++) {
		rdesc = &ha->hw.rds[i];
		rdesc->rx_next = NUM_RX_DESCRIPTORS - 2;
		rdesc->rx_in = 0;
		/* Update the RDS Producer Indices */
		QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,\
			rdesc->rx_next);
	}

	/*
	 * Create Transmit Context
	 */
	if (qla_init_xmt_cntxt(ha)) {
		qla_del_rcv_cntxt(ha);
		return (-1);
	}
	ha->hw.max_tx_segs = 0;

	if (qla_config_mac_addr(ha, ha->hw.mac_addr, 1))
		return (-1);

	ha->hw.flags.unicast_mac = 1;

	bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
	bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;

	if (qla_config_mac_addr(ha, bcast_mac, 1))
		return (-1);

	ha->hw.flags.bcast_mac = 1;

	/*
	 * program any cached multicast addresses
	 */
	if (qla_hw_add_all_mcast(ha))
		return (-1);

	if (qla_config_rss(ha, ha->hw.rcv_cntxt_id))
		return (-1);

	if (qla_config_rss_ind_table(ha))
		return (-1);

	if (qla_config_intr_coalesce(ha, ha->hw.rcv_cntxt_id, 0))
		return (-1);

	if (qla_link_event_req(ha, ha->hw.rcv_cntxt_id))
		return (-1);

	if (qla_config_fw_lro(ha, ha->hw.rcv_cntxt_id))
		return (-1);

	for (i = 0; i < ha->hw.num_sds_rings; i++)
		QL_ENABLE_INTERRUPTS(ha, i);

	return (0);
}
static int
qla_map_sds_to_rds(qla_host_t *ha)
{
	device_t dev = ha->pci_dev;
	q80_rq_map_sds_to_rds_t *map_rings;
	q80_rsp_add_rcv_rings_t *map_rings_rsp;
	uint32_t i, err;
	qla_hw_t *hw = &ha->hw;

	map_rings = (q80_rq_map_sds_to_rds_t *)ha->hw.mbox;
	bzero(map_rings, sizeof(q80_rq_map_sds_to_rds_t));

	map_rings->opcode = Q8_MBX_MAP_SDS_TO_RDS;
	map_rings->count_version = (sizeof (q80_rq_map_sds_to_rds_t) >> 2);
	map_rings->count_version |= Q8_MBX_CMD_VERSION;

	map_rings->cntxt_id = hw->rcv_cntxt_id;
	map_rings->num_rings = hw->num_sds_rings;

	for (i = 0; i < hw->num_sds_rings; i++) {
		map_rings->sds_rds[i].sds_ring = i;
		map_rings->sds_rds[i].rds_ring = i;
	}

	if (qla_mbx_cmd(ha, (uint32_t *)map_rings,
		(sizeof (q80_rq_map_sds_to_rds_t) >> 2),
		ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}

	map_rings_rsp = (q80_rsp_add_rcv_rings_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(map_rings_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return (-1);
	}

	return (0);
}
/*
 * Name: qla_init_rcv_cntxt
 * Function: Creates the Receive Context.
 */
static int
qla_init_rcv_cntxt(qla_host_t *ha)
{
	q80_rq_rcv_cntxt_t *rcntxt;
	q80_rsp_rcv_cntxt_t *rcntxt_rsp;
	q80_stat_desc_t *sdesc;
	int i, j;
	qla_hw_t *hw = &ha->hw;
	device_t dev;
	uint32_t err;
	uint32_t rcntxt_sds_rings;
	uint32_t rcntxt_rds_rings;

	dev = ha->pci_dev;

	/*
	 * Create Receive Context
	 */

	for (i = 0; i < hw->num_sds_rings; i++) {
		sdesc = (q80_stat_desc_t *)&hw->sds[i].sds_ring_base[0];

		for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++) {
			sdesc->data[0] = 1ULL;
			sdesc->data[1] = 1ULL;
			sdesc++;
		}
	}

	rcntxt_sds_rings = hw->num_sds_rings;
	if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS)
		rcntxt_sds_rings = MAX_RCNTXT_SDS_RINGS;

	rcntxt_rds_rings = hw->num_rds_rings;

	if (hw->num_rds_rings > MAX_RDS_RING_SETS)
		rcntxt_rds_rings = MAX_RDS_RING_SETS;

	rcntxt = (q80_rq_rcv_cntxt_t *)ha->hw.mbox;
	bzero(rcntxt, (sizeof (q80_rq_rcv_cntxt_t)));

	rcntxt->opcode = Q8_MBX_CREATE_RX_CNTXT;
	rcntxt->count_version = (sizeof (q80_rq_rcv_cntxt_t) >> 2);
	rcntxt->count_version |= Q8_MBX_CMD_VERSION;

	rcntxt->cap0 = Q8_RCV_CNTXT_CAP0_BASEFW |
			Q8_RCV_CNTXT_CAP0_LRO |
			Q8_RCV_CNTXT_CAP0_HW_LRO |
			Q8_RCV_CNTXT_CAP0_RSS |
			Q8_RCV_CNTXT_CAP0_SGL_JUMBO |
			Q8_RCV_CNTXT_CAP0_SGL_LRO;

	if (ha->hw.num_rds_rings > 1) {
		rcntxt->nrds_sets_rings = rcntxt_rds_rings | (1 << 5);
		rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_MULTI_RDS;
	} else
		rcntxt->nrds_sets_rings = 0x1 | (1 << 5);

	rcntxt->nsds_rings = rcntxt_sds_rings;

	rcntxt->rds_producer_mode = Q8_RCV_CNTXT_RDS_PROD_MODE_UNIQUE;

	rcntxt->rcv_vpid = 0;

	for (i = 0; i < rcntxt_sds_rings; i++) {
		rcntxt->sds[i].paddr =
			qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr);
		rcntxt->sds[i].size =
			qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
		if (ha->msix_count == 2) {
			rcntxt->sds[i].intr_id =
				qla_host_to_le16(hw->intr_id[0]);
			rcntxt->sds[i].intr_src_bit = qla_host_to_le16((i));
		} else {
			rcntxt->sds[i].intr_id =
				qla_host_to_le16(hw->intr_id[i]);
			rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0);
		}
	}

	for (i = 0; i < rcntxt_rds_rings; i++) {
		rcntxt->rds[i].paddr_std =
			qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr);
		rcntxt->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
		rcntxt->rds[i].std_nentries =
			qla_host_to_le32(NUM_RX_DESCRIPTORS);
	}

	if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
		(sizeof (q80_rq_rcv_cntxt_t) >> 2),
		ha->hw.mbox, (sizeof(q80_rsp_rcv_cntxt_t) >> 2), 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}

	rcntxt_rsp = (q80_rsp_rcv_cntxt_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return (-1);
	}

	for (i = 0; i < rcntxt_sds_rings; i++) {
		hw->sds[i].sds_consumer = rcntxt_rsp->sds_cons[i];
	}

	for (i = 0; i < rcntxt_rds_rings; i++) {
		hw->rds[i].prod_std = rcntxt_rsp->rds[i].prod_std;
	}

	hw->rcv_cntxt_id = rcntxt_rsp->cntxt_id;

	ha->hw.flags.init_rx_cnxt = 1;

	if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) {
		err = qla_add_rcv_rings(ha, MAX_RCNTXT_SDS_RINGS);
		if (err)
			return (-1);
	}

	if (hw->num_rds_rings > 1) {
		err = qla_map_sds_to_rds(ha);
		if (err)
			return (-1);
	}

	return (0);
}
static int
qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx)
{
	device_t dev = ha->pci_dev;
	q80_rq_add_rcv_rings_t *add_rcv;
	q80_rsp_add_rcv_rings_t *add_rcv_rsp;
	uint32_t i, err, nsds, j;
	qla_hw_t *hw = &ha->hw;

	nsds = hw->num_sds_rings - MAX_RCNTXT_SDS_RINGS;

	add_rcv = (q80_rq_add_rcv_rings_t *)ha->hw.mbox;
	bzero(add_rcv, sizeof (q80_rq_add_rcv_rings_t));

	add_rcv->opcode = Q8_MBX_ADD_RX_RINGS;
	add_rcv->count_version = (sizeof (q80_rq_add_rcv_rings_t) >> 2);
	add_rcv->count_version |= Q8_MBX_CMD_VERSION;

	if (hw->num_rds_rings > 1)
		add_rcv->nrds_sets_rings = nsds | (1 << 5);
	else
		add_rcv->nrds_sets_rings = 0;

	add_rcv->nsds_rings = nsds;
	add_rcv->cntxt_id = hw->rcv_cntxt_id;

	for (i = 0; i < nsds; i++) {

		j = i + sds_idx;

		add_rcv->sds[i].paddr =
			qla_host_to_le64(hw->dma_buf.sds_ring[j].dma_addr);

		add_rcv->sds[i].size =
			qla_host_to_le32(NUM_STATUS_DESCRIPTORS);

		if (ha->msix_count == 2) {
			add_rcv->sds[i].intr_id =
				qla_host_to_le16(hw->intr_id[0]);
			add_rcv->sds[i].intr_src_bit = qla_host_to_le16(j);
		} else {
			add_rcv->sds[i].intr_id =
				qla_host_to_le16(hw->intr_id[j]);
			add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0);
		}
	}

	for (i = 0; ((i < nsds) && (hw->num_rds_rings > 1)); i++) {
		j = i + sds_idx;
		add_rcv->rds[i].paddr_std =
			qla_host_to_le64(hw->dma_buf.rds_ring[j].dma_addr);
		add_rcv->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
		add_rcv->rds[i].std_nentries =
			qla_host_to_le32(NUM_RX_DESCRIPTORS);
	}

	if (qla_mbx_cmd(ha, (uint32_t *)add_rcv,
		(sizeof (q80_rq_add_rcv_rings_t) >> 2),
		ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}

	add_rcv_rsp = (q80_rsp_add_rcv_rings_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(add_rcv_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return (-1);
	}

	for (i = sds_idx; i < hw->num_sds_rings; i++) {
		hw->sds[i].sds_consumer = add_rcv_rsp->sds_cons[(i - sds_idx)];
	}
	for (i = sds_idx; i < hw->num_rds_rings; i++) {
		hw->rds[i].prod_std = add_rcv_rsp->rds[(i - sds_idx)].prod_std;
	}
	return (0);
}
/*
 * Name: qla_del_rcv_cntxt
 * Function: Destroys the Receive Context.
 */
static void
qla_del_rcv_cntxt(qla_host_t *ha)
{
	device_t dev = ha->pci_dev;
	q80_rcv_cntxt_destroy_t *rcntxt;
	q80_rcv_cntxt_destroy_rsp_t *rcntxt_rsp;
	uint32_t err;
	uint8_t bcast_mac[6];

	if (!ha->hw.flags.init_rx_cnxt)
		return;

	if (qla_hw_del_all_mcast(ha))
		return;

	if (ha->hw.flags.bcast_mac) {

		bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
		bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;

		if (qla_config_mac_addr(ha, bcast_mac, 0))
			return;
		ha->hw.flags.bcast_mac = 0;

	}

	if (ha->hw.flags.unicast_mac) {
		if (qla_config_mac_addr(ha, ha->hw.mac_addr, 0))
			return;
		ha->hw.flags.unicast_mac = 0;
	}

	rcntxt = (q80_rcv_cntxt_destroy_t *)ha->hw.mbox;
	bzero(rcntxt, (sizeof (q80_rcv_cntxt_destroy_t)));

	rcntxt->opcode = Q8_MBX_DESTROY_RX_CNTXT;
	rcntxt->count_version = (sizeof (q80_rcv_cntxt_destroy_t) >> 2);
	rcntxt->count_version |= Q8_MBX_CMD_VERSION;

	rcntxt->cntxt_id = ha->hw.rcv_cntxt_id;

	if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
		(sizeof (q80_rcv_cntxt_destroy_t) >> 2),
		ha->hw.mbox, (sizeof(q80_rcv_cntxt_destroy_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return;
	}
	rcntxt_rsp = (q80_rcv_cntxt_destroy_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
	}

	ha->hw.flags.init_rx_cnxt = 0;
	return;
}
/*
 * Name: qla_init_xmt_cntxt
 * Function: Creates the Transmit Context.
 */
static int
qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
{
	device_t dev;
	qla_hw_t *hw = &ha->hw;
	q80_rq_tx_cntxt_t *tcntxt;
	q80_rsp_tx_cntxt_t *tcntxt_rsp;
	uint32_t err;
	qla_hw_tx_cntxt_t *hw_tx_cntxt;

	hw_tx_cntxt = &hw->tx_cntxt[txr_idx];

	dev = ha->pci_dev;

	/*
	 * Create Transmit Context
	 */
	tcntxt = (q80_rq_tx_cntxt_t *)ha->hw.mbox;
	bzero(tcntxt, (sizeof (q80_rq_tx_cntxt_t)));

	tcntxt->opcode = Q8_MBX_CREATE_TX_CNTXT;
	tcntxt->count_version = (sizeof (q80_rq_tx_cntxt_t) >> 2);
	tcntxt->count_version |= Q8_MBX_CMD_VERSION;

	tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO;

	tcntxt->ntx_rings = 1;

	tcntxt->tx_ring[0].paddr =
		qla_host_to_le64(hw_tx_cntxt->tx_ring_paddr);
	tcntxt->tx_ring[0].tx_consumer =
		qla_host_to_le64(hw_tx_cntxt->tx_cons_paddr);
	tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS);

	tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[0]);
	tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0);

	hw_tx_cntxt->txr_free = NUM_TX_DESCRIPTORS;
	hw_tx_cntxt->txr_next = hw_tx_cntxt->txr_comp = 0;

	if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
		(sizeof (q80_rq_tx_cntxt_t) >> 2),
		ha->hw.mbox,
		(sizeof(q80_rsp_tx_cntxt_t) >> 2), 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}
	tcntxt_rsp = (q80_rsp_tx_cntxt_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return (-1);
	}

	hw_tx_cntxt->tx_prod_reg = tcntxt_rsp->tx_ring[0].prod_index;
	hw_tx_cntxt->tx_cntxt_id = tcntxt_rsp->tx_ring[0].cntxt_id;

	return (0);
}
/*
 * Name: qla_del_xmt_cntxt_i
 * Function: Destroys the Transmit Context for a given tx ring.
 */
static int
qla_del_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
{
	device_t dev = ha->pci_dev;
	q80_tx_cntxt_destroy_t *tcntxt;
	q80_tx_cntxt_destroy_rsp_t *tcntxt_rsp;
	uint32_t err;

	tcntxt = (q80_tx_cntxt_destroy_t *)ha->hw.mbox;
	bzero(tcntxt, (sizeof (q80_tx_cntxt_destroy_t)));

	tcntxt->opcode = Q8_MBX_DESTROY_TX_CNTXT;
	tcntxt->count_version = (sizeof (q80_tx_cntxt_destroy_t) >> 2);
	tcntxt->count_version |= Q8_MBX_CMD_VERSION;

	tcntxt->cntxt_id = ha->hw.tx_cntxt[txr_idx].tx_cntxt_id;

	if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
		(sizeof (q80_tx_cntxt_destroy_t) >> 2),
		ha->hw.mbox, (sizeof (q80_tx_cntxt_destroy_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}

	tcntxt_rsp = (q80_tx_cntxt_destroy_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return (-1);
	}

	return (0);
}
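/*
 * Name: qla_del_xmt_cntxt
 * Function: Tears down every Transmit Context that was created; stops at
 *	the first ring whose context cannot be destroyed.
 */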
static void
qla_del_xmt_cntxt(qla_host_t *ha)
{
	uint32_t i;

	if (!ha->hw.flags.init_tx_cnxt)
		return;

	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		if (qla_del_xmt_cntxt_i(ha, i))
			break;
	}
	ha->hw.flags.init_tx_cnxt = 0;
	return;
}
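/*
 * Name: qla_init_xmt_cntxt
 * Function: Creates a Transmit Context for each tx ring; on any failure the
 *	contexts already created are destroyed so the hardware is left clean.
 */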
static int
qla_init_xmt_cntxt(qla_host_t *ha)
{
	uint32_t i, j;

	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		if (qla_init_xmt_cntxt_i(ha, i) != 0) {
			/* unwind the contexts created so far */
			for (j = 0; j < i; j++)
				qla_del_xmt_cntxt_i(ha, j);
			return (-1);
		}
	}
	ha->hw.flags.init_tx_cnxt = 1;
	return (0);
}
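/*
 * Name: qla_hw_add_all_mcast
 * Function: Reprograms every non-zero entry in the driver's multicast table
 *	into the hardware MAC filter.
 */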
static int
qla_hw_add_all_mcast(qla_host_t *ha)
{
	int i, nmcast;

	nmcast = ha->hw.nmcast;

	for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
		if ((ha->hw.mcast[i].addr[0] != 0) ||
			(ha->hw.mcast[i].addr[1] != 0) ||
			(ha->hw.mcast[i].addr[2] != 0) ||
			(ha->hw.mcast[i].addr[3] != 0) ||
			(ha->hw.mcast[i].addr[4] != 0) ||
			(ha->hw.mcast[i].addr[5] != 0)) {
			if (qla_config_mac_addr(ha, ha->hw.mcast[i].addr, 1)) {
				device_printf(ha->pci_dev, "%s: failed\n",
					__func__);
				return (-1);
			}

			nmcast--;
		}
	}
	return 0;
}
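/*
 * Name: qla_hw_del_all_mcast
 * Function: Removes every address in the driver's multicast table from the
 *	hardware MAC filter.
 */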
static int
qla_hw_del_all_mcast(qla_host_t *ha)
{
	int i, nmcast;

	nmcast = ha->hw.nmcast;

	for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
		if ((ha->hw.mcast[i].addr[0] != 0) ||
			(ha->hw.mcast[i].addr[1] != 0) ||
			(ha->hw.mcast[i].addr[2] != 0) ||
			(ha->hw.mcast[i].addr[3] != 0) ||
			(ha->hw.mcast[i].addr[4] != 0) ||
			(ha->hw.mcast[i].addr[5] != 0)) {
			if (qla_config_mac_addr(ha, ha->hw.mcast[i].addr, 0))
				return (-1);

			nmcast--;
		}
	}
	return 0;
}
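/*
 * Name: qla_hw_add_mcast
 * Function: Adds one multicast address to the hardware filter and records
 *	it in the first free slot of the driver's multicast table; a
 *	duplicate address is silently accepted.
 */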
static int
qla_hw_add_mcast(qla_host_t *ha, uint8_t *mta)
{
	int i;

	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
		if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0)
			return 0; /* it has already been added */
	}

	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
		if ((ha->hw.mcast[i].addr[0] == 0) &&
			(ha->hw.mcast[i].addr[1] == 0) &&
			(ha->hw.mcast[i].addr[2] == 0) &&
			(ha->hw.mcast[i].addr[3] == 0) &&
			(ha->hw.mcast[i].addr[4] == 0) &&
			(ha->hw.mcast[i].addr[5] == 0)) {
			if (qla_config_mac_addr(ha, mta, 1))
				return (-1);

			bcopy(mta, ha->hw.mcast[i].addr, Q8_MAC_ADDR_LEN);
			ha->hw.nmcast++;

			return 0;
		}
	}
	return 0;
}
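/*
 * Name: qla_hw_del_mcast
 * Function: Removes one multicast address from the hardware filter and
 *	clears its slot in the driver's multicast table.
 */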
static int
qla_hw_del_mcast(qla_host_t *ha, uint8_t *mta)
{
	int i;

	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
		if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) {
			if (qla_config_mac_addr(ha, mta, 0))
				return (-1);

			ha->hw.mcast[i].addr[0] = 0;
			ha->hw.mcast[i].addr[1] = 0;
			ha->hw.mcast[i].addr[2] = 0;
			ha->hw.mcast[i].addr[3] = 0;
			ha->hw.mcast[i].addr[4] = 0;
			ha->hw.mcast[i].addr[5] = 0;

			ha->hw.nmcast--;

			return 0;
		}
	}
	return 0;
}
/*
 * Name: ql_hw_set_multi
 * Function: Sets the Multicast Addresses provided by the host O.S. into the
 *	hardware (for the given interface).
 */
int
ql_hw_set_multi(qla_host_t *ha, uint8_t *mcast, uint32_t mcnt,
	uint32_t add_multi)
{
	int i;
	uint8_t *mta = mcast;
	int ret = 0;

	/* add_multi: non-zero adds the addresses, zero deletes them */
	for (i = 0; i < mcnt; i++) {
		if (add_multi) {
			ret = qla_hw_add_mcast(ha, mta);
			if (ret)
				break;
		} else {
			ret = qla_hw_del_mcast(ha, mta);
			if (ret)
				break;
		}

		mta += Q8_MAC_ADDR_LEN;
	}
	return (ret);
}
/*
 * Name: qla_hw_tx_done_locked
 * Function: Handle Transmit Completions
 */
static void
qla_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx)
{
	qla_tx_buf_t *txb;
	qla_hw_t *hw = &ha->hw;
	uint32_t comp_idx, comp_count = 0;
	qla_hw_tx_cntxt_t *hw_tx_cntxt;

	hw_tx_cntxt = &hw->tx_cntxt[txr_idx];

	/* retrieve index of last entry in tx ring completed */
	comp_idx = qla_le32_to_host(*(hw_tx_cntxt->tx_cons));

	while (comp_idx != hw_tx_cntxt->txr_comp) {
		txb = &ha->tx_ring[txr_idx].tx_buf[hw_tx_cntxt->txr_comp];

		hw_tx_cntxt->txr_comp++;
		if (hw_tx_cntxt->txr_comp == NUM_TX_DESCRIPTORS)
			hw_tx_cntxt->txr_comp = 0;

		comp_count++;

		if (txb->m_head) {
			ha->ifp->if_opackets++;

			bus_dmamap_sync(ha->tx_tag, txb->map,
				BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ha->tx_tag, txb->map);
			m_freem(txb->m_head);

			txb->m_head = NULL;
		}
	}

	hw_tx_cntxt->txr_free += comp_count;
	return;
}
/*
 * Name: ql_hw_tx_done
 * Function: Handle Transmit Completions
 */
void
ql_hw_tx_done(qla_host_t *ha)
{
	int i;
	uint32_t flag = 0;

	if (!mtx_trylock(&ha->tx_lock)) {
		QL_DPRINT8(ha, (ha->pci_dev,
			"%s: !mtx_trylock(&ha->tx_lock)\n", __func__));
		return;
	}

	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		qla_hw_tx_done_locked(ha, i);
		if (ha->hw.tx_cntxt[i].txr_free <= (NUM_TX_DESCRIPTORS >> 1))
			flag = 1;
	}

	if (!flag)
		ha->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	mtx_unlock(&ha->tx_lock);
	return;
}
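/*
 * Name: ql_update_link_state
 * Function: Reads the link state from the hardware and notifies the stack
 *	when it changes.
 */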
void
ql_update_link_state(qla_host_t *ha)
{
	uint32_t link_state;
	uint32_t prev_link_state;

	if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		ha->hw.link_up = 0;
		return;
	}

	link_state = READ_REG32(ha, Q8_LINK_STATE);

	prev_link_state = ha->hw.link_up;

	/* each pci function owns a 4-bit field of the link state register */
	if (ha->pci_func == 0)
		ha->hw.link_up = (((link_state & 0xF) == 1) ? 1 : 0);
	else
		ha->hw.link_up = ((((link_state >> 4) & 0xF) == 1) ? 1 : 0);

	if (prev_link_state != ha->hw.link_up) {
		if (ha->hw.link_up) {
			if_link_state_change(ha->ifp, LINK_STATE_UP);
		} else {
			if_link_state_change(ha->ifp, LINK_STATE_DOWN);
		}
	}
	return;
}
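/*
 * Name: ql_hw_stop_rcv
 * Function: Waits (up to one second, in 10ms steps) for receive processing
 *	on all status descriptor rings to go idle.
 */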
void
ql_hw_stop_rcv(qla_host_t *ha)
{
	int i, done, count = 100;

	while (count) {
		done = 1;
		for (i = 0; i < ha->hw.num_sds_rings; i++) {
			if (ha->hw.sds[i].rcv_active)
				done = 0;
		}
		if (done)
			break;

		qla_mdelay(__func__, 10);
		count--;
	}
	if (!count)
		device_printf(ha->pci_dev, "%s: Counter expired.\n", __func__);

	return;
}
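/*
 * Name: ql_hw_check_health
 * Function: Periodic sanity check; every 1000 invocations it verifies the
 *	ASIC temperature and the firmware heartbeat, returning -1 on failure.
 */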
int
ql_hw_check_health(qla_host_t *ha)
{
	uint32_t val;

	ha->hw.health_count++;

	if (ha->hw.health_count < 1000)
		return 0;

	ha->hw.health_count = 0;

	val = READ_REG32(ha, Q8_ASIC_TEMPERATURE);

	if (((val & 0xFFFF) == 2) || ((val & 0xFFFF) == 3) ||
		(QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE))) {
		device_printf(ha->pci_dev, "%s: Temperature Alert [0x%08x]\n",
			__func__, val);
		return -1;
	}

	val = READ_REG32(ha, Q8_FIRMWARE_HEARTBEAT);

	/* the heartbeat register must change between successive checks */
	if ((val != ha->hw.hbeat_value) &&
		(!(QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE)))) {
		ha->hw.hbeat_value = val;
		return 0;
	}

	device_printf(ha->pci_dev, "%s: Heartbeat Failure [0x%08x]\n",
		__func__, val);

	return -1;
}
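/*
 * Name: qla_get_minidump_tmplt_size
 * Function: Queries the firmware for the size of the minidump template.
 */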
static int
qla_get_minidump_tmplt_size(qla_host_t *ha, uint32_t *size)
{
	uint32_t err;
	device_t dev = ha->pci_dev;
	q80_config_md_templ_size_t *md_size;
	q80_config_md_templ_size_rsp_t *md_size_rsp;

	md_size = (q80_config_md_templ_size_t *) ha->hw.mbox;
	bzero(md_size, sizeof(q80_config_md_templ_size_t));

	md_size->opcode = Q8_MBX_GET_MINIDUMP_TMPLT_SIZE;
	md_size->count_version = (sizeof (q80_config_md_templ_size_t) >> 2);
	md_size->count_version |= Q8_MBX_CMD_VERSION;

	if (qla_mbx_cmd(ha, (uint32_t *) md_size,
		(sizeof(q80_config_md_templ_size_t) >> 2), ha->hw.mbox,
		(sizeof(q80_config_md_templ_size_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed\n", __func__);
		return (-1);
	}

	md_size_rsp = (q80_config_md_templ_size_rsp_t *) ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(md_size_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
		return (-1);
	}

	*size = md_size_rsp->templ_size;

	return (0);
}
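/*
 * Name: qla_get_minidump_template
 * Function: Asks the firmware to copy the minidump template into the
 *	driver's pre-allocated DMA buffer.
 */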
static int
qla_get_minidump_template(qla_host_t *ha)
{
	uint32_t err;
	device_t dev = ha->pci_dev;
	q80_config_md_templ_cmd_t *md_templ;
	q80_config_md_templ_cmd_rsp_t *md_templ_rsp;

	md_templ = (q80_config_md_templ_cmd_t *) ha->hw.mbox;
	bzero(md_templ, (sizeof (q80_config_md_templ_cmd_t)));

	md_templ->opcode = Q8_MBX_GET_MINIDUMP_TMPLT;
	md_templ->count_version = (sizeof(q80_config_md_templ_cmd_t) >> 2);
	md_templ->count_version |= Q8_MBX_CMD_VERSION;

	md_templ->buf_addr = ha->hw.dma_buf.minidump.dma_addr;
	md_templ->buff_size = ha->hw.dma_buf.minidump.size;

	if (qla_mbx_cmd(ha, (uint32_t *) md_templ,
		(sizeof(q80_config_md_templ_cmd_t) >> 2),
		ha->hw.mbox,
		(sizeof(q80_config_md_templ_cmd_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed\n", __func__);
		return (-1);
	}

	md_templ_rsp = (q80_config_md_templ_cmd_rsp_t *) ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(md_templ_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
		return (-1);
	}

	return (0);
}
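/*
 * Name: qla_minidump_init
 * Function: Queries the template size, allocates the DMA buffer and
 *	retrieves the minidump template from the firmware.
 */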
static int
qla_minidump_init(qla_host_t *ha)
{
	int ret = 0;
	uint32_t template_size = 0;
	device_t dev = ha->pci_dev;

	/*
	 * Get Minidump Template Size
	 */
	ret = qla_get_minidump_tmplt_size(ha, &template_size);

	if (ret || (template_size == 0)) {
		device_printf(dev, "%s: failed [%d, %d]\n", __func__, ret,
			template_size);
		return (-1);
	}

	/*
	 * Allocate Memory for Minidump Template
	 */
	ha->hw.dma_buf.minidump.alignment = 8;
	ha->hw.dma_buf.minidump.size = template_size;

	if (ql_alloc_dmabuf(ha, &ha->hw.dma_buf.minidump)) {
		device_printf(dev, "%s: minidump dma alloc failed\n",
			__func__);
		return (-1);
	}
	ha->hw.dma_buf.flags.minidump = 1;

	/*
	 * Retrieve Minidump Template
	 */
	ret = qla_get_minidump_template(ha);

	if (ret)
		qla_minidump_free(ha);
	else
		ha->hw.mdump_init = 1;

	return (ret);
}
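/*
 * Name: qla_minidump_free
 * Function: Releases the minidump template DMA buffer.
 */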
static void
qla_minidump_free(qla_host_t *ha)
{
	ha->hw.mdump_init = 0;
	if (ha->hw.dma_buf.flags.minidump) {
		ha->hw.dma_buf.flags.minidump = 0;
		ql_free_dmabuf(ha, &ha->hw.dma_buf.minidump);
	}
	return;
}
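/*
 * Name: ql_minidump
 * Function: Triggers capture of the firmware minidump: stops the run
 *	sequence, waits for mdump_active to clear (up to 6000 x 100ms),
 *	then restarts the sequence.
 */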
void
ql_minidump(qla_host_t *ha)
{
	uint32_t delay = 6000;

	if (!ha->hw.mdump_init)
		return;

	if (!ha->hw.mdump_active)
		return;

	if (ha->hw.mdump_active == 1) {
		ha->hw.mdump_start_seq_index = ql_stop_sequence(ha);
		ha->hw.mdump_start = 1;
	}

	/* wait for the firmware to finish the capture */
	while (delay-- && ha->hw.mdump_active) {
		qla_mdelay(__func__, 100);
	}

	ha->hw.mdump_start = 0;
	ql_start_sequence(ha, ha->hw.mdump_start_seq_index);

	return;
}