2 * Copyright (c) 2013-2016 QLogic Corporation
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
30 * Author : David C Somayajulu, QLogic Corporation, Aliso Viejo, CA 92656.
31 * Content: Contains hardware-dependent functions
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
40 #include "ql_inline.h"
44 #include "ql_minidump.h"
50 static void qla_del_rcv_cntxt(qla_host_t *ha);
51 static int qla_init_rcv_cntxt(qla_host_t *ha);
52 static void qla_del_xmt_cntxt(qla_host_t *ha);
53 static int qla_init_xmt_cntxt(qla_host_t *ha);
54 static void qla_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx);
55 static int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
56 uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause);
57 static int qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx,
58 uint32_t num_intrs, uint32_t create);
59 static int qla_config_rss(qla_host_t *ha, uint16_t cntxt_id);
60 static int qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id,
61 int tenable, int rcv);
62 static int qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode);
63 static int qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id);
65 static int qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd,
67 static int qla_hw_add_all_mcast(qla_host_t *ha);
68 static int qla_hw_del_all_mcast(qla_host_t *ha);
69 static int qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds);
71 static int qla_init_nic_func(qla_host_t *ha);
72 static int qla_stop_nic_func(qla_host_t *ha);
73 static int qla_query_fw_dcbx_caps(qla_host_t *ha);
74 static int qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits);
75 static int qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits);
76 static void qla_get_quick_stats(qla_host_t *ha);
78 static void ql_minidump_free(qla_host_t *ha);
82 qla_sysctl_get_drvr_stats(SYSCTL_HANDLER_ARGS)
88 err = sysctl_handle_int(oidp, &ret, 0, req);
90 if (err || !req->newptr)
95 ha = (qla_host_t *)arg1;
97 for (i = 0; i < ha->hw.num_sds_rings; i++)
98 device_printf(ha->pci_dev,
99 "%s: sds_ring[%d] = %p\n", __func__,i,
100 (void *)ha->hw.sds[i].intr_count);
102 for (i = 0; i < ha->hw.num_tx_rings; i++)
103 device_printf(ha->pci_dev,
104 "%s: tx[%d] = %p\n", __func__,i,
105 (void *)ha->tx_ring[i].count);
107 for (i = 0; i < ha->hw.num_rds_rings; i++)
108 device_printf(ha->pci_dev,
109 "%s: rds_ring[%d] = %p\n", __func__,i,
110 (void *)ha->hw.rds[i].count);
112 device_printf(ha->pci_dev, "%s: lro_pkt_count = %p\n", __func__,
113 (void *)ha->lro_pkt_count);
115 device_printf(ha->pci_dev, "%s: lro_bytes = %p\n", __func__,
116 (void *)ha->lro_bytes);
118 #ifdef QL_ENABLE_ISCSI_TLV
119 device_printf(ha->pci_dev, "%s: iscsi_pkts = %p\n", __func__,
120 (void *)ha->hw.iscsi_pkt_count);
121 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
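	/*
	 * Note: the 64-bit counters above are cast to (void *) and printed
	 * with %p, a shortcut that renders them as pointer-width hex
	 * (and therefore truncates them on 32-bit builds).
	 */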
128 qla_sysctl_get_quick_stats(SYSCTL_HANDLER_ARGS)
133 err = sysctl_handle_int(oidp, &ret, 0, req);
135 if (err || !req->newptr)
139 ha = (qla_host_t *)arg1;
140 qla_get_quick_stats(ha);
148 qla_stop_pegs(qla_host_t *ha)
152 ql_rdwr_indreg32(ha, Q8_CRB_PEG_0, &val, 0);
153 ql_rdwr_indreg32(ha, Q8_CRB_PEG_1, &val, 0);
154 ql_rdwr_indreg32(ha, Q8_CRB_PEG_2, &val, 0);
155 ql_rdwr_indreg32(ha, Q8_CRB_PEG_3, &val, 0);
156 ql_rdwr_indreg32(ha, Q8_CRB_PEG_4, &val, 0);
156 device_printf(ha->pci_dev, "%s: PEGS HALTED!!!!!\n", __func__);
161 qla_sysctl_stop_pegs(SYSCTL_HANDLER_ARGS)
166 err = sysctl_handle_int(oidp, &ret, 0, req);
169 if (err || !req->newptr)
173 ha = (qla_host_t *)arg1;
174 (void)QLA_LOCK(ha, __func__, 0);
176 QLA_UNLOCK(ha, __func__);
181 #endif /* #ifdef QL_DBG */
184 qla_validate_set_port_cfg_bit(uint32_t bits)
186 if ((bits & 0xF) > 1)
189 if (((bits >> 4) & 0xF) > 2)
192 if (((bits >> 8) & 0xF) > 2)
199 qla_sysctl_port_cfg(SYSCTL_HANDLER_ARGS)
205 err = sysctl_handle_int(oidp, &ret, 0, req);
207 if (err || !req->newptr)
210 if (qla_validate_set_port_cfg_bit((uint32_t)ret) == 0) {
212 ha = (qla_host_t *)arg1;
214 err = qla_get_port_config(ha, &cfg_bits);
217 goto qla_sysctl_set_port_cfg_exit;
220 cfg_bits |= Q8_PORT_CFG_BITS_DCBX_ENABLE;
222 cfg_bits &= ~Q8_PORT_CFG_BITS_DCBX_ENABLE;
226 cfg_bits &= ~Q8_PORT_CFG_BITS_PAUSE_CFG_MASK;
228 if ((ret & 0xF) == 0) {
229 cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_DISABLED;
230 } else if ((ret & 0xF) == 1){
231 cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_STD;
233 cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_PPM;
237 cfg_bits &= ~Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK;
240 cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT_RCV;
241 } else if (ret == 1){
242 cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT;
244 cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_RCV;
247 err = qla_set_port_config(ha, cfg_bits);
249 ha = (qla_host_t *)arg1;
251 err = qla_get_port_config(ha, &cfg_bits);
254 qla_sysctl_set_port_cfg_exit:
259 * Name: ql_hw_add_sysctls
260 * Function: Add P3Plus specific sysctls
263 ql_hw_add_sysctls(qla_host_t *ha)
269 ha->hw.num_sds_rings = MAX_SDS_RINGS;
270 ha->hw.num_rds_rings = MAX_RDS_RINGS;
271 ha->hw.num_tx_rings = NUM_TX_RINGS;
273 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
274 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
275 OID_AUTO, "num_rds_rings", CTLFLAG_RD, &ha->hw.num_rds_rings,
276 ha->hw.num_rds_rings, "Number of Rcv Descriptor Rings");
278 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
279 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
280 OID_AUTO, "num_sds_rings", CTLFLAG_RD, &ha->hw.num_sds_rings,
281 ha->hw.num_sds_rings, "Number of Status Descriptor Rings");
283 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
284 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
285 OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->hw.num_tx_rings,
286 ha->hw.num_tx_rings, "Number of Transmit Rings");
288 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
289 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
290 OID_AUTO, "tx_ring_index", CTLFLAG_RW, &ha->txr_idx,
291 ha->txr_idx, "Tx Ring Used");
293 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
294 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
295 OID_AUTO, "drvr_stats", CTLTYPE_INT | CTLFLAG_RW,
297 qla_sysctl_get_drvr_stats, "I", "Driver Maintained Statistics");
299 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
300 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
301 OID_AUTO, "quick_stats", CTLTYPE_INT | CTLFLAG_RW,
303 qla_sysctl_get_quick_stats, "I", "Quick Statistics");
305 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
306 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
307 OID_AUTO, "max_tx_segs", CTLFLAG_RD, &ha->hw.max_tx_segs,
308 ha->hw.max_tx_segs, "Max # of Segments in a non-TSO pkt");
310 ha->hw.sds_cidx_thres = 32;
311 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
312 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
313 OID_AUTO, "sds_cidx_thres", CTLFLAG_RW, &ha->hw.sds_cidx_thres,
314 ha->hw.sds_cidx_thres,
315 "Number of SDS entries to process before updating"
316 " SDS Ring Consumer Index");
318 ha->hw.rds_pidx_thres = 32;
319 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
320 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
321 OID_AUTO, "rds_pidx_thres", CTLFLAG_RW, &ha->hw.rds_pidx_thres,
322 ha->hw.rds_pidx_thres,
323 "Number of Rcv Rings Entries to post before updating"
324 " RDS Ring Producer Index");
326 ha->hw.rcv_intr_coalesce = (3 << 16) | 256;
327 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
328 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
329 OID_AUTO, "rcv_intr_coalesce", CTLFLAG_RW,
330 &ha->hw.rcv_intr_coalesce,
331 ha->hw.rcv_intr_coalesce,
332 "Rcv Intr Coalescing Parameters\n"
333 "\tbits 15:0 max packets\n"
334 "\tbits 31:16 max micro-seconds to wait\n"
336 "\tifconfig <if> down && ifconfig <if> up\n"
337 "\tto take effect \n");
339 ha->hw.xmt_intr_coalesce = (64 << 16) | 64;
340 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
341 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
342 OID_AUTO, "xmt_intr_coalesce", CTLFLAG_RW,
343 &ha->hw.xmt_intr_coalesce,
344 ha->hw.xmt_intr_coalesce,
345 "Xmt Intr Coalescing Parameters\n"
346 "\tbits 15:0 max packets\n"
347 "\tbits 31:16 max micro-seconds to wait\n"
349 "\tifconfig <if> down && ifconfig <if> up\n"
350 "\tto take effect \n");
352 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
353 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
354 OID_AUTO, "port_cfg", CTLTYPE_INT | CTLFLAG_RW,
356 qla_sysctl_port_cfg, "I",
357 "Set Port Configuration if values below "
358 "otherwise Get Port Configuration\n"
359 "\tBits 0-3 ; 1 = DCBX Enable; 0 = DCBX Disable\n"
360 "\tBits 4-7 : 0 = no pause; 1 = std ; 2 = ppm \n"
361 "\tBits 8-11: std pause cfg; 0 = xmt and rcv;"
362 " 1 = xmt only; 2 = rcv only;\n"
365 ha->hw.enable_9kb = 1;
367 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
368 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
369 OID_AUTO, "enable_9kb", CTLFLAG_RW, &ha->hw.enable_9kb,
370 ha->hw.enable_9kb, "Enable 9Kbyte Buffers when MTU = 9000");
372 ha->hw.mdump_active = 0;
373 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
374 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
375 OID_AUTO, "minidump_active", CTLFLAG_RW, &ha->hw.mdump_active,
377 "Minidump retrieval is Active");
379 ha->hw.mdump_done = 0;
380 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
381 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
382 OID_AUTO, "mdump_done", CTLFLAG_RW,
383 &ha->hw.mdump_done, ha->hw.mdump_done,
384 "Minidump has been done and available for retrieval");
386 ha->hw.mdump_capture_mask = 0xF;
387 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
388 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
389 OID_AUTO, "minidump_capture_mask", CTLFLAG_RW,
390 &ha->hw.mdump_capture_mask, ha->hw.mdump_capture_mask,
391 "Minidump capture mask");
395 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
396 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
397 OID_AUTO, "err_inject",
398 CTLFLAG_RW, &ha->err_inject, ha->err_inject,
399 "Error to be injected\n"
400 "\t\t\t 0: No Errors\n"
401 "\t\t\t 1: rcv: rxb struct invalid\n"
402 "\t\t\t 2: rcv: mp == NULL\n"
403 "\t\t\t 3: lro: rxb struct invalid\n"
404 "\t\t\t 4: lro: mp == NULL\n"
405 "\t\t\t 5: rcv: num handles invalid\n"
406 "\t\t\t 6: reg: indirect reg rd_wr failure\n"
407 "\t\t\t 7: ocm: offchip memory rd_wr failure\n"
408 "\t\t\t 8: mbx: mailbox command failure\n"
409 "\t\t\t 9: heartbeat failure\n"
410 "\t\t\t A: temperature failure\n" );
412 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
413 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
414 OID_AUTO, "peg_stop", CTLTYPE_INT | CTLFLAG_RW,
416 qla_sysctl_stop_pegs, "I", "Peg Stop");
418 #endif /* #ifdef QL_DBG */
420 ha->hw.user_pri_nic = 0;
421 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
422 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
423 OID_AUTO, "user_pri_nic", CTLFLAG_RW, &ha->hw.user_pri_nic,
425 "VLAN Tag User Priority for Normal Ethernet Packets");
427 ha->hw.user_pri_iscsi = 4;
428 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
429 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
430 OID_AUTO, "user_pri_iscsi", CTLFLAG_RW, &ha->hw.user_pri_iscsi,
431 ha->hw.user_pri_iscsi,
432 "VLAN Tag User Priority for iSCSI Packets");
437 ql_hw_link_status(qla_host_t *ha)
439 device_printf(ha->pci_dev, "cable_oui\t\t 0x%08x\n", ha->hw.cable_oui);
441 if (ha->hw.link_up) {
442 device_printf(ha->pci_dev, "link Up\n");
444 device_printf(ha->pci_dev, "link Down\n");
447 if (ha->hw.flags.fduplex) {
448 device_printf(ha->pci_dev, "Full Duplex\n");
450 device_printf(ha->pci_dev, "Half Duplex\n");
453 if (ha->hw.flags.autoneg) {
454 device_printf(ha->pci_dev, "Auto Negotiation Enabled\n");
456 device_printf(ha->pci_dev, "Auto Negotiation Disabled\n");
459 switch (ha->hw.link_speed) {
461 device_printf(ha->pci_dev, "link speed\t\t 10Gbps\n");
465 device_printf(ha->pci_dev, "link speed\t\t 1Gbps\n");
469 device_printf(ha->pci_dev, "link speed\t\t 100Mbps\n");
473 device_printf(ha->pci_dev, "link speed\t\t Unknown\n");
477 switch (ha->hw.module_type) {
480 device_printf(ha->pci_dev, "Module Type 10GBase-LRM\n");
484 device_printf(ha->pci_dev, "Module Type 10GBase-LR\n");
488 device_printf(ha->pci_dev, "Module Type 10GBase-SR\n");
492 device_printf(ha->pci_dev,
493 "Module Type 10GE Passive Copper(Compliant)[%d m]\n",
494 ha->hw.cable_length);
498 device_printf(ha->pci_dev, "Module Type 10GE Active"
499 " Limiting Copper(Compliant)[%d m]\n",
500 ha->hw.cable_length);
504 device_printf(ha->pci_dev,
505 "Module Type 10GE Passive Copper"
506 " (Legacy, Best Effort)[%d m]\n",
507 ha->hw.cable_length);
511 device_printf(ha->pci_dev, "Module Type 1000Base-SX\n");
515 device_printf(ha->pci_dev, "Module Type 1000Base-LX\n");
519 device_printf(ha->pci_dev, "Module Type 1000Base-CX\n");
523 device_printf(ha->pci_dev, "Module Type 1000Base-T\n");
527 device_printf(ha->pci_dev, "Module Type 1GE Passive Copper "
528 "(Legacy, Best Effort)\n");
532 device_printf(ha->pci_dev, "Unknown Module Type 0x%x\n",
537 if (ha->hw.link_faults == 1)
538 device_printf(ha->pci_dev, "SFP Power Fault\n");
543 * Function: Frees the DMA'able memory allocated in ql_alloc_dma()
546 ql_free_dma(qla_host_t *ha)
550 if (ha->hw.dma_buf.flags.sds_ring) {
551 for (i = 0; i < ha->hw.num_sds_rings; i++) {
552 ql_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]);
554 ha->hw.dma_buf.flags.sds_ring = 0;
557 if (ha->hw.dma_buf.flags.rds_ring) {
558 for (i = 0; i < ha->hw.num_rds_rings; i++) {
559 ql_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]);
561 ha->hw.dma_buf.flags.rds_ring = 0;
564 if (ha->hw.dma_buf.flags.tx_ring) {
565 ql_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring);
566 ha->hw.dma_buf.flags.tx_ring = 0;
568 ql_minidump_free(ha);
573 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
576 ql_alloc_dma(qla_host_t *ha)
579 uint32_t i, j, size, tx_ring_size;
581 qla_hw_tx_cntxt_t *tx_cntxt;
587 QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));
591 * Allocate Transmit Ring
593 tx_ring_size = (sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS);
594 size = (tx_ring_size * ha->hw.num_tx_rings);
596 hw->dma_buf.tx_ring.alignment = 8;
597 hw->dma_buf.tx_ring.size = size + PAGE_SIZE;
599 if (ql_alloc_dmabuf(ha, &hw->dma_buf.tx_ring)) {
600 device_printf(dev, "%s: tx ring alloc failed\n", __func__);
601 goto ql_alloc_dma_exit;
604 vaddr = (uint8_t *)hw->dma_buf.tx_ring.dma_b;
605 paddr = hw->dma_buf.tx_ring.dma_addr;
607 for (i = 0; i < ha->hw.num_tx_rings; i++) {
608 tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];
610 tx_cntxt->tx_ring_base = (q80_tx_cmd_t *)vaddr;
611 tx_cntxt->tx_ring_paddr = paddr;
613 vaddr += tx_ring_size;
614 paddr += tx_ring_size;
617 for (i = 0; i < ha->hw.num_tx_rings; i++) {
618 tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];
620 tx_cntxt->tx_cons = (uint32_t *)vaddr;
621 tx_cntxt->tx_cons_paddr = paddr;
623 vaddr += sizeof (uint32_t);
624 paddr += sizeof (uint32_t);
627 ha->hw.dma_buf.flags.tx_ring = 1;
629 QL_DPRINT2(ha, (dev, "%s: tx_ring phys %p virt %p\n",
630 __func__, (void *)(hw->dma_buf.tx_ring.dma_addr),
631 hw->dma_buf.tx_ring.dma_b));
633 * Allocate Receive Descriptor Rings
636 for (i = 0; i < hw->num_rds_rings; i++) {
638 hw->dma_buf.rds_ring[i].alignment = 8;
639 hw->dma_buf.rds_ring[i].size =
640 (sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS;
642 if (ql_alloc_dmabuf(ha, &hw->dma_buf.rds_ring[i])) {
643 device_printf(dev, "%s: rds ring[%d] alloc failed\n",
646 for (j = 0; j < i; j++)
647 ql_free_dmabuf(ha, &hw->dma_buf.rds_ring[j]);
649 goto ql_alloc_dma_exit;
651 QL_DPRINT4(ha, (dev, "%s: rx_ring[%d] phys %p virt %p\n",
652 __func__, i, (void *)(hw->dma_buf.rds_ring[i].dma_addr),
653 hw->dma_buf.rds_ring[i].dma_b));
656 hw->dma_buf.flags.rds_ring = 1;
659 * Allocate Status Descriptor Rings
662 for (i = 0; i < hw->num_sds_rings; i++) {
663 hw->dma_buf.sds_ring[i].alignment = 8;
664 hw->dma_buf.sds_ring[i].size =
665 (sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS;
667 if (ql_alloc_dmabuf(ha, &hw->dma_buf.sds_ring[i])) {
668 device_printf(dev, "%s: sds ring alloc failed\n",
671 for (j = 0; j < i; j++)
672 ql_free_dmabuf(ha, &hw->dma_buf.sds_ring[j]);
674 goto ql_alloc_dma_exit;
676 QL_DPRINT4(ha, (dev, "%s: sds_ring[%d] phys %p virt %p\n",
678 (void *)(hw->dma_buf.sds_ring[i].dma_addr),
679 hw->dma_buf.sds_ring[i].dma_b));
681 for (i = 0; i < hw->num_sds_rings; i++) {
682 hw->sds[i].sds_ring_base =
683 (q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b;
686 hw->dma_buf.flags.sds_ring = 1;
695 #define Q8_MBX_MSEC_DELAY 5000
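/*
 * Mailbox handshake as implemented below (a summary, not firmware
 * documentation):
 *   1. Poll Q8_HOST_MBOX_CNTRL (up to Q8_MBX_MSEC_DELAY iterations,
 *      pausing 1ms per poll) until the firmware has consumed any
 *      previous command.
 *   2. Write the n_hmbox request words to Q8_HOST_MBOX0.. and set
 *      Q8_HOST_MBOX_CNTRL to 1 to signal the firmware.
 *   3. Poll Q8_FW_MBOX_CNTRL for the completion; responses whose
 *      Q8_FW_MBOX0 status nibble still reads 0x8xxx are treated as
 *      not yet complete.
 *   4. Read n_fwmbox response words from Q8_FW_MBOX0.., then clear
 *      Q8_FW_MBOX_CNTRL and unmask the mailbox interrupt.
 * A timeout in step 1 or 3 sets qla_initiate_recovery.
 */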
698 qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
699 uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause)
705 if (QL_ERR_INJECT(ha, INJCT_MBX_CMD_FAILURE)) {
707 ha->qla_initiate_recovery = 1;
708 goto exit_qla_mbx_cmd;
714 i = Q8_MBX_MSEC_DELAY;
717 data = READ_REG32(ha, Q8_HOST_MBOX_CNTRL);
723 qla_mdelay(__func__, 1);
729 device_printf(ha->pci_dev, "%s: host_mbx_cntrl 0x%08x\n",
732 ha->qla_initiate_recovery = 1;
733 goto exit_qla_mbx_cmd;
736 for (i = 0; i < n_hmbox; i++) {
737 WRITE_REG32(ha, (Q8_HOST_MBOX0 + (i << 2)), *h_mbox);
741 WRITE_REG32(ha, Q8_HOST_MBOX_CNTRL, 0x1);
744 i = Q8_MBX_MSEC_DELAY;
746 data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);
748 if ((data & 0x3) == 1) {
749 data = READ_REG32(ha, Q8_FW_MBOX0);
750 if ((data & 0xF000) != 0x8000)
756 qla_mdelay(__func__, 1);
761 device_printf(ha->pci_dev, "%s: fw_mbx_cntrl 0x%08x\n",
764 ha->qla_initiate_recovery = 1;
765 goto exit_qla_mbx_cmd;
768 for (i = 0; i < n_fwmbox; i++) {
769 *fw_mbox++ = READ_REG32(ha, (Q8_FW_MBOX0 + (i << 2)));
772 WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
773 WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
780 qla_get_nic_partition(qla_host_t *ha, uint32_t *supports_9kb,
784 device_t dev = ha->pci_dev;
786 bzero(ha->hw.mbox, (sizeof (uint32_t) * Q8_NUM_MBOX));
790 mbox[0] = Q8_MBX_GET_NIC_PARTITION | (0x2 << 16) | (0x2 << 29);
792 if (qla_mbx_cmd(ha, mbox, 2, mbox, 19, 0)) {
793 device_printf(dev, "%s: failed0\n", __func__);
798 if (supports_9kb != NULL) {
799 if (mbox[16] & 0x80) /* bit 7 of mbox 16 */
805 if (num_rcvq != NULL)
806 *num_rcvq = ((mbox[6] >> 16) & 0xFFFF);
808 if ((err != 1) && (err != 0)) {
809 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
816 qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx, uint32_t num_intrs,
820 device_t dev = ha->pci_dev;
821 q80_config_intr_t *c_intr;
822 q80_config_intr_rsp_t *c_intr_rsp;
824 c_intr = (q80_config_intr_t *)ha->hw.mbox;
825 bzero(c_intr, (sizeof (q80_config_intr_t)));
827 c_intr->opcode = Q8_MBX_CONFIG_INTR;
829 c_intr->count_version = (sizeof (q80_config_intr_t) >> 2);
830 c_intr->count_version |= Q8_MBX_CMD_VERSION;
832 c_intr->nentries = num_intrs;
834 for (i = 0; i < num_intrs; i++) {
836 c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_CREATE;
837 c_intr->intr[i].msix_index = start_idx + 1 + i;
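			/* MSI-X vector 0 is reserved for the firmware mailbox
			 * (see qla_confirm_9kb_enable()), hence the "+ 1" so
			 * ring interrupts start at vector 1 */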
839 c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_DELETE;
840 c_intr->intr[i].msix_index =
841 ha->hw.intr_id[(start_idx + i)];
844 c_intr->intr[i].cmd_type |= Q8_MBX_CONFIG_INTR_TYPE_MSI_X;
847 if (qla_mbx_cmd(ha, (uint32_t *)c_intr,
848 (sizeof (q80_config_intr_t) >> 2),
849 ha->hw.mbox, (sizeof (q80_config_intr_rsp_t) >> 2), 0)) {
850 device_printf(dev, "%s: failed0\n", __func__);
854 c_intr_rsp = (q80_config_intr_rsp_t *)ha->hw.mbox;
856 err = Q8_MBX_RSP_STATUS(c_intr_rsp->regcnt_status);
859 device_printf(dev, "%s: failed1 [0x%08x, %d]\n", __func__, err,
860 c_intr_rsp->nentries);
862 for (i = 0; i < c_intr_rsp->nentries; i++) {
863 device_printf(dev, "%s: [%d]:[0x%x 0x%x 0x%x]\n",
865 c_intr_rsp->intr[i].status,
866 c_intr_rsp->intr[i].intr_id,
867 c_intr_rsp->intr[i].intr_src);
873 for (i = 0; ((i < num_intrs) && create); i++) {
874 if (!c_intr_rsp->intr[i].status) {
875 ha->hw.intr_id[(start_idx + i)] =
876 c_intr_rsp->intr[i].intr_id;
877 ha->hw.intr_src[(start_idx + i)] =
878 c_intr_rsp->intr[i].intr_src;
886 * Name: qla_config_rss
887 * Function: Configure RSS for the context/interface.
889 static const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL,
890 0x8030f20c77cb2da3ULL,
891 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
892 0x255b0ec26d5a56daULL };
895 qla_config_rss(qla_host_t *ha, uint16_t cntxt_id)
897 q80_config_rss_t *c_rss;
898 q80_config_rss_rsp_t *c_rss_rsp;
900 device_t dev = ha->pci_dev;
902 c_rss = (q80_config_rss_t *)ha->hw.mbox;
903 bzero(c_rss, (sizeof (q80_config_rss_t)));
905 c_rss->opcode = Q8_MBX_CONFIG_RSS;
907 c_rss->count_version = (sizeof (q80_config_rss_t) >> 2);
908 c_rss->count_version |= Q8_MBX_CMD_VERSION;
910 c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP_IP |
911 Q8_MBX_RSS_HASH_TYPE_IPV6_TCP_IP);
912 //c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP |
913 // Q8_MBX_RSS_HASH_TYPE_IPV6_TCP);
915 c_rss->flags = Q8_MBX_RSS_FLAGS_ENABLE_RSS;
916 c_rss->flags |= Q8_MBX_RSS_FLAGS_USE_IND_TABLE;
918 c_rss->indtbl_mask = Q8_MBX_RSS_INDTBL_MASK;
920 c_rss->indtbl_mask |= Q8_MBX_RSS_FLAGS_MULTI_RSS_VALID;
921 c_rss->flags |= Q8_MBX_RSS_FLAGS_TYPE_CRSS;
923 c_rss->cntxt_id = cntxt_id;
925 for (i = 0; i < 5; i++) {
926 c_rss->rss_key[i] = rss_key[i];
929 if (qla_mbx_cmd(ha, (uint32_t *)c_rss,
930 (sizeof (q80_config_rss_t) >> 2),
931 ha->hw.mbox, (sizeof(q80_config_rss_rsp_t) >> 2), 0)) {
932 device_printf(dev, "%s: failed0\n", __func__);
935 c_rss_rsp = (q80_config_rss_rsp_t *)ha->hw.mbox;
937 err = Q8_MBX_RSP_STATUS(c_rss_rsp->regcnt_status);
940 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
947 qla_set_rss_ind_table(qla_host_t *ha, uint32_t start_idx, uint32_t count,
948 uint16_t cntxt_id, uint8_t *ind_table)
950 q80_config_rss_ind_table_t *c_rss_ind;
951 q80_config_rss_ind_table_rsp_t *c_rss_ind_rsp;
953 device_t dev = ha->pci_dev;
955 if ((count > Q8_RSS_IND_TBL_SIZE) ||
956 ((start_idx + count - 1) > Q8_RSS_IND_TBL_MAX_IDX)) {
957 device_printf(dev, "%s: illegal count [%d, %d]\n", __func__,
962 c_rss_ind = (q80_config_rss_ind_table_t *)ha->hw.mbox;
963 bzero(c_rss_ind, sizeof (q80_config_rss_ind_table_t));
965 c_rss_ind->opcode = Q8_MBX_CONFIG_RSS_TABLE;
966 c_rss_ind->count_version = (sizeof (q80_config_rss_ind_table_t) >> 2);
967 c_rss_ind->count_version |= Q8_MBX_CMD_VERSION;
969 c_rss_ind->start_idx = start_idx;
970 c_rss_ind->end_idx = start_idx + count - 1;
971 c_rss_ind->cntxt_id = cntxt_id;
972 bcopy(ind_table, c_rss_ind->ind_table, count);
974 if (qla_mbx_cmd(ha, (uint32_t *)c_rss_ind,
975 (sizeof (q80_config_rss_ind_table_t) >> 2), ha->hw.mbox,
976 (sizeof(q80_config_rss_ind_table_rsp_t) >> 2), 0)) {
977 device_printf(dev, "%s: failed0\n", __func__);
981 c_rss_ind_rsp = (q80_config_rss_ind_table_rsp_t *)ha->hw.mbox;
982 err = Q8_MBX_RSP_STATUS(c_rss_ind_rsp->regcnt_status);
985 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
992 * Name: qla_config_intr_coalesce
993 * Function: Configure Interrupt Coalescing.
996 qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable,
999 q80_config_intr_coalesc_t *intrc;
1000 q80_config_intr_coalesc_rsp_t *intrc_rsp;
1002 device_t dev = ha->pci_dev;
1004 intrc = (q80_config_intr_coalesc_t *)ha->hw.mbox;
1005 bzero(intrc, (sizeof (q80_config_intr_coalesc_t)));
1007 intrc->opcode = Q8_MBX_CONFIG_INTR_COALESCE;
1008 intrc->count_version = (sizeof (q80_config_intr_coalesc_t) >> 2);
1009 intrc->count_version |= Q8_MBX_CMD_VERSION;
1012 intrc->flags = Q8_MBX_INTRC_FLAGS_RCV;
1013 intrc->max_pkts = ha->hw.rcv_intr_coalesce & 0xFFFF;
1014 intrc->max_mswait = (ha->hw.rcv_intr_coalesce >> 16) & 0xFFFF;
1016 intrc->flags = Q8_MBX_INTRC_FLAGS_XMT;
1017 intrc->max_pkts = ha->hw.xmt_intr_coalesce & 0xFFFF;
1018 intrc->max_mswait = (ha->hw.xmt_intr_coalesce >> 16) & 0xFFFF;
1021 intrc->cntxt_id = cntxt_id;
1024 intrc->flags |= Q8_MBX_INTRC_FLAGS_PERIODIC;
1025 intrc->timer_type = Q8_MBX_INTRC_TIMER_PERIODIC;
1027 for (i = 0; i < ha->hw.num_sds_rings; i++) {
1028 intrc->sds_ring_mask |= (1 << i);
1030 intrc->ms_timeout = 1000;
1033 if (qla_mbx_cmd(ha, (uint32_t *)intrc,
1034 (sizeof (q80_config_intr_coalesc_t) >> 2),
1035 ha->hw.mbox, (sizeof(q80_config_intr_coalesc_rsp_t) >> 2), 0)) {
1036 device_printf(dev, "%s: failed0\n", __func__);
1039 intrc_rsp = (q80_config_intr_coalesc_rsp_t *)ha->hw.mbox;
1041 err = Q8_MBX_RSP_STATUS(intrc_rsp->regcnt_status);
1044 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1053 * Name: qla_config_mac_addr
1054 * Function: binds a MAC address to the context/interface.
1055 * Can be unicast, multicast or broadcast.
1058 qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac)
1060 q80_config_mac_addr_t *cmac;
1061 q80_config_mac_addr_rsp_t *cmac_rsp;
1063 device_t dev = ha->pci_dev;
1065 cmac = (q80_config_mac_addr_t *)ha->hw.mbox;
1066 bzero(cmac, (sizeof (q80_config_mac_addr_t)));
1068 cmac->opcode = Q8_MBX_CONFIG_MAC_ADDR;
1069 cmac->count_version = sizeof (q80_config_mac_addr_t) >> 2;
1070 cmac->count_version |= Q8_MBX_CMD_VERSION;
1073 cmac->cmd = Q8_MBX_CMAC_CMD_ADD_MAC_ADDR;
1075 cmac->cmd = Q8_MBX_CMAC_CMD_DEL_MAC_ADDR;
1077 cmac->cmd |= Q8_MBX_CMAC_CMD_CAM_INGRESS;
1079 cmac->nmac_entries = 1;
1080 cmac->cntxt_id = ha->hw.rcv_cntxt_id;
1081 bcopy(mac_addr, cmac->mac_addr[0].addr, 6);
1083 if (qla_mbx_cmd(ha, (uint32_t *)cmac,
1084 (sizeof (q80_config_mac_addr_t) >> 2),
1085 ha->hw.mbox, (sizeof(q80_config_mac_addr_rsp_t) >> 2), 1)) {
1086 device_printf(dev, "%s: %s failed0\n", __func__,
1087 (add_mac ? "Add" : "Del"));
1090 cmac_rsp = (q80_config_mac_addr_rsp_t *)ha->hw.mbox;
1092 err = Q8_MBX_RSP_STATUS(cmac_rsp->regcnt_status);
1095 device_printf(dev, "%s: %s "
1096 "%02x:%02x:%02x:%02x:%02x:%02x failed1 [0x%08x]\n",
1097 __func__, (add_mac ? "Add" : "Del"),
1098 mac_addr[0], mac_addr[1], mac_addr[2],
1099 mac_addr[3], mac_addr[4], mac_addr[5], err);
1108 * Name: qla_set_mac_rcv_mode
1109 * Function: Enable/Disable AllMulticast and Promiscuous Modes.
1112 qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode)
1114 q80_config_mac_rcv_mode_t *rcv_mode;
1116 q80_config_mac_rcv_mode_rsp_t *rcv_mode_rsp;
1117 device_t dev = ha->pci_dev;
1119 rcv_mode = (q80_config_mac_rcv_mode_t *)ha->hw.mbox;
1120 bzero(rcv_mode, (sizeof (q80_config_mac_rcv_mode_t)));
1122 rcv_mode->opcode = Q8_MBX_CONFIG_MAC_RX_MODE;
1123 rcv_mode->count_version = sizeof (q80_config_mac_rcv_mode_t) >> 2;
1124 rcv_mode->count_version |= Q8_MBX_CMD_VERSION;
1126 rcv_mode->mode = mode;
1128 rcv_mode->cntxt_id = ha->hw.rcv_cntxt_id;
1130 if (qla_mbx_cmd(ha, (uint32_t *)rcv_mode,
1131 (sizeof (q80_config_mac_rcv_mode_t) >> 2),
1132 ha->hw.mbox, (sizeof(q80_config_mac_rcv_mode_rsp_t) >> 2), 1)) {
1133 device_printf(dev, "%s: failed0\n", __func__);
1136 rcv_mode_rsp = (q80_config_mac_rcv_mode_rsp_t *)ha->hw.mbox;
1138 err = Q8_MBX_RSP_STATUS(rcv_mode_rsp->regcnt_status);
1141 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1149 ql_set_promisc(qla_host_t *ha)
1153 ha->hw.mac_rcv_mode |= Q8_MBX_MAC_RCV_PROMISC_ENABLE;
1154 ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1159 qla_reset_promisc(qla_host_t *ha)
1161 ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_RCV_PROMISC_ENABLE;
1162 (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1166 ql_set_allmulti(qla_host_t *ha)
1170 ha->hw.mac_rcv_mode |= Q8_MBX_MAC_ALL_MULTI_ENABLE;
1171 ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1176 qla_reset_allmulti(qla_host_t *ha)
1178 ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_ALL_MULTI_ENABLE;
1179 (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1183 * Name: ql_set_max_mtu
1185 * Sets the maximum transmission unit (MTU) size for the specified rcv context.
1188 ql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id)
1191 q80_set_max_mtu_t *max_mtu;
1192 q80_set_max_mtu_rsp_t *max_mtu_rsp;
1197 max_mtu = (q80_set_max_mtu_t *)ha->hw.mbox;
1198 bzero(max_mtu, (sizeof (q80_set_max_mtu_t)));
1200 max_mtu->opcode = Q8_MBX_SET_MAX_MTU;
1201 max_mtu->count_version = (sizeof (q80_set_max_mtu_t) >> 2);
1202 max_mtu->count_version |= Q8_MBX_CMD_VERSION;
1204 max_mtu->cntxt_id = cntxt_id;
1207 if (qla_mbx_cmd(ha, (uint32_t *)max_mtu,
1208 (sizeof (q80_set_max_mtu_t) >> 2),
1209 ha->hw.mbox, (sizeof (q80_set_max_mtu_rsp_t) >> 2), 1)) {
1210 device_printf(dev, "%s: failed\n", __func__);
1214 max_mtu_rsp = (q80_set_max_mtu_rsp_t *)ha->hw.mbox;
1216 err = Q8_MBX_RSP_STATUS(max_mtu_rsp->regcnt_status);
1219 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1226 qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id)
1229 q80_link_event_t *lnk;
1230 q80_link_event_rsp_t *lnk_rsp;
1235 lnk = (q80_link_event_t *)ha->hw.mbox;
1236 bzero(lnk, (sizeof (q80_link_event_t)));
1238 lnk->opcode = Q8_MBX_LINK_EVENT_REQ;
1239 lnk->count_version = (sizeof (q80_link_event_t) >> 2);
1240 lnk->count_version |= Q8_MBX_CMD_VERSION;
1242 lnk->cntxt_id = cntxt_id;
1243 lnk->cmd = Q8_LINK_EVENT_CMD_ENABLE_ASYNC;
1245 if (qla_mbx_cmd(ha, (uint32_t *)lnk, (sizeof (q80_link_event_t) >> 2),
1246 ha->hw.mbox, (sizeof (q80_link_event_rsp_t) >> 2), 0)) {
1247 device_printf(dev, "%s: failed\n", __func__);
1251 lnk_rsp = (q80_link_event_rsp_t *)ha->hw.mbox;
1253 err = Q8_MBX_RSP_STATUS(lnk_rsp->regcnt_status);
1256 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1263 qla_config_fw_lro(qla_host_t *ha, uint16_t cntxt_id)
1266 q80_config_fw_lro_t *fw_lro;
1267 q80_config_fw_lro_rsp_t *fw_lro_rsp;
1272 fw_lro = (q80_config_fw_lro_t *)ha->hw.mbox;
1273 bzero(fw_lro, sizeof(q80_config_fw_lro_t));
1275 fw_lro->opcode = Q8_MBX_CONFIG_FW_LRO;
1276 fw_lro->count_version = (sizeof (q80_config_fw_lro_t) >> 2);
1277 fw_lro->count_version |= Q8_MBX_CMD_VERSION;
1279 fw_lro->flags |= Q8_MBX_FW_LRO_IPV4 | Q8_MBX_FW_LRO_IPV4_WO_DST_IP_CHK;
1280 fw_lro->flags |= Q8_MBX_FW_LRO_IPV6 | Q8_MBX_FW_LRO_IPV6_WO_DST_IP_CHK;
1282 fw_lro->cntxt_id = cntxt_id;
1284 if (qla_mbx_cmd(ha, (uint32_t *)fw_lro,
1285 (sizeof (q80_config_fw_lro_t) >> 2),
1286 ha->hw.mbox, (sizeof (q80_config_fw_lro_rsp_t) >> 2), 0)) {
1287 device_printf(dev, "%s: failed\n", __func__);
1291 fw_lro_rsp = (q80_config_fw_lro_rsp_t *)ha->hw.mbox;
1293 err = Q8_MBX_RSP_STATUS(fw_lro_rsp->regcnt_status);
1296 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1303 qla_xmt_stats(qla_host_t *ha, q80_xmt_stats_t *xstat, int i)
1305 device_t dev = ha->pci_dev;
1307 if (i < ha->hw.num_tx_rings) {
1308 device_printf(dev, "%s[%d]: total_bytes\t\t%" PRIu64 "\n",
1309 __func__, i, xstat->total_bytes);
1310 device_printf(dev, "%s[%d]: total_pkts\t\t%" PRIu64 "\n",
1311 __func__, i, xstat->total_pkts);
1312 device_printf(dev, "%s[%d]: errors\t\t%" PRIu64 "\n",
1313 __func__, i, xstat->errors);
1314 device_printf(dev, "%s[%d]: pkts_dropped\t%" PRIu64 "\n",
1315 __func__, i, xstat->pkts_dropped);
1316 device_printf(dev, "%s[%d]: switch_pkts\t\t%" PRIu64 "\n",
1317 __func__, i, xstat->switch_pkts);
1318 device_printf(dev, "%s[%d]: num_buffers\t\t%" PRIu64 "\n",
1319 __func__, i, xstat->num_buffers);
1321 device_printf(dev, "%s: total_bytes\t\t\t%" PRIu64 "\n",
1322 __func__, xstat->total_bytes);
1323 device_printf(dev, "%s: total_pkts\t\t\t%" PRIu64 "\n",
1324 __func__, xstat->total_pkts);
1325 device_printf(dev, "%s: errors\t\t\t%" PRIu64 "\n",
1326 __func__, xstat->errors);
1327 device_printf(dev, "%s: pkts_dropped\t\t\t%" PRIu64 "\n",
1328 __func__, xstat->pkts_dropped);
1329 device_printf(dev, "%s: switch_pkts\t\t\t%" PRIu64 "\n",
1330 __func__, xstat->switch_pkts);
1331 device_printf(dev, "%s: num_buffers\t\t\t%" PRIu64 "\n",
1332 __func__, xstat->num_buffers);
1337 qla_rcv_stats(qla_host_t *ha, q80_rcv_stats_t *rstat)
1339 device_t dev = ha->pci_dev;
1341 device_printf(dev, "%s: total_bytes\t\t\t%" PRIu64 "\n", __func__,
1342 rstat->total_bytes);
1343 device_printf(dev, "%s: total_pkts\t\t\t%" PRIu64 "\n", __func__,
1345 device_printf(dev, "%s: lro_pkt_count\t\t%" PRIu64 "\n", __func__,
1346 rstat->lro_pkt_count);
1347 device_printf(dev, "%s: sw_pkt_count\t\t\t%" PRIu64 "\n", __func__,
1348 rstat->sw_pkt_count);
1349 device_printf(dev, "%s: ip_chksum_err\t\t%" PRIu64 "\n", __func__,
1350 rstat->ip_chksum_err);
1351 device_printf(dev, "%s: pkts_wo_acntxts\t\t%" PRIu64 "\n", __func__,
1352 rstat->pkts_wo_acntxts);
1353 device_printf(dev, "%s: pkts_dropped_no_sds_card\t%" PRIu64 "\n",
1354 __func__, rstat->pkts_dropped_no_sds_card);
1355 device_printf(dev, "%s: pkts_dropped_no_sds_host\t%" PRIu64 "\n",
1356 __func__, rstat->pkts_dropped_no_sds_host);
1357 device_printf(dev, "%s: oversized_pkts\t\t%" PRIu64 "\n", __func__,
1358 rstat->oversized_pkts);
1359 device_printf(dev, "%s: pkts_dropped_no_rds\t\t%" PRIu64 "\n",
1360 __func__, rstat->pkts_dropped_no_rds);
1361 device_printf(dev, "%s: unxpctd_mcast_pkts\t\t%" PRIu64 "\n",
1362 __func__, rstat->unxpctd_mcast_pkts);
1363 device_printf(dev, "%s: re1_fbq_error\t\t%" PRIu64 "\n", __func__,
1364 rstat->re1_fbq_error);
1365 device_printf(dev, "%s: invalid_mac_addr\t\t%" PRIu64 "\n", __func__,
1366 rstat->invalid_mac_addr);
1367 device_printf(dev, "%s: rds_prime_trys\t\t%" PRIu64 "\n", __func__,
1368 rstat->rds_prime_trys);
1369 device_printf(dev, "%s: rds_prime_success\t\t%" PRIu64 "\n", __func__,
1370 rstat->rds_prime_success);
1371 device_printf(dev, "%s: lro_flows_added\t\t%" PRIu64 "\n", __func__,
1372 rstat->lro_flows_added);
1373 device_printf(dev, "%s: lro_flows_deleted\t\t%" PRIu64 "\n", __func__,
1374 rstat->lro_flows_deleted);
1375 device_printf(dev, "%s: lro_flows_active\t\t%" PRIu64 "\n", __func__,
1376 rstat->lro_flows_active);
1377 device_printf(dev, "%s: pkts_droped_unknown\t\t%" PRIu64 "\n",
1378 __func__, rstat->pkts_droped_unknown);
1382 qla_mac_stats(qla_host_t *ha, q80_mac_stats_t *mstat)
1384 device_t dev = ha->pci_dev;
1386 device_printf(dev, "%s: xmt_frames\t\t\t%" PRIu64 "\n", __func__,
1388 device_printf(dev, "%s: xmt_bytes\t\t\t%" PRIu64 "\n", __func__,
1390 device_printf(dev, "%s: xmt_mcast_pkts\t\t%" PRIu64 "\n", __func__,
1391 mstat->xmt_mcast_pkts);
1392 device_printf(dev, "%s: xmt_bcast_pkts\t\t%" PRIu64 "\n", __func__,
1393 mstat->xmt_bcast_pkts);
1394 device_printf(dev, "%s: xmt_pause_frames\t\t%" PRIu64 "\n", __func__,
1395 mstat->xmt_pause_frames);
1396 device_printf(dev, "%s: xmt_cntrl_pkts\t\t%" PRIu64 "\n", __func__,
1397 mstat->xmt_cntrl_pkts);
1398 device_printf(dev, "%s: xmt_pkt_lt_64bytes\t\t%" PRIu64 "\n",
1399 __func__, mstat->xmt_pkt_lt_64bytes);
1400 device_printf(dev, "%s: xmt_pkt_lt_127bytes\t\t%" PRIu64 "\n",
1401 __func__, mstat->xmt_pkt_lt_127bytes);
1402 device_printf(dev, "%s: xmt_pkt_lt_255bytes\t\t%" PRIu64 "\n",
1403 __func__, mstat->xmt_pkt_lt_255bytes);
1404 device_printf(dev, "%s: xmt_pkt_lt_511bytes\t\t%" PRIu64 "\n",
1405 __func__, mstat->xmt_pkt_lt_511bytes);
1406 device_printf(dev, "%s: xmt_pkt_lt_1023bytes\t\t%" PRIu64 "\n",
1407 __func__, mstat->xmt_pkt_lt_1023bytes);
1408 device_printf(dev, "%s: xmt_pkt_lt_1518bytes\t\t%" PRIu64 "\n",
1409 __func__, mstat->xmt_pkt_lt_1518bytes);
1410 device_printf(dev, "%s: xmt_pkt_gt_1518bytes\t\t%" PRIu64 "\n",
1411 __func__, mstat->xmt_pkt_gt_1518bytes);
1413 device_printf(dev, "%s: rcv_frames\t\t\t%" PRIu64 "\n", __func__,
1415 device_printf(dev, "%s: rcv_bytes\t\t\t%" PRIu64 "\n", __func__,
1417 device_printf(dev, "%s: rcv_mcast_pkts\t\t%" PRIu64 "\n", __func__,
1418 mstat->rcv_mcast_pkts);
1419 device_printf(dev, "%s: rcv_bcast_pkts\t\t%" PRIu64 "\n", __func__,
1420 mstat->rcv_bcast_pkts);
1421 device_printf(dev, "%s: rcv_pause_frames\t\t%" PRIu64 "\n", __func__,
1422 mstat->rcv_pause_frames);
1423 device_printf(dev, "%s: rcv_cntrl_pkts\t\t%" PRIu64 "\n", __func__,
1424 mstat->rcv_cntrl_pkts);
1425 device_printf(dev, "%s: rcv_pkt_lt_64bytes\t\t%" PRIu64 "\n",
1426 __func__, mstat->rcv_pkt_lt_64bytes);
1427 device_printf(dev, "%s: rcv_pkt_lt_127bytes\t\t%" PRIu64 "\n",
1428 __func__, mstat->rcv_pkt_lt_127bytes);
1429 device_printf(dev, "%s: rcv_pkt_lt_255bytes\t\t%" PRIu64 "\n",
1430 __func__, mstat->rcv_pkt_lt_255bytes);
1431 device_printf(dev, "%s: rcv_pkt_lt_511bytes\t\t%" PRIu64 "\n",
1432 __func__, mstat->rcv_pkt_lt_511bytes);
1433 device_printf(dev, "%s: rcv_pkt_lt_1023bytes\t\t%" PRIu64 "\n",
1434 __func__, mstat->rcv_pkt_lt_1023bytes);
1435 device_printf(dev, "%s: rcv_pkt_lt_1518bytes\t\t%" PRIu64 "\n",
1436 __func__, mstat->rcv_pkt_lt_1518bytes);
1437 device_printf(dev, "%s: rcv_pkt_gt_1518bytes\t\t%" PRIu64 "\n",
1438 __func__, mstat->rcv_pkt_gt_1518bytes);
1440 device_printf(dev, "%s: rcv_len_error\t\t%" PRIu64 "\n", __func__,
1441 mstat->rcv_len_error);
1442 device_printf(dev, "%s: rcv_len_small\t\t%" PRIu64 "\n", __func__,
1443 mstat->rcv_len_small);
1444 device_printf(dev, "%s: rcv_len_large\t\t%" PRIu64 "\n", __func__,
1445 mstat->rcv_len_large);
1446 device_printf(dev, "%s: rcv_jabber\t\t\t%" PRIu64 "\n", __func__,
1448 device_printf(dev, "%s: rcv_dropped\t\t\t%" PRIu64 "\n", __func__,
1449 mstat->rcv_dropped);
1450 device_printf(dev, "%s: fcs_error\t\t\t%" PRIu64 "\n", __func__,
1452 device_printf(dev, "%s: align_error\t\t\t%" PRIu64 "\n", __func__,
1453 mstat->align_error);
1458 qla_get_hw_stats(qla_host_t *ha, uint32_t cmd, uint32_t rsp_size)
1461 q80_get_stats_t *stat;
1462 q80_get_stats_rsp_t *stat_rsp;
1467 stat = (q80_get_stats_t *)ha->hw.mbox;
1468 bzero(stat, (sizeof (q80_get_stats_t)));
1470 stat->opcode = Q8_MBX_GET_STATS;
1471 stat->count_version = 2;
1472 stat->count_version |= Q8_MBX_CMD_VERSION;
1476 if (qla_mbx_cmd(ha, (uint32_t *)stat, 2,
1477 ha->hw.mbox, (rsp_size >> 2), 0)) {
1478 device_printf(dev, "%s: failed\n", __func__);
1482 stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
1484 err = Q8_MBX_RSP_STATUS(stat_rsp->regcnt_status);
1494 ql_get_stats(qla_host_t *ha)
1496 q80_get_stats_rsp_t *stat_rsp;
1497 q80_mac_stats_t *mstat;
1498 q80_xmt_stats_t *xstat;
1499 q80_rcv_stats_t *rstat;
1503 stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
1505 * Get MAC Statistics
1507 cmd = Q8_GET_STATS_CMD_TYPE_MAC;
1508 // cmd |= Q8_GET_STATS_CMD_CLEAR;
1510 cmd |= ((ha->pci_func & 0x1) << 16);
1512 if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
1513 mstat = (q80_mac_stats_t *)&stat_rsp->u.mac;
1514 qla_mac_stats(ha, mstat);
1516 device_printf(ha->pci_dev, "%s: mac failed [0x%08x]\n",
1517 __func__, ha->hw.mbox[0]);
1520 * Get RCV Statistics
1522 cmd = Q8_GET_STATS_CMD_RCV | Q8_GET_STATS_CMD_TYPE_CNTXT;
1523 // cmd |= Q8_GET_STATS_CMD_CLEAR;
1524 cmd |= (ha->hw.rcv_cntxt_id << 16);
1526 if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
1527 rstat = (q80_rcv_stats_t *)&stat_rsp->u.rcv;
1528 qla_rcv_stats(ha, rstat);
1530 device_printf(ha->pci_dev, "%s: rcv failed [0x%08x]\n",
1531 __func__, ha->hw.mbox[0]);
1534 * Get XMT Statistics
1536 for (i = 0 ; i < ha->hw.num_tx_rings; i++) {
1537 cmd = Q8_GET_STATS_CMD_XMT | Q8_GET_STATS_CMD_TYPE_CNTXT;
1538 // cmd |= Q8_GET_STATS_CMD_CLEAR;
1539 cmd |= (ha->hw.tx_cntxt[i].tx_cntxt_id << 16);
1541 if (qla_get_hw_stats(ha, cmd, sizeof(q80_get_stats_rsp_t))
1543 xstat = (q80_xmt_stats_t *)&stat_rsp->u.xmt;
1544 qla_xmt_stats(ha, xstat, i);
1546 device_printf(ha->pci_dev, "%s: xmt failed [0x%08x]\n",
1547 __func__, ha->hw.mbox[0]);
1554 qla_get_quick_stats(qla_host_t *ha)
1556 q80_get_mac_rcv_xmt_stats_rsp_t *stat_rsp;
1557 q80_mac_stats_t *mstat;
1558 q80_xmt_stats_t *xstat;
1559 q80_rcv_stats_t *rstat;
1562 stat_rsp = (q80_get_mac_rcv_xmt_stats_rsp_t *)ha->hw.mbox;
1564 cmd = Q8_GET_STATS_CMD_TYPE_ALL;
1565 // cmd |= Q8_GET_STATS_CMD_CLEAR;
1567 // cmd |= ((ha->pci_func & 0x3) << 16);
1568 cmd |= (0xFFFF << 16);
1570 if (qla_get_hw_stats(ha, cmd,
1571 sizeof (q80_get_mac_rcv_xmt_stats_rsp_t)) == 0) {
1573 mstat = (q80_mac_stats_t *)&stat_rsp->mac;
1574 rstat = (q80_rcv_stats_t *)&stat_rsp->rcv;
1575 xstat = (q80_xmt_stats_t *)&stat_rsp->xmt;
1576 qla_mac_stats(ha, mstat);
1577 qla_rcv_stats(ha, rstat);
1578 qla_xmt_stats(ha, xstat, ha->hw.num_tx_rings);
1580 device_printf(ha->pci_dev, "%s: failed [0x%08x]\n",
1581 __func__, ha->hw.mbox[0]);
1588 * Function: Checks if the packet to be transmitted is a candidate for
1589 * Large TCP Segment Offload. If yes, the appropriate fields in the Tx
1590 * Ring Structure are plugged in.
1593 qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr)
1595 struct ether_vlan_header *eh;
1596 struct ip *ip = NULL;
1597 struct ip6_hdr *ip6 = NULL;
1598 struct tcphdr *th = NULL;
1599 uint32_t ehdrlen, hdrlen, ip_hlen, tcp_hlen, tcp_opt_off;
1600 uint16_t etype, opcode, offload = 1;
1606 eh = mtod(mp, struct ether_vlan_header *);
1608 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1609 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1610 etype = ntohs(eh->evl_proto);
1612 ehdrlen = ETHER_HDR_LEN;
1613 etype = ntohs(eh->evl_encap_proto);
1621 tcp_opt_off = ehdrlen + sizeof(struct ip) +
1622 sizeof(struct tcphdr);
1624 if (mp->m_len < tcp_opt_off) {
1625 m_copydata(mp, 0, tcp_opt_off, hdr);
1626 ip = (struct ip *)(hdr + ehdrlen);
1628 ip = (struct ip *)(mp->m_data + ehdrlen);
1631 ip_hlen = ip->ip_hl << 2;
1632 opcode = Q8_TX_CMD_OP_XMT_TCP_LSO;
1635 if ((ip->ip_p != IPPROTO_TCP) ||
1636 (ip_hlen != sizeof (struct ip))){
1637 /* IP Options are not supported */
1641 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
1645 case ETHERTYPE_IPV6:
1647 tcp_opt_off = ehdrlen + sizeof(struct ip6_hdr) +
1648 sizeof (struct tcphdr);
1650 if (mp->m_len < tcp_opt_off) {
1651 m_copydata(mp, 0, tcp_opt_off, hdr);
1652 ip6 = (struct ip6_hdr *)(hdr + ehdrlen);
1654 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
1657 ip_hlen = sizeof(struct ip6_hdr);
1658 opcode = Q8_TX_CMD_OP_XMT_TCP_LSO_IPV6;
1660 if (ip6->ip6_nxt != IPPROTO_TCP) {
1661 //device_printf(dev, "%s: ipv6\n", __func__);
1664 th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
1668 QL_DPRINT8(ha, (dev, "%s: type!=ip\n", __func__));
1676 tcp_hlen = th->th_off << 2;
1677 hdrlen = ehdrlen + ip_hlen + tcp_hlen;
1679 if (mp->m_len < hdrlen) {
1680 if (mp->m_len < tcp_opt_off) {
1681 if (tcp_hlen > sizeof(struct tcphdr)) {
1682 m_copydata(mp, tcp_opt_off,
1683 (tcp_hlen - sizeof(struct tcphdr)),
1687 m_copydata(mp, 0, hdrlen, hdr);
1691 tx_cmd->mss = mp->m_pkthdr.tso_segsz;
1693 tx_cmd->flags_opcode = opcode;
1694 tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;
1695 tx_cmd->total_hdr_len = hdrlen;
1697 /* Multicast check: least significant bit of the first address byte == 1 */
1698 if (eh->evl_dhost[0] & 0x01) {
1699 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST;
1702 if (mp->m_len < hdrlen) {
1703 printf("%s: mp->m_len %d < hdrlen %d\n", __func__, mp->m_len, hdrlen);
1711 * Name: qla_tx_chksum
1712 * Function: Checks if the packet to be transmitted is a candidate for
1713 * TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx
1714 * Ring Structure are plugged in.
1717 qla_tx_chksum(qla_host_t *ha, struct mbuf *mp, uint32_t *op_code,
1718 uint32_t *tcp_hdr_off)
1720 struct ether_vlan_header *eh;
1722 struct ip6_hdr *ip6;
1723 uint32_t ehdrlen, ip_hlen;
1724 uint16_t etype, opcode, offload = 1;
1726 uint8_t buf[sizeof(struct ip6_hdr)];
1732 if ((mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) == 0)
1735 eh = mtod(mp, struct ether_vlan_header *);
1737 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1738 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1739 etype = ntohs(eh->evl_proto);
1741 ehdrlen = ETHER_HDR_LEN;
1742 etype = ntohs(eh->evl_encap_proto);
1748 ip = (struct ip *)(mp->m_data + ehdrlen);
1750 ip_hlen = sizeof (struct ip);
1752 if (mp->m_len < (ehdrlen + ip_hlen)) {
1753 m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
1754 ip = (struct ip *)buf;
1757 if (ip->ip_p == IPPROTO_TCP)
1758 opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM;
1759 else if (ip->ip_p == IPPROTO_UDP)
1760 opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM;
1762 //device_printf(dev, "%s: ipv4\n", __func__);
1767 case ETHERTYPE_IPV6:
1768 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
1770 ip_hlen = sizeof(struct ip6_hdr);
1772 if (mp->m_len < (ehdrlen + ip_hlen)) {
1773 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
1775 ip6 = (struct ip6_hdr *)buf;
1778 if (ip6->ip6_nxt == IPPROTO_TCP)
1779 opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6;
1780 else if (ip6->ip6_nxt == IPPROTO_UDP)
1781 opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6;
1783 //device_printf(dev, "%s: ipv6\n", __func__);
1796 *tcp_hdr_off = (ip_hlen + ehdrlen);
1801 #define QLA_TX_MIN_FREE 2
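/*
 * The send path below keeps QLA_TX_MIN_FREE descriptors in reserve: a
 * packet is queued only if num_tx_cmds + QLA_TX_MIN_FREE entries are
 * free, after at most one attempt to reclaim completed descriptors via
 * qla_hw_tx_done_locked().
 */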
1804 * Function: Transmits a packet. It first checks if the packet is a
1805 * candidate for Large TCP Segment Offload and then for UDP/TCP checksum
1806 * offload. If either of these criteria is not met, it is transmitted
1807 * as a regular Ethernet frame.
1810 ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
1811 uint32_t tx_idx, struct mbuf *mp, uint32_t txr_idx, uint32_t iscsi_pdu)
1813 struct ether_vlan_header *eh;
1814 qla_hw_t *hw = &ha->hw;
1815 q80_tx_cmd_t *tx_cmd, tso_cmd;
1816 bus_dma_segment_t *c_seg;
1817 uint32_t num_tx_cmds, hdr_len = 0;
1818 uint32_t total_length = 0, bytes, tx_cmd_count = 0, txr_next;
1821 uint8_t *src = NULL, *dst = NULL;
1822 uint8_t frame_hdr[QL_FRAME_HDR_SIZE];
1823 uint32_t op_code = 0;
1824 uint32_t tcp_hdr_off = 0;
1829 * Always make sure there is at least one empty slot in the tx_ring;
1830 * tx_ring is considered full when only one entry is available
1832 num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2;
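	/* i.e. ceil(nsegs / 4): each q80_tx_cmd_t carries up to four
	 * buffer address/length pairs (buf1..buf4 filled in below) */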
1834 total_length = mp->m_pkthdr.len;
1835 if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
1836 device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
1837 __func__, total_length);
1840 eh = mtod(mp, struct ether_vlan_header *);
1842 if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
1844 bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t));
1847 ret = qla_tx_tso(ha, mp, &tso_cmd, src);
1850 /* find the additional tx_cmd descriptors required */
1852 if (mp->m_flags & M_VLANTAG)
1853 tso_cmd.total_hdr_len += ETHER_VLAN_ENCAP_LEN;
1855 hdr_len = tso_cmd.total_hdr_len;
1857 bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
1858 bytes = QL_MIN(bytes, hdr_len);
1864 bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
1868 hdr_len = tso_cmd.total_hdr_len;
1871 src = (uint8_t *)eh;
1875 (void)qla_tx_chksum(ha, mp, &op_code, &tcp_hdr_off);
1879 ha->hw.iscsi_pkt_count++;
1881 if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
1882 qla_hw_tx_done_locked(ha, txr_idx);
1883 if (hw->tx_cntxt[txr_idx].txr_free <=
1884 (num_tx_cmds + QLA_TX_MIN_FREE)) {
1885 QL_DPRINT8(ha, (dev, "%s: (hw->txr_free <= "
1886 "(num_tx_cmds + QLA_TX_MIN_FREE))\n",
1892 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[tx_idx];
1894 if (!(mp->m_pkthdr.csum_flags & CSUM_TSO)) {
1896 if (nsegs > ha->hw.max_tx_segs)
1897 ha->hw.max_tx_segs = nsegs;
1899 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
1902 tx_cmd->flags_opcode = op_code;
1903 tx_cmd->tcp_hdr_off = tcp_hdr_off;
1906 tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER;
1909 bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t));
1910 ha->tx_tso_frames++;
1913 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1914 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED;
1917 eh->evl_tag |= ha->hw.user_pri_iscsi << 13;
1919 } else if (mp->m_flags & M_VLANTAG) {
1921 if (hdr_len) { /* TSO */
1922 tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED |
1923 Q8_TX_CMD_FLAGS_HW_VLAN_ID);
1924 tx_cmd->tcp_hdr_off += ETHER_VLAN_ENCAP_LEN;
1926 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_HW_VLAN_ID;
1928 ha->hw_vlan_tx_frames++;
1929 tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag;
1932 tx_cmd->vlan_tci |= ha->hw.user_pri_iscsi << 13;
1933 mp->m_pkthdr.ether_vtag = tx_cmd->vlan_tci;
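			/* the << 13 above places the user priority in the
			 * 802.1p PCP field (bits 15:13) of the VLAN TCI */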
1938 tx_cmd->n_bufs = (uint8_t)nsegs;
1939 tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF);
1940 tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8)));
1941 tx_cmd->cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func);
1946 for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) {
1950 tx_cmd->buf1_addr = c_seg->ds_addr;
1951 tx_cmd->buf1_len = c_seg->ds_len;
1955 tx_cmd->buf2_addr = c_seg->ds_addr;
1956 tx_cmd->buf2_len = c_seg->ds_len;
1960 tx_cmd->buf3_addr = c_seg->ds_addr;
1961 tx_cmd->buf3_len = c_seg->ds_len;
1965 tx_cmd->buf4_addr = c_seg->ds_addr;
1966 tx_cmd->buf4_len = c_seg->ds_len;
1974 txr_next = hw->tx_cntxt[txr_idx].txr_next =
1975 (hw->tx_cntxt[txr_idx].txr_next + 1) &
1976 (NUM_TX_DESCRIPTORS - 1);
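		/* NUM_TX_DESCRIPTORS is a power of two, so the mask wraps
		 * the producer index around the ring */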
1982 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
1983 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
1986 if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
1988 /* TSO : Copy the header in the following tx cmd descriptors */
1990 txr_next = hw->tx_cntxt[txr_idx].txr_next;
1992 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
1993 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
1995 bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
1996 bytes = QL_MIN(bytes, hdr_len);
1998 dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN;
2000 if (mp->m_flags & M_VLANTAG) {
2001 /* first copy the src/dst MAC addresses */
2002 bcopy(src, dst, (ETHER_ADDR_LEN * 2));
2003 dst += (ETHER_ADDR_LEN * 2);
2004 src += (ETHER_ADDR_LEN * 2);
2006 *((uint16_t *)dst) = htons(ETHERTYPE_VLAN);
2008 *((uint16_t *)dst) = htons(mp->m_pkthdr.ether_vtag);
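			/* M_VLANTAG means the tag travels out-of-band in the
			 * mbuf pkthdr, so the 802.1Q header is synthesized
			 * here while copying the TSO header */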
2011 /* bytes left in src header */
2012 hdr_len -= ((ETHER_ADDR_LEN * 2) +
2013 ETHER_VLAN_ENCAP_LEN);
2015 /* bytes left in TxCmd Entry */
2016 bytes -= ((ETHER_ADDR_LEN * 2) + ETHER_VLAN_ENCAP_LEN);
2019 bcopy(src, dst, bytes);
2023 bcopy(src, dst, bytes);
2028 txr_next = hw->tx_cntxt[txr_idx].txr_next =
2029 (hw->tx_cntxt[txr_idx].txr_next + 1) &
2030 (NUM_TX_DESCRIPTORS - 1);
2034 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2035 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2037 bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
2039 bcopy(src, tx_cmd, bytes);
2043 txr_next = hw->tx_cntxt[txr_idx].txr_next =
2044 (hw->tx_cntxt[txr_idx].txr_next + 1) &
2045 (NUM_TX_DESCRIPTORS - 1);
2050 hw->tx_cntxt[txr_idx].txr_free =
2051 hw->tx_cntxt[txr_idx].txr_free - tx_cmd_count;
2053 QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->tx_cntxt[txr_idx].txr_next,\
2055 QL_DPRINT8(ha, (dev, "%s: return\n", __func__));
2062 #define Q8_CONFIG_IND_TBL_SIZE 32 /* < Q8_RSS_IND_TBL_SIZE and power of 2 */
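/*
 * A worked example, assuming 4 SDS rings: the 32-entry table below is
 * filled as 0,1,2,3,0,1,2,3,... and programmed repeatedly across the
 * full 0..Q8_RSS_IND_TBL_MAX_IDX range, spreading RSS hash buckets
 * evenly over the status rings.
 */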
2064 qla_config_rss_ind_table(qla_host_t *ha)
2067 uint8_t rss_ind_tbl[Q8_CONFIG_IND_TBL_SIZE];
2070 for (i = 0; i < Q8_CONFIG_IND_TBL_SIZE; i++) {
2071 rss_ind_tbl[i] = i % ha->hw.num_sds_rings;
2074 for (i = 0; i <= Q8_RSS_IND_TBL_MAX_IDX ;
2075 i = i + Q8_CONFIG_IND_TBL_SIZE) {
2077 if ((i + Q8_CONFIG_IND_TBL_SIZE) > Q8_RSS_IND_TBL_MAX_IDX) {
2078 count = Q8_RSS_IND_TBL_MAX_IDX - i + 1;
2080 count = Q8_CONFIG_IND_TBL_SIZE;
2083 if (qla_set_rss_ind_table(ha, i, count, ha->hw.rcv_cntxt_id,
2092 * Name: ql_del_hw_if
2093 * Function: Destroys the hardware specific entities corresponding to an
2094 * Ethernet Interface
2097 ql_del_hw_if(qla_host_t *ha)
2102 (void)qla_stop_nic_func(ha);
2104 qla_del_rcv_cntxt(ha);
2105 qla_del_xmt_cntxt(ha);
2107 if (ha->hw.flags.init_intr_cnxt) {
2108 for (i = 0; i < ha->hw.num_sds_rings; ) {
2110 if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2111 num_msix = Q8_MAX_INTR_VECTORS;
2113 num_msix = ha->hw.num_sds_rings - i;
2114 qla_config_intr_cntxt(ha, i, num_msix, 0);
2119 ha->hw.flags.init_intr_cnxt = 0;
2125 qla_confirm_9kb_enable(qla_host_t *ha)
2127 uint32_t supports_9kb = 0;
2129 ha->hw.mbx_intr_mask_offset = READ_REG32(ha, Q8_MBOX_INT_MASK_MSIX);
2131 /* Use MSI-X vector 0; Enable Firmware Mailbox Interrupt */
2132 WRITE_REG32(ha, Q8_MBOX_INT_ENABLE, BIT_2);
2133 WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
2135 qla_get_nic_partition(ha, &supports_9kb, NULL);
2138 ha->hw.enable_9kb = 0;
2145 * Name: ql_init_hw_if
2146 * Function: Creates the hardware specific entities corresponding to an
2147 * Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
2148 * corresponding to the interface. Enables LRO if allowed.
2151 ql_init_hw_if(qla_host_t *ha)
2155 uint8_t bcast_mac[6];
2161 for (i = 0; i < ha->hw.num_sds_rings; i++) {
2162 bzero(ha->hw.dma_buf.sds_ring[i].dma_b,
2163 ha->hw.dma_buf.sds_ring[i].size);
2166 for (i = 0; i < ha->hw.num_sds_rings; ) {
2168 if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2169 num_msix = Q8_MAX_INTR_VECTORS;
2171 num_msix = ha->hw.num_sds_rings - i;
2173 if (qla_config_intr_cntxt(ha, i, num_msix, 1)) {
2179 for (i = 0; i < num_msix; ) {
2180 qla_config_intr_cntxt(ha, i,
2181 Q8_MAX_INTR_VECTORS, 0);
2182 i += Q8_MAX_INTR_VECTORS;
2191 ha->hw.flags.init_intr_cnxt = 1;
2194 * Create Receive Context
2196 if (qla_init_rcv_cntxt(ha)) {
2200 for (i = 0; i < ha->hw.num_rds_rings; i++) {
2201 rdesc = &ha->hw.rds[i];
2202 rdesc->rx_next = NUM_RX_DESCRIPTORS - 2;
2204 /* Update the RDS Producer Indices */
2205 QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,\
2211 * Create Transmit Context
2213 if (qla_init_xmt_cntxt(ha)) {
2214 qla_del_rcv_cntxt(ha);
2217 ha->hw.max_tx_segs = 0;
2219 if (qla_config_mac_addr(ha, ha->hw.mac_addr, 1))
2222 ha->hw.flags.unicast_mac = 1;
2224 bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
2225 bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
2227 if (qla_config_mac_addr(ha, bcast_mac, 1))
2230 ha->hw.flags.bcast_mac = 1;
2233 * program any cached multicast addresses
2235 if (qla_hw_add_all_mcast(ha))
2238 if (qla_config_rss(ha, ha->hw.rcv_cntxt_id))
2241 if (qla_config_rss_ind_table(ha))
2244 if (qla_config_intr_coalesce(ha, ha->hw.rcv_cntxt_id, 0, 1))
2247 if (qla_link_event_req(ha, ha->hw.rcv_cntxt_id))
2250 if (qla_config_fw_lro(ha, ha->hw.rcv_cntxt_id))
2253 if (qla_init_nic_func(ha))
2256 if (qla_query_fw_dcbx_caps(ha))
2259 for (i = 0; i < ha->hw.num_sds_rings; i++)
2260 QL_ENABLE_INTERRUPTS(ha, i);
2266 qla_map_sds_to_rds(qla_host_t *ha, uint32_t start_idx, uint32_t num_idx)
2268 device_t dev = ha->pci_dev;
2269 q80_rq_map_sds_to_rds_t *map_rings;
2270 q80_rsp_map_sds_to_rds_t *map_rings_rsp;
2272 qla_hw_t *hw = &ha->hw;
2274 map_rings = (q80_rq_map_sds_to_rds_t *)ha->hw.mbox;
2275 bzero(map_rings, sizeof(q80_rq_map_sds_to_rds_t));
2277 map_rings->opcode = Q8_MBX_MAP_SDS_TO_RDS;
2278 map_rings->count_version = (sizeof (q80_rq_map_sds_to_rds_t) >> 2);
2279 map_rings->count_version |= Q8_MBX_CMD_VERSION;
2281 map_rings->cntxt_id = hw->rcv_cntxt_id;
2282 map_rings->num_rings = num_idx;
2284 for (i = 0; i < num_idx; i++) {
2285 map_rings->sds_rds[i].sds_ring = i + start_idx;
2286 map_rings->sds_rds[i].rds_ring = i + start_idx;
2289 if (qla_mbx_cmd(ha, (uint32_t *)map_rings,
2290 (sizeof (q80_rq_map_sds_to_rds_t) >> 2),
2291 ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
2292 device_printf(dev, "%s: failed0\n", __func__);
2296 map_rings_rsp = (q80_rsp_map_sds_to_rds_t *)ha->hw.mbox;
2298 err = Q8_MBX_RSP_STATUS(map_rings_rsp->regcnt_status);
2301 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2309 * Name: qla_init_rcv_cntxt
2310 * Function: Creates the Receive Context.
2313 qla_init_rcv_cntxt(qla_host_t *ha)
2315 q80_rq_rcv_cntxt_t *rcntxt;
2316 q80_rsp_rcv_cntxt_t *rcntxt_rsp;
2317 q80_stat_desc_t *sdesc;
2319 qla_hw_t *hw = &ha->hw;
2322 uint32_t rcntxt_sds_rings;
2323 uint32_t rcntxt_rds_rings;
2329 * Create Receive Context
2332 for (i = 0; i < hw->num_sds_rings; i++) {
2333 sdesc = (q80_stat_desc_t *)&hw->sds[i].sds_ring_base[0];
2335 for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++) {
2336 sdesc->data[0] = 1ULL;
2337 sdesc->data[1] = 1ULL;
2341 rcntxt_sds_rings = hw->num_sds_rings;
2342 if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS)
2343 rcntxt_sds_rings = MAX_RCNTXT_SDS_RINGS;
2345 rcntxt_rds_rings = hw->num_rds_rings;
2347 if (hw->num_rds_rings > MAX_RDS_RING_SETS)
2348 rcntxt_rds_rings = MAX_RDS_RING_SETS;
2350 rcntxt = (q80_rq_rcv_cntxt_t *)ha->hw.mbox;
2351 bzero(rcntxt, (sizeof (q80_rq_rcv_cntxt_t)));
2353 rcntxt->opcode = Q8_MBX_CREATE_RX_CNTXT;
2354 rcntxt->count_version = (sizeof (q80_rq_rcv_cntxt_t) >> 2);
2355 rcntxt->count_version |= Q8_MBX_CMD_VERSION;
2357 rcntxt->cap0 = Q8_RCV_CNTXT_CAP0_BASEFW |
2358 Q8_RCV_CNTXT_CAP0_LRO |
2359 Q8_RCV_CNTXT_CAP0_HW_LRO |
2360 Q8_RCV_CNTXT_CAP0_RSS |
2361 Q8_RCV_CNTXT_CAP0_SGL_LRO;
2363 if (ha->hw.enable_9kb)
2364 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SINGLE_JUMBO;
2366 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SGL_JUMBO;
2368 if (ha->hw.num_rds_rings > 1) {
2369 rcntxt->nrds_sets_rings = rcntxt_rds_rings | (1 << 5);
2370 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_MULTI_RDS;
2372 rcntxt->nrds_sets_rings = 0x1 | (1 << 5);
2374 rcntxt->nsds_rings = rcntxt_sds_rings;
2376 rcntxt->rds_producer_mode = Q8_RCV_CNTXT_RDS_PROD_MODE_UNIQUE;
2378 rcntxt->rcv_vpid = 0;
2380 for (i = 0; i < rcntxt_sds_rings; i++) {
2381 rcntxt->sds[i].paddr =
2382 qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr);
2383 rcntxt->sds[i].size =
2384 qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
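/*
 * With only two MSI-X vectors, every SDS ring shares interrupt vector 0
 * and is identified by its interrupt source bit; otherwise each SDS ring
 * gets a vector of its own.
 */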
2385 if (ha->msix_count == 2) {
2386 rcntxt->sds[i].intr_id =
2387 qla_host_to_le16(hw->intr_id[0]);
2388 rcntxt->sds[i].intr_src_bit = qla_host_to_le16((i));
2390 rcntxt->sds[i].intr_id =
2391 qla_host_to_le16(hw->intr_id[i]);
2392 rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0);
2396 for (i = 0; i < rcntxt_rds_rings; i++) {
2397 rcntxt->rds[i].paddr_std =
2398 qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr);
2400 if (ha->hw.enable_9kb)
2401 rcntxt->rds[i].std_bsize =
2402 qla_host_to_le64(MJUM9BYTES);
2404 rcntxt->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
2406 rcntxt->rds[i].std_nentries =
2407 qla_host_to_le32(NUM_RX_DESCRIPTORS);
2410 if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
2411 (sizeof (q80_rq_rcv_cntxt_t) >> 2),
2412 ha->hw.mbox, (sizeof(q80_rsp_rcv_cntxt_t) >> 2), 0)) {
2413 device_printf(dev, "%s: failed0\n", __func__);
2417 rcntxt_rsp = (q80_rsp_rcv_cntxt_t *)ha->hw.mbox;
2419 err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
2422 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2426 for (i = 0; i < rcntxt_sds_rings; i++) {
2427 hw->sds[i].sds_consumer = rcntxt_rsp->sds_cons[i];
2430 for (i = 0; i < rcntxt_rds_rings; i++) {
2431 hw->rds[i].prod_std = rcntxt_rsp->rds[i].prod_std;
2434 hw->rcv_cntxt_id = rcntxt_rsp->cntxt_id;
2436 ha->hw.flags.init_rx_cnxt = 1;
2438 if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) {
2440 for (i = MAX_RCNTXT_SDS_RINGS; i < hw->num_sds_rings;) {
2442 if ((i + MAX_RCNTXT_SDS_RINGS) < hw->num_sds_rings)
2443 max_idx = MAX_RCNTXT_SDS_RINGS;
2445 max_idx = hw->num_sds_rings - i;
2447 err = qla_add_rcv_rings(ha, i, max_idx);
2455 if (hw->num_rds_rings > 1) {
2457 for (i = 0; i < hw->num_rds_rings; ) {
2459 if ((i + MAX_SDS_TO_RDS_MAP) < hw->num_rds_rings)
2460 max_idx = MAX_SDS_TO_RDS_MAP;
2462 max_idx = hw->num_rds_rings - i;
2464 err = qla_map_sds_to_rds(ha, i, max_idx);
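/*
 * Name: qla_add_rcv_rings
 * Function: Adds the SDS/RDS ring pairs that did not fit into the initial
 *	create-receive-context command (those beyond MAX_RCNTXT_SDS_RINGS).
 */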
2476 qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds)
2478 device_t dev = ha->pci_dev;
2479 q80_rq_add_rcv_rings_t *add_rcv;
2480 q80_rsp_add_rcv_rings_t *add_rcv_rsp;
2482 qla_hw_t *hw = &ha->hw;
2484 add_rcv = (q80_rq_add_rcv_rings_t *)ha->hw.mbox;
2485 bzero(add_rcv, sizeof (q80_rq_add_rcv_rings_t));
2487 add_rcv->opcode = Q8_MBX_ADD_RX_RINGS;
2488 add_rcv->count_version = (sizeof (q80_rq_add_rcv_rings_t) >> 2);
2489 add_rcv->count_version |= Q8_MBX_CMD_VERSION;
2491 add_rcv->nrds_sets_rings = nsds | (1 << 5);
2492 add_rcv->nsds_rings = nsds;
2493 add_rcv->cntxt_id = hw->rcv_cntxt_id;
2495 for (i = 0; i < nsds; i++) {
2499 add_rcv->sds[i].paddr =
2500 qla_host_to_le64(hw->dma_buf.sds_ring[j].dma_addr);
2502 add_rcv->sds[i].size =
2503 qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
2505 if (ha->msix_count == 2) {
2506 add_rcv->sds[i].intr_id =
2507 qla_host_to_le16(hw->intr_id[0]);
2508 add_rcv->sds[i].intr_src_bit = qla_host_to_le16(j);
2510 add_rcv->sds[i].intr_id =
2511 qla_host_to_le16(hw->intr_id[j]);
2512 add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0);
2516 for (i = 0; (i < nsds); i++) {
2519 add_rcv->rds[i].paddr_std =
2520 qla_host_to_le64(hw->dma_buf.rds_ring[j].dma_addr);
2522 if (ha->hw.enable_9kb)
2523 add_rcv->rds[i].std_bsize =
2524 qla_host_to_le64(MJUM9BYTES);
2526 add_rcv->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
2528 add_rcv->rds[i].std_nentries =
2529 qla_host_to_le32(NUM_RX_DESCRIPTORS);
2533 if (qla_mbx_cmd(ha, (uint32_t *)add_rcv,
2534 (sizeof (q80_rq_add_rcv_rings_t) >> 2),
2535 ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
2536 device_printf(dev, "%s: failed0\n", __func__);
2540 add_rcv_rsp = (q80_rsp_add_rcv_rings_t *)ha->hw.mbox;
2542 err = Q8_MBX_RSP_STATUS(add_rcv_rsp->regcnt_status);
2545 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2549 for (i = 0; i < nsds; i++) {
2550 hw->sds[(i + sds_idx)].sds_consumer = add_rcv_rsp->sds_cons[i];
2553 for (i = 0; i < nsds; i++) {
2554 hw->rds[(i + sds_idx)].prod_std = add_rcv_rsp->rds[i].prod_std;
2561 * Name: qla_del_rcv_cntxt
2562 * Function: Destroys the Receive Context.
2565 qla_del_rcv_cntxt(qla_host_t *ha)
2567 device_t dev = ha->pci_dev;
2568 q80_rcv_cntxt_destroy_t *rcntxt;
2569 q80_rcv_cntxt_destroy_rsp_t *rcntxt_rsp;
2571 uint8_t bcast_mac[6];
2573 if (!ha->hw.flags.init_rx_cnxt)
2576 if (qla_hw_del_all_mcast(ha))
2579 if (ha->hw.flags.bcast_mac) {
2581 bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
2582 bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
2584 if (qla_config_mac_addr(ha, bcast_mac, 0))
2586 ha->hw.flags.bcast_mac = 0;
2590 if (ha->hw.flags.unicast_mac) {
2591 if (qla_config_mac_addr(ha, ha->hw.mac_addr, 0))
2593 ha->hw.flags.unicast_mac = 0;
2596 rcntxt = (q80_rcv_cntxt_destroy_t *)ha->hw.mbox;
2597 bzero(rcntxt, (sizeof (q80_rcv_cntxt_destroy_t)));
2599 rcntxt->opcode = Q8_MBX_DESTROY_RX_CNTXT;
2600 rcntxt->count_version = (sizeof (q80_rcv_cntxt_destroy_t) >> 2);
2601 rcntxt->count_version |= Q8_MBX_CMD_VERSION;
2603 rcntxt->cntxt_id = ha->hw.rcv_cntxt_id;
2605 if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
2606 (sizeof (q80_rcv_cntxt_destroy_t) >> 2),
2607 ha->hw.mbox, (sizeof(q80_rcv_cntxt_destroy_rsp_t) >> 2), 0)) {
2608 device_printf(dev, "%s: failed0\n", __func__);
2611 rcntxt_rsp = (q80_rcv_cntxt_destroy_rsp_t *)ha->hw.mbox;
2613 err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
2616 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2619 ha->hw.flags.init_rx_cnxt = 0;
2624 * Name: qla_init_xmt_cntxt
2625 * Function: Creates the Transmit Context.
2628 qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
2631 qla_hw_t *hw = &ha->hw;
2632 q80_rq_tx_cntxt_t *tcntxt;
2633 q80_rsp_tx_cntxt_t *tcntxt_rsp;
2635 qla_hw_tx_cntxt_t *hw_tx_cntxt;
2637 hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
2642 * Create Transmit Context
2644 tcntxt = (q80_rq_tx_cntxt_t *)ha->hw.mbox;
2645 bzero(tcntxt, (sizeof (q80_rq_tx_cntxt_t)));
2647 tcntxt->opcode = Q8_MBX_CREATE_TX_CNTXT;
2648 tcntxt->count_version = (sizeof (q80_rq_tx_cntxt_t) >> 2);
2649 tcntxt->count_version |= Q8_MBX_CMD_VERSION;
2651 #ifdef QL_ENABLE_ISCSI_TLV
2653 tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO |
2654 Q8_TX_CNTXT_CAP0_TC;
2656 if (txr_idx >= (ha->hw.num_tx_rings >> 1)) {
2657 tcntxt->traffic_class = 1;
2662 tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO;
2664 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
2666 tcntxt->ntx_rings = 1;
2668 tcntxt->tx_ring[0].paddr =
2669 qla_host_to_le64(hw_tx_cntxt->tx_ring_paddr);
2670 tcntxt->tx_ring[0].tx_consumer =
2671 qla_host_to_le64(hw_tx_cntxt->tx_cons_paddr);
2672 tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS);
2674 tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[0]);
2675 tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0);
2678 hw_tx_cntxt->txr_free = NUM_TX_DESCRIPTORS;
2679 hw_tx_cntxt->txr_next = hw_tx_cntxt->txr_comp = 0;
2681 if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
2682 (sizeof (q80_rq_tx_cntxt_t) >> 2),
2684 (sizeof(q80_rsp_tx_cntxt_t) >> 2), 0)) {
2685 device_printf(dev, "%s: failed0\n", __func__);
2688 tcntxt_rsp = (q80_rsp_tx_cntxt_t *)ha->hw.mbox;
2690 err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
2693 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2697 hw_tx_cntxt->tx_prod_reg = tcntxt_rsp->tx_ring[0].prod_index;
2698 hw_tx_cntxt->tx_cntxt_id = tcntxt_rsp->tx_ring[0].cntxt_id;
2700 if (qla_config_intr_coalesce(ha, hw_tx_cntxt->tx_cntxt_id, 0, 0))
2708 * Name: qla_del_xmt_cntxt
2709 * Function: Destroys the Transmit Context.
2712 qla_del_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
2714 device_t dev = ha->pci_dev;
2715 q80_tx_cntxt_destroy_t *tcntxt;
2716 q80_tx_cntxt_destroy_rsp_t *tcntxt_rsp;
2719 tcntxt = (q80_tx_cntxt_destroy_t *)ha->hw.mbox;
2720 bzero(tcntxt, (sizeof (q80_tx_cntxt_destroy_t)));
2722 tcntxt->opcode = Q8_MBX_DESTROY_TX_CNTXT;
2723 tcntxt->count_version = (sizeof (q80_tx_cntxt_destroy_t) >> 2);
2724 tcntxt->count_version |= Q8_MBX_CMD_VERSION;
2726 tcntxt->cntxt_id = ha->hw.tx_cntxt[txr_idx].tx_cntxt_id;
2728 if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
2729 (sizeof (q80_tx_cntxt_destroy_t) >> 2),
2730 ha->hw.mbox, (sizeof (q80_tx_cntxt_destroy_rsp_t) >> 2), 0)) {
2731 device_printf(dev, "%s: failed0\n", __func__);
2734 tcntxt_rsp = (q80_tx_cntxt_destroy_rsp_t *)ha->hw.mbox;
2736 err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
2739 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2746 qla_del_xmt_cntxt(qla_host_t *ha)
2750 if (!ha->hw.flags.init_tx_cnxt)
2753 for (i = 0; i < ha->hw.num_tx_rings; i++) {
2754 if (qla_del_xmt_cntxt_i(ha, i))
2757 ha->hw.flags.init_tx_cnxt = 0;
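/*
 * Name: qla_init_xmt_cntxt
 * Function: Creates a transmit context for every Tx ring; if any creation
 *	fails, the contexts already created are torn down.
 */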
2761 qla_init_xmt_cntxt(qla_host_t *ha)
2765 for (i = 0; i < ha->hw.num_tx_rings; i++) {
2766 if (qla_init_xmt_cntxt_i(ha, i) != 0) {
2767 for (j = 0; j < i; j++)
2768 qla_del_xmt_cntxt_i(ha, j);
2772 ha->hw.flags.init_tx_cnxt = 1;
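/*
 * Name: qla_hw_add_all_mcast
 * Function: Programs every non-zero entry of the cached multicast table
 *	into the hardware filter.
 */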
2777 qla_hw_add_all_mcast(qla_host_t *ha)
2781 nmcast = ha->hw.nmcast;
2783 for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
2784 if ((ha->hw.mcast[i].addr[0] != 0) ||
2785 (ha->hw.mcast[i].addr[1] != 0) ||
2786 (ha->hw.mcast[i].addr[2] != 0) ||
2787 (ha->hw.mcast[i].addr[3] != 0) ||
2788 (ha->hw.mcast[i].addr[4] != 0) ||
2789 (ha->hw.mcast[i].addr[5] != 0)) {
2791 if (qla_config_mac_addr(ha, ha->hw.mcast[i].addr, 1)) {
2792 device_printf(ha->pci_dev, "%s: failed\n",
2804 qla_hw_del_all_mcast(qla_host_t *ha)
2808 nmcast = ha->hw.nmcast;
2810 for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
2811 if ((ha->hw.mcast[i].addr[0] != 0) ||
2812 (ha->hw.mcast[i].addr[1] != 0) ||
2813 (ha->hw.mcast[i].addr[2] != 0) ||
2814 (ha->hw.mcast[i].addr[3] != 0) ||
2815 (ha->hw.mcast[i].addr[4] != 0) ||
2816 (ha->hw.mcast[i].addr[5] != 0)) {
2818 if (qla_config_mac_addr(ha, ha->hw.mcast[i].addr, 0))
2828 qla_hw_add_mcast(qla_host_t *ha, uint8_t *mta)
2832 for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
2834 if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0)
return 0; /* it has already been added */
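/*
 * An all-zero address marks a free slot in the cached multicast table;
 * find one, program the hardware, and cache the new address there.
 */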
2838 for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
2840 if ((ha->hw.mcast[i].addr[0] == 0) &&
2841 (ha->hw.mcast[i].addr[1] == 0) &&
2842 (ha->hw.mcast[i].addr[2] == 0) &&
2843 (ha->hw.mcast[i].addr[3] == 0) &&
2844 (ha->hw.mcast[i].addr[4] == 0) &&
2845 (ha->hw.mcast[i].addr[5] == 0)) {
2847 if (qla_config_mac_addr(ha, mta, 1))
2850 bcopy(mta, ha->hw.mcast[i].addr, Q8_MAC_ADDR_LEN);
2860 qla_hw_del_mcast(qla_host_t *ha, uint8_t *mta)
2864 for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
2865 if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) {
2867 if (qla_config_mac_addr(ha, mta, 0))
2870 ha->hw.mcast[i].addr[0] = 0;
2871 ha->hw.mcast[i].addr[1] = 0;
2872 ha->hw.mcast[i].addr[2] = 0;
2873 ha->hw.mcast[i].addr[3] = 0;
2874 ha->hw.mcast[i].addr[4] = 0;
2875 ha->hw.mcast[i].addr[5] = 0;
* Name: ql_hw_set_multi
* Function: Sets the multicast addresses provided by the host O.S. into the
* hardware (for the given interface)
2891 ql_hw_set_multi(qla_host_t *ha, uint8_t *mcast, uint32_t mcnt,
2895 uint8_t *mta = mcast;
2898 for (i = 0; i < mcnt; i++) {
2900 ret = qla_hw_add_mcast(ha, mta);
2904 ret = qla_hw_del_mcast(ha, mta);
2909 mta += Q8_MAC_ADDR_LEN;
2915 * Name: qla_hw_tx_done_locked
2916 * Function: Handle Transmit Completions
2919 qla_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx)
2922 qla_hw_t *hw = &ha->hw;
2923 uint32_t comp_idx, comp_count = 0;
2924 qla_hw_tx_cntxt_t *hw_tx_cntxt;
2926 hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
2928 /* retrieve index of last entry in tx ring completed */
2929 comp_idx = qla_le32_to_host(*(hw_tx_cntxt->tx_cons));
2931 while (comp_idx != hw_tx_cntxt->txr_comp) {
2933 txb = &ha->tx_ring[txr_idx].tx_buf[hw_tx_cntxt->txr_comp];
2935 hw_tx_cntxt->txr_comp++;
2936 if (hw_tx_cntxt->txr_comp == NUM_TX_DESCRIPTORS)
2937 hw_tx_cntxt->txr_comp = 0;
2942 ha->ifp->if_opackets++;
2944 bus_dmamap_sync(ha->tx_tag, txb->map,
2945 BUS_DMASYNC_POSTWRITE);
2946 bus_dmamap_unload(ha->tx_tag, txb->map);
2947 m_freem(txb->m_head);
2953 hw_tx_cntxt->txr_free += comp_count;
2958 * Name: ql_hw_tx_done
2959 * Function: Handle Transmit Completions
2962 ql_hw_tx_done(qla_host_t *ha)
2967 if (!mtx_trylock(&ha->tx_lock)) {
2968 QL_DPRINT8(ha, (ha->pci_dev,
2969 "%s: !mtx_trylock(&ha->tx_lock)\n", __func__));
2972 for (i = 0; i < ha->hw.num_tx_rings; i++) {
2973 qla_hw_tx_done_locked(ha, i);
2974 if (ha->hw.tx_cntxt[i].txr_free <= (NUM_TX_DESCRIPTORS >> 1))
2979 ha->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2986 ql_update_link_state(qla_host_t *ha)
2988 uint32_t link_state;
2989 uint32_t prev_link_state;
2991 if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2995 link_state = READ_REG32(ha, Q8_LINK_STATE);
2997 prev_link_state = ha->hw.link_up;
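	/*
	 * Function 0 reads the low nibble of Q8_LINK_STATE; every other
	 * function reads the next nibble. A field value of 1 means link up.
	 */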
2999 if (ha->pci_func == 0)
ha->hw.link_up = (((link_state & 0xF) == 1) ? 1 : 0);
ha->hw.link_up = ((((link_state >> 4) & 0xF) == 1) ? 1 : 0);
3004 if (prev_link_state != ha->hw.link_up) {
3005 if (ha->hw.link_up) {
3006 if_link_state_change(ha->ifp, LINK_STATE_UP);
3008 if_link_state_change(ha->ifp, LINK_STATE_DOWN);
3015 ql_hw_stop_rcv(qla_host_t *ha)
3017 int i, done, count = 100;
3019 ha->flags.stop_rcv = 1;
3023 for (i = 0; i < ha->hw.num_sds_rings; i++) {
3024 if (ha->hw.sds[i].rcv_active)
3030 qla_mdelay(__func__, 10);
3034 device_printf(ha->pci_dev, "%s: Counter expired.\n", __func__);
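/*
 * Name: ql_hw_check_health
 * Function: On every 1000th invocation, checks the ASIC temperature
 *	register and verifies that the firmware heartbeat counter is still
 *	advancing.
 */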
3040 ql_hw_check_health(qla_host_t *ha)
3044 ha->hw.health_count++;
3046 if (ha->hw.health_count < 1000)
3049 ha->hw.health_count = 0;
3051 val = READ_REG32(ha, Q8_ASIC_TEMPERATURE);
3053 if (((val & 0xFFFF) == 2) || ((val & 0xFFFF) == 3) ||
3054 (QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE))) {
3055 device_printf(ha->pci_dev, "%s: Temperature Alert [0x%08x]\n",
3060 val = READ_REG32(ha, Q8_FIRMWARE_HEARTBEAT);
3062 if ((val != ha->hw.hbeat_value) &&
3063 (!(QL_ERR_INJECT(ha, INJCT_HEARTBEAT_FAILURE)))) {
3064 ha->hw.hbeat_value = val;
device_printf(ha->pci_dev, "%s: Heartbeat Failure [0x%08x]\n",
3074 qla_init_nic_func(qla_host_t *ha)
3077 q80_init_nic_func_t *init_nic;
3078 q80_init_nic_func_rsp_t *init_nic_rsp;
3083 init_nic = (q80_init_nic_func_t *)ha->hw.mbox;
3084 bzero(init_nic, sizeof(q80_init_nic_func_t));
3086 init_nic->opcode = Q8_MBX_INIT_NIC_FUNC;
3087 init_nic->count_version = (sizeof (q80_init_nic_func_t) >> 2);
3088 init_nic->count_version |= Q8_MBX_CMD_VERSION;
3090 init_nic->options = Q8_INIT_NIC_REG_DCBX_CHNG_AEN;
3091 init_nic->options |= Q8_INIT_NIC_REG_SFP_CHNG_AEN;
3092 init_nic->options |= Q8_INIT_NIC_REG_IDC_AEN;
3094 //qla_dump_buf8(ha, __func__, init_nic, sizeof (q80_init_nic_func_t));
3095 if (qla_mbx_cmd(ha, (uint32_t *)init_nic,
3096 (sizeof (q80_init_nic_func_t) >> 2),
3097 ha->hw.mbox, (sizeof (q80_init_nic_func_rsp_t) >> 2), 0)) {
3098 device_printf(dev, "%s: failed\n", __func__);
3102 init_nic_rsp = (q80_init_nic_func_rsp_t *)ha->hw.mbox;
3103 // qla_dump_buf8(ha, __func__, init_nic_rsp, sizeof (q80_init_nic_func_rsp_t));
3105 err = Q8_MBX_RSP_STATUS(init_nic_rsp->regcnt_status);
3108 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3115 qla_stop_nic_func(qla_host_t *ha)
3118 q80_stop_nic_func_t *stop_nic;
3119 q80_stop_nic_func_rsp_t *stop_nic_rsp;
3124 stop_nic = (q80_stop_nic_func_t *)ha->hw.mbox;
3125 bzero(stop_nic, sizeof(q80_stop_nic_func_t));
3127 stop_nic->opcode = Q8_MBX_STOP_NIC_FUNC;
3128 stop_nic->count_version = (sizeof (q80_stop_nic_func_t) >> 2);
3129 stop_nic->count_version |= Q8_MBX_CMD_VERSION;
3131 stop_nic->options = Q8_STOP_NIC_DEREG_DCBX_CHNG_AEN;
3132 stop_nic->options |= Q8_STOP_NIC_DEREG_SFP_CHNG_AEN;
3134 //qla_dump_buf8(ha, __func__, stop_nic, sizeof (q80_stop_nic_func_t));
3135 if (qla_mbx_cmd(ha, (uint32_t *)stop_nic,
3136 (sizeof (q80_stop_nic_func_t) >> 2),
3137 ha->hw.mbox, (sizeof (q80_stop_nic_func_rsp_t) >> 2), 0)) {
3138 device_printf(dev, "%s: failed\n", __func__);
3142 stop_nic_rsp = (q80_stop_nic_func_rsp_t *)ha->hw.mbox;
//qla_dump_buf8(ha, __func__, stop_nic_rsp, sizeof (q80_stop_nic_func_rsp_t));
3145 err = Q8_MBX_RSP_STATUS(stop_nic_rsp->regcnt_status);
3148 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3155 qla_query_fw_dcbx_caps(qla_host_t *ha)
3158 q80_query_fw_dcbx_caps_t *fw_dcbx;
3159 q80_query_fw_dcbx_caps_rsp_t *fw_dcbx_rsp;
3164 fw_dcbx = (q80_query_fw_dcbx_caps_t *)ha->hw.mbox;
3165 bzero(fw_dcbx, sizeof(q80_query_fw_dcbx_caps_t));
3167 fw_dcbx->opcode = Q8_MBX_GET_FW_DCBX_CAPS;
3168 fw_dcbx->count_version = (sizeof (q80_query_fw_dcbx_caps_t) >> 2);
3169 fw_dcbx->count_version |= Q8_MBX_CMD_VERSION;
3171 ql_dump_buf8(ha, __func__, fw_dcbx, sizeof (q80_query_fw_dcbx_caps_t));
3172 if (qla_mbx_cmd(ha, (uint32_t *)fw_dcbx,
3173 (sizeof (q80_query_fw_dcbx_caps_t) >> 2),
3174 ha->hw.mbox, (sizeof (q80_query_fw_dcbx_caps_rsp_t) >> 2), 0)) {
3175 device_printf(dev, "%s: failed\n", __func__);
3179 fw_dcbx_rsp = (q80_query_fw_dcbx_caps_rsp_t *)ha->hw.mbox;
3180 ql_dump_buf8(ha, __func__, fw_dcbx_rsp,
3181 sizeof (q80_query_fw_dcbx_caps_rsp_t));
3183 err = Q8_MBX_RSP_STATUS(fw_dcbx_rsp->regcnt_status);
3186 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3193 qla_idc_ack(qla_host_t *ha, uint32_t aen_mb1, uint32_t aen_mb2,
3194 uint32_t aen_mb3, uint32_t aen_mb4)
3197 q80_idc_ack_t *idc_ack;
3198 q80_idc_ack_rsp_t *idc_ack_rsp;
3204 idc_ack = (q80_idc_ack_t *)ha->hw.mbox;
3205 bzero(idc_ack, sizeof(q80_idc_ack_t));
3207 idc_ack->opcode = Q8_MBX_IDC_ACK;
3208 idc_ack->count_version = (sizeof (q80_idc_ack_t) >> 2);
3209 idc_ack->count_version |= Q8_MBX_CMD_VERSION;
3211 idc_ack->aen_mb1 = aen_mb1;
3212 idc_ack->aen_mb2 = aen_mb2;
3213 idc_ack->aen_mb3 = aen_mb3;
3214 idc_ack->aen_mb4 = aen_mb4;
ha->hw.imd_compl = 0;
3218 if (qla_mbx_cmd(ha, (uint32_t *)idc_ack,
3219 (sizeof (q80_idc_ack_t) >> 2),
3220 ha->hw.mbox, (sizeof (q80_idc_ack_rsp_t) >> 2), 0)) {
3221 device_printf(dev, "%s: failed\n", __func__);
3225 idc_ack_rsp = (q80_idc_ack_rsp_t *)ha->hw.mbox;
3227 err = Q8_MBX_RSP_STATUS(idc_ack_rsp->regcnt_status);
3230 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3234 while (count && !ha->hw.imd_compl) {
3235 qla_mdelay(__func__, 100);
3242 device_printf(dev, "%s: count %d\n", __func__, count);
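/*
 * Name: qla_set_port_config
 * Function: Writes the port configuration bits (standard pause direction,
 *	pause type, DCBX enable) to the firmware and, when an intermediate
 *	IDC response is returned, waits for the completion.
 */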
3248 qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits)
3251 q80_set_port_cfg_t *pcfg;
q80_set_port_cfg_rsp_t *pcfg_rsp;
3258 pcfg = (q80_set_port_cfg_t *)ha->hw.mbox;
3259 bzero(pcfg, sizeof(q80_set_port_cfg_t));
3261 pcfg->opcode = Q8_MBX_SET_PORT_CONFIG;
3262 pcfg->count_version = (sizeof (q80_set_port_cfg_t) >> 2);
3263 pcfg->count_version |= Q8_MBX_CMD_VERSION;
3265 pcfg->cfg_bits = cfg_bits;
3267 device_printf(dev, "%s: cfg_bits"
3268 " [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
3269 " [0x%x, 0x%x, 0x%x]\n", __func__,
3270 ((cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
3271 ((cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
3272 ((cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0));
ha->hw.imd_compl = 0;
3276 if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
3277 (sizeof (q80_set_port_cfg_t) >> 2),
3278 ha->hw.mbox, (sizeof (q80_set_port_cfg_rsp_t) >> 2), 0)) {
3279 device_printf(dev, "%s: failed\n", __func__);
pcfg_rsp = (q80_set_port_cfg_rsp_t *)ha->hw.mbox;
err = Q8_MBX_RSP_STATUS(pcfg_rsp->regcnt_status);
3287 if (err == Q8_MBX_RSP_IDC_INTRMD_RSP) {
3288 while (count && !ha->hw.imd_compl) {
3289 qla_mdelay(__func__, 100);
3293 device_printf(dev, "%s: count %d\n", __func__, count);
3300 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3309 qla_get_minidump_tmplt_size(qla_host_t *ha, uint32_t *size)
3312 device_t dev = ha->pci_dev;
3313 q80_config_md_templ_size_t *md_size;
3314 q80_config_md_templ_size_rsp_t *md_size_rsp;
3316 #ifndef QL_LDFLASH_FW
3318 ql_minidump_template_hdr_t *hdr;
3320 hdr = (ql_minidump_template_hdr_t *)ql83xx_minidump;
3321 *size = hdr->size_of_template;
#endif /* #ifndef QL_LDFLASH_FW */
3326 md_size = (q80_config_md_templ_size_t *) ha->hw.mbox;
3327 bzero(md_size, sizeof(q80_config_md_templ_size_t));
3329 md_size->opcode = Q8_MBX_GET_MINIDUMP_TMPLT_SIZE;
3330 md_size->count_version = (sizeof (q80_config_md_templ_size_t) >> 2);
3331 md_size->count_version |= Q8_MBX_CMD_VERSION;
3333 if (qla_mbx_cmd(ha, (uint32_t *) md_size,
3334 (sizeof(q80_config_md_templ_size_t) >> 2), ha->hw.mbox,
3335 (sizeof(q80_config_md_templ_size_rsp_t) >> 2), 0)) {
3337 device_printf(dev, "%s: failed\n", __func__);
3342 md_size_rsp = (q80_config_md_templ_size_rsp_t *) ha->hw.mbox;
3344 err = Q8_MBX_RSP_STATUS(md_size_rsp->regcnt_status);
3347 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3351 *size = md_size_rsp->templ_size;
3357 qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits)
3360 q80_get_port_cfg_t *pcfg;
3361 q80_get_port_cfg_rsp_t *pcfg_rsp;
3366 pcfg = (q80_get_port_cfg_t *)ha->hw.mbox;
3367 bzero(pcfg, sizeof(q80_get_port_cfg_t));
3369 pcfg->opcode = Q8_MBX_GET_PORT_CONFIG;
3370 pcfg->count_version = (sizeof (q80_get_port_cfg_t) >> 2);
3371 pcfg->count_version |= Q8_MBX_CMD_VERSION;
3373 if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
3374 (sizeof (q80_get_port_cfg_t) >> 2),
3375 ha->hw.mbox, (sizeof (q80_get_port_cfg_rsp_t) >> 2), 0)) {
3376 device_printf(dev, "%s: failed\n", __func__);
3380 pcfg_rsp = (q80_get_port_cfg_rsp_t *)ha->hw.mbox;
3382 err = Q8_MBX_RSP_STATUS(pcfg_rsp->regcnt_status);
3385 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3389 device_printf(dev, "%s: [cfg_bits, port type]"
3390 " [0x%08x, 0x%02x] [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
3391 " [0x%x, 0x%x, 0x%x]\n", __func__,
3392 pcfg_rsp->cfg_bits, pcfg_rsp->phys_port_type,
3393 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
3394 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
3395 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0)
3398 *cfg_bits = pcfg_rsp->cfg_bits;
3404 qla_iscsi_pdu(qla_host_t *ha, struct mbuf *mp)
3406 struct ether_vlan_header *eh;
3408 struct ip *ip = NULL;
3409 struct ip6_hdr *ip6 = NULL;
3410 struct tcphdr *th = NULL;
3413 uint8_t buf[sizeof(struct ip6_hdr)];
3415 eh = mtod(mp, struct ether_vlan_header *);
3417 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3418 hdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3419 etype = ntohs(eh->evl_proto);
3421 hdrlen = ETHER_HDR_LEN;
3422 etype = ntohs(eh->evl_encap_proto);
3425 if (etype == ETHERTYPE_IP) {
3427 offset = (hdrlen + sizeof (struct ip));
3429 if (mp->m_len >= offset) {
3430 ip = (struct ip *)(mp->m_data + hdrlen);
3432 m_copydata(mp, hdrlen, sizeof (struct ip), buf);
3433 ip = (struct ip *)buf;
3436 if (ip->ip_p == IPPROTO_TCP) {
3438 hdrlen += ip->ip_hl << 2;
3439 offset = hdrlen + 4;
3441 if (mp->m_len >= offset) {
th = (struct tcphdr *)(mp->m_data + hdrlen);
3444 m_copydata(mp, hdrlen, 4, buf);
3445 th = (struct tcphdr *)buf;
3449 } else if (etype == ETHERTYPE_IPV6) {
3451 offset = (hdrlen + sizeof (struct ip6_hdr));
3453 if (mp->m_len >= offset) {
3454 ip6 = (struct ip6_hdr *)(mp->m_data + hdrlen);
3456 m_copydata(mp, hdrlen, sizeof (struct ip6_hdr), buf);
3457 ip6 = (struct ip6_hdr *)buf;
3460 if (ip6->ip6_nxt == IPPROTO_TCP) {
3462 hdrlen += sizeof(struct ip6_hdr);
3463 offset = hdrlen + 4;
3465 if (mp->m_len >= offset) {
th = (struct tcphdr *)(mp->m_data + hdrlen);
3468 m_copydata(mp, hdrlen, 4, buf);
3469 th = (struct tcphdr *)buf;
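	/* iSCSI uses the well-known TCP port 3260 */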
3475 if ((th->th_sport == htons(3260)) ||
3476 (th->th_dport == htons(3260)))
3483 qla_hw_async_event(qla_host_t *ha)
3485 switch (ha->hw.aen_mb0) {
3487 (void)qla_idc_ack(ha, ha->hw.aen_mb1, ha->hw.aen_mb2,
3488 ha->hw.aen_mb3, ha->hw.aen_mb4);
3499 #ifdef QL_LDFLASH_FW
3501 ql_get_minidump_template(qla_host_t *ha)
3504 device_t dev = ha->pci_dev;
3505 q80_config_md_templ_cmd_t *md_templ;
3506 q80_config_md_templ_cmd_rsp_t *md_templ_rsp;
3508 md_templ = (q80_config_md_templ_cmd_t *) ha->hw.mbox;
3509 bzero(md_templ, (sizeof (q80_config_md_templ_cmd_t)));
3511 md_templ->opcode = Q8_MBX_GET_MINIDUMP_TMPLT;
md_templ->count_version = (sizeof(q80_config_md_templ_cmd_t) >> 2);
3513 md_templ->count_version |= Q8_MBX_CMD_VERSION;
3515 md_templ->buf_addr = ha->hw.dma_buf.minidump.dma_addr;
3516 md_templ->buff_size = ha->hw.dma_buf.minidump.size;
3518 if (qla_mbx_cmd(ha, (uint32_t *) md_templ,
3519 (sizeof(q80_config_md_templ_cmd_t) >> 2),
3521 (sizeof(q80_config_md_templ_cmd_rsp_t) >> 2), 0)) {
3523 device_printf(dev, "%s: failed\n", __func__);
3528 md_templ_rsp = (q80_config_md_templ_cmd_rsp_t *) ha->hw.mbox;
3530 err = Q8_MBX_RSP_STATUS(md_templ_rsp->regcnt_status);
3533 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3540 #endif /* #ifdef QL_LDFLASH_FW */
3543 * Minidump related functionality
3546 static int ql_parse_template(qla_host_t *ha);
3548 static uint32_t ql_rdcrb(qla_host_t *ha,
3549 ql_minidump_entry_rdcrb_t *crb_entry,
3550 uint32_t * data_buff);
3552 static uint32_t ql_pollrd(qla_host_t *ha,
3553 ql_minidump_entry_pollrd_t *entry,
3554 uint32_t * data_buff);
3556 static uint32_t ql_pollrd_modify_write(qla_host_t *ha,
3557 ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
3558 uint32_t *data_buff);
3560 static uint32_t ql_L2Cache(qla_host_t *ha,
3561 ql_minidump_entry_cache_t *cacheEntry,
3562 uint32_t * data_buff);
3564 static uint32_t ql_L1Cache(qla_host_t *ha,
3565 ql_minidump_entry_cache_t *cacheEntry,
3566 uint32_t *data_buff);
3568 static uint32_t ql_rdocm(qla_host_t *ha,
3569 ql_minidump_entry_rdocm_t *ocmEntry,
3570 uint32_t *data_buff);
3572 static uint32_t ql_rdmem(qla_host_t *ha,
3573 ql_minidump_entry_rdmem_t *mem_entry,
3574 uint32_t *data_buff);
3576 static uint32_t ql_rdrom(qla_host_t *ha,
3577 ql_minidump_entry_rdrom_t *romEntry,
3578 uint32_t *data_buff);
3580 static uint32_t ql_rdmux(qla_host_t *ha,
3581 ql_minidump_entry_mux_t *muxEntry,
3582 uint32_t *data_buff);
3584 static uint32_t ql_rdmux2(qla_host_t *ha,
3585 ql_minidump_entry_mux2_t *muxEntry,
3586 uint32_t *data_buff);
3588 static uint32_t ql_rdqueue(qla_host_t *ha,
3589 ql_minidump_entry_queue_t *queueEntry,
3590 uint32_t *data_buff);
3592 static uint32_t ql_cntrl(qla_host_t *ha,
3593 ql_minidump_template_hdr_t *template_hdr,
3594 ql_minidump_entry_cntrl_t *crbEntry);
3598 ql_minidump_size(qla_host_t *ha)
3602 ql_minidump_template_hdr_t *hdr;
3604 hdr = (ql_minidump_template_hdr_t *)ha->hw.dma_buf.minidump.dma_b;
3608 for (k = 1; k < QL_DBG_CAP_SIZE_ARRAY_LEN; k++) {
3609 if (i & ha->hw.mdump_capture_mask)
3610 size += hdr->capture_size_array[k];
3617 ql_free_minidump_buffer(qla_host_t *ha)
3619 if (ha->hw.mdump_buffer != NULL) {
3620 free(ha->hw.mdump_buffer, M_QLA83XXBUF);
3621 ha->hw.mdump_buffer = NULL;
3622 ha->hw.mdump_buffer_size = 0;
3628 ql_alloc_minidump_buffer(qla_host_t *ha)
3630 ha->hw.mdump_buffer_size = ql_minidump_size(ha);
3632 if (!ha->hw.mdump_buffer_size)
3635 ha->hw.mdump_buffer = malloc(ha->hw.mdump_buffer_size, M_QLA83XXBUF,
3638 if (ha->hw.mdump_buffer == NULL)
3645 ql_free_minidump_template_buffer(qla_host_t *ha)
3647 if (ha->hw.mdump_template != NULL) {
3648 free(ha->hw.mdump_template, M_QLA83XXBUF);
3649 ha->hw.mdump_template = NULL;
3650 ha->hw.mdump_template_size = 0;
3656 ql_alloc_minidump_template_buffer(qla_host_t *ha)
3658 ha->hw.mdump_template_size = ha->hw.dma_buf.minidump.size;
3660 ha->hw.mdump_template = malloc(ha->hw.mdump_template_size,
3661 M_QLA83XXBUF, M_NOWAIT);
3663 if (ha->hw.mdump_template == NULL)
3670 ql_alloc_minidump_buffers(qla_host_t *ha)
3674 ret = ql_alloc_minidump_template_buffer(ha);
3679 ret = ql_alloc_minidump_buffer(ha);
3682 ql_free_minidump_template_buffer(ha);
3689 ql_validate_minidump_checksum(qla_host_t *ha)
3693 uint32_t *template_buff;
3695 count = ha->hw.dma_buf.minidump.size / sizeof (uint32_t);
3696 template_buff = ha->hw.dma_buf.minidump.dma_b;
3698 while (count-- > 0) {
3699 sum += *template_buff++;
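	/*
	 * Fold the 64-bit running sum into 32 bits; the template is
	 * presumably considered intact when the folded sum (checksum word
	 * included) comes out to zero.
	 */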
3703 sum = (sum & 0xFFFFFFFF) + (sum >> 32);
3710 ql_minidump_init(qla_host_t *ha)
3713 uint32_t template_size = 0;
3714 device_t dev = ha->pci_dev;
3717 * Get Minidump Template Size
3719 ret = qla_get_minidump_tmplt_size(ha, &template_size);
3721 if (ret || (template_size == 0)) {
3722 device_printf(dev, "%s: failed [%d, %d]\n", __func__, ret,
3728 * Allocate Memory for Minidump Template
3731 ha->hw.dma_buf.minidump.alignment = 8;
3732 ha->hw.dma_buf.minidump.size = template_size;
3734 #ifdef QL_LDFLASH_FW
3735 if (ql_alloc_dmabuf(ha, &ha->hw.dma_buf.minidump)) {
3737 device_printf(dev, "%s: minidump dma alloc failed\n", __func__);
3741 ha->hw.dma_buf.flags.minidump = 1;
3744 * Retrieve Minidump Template
3746 ret = ql_get_minidump_template(ha);
3748 ha->hw.dma_buf.minidump.dma_b = ql83xx_minidump;
3750 #endif /* #ifdef QL_LDFLASH_FW */
3754 ret = ql_validate_minidump_checksum(ha);
3758 ret = ql_alloc_minidump_buffers(ha);
3761 ha->hw.mdump_init = 1;
3764 "%s: ql_alloc_minidump_buffers"
3765 " failed\n", __func__);
3767 device_printf(dev, "%s: ql_validate_minidump_checksum"
3768 " failed\n", __func__);
3771 device_printf(dev, "%s: ql_get_minidump_template failed\n",
3776 ql_minidump_free(ha);
3782 ql_minidump_free(qla_host_t *ha)
3784 ha->hw.mdump_init = 0;
3785 if (ha->hw.dma_buf.flags.minidump) {
3786 ha->hw.dma_buf.flags.minidump = 0;
3787 ql_free_dmabuf(ha, &ha->hw.dma_buf.minidump);
3790 ql_free_minidump_template_buffer(ha);
3791 ql_free_minidump_buffer(ha);
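/*
 * Name: ql_minidump
 * Function: Captures the minidump: stops the firmware sequence, copies the
 *	template out of the DMA buffer, walks its entries into the capture
 *	buffer, restarts the firmware sequence, and marks the dump done.
 */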
3797 ql_minidump(qla_host_t *ha)
3799 if (!ha->hw.mdump_init)
3802 if (ha->hw.mdump_done)
3805 ha->hw.mdump_start_seq_index = ql_stop_sequence(ha);
3807 bzero(ha->hw.mdump_buffer, ha->hw.mdump_buffer_size);
3808 bzero(ha->hw.mdump_template, ha->hw.mdump_template_size);
3810 bcopy(ha->hw.dma_buf.minidump.dma_b, ha->hw.mdump_template,
3811 ha->hw.mdump_template_size);
3813 ql_parse_template(ha);
3815 ql_start_sequence(ha, ha->hw.mdump_start_seq_index);
3817 ha->hw.mdump_done = 1;
3827 ql_entry_err_chk(ql_minidump_entry_t *entry, uint32_t esize)
3829 if (esize != entry->hdr.entry_capture_size) {
3830 entry->hdr.entry_capture_size = esize;
3831 entry->hdr.driver_flags |= QL_DBG_SIZE_ERR_FLAG;
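/*
 * Each capture helper below returns the number of bytes it wrote to the
 * dump buffer; ql_parse_template() uses that value to advance buff_level
 * and, via ql_entry_err_chk(), to flag entries whose captured size differs
 * from the template's entry_capture_size.
 */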
3838 ql_parse_template(qla_host_t *ha)
3840 uint32_t num_of_entries, buff_level, e_cnt, esize;
3841 uint32_t end_cnt, rv = 0;
3842 char *dump_buff, *dbuff;
3843 int sane_start = 0, sane_end = 0;
3844 ql_minidump_template_hdr_t *template_hdr;
3845 ql_minidump_entry_t *entry;
3846 uint32_t capture_mask;
3849 /* Setup parameters */
3850 template_hdr = (ql_minidump_template_hdr_t *)ha->hw.mdump_template;
3852 if (template_hdr->entry_type == TLHDR)
3855 dump_buff = (char *) ha->hw.mdump_buffer;
3857 num_of_entries = template_hdr->num_of_entries;
3859 entry = (ql_minidump_entry_t *) ((char *)template_hdr
+ template_hdr->first_entry_offset);
3862 template_hdr->saved_state_array[QL_OCM0_ADDR_INDX] =
3863 template_hdr->ocm_window_array[ha->pci_func];
3864 template_hdr->saved_state_array[QL_PCIE_FUNC_INDX] = ha->pci_func;
3866 capture_mask = ha->hw.mdump_capture_mask;
3867 dump_size = ha->hw.mdump_buffer_size;
3869 template_hdr->driver_capture_mask = capture_mask;
3871 QL_DPRINT80(ha, (ha->pci_dev,
3872 "%s: sane_start = %d num_of_entries = %d "
3873 "capture_mask = 0x%x dump_size = %d \n",
3874 __func__, sane_start, num_of_entries, capture_mask, dump_size));
3876 for (buff_level = 0, e_cnt = 0; e_cnt < num_of_entries; e_cnt++) {
* If the entry's capture mask does not match the requested capture mask,
* skip the entry after marking the driver_flags indicator.
3883 if (!(entry->hdr.entry_capture_mask & capture_mask)) {
3885 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
3886 entry = (ql_minidump_entry_t *) ((char *) entry
3887 + entry->hdr.entry_size);
* This is ONLY needed in implementations where the allocated capture
* buffer is too small to hold all of the entries required by a given
* capture mask. We need to empty the buffer contents to a file, if
* possible, before processing the next entry. If the buff_full_flag is
* set, no further capture will happen and all remaining non-control
* entries will be skipped.
3900 if (entry->hdr.entry_capture_size != 0) {
3901 if ((buff_level + entry->hdr.entry_capture_size) >
3903 /* Try to recover by emptying buffer to file */
3904 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
3905 entry = (ql_minidump_entry_t *) ((char *) entry
3906 + entry->hdr.entry_size);
3912 * Decode the entry type and process it accordingly
3915 switch (entry->hdr.entry_type) {
3920 if (sane_end == 0) {
3927 dbuff = dump_buff + buff_level;
3928 esize = ql_rdcrb(ha, (void *)entry, (void *)dbuff);
3929 ql_entry_err_chk(entry, esize);
3930 buff_level += esize;
3934 dbuff = dump_buff + buff_level;
3935 esize = ql_pollrd(ha, (void *)entry, (void *)dbuff);
3936 ql_entry_err_chk(entry, esize);
3937 buff_level += esize;
3941 dbuff = dump_buff + buff_level;
3942 esize = ql_pollrd_modify_write(ha, (void *)entry,
3944 ql_entry_err_chk(entry, esize);
3945 buff_level += esize;
3952 dbuff = dump_buff + buff_level;
3953 esize = ql_L2Cache(ha, (void *)entry, (void *)dbuff);
3955 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
3957 ql_entry_err_chk(entry, esize);
3958 buff_level += esize;
3964 dbuff = dump_buff + buff_level;
3965 esize = ql_L1Cache(ha, (void *)entry, (void *)dbuff);
3966 ql_entry_err_chk(entry, esize);
3967 buff_level += esize;
3971 dbuff = dump_buff + buff_level;
3972 esize = ql_rdocm(ha, (void *)entry, (void *)dbuff);
3973 ql_entry_err_chk(entry, esize);
3974 buff_level += esize;
3978 dbuff = dump_buff + buff_level;
3979 esize = ql_rdmem(ha, (void *)entry, (void *)dbuff);
3980 ql_entry_err_chk(entry, esize);
3981 buff_level += esize;
3986 dbuff = dump_buff + buff_level;
3987 esize = ql_rdrom(ha, (void *)entry, (void *)dbuff);
3988 ql_entry_err_chk(entry, esize);
3989 buff_level += esize;
3993 dbuff = dump_buff + buff_level;
3994 esize = ql_rdmux(ha, (void *)entry, (void *)dbuff);
3995 ql_entry_err_chk(entry, esize);
3996 buff_level += esize;
4000 dbuff = dump_buff + buff_level;
4001 esize = ql_rdmux2(ha, (void *)entry, (void *)dbuff);
4002 ql_entry_err_chk(entry, esize);
4003 buff_level += esize;
4007 dbuff = dump_buff + buff_level;
4008 esize = ql_rdqueue(ha, (void *)entry, (void *)dbuff);
4009 ql_entry_err_chk(entry, esize);
4010 buff_level += esize;
4014 if ((rv = ql_cntrl(ha, template_hdr, (void *)entry))) {
4015 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4019 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4022 /* next entry in the template */
4023 entry = (ql_minidump_entry_t *) ((char *) entry
4024 + entry->hdr.entry_size);
4027 if (!sane_start || (sane_end > 1)) {
4028 device_printf(ha->pci_dev,
4029 "\n%s: Template configuration error. Check Template\n",
4033 QL_DPRINT80(ha, (ha->pci_dev, "%s: Minidump num of entries = %d\n",
4034 __func__, template_hdr->num_of_entries));
4040 * Read CRB operation.
4043 ql_rdcrb(qla_host_t *ha, ql_minidump_entry_rdcrb_t * crb_entry,
4044 uint32_t * data_buff)
4048 uint32_t op_count, addr, stride, value = 0;
4050 addr = crb_entry->addr;
4051 op_count = crb_entry->op_count;
4052 stride = crb_entry->addr_stride;
4054 for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
4056 ret = ql_rdwr_indreg32(ha, addr, &value, 1);
4061 *data_buff++ = addr;
4062 *data_buff++ = value;
4063 addr = addr + stride;
* for testing purposes we return the amount of data written
4069 return (op_count * (2 * sizeof(uint32_t)));
4077 ql_L2Cache(qla_host_t *ha, ql_minidump_entry_cache_t *cacheEntry,
4078 uint32_t * data_buff)
4084 uint32_t read_value;
4085 uint32_t addr, read_addr, cntrl_addr, tag_reg_addr, cntl_value_w;
4086 uint32_t tag_value, read_cnt;
4087 volatile uint8_t cntl_value_r;
4091 loop_cnt = cacheEntry->op_count;
4093 read_addr = cacheEntry->read_addr;
4094 cntrl_addr = cacheEntry->control_addr;
4095 cntl_value_w = (uint32_t) cacheEntry->write_value;
4097 tag_reg_addr = cacheEntry->tag_reg_addr;
4099 tag_value = cacheEntry->init_tag_value;
4100 read_cnt = cacheEntry->read_addr_cnt;
4102 for (i = 0; i < loop_cnt; i++) {
4104 ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
4108 if (cacheEntry->write_value != 0) {
4110 ret = ql_rdwr_indreg32(ha, cntrl_addr,
4116 if (cacheEntry->poll_mask != 0) {
4118 timeout = cacheEntry->poll_wait;
4120 ret = ql_rdwr_indreg32(ha, cntrl_addr, &data, 1);
4124 cntl_value_r = (uint8_t)data;
4126 while ((cntl_value_r & cacheEntry->poll_mask) != 0) {
4129 qla_mdelay(__func__, 1);
4134 ret = ql_rdwr_indreg32(ha, cntrl_addr,
4139 cntl_value_r = (uint8_t)data;
/* Report a timeout error:
 * the core dump capture failed.
 * Skip the remaining entries,
 * write the buffer out to a file, and
 * use the driver-specific fields in the template header
 * to report this error.
4154 for (k = 0; k < read_cnt; k++) {
4156 ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4160 *data_buff++ = read_value;
4161 addr += cacheEntry->read_addr_stride;
4164 tag_value += cacheEntry->tag_value_stride;
4167 return (read_cnt * loop_cnt * sizeof(uint32_t));
4175 ql_L1Cache(qla_host_t *ha,
4176 ql_minidump_entry_cache_t *cacheEntry,
4177 uint32_t *data_buff)
4183 uint32_t read_value;
4184 uint32_t addr, read_addr, cntrl_addr, tag_reg_addr;
4185 uint32_t tag_value, read_cnt;
4186 uint32_t cntl_value_w;
4188 loop_cnt = cacheEntry->op_count;
4190 read_addr = cacheEntry->read_addr;
4191 cntrl_addr = cacheEntry->control_addr;
4192 cntl_value_w = (uint32_t) cacheEntry->write_value;
4194 tag_reg_addr = cacheEntry->tag_reg_addr;
4196 tag_value = cacheEntry->init_tag_value;
4197 read_cnt = cacheEntry->read_addr_cnt;
4199 for (i = 0; i < loop_cnt; i++) {
4201 ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
4205 ret = ql_rdwr_indreg32(ha, cntrl_addr, &cntl_value_w, 0);
4210 for (k = 0; k < read_cnt; k++) {
4212 ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4216 *data_buff++ = read_value;
4217 addr += cacheEntry->read_addr_stride;
4220 tag_value += cacheEntry->tag_value_stride;
4223 return (read_cnt * loop_cnt * sizeof(uint32_t));
4227 * Reading OCM memory
4231 ql_rdocm(qla_host_t *ha,
4232 ql_minidump_entry_rdocm_t *ocmEntry,
4233 uint32_t *data_buff)
4236 volatile uint32_t addr;
4237 volatile uint32_t value;
4239 addr = ocmEntry->read_addr;
4240 loop_cnt = ocmEntry->op_count;
4242 for (i = 0; i < loop_cnt; i++) {
4243 value = READ_REG32(ha, addr);
4244 *data_buff++ = value;
4245 addr += ocmEntry->read_addr_stride;
4247 return (loop_cnt * sizeof(value));
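/*
 * Read off-chip memory; each ql_rdwr_offchip_mem() call transfers
 * 16 bytes (four 32-bit words) into the dump buffer.
 */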
4255 ql_rdmem(qla_host_t *ha,
4256 ql_minidump_entry_rdmem_t *mem_entry,
4257 uint32_t *data_buff)
4261 volatile uint32_t addr;
4262 q80_offchip_mem_val_t val;
4264 addr = mem_entry->read_addr;
/* read_data_size is in bytes; each iteration below reads 16 bytes */
4267 loop_cnt = mem_entry->read_data_size / (sizeof(uint32_t) * 4);
4269 for (i = 0; i < loop_cnt; i++) {
4271 ret = ql_rdwr_offchip_mem(ha, (addr & 0x0ffffffff), &val, 1);
4275 *data_buff++ = val.data_lo;
4276 *data_buff++ = val.data_hi;
4277 *data_buff++ = val.data_ulo;
4278 *data_buff++ = val.data_uhi;
4280 addr += (sizeof(uint32_t) * 4);
4283 return (loop_cnt * (sizeof(uint32_t) * 4));
4291 ql_rdrom(qla_host_t *ha,
4292 ql_minidump_entry_rdrom_t *romEntry,
4293 uint32_t *data_buff)
4300 addr = romEntry->read_addr;
4301 loop_cnt = romEntry->read_data_size; /* This is size in bytes */
4302 loop_cnt /= sizeof(value);
4304 for (i = 0; i < loop_cnt; i++) {
4306 ret = ql_rd_flash32(ha, addr, &value);
4310 *data_buff++ = value;
4311 addr += sizeof(value);
4314 return (loop_cnt * sizeof(value));
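/*
 * Read a bank of muxed registers: for each step, write the select value,
 * read the data register, and record both the select and the data.
 */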
4322 ql_rdmux(qla_host_t *ha,
4323 ql_minidump_entry_mux_t *muxEntry,
4324 uint32_t *data_buff)
4328 uint32_t read_value, sel_value;
4329 uint32_t read_addr, select_addr;
4331 select_addr = muxEntry->select_addr;
4332 sel_value = muxEntry->select_value;
4333 read_addr = muxEntry->read_addr;
4335 for (loop_cnt = 0; loop_cnt < muxEntry->op_count; loop_cnt++) {
4337 ret = ql_rdwr_indreg32(ha, select_addr, &sel_value, 0);
4341 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
4345 *data_buff++ = sel_value;
4346 *data_buff++ = read_value;
4348 sel_value += muxEntry->select_value_stride;
4351 return (loop_cnt * (2 * sizeof(uint32_t)));
4355 ql_rdmux2(qla_host_t *ha,
4356 ql_minidump_entry_mux2_t *muxEntry,
4357 uint32_t *data_buff)
4362 uint32_t select_addr_1, select_addr_2;
4363 uint32_t select_value_1, select_value_2;
4364 uint32_t select_value_count, select_value_mask;
4365 uint32_t read_addr, read_value;
4367 select_addr_1 = muxEntry->select_addr_1;
4368 select_addr_2 = muxEntry->select_addr_2;
4369 select_value_1 = muxEntry->select_value_1;
4370 select_value_2 = muxEntry->select_value_2;
4371 select_value_count = muxEntry->select_value_count;
4372 select_value_mask = muxEntry->select_value_mask;
4374 read_addr = muxEntry->read_addr;
4376 for (loop_cnt = 0; loop_cnt < muxEntry->select_value_count;
4379 uint32_t temp_sel_val;
4381 ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_1, 0);
4385 temp_sel_val = select_value_1 & select_value_mask;
4387 ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
4391 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
4395 *data_buff++ = temp_sel_val;
4396 *data_buff++ = read_value;
4398 ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_2, 0);
4402 temp_sel_val = select_value_2 & select_value_mask;
4404 ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
4408 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
4412 *data_buff++ = temp_sel_val;
4413 *data_buff++ = read_value;
4415 select_value_1 += muxEntry->select_value_stride;
4416 select_value_2 += muxEntry->select_value_stride;
4419 return (loop_cnt * (4 * sizeof(uint32_t)));
4423 * Handling Queue State Reads.
4427 ql_rdqueue(qla_host_t *ha,
4428 ql_minidump_entry_queue_t *queueEntry,
4429 uint32_t *data_buff)
4433 uint32_t read_value;
4434 uint32_t read_addr, read_stride, select_addr;
4435 uint32_t queue_id, read_cnt;
4437 read_cnt = queueEntry->read_addr_cnt;
4438 read_stride = queueEntry->read_addr_stride;
4439 select_addr = queueEntry->select_addr;
4441 for (loop_cnt = 0, queue_id = 0; loop_cnt < queueEntry->op_count;
4444 ret = ql_rdwr_indreg32(ha, select_addr, &queue_id, 0);
4448 read_addr = queueEntry->read_addr;
4450 for (k = 0; k < read_cnt; k++) {
4452 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
4456 *data_buff++ = read_value;
4457 read_addr += read_stride;
4460 queue_id += queueEntry->queue_id_stride;
4463 return (loop_cnt * (read_cnt * sizeof(uint32_t)));
4467 * Handling control entries.
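* These perform read/modify/write/poll side effects on the hardware rather
* than capturing data, so they contribute nothing to the dump buffer.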
4471 ql_cntrl(qla_host_t *ha,
4472 ql_minidump_template_hdr_t *template_hdr,
4473 ql_minidump_entry_cntrl_t *crbEntry)
4477 uint32_t opcode, read_value, addr, entry_addr;
4480 entry_addr = crbEntry->addr;
4482 for (count = 0; count < crbEntry->op_count; count++) {
4483 opcode = crbEntry->opcode;
4485 if (opcode & QL_DBG_OPCODE_WR) {
4487 ret = ql_rdwr_indreg32(ha, entry_addr,
4488 &crbEntry->value_1, 0);
4492 opcode &= ~QL_DBG_OPCODE_WR;
4495 if (opcode & QL_DBG_OPCODE_RW) {
4497 ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
4501 ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
4505 opcode &= ~QL_DBG_OPCODE_RW;
4508 if (opcode & QL_DBG_OPCODE_AND) {
4510 ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
4514 read_value &= crbEntry->value_2;
4515 opcode &= ~QL_DBG_OPCODE_AND;
4517 if (opcode & QL_DBG_OPCODE_OR) {
4518 read_value |= crbEntry->value_3;
4519 opcode &= ~QL_DBG_OPCODE_OR;
4522 ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
4527 if (opcode & QL_DBG_OPCODE_OR) {
4529 ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
4533 read_value |= crbEntry->value_3;
4535 ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
4539 opcode &= ~QL_DBG_OPCODE_OR;
4542 if (opcode & QL_DBG_OPCODE_POLL) {
4544 opcode &= ~QL_DBG_OPCODE_POLL;
4545 timeout = crbEntry->poll_timeout;
4548 ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4552 while ((read_value & crbEntry->value_2)
4553 != crbEntry->value_1) {
4556 qla_mdelay(__func__, 1);
4561 ret = ql_rdwr_indreg32(ha, addr,
* Report a timeout error:
* the core dump capture failed.
* Skip the remaining entries,
* write the buffer out to a file, and
* use the driver-specific fields in the template header
* to report this error.
4580 if (opcode & QL_DBG_OPCODE_RDSTATE) {
4582 * decide which address to use.
4584 if (crbEntry->state_index_a) {
4585 addr = template_hdr->saved_state_array[
crbEntry->state_index_a];
4591 ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4595 template_hdr->saved_state_array[crbEntry->state_index_v]
4597 opcode &= ~QL_DBG_OPCODE_RDSTATE;
4600 if (opcode & QL_DBG_OPCODE_WRSTATE) {
4602 * decide which value to use.
4604 if (crbEntry->state_index_v) {
4605 read_value = template_hdr->saved_state_array[
4606 crbEntry->state_index_v];
4608 read_value = crbEntry->value_1;
4611 * decide which address to use.
4613 if (crbEntry->state_index_a) {
4614 addr = template_hdr->saved_state_array[
crbEntry->state_index_a];
4620 ret = ql_rdwr_indreg32(ha, addr, &read_value, 0);
4624 opcode &= ~QL_DBG_OPCODE_WRSTATE;
4627 if (opcode & QL_DBG_OPCODE_MDSTATE) {
4628 /* Read value from saved state using index */
4629 read_value = template_hdr->saved_state_array[
4630 crbEntry->state_index_v];
read_value <<= crbEntry->shl; /* shift left operation */
read_value >>= crbEntry->shr; /* shift right operation */
4635 if (crbEntry->value_2) {
4636 /* check if AND mask is provided */
4637 read_value &= crbEntry->value_2;
4640 read_value |= crbEntry->value_3; /* OR operation */
4641 read_value += crbEntry->value_1; /* increment op */
4643 /* Write value back to state area. */
4645 template_hdr->saved_state_array[crbEntry->state_index_v]
4647 opcode &= ~QL_DBG_OPCODE_MDSTATE;
4650 entry_addr += crbEntry->addr_stride;
4657 * Handling rd poll entry.
4661 ql_pollrd(qla_host_t *ha, ql_minidump_entry_pollrd_t *entry,
4662 uint32_t *data_buff)
4666 uint32_t op_count, select_addr, select_value_stride, select_value;
4667 uint32_t read_addr, poll, mask, data_size, data;
4668 uint32_t wait_count = 0;
4670 select_addr = entry->select_addr;
4671 read_addr = entry->read_addr;
4672 select_value = entry->select_value;
4673 select_value_stride = entry->select_value_stride;
4674 op_count = entry->op_count;
4677 data_size = entry->data_size;
4679 for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
4681 ret = ql_rdwr_indreg32(ha, select_addr, &select_value, 0);
4687 while (wait_count < poll) {
4691 ret = ql_rdwr_indreg32(ha, select_addr, &temp, 1);
if ((temp & mask) != 0) {
4701 if (wait_count == poll) {
4702 device_printf(ha->pci_dev,
4703 "%s: Error in processing entry\n", __func__);
4704 device_printf(ha->pci_dev,
4705 "%s: wait_count <0x%x> poll <0x%x>\n",
4706 __func__, wait_count, poll);
4710 ret = ql_rdwr_indreg32(ha, read_addr, &data, 1);
4714 *data_buff++ = select_value;
4715 *data_buff++ = data;
4716 select_value = select_value + select_value_stride;
* for testing purposes we return the amount of data written
4722 return (loop_cnt * (2 * sizeof(uint32_t)));
4727 * Handling rd modify write poll entry.
4731 ql_pollrd_modify_write(qla_host_t *ha,
4732 ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
4733 uint32_t *data_buff)
4736 uint32_t addr_1, addr_2, value_1, value_2, data;
4737 uint32_t poll, mask, data_size, modify_mask;
4738 uint32_t wait_count = 0;
4740 addr_1 = entry->addr_1;
4741 addr_2 = entry->addr_2;
4742 value_1 = entry->value_1;
4743 value_2 = entry->value_2;
4747 modify_mask = entry->modify_mask;
4748 data_size = entry->data_size;
4751 ret = ql_rdwr_indreg32(ha, addr_1, &value_1, 0);
4756 while (wait_count < poll) {
4760 ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
if ((temp & mask) != 0) {
4770 if (wait_count == poll) {
device_printf(ha->pci_dev, "%s: Error in processing entry\n",
4775 ret = ql_rdwr_indreg32(ha, addr_2, &data, 1);
4779 data = (data & modify_mask);
4781 ret = ql_rdwr_indreg32(ha, addr_2, &data, 0);
4785 ret = ql_rdwr_indreg32(ha, addr_1, &value_2, 0);
4791 while (wait_count < poll) {
4795 ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
if ((temp & mask) != 0) {
4804 *data_buff++ = addr_2;
4805 *data_buff++ = data;
* for testing purposes we return the amount of data written
4811 return (2 * sizeof(uint32_t));