2 * Copyright (c) 2013-2016 Qlogic Corporation
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
30 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
31 * Content: Contains Hardware dependent functions
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
40 #include "ql_inline.h"
44 #include "ql_minidump.h"
50 static void qla_del_rcv_cntxt(qla_host_t *ha);
51 static int qla_init_rcv_cntxt(qla_host_t *ha);
52 static void qla_del_xmt_cntxt(qla_host_t *ha);
53 static int qla_init_xmt_cntxt(qla_host_t *ha);
54 static int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
55 uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause);
56 static int qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx,
57 uint32_t num_intrs, uint32_t create);
58 static int qla_config_rss(qla_host_t *ha, uint16_t cntxt_id);
59 static int qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id,
60 int tenable, int rcv);
61 static int qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode);
62 static int qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id);
64 static int qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd,
65 uint8_t *hdr);
66 static int qla_hw_add_all_mcast(qla_host_t *ha);
67 static int qla_hw_del_all_mcast(qla_host_t *ha);
68 static int qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds);
70 static int qla_init_nic_func(qla_host_t *ha);
71 static int qla_stop_nic_func(qla_host_t *ha);
72 static int qla_query_fw_dcbx_caps(qla_host_t *ha);
73 static int qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits);
74 static int qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits);
75 static void qla_get_quick_stats(qla_host_t *ha);
76 static int qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode);
77 static int qla_get_cam_search_mode(qla_host_t *ha);
79 static void ql_minidump_free(qla_host_t *ha);
83 qla_sysctl_get_drvr_stats(SYSCTL_HANDLER_ARGS)
89 err = sysctl_handle_int(oidp, &ret, 0, req);
91 if (err || !req->newptr)
96 ha = (qla_host_t *)arg1;
98 for (i = 0; i < ha->hw.num_sds_rings; i++) {
100 device_printf(ha->pci_dev,
101 "%s: sds_ring[%d] = %p\n", __func__,i,
102 (void *)ha->hw.sds[i].intr_count);
104 device_printf(ha->pci_dev,
105 "%s: sds_ring[%d].spurious_intr_count = %p\n",
107 i, (void *)ha->hw.sds[i].spurious_intr_count);
109 device_printf(ha->pci_dev,
110 "%s: sds_ring[%d].rx_free = %d\n", __func__,i,
111 ha->hw.sds[i].rx_free);
114 for (i = 0; i < ha->hw.num_tx_rings; i++)
115 device_printf(ha->pci_dev,
116 "%s: tx[%d] = %p\n", __func__,i,
117 (void *)ha->tx_ring[i].count);
119 for (i = 0; i < ha->hw.num_rds_rings; i++)
120 device_printf(ha->pci_dev,
121 "%s: rds_ring[%d] = %p\n", __func__,i,
122 (void *)ha->hw.rds[i].count);
124 device_printf(ha->pci_dev, "%s: lro_pkt_count = %p\n", __func__,
125 (void *)ha->lro_pkt_count);
127 device_printf(ha->pci_dev, "%s: lro_bytes = %p\n", __func__,
128 (void *)ha->lro_bytes);
130 #ifdef QL_ENABLE_ISCSI_TLV
131 device_printf(ha->pci_dev, "%s: iscsi_pkts = %p\n", __func__,
132 (void *)ha->hw.iscsi_pkt_count);
133 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
140 qla_sysctl_get_quick_stats(SYSCTL_HANDLER_ARGS)
145 err = sysctl_handle_int(oidp, &ret, 0, req);
147 if (err || !req->newptr)
151 ha = (qla_host_t *)arg1;
152 qla_get_quick_stats(ha);
160 qla_stop_pegs(qla_host_t *ha)
164 ql_rdwr_indreg32(ha, Q8_CRB_PEG_0, &val, 0);
165 ql_rdwr_indreg32(ha, Q8_CRB_PEG_1, &val, 0);
166 ql_rdwr_indreg32(ha, Q8_CRB_PEG_2, &val, 0);
167 ql_rdwr_indreg32(ha, Q8_CRB_PEG_3, &val, 0);
168 ql_rdwr_indreg32(ha, Q8_CRB_PEG_4, &val, 0);
169 device_printf(ha->pci_dev, "%s PEGS HALTED!!!!!\n", __func__);
173 qla_sysctl_stop_pegs(SYSCTL_HANDLER_ARGS)
178 err = sysctl_handle_int(oidp, &ret, 0, req);
181 if (err || !req->newptr)
185 ha = (qla_host_t *)arg1;
193 #endif /* #ifdef QL_DBG */
196 qla_validate_set_port_cfg_bit(uint32_t bits)
198 if ((bits & 0xF) > 1)
201 if (((bits >> 4) & 0xF) > 2)
204 if (((bits >> 8) & 0xF) > 2)
211 qla_sysctl_port_cfg(SYSCTL_HANDLER_ARGS)
217 err = sysctl_handle_int(oidp, &ret, 0, req);
219 if (err || !req->newptr)
222 if (qla_validate_set_port_cfg_bit((uint32_t)ret) == 0) {
224 ha = (qla_host_t *)arg1;
226 err = qla_get_port_config(ha, &cfg_bits);
229 goto qla_sysctl_set_port_cfg_exit;
232 cfg_bits |= Q8_PORT_CFG_BITS_DCBX_ENABLE;
234 cfg_bits &= ~Q8_PORT_CFG_BITS_DCBX_ENABLE;
238 cfg_bits &= ~Q8_PORT_CFG_BITS_PAUSE_CFG_MASK;
240 if ((ret & 0xF) == 0) {
241 cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_DISABLED;
242 } else if ((ret & 0xF) == 1){
243 cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_STD;
245 cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_PPM;
249 cfg_bits &= ~Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK;
252 cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT_RCV;
253 } else if (ret == 1){
254 cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT;
256 cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_RCV;
259 err = qla_set_port_config(ha, cfg_bits);
261 ha = (qla_host_t *)arg1;
263 err = qla_get_port_config(ha, &cfg_bits);
266 qla_sysctl_set_port_cfg_exit:
271 qla_sysctl_set_cam_search_mode(SYSCTL_HANDLER_ARGS)
276 err = sysctl_handle_int(oidp, &ret, 0, req);
278 if (err || !req->newptr)
281 ha = (qla_host_t *)arg1;
283 if ((ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_INTERNAL) ||
284 (ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_AUTO)) {
285 err = qla_set_cam_search_mode(ha, (uint32_t)ret);
287 device_printf(ha->pci_dev, "%s: ret = %d\n", __func__, ret);
294 qla_sysctl_get_cam_search_mode(SYSCTL_HANDLER_ARGS)
299 err = sysctl_handle_int(oidp, &ret, 0, req);
301 if (err || !req->newptr)
304 ha = (qla_host_t *)arg1;
305 err = qla_get_cam_search_mode(ha);
312 * Name: ql_hw_add_sysctls
313 * Function: Add P3Plus specific sysctls
316 ql_hw_add_sysctls(qla_host_t *ha)
322 ha->hw.num_sds_rings = MAX_SDS_RINGS;
323 ha->hw.num_rds_rings = MAX_RDS_RINGS;
324 ha->hw.num_tx_rings = NUM_TX_RINGS;
326 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
327 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
328 OID_AUTO, "num_rds_rings", CTLFLAG_RD, &ha->hw.num_rds_rings,
329 ha->hw.num_rds_rings, "Number of Rcv Descriptor Rings");
331 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
332 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
333 OID_AUTO, "num_sds_rings", CTLFLAG_RD, &ha->hw.num_sds_rings,
334 ha->hw.num_sds_rings, "Number of Status Descriptor Rings");
336 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
337 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
338 OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->hw.num_tx_rings,
339 ha->hw.num_tx_rings, "Number of Transmit Rings");
341 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
342 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
343 OID_AUTO, "tx_ring_index", CTLFLAG_RW, &ha->txr_idx,
344 ha->txr_idx, "Tx Ring Used");
346 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
347 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
348 OID_AUTO, "drvr_stats", CTLTYPE_INT | CTLFLAG_RW,
350 qla_sysctl_get_drvr_stats, "I", "Driver Maintained Statistics");
352 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
353 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
354 OID_AUTO, "quick_stats", CTLTYPE_INT | CTLFLAG_RW,
356 qla_sysctl_get_quick_stats, "I", "Quick Statistics");
358 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
359 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
360 OID_AUTO, "max_tx_segs", CTLFLAG_RD, &ha->hw.max_tx_segs,
361 ha->hw.max_tx_segs, "Max # of Segments in a non-TSO pkt");
363 ha->hw.sds_cidx_thres = 32;
364 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
365 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
366 OID_AUTO, "sds_cidx_thres", CTLFLAG_RW, &ha->hw.sds_cidx_thres,
367 ha->hw.sds_cidx_thres,
368 "Number of SDS entries to process before updating"
369 " SDS Ring Consumer Index");
371 ha->hw.rds_pidx_thres = 32;
372 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
373 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
374 OID_AUTO, "rds_pidx_thres", CTLFLAG_RW, &ha->hw.rds_pidx_thres,
375 ha->hw.rds_pidx_thres,
376 "Number of Rcv Rings Entries to post before updating"
377 " RDS Ring Producer Index");
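/*
 * Default below: bits 31:16 = 3 (max microseconds to wait),
 * bits 15:0 = 256 (max packets), per the bit layout described in
 * the sysctl help string that follows.
 */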
379 ha->hw.rcv_intr_coalesce = (3 << 16) | 256;
380 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
381 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
382 OID_AUTO, "rcv_intr_coalesce", CTLFLAG_RW,
383 &ha->hw.rcv_intr_coalesce,
384 ha->hw.rcv_intr_coalesce,
385 "Rcv Intr Coalescing Parameters\n"
386 "\tbits 15:0 max packets\n"
387 "\tbits 31:16 max micro-seconds to wait\n"
388 "\tplease run\n"
389 "\tifconfig <if> down && ifconfig <if> up\n"
390 "\tto take effect\n");
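/*
 * Default below: bits 31:16 = 64 (max microseconds to wait),
 * bits 15:0 = 64 (max packets).
 */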
392 ha->hw.xmt_intr_coalesce = (64 << 16) | 64;
393 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
394 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
395 OID_AUTO, "xmt_intr_coalesce", CTLFLAG_RW,
396 &ha->hw.xmt_intr_coalesce,
397 ha->hw.xmt_intr_coalesce,
398 "Xmt Intr Coalescing Parameters\n"
399 "\tbits 15:0 max packets\n"
400 "\tbits 31:16 max micro-seconds to wait\n"
401 "\tplease run\n"
402 "\tifconfig <if> down && ifconfig <if> up\n"
403 "\tto take effect\n");
405 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
406 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
407 OID_AUTO, "port_cfg", CTLTYPE_INT | CTLFLAG_RW,
409 qla_sysctl_port_cfg, "I",
410 "Set Port Configuration if the value written is valid, "
411 "otherwise Get Port Configuration\n"
412 "\tBits 0-3 : 1 = DCBX Enable; 0 = DCBX Disable\n"
413 "\tBits 4-7 : 0 = no pause; 1 = std; 2 = ppm\n"
414 "\tBits 8-11: std pause cfg; 0 = xmt and rcv;"
415 " 1 = xmt only; 2 = rcv only;\n"
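/*
 * Usage sketch (the sysctl node path is illustrative, not taken from
 * this file): writing, e.g., 0x011 to this node requests DCBX enabled
 * (bits 0-3 = 1), standard pause (bits 4-7 = 1) and pause on both
 * xmt and rcv (bits 8-11 = 0); writing an out-of-range value instead
 * fetches and prints the current port configuration.
 */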
418 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
419 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
420 OID_AUTO, "set_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW,
422 qla_sysctl_set_cam_search_mode, "I",
423 "Set CAM Search Mode\n"
424 "\t 1 = search mode internal\n"
425 "\t 2 = search mode auto\n");
427 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
428 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
429 OID_AUTO, "get_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW,
431 qla_sysctl_get_cam_search_mode, "I",
432 "Get CAM Search Mode\n"
433 "\t 1 = search mode internal\n"
434 "\t 2 = search mode auto\n");
436 ha->hw.enable_9kb = 1;
438 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
439 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
440 OID_AUTO, "enable_9kb", CTLFLAG_RW, &ha->hw.enable_9kb,
441 ha->hw.enable_9kb, "Enable 9Kbyte Buffers when MTU = 9000");
443 ha->hw.enable_hw_lro = 1;
445 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
446 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
447 OID_AUTO, "enable_hw_lro", CTLFLAG_RW, &ha->hw.enable_hw_lro,
448 ha->hw.enable_hw_lro, "Enable Hardware LRO; Default is true\n"
449 "\t 1 : Hardware LRO if LRO is enabled\n"
450 "\t 0 : Software LRO if LRO is enabled\n"
451 "\t Any change requires ifconfig down/up to take effect\n"
452 "\t Note that LRO may be turned off/on via ifconfig\n");
454 ha->hw.mdump_active = 0;
455 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
456 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
457 OID_AUTO, "minidump_active", CTLFLAG_RW, &ha->hw.mdump_active,
458 ha->hw.mdump_active,
459 "Minidump retrieval is Active");
461 ha->hw.mdump_done = 0;
462 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
463 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
464 OID_AUTO, "mdump_done", CTLFLAG_RW,
465 &ha->hw.mdump_done, ha->hw.mdump_done,
466 "Minidump has been done and available for retrieval");
468 ha->hw.mdump_capture_mask = 0xF;
469 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
470 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
471 OID_AUTO, "minidump_capture_mask", CTLFLAG_RW,
472 &ha->hw.mdump_capture_mask, ha->hw.mdump_capture_mask,
473 "Minidump capture mask");
477 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
478 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
479 OID_AUTO, "err_inject",
480 CTLFLAG_RW, &ha->err_inject, ha->err_inject,
481 "Error to be injected\n"
482 "\t\t\t 0: No Errors\n"
483 "\t\t\t 1: rcv: rxb struct invalid\n"
484 "\t\t\t 2: rcv: mp == NULL\n"
485 "\t\t\t 3: lro: rxb struct invalid\n"
486 "\t\t\t 4: lro: mp == NULL\n"
487 "\t\t\t 5: rcv: num handles invalid\n"
488 "\t\t\t 6: reg: indirect reg rd_wr failure\n"
489 "\t\t\t 7: ocm: offchip memory rd_wr failure\n"
490 "\t\t\t 8: mbx: mailbox command failure\n"
491 "\t\t\t 9: heartbeat failure\n"
492 "\t\t\t A: temperature failure\n"
493 "\t\t\t 11: m_getcl or m_getjcl failure\n" );
495 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
496 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
497 OID_AUTO, "peg_stop", CTLTYPE_INT | CTLFLAG_RW,
499 qla_sysctl_stop_pegs, "I", "Peg Stop");
501 #endif /* #ifdef QL_DBG */
503 ha->hw.user_pri_nic = 0;
504 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
505 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
506 OID_AUTO, "user_pri_nic", CTLFLAG_RW, &ha->hw.user_pri_nic,
507 ha->hw.user_pri_nic,
508 "VLAN Tag User Priority for Normal Ethernet Packets");
510 ha->hw.user_pri_iscsi = 4;
511 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
512 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
513 OID_AUTO, "user_pri_iscsi", CTLFLAG_RW, &ha->hw.user_pri_iscsi,
514 ha->hw.user_pri_iscsi,
515 "VLAN Tag User Priority for iSCSI Packets");
520 ql_hw_link_status(qla_host_t *ha)
522 device_printf(ha->pci_dev, "cable_oui\t\t 0x%08x\n", ha->hw.cable_oui);
524 if (ha->hw.link_up) {
525 device_printf(ha->pci_dev, "link Up\n");
527 device_printf(ha->pci_dev, "link Down\n");
530 if (ha->hw.flags.fduplex) {
531 device_printf(ha->pci_dev, "Full Duplex\n");
533 device_printf(ha->pci_dev, "Half Duplex\n");
536 if (ha->hw.flags.autoneg) {
537 device_printf(ha->pci_dev, "Auto Negotiation Enabled\n");
539 device_printf(ha->pci_dev, "Auto Negotiation Disabled\n");
542 switch (ha->hw.link_speed) {
544 device_printf(ha->pci_dev, "link speed\t\t 10Gbps\n");
548 device_printf(ha->pci_dev, "link speed\t\t 1Gbps\n");
552 device_printf(ha->pci_dev, "link speed\t\t 100Mbps\n");
556 device_printf(ha->pci_dev, "link speed\t\t Unknown\n");
560 switch (ha->hw.module_type) {
563 device_printf(ha->pci_dev, "Module Type 10GBase-LRM\n");
567 device_printf(ha->pci_dev, "Module Type 10GBase-LR\n");
571 device_printf(ha->pci_dev, "Module Type 10GBase-SR\n");
575 device_printf(ha->pci_dev,
576 "Module Type 10GE Passive Copper (Compliant)[%d m]\n",
577 ha->hw.cable_length);
581 device_printf(ha->pci_dev, "Module Type 10GE Active"
582 " Limiting Copper (Compliant)[%d m]\n",
583 ha->hw.cable_length);
587 device_printf(ha->pci_dev,
588 "Module Type 10GE Passive Copper"
589 " (Legacy, Best Effort)[%d m]\n",
590 ha->hw.cable_length);
594 device_printf(ha->pci_dev, "Module Type 1000Base-SX\n");
598 device_printf(ha->pci_dev, "Module Type 1000Base-LX\n");
602 device_printf(ha->pci_dev, "Module Type 1000Base-CX\n");
606 device_printf(ha->pci_dev, "Module Type 1000Base-T\n");
610 device_printf(ha->pci_dev, "Module Type 1GE Passive Copper"
611 " (Legacy, Best Effort)\n");
615 device_printf(ha->pci_dev, "Unknown Module Type 0x%x\n",
620 if (ha->hw.link_faults == 1)
621 device_printf(ha->pci_dev, "SFP Power Fault\n");
626 * Function: Frees the DMA'able memory allocated in ql_alloc_dma()
629 ql_free_dma(qla_host_t *ha)
633 if (ha->hw.dma_buf.flags.sds_ring) {
634 for (i = 0; i < ha->hw.num_sds_rings; i++) {
635 ql_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]);
637 ha->hw.dma_buf.flags.sds_ring = 0;
640 if (ha->hw.dma_buf.flags.rds_ring) {
641 for (i = 0; i < ha->hw.num_rds_rings; i++) {
642 ql_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]);
644 ha->hw.dma_buf.flags.rds_ring = 0;
647 if (ha->hw.dma_buf.flags.tx_ring) {
648 ql_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring);
649 ha->hw.dma_buf.flags.tx_ring = 0;
651 ql_minidump_free(ha);
656 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
659 ql_alloc_dma(qla_host_t *ha)
662 uint32_t i, j, size, tx_ring_size;
664 qla_hw_tx_cntxt_t *tx_cntxt;
670 QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));
674 * Allocate Transmit Ring
676 tx_ring_size = (sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS);
677 size = (tx_ring_size * ha->hw.num_tx_rings);
679 hw->dma_buf.tx_ring.alignment = 8;
680 hw->dma_buf.tx_ring.size = size + PAGE_SIZE;
682 if (ql_alloc_dmabuf(ha, &hw->dma_buf.tx_ring)) {
683 device_printf(dev, "%s: tx ring alloc failed\n", __func__);
684 goto ql_alloc_dma_exit;
687 vaddr = (uint8_t *)hw->dma_buf.tx_ring.dma_b;
688 paddr = hw->dma_buf.tx_ring.dma_addr;
690 for (i = 0; i < ha->hw.num_tx_rings; i++) {
691 tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];
693 tx_cntxt->tx_ring_base = (q80_tx_cmd_t *)vaddr;
694 tx_cntxt->tx_ring_paddr = paddr;
696 vaddr += tx_ring_size;
697 paddr += tx_ring_size;
700 for (i = 0; i < ha->hw.num_tx_rings; i++) {
701 tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];
703 tx_cntxt->tx_cons = (uint32_t *)vaddr;
704 tx_cntxt->tx_cons_paddr = paddr;
706 vaddr += sizeof (uint32_t);
707 paddr += sizeof (uint32_t);
710 ha->hw.dma_buf.flags.tx_ring = 1;
712 QL_DPRINT2(ha, (dev, "%s: tx_ring phys %p virt %p\n",
713 __func__, (void *)(hw->dma_buf.tx_ring.dma_addr),
714 hw->dma_buf.tx_ring.dma_b));
716 * Allocate Receive Descriptor Rings
719 for (i = 0; i < hw->num_rds_rings; i++) {
721 hw->dma_buf.rds_ring[i].alignment = 8;
722 hw->dma_buf.rds_ring[i].size =
723 (sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS;
725 if (ql_alloc_dmabuf(ha, &hw->dma_buf.rds_ring[i])) {
726 device_printf(dev, "%s: rds ring[%d] alloc failed\n",
729 for (j = 0; j < i; j++)
730 ql_free_dmabuf(ha, &hw->dma_buf.rds_ring[j]);
732 goto ql_alloc_dma_exit;
734 QL_DPRINT4(ha, (dev, "%s: rx_ring[%d] phys %p virt %p\n",
735 __func__, i, (void *)(hw->dma_buf.rds_ring[i].dma_addr),
736 hw->dma_buf.rds_ring[i].dma_b));
739 hw->dma_buf.flags.rds_ring = 1;
742 * Allocate Status Descriptor Rings
745 for (i = 0; i < hw->num_sds_rings; i++) {
746 hw->dma_buf.sds_ring[i].alignment = 8;
747 hw->dma_buf.sds_ring[i].size =
748 (sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS;
750 if (ql_alloc_dmabuf(ha, &hw->dma_buf.sds_ring[i])) {
751 device_printf(dev, "%s: sds ring alloc failed\n",
754 for (j = 0; j < i; j++)
755 ql_free_dmabuf(ha, &hw->dma_buf.sds_ring[j]);
757 goto ql_alloc_dma_exit;
759 QL_DPRINT4(ha, (dev, "%s: sds_ring[%d] phys %p virt %p\n",
761 (void *)(hw->dma_buf.sds_ring[i].dma_addr),
762 hw->dma_buf.sds_ring[i].dma_b));
764 for (i = 0; i < hw->num_sds_rings; i++) {
765 hw->sds[i].sds_ring_base =
766 (q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b;
769 hw->dma_buf.flags.sds_ring = 1;
778 #define Q8_MBX_MSEC_DELAY 5000
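/*
 * qla_mbx_cmd() below implements the host/firmware mailbox handshake:
 * wait for Q8_HOST_MBOX_CNTRL to clear, write the command words into
 * Q8_HOST_MBOX0.., set Q8_HOST_MBOX_CNTRL to hand the command to the
 * firmware, poll Q8_FW_MBOX_CNTRL for a completion status, read the
 * response from Q8_FW_MBOX0.., then ack by clearing Q8_FW_MBOX_CNTRL
 * and the mailbox interrupt mask. Each poll loop is bounded by
 * Q8_MBX_MSEC_DELAY iterations of a 1-millisecond delay.
 */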
781 qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
782 uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause)
788 if (QL_ERR_INJECT(ha, INJCT_MBX_CMD_FAILURE)) {
790 ha->qla_initiate_recovery = 1;
791 goto exit_qla_mbx_cmd;
797 i = Q8_MBX_MSEC_DELAY;
800 data = READ_REG32(ha, Q8_HOST_MBOX_CNTRL);
806 qla_mdelay(__func__, 1);
812 device_printf(ha->pci_dev, "%s: host_mbx_cntrl 0x%08x\n",
815 ha->qla_initiate_recovery = 1;
816 goto exit_qla_mbx_cmd;
819 for (i = 0; i < n_hmbox; i++) {
820 WRITE_REG32(ha, (Q8_HOST_MBOX0 + (i << 2)), *h_mbox);
824 WRITE_REG32(ha, Q8_HOST_MBOX_CNTRL, 0x1);
827 i = Q8_MBX_MSEC_DELAY;
829 data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);
831 if ((data & 0x3) == 1) {
832 data = READ_REG32(ha, Q8_FW_MBOX0);
833 if ((data & 0xF000) != 0x8000)
839 qla_mdelay(__func__, 1);
844 device_printf(ha->pci_dev, "%s: fw_mbx_cntrl 0x%08x\n",
847 ha->qla_initiate_recovery = 1;
848 goto exit_qla_mbx_cmd;
851 for (i = 0; i < n_fwmbox; i++) {
852 *fw_mbox++ = READ_REG32(ha, (Q8_FW_MBOX0 + (i << 2)));
855 WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
856 WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
863 qla_get_nic_partition(qla_host_t *ha, uint32_t *supports_9kb,
867 device_t dev = ha->pci_dev;
869 bzero(ha->hw.mbox, (sizeof (uint32_t) * Q8_NUM_MBOX));
873 mbox[0] = Q8_MBX_GET_NIC_PARTITION | (0x2 << 16) | (0x2 << 29);
875 if (qla_mbx_cmd(ha, mbox, 2, mbox, 19, 0)) {
876 device_printf(dev, "%s: failed0\n", __func__);
881 if (supports_9kb != NULL) {
882 if (mbox[16] & 0x80) /* bit 7 of mbox 16 */
888 if (num_rcvq != NULL)
889 *num_rcvq = ((mbox[6] >> 16) & 0xFFFF);
891 if ((err != 1) && (err != 0)) {
892 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
899 qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx, uint32_t num_intrs,
903 device_t dev = ha->pci_dev;
904 q80_config_intr_t *c_intr;
905 q80_config_intr_rsp_t *c_intr_rsp;
907 c_intr = (q80_config_intr_t *)ha->hw.mbox;
908 bzero(c_intr, (sizeof (q80_config_intr_t)));
910 c_intr->opcode = Q8_MBX_CONFIG_INTR;
912 c_intr->count_version = (sizeof (q80_config_intr_t) >> 2);
913 c_intr->count_version |= Q8_MBX_CMD_VERSION;
915 c_intr->nentries = num_intrs;
917 for (i = 0; i < num_intrs; i++) {
919 c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_CREATE;
920 c_intr->intr[i].msix_index = start_idx + 1 + i;
922 c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_DELETE;
923 c_intr->intr[i].msix_index =
924 ha->hw.intr_id[(start_idx + i)];
927 c_intr->intr[i].cmd_type |= Q8_MBX_CONFIG_INTR_TYPE_MSI_X;
930 if (qla_mbx_cmd(ha, (uint32_t *)c_intr,
931 (sizeof (q80_config_intr_t) >> 2),
932 ha->hw.mbox, (sizeof (q80_config_intr_rsp_t) >> 2), 0)) {
933 device_printf(dev, "%s: failed0\n", __func__);
937 c_intr_rsp = (q80_config_intr_rsp_t *)ha->hw.mbox;
939 err = Q8_MBX_RSP_STATUS(c_intr_rsp->regcnt_status);
942 device_printf(dev, "%s: failed1 [0x%08x, %d]\n", __func__, err,
943 c_intr_rsp->nentries);
945 for (i = 0; i < c_intr_rsp->nentries; i++) {
946 device_printf(dev, "%s: [%d]:[0x%x 0x%x 0x%x]\n",
948 c_intr_rsp->intr[i].status,
949 c_intr_rsp->intr[i].intr_id,
950 c_intr_rsp->intr[i].intr_src);
956 for (i = 0; ((i < num_intrs) && create); i++) {
957 if (!c_intr_rsp->intr[i].status) {
958 ha->hw.intr_id[(start_idx + i)] =
959 c_intr_rsp->intr[i].intr_id;
960 ha->hw.intr_src[(start_idx + i)] =
961 c_intr_rsp->intr[i].intr_src;
969 * Name: qla_config_rss
970 * Function: Configure RSS for the context/interface.
972 static const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL,
973 0x8030f20c77cb2da3ULL,
974 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
975 0x255b0ec26d5a56daULL };
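/*
 * Five 64-bit words forming the 40-byte RSS hash key; qla_config_rss()
 * below copies them into c_rss->rss_key.
 */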
978 qla_config_rss(qla_host_t *ha, uint16_t cntxt_id)
980 q80_config_rss_t *c_rss;
981 q80_config_rss_rsp_t *c_rss_rsp;
983 device_t dev = ha->pci_dev;
985 c_rss = (q80_config_rss_t *)ha->hw.mbox;
986 bzero(c_rss, (sizeof (q80_config_rss_t)));
988 c_rss->opcode = Q8_MBX_CONFIG_RSS;
990 c_rss->count_version = (sizeof (q80_config_rss_t) >> 2);
991 c_rss->count_version |= Q8_MBX_CMD_VERSION;
993 c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP_IP |
994 Q8_MBX_RSS_HASH_TYPE_IPV6_TCP_IP);
995 //c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP |
996 // Q8_MBX_RSS_HASH_TYPE_IPV6_TCP);
998 c_rss->flags = Q8_MBX_RSS_FLAGS_ENABLE_RSS;
999 c_rss->flags |= Q8_MBX_RSS_FLAGS_USE_IND_TABLE;
1001 c_rss->indtbl_mask = Q8_MBX_RSS_INDTBL_MASK;
1003 c_rss->indtbl_mask |= Q8_MBX_RSS_FLAGS_MULTI_RSS_VALID;
1004 c_rss->flags |= Q8_MBX_RSS_FLAGS_TYPE_CRSS;
1006 c_rss->cntxt_id = cntxt_id;
1008 for (i = 0; i < 5; i++) {
1009 c_rss->rss_key[i] = rss_key[i];
1012 if (qla_mbx_cmd(ha, (uint32_t *)c_rss,
1013 (sizeof (q80_config_rss_t) >> 2),
1014 ha->hw.mbox, (sizeof(q80_config_rss_rsp_t) >> 2), 0)) {
1015 device_printf(dev, "%s: failed0\n", __func__);
1018 c_rss_rsp = (q80_config_rss_rsp_t *)ha->hw.mbox;
1020 err = Q8_MBX_RSP_STATUS(c_rss_rsp->regcnt_status);
1023 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1030 qla_set_rss_ind_table(qla_host_t *ha, uint32_t start_idx, uint32_t count,
1031 uint16_t cntxt_id, uint8_t *ind_table)
1033 q80_config_rss_ind_table_t *c_rss_ind;
1034 q80_config_rss_ind_table_rsp_t *c_rss_ind_rsp;
1036 device_t dev = ha->pci_dev;
1038 if ((count > Q8_RSS_IND_TBL_SIZE) ||
1039 ((start_idx + count - 1) > Q8_RSS_IND_TBL_MAX_IDX)) {
1040 device_printf(dev, "%s: illegal count [%d, %d]\n", __func__,
1045 c_rss_ind = (q80_config_rss_ind_table_t *)ha->hw.mbox;
1046 bzero(c_rss_ind, sizeof (q80_config_rss_ind_table_t));
1048 c_rss_ind->opcode = Q8_MBX_CONFIG_RSS_TABLE;
1049 c_rss_ind->count_version = (sizeof (q80_config_rss_ind_table_t) >> 2);
1050 c_rss_ind->count_version |= Q8_MBX_CMD_VERSION;
1052 c_rss_ind->start_idx = start_idx;
1053 c_rss_ind->end_idx = start_idx + count - 1;
1054 c_rss_ind->cntxt_id = cntxt_id;
1055 bcopy(ind_table, c_rss_ind->ind_table, count);
1057 if (qla_mbx_cmd(ha, (uint32_t *)c_rss_ind,
1058 (sizeof (q80_config_rss_ind_table_t) >> 2), ha->hw.mbox,
1059 (sizeof(q80_config_rss_ind_table_rsp_t) >> 2), 0)) {
1060 device_printf(dev, "%s: failed0\n", __func__);
1064 c_rss_ind_rsp = (q80_config_rss_ind_table_rsp_t *)ha->hw.mbox;
1065 err = Q8_MBX_RSP_STATUS(c_rss_ind_rsp->regcnt_status);
1068 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1075 * Name: qla_config_intr_coalesce
1076 * Function: Configure Interrupt Coalescing.
1079 qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable,
1082 q80_config_intr_coalesc_t *intrc;
1083 q80_config_intr_coalesc_rsp_t *intrc_rsp;
1085 device_t dev = ha->pci_dev;
1087 intrc = (q80_config_intr_coalesc_t *)ha->hw.mbox;
1088 bzero(intrc, (sizeof (q80_config_intr_coalesc_t)));
1090 intrc->opcode = Q8_MBX_CONFIG_INTR_COALESCE;
1091 intrc->count_version = (sizeof (q80_config_intr_coalesc_t) >> 2);
1092 intrc->count_version |= Q8_MBX_CMD_VERSION;
1095 intrc->flags = Q8_MBX_INTRC_FLAGS_RCV;
1096 intrc->max_pkts = ha->hw.rcv_intr_coalesce & 0xFFFF;
1097 intrc->max_mswait = (ha->hw.rcv_intr_coalesce >> 16) & 0xFFFF;
1099 intrc->flags = Q8_MBX_INTRC_FLAGS_XMT;
1100 intrc->max_pkts = ha->hw.xmt_intr_coalesce & 0xFFFF;
1101 intrc->max_mswait = (ha->hw.xmt_intr_coalesce >> 16) & 0xFFFF;
1104 intrc->cntxt_id = cntxt_id;
1107 intrc->flags |= Q8_MBX_INTRC_FLAGS_PERIODIC;
1108 intrc->timer_type = Q8_MBX_INTRC_TIMER_PERIODIC;
1110 for (i = 0; i < ha->hw.num_sds_rings; i++) {
1111 intrc->sds_ring_mask |= (1 << i);
1113 intrc->ms_timeout = 1000;
1116 if (qla_mbx_cmd(ha, (uint32_t *)intrc,
1117 (sizeof (q80_config_intr_coalesc_t) >> 2),
1118 ha->hw.mbox, (sizeof(q80_config_intr_coalesc_rsp_t) >> 2), 0)) {
1119 device_printf(dev, "%s: failed0\n", __func__);
1122 intrc_rsp = (q80_config_intr_coalesc_rsp_t *)ha->hw.mbox;
1124 err = Q8_MBX_RSP_STATUS(intrc_rsp->regcnt_status);
1127 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1136 * Name: qla_config_mac_addr
1137 * Function: binds a MAC address to the context/interface.
1138 * Can be unicast, multicast or broadcast.
1141 qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac,
1144 q80_config_mac_addr_t *cmac;
1145 q80_config_mac_addr_rsp_t *cmac_rsp;
1147 device_t dev = ha->pci_dev;
1149 uint8_t *mac_cpy = mac_addr;
1151 if (num_mac > Q8_MAX_MAC_ADDRS) {
1152 device_printf(dev, "%s: %s num_mac [0x%x] > Q8_MAX_MAC_ADDRS\n",
1153 __func__, (add_mac ? "Add" : "Del"), num_mac);
1157 cmac = (q80_config_mac_addr_t *)ha->hw.mbox;
1158 bzero(cmac, (sizeof (q80_config_mac_addr_t)));
1160 cmac->opcode = Q8_MBX_CONFIG_MAC_ADDR;
1161 cmac->count_version = sizeof (q80_config_mac_addr_t) >> 2;
1162 cmac->count_version |= Q8_MBX_CMD_VERSION;
1165 cmac->cmd = Q8_MBX_CMAC_CMD_ADD_MAC_ADDR;
1167 cmac->cmd = Q8_MBX_CMAC_CMD_DEL_MAC_ADDR;
1169 cmac->cmd |= Q8_MBX_CMAC_CMD_CAM_INGRESS;
1171 cmac->nmac_entries = num_mac;
1172 cmac->cntxt_id = ha->hw.rcv_cntxt_id;
1174 for (i = 0; i < num_mac; i++) {
1175 bcopy(mac_addr, cmac->mac_addr[i].addr, Q8_ETHER_ADDR_LEN);
1176 mac_addr = mac_addr + ETHER_ADDR_LEN;
1179 if (qla_mbx_cmd(ha, (uint32_t *)cmac,
1180 (sizeof (q80_config_mac_addr_t) >> 2),
1181 ha->hw.mbox, (sizeof(q80_config_mac_addr_rsp_t) >> 2), 1)) {
1182 device_printf(dev, "%s: %s failed0\n", __func__,
1183 (add_mac ? "Add" : "Del"));
1186 cmac_rsp = (q80_config_mac_addr_rsp_t *)ha->hw.mbox;
1188 err = Q8_MBX_RSP_STATUS(cmac_rsp->regcnt_status);
1191 device_printf(dev, "%s: %s failed1 [0x%08x]\n", __func__,
1192 (add_mac ? "Add" : "Del"), err);
1193 for (i = 0; i < num_mac; i++) {
1194 device_printf(dev, "%s: %02x:%02x:%02x:%02x:%02x:%02x\n",
1195 __func__, mac_cpy[0], mac_cpy[1], mac_cpy[2],
1196 mac_cpy[3], mac_cpy[4], mac_cpy[5]);
1197 mac_cpy += ETHER_ADDR_LEN;
1207 * Name: qla_set_mac_rcv_mode
1208 * Function: Enable/Disable AllMulticast and Promiscuous Modes.
1211 qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode)
1213 q80_config_mac_rcv_mode_t *rcv_mode;
1215 q80_config_mac_rcv_mode_rsp_t *rcv_mode_rsp;
1216 device_t dev = ha->pci_dev;
1218 rcv_mode = (q80_config_mac_rcv_mode_t *)ha->hw.mbox;
1219 bzero(rcv_mode, (sizeof (q80_config_mac_rcv_mode_t)));
1221 rcv_mode->opcode = Q8_MBX_CONFIG_MAC_RX_MODE;
1222 rcv_mode->count_version = sizeof (q80_config_mac_rcv_mode_t) >> 2;
1223 rcv_mode->count_version |= Q8_MBX_CMD_VERSION;
1225 rcv_mode->mode = mode;
1227 rcv_mode->cntxt_id = ha->hw.rcv_cntxt_id;
1229 if (qla_mbx_cmd(ha, (uint32_t *)rcv_mode,
1230 (sizeof (q80_config_mac_rcv_mode_t) >> 2),
1231 ha->hw.mbox, (sizeof(q80_config_mac_rcv_mode_rsp_t) >> 2), 1)) {
1232 device_printf(dev, "%s: failed0\n", __func__);
1235 rcv_mode_rsp = (q80_config_mac_rcv_mode_rsp_t *)ha->hw.mbox;
1237 err = Q8_MBX_RSP_STATUS(rcv_mode_rsp->regcnt_status);
1240 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1248 ql_set_promisc(qla_host_t *ha)
1252 ha->hw.mac_rcv_mode |= Q8_MBX_MAC_RCV_PROMISC_ENABLE;
1253 ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1258 qla_reset_promisc(qla_host_t *ha)
1260 ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_RCV_PROMISC_ENABLE;
1261 (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1265 ql_set_allmulti(qla_host_t *ha)
1269 ha->hw.mac_rcv_mode |= Q8_MBX_MAC_ALL_MULTI_ENABLE;
1270 ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1275 qla_reset_allmulti(qla_host_t *ha)
1277 ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_ALL_MULTI_ENABLE;
1278 (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1282 * Name: ql_set_max_mtu
1284 * Sets the maximum transfer unit size for the specified rcv context.
1287 ql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id)
1290 q80_set_max_mtu_t *max_mtu;
1291 q80_set_max_mtu_rsp_t *max_mtu_rsp;
1296 max_mtu = (q80_set_max_mtu_t *)ha->hw.mbox;
1297 bzero(max_mtu, (sizeof (q80_set_max_mtu_t)));
1299 max_mtu->opcode = Q8_MBX_SET_MAX_MTU;
1300 max_mtu->count_version = (sizeof (q80_set_max_mtu_t) >> 2);
1301 max_mtu->count_version |= Q8_MBX_CMD_VERSION;
1303 max_mtu->cntxt_id = cntxt_id;
1306 if (qla_mbx_cmd(ha, (uint32_t *)max_mtu,
1307 (sizeof (q80_set_max_mtu_t) >> 2),
1308 ha->hw.mbox, (sizeof (q80_set_max_mtu_rsp_t) >> 2), 1)) {
1309 device_printf(dev, "%s: failed\n", __func__);
1313 max_mtu_rsp = (q80_set_max_mtu_rsp_t *)ha->hw.mbox;
1315 err = Q8_MBX_RSP_STATUS(max_mtu_rsp->regcnt_status);
1318 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1325 qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id)
1328 q80_link_event_t *lnk;
1329 q80_link_event_rsp_t *lnk_rsp;
1334 lnk = (q80_link_event_t *)ha->hw.mbox;
1335 bzero(lnk, (sizeof (q80_link_event_t)));
1337 lnk->opcode = Q8_MBX_LINK_EVENT_REQ;
1338 lnk->count_version = (sizeof (q80_link_event_t) >> 2);
1339 lnk->count_version |= Q8_MBX_CMD_VERSION;
1341 lnk->cntxt_id = cntxt_id;
1342 lnk->cmd = Q8_LINK_EVENT_CMD_ENABLE_ASYNC;
1344 if (qla_mbx_cmd(ha, (uint32_t *)lnk, (sizeof (q80_link_event_t) >> 2),
1345 ha->hw.mbox, (sizeof (q80_link_event_rsp_t) >> 2), 0)) {
1346 device_printf(dev, "%s: failed\n", __func__);
1350 lnk_rsp = (q80_link_event_rsp_t *)ha->hw.mbox;
1352 err = Q8_MBX_RSP_STATUS(lnk_rsp->regcnt_status);
1355 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1362 qla_config_fw_lro(qla_host_t *ha, uint16_t cntxt_id)
1365 q80_config_fw_lro_t *fw_lro;
1366 q80_config_fw_lro_rsp_t *fw_lro_rsp;
1371 fw_lro = (q80_config_fw_lro_t *)ha->hw.mbox;
1372 bzero(fw_lro, sizeof(q80_config_fw_lro_t));
1374 fw_lro->opcode = Q8_MBX_CONFIG_FW_LRO;
1375 fw_lro->count_version = (sizeof (q80_config_fw_lro_t) >> 2);
1376 fw_lro->count_version |= Q8_MBX_CMD_VERSION;
1378 fw_lro->flags |= Q8_MBX_FW_LRO_IPV4 | Q8_MBX_FW_LRO_IPV4_WO_DST_IP_CHK;
1379 fw_lro->flags |= Q8_MBX_FW_LRO_IPV6 | Q8_MBX_FW_LRO_IPV6_WO_DST_IP_CHK;
1381 fw_lro->cntxt_id = cntxt_id;
1383 if (qla_mbx_cmd(ha, (uint32_t *)fw_lro,
1384 (sizeof (q80_config_fw_lro_t) >> 2),
1385 ha->hw.mbox, (sizeof (q80_config_fw_lro_rsp_t) >> 2), 0)) {
1386 device_printf(dev, "%s: failed\n", __func__);
1390 fw_lro_rsp = (q80_config_fw_lro_rsp_t *)ha->hw.mbox;
1392 err = Q8_MBX_RSP_STATUS(fw_lro_rsp->regcnt_status);
1395 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1402 qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode)
1405 q80_hw_config_t *hw_config;
1406 q80_hw_config_rsp_t *hw_config_rsp;
1411 hw_config = (q80_hw_config_t *)ha->hw.mbox;
1412 bzero(hw_config, sizeof (q80_hw_config_t));
1414 hw_config->opcode = Q8_MBX_HW_CONFIG;
1415 hw_config->count_version = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE_COUNT;
1416 hw_config->count_version |= Q8_MBX_CMD_VERSION;
1418 hw_config->cmd = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE;
1420 hw_config->u.set_cam_search_mode.mode = search_mode;
1422 if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
1423 (sizeof (q80_hw_config_t) >> 2),
1424 ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
1425 device_printf(dev, "%s: failed\n", __func__);
1428 hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;
1430 err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);
1433 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1440 qla_get_cam_search_mode(qla_host_t *ha)
1443 q80_hw_config_t *hw_config;
1444 q80_hw_config_rsp_t *hw_config_rsp;
1449 hw_config = (q80_hw_config_t *)ha->hw.mbox;
1450 bzero(hw_config, sizeof (q80_hw_config_t));
1452 hw_config->opcode = Q8_MBX_HW_CONFIG;
1453 hw_config->count_version = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE_COUNT;
1454 hw_config->count_version |= Q8_MBX_CMD_VERSION;
1456 hw_config->cmd = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE;
1458 if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
1459 (sizeof (q80_hw_config_t) >> 2),
1460 ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
1461 device_printf(dev, "%s: failed\n", __func__);
1464 hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;
1466 err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);
1469 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1471 device_printf(dev, "%s: cam search mode [0x%08x]\n", __func__,
1472 hw_config_rsp->u.get_cam_search_mode.mode);
1481 qla_xmt_stats(qla_host_t *ha, q80_xmt_stats_t *xstat, int i)
1483 device_t dev = ha->pci_dev;
1485 if (i < ha->hw.num_tx_rings) {
1486 device_printf(dev, "%s[%d]: total_bytes\t\t%" PRIu64 "\n",
1487 __func__, i, xstat->total_bytes);
1488 device_printf(dev, "%s[%d]: total_pkts\t\t%" PRIu64 "\n",
1489 __func__, i, xstat->total_pkts);
1490 device_printf(dev, "%s[%d]: errors\t\t%" PRIu64 "\n",
1491 __func__, i, xstat->errors);
1492 device_printf(dev, "%s[%d]: pkts_dropped\t%" PRIu64 "\n",
1493 __func__, i, xstat->pkts_dropped);
1494 device_printf(dev, "%s[%d]: switch_pkts\t\t%" PRIu64 "\n",
1495 __func__, i, xstat->switch_pkts);
1496 device_printf(dev, "%s[%d]: num_buffers\t\t%" PRIu64 "\n",
1497 __func__, i, xstat->num_buffers);
1499 device_printf(dev, "%s: total_bytes\t\t\t%" PRIu64 "\n",
1500 __func__, xstat->total_bytes);
1501 device_printf(dev, "%s: total_pkts\t\t\t%" PRIu64 "\n",
1502 __func__, xstat->total_pkts);
1503 device_printf(dev, "%s: errors\t\t\t%" PRIu64 "\n",
1504 __func__, xstat->errors);
1505 device_printf(dev, "%s: pkts_dropped\t\t\t%" PRIu64 "\n",
1506 __func__, xstat->pkts_dropped);
1507 device_printf(dev, "%s: switch_pkts\t\t\t%" PRIu64 "\n",
1508 __func__, xstat->switch_pkts);
1509 device_printf(dev, "%s: num_buffers\t\t\t%" PRIu64 "\n",
1510 __func__, xstat->num_buffers);
1515 qla_rcv_stats(qla_host_t *ha, q80_rcv_stats_t *rstat)
1517 device_t dev = ha->pci_dev;
1519 device_printf(dev, "%s: total_bytes\t\t\t%" PRIu64 "\n", __func__,
1520 rstat->total_bytes);
1521 device_printf(dev, "%s: total_pkts\t\t\t%" PRIu64 "\n", __func__,
1523 device_printf(dev, "%s: lro_pkt_count\t\t%" PRIu64 "\n", __func__,
1524 rstat->lro_pkt_count);
1525 device_printf(dev, "%s: sw_pkt_count\t\t\t%" PRIu64 "\n", __func__,
1526 rstat->sw_pkt_count);
1527 device_printf(dev, "%s: ip_chksum_err\t\t%" PRIu64 "\n", __func__,
1528 rstat->ip_chksum_err);
1529 device_printf(dev, "%s: pkts_wo_acntxts\t\t%" PRIu64 "\n", __func__,
1530 rstat->pkts_wo_acntxts);
1531 device_printf(dev, "%s: pkts_dropped_no_sds_card\t%" PRIu64 "\n",
1532 __func__, rstat->pkts_dropped_no_sds_card);
1533 device_printf(dev, "%s: pkts_dropped_no_sds_host\t%" PRIu64 "\n",
1534 __func__, rstat->pkts_dropped_no_sds_host);
1535 device_printf(dev, "%s: oversized_pkts\t\t%" PRIu64 "\n", __func__,
1536 rstat->oversized_pkts);
1537 device_printf(dev, "%s: pkts_dropped_no_rds\t\t%" PRIu64 "\n",
1538 __func__, rstat->pkts_dropped_no_rds);
1539 device_printf(dev, "%s: unxpctd_mcast_pkts\t\t%" PRIu64 "\n",
1540 __func__, rstat->unxpctd_mcast_pkts);
1541 device_printf(dev, "%s: re1_fbq_error\t\t%" PRIu64 "\n", __func__,
1542 rstat->re1_fbq_error);
1543 device_printf(dev, "%s: invalid_mac_addr\t\t%" PRIu64 "\n", __func__,
1544 rstat->invalid_mac_addr);
1545 device_printf(dev, "%s: rds_prime_trys\t\t%" PRIu64 "\n", __func__,
1546 rstat->rds_prime_trys);
1547 device_printf(dev, "%s: rds_prime_success\t\t%" PRIu64 "\n", __func__,
1548 rstat->rds_prime_success);
1549 device_printf(dev, "%s: lro_flows_added\t\t%" PRIu64 "\n", __func__,
1550 rstat->lro_flows_added);
1551 device_printf(dev, "%s: lro_flows_deleted\t\t%" PRIu64 "\n", __func__,
1552 rstat->lro_flows_deleted);
1553 device_printf(dev, "%s: lro_flows_active\t\t%" PRIu64 "\n", __func__,
1554 rstat->lro_flows_active);
1555 device_printf(dev, "%s: pkts_droped_unknown\t\t%" PRIu64 "\n",
1556 __func__, rstat->pkts_droped_unknown);
1557 device_printf(dev, "%s: pkts_cnt_oversized\t\t%" PRIu64 "\n",
1558 __func__, rstat->pkts_cnt_oversized);
1562 qla_mac_stats(qla_host_t *ha, q80_mac_stats_t *mstat)
1564 device_t dev = ha->pci_dev;
1566 device_printf(dev, "%s: xmt_frames\t\t\t%" PRIu64 "\n", __func__,
1568 device_printf(dev, "%s: xmt_bytes\t\t\t%" PRIu64 "\n", __func__,
1570 device_printf(dev, "%s: xmt_mcast_pkts\t\t%" PRIu64 "\n", __func__,
1571 mstat->xmt_mcast_pkts);
1572 device_printf(dev, "%s: xmt_bcast_pkts\t\t%" PRIu64 "\n", __func__,
1573 mstat->xmt_bcast_pkts);
1574 device_printf(dev, "%s: xmt_pause_frames\t\t%" PRIu64 "\n", __func__,
1575 mstat->xmt_pause_frames);
1576 device_printf(dev, "%s: xmt_cntrl_pkts\t\t%" PRIu64 "\n", __func__,
1577 mstat->xmt_cntrl_pkts);
1578 device_printf(dev, "%s: xmt_pkt_lt_64bytes\t\t%" PRIu64 "\n",
1579 __func__, mstat->xmt_pkt_lt_64bytes);
1580 device_printf(dev, "%s: xmt_pkt_lt_127bytes\t\t%" PRIu64 "\n",
1581 __func__, mstat->xmt_pkt_lt_127bytes);
1582 device_printf(dev, "%s: xmt_pkt_lt_255bytes\t\t%" PRIu64 "\n",
1583 __func__, mstat->xmt_pkt_lt_255bytes);
1584 device_printf(dev, "%s: xmt_pkt_lt_511bytes\t\t%" PRIu64 "\n",
1585 __func__, mstat->xmt_pkt_lt_511bytes);
1586 device_printf(dev, "%s: xmt_pkt_lt_1023bytes\t\t%" PRIu64 "\n",
1587 __func__, mstat->xmt_pkt_lt_1023bytes);
1588 device_printf(dev, "%s: xmt_pkt_lt_1518bytes\t\t%" PRIu64 "\n",
1589 __func__, mstat->xmt_pkt_lt_1518bytes);
1590 device_printf(dev, "%s: xmt_pkt_gt_1518bytes\t\t%" PRIu64 "\n",
1591 __func__, mstat->xmt_pkt_gt_1518bytes);
1593 device_printf(dev, "%s: rcv_frames\t\t\t%" PRIu64 "\n", __func__,
1595 device_printf(dev, "%s: rcv_bytes\t\t\t%" PRIu64 "\n", __func__,
1597 device_printf(dev, "%s: rcv_mcast_pkts\t\t%" PRIu64 "\n", __func__,
1598 mstat->rcv_mcast_pkts);
1599 device_printf(dev, "%s: rcv_bcast_pkts\t\t%" PRIu64 "\n", __func__,
1600 mstat->rcv_bcast_pkts);
1601 device_printf(dev, "%s: rcv_pause_frames\t\t%" PRIu64 "\n", __func__,
1602 mstat->rcv_pause_frames);
1603 device_printf(dev, "%s: rcv_cntrl_pkts\t\t%" PRIu64 "\n", __func__,
1604 mstat->rcv_cntrl_pkts);
1605 device_printf(dev, "%s: rcv_pkt_lt_64bytes\t\t%" PRIu64 "\n",
1606 __func__, mstat->rcv_pkt_lt_64bytes);
1607 device_printf(dev, "%s: rcv_pkt_lt_127bytes\t\t%" PRIu64 "\n",
1608 __func__, mstat->rcv_pkt_lt_127bytes);
1609 device_printf(dev, "%s: rcv_pkt_lt_255bytes\t\t%" PRIu64 "\n",
1610 __func__, mstat->rcv_pkt_lt_255bytes);
1611 device_printf(dev, "%s: rcv_pkt_lt_511bytes\t\t%" PRIu64 "\n",
1612 __func__, mstat->rcv_pkt_lt_511bytes);
1613 device_printf(dev, "%s: rcv_pkt_lt_1023bytes\t\t%" PRIu64 "\n",
1614 __func__, mstat->rcv_pkt_lt_1023bytes);
1615 device_printf(dev, "%s: rcv_pkt_lt_1518bytes\t\t%" PRIu64 "\n",
1616 __func__, mstat->rcv_pkt_lt_1518bytes);
1617 device_printf(dev, "%s: rcv_pkt_gt_1518bytes\t\t%" PRIu64 "\n",
1618 __func__, mstat->rcv_pkt_gt_1518bytes);
1620 device_printf(dev, "%s: rcv_len_error\t\t%" PRIu64 "\n", __func__,
1621 mstat->rcv_len_error);
1622 device_printf(dev, "%s: rcv_len_small\t\t%" PRIu64 "\n", __func__,
1623 mstat->rcv_len_small);
1624 device_printf(dev, "%s: rcv_len_large\t\t%" PRIu64 "\n", __func__,
1625 mstat->rcv_len_large);
1626 device_printf(dev, "%s: rcv_jabber\t\t\t%" PRIu64 "\n", __func__,
1628 device_printf(dev, "%s: rcv_dropped\t\t\t%" PRIu64 "\n", __func__,
1629 mstat->rcv_dropped);
1630 device_printf(dev, "%s: fcs_error\t\t\t%" PRIu64 "\n", __func__,
1632 device_printf(dev, "%s: align_error\t\t\t%" PRIu64 "\n", __func__,
1633 mstat->align_error);
1638 qla_get_hw_stats(qla_host_t *ha, uint32_t cmd, uint32_t rsp_size)
1641 q80_get_stats_t *stat;
1642 q80_get_stats_rsp_t *stat_rsp;
1647 stat = (q80_get_stats_t *)ha->hw.mbox;
1648 bzero(stat, (sizeof (q80_get_stats_t)));
1650 stat->opcode = Q8_MBX_GET_STATS;
1651 stat->count_version = 2;
1652 stat->count_version |= Q8_MBX_CMD_VERSION;
1656 if (qla_mbx_cmd(ha, (uint32_t *)stat, 2,
1657 ha->hw.mbox, (rsp_size >> 2), 0)) {
1658 device_printf(dev, "%s: failed\n", __func__);
1662 stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
1664 err = Q8_MBX_RSP_STATUS(stat_rsp->regcnt_status);
1674 ql_get_stats(qla_host_t *ha)
1676 q80_get_stats_rsp_t *stat_rsp;
1677 q80_mac_stats_t *mstat;
1678 q80_xmt_stats_t *xstat;
1679 q80_rcv_stats_t *rstat;
1683 stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
1685 * Get MAC Statistics
1687 cmd = Q8_GET_STATS_CMD_TYPE_MAC;
1688 // cmd |= Q8_GET_STATS_CMD_CLEAR;
1690 cmd |= ((ha->pci_func & 0x1) << 16);
1692 if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
1693 mstat = (q80_mac_stats_t *)&stat_rsp->u.mac;
1694 qla_mac_stats(ha, mstat);
1696 device_printf(ha->pci_dev, "%s: mac failed [0x%08x]\n",
1697 __func__, ha->hw.mbox[0]);
1700 * Get RCV Statistics
1702 cmd = Q8_GET_STATS_CMD_RCV | Q8_GET_STATS_CMD_TYPE_CNTXT;
1703 // cmd |= Q8_GET_STATS_CMD_CLEAR;
1704 cmd |= (ha->hw.rcv_cntxt_id << 16);
1706 if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
1707 rstat = (q80_rcv_stats_t *)&stat_rsp->u.rcv;
1708 qla_rcv_stats(ha, rstat);
1710 device_printf(ha->pci_dev, "%s: rcv failed [0x%08x]\n",
1711 __func__, ha->hw.mbox[0]);
1714 * Get XMT Statistics
1716 for (i = 0 ; i < ha->hw.num_tx_rings; i++) {
1717 cmd = Q8_GET_STATS_CMD_XMT | Q8_GET_STATS_CMD_TYPE_CNTXT;
1718 // cmd |= Q8_GET_STATS_CMD_CLEAR;
1719 cmd |= (ha->hw.tx_cntxt[i].tx_cntxt_id << 16);
1721 if (qla_get_hw_stats(ha, cmd, sizeof(q80_get_stats_rsp_t))
1723 xstat = (q80_xmt_stats_t *)&stat_rsp->u.xmt;
1724 qla_xmt_stats(ha, xstat, i);
1726 device_printf(ha->pci_dev, "%s: xmt failed [0x%08x]\n",
1727 __func__, ha->hw.mbox[0]);
1734 qla_get_quick_stats(qla_host_t *ha)
1736 q80_get_mac_rcv_xmt_stats_rsp_t *stat_rsp;
1737 q80_mac_stats_t *mstat;
1738 q80_xmt_stats_t *xstat;
1739 q80_rcv_stats_t *rstat;
1742 stat_rsp = (q80_get_mac_rcv_xmt_stats_rsp_t *)ha->hw.mbox;
1744 cmd = Q8_GET_STATS_CMD_TYPE_ALL;
1745 // cmd |= Q8_GET_STATS_CMD_CLEAR;
1747 // cmd |= ((ha->pci_func & 0x3) << 16);
1748 cmd |= (0xFFFF << 16);
1750 if (qla_get_hw_stats(ha, cmd,
1751 sizeof (q80_get_mac_rcv_xmt_stats_rsp_t)) == 0) {
1753 mstat = (q80_mac_stats_t *)&stat_rsp->mac;
1754 rstat = (q80_rcv_stats_t *)&stat_rsp->rcv;
1755 xstat = (q80_xmt_stats_t *)&stat_rsp->xmt;
1756 qla_mac_stats(ha, mstat);
1757 qla_rcv_stats(ha, rstat);
1758 qla_xmt_stats(ha, xstat, ha->hw.num_tx_rings);
1760 device_printf(ha->pci_dev, "%s: failed [0x%08x]\n",
1761 __func__, ha->hw.mbox[0]);
1768 * Function: Checks if the packet to be transmitted is a candidate for
1769 * Large TCP Segment Offload. If yes, the appropriate fields in the Tx
1770 * Ring Structure are plugged in.
1773 qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr)
1775 struct ether_vlan_header *eh;
1776 struct ip *ip = NULL;
1777 struct ip6_hdr *ip6 = NULL;
1778 struct tcphdr *th = NULL;
1779 uint32_t ehdrlen, hdrlen, ip_hlen, tcp_hlen, tcp_opt_off;
1780 uint16_t etype, opcode, offload = 1;
1786 eh = mtod(mp, struct ether_vlan_header *);
1788 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1789 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1790 etype = ntohs(eh->evl_proto);
1792 ehdrlen = ETHER_HDR_LEN;
1793 etype = ntohs(eh->evl_encap_proto);
1801 tcp_opt_off = ehdrlen + sizeof(struct ip) +
1802 sizeof(struct tcphdr);
1804 if (mp->m_len < tcp_opt_off) {
1805 m_copydata(mp, 0, tcp_opt_off, hdr);
1806 ip = (struct ip *)(hdr + ehdrlen);
1808 ip = (struct ip *)(mp->m_data + ehdrlen);
1811 ip_hlen = ip->ip_hl << 2;
1812 opcode = Q8_TX_CMD_OP_XMT_TCP_LSO;
1815 if ((ip->ip_p != IPPROTO_TCP) ||
1816 (ip_hlen != sizeof (struct ip))){
1817 /* IP Options are not supported */
1821 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
1825 case ETHERTYPE_IPV6:
1827 tcp_opt_off = ehdrlen + sizeof(struct ip6_hdr) +
1828 sizeof (struct tcphdr);
1830 if (mp->m_len < tcp_opt_off) {
1831 m_copydata(mp, 0, tcp_opt_off, hdr);
1832 ip6 = (struct ip6_hdr *)(hdr + ehdrlen);
1834 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
1837 ip_hlen = sizeof(struct ip6_hdr);
1838 opcode = Q8_TX_CMD_OP_XMT_TCP_LSO_IPV6;
1840 if (ip6->ip6_nxt != IPPROTO_TCP) {
1841 //device_printf(dev, "%s: ipv6\n", __func__);
1844 th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
1848 QL_DPRINT8(ha, (dev, "%s: type!=ip\n", __func__));
1856 tcp_hlen = th->th_off << 2;
1857 hdrlen = ehdrlen + ip_hlen + tcp_hlen;
1859 if (mp->m_len < hdrlen) {
1860 if (mp->m_len < tcp_opt_off) {
1861 if (tcp_hlen > sizeof(struct tcphdr)) {
1862 m_copydata(mp, tcp_opt_off,
1863 (tcp_hlen - sizeof(struct tcphdr)),
1867 m_copydata(mp, 0, hdrlen, hdr);
1871 tx_cmd->mss = mp->m_pkthdr.tso_segsz;
1873 tx_cmd->flags_opcode = opcode ;
1874 tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;
1875 tx_cmd->total_hdr_len = hdrlen;
1877 /* Multicast: least significant bit of the first dest MAC byte == 1 */
1878 if (eh->evl_dhost[0] & 0x01) {
1879 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST;
1882 if (mp->m_len < hdrlen) {
1883 printf("%d\n", hdrlen);
1891 * Name: qla_tx_chksum
1892 * Function: Checks if the packet to be transmitted is a candidate for
1893 * TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx
1894 * Ring Structure are plugged in.
1897 qla_tx_chksum(qla_host_t *ha, struct mbuf *mp, uint32_t *op_code,
1898 uint32_t *tcp_hdr_off)
1900 struct ether_vlan_header *eh;
1902 struct ip6_hdr *ip6;
1903 uint32_t ehdrlen, ip_hlen;
1904 uint16_t etype, opcode, offload = 1;
1906 uint8_t buf[sizeof(struct ip6_hdr)];
1912 if ((mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) == 0)
1915 eh = mtod(mp, struct ether_vlan_header *);
1917 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1918 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1919 etype = ntohs(eh->evl_proto);
1921 ehdrlen = ETHER_HDR_LEN;
1922 etype = ntohs(eh->evl_encap_proto);
1928 ip = (struct ip *)(mp->m_data + ehdrlen);
1930 ip_hlen = sizeof (struct ip);
1932 if (mp->m_len < (ehdrlen + ip_hlen)) {
1933 m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
1934 ip = (struct ip *)buf;
1937 if (ip->ip_p == IPPROTO_TCP)
1938 opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM;
1939 else if (ip->ip_p == IPPROTO_UDP)
1940 opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM;
1942 //device_printf(dev, "%s: ipv4\n", __func__);
1947 case ETHERTYPE_IPV6:
1948 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
1950 ip_hlen = sizeof(struct ip6_hdr);
1952 if (mp->m_len < (ehdrlen + ip_hlen)) {
1953 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
1955 ip6 = (struct ip6_hdr *)buf;
1958 if (ip6->ip6_nxt == IPPROTO_TCP)
1959 opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6;
1960 else if (ip6->ip6_nxt == IPPROTO_UDP)
1961 opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6;
1963 //device_printf(dev, "%s: ipv6\n", __func__);
1976 *tcp_hdr_off = (ip_hlen + ehdrlen);
1981 #define QLA_TX_MIN_FREE 2
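/*
 * The transmit path treats a ring as full when fewer than
 * (num_tx_cmds + QLA_TX_MIN_FREE) descriptors remain, so at least
 * QLA_TX_MIN_FREE slots are always kept unused.
 */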
1984 * Function: Transmits a packet. It first checks if the packet is a
1985 * candidate for Large TCP Segment Offload and then for UDP/TCP checksum
1986 * offload. If neither of these criteria is met, it is transmitted
1987 * as a regular ethernet frame.
1990 ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
1991 uint32_t tx_idx, struct mbuf *mp, uint32_t txr_idx, uint32_t iscsi_pdu)
1993 struct ether_vlan_header *eh;
1994 qla_hw_t *hw = &ha->hw;
1995 q80_tx_cmd_t *tx_cmd, tso_cmd;
1996 bus_dma_segment_t *c_seg;
1997 uint32_t num_tx_cmds, hdr_len = 0;
1998 uint32_t total_length = 0, bytes, tx_cmd_count = 0, txr_next;
2001 uint8_t *src = NULL, *dst = NULL;
2002 uint8_t frame_hdr[QL_FRAME_HDR_SIZE];
2003 uint32_t op_code = 0;
2004 uint32_t tcp_hdr_off = 0;
2009 * Always make sure there is at least one empty slot in the tx_ring;
2010 * the tx_ring is considered full when only one entry is available.
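* Each q80_tx_cmd_t carries up to Q8_TX_CMD_MAX_SEGMENTS (4) DMA
* segments (buf1..buf4), hence num_tx_cmds rounds nsegs up to a
* multiple of 4 and divides by 4.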
2012 num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2;
2014 total_length = mp->m_pkthdr.len;
2015 if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
2016 device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
2017 __func__, total_length);
2020 eh = mtod(mp, struct ether_vlan_header *);
2022 if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
2024 bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t));
2027 ret = qla_tx_tso(ha, mp, &tso_cmd, src);
2030 /* find the additional tx_cmd descriptors required */
2032 if (mp->m_flags & M_VLANTAG)
2033 tso_cmd.total_hdr_len += ETHER_VLAN_ENCAP_LEN;
2035 hdr_len = tso_cmd.total_hdr_len;
2037 bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
2038 bytes = QL_MIN(bytes, hdr_len);
2044 bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
2048 hdr_len = tso_cmd.total_hdr_len;
2051 src = (uint8_t *)eh;
2055 (void)qla_tx_chksum(ha, mp, &op_code, &tcp_hdr_off);
2059 ha->hw.iscsi_pkt_count++;
2061 if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
2062 ql_hw_tx_done_locked(ha, txr_idx);
2063 if (hw->tx_cntxt[txr_idx].txr_free <=
2064 (num_tx_cmds + QLA_TX_MIN_FREE)) {
2065 QL_DPRINT8(ha, (dev, "%s: (hw->txr_free <= "
2066 "(num_tx_cmds + QLA_TX_MIN_FREE))\n",
2072 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[tx_idx];
2074 if (!(mp->m_pkthdr.csum_flags & CSUM_TSO)) {
2076 if (nsegs > ha->hw.max_tx_segs)
2077 ha->hw.max_tx_segs = nsegs;
2079 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2082 tx_cmd->flags_opcode = op_code;
2083 tx_cmd->tcp_hdr_off = tcp_hdr_off;
2086 tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER;
2089 bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t));
2090 ha->tx_tso_frames++;
2093 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2094 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED;
2097 eh->evl_tag |= ha->hw.user_pri_iscsi << 13;
2099 } else if (mp->m_flags & M_VLANTAG) {
2101 if (hdr_len) { /* TSO */
2102 tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED |
2103 Q8_TX_CMD_FLAGS_HW_VLAN_ID);
2104 tx_cmd->tcp_hdr_off += ETHER_VLAN_ENCAP_LEN;
2106 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_HW_VLAN_ID;
2108 ha->hw_vlan_tx_frames++;
2109 tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag;
2112 tx_cmd->vlan_tci |= ha->hw.user_pri_iscsi << 13;
2113 mp->m_pkthdr.ether_vtag = tx_cmd->vlan_tci;
2118 tx_cmd->n_bufs = (uint8_t)nsegs;
2119 tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF);
2120 tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8)));
2121 tx_cmd->cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func);
2126 for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) {
2130 tx_cmd->buf1_addr = c_seg->ds_addr;
2131 tx_cmd->buf1_len = c_seg->ds_len;
2135 tx_cmd->buf2_addr = c_seg->ds_addr;
2136 tx_cmd->buf2_len = c_seg->ds_len;
2140 tx_cmd->buf3_addr = c_seg->ds_addr;
2141 tx_cmd->buf3_len = c_seg->ds_len;
2145 tx_cmd->buf4_addr = c_seg->ds_addr;
2146 tx_cmd->buf4_len = c_seg->ds_len;
2154 txr_next = hw->tx_cntxt[txr_idx].txr_next =
2155 (hw->tx_cntxt[txr_idx].txr_next + 1) &
2156 (NUM_TX_DESCRIPTORS - 1);
2162 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2163 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2166 if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
2168 /* TSO : Copy the header in the following tx cmd descriptors */
2170 txr_next = hw->tx_cntxt[txr_idx].txr_next;
2172 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2173 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2175 bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
2176 bytes = QL_MIN(bytes, hdr_len);
2178 dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN;
2180 if (mp->m_flags & M_VLANTAG) {
2181 /* first copy the src/dst MAC addresses */
2182 bcopy(src, dst, (ETHER_ADDR_LEN * 2));
2183 dst += (ETHER_ADDR_LEN * 2);
2184 src += (ETHER_ADDR_LEN * 2);
2186 *((uint16_t *)dst) = htons(ETHERTYPE_VLAN);
2188 *((uint16_t *)dst) = htons(mp->m_pkthdr.ether_vtag);
2191 /* bytes left in src header */
2192 hdr_len -= ((ETHER_ADDR_LEN * 2) +
2193 ETHER_VLAN_ENCAP_LEN);
2195 /* bytes left in TxCmd Entry */
2196 bytes -= ((ETHER_ADDR_LEN * 2) + ETHER_VLAN_ENCAP_LEN);
2199 bcopy(src, dst, bytes);
2203 bcopy(src, dst, bytes);
2208 txr_next = hw->tx_cntxt[txr_idx].txr_next =
2209 (hw->tx_cntxt[txr_idx].txr_next + 1) &
2210 (NUM_TX_DESCRIPTORS - 1);
2214 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2215 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2217 bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
2219 bcopy(src, tx_cmd, bytes);
2223 txr_next = hw->tx_cntxt[txr_idx].txr_next =
2224 (hw->tx_cntxt[txr_idx].txr_next + 1) &
2225 (NUM_TX_DESCRIPTORS - 1);
2230 hw->tx_cntxt[txr_idx].txr_free =
2231 hw->tx_cntxt[txr_idx].txr_free - tx_cmd_count;
2233 QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->tx_cntxt[txr_idx].txr_next,\
2235 QL_DPRINT8(ha, (dev, "%s: return\n", __func__));
2242 #define Q8_CONFIG_IND_TBL_SIZE 32 /* < Q8_RSS_IND_TBL_SIZE and power of 2 */
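/*
 * qla_config_rss_ind_table() below programs the RSS indirection table
 * in Q8_CONFIG_IND_TBL_SIZE-entry chunks, repeating a round-robin
 * mapping of table slots to SDS rings (slot i -> ring
 * i % num_sds_rings within each chunk) so flows spread across rings.
 */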
2244 qla_config_rss_ind_table(qla_host_t *ha)
2247 uint8_t rss_ind_tbl[Q8_CONFIG_IND_TBL_SIZE];
2250 for (i = 0; i < Q8_CONFIG_IND_TBL_SIZE; i++) {
2251 rss_ind_tbl[i] = i % ha->hw.num_sds_rings;
2254 for (i = 0; i <= Q8_RSS_IND_TBL_MAX_IDX ;
2255 i = i + Q8_CONFIG_IND_TBL_SIZE) {
2257 if ((i + Q8_CONFIG_IND_TBL_SIZE) > Q8_RSS_IND_TBL_MAX_IDX) {
2258 count = Q8_RSS_IND_TBL_MAX_IDX - i + 1;
2260 count = Q8_CONFIG_IND_TBL_SIZE;
2263 if (qla_set_rss_ind_table(ha, i, count, ha->hw.rcv_cntxt_id,
2272 qla_config_soft_lro(qla_host_t *ha)
2275 qla_hw_t *hw = &ha->hw;
2276 struct lro_ctrl *lro;
2278 for (i = 0; i < hw->num_sds_rings; i++) {
2279 lro = &hw->sds[i].lro;
2281 bzero(lro, sizeof(struct lro_ctrl));
2283 #if (__FreeBSD_version >= 1100101)
2284 if (tcp_lro_init_args(lro, ha->ifp, 0, NUM_RX_DESCRIPTORS)) {
2285 device_printf(ha->pci_dev,
2286 "%s: tcp_lro_init_args [%d] failed\n",
2291 if (tcp_lro_init(lro)) {
2292 device_printf(ha->pci_dev,
2293 "%s: tcp_lro_init [%d] failed\n",
2297 #endif /* #if (__FreeBSD_version >= 1100101) */
2302 QL_DPRINT2(ha, (ha->pci_dev, "%s: LRO initialized\n", __func__));
2307 qla_drain_soft_lro(qla_host_t *ha)
2310 qla_hw_t *hw = &ha->hw;
2311 struct lro_ctrl *lro;
2313 for (i = 0; i < hw->num_sds_rings; i++) {
2314 lro = &hw->sds[i].lro;
2316 #if (__FreeBSD_version >= 1100101)
2317 tcp_lro_flush_all(lro);
2319 struct lro_entry *queued;
2321 while ((!SLIST_EMPTY(&lro->lro_active))) {
2322 queued = SLIST_FIRST(&lro->lro_active);
2323 SLIST_REMOVE_HEAD(&lro->lro_active, next);
2324 tcp_lro_flush(lro, queued);
2326 #endif /* #if (__FreeBSD_version >= 1100101) */
2333 qla_free_soft_lro(qla_host_t *ha)
2336 qla_hw_t *hw = &ha->hw;
2337 struct lro_ctrl *lro;
2339 for (i = 0; i < hw->num_sds_rings; i++) {
2340 lro = &hw->sds[i].lro;
2349 * Name: ql_del_hw_if
2350 * Function: Destroys the hardware specific entities corresponding to an
2351 * Ethernet Interface
2354 ql_del_hw_if(qla_host_t *ha)
2359 (void)qla_stop_nic_func(ha);
2361 qla_del_rcv_cntxt(ha);
2363 qla_del_xmt_cntxt(ha);
2365 if (ha->hw.flags.init_intr_cnxt) {
2366 for (i = 0; i < ha->hw.num_sds_rings; ) {
2368 if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2369 num_msix = Q8_MAX_INTR_VECTORS;
2371 num_msix = ha->hw.num_sds_rings - i;
2372 qla_config_intr_cntxt(ha, i, num_msix, 0);
2377 ha->hw.flags.init_intr_cnxt = 0;
2380 if (ha->hw.enable_soft_lro) {
2381 qla_drain_soft_lro(ha);
2382 qla_free_soft_lro(ha);
2389 qla_confirm_9kb_enable(qla_host_t *ha)
2391 uint32_t supports_9kb = 0;
2393 ha->hw.mbx_intr_mask_offset = READ_REG32(ha, Q8_MBOX_INT_MASK_MSIX);
2395 /* Use MSI-X vector 0; Enable Firmware Mailbox Interrupt */
2396 WRITE_REG32(ha, Q8_MBOX_INT_ENABLE, BIT_2);
2397 WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
2399 qla_get_nic_partition(ha, &supports_9kb, NULL);
2402 ha->hw.enable_9kb = 0;
2408 * Name: ql_init_hw_if
2409 * Function: Creates the hardware specific entities corresponding to an
2410 * Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
2411 * corresponding to the interface. Enables LRO if allowed.
2414 ql_init_hw_if(qla_host_t *ha)
2418 uint8_t bcast_mac[6];
2424 for (i = 0; i < ha->hw.num_sds_rings; i++) {
2425 bzero(ha->hw.dma_buf.sds_ring[i].dma_b,
2426 ha->hw.dma_buf.sds_ring[i].size);
2429 for (i = 0; i < ha->hw.num_sds_rings; ) {
2431 if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2432 num_msix = Q8_MAX_INTR_VECTORS;
2434 num_msix = ha->hw.num_sds_rings - i;
2436 if (qla_config_intr_cntxt(ha, i, num_msix, 1)) {
2442 for (i = 0; i < num_msix; ) {
2443 qla_config_intr_cntxt(ha, i,
2444 Q8_MAX_INTR_VECTORS, 0);
2445 i += Q8_MAX_INTR_VECTORS;
2454 ha->hw.flags.init_intr_cnxt = 1;
2457 * Create Receive Context
2459 if (qla_init_rcv_cntxt(ha)) {
2463 for (i = 0; i < ha->hw.num_rds_rings; i++) {
2464 rdesc = &ha->hw.rds[i];
2465 rdesc->rx_next = NUM_RX_DESCRIPTORS - 2;
2467 /* Update the RDS Producer Indices */
2468 QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,\
2474 * Create Transmit Context
2476 if (qla_init_xmt_cntxt(ha)) {
2477 qla_del_rcv_cntxt(ha);
2480 ha->hw.max_tx_segs = 0;
2482 if (qla_config_mac_addr(ha, ha->hw.mac_addr, 1, 1))
2485 ha->hw.flags.unicast_mac = 1;
2487 bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
2488 bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
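/*
 * The broadcast address goes through the same CAM programming path as the
 * station address; qla_config_mac_addr(..., 1, 1) appears to add a single
 * filter entry for ff:ff:ff:ff:ff:ff.
 */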
2490 if (qla_config_mac_addr(ha, bcast_mac, 1, 1))
2493 ha->hw.flags.bcast_mac = 1;
2496 * program any cached multicast addresses
2498 if (qla_hw_add_all_mcast(ha))
2501 if (ql_set_max_mtu(ha, ha->max_frame_size, ha->hw.rcv_cntxt_id))
2504 if (qla_config_rss(ha, ha->hw.rcv_cntxt_id))
2507 if (qla_config_rss_ind_table(ha))
2510 if (qla_config_intr_coalesce(ha, ha->hw.rcv_cntxt_id, 0, 1))
2513 if (qla_link_event_req(ha, ha->hw.rcv_cntxt_id))
2516 if (ha->ifp->if_capenable & IFCAP_LRO) {
2517 if (ha->hw.enable_hw_lro) {
2518 ha->hw.enable_soft_lro = 0;
2520 if (qla_config_fw_lro(ha, ha->hw.rcv_cntxt_id))
2523 ha->hw.enable_soft_lro = 1;
2525 if (qla_config_soft_lro(ha))
2530 if (qla_init_nic_func(ha))
2533 if (qla_query_fw_dcbx_caps(ha))
2536 for (i = 0; i < ha->hw.num_sds_rings; i++)
2537 QL_ENABLE_INTERRUPTS(ha, i);
2543 qla_map_sds_to_rds(qla_host_t *ha, uint32_t start_idx, uint32_t num_idx)
2545 device_t dev = ha->pci_dev;
2546 q80_rq_map_sds_to_rds_t *map_rings;
2547 q80_rsp_map_sds_to_rds_t *map_rings_rsp;
2549 qla_hw_t *hw = &ha->hw;
2551 map_rings = (q80_rq_map_sds_to_rds_t *)ha->hw.mbox;
2552 bzero(map_rings, sizeof(q80_rq_map_sds_to_rds_t));
2554 map_rings->opcode = Q8_MBX_MAP_SDS_TO_RDS;
2555 map_rings->count_version = (sizeof (q80_rq_map_sds_to_rds_t) >> 2);
2556 map_rings->count_version |= Q8_MBX_CMD_VERSION;
2558 map_rings->cntxt_id = hw->rcv_cntxt_id;
2559 map_rings->num_rings = num_idx;
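/*
 * Mailbox convention used throughout this file: the request is staged in
 * ha->hw.mbox, count_version carries the request length in 32-bit words
 * OR'ed with Q8_MBX_CMD_VERSION, the response lands back in the same
 * mailbox area, and its status is extracted with Q8_MBX_RSP_STATUS().
 */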
2561 for (i = 0; i < num_idx; i++) {
2562 map_rings->sds_rds[i].sds_ring = i + start_idx;
2563 map_rings->sds_rds[i].rds_ring = i + start_idx;
2566 if (qla_mbx_cmd(ha, (uint32_t *)map_rings,
2567 (sizeof (q80_rq_map_sds_to_rds_t) >> 2),
2568 ha->hw.mbox, (sizeof(q80_rsp_map_sds_to_rds_t) >> 2), 0)) {
2569 device_printf(dev, "%s: failed0\n", __func__);
2573 map_rings_rsp = (q80_rsp_map_sds_to_rds_t *)ha->hw.mbox;
2575 err = Q8_MBX_RSP_STATUS(map_rings_rsp->regcnt_status);
2578 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2586 * Name: qla_init_rcv_cntxt
2587 * Function: Creates the Receive Context.
2590 qla_init_rcv_cntxt(qla_host_t *ha)
2592 q80_rq_rcv_cntxt_t *rcntxt;
2593 q80_rsp_rcv_cntxt_t *rcntxt_rsp;
2594 q80_stat_desc_t *sdesc;
2596 qla_hw_t *hw = &ha->hw;
2599 uint32_t rcntxt_sds_rings;
2600 uint32_t rcntxt_rds_rings;
2606 * Create Receive Context
2609 for (i = 0; i < hw->num_sds_rings; i++) {
2610 sdesc = (q80_stat_desc_t *)&hw->sds[i].sds_ring_base[0];
2612 for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++) {
2613 sdesc->data[0] = 1ULL;
2614 sdesc->data[1] = 1ULL;
2618 rcntxt_sds_rings = hw->num_sds_rings;
2619 if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS)
2620 rcntxt_sds_rings = MAX_RCNTXT_SDS_RINGS;
2622 rcntxt_rds_rings = hw->num_rds_rings;
2624 if (hw->num_rds_rings > MAX_RDS_RING_SETS)
2625 rcntxt_rds_rings = MAX_RDS_RING_SETS;
2627 rcntxt = (q80_rq_rcv_cntxt_t *)ha->hw.mbox;
2628 bzero(rcntxt, (sizeof (q80_rq_rcv_cntxt_t)));
2630 rcntxt->opcode = Q8_MBX_CREATE_RX_CNTXT;
2631 rcntxt->count_version = (sizeof (q80_rq_rcv_cntxt_t) >> 2);
2632 rcntxt->count_version |= Q8_MBX_CMD_VERSION;
2634 rcntxt->cap0 = Q8_RCV_CNTXT_CAP0_BASEFW |
2635 Q8_RCV_CNTXT_CAP0_LRO |
2636 Q8_RCV_CNTXT_CAP0_HW_LRO |
2637 Q8_RCV_CNTXT_CAP0_RSS |
2638 Q8_RCV_CNTXT_CAP0_SGL_LRO;
2640 if (ha->hw.enable_9kb)
2641 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SINGLE_JUMBO;
2643 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SGL_JUMBO;
2645 if (ha->hw.num_rds_rings > 1) {
2646 rcntxt->nrds_sets_rings = rcntxt_rds_rings | (1 << 5);
2647 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_MULTI_RDS;
2649 rcntxt->nrds_sets_rings = 0x1 | (1 << 5);
2651 rcntxt->nsds_rings = rcntxt_sds_rings;
2653 rcntxt->rds_producer_mode = Q8_RCV_CNTXT_RDS_PROD_MODE_UNIQUE;
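/*
 * Unique producer mode: each RDS ring presumably keeps its own producer
 * index rather than sharing one across the ring set.
 */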
2655 rcntxt->rcv_vpid = 0;
2657 for (i = 0; i < rcntxt_sds_rings; i++) {
2658 rcntxt->sds[i].paddr =
2659 qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr);
2660 rcntxt->sds[i].size =
2661 qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
2662 rcntxt->sds[i].intr_id = qla_host_to_le16(hw->intr_id[i]);
2663 rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0);
2666 for (i = 0; i < rcntxt_rds_rings; i++) {
2667 rcntxt->rds[i].paddr_std =
2668 qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr);
2670 if (ha->hw.enable_9kb)
2671 rcntxt->rds[i].std_bsize =
2672 qla_host_to_le64(MJUM9BYTES);
2674 rcntxt->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
2676 rcntxt->rds[i].std_nentries =
2677 qla_host_to_le32(NUM_RX_DESCRIPTORS);
2680 if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
2681 (sizeof (q80_rq_rcv_cntxt_t) >> 2),
2682 ha->hw.mbox, (sizeof(q80_rsp_rcv_cntxt_t) >> 2), 0)) {
2683 device_printf(dev, "%s: failed0\n", __func__);
2687 rcntxt_rsp = (q80_rsp_rcv_cntxt_t *)ha->hw.mbox;
2689 err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
2692 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2696 for (i = 0; i < rcntxt_sds_rings; i++) {
2697 hw->sds[i].sds_consumer = rcntxt_rsp->sds_cons[i];
2700 for (i = 0; i < rcntxt_rds_rings; i++) {
2701 hw->rds[i].prod_std = rcntxt_rsp->rds[i].prod_std;
2704 hw->rcv_cntxt_id = rcntxt_rsp->cntxt_id;
2706 ha->hw.flags.init_rx_cnxt = 1;
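/*
 * A create-context request can carry at most MAX_RCNTXT_SDS_RINGS status
 * rings; any remaining rings are attached below in batches through
 * qla_add_rcv_rings(), and with multiple RDS rings the SDS-to-RDS
 * associations are then programmed through qla_map_sds_to_rds().
 */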
2708 if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) {
2710 for (i = MAX_RCNTXT_SDS_RINGS; i < hw->num_sds_rings;) {
2712 if ((i + MAX_RCNTXT_SDS_RINGS) < hw->num_sds_rings)
2713 max_idx = MAX_RCNTXT_SDS_RINGS;
2715 max_idx = hw->num_sds_rings - i;
2717 err = qla_add_rcv_rings(ha, i, max_idx);
2725 if (hw->num_rds_rings > 1) {
2727 for (i = 0; i < hw->num_rds_rings; ) {
2729 if ((i + MAX_SDS_TO_RDS_MAP) < hw->num_rds_rings)
2730 max_idx = MAX_SDS_TO_RDS_MAP;
2732 max_idx = hw->num_rds_rings - i;
2734 err = qla_map_sds_to_rds(ha, i, max_idx);
2746 qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds)
2748 device_t dev = ha->pci_dev;
2749 q80_rq_add_rcv_rings_t *add_rcv;
2750 q80_rsp_add_rcv_rings_t *add_rcv_rsp;
2752 qla_hw_t *hw = &ha->hw;
2754 add_rcv = (q80_rq_add_rcv_rings_t *)ha->hw.mbox;
2755 bzero(add_rcv, sizeof (q80_rq_add_rcv_rings_t));
2757 add_rcv->opcode = Q8_MBX_ADD_RX_RINGS;
2758 add_rcv->count_version = (sizeof (q80_rq_add_rcv_rings_t) >> 2);
2759 add_rcv->count_version |= Q8_MBX_CMD_VERSION;
2761 add_rcv->nrds_sets_rings = nsds | (1 << 5);
2762 add_rcv->nsds_rings = nsds;
2763 add_rcv->cntxt_id = hw->rcv_cntxt_id;
2765 for (i = 0; i < nsds; i++) {
2769 add_rcv->sds[i].paddr =
2770 qla_host_to_le64(hw->dma_buf.sds_ring[j].dma_addr);
2772 add_rcv->sds[i].size =
2773 qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
2775 add_rcv->sds[i].intr_id = qla_host_to_le16(hw->intr_id[j]);
2776 add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0);
2780 for (i = 0; (i < nsds); i++) {
2783 add_rcv->rds[i].paddr_std =
2784 qla_host_to_le64(hw->dma_buf.rds_ring[j].dma_addr);
2786 if (ha->hw.enable_9kb)
2787 add_rcv->rds[i].std_bsize =
2788 qla_host_to_le64(MJUM9BYTES);
2790 add_rcv->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
2792 add_rcv->rds[i].std_nentries =
2793 qla_host_to_le32(NUM_RX_DESCRIPTORS);
2797 if (qla_mbx_cmd(ha, (uint32_t *)add_rcv,
2798 (sizeof (q80_rq_add_rcv_rings_t) >> 2),
2799 ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
2800 device_printf(dev, "%s: failed0\n", __func__);
2804 add_rcv_rsp = (q80_rsp_add_rcv_rings_t *)ha->hw.mbox;
2806 err = Q8_MBX_RSP_STATUS(add_rcv_rsp->regcnt_status);
2809 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2813 for (i = 0; i < nsds; i++) {
2814 hw->sds[(i + sds_idx)].sds_consumer = add_rcv_rsp->sds_cons[i];
2817 for (i = 0; i < nsds; i++) {
2818 hw->rds[(i + sds_idx)].prod_std = add_rcv_rsp->rds[i].prod_std;
2825 * Name: qla_del_rcv_cntxt
2826 * Function: Destroys the Receive Context.
2829 qla_del_rcv_cntxt(qla_host_t *ha)
2831 device_t dev = ha->pci_dev;
2832 q80_rcv_cntxt_destroy_t *rcntxt;
2833 q80_rcv_cntxt_destroy_rsp_t *rcntxt_rsp;
2835 uint8_t bcast_mac[6];
2837 if (!ha->hw.flags.init_rx_cnxt)
2840 if (qla_hw_del_all_mcast(ha))
2843 if (ha->hw.flags.bcast_mac) {
2845 bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
2846 bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
2848 if (qla_config_mac_addr(ha, bcast_mac, 0, 1))
2850 ha->hw.flags.bcast_mac = 0;
2854 if (ha->hw.flags.unicast_mac) {
2855 if (qla_config_mac_addr(ha, ha->hw.mac_addr, 0, 1))
2857 ha->hw.flags.unicast_mac = 0;
2860 rcntxt = (q80_rcv_cntxt_destroy_t *)ha->hw.mbox;
2861 bzero(rcntxt, (sizeof (q80_rcv_cntxt_destroy_t)));
2863 rcntxt->opcode = Q8_MBX_DESTROY_RX_CNTXT;
2864 rcntxt->count_version = (sizeof (q80_rcv_cntxt_destroy_t) >> 2);
2865 rcntxt->count_version |= Q8_MBX_CMD_VERSION;
2867 rcntxt->cntxt_id = ha->hw.rcv_cntxt_id;
2869 if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
2870 (sizeof (q80_rcv_cntxt_destroy_t) >> 2),
2871 ha->hw.mbox, (sizeof(q80_rcv_cntxt_destroy_rsp_t) >> 2), 0)) {
2872 device_printf(dev, "%s: failed0\n", __func__);
2875 rcntxt_rsp = (q80_rcv_cntxt_destroy_rsp_t *)ha->hw.mbox;
2877 err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
2880 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2883 ha->hw.flags.init_rx_cnxt = 0;
2888 * Name: qla_init_xmt_cntxt
2889 * Function: Creates the Transmit Context.
2892 qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
2895 qla_hw_t *hw = &ha->hw;
2896 q80_rq_tx_cntxt_t *tcntxt;
2897 q80_rsp_tx_cntxt_t *tcntxt_rsp;
2899 qla_hw_tx_cntxt_t *hw_tx_cntxt;
2902 hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
2907 * Create Transmit Context
2909 tcntxt = (q80_rq_tx_cntxt_t *)ha->hw.mbox;
2910 bzero(tcntxt, (sizeof (q80_rq_tx_cntxt_t)));
2912 tcntxt->opcode = Q8_MBX_CREATE_TX_CNTXT;
2913 tcntxt->count_version = (sizeof (q80_rq_tx_cntxt_t) >> 2);
2914 tcntxt->count_version |= Q8_MBX_CMD_VERSION;
2918 #ifdef QL_ENABLE_ISCSI_TLV
2920 tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO |
2921 Q8_TX_CNTXT_CAP0_TC;
2923 if (txr_idx >= (ha->hw.num_tx_rings >> 1)) {
2924 tcntxt->traffic_class = 1;
2927 intr_idx = txr_idx % (ha->hw.num_tx_rings >> 1);
2930 tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO;
2932 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
2934 tcntxt->ntx_rings = 1;
2936 tcntxt->tx_ring[0].paddr =
2937 qla_host_to_le64(hw_tx_cntxt->tx_ring_paddr);
2938 tcntxt->tx_ring[0].tx_consumer =
2939 qla_host_to_le64(hw_tx_cntxt->tx_cons_paddr);
2940 tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS);
2942 tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[intr_idx]);
2943 tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0);
2945 hw_tx_cntxt->txr_free = NUM_TX_DESCRIPTORS;
2946 hw_tx_cntxt->txr_next = hw_tx_cntxt->txr_comp = 0;
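/*
 * Ring accounting: txr_next is the host producer index, txr_comp is the
 * host's shadow of the firmware completion (consumer) index, and txr_free
 * counts the descriptors still available between them.
 */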
2948 if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
2949 (sizeof (q80_rq_tx_cntxt_t) >> 2),
2951 (sizeof(q80_rsp_tx_cntxt_t) >> 2), 0)) {
2952 device_printf(dev, "%s: failed0\n", __func__);
2955 tcntxt_rsp = (q80_rsp_tx_cntxt_t *)ha->hw.mbox;
2957 err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
2960 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2964 hw_tx_cntxt->tx_prod_reg = tcntxt_rsp->tx_ring[0].prod_index;
2965 hw_tx_cntxt->tx_cntxt_id = tcntxt_rsp->tx_ring[0].cntxt_id;
2967 if (qla_config_intr_coalesce(ha, hw_tx_cntxt->tx_cntxt_id, 0, 0))
2975 * Name: qla_del_xmt_cntxt
2976 * Function: Destroys the Transmit Context.
2979 qla_del_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
2981 device_t dev = ha->pci_dev;
2982 q80_tx_cntxt_destroy_t *tcntxt;
2983 q80_tx_cntxt_destroy_rsp_t *tcntxt_rsp;
2986 tcntxt = (q80_tx_cntxt_destroy_t *)ha->hw.mbox;
2987 bzero(tcntxt, (sizeof (q80_tx_cntxt_destroy_t)));
2989 tcntxt->opcode = Q8_MBX_DESTROY_TX_CNTXT;
2990 tcntxt->count_version = (sizeof (q80_tx_cntxt_destroy_t) >> 2);
2991 tcntxt->count_version |= Q8_MBX_CMD_VERSION;
2993 tcntxt->cntxt_id = ha->hw.tx_cntxt[txr_idx].tx_cntxt_id;
2995 if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
2996 (sizeof (q80_tx_cntxt_destroy_t) >> 2),
2997 ha->hw.mbox, (sizeof (q80_tx_cntxt_destroy_rsp_t) >> 2), 0)) {
2998 device_printf(dev, "%s: failed0\n", __func__);
3001 tcntxt_rsp = (q80_tx_cntxt_destroy_rsp_t *)ha->hw.mbox;
3003 err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
3006 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
3013 qla_del_xmt_cntxt(qla_host_t *ha)
3017 if (!ha->hw.flags.init_tx_cnxt)
3020 for (i = 0; i < ha->hw.num_tx_rings; i++) {
3021 if (qla_del_xmt_cntxt_i(ha, i))
3024 ha->hw.flags.init_tx_cnxt = 0;
3028 qla_init_xmt_cntxt(qla_host_t *ha)
3032 for (i = 0; i < ha->hw.num_tx_rings; i++) {
3033 if (qla_init_xmt_cntxt_i(ha, i) != 0) {
3034 for (j = 0; j < i; j++)
3035 qla_del_xmt_cntxt_i(ha, j);
3039 ha->hw.flags.init_tx_cnxt = 1;
3044 qla_hw_all_mcast(qla_host_t *ha, uint32_t add_mcast)
3050 nmcast = ha->hw.nmcast;
3052 QL_DPRINT2(ha, (ha->pci_dev,
3053 "%s:[0x%x] enter nmcast = %d \n", __func__, add_mcast, nmcast));
3055 mcast = ha->hw.mac_addr_arr;
3056 memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
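/*
 * Addresses are staged into mac_addr_arr and pushed to the firmware in
 * batches of up to Q8_MAX_MAC_ADDRS per command, rather than one mailbox
 * command per address.
 */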
3058 for (i = 0; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
3059 if ((ha->hw.mcast[i].addr[0] != 0) ||
3060 (ha->hw.mcast[i].addr[1] != 0) ||
3061 (ha->hw.mcast[i].addr[2] != 0) ||
3062 (ha->hw.mcast[i].addr[3] != 0) ||
3063 (ha->hw.mcast[i].addr[4] != 0) ||
3064 (ha->hw.mcast[i].addr[5] != 0)) {
3066 bcopy(ha->hw.mcast[i].addr, mcast, ETHER_ADDR_LEN);
3067 mcast = mcast + ETHER_ADDR_LEN;
3070 if (count == Q8_MAX_MAC_ADDRS) {
3071 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr,
3072 add_mcast, count)) {
3073 device_printf(ha->pci_dev,
3074 "%s: failed\n", __func__);
3079 mcast = ha->hw.mac_addr_arr;
3081 (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3089 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mcast,
3091 device_printf(ha->pci_dev, "%s: failed\n", __func__);
3095 QL_DPRINT2(ha, (ha->pci_dev,
3096 "%s:[0x%x] exit nmcast = %d \n", __func__, add_mcast, nmcast));
3102 qla_hw_add_all_mcast(qla_host_t *ha)
3106 ret = qla_hw_all_mcast(ha, 1);
3112 qla_hw_del_all_mcast(qla_host_t *ha)
3116 ret = qla_hw_all_mcast(ha, 0);
3118 bzero(ha->hw.mcast, (sizeof (qla_mcast_t) * Q8_MAX_NUM_MULTICAST_ADDRS));
3125 qla_hw_mac_addr_present(qla_host_t *ha, uint8_t *mta)
3129 for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3130 if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0)
3131 return (0); /* it has already been added */
3137 qla_hw_add_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
3141 for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3143 if ((ha->hw.mcast[i].addr[0] == 0) &&
3144 (ha->hw.mcast[i].addr[1] == 0) &&
3145 (ha->hw.mcast[i].addr[2] == 0) &&
3146 (ha->hw.mcast[i].addr[3] == 0) &&
3147 (ha->hw.mcast[i].addr[4] == 0) &&
3148 (ha->hw.mcast[i].addr[5] == 0)) {
3150 bcopy(mta, ha->hw.mcast[i].addr, Q8_MAC_ADDR_LEN);
3153 mta = mta + ETHER_ADDR_LEN;
3165 qla_hw_del_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
3169 for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
3170 if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) {
3172 ha->hw.mcast[i].addr[0] = 0;
3173 ha->hw.mcast[i].addr[1] = 0;
3174 ha->hw.mcast[i].addr[2] = 0;
3175 ha->hw.mcast[i].addr[3] = 0;
3176 ha->hw.mcast[i].addr[4] = 0;
3177 ha->hw.mcast[i].addr[5] = 0;
3181 mta = mta + ETHER_ADDR_LEN;
3192 * Name: ql_hw_set_multi
3193 * Function: Sets the Multicast Addresses provided by the host O.S into the
3194 * hardware (for the given interface)
3197 ql_hw_set_multi(qla_host_t *ha, uint8_t *mcast_addr, uint32_t mcnt,
3200 uint8_t *mta = mcast_addr;
3206 mcast = ha->hw.mac_addr_arr;
3207 memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3209 for (i = 0; i < mcnt; i++) {
3210 if (mta[0] || mta[1] || mta[2] || mta[3] || mta[4] || mta[5]) {
3212 if (qla_hw_mac_addr_present(ha, mta) != 0) {
3213 bcopy(mta, mcast, ETHER_ADDR_LEN);
3214 mcast = mcast + ETHER_ADDR_LEN;
3218 if (qla_hw_mac_addr_present(ha, mta) == 0) {
3219 bcopy(mta, mcast, ETHER_ADDR_LEN);
3220 mcast = mcast + ETHER_ADDR_LEN;
3225 if (count == Q8_MAX_MAC_ADDRS) {
3226 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr,
3228 device_printf(ha->pci_dev, "%s: failed\n",
3234 qla_hw_add_mcast(ha, ha->hw.mac_addr_arr,
3237 qla_hw_del_mcast(ha, ha->hw.mac_addr_arr,
3242 mcast = ha->hw.mac_addr_arr;
3243 memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
3246 mta += Q8_MAC_ADDR_LEN;
3250 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mac,
3252 device_printf(ha->pci_dev, "%s: failed\n", __func__);
3256 qla_hw_add_mcast(ha, ha->hw.mac_addr_arr, count);
3258 qla_hw_del_mcast(ha, ha->hw.mac_addr_arr, count);
3266 * Name: ql_hw_tx_done_locked
3267 * Function: Handle Transmit Completions
3270 ql_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx)
3273 qla_hw_t *hw = &ha->hw;
3274 uint32_t comp_idx, comp_count = 0;
3275 qla_hw_tx_cntxt_t *hw_tx_cntxt;
3277 hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
3279 /* retrieve index of last entry in tx ring completed */
3280 comp_idx = qla_le32_to_host(*(hw_tx_cntxt->tx_cons));
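/*
 * tx_cons points at host memory into which the firmware writes the current
 * hardware consumer index (its physical address was handed over at context
 * creation); completions are reaped by walking txr_comp forward until it
 * catches up with that index.
 */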
3282 while (comp_idx != hw_tx_cntxt->txr_comp) {
3284 txb = &ha->tx_ring[txr_idx].tx_buf[hw_tx_cntxt->txr_comp];
3286 hw_tx_cntxt->txr_comp++;
3287 if (hw_tx_cntxt->txr_comp == NUM_TX_DESCRIPTORS)
3288 hw_tx_cntxt->txr_comp = 0;
3293 ha->ifp->if_opackets++;
3295 bus_dmamap_sync(ha->tx_tag, txb->map,
3296 BUS_DMASYNC_POSTWRITE);
3297 bus_dmamap_unload(ha->tx_tag, txb->map);
3298 m_freem(txb->m_head);
3304 hw_tx_cntxt->txr_free += comp_count;
3309 ql_update_link_state(qla_host_t *ha)
3311 uint32_t link_state;
3312 uint32_t prev_link_state;
3314 if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3318 link_state = READ_REG32(ha, Q8_LINK_STATE);
3320 prev_link_state = ha->hw.link_up;
3322 if (ha->pci_func == 0)
3323 ha->hw.link_up = (((link_state & 0xF) == 1) ? 1 : 0);
3325 ha->hw.link_up = ((((link_state >> 4) & 0xF) == 1) ? 1 : 0);
3327 if (prev_link_state != ha->hw.link_up) {
3328 if (ha->hw.link_up) {
3329 if_link_state_change(ha->ifp, LINK_STATE_UP);
3331 if_link_state_change(ha->ifp, LINK_STATE_DOWN);
3338 ql_hw_stop_rcv(qla_host_t *ha)
3340 int i, done, count = 100;
3342 ha->flags.stop_rcv = 1;
3346 for (i = 0; i < ha->hw.num_sds_rings; i++) {
3347 if (ha->hw.sds[i].rcv_active)
3353 qla_mdelay(__func__, 10);
3357 device_printf(ha->pci_dev, "%s: Counter expired.\n", __func__);
3363 ql_hw_check_health(qla_host_t *ha)
3367 ha->hw.health_count++;
3369 if (ha->hw.health_count < 500)
3372 ha->hw.health_count = 0;
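/*
 * Invoked periodically; the actual checks below (ASIC temperature and
 * firmware heartbeat registers) only run once every 500 invocations.
 */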
3374 val = READ_REG32(ha, Q8_ASIC_TEMPERATURE);
3376 if (((val & 0xFFFF) == 2) || ((val & 0xFFFF) == 3) ||
3377 (QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE))) {
3378 device_printf(ha->pci_dev, "%s: Temperature Alert [0x%08x]\n",
3383 val = READ_REG32(ha, Q8_FIRMWARE_HEARTBEAT);
3385 if ((val != ha->hw.hbeat_value) &&
3386 (!(QL_ERR_INJECT(ha, INJCT_HEARTBEAT_FAILURE)))) {
3387 ha->hw.hbeat_value = val;
3388 ha->hw.hbeat_failure = 0;
3392 ha->hw.hbeat_failure++;
3394 if (ha->hw.hbeat_failure < 2) /* we ignore the first failure */
3397 device_printf(ha->pci_dev, "%s: Heartbeat Failure [0x%08x]\n",
3405 qla_init_nic_func(qla_host_t *ha)
3408 q80_init_nic_func_t *init_nic;
3409 q80_init_nic_func_rsp_t *init_nic_rsp;
3414 init_nic = (q80_init_nic_func_t *)ha->hw.mbox;
3415 bzero(init_nic, sizeof(q80_init_nic_func_t));
3417 init_nic->opcode = Q8_MBX_INIT_NIC_FUNC;
3418 init_nic->count_version = (sizeof (q80_init_nic_func_t) >> 2);
3419 init_nic->count_version |= Q8_MBX_CMD_VERSION;
3421 init_nic->options = Q8_INIT_NIC_REG_DCBX_CHNG_AEN;
3422 init_nic->options |= Q8_INIT_NIC_REG_SFP_CHNG_AEN;
3423 init_nic->options |= Q8_INIT_NIC_REG_IDC_AEN;
3425 //qla_dump_buf8(ha, __func__, init_nic, sizeof (q80_init_nic_func_t));
3426 if (qla_mbx_cmd(ha, (uint32_t *)init_nic,
3427 (sizeof (q80_init_nic_func_t) >> 2),
3428 ha->hw.mbox, (sizeof (q80_init_nic_func_rsp_t) >> 2), 0)) {
3429 device_printf(dev, "%s: failed\n", __func__);
3433 init_nic_rsp = (q80_init_nic_func_rsp_t *)ha->hw.mbox;
3434 // qla_dump_buf8(ha, __func__, init_nic_rsp, sizeof (q80_init_nic_func_rsp_t));
3436 err = Q8_MBX_RSP_STATUS(init_nic_rsp->regcnt_status);
3439 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3446 qla_stop_nic_func(qla_host_t *ha)
3449 q80_stop_nic_func_t *stop_nic;
3450 q80_stop_nic_func_rsp_t *stop_nic_rsp;
3455 stop_nic = (q80_stop_nic_func_t *)ha->hw.mbox;
3456 bzero(stop_nic, sizeof(q80_stop_nic_func_t));
3458 stop_nic->opcode = Q8_MBX_STOP_NIC_FUNC;
3459 stop_nic->count_version = (sizeof (q80_stop_nic_func_t) >> 2);
3460 stop_nic->count_version |= Q8_MBX_CMD_VERSION;
3462 stop_nic->options = Q8_STOP_NIC_DEREG_DCBX_CHNG_AEN;
3463 stop_nic->options |= Q8_STOP_NIC_DEREG_SFP_CHNG_AEN;
3465 //qla_dump_buf8(ha, __func__, stop_nic, sizeof (q80_stop_nic_func_t));
3466 if (qla_mbx_cmd(ha, (uint32_t *)stop_nic,
3467 (sizeof (q80_stop_nic_func_t) >> 2),
3468 ha->hw.mbox, (sizeof (q80_stop_nic_func_rsp_t) >> 2), 0)) {
3469 device_printf(dev, "%s: failed\n", __func__);
3473 stop_nic_rsp = (q80_stop_nic_func_rsp_t *)ha->hw.mbox;
3474 //qla_dump_buf8(ha, __func__, stop_nic_rsp, sizeof (q80_stop_nic_func_rsp_t));
3476 err = Q8_MBX_RSP_STATUS(stop_nic_rsp->regcnt_status);
3479 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3486 qla_query_fw_dcbx_caps(qla_host_t *ha)
3489 q80_query_fw_dcbx_caps_t *fw_dcbx;
3490 q80_query_fw_dcbx_caps_rsp_t *fw_dcbx_rsp;
3495 fw_dcbx = (q80_query_fw_dcbx_caps_t *)ha->hw.mbox;
3496 bzero(fw_dcbx, sizeof(q80_query_fw_dcbx_caps_t));
3498 fw_dcbx->opcode = Q8_MBX_GET_FW_DCBX_CAPS;
3499 fw_dcbx->count_version = (sizeof (q80_query_fw_dcbx_caps_t) >> 2);
3500 fw_dcbx->count_version |= Q8_MBX_CMD_VERSION;
3502 ql_dump_buf8(ha, __func__, fw_dcbx, sizeof (q80_query_fw_dcbx_caps_t));
3503 if (qla_mbx_cmd(ha, (uint32_t *)fw_dcbx,
3504 (sizeof (q80_query_fw_dcbx_caps_t) >> 2),
3505 ha->hw.mbox, (sizeof (q80_query_fw_dcbx_caps_rsp_t) >> 2), 0)) {
3506 device_printf(dev, "%s: failed\n", __func__);
3510 fw_dcbx_rsp = (q80_query_fw_dcbx_caps_rsp_t *)ha->hw.mbox;
3511 ql_dump_buf8(ha, __func__, fw_dcbx_rsp,
3512 sizeof (q80_query_fw_dcbx_caps_rsp_t));
3514 err = Q8_MBX_RSP_STATUS(fw_dcbx_rsp->regcnt_status);
3517 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3524 qla_idc_ack(qla_host_t *ha, uint32_t aen_mb1, uint32_t aen_mb2,
3525 uint32_t aen_mb3, uint32_t aen_mb4)
3528 q80_idc_ack_t *idc_ack;
3529 q80_idc_ack_rsp_t *idc_ack_rsp;
3535 idc_ack = (q80_idc_ack_t *)ha->hw.mbox;
3536 bzero(idc_ack, sizeof(q80_idc_ack_t));
3538 idc_ack->opcode = Q8_MBX_IDC_ACK;
3539 idc_ack->count_version = (sizeof (q80_idc_ack_t) >> 2);
3540 idc_ack->count_version |= Q8_MBX_CMD_VERSION;
3542 idc_ack->aen_mb1 = aen_mb1;
3543 idc_ack->aen_mb2 = aen_mb2;
3544 idc_ack->aen_mb3 = aen_mb3;
3545 idc_ack->aen_mb4 = aen_mb4;
3547 ha->hw.imd_compl = 0;
3549 if (qla_mbx_cmd(ha, (uint32_t *)idc_ack,
3550 (sizeof (q80_idc_ack_t) >> 2),
3551 ha->hw.mbox, (sizeof (q80_idc_ack_rsp_t) >> 2), 0)) {
3552 device_printf(dev, "%s: failed\n", __func__);
3556 idc_ack_rsp = (q80_idc_ack_rsp_t *)ha->hw.mbox;
3558 err = Q8_MBX_RSP_STATUS(idc_ack_rsp->regcnt_status);
3561 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3565 while (count && !ha->hw.imd_compl) {
3566 qla_mdelay(__func__, 100);
3573 device_printf(dev, "%s: count %d\n", __func__, count);
3579 qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits)
3582 q80_set_port_cfg_t *pcfg;
3583 q80_set_port_cfg_rsp_t *pfg_rsp;
3589 pcfg = (q80_set_port_cfg_t *)ha->hw.mbox;
3590 bzero(pcfg, sizeof(q80_set_port_cfg_t));
3592 pcfg->opcode = Q8_MBX_SET_PORT_CONFIG;
3593 pcfg->count_version = (sizeof (q80_set_port_cfg_t) >> 2);
3594 pcfg->count_version |= Q8_MBX_CMD_VERSION;
3596 pcfg->cfg_bits = cfg_bits;
3598 device_printf(dev, "%s: cfg_bits"
3599 " [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
3600 " [0x%x, 0x%x, 0x%x]\n", __func__,
3601 ((cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
3602 ((cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
3603 ((cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1 : 0));
3605 ha->hw.imd_compl = 0;
3607 if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
3608 (sizeof (q80_set_port_cfg_t) >> 2),
3609 ha->hw.mbox, (sizeof (q80_set_port_cfg_rsp_t) >> 2), 0)) {
3610 device_printf(dev, "%s: failed\n", __func__);
3614 pfg_rsp = (q80_set_port_cfg_rsp_t *)ha->hw.mbox;
3616 err = Q8_MBX_RSP_STATUS(pfg_rsp->regcnt_status);
3618 if (err == Q8_MBX_RSP_IDC_INTRMD_RSP) {
3619 while (count && !ha->hw.imd_compl) {
3620 qla_mdelay(__func__, 100);
3624 device_printf(dev, "%s: count %d\n", __func__, count);
3631 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3640 qla_get_minidump_tmplt_size(qla_host_t *ha, uint32_t *size)
3643 device_t dev = ha->pci_dev;
3644 q80_config_md_templ_size_t *md_size;
3645 q80_config_md_templ_size_rsp_t *md_size_rsp;
3647 #ifndef QL_LDFLASH_FW
3649 ql_minidump_template_hdr_t *hdr;
3651 hdr = (ql_minidump_template_hdr_t *)ql83xx_minidump;
3652 *size = hdr->size_of_template;
3655 #endif /* #ifndef QL_LDFLASH_FW */
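/*
 * When the firmware is not loaded from flash (QL_LDFLASH_FW unset), the
 * template size comes from the header of the ql83xx_minidump blob compiled
 * into the driver; otherwise it is queried through the mailbox command
 * built below.
 */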
3657 md_size = (q80_config_md_templ_size_t *) ha->hw.mbox;
3658 bzero(md_size, sizeof(q80_config_md_templ_size_t));
3660 md_size->opcode = Q8_MBX_GET_MINIDUMP_TMPLT_SIZE;
3661 md_size->count_version = (sizeof (q80_config_md_templ_size_t) >> 2);
3662 md_size->count_version |= Q8_MBX_CMD_VERSION;
3664 if (qla_mbx_cmd(ha, (uint32_t *) md_size,
3665 (sizeof(q80_config_md_templ_size_t) >> 2), ha->hw.mbox,
3666 (sizeof(q80_config_md_templ_size_rsp_t) >> 2), 0)) {
3668 device_printf(dev, "%s: failed\n", __func__);
3673 md_size_rsp = (q80_config_md_templ_size_rsp_t *) ha->hw.mbox;
3675 err = Q8_MBX_RSP_STATUS(md_size_rsp->regcnt_status);
3678 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3682 *size = md_size_rsp->templ_size;
3688 qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits)
3691 q80_get_port_cfg_t *pcfg;
3692 q80_get_port_cfg_rsp_t *pcfg_rsp;
3697 pcfg = (q80_get_port_cfg_t *)ha->hw.mbox;
3698 bzero(pcfg, sizeof(q80_get_port_cfg_t));
3700 pcfg->opcode = Q8_MBX_GET_PORT_CONFIG;
3701 pcfg->count_version = (sizeof (q80_get_port_cfg_t) >> 2);
3702 pcfg->count_version |= Q8_MBX_CMD_VERSION;
3704 if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
3705 (sizeof (q80_get_port_cfg_t) >> 2),
3706 ha->hw.mbox, (sizeof (q80_get_port_cfg_rsp_t) >> 2), 0)) {
3707 device_printf(dev, "%s: failed\n", __func__);
3711 pcfg_rsp = (q80_get_port_cfg_rsp_t *)ha->hw.mbox;
3713 err = Q8_MBX_RSP_STATUS(pcfg_rsp->regcnt_status);
3716 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3720 device_printf(dev, "%s: [cfg_bits, port type]"
3721 " [0x%08x, 0x%02x] [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
3722 " [0x%x, 0x%x, 0x%x]\n", __func__,
3723 pcfg_rsp->cfg_bits, pcfg_rsp->phys_port_type,
3724 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
3725 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
3726 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1 : 0)
3729 *cfg_bits = pcfg_rsp->cfg_bits;
3735 ql_iscsi_pdu(qla_host_t *ha, struct mbuf *mp)
3737 struct ether_vlan_header *eh;
3739 struct ip *ip = NULL;
3740 struct ip6_hdr *ip6 = NULL;
3741 struct tcphdr *th = NULL;
3744 uint8_t buf[sizeof(struct ip6_hdr)];
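/*
 * Walk the Ethernet/VLAN, IPv4 or IPv6, and TCP headers to decide whether
 * this frame is an iSCSI PDU (TCP port 3260). Wherever a header may
 * straddle the first mbuf, m_copydata() pulls it into the local buffer
 * instead of dereferencing m_data directly.
 */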
3746 eh = mtod(mp, struct ether_vlan_header *);
3748 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3749 hdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3750 etype = ntohs(eh->evl_proto);
3752 hdrlen = ETHER_HDR_LEN;
3753 etype = ntohs(eh->evl_encap_proto);
3756 if (etype == ETHERTYPE_IP) {
3758 offset = (hdrlen + sizeof (struct ip));
3760 if (mp->m_len >= offset) {
3761 ip = (struct ip *)(mp->m_data + hdrlen);
3763 m_copydata(mp, hdrlen, sizeof (struct ip), buf);
3764 ip = (struct ip *)buf;
3767 if (ip->ip_p == IPPROTO_TCP) {
3769 hdrlen += ip->ip_hl << 2;
3770 offset = hdrlen + 4;
3772 if (mp->m_len >= offset) {
3773 th = (struct tcphdr *)(mp->m_data + hdrlen);
3775 m_copydata(mp, hdrlen, 4, buf);
3776 th = (struct tcphdr *)buf;
3780 } else if (etype == ETHERTYPE_IPV6) {
3782 offset = (hdrlen + sizeof (struct ip6_hdr));
3784 if (mp->m_len >= offset) {
3785 ip6 = (struct ip6_hdr *)(mp->m_data + hdrlen);
3787 m_copydata(mp, hdrlen, sizeof (struct ip6_hdr), buf);
3788 ip6 = (struct ip6_hdr *)buf;
3791 if (ip6->ip6_nxt == IPPROTO_TCP) {
3793 hdrlen += sizeof(struct ip6_hdr);
3794 offset = hdrlen + 4;
3796 if (mp->m_len >= offset) {
3797 th = (struct tcphdr *)(mp->m_data + hdrlen);
3799 m_copydata(mp, hdrlen, 4, buf);
3800 th = (struct tcphdr *)buf;
3806 if ((th->th_sport == htons(3260)) ||
3807 (th->th_dport == htons(3260)))
3814 qla_hw_async_event(qla_host_t *ha)
3816 switch (ha->hw.aen_mb0) {
3818 (void)qla_idc_ack(ha, ha->hw.aen_mb1, ha->hw.aen_mb2,
3819 ha->hw.aen_mb3, ha->hw.aen_mb4);
3830 #ifdef QL_LDFLASH_FW
3832 ql_get_minidump_template(qla_host_t *ha)
3835 device_t dev = ha->pci_dev;
3836 q80_config_md_templ_cmd_t *md_templ;
3837 q80_config_md_templ_cmd_rsp_t *md_templ_rsp;
3839 md_templ = (q80_config_md_templ_cmd_t *) ha->hw.mbox;
3840 bzero(md_templ, (sizeof (q80_config_md_templ_cmd_t)));
3842 md_templ->opcode = Q8_MBX_GET_MINIDUMP_TMPLT;
3843 md_templ->count_version = (sizeof(q80_config_md_templ_cmd_t) >> 2);
3844 md_templ->count_version |= Q8_MBX_CMD_VERSION;
3846 md_templ->buf_addr = ha->hw.dma_buf.minidump.dma_addr;
3847 md_templ->buff_size = ha->hw.dma_buf.minidump.size;
3849 if (qla_mbx_cmd(ha, (uint32_t *) md_templ,
3850 (sizeof(q80_config_md_templ_cmd_t) >> 2),
3852 (sizeof(q80_config_md_templ_cmd_rsp_t) >> 2), 0)) {
3854 device_printf(dev, "%s: failed\n", __func__);
3859 md_templ_rsp = (q80_config_md_templ_cmd_rsp_t *) ha->hw.mbox;
3861 err = Q8_MBX_RSP_STATUS(md_templ_rsp->regcnt_status);
3864 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3871 #endif /* #ifdef QL_LDFLASH_FW */
3874 * Minidump related functionality
3877 static int ql_parse_template(qla_host_t *ha);
3879 static uint32_t ql_rdcrb(qla_host_t *ha,
3880 ql_minidump_entry_rdcrb_t *crb_entry,
3881 uint32_t * data_buff);
3883 static uint32_t ql_pollrd(qla_host_t *ha,
3884 ql_minidump_entry_pollrd_t *entry,
3885 uint32_t * data_buff);
3887 static uint32_t ql_pollrd_modify_write(qla_host_t *ha,
3888 ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
3889 uint32_t *data_buff);
3891 static uint32_t ql_L2Cache(qla_host_t *ha,
3892 ql_minidump_entry_cache_t *cacheEntry,
3893 uint32_t * data_buff);
3895 static uint32_t ql_L1Cache(qla_host_t *ha,
3896 ql_minidump_entry_cache_t *cacheEntry,
3897 uint32_t *data_buff);
3899 static uint32_t ql_rdocm(qla_host_t *ha,
3900 ql_minidump_entry_rdocm_t *ocmEntry,
3901 uint32_t *data_buff);
3903 static uint32_t ql_rdmem(qla_host_t *ha,
3904 ql_minidump_entry_rdmem_t *mem_entry,
3905 uint32_t *data_buff);
3907 static uint32_t ql_rdrom(qla_host_t *ha,
3908 ql_minidump_entry_rdrom_t *romEntry,
3909 uint32_t *data_buff);
3911 static uint32_t ql_rdmux(qla_host_t *ha,
3912 ql_minidump_entry_mux_t *muxEntry,
3913 uint32_t *data_buff);
3915 static uint32_t ql_rdmux2(qla_host_t *ha,
3916 ql_minidump_entry_mux2_t *muxEntry,
3917 uint32_t *data_buff);
3919 static uint32_t ql_rdqueue(qla_host_t *ha,
3920 ql_minidump_entry_queue_t *queueEntry,
3921 uint32_t *data_buff);
3923 static uint32_t ql_cntrl(qla_host_t *ha,
3924 ql_minidump_template_hdr_t *template_hdr,
3925 ql_minidump_entry_cntrl_t *crbEntry);
3929 ql_minidump_size(qla_host_t *ha)
3933 ql_minidump_template_hdr_t *hdr;
3935 hdr = (ql_minidump_template_hdr_t *)ha->hw.dma_buf.minidump.dma_b;
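/*
 * Each bit of mdump_capture_mask selects one entry of the template header's
 * capture_size_array; the required capture buffer size is the sum of the
 * selected entries.
 */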
3939 for (k = 1; k < QL_DBG_CAP_SIZE_ARRAY_LEN; k++) {
3940 if (i & ha->hw.mdump_capture_mask)
3941 size += hdr->capture_size_array[k];
3948 ql_free_minidump_buffer(qla_host_t *ha)
3950 if (ha->hw.mdump_buffer != NULL) {
3951 free(ha->hw.mdump_buffer, M_QLA83XXBUF);
3952 ha->hw.mdump_buffer = NULL;
3953 ha->hw.mdump_buffer_size = 0;
3959 ql_alloc_minidump_buffer(qla_host_t *ha)
3961 ha->hw.mdump_buffer_size = ql_minidump_size(ha);
3963 if (!ha->hw.mdump_buffer_size)
3966 ha->hw.mdump_buffer = malloc(ha->hw.mdump_buffer_size, M_QLA83XXBUF,
3969 if (ha->hw.mdump_buffer == NULL)
3976 ql_free_minidump_template_buffer(qla_host_t *ha)
3978 if (ha->hw.mdump_template != NULL) {
3979 free(ha->hw.mdump_template, M_QLA83XXBUF);
3980 ha->hw.mdump_template = NULL;
3981 ha->hw.mdump_template_size = 0;
3987 ql_alloc_minidump_template_buffer(qla_host_t *ha)
3989 ha->hw.mdump_template_size = ha->hw.dma_buf.minidump.size;
3991 ha->hw.mdump_template = malloc(ha->hw.mdump_template_size,
3992 M_QLA83XXBUF, M_NOWAIT);
3994 if (ha->hw.mdump_template == NULL)
4001 ql_alloc_minidump_buffers(qla_host_t *ha)
4005 ret = ql_alloc_minidump_template_buffer(ha);
4010 ret = ql_alloc_minidump_buffer(ha);
4013 ql_free_minidump_template_buffer(ha);
4020 ql_validate_minidump_checksum(qla_host_t *ha)
4024 uint32_t *template_buff;
4026 count = ha->hw.dma_buf.minidump.size / sizeof (uint32_t);
4027 template_buff = ha->hw.dma_buf.minidump.dma_b;
4029 while (count-- > 0) {
4030 sum += *template_buff++;
4034 sum = (sum & 0xFFFFFFFF) + (sum >> 32);
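/*
 * End-around-carry fold, which the surrounding loop is expected to repeat
 * until the sum fits in 32 bits: e.g. 0xFFFFFFFF00000001 folds to
 * 0x100000000 and then to 0x1. A valid template sums to zero after folding.
 */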
4041 ql_minidump_init(qla_host_t *ha)
4044 uint32_t template_size = 0;
4045 device_t dev = ha->pci_dev;
4048 * Get Minidump Template Size
4050 ret = qla_get_minidump_tmplt_size(ha, &template_size);
4052 if (ret || (template_size == 0)) {
4053 device_printf(dev, "%s: failed [%d, %d]\n", __func__, ret,
4059 * Allocate Memory for Minidump Template
4062 ha->hw.dma_buf.minidump.alignment = 8;
4063 ha->hw.dma_buf.minidump.size = template_size;
4065 #ifdef QL_LDFLASH_FW
4066 if (ql_alloc_dmabuf(ha, &ha->hw.dma_buf.minidump)) {
4068 device_printf(dev, "%s: minidump dma alloc failed\n", __func__);
4072 ha->hw.dma_buf.flags.minidump = 1;
4075 * Retrieve Minidump Template
4077 ret = ql_get_minidump_template(ha);
4079 ha->hw.dma_buf.minidump.dma_b = ql83xx_minidump;
4081 #endif /* #ifdef QL_LDFLASH_FW */
4085 ret = ql_validate_minidump_checksum(ha);
4089 ret = ql_alloc_minidump_buffers(ha);
4092 ha->hw.mdump_init = 1;
4095 "%s: ql_alloc_minidump_buffers"
4096 " failed\n", __func__);
4098 device_printf(dev, "%s: ql_validate_minidump_checksum"
4099 " failed\n", __func__);
4102 device_printf(dev, "%s: ql_get_minidump_template failed\n",
4107 ql_minidump_free(ha);
4113 ql_minidump_free(qla_host_t *ha)
4115 ha->hw.mdump_init = 0;
4116 if (ha->hw.dma_buf.flags.minidump) {
4117 ha->hw.dma_buf.flags.minidump = 0;
4118 ql_free_dmabuf(ha, &ha->hw.dma_buf.minidump);
4121 ql_free_minidump_template_buffer(ha);
4122 ql_free_minidump_buffer(ha);
4128 ql_minidump(qla_host_t *ha)
4130 if (!ha->hw.mdump_init)
4133 if (ha->hw.mdump_done)
4136 ha->hw.mdump_start_seq_index = ql_stop_sequence(ha);
4138 bzero(ha->hw.mdump_buffer, ha->hw.mdump_buffer_size);
4139 bzero(ha->hw.mdump_template, ha->hw.mdump_template_size);
4141 bcopy(ha->hw.dma_buf.minidump.dma_b, ha->hw.mdump_template,
4142 ha->hw.mdump_template_size);
4144 ql_parse_template(ha);
4146 ql_start_sequence(ha, ha->hw.mdump_start_seq_index);
4148 ha->hw.mdump_done = 1;
4158 ql_entry_err_chk(ql_minidump_entry_t *entry, uint32_t esize)
4160 if (esize != entry->hdr.entry_capture_size) {
4161 entry->hdr.entry_capture_size = esize;
4162 entry->hdr.driver_flags |= QL_DBG_SIZE_ERR_FLAG;
4169 ql_parse_template(qla_host_t *ha)
4171 uint32_t num_of_entries, buff_level, e_cnt, esize;
4172 uint32_t end_cnt, rv = 0;
4173 char *dump_buff, *dbuff;
4174 int sane_start = 0, sane_end = 0;
4175 ql_minidump_template_hdr_t *template_hdr;
4176 ql_minidump_entry_t *entry;
4177 uint32_t capture_mask;
4180 /* Setup parameters */
4181 template_hdr = (ql_minidump_template_hdr_t *)ha->hw.mdump_template;
4183 if (template_hdr->entry_type == TLHDR)
4186 dump_buff = (char *) ha->hw.mdump_buffer;
4188 num_of_entries = template_hdr->num_of_entries;
4190 entry = (ql_minidump_entry_t *) ((char *)template_hdr
4191 + template_hdr->first_entry_offset );
4193 template_hdr->saved_state_array[QL_OCM0_ADDR_INDX] =
4194 template_hdr->ocm_window_array[ha->pci_func];
4195 template_hdr->saved_state_array[QL_PCIE_FUNC_INDX] = ha->pci_func;
4197 capture_mask = ha->hw.mdump_capture_mask;
4198 dump_size = ha->hw.mdump_buffer_size;
4200 template_hdr->driver_capture_mask = capture_mask;
4202 QL_DPRINT80(ha, (ha->pci_dev,
4203 "%s: sane_start = %d num_of_entries = %d "
4204 "capture_mask = 0x%x dump_size = %d \n",
4205 __func__, sane_start, num_of_entries, capture_mask, dump_size));
4207 for (buff_level = 0, e_cnt = 0; e_cnt < num_of_entries; e_cnt++) {
4210 * If the capture_mask of the entry does not match the capture mask,
4211 * skip the entry after marking the driver_flags indicator.
4214 if (!(entry->hdr.entry_capture_mask & capture_mask)) {
4216 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4217 entry = (ql_minidump_entry_t *) ((char *) entry
4218 + entry->hdr.entry_size);
4223 * This is ONLY needed in implementations where
4224 * the capture buffer allocated is too small to capture
4225 * all of the required entries for a given capture mask.
4226 * We need to empty the buffer contents to a file,
4227 * if possible, before processing the next entry.
4228 * If the buff_full_flag is set, no further capture will happen
4229 * and all remaining non-control entries will be skipped.
4231 if (entry->hdr.entry_capture_size != 0) {
4232 if ((buff_level + entry->hdr.entry_capture_size) >
4234 /* Try to recover by emptying buffer to file */
4235 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4236 entry = (ql_minidump_entry_t *) ((char *) entry
4237 + entry->hdr.entry_size);
4243 * Decode the entry type and process it accordingly
4246 switch (entry->hdr.entry_type) {
4251 if (sane_end == 0) {
4258 dbuff = dump_buff + buff_level;
4259 esize = ql_rdcrb(ha, (void *)entry, (void *)dbuff);
4260 ql_entry_err_chk(entry, esize);
4261 buff_level += esize;
4265 dbuff = dump_buff + buff_level;
4266 esize = ql_pollrd(ha, (void *)entry, (void *)dbuff);
4267 ql_entry_err_chk(entry, esize);
4268 buff_level += esize;
4272 dbuff = dump_buff + buff_level;
4273 esize = ql_pollrd_modify_write(ha, (void *)entry,
4275 ql_entry_err_chk(entry, esize);
4276 buff_level += esize;
4283 dbuff = dump_buff + buff_level;
4284 esize = ql_L2Cache(ha, (void *)entry, (void *)dbuff);
4286 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4288 ql_entry_err_chk(entry, esize);
4289 buff_level += esize;
4295 dbuff = dump_buff + buff_level;
4296 esize = ql_L1Cache(ha, (void *)entry, (void *)dbuff);
4297 ql_entry_err_chk(entry, esize);
4298 buff_level += esize;
4302 dbuff = dump_buff + buff_level;
4303 esize = ql_rdocm(ha, (void *)entry, (void *)dbuff);
4304 ql_entry_err_chk(entry, esize);
4305 buff_level += esize;
4309 dbuff = dump_buff + buff_level;
4310 esize = ql_rdmem(ha, (void *)entry, (void *)dbuff);
4311 ql_entry_err_chk(entry, esize);
4312 buff_level += esize;
4317 dbuff = dump_buff + buff_level;
4318 esize = ql_rdrom(ha, (void *)entry, (void *)dbuff);
4319 ql_entry_err_chk(entry, esize);
4320 buff_level += esize;
4324 dbuff = dump_buff + buff_level;
4325 esize = ql_rdmux(ha, (void *)entry, (void *)dbuff);
4326 ql_entry_err_chk(entry, esize);
4327 buff_level += esize;
4331 dbuff = dump_buff + buff_level;
4332 esize = ql_rdmux2(ha, (void *)entry, (void *)dbuff);
4333 ql_entry_err_chk(entry, esize);
4334 buff_level += esize;
4338 dbuff = dump_buff + buff_level;
4339 esize = ql_rdqueue(ha, (void *)entry, (void *)dbuff);
4340 ql_entry_err_chk(entry, esize);
4341 buff_level += esize;
4345 if ((rv = ql_cntrl(ha, template_hdr, (void *)entry))) {
4346 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4350 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4353 /* next entry in the template */
4354 entry = (ql_minidump_entry_t *) ((char *) entry
4355 + entry->hdr.entry_size);
4358 if (!sane_start || (sane_end > 1)) {
4359 device_printf(ha->pci_dev,
4360 "\n%s: Template configuration error. Check Template\n",
4364 QL_DPRINT80(ha, (ha->pci_dev, "%s: Minidump num of entries = %d\n",
4365 __func__, template_hdr->num_of_entries));
4371 * Read CRB operation.
4374 ql_rdcrb(qla_host_t *ha, ql_minidump_entry_rdcrb_t * crb_entry,
4375 uint32_t * data_buff)
4379 uint32_t op_count, addr, stride, value = 0;
4381 addr = crb_entry->addr;
4382 op_count = crb_entry->op_count;
4383 stride = crb_entry->addr_stride;
4385 for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
4387 ret = ql_rdwr_indreg32(ha, addr, &value, 1);
4392 *data_buff++ = addr;
4393 *data_buff++ = value;
4394 addr = addr + stride;
4398 * for testing purposes we return the amount of data written
4400 return (op_count * (2 * sizeof(uint32_t)));
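/*
 * Every entry handler returns the number of bytes it wrote into the capture
 * buffer; ql_parse_template() advances buff_level by that amount, and
 * ql_entry_err_chk() flags any entry whose actual size disagrees with the
 * template's entry_capture_size.
 */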
4408 ql_L2Cache(qla_host_t *ha, ql_minidump_entry_cache_t *cacheEntry,
4409 uint32_t * data_buff)
4415 uint32_t read_value;
4416 uint32_t addr, read_addr, cntrl_addr, tag_reg_addr, cntl_value_w;
4417 uint32_t tag_value, read_cnt;
4418 volatile uint8_t cntl_value_r;
4422 loop_cnt = cacheEntry->op_count;
4424 read_addr = cacheEntry->read_addr;
4425 cntrl_addr = cacheEntry->control_addr;
4426 cntl_value_w = (uint32_t) cacheEntry->write_value;
4428 tag_reg_addr = cacheEntry->tag_reg_addr;
4430 tag_value = cacheEntry->init_tag_value;
4431 read_cnt = cacheEntry->read_addr_cnt;
4433 for (i = 0; i < loop_cnt; i++) {
4435 ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
4439 if (cacheEntry->write_value != 0) {
4441 ret = ql_rdwr_indreg32(ha, cntrl_addr,
4447 if (cacheEntry->poll_mask != 0) {
4449 timeout = cacheEntry->poll_wait;
4451 ret = ql_rdwr_indreg32(ha, cntrl_addr, &data, 1);
4455 cntl_value_r = (uint8_t)data;
4457 while ((cntl_value_r & cacheEntry->poll_mask) != 0) {
4460 qla_mdelay(__func__, 1);
4465 ret = ql_rdwr_indreg32(ha, cntrl_addr,
4470 cntl_value_r = (uint8_t)data;
4473 /* Report a timeout error:
4474 * the core dump capture failed.
4475 * Skip the remaining entries,
4476 * write the buffer out to a file, and
4477 * use driver specific fields in the template header
4478 * to report this error.
4485 for (k = 0; k < read_cnt; k++) {
4487 ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4491 *data_buff++ = read_value;
4492 addr += cacheEntry->read_addr_stride;
4495 tag_value += cacheEntry->tag_value_stride;
4498 return (read_cnt * loop_cnt * sizeof(uint32_t));
4506 ql_L1Cache(qla_host_t *ha,
4507 ql_minidump_entry_cache_t *cacheEntry,
4508 uint32_t *data_buff)
4514 uint32_t read_value;
4515 uint32_t addr, read_addr, cntrl_addr, tag_reg_addr;
4516 uint32_t tag_value, read_cnt;
4517 uint32_t cntl_value_w;
4519 loop_cnt = cacheEntry->op_count;
4521 read_addr = cacheEntry->read_addr;
4522 cntrl_addr = cacheEntry->control_addr;
4523 cntl_value_w = (uint32_t) cacheEntry->write_value;
4525 tag_reg_addr = cacheEntry->tag_reg_addr;
4527 tag_value = cacheEntry->init_tag_value;
4528 read_cnt = cacheEntry->read_addr_cnt;
4530 for (i = 0; i < loop_cnt; i++) {
4532 ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
4536 ret = ql_rdwr_indreg32(ha, cntrl_addr, &cntl_value_w, 0);
4541 for (k = 0; k < read_cnt; k++) {
4543 ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4547 *data_buff++ = read_value;
4548 addr += cacheEntry->read_addr_stride;
4551 tag_value += cacheEntry->tag_value_stride;
4554 return (read_cnt * loop_cnt * sizeof(uint32_t));
4558 * Reading OCM memory
4562 ql_rdocm(qla_host_t *ha,
4563 ql_minidump_entry_rdocm_t *ocmEntry,
4564 uint32_t *data_buff)
4567 volatile uint32_t addr;
4568 volatile uint32_t value;
4570 addr = ocmEntry->read_addr;
4571 loop_cnt = ocmEntry->op_count;
4573 for (i = 0; i < loop_cnt; i++) {
4574 value = READ_REG32(ha, addr);
4575 *data_buff++ = value;
4576 addr += ocmEntry->read_addr_stride;
4578 return (loop_cnt * sizeof(value));
4586 ql_rdmem(qla_host_t *ha,
4587 ql_minidump_entry_rdmem_t *mem_entry,
4588 uint32_t *data_buff)
4592 volatile uint32_t addr;
4593 q80_offchip_mem_val_t val;
4595 addr = mem_entry->read_addr;
4597 /* size in bytes / 16 */
4598 loop_cnt = mem_entry->read_data_size / (sizeof(uint32_t) * 4);
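/*
 * Off-chip memory is read 16 bytes (four 32-bit words) per
 * ql_rdwr_offchip_mem() call, hence the division above; the four words of
 * each q80_offchip_mem_val_t are copied out in order.
 */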
4600 for (i = 0; i < loop_cnt; i++) {
4602 ret = ql_rdwr_offchip_mem(ha, (addr & 0x0ffffffff), &val, 1);
4606 *data_buff++ = val.data_lo;
4607 *data_buff++ = val.data_hi;
4608 *data_buff++ = val.data_ulo;
4609 *data_buff++ = val.data_uhi;
4611 addr += (sizeof(uint32_t) * 4);
4614 return (loop_cnt * (sizeof(uint32_t) * 4));
4622 ql_rdrom(qla_host_t *ha,
4623 ql_minidump_entry_rdrom_t *romEntry,
4624 uint32_t *data_buff)
4631 addr = romEntry->read_addr;
4632 loop_cnt = romEntry->read_data_size; /* This is size in bytes */
4633 loop_cnt /= sizeof(value);
4635 for (i = 0; i < loop_cnt; i++) {
4637 ret = ql_rd_flash32(ha, addr, &value);
4641 *data_buff++ = value;
4642 addr += sizeof(value);
4645 return (loop_cnt * sizeof(value));
4653 ql_rdmux(qla_host_t *ha,
4654 ql_minidump_entry_mux_t *muxEntry,
4655 uint32_t *data_buff)
4659 uint32_t read_value, sel_value;
4660 uint32_t read_addr, select_addr;
4662 select_addr = muxEntry->select_addr;
4663 sel_value = muxEntry->select_value;
4664 read_addr = muxEntry->read_addr;
4666 for (loop_cnt = 0; loop_cnt < muxEntry->op_count; loop_cnt++) {
4668 ret = ql_rdwr_indreg32(ha, select_addr, &sel_value, 0);
4672 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
4676 *data_buff++ = sel_value;
4677 *data_buff++ = read_value;
4679 sel_value += muxEntry->select_value_stride;
4682 return (loop_cnt * (2 * sizeof(uint32_t)));
4686 ql_rdmux2(qla_host_t *ha,
4687 ql_minidump_entry_mux2_t *muxEntry,
4688 uint32_t *data_buff)
4693 uint32_t select_addr_1, select_addr_2;
4694 uint32_t select_value_1, select_value_2;
4695 uint32_t select_value_count, select_value_mask;
4696 uint32_t read_addr, read_value;
4698 select_addr_1 = muxEntry->select_addr_1;
4699 select_addr_2 = muxEntry->select_addr_2;
4700 select_value_1 = muxEntry->select_value_1;
4701 select_value_2 = muxEntry->select_value_2;
4702 select_value_count = muxEntry->select_value_count;
4703 select_value_mask = muxEntry->select_value_mask;
4705 read_addr = muxEntry->read_addr;
4707 for (loop_cnt = 0; loop_cnt < muxEntry->select_value_count;
4710 uint32_t temp_sel_val;
4712 ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_1, 0);
4716 temp_sel_val = select_value_1 & select_value_mask;
4718 ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
4722 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
4726 *data_buff++ = temp_sel_val;
4727 *data_buff++ = read_value;
4729 ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_2, 0);
4733 temp_sel_val = select_value_2 & select_value_mask;
4735 ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
4739 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
4743 *data_buff++ = temp_sel_val;
4744 *data_buff++ = read_value;
4746 select_value_1 += muxEntry->select_value_stride;
4747 select_value_2 += muxEntry->select_value_stride;
4750 return (loop_cnt * (4 * sizeof(uint32_t)));
4754 * Handling Queue State Reads.
4758 ql_rdqueue(qla_host_t *ha,
4759 ql_minidump_entry_queue_t *queueEntry,
4760 uint32_t *data_buff)
4764 uint32_t read_value;
4765 uint32_t read_addr, read_stride, select_addr;
4766 uint32_t queue_id, read_cnt;
4768 read_cnt = queueEntry->read_addr_cnt;
4769 read_stride = queueEntry->read_addr_stride;
4770 select_addr = queueEntry->select_addr;
4772 for (loop_cnt = 0, queue_id = 0; loop_cnt < queueEntry->op_count;
4775 ret = ql_rdwr_indreg32(ha, select_addr, &queue_id, 0);
4779 read_addr = queueEntry->read_addr;
4781 for (k = 0; k < read_cnt; k++) {
4783 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
4787 *data_buff++ = read_value;
4788 read_addr += read_stride;
4791 queue_id += queueEntry->queue_id_stride;
4794 return (loop_cnt * (read_cnt * sizeof(uint32_t)));
4798 * Handling control entries.
4802 ql_cntrl(qla_host_t *ha,
4803 ql_minidump_template_hdr_t *template_hdr,
4804 ql_minidump_entry_cntrl_t *crbEntry)
4808 uint32_t opcode, read_value, addr, entry_addr;
4811 entry_addr = crbEntry->addr;
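/*
 * A control entry is a small opcode-driven program: each iteration tests
 * the WR, RW, AND, OR, POLL, RDSTATE, WRSTATE and MDSTATE bits in turn,
 * performs the corresponding register operation, and clears the bit from
 * its local copy of the opcode.
 */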
4813 for (count = 0; count < crbEntry->op_count; count++) {
4814 opcode = crbEntry->opcode;
4816 if (opcode & QL_DBG_OPCODE_WR) {
4818 ret = ql_rdwr_indreg32(ha, entry_addr,
4819 &crbEntry->value_1, 0);
4823 opcode &= ~QL_DBG_OPCODE_WR;
4826 if (opcode & QL_DBG_OPCODE_RW) {
4828 ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
4832 ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
4836 opcode &= ~QL_DBG_OPCODE_RW;
4839 if (opcode & QL_DBG_OPCODE_AND) {
4841 ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
4845 read_value &= crbEntry->value_2;
4846 opcode &= ~QL_DBG_OPCODE_AND;
4848 if (opcode & QL_DBG_OPCODE_OR) {
4849 read_value |= crbEntry->value_3;
4850 opcode &= ~QL_DBG_OPCODE_OR;
4853 ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
4858 if (opcode & QL_DBG_OPCODE_OR) {
4860 ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
4864 read_value |= crbEntry->value_3;
4866 ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
4870 opcode &= ~QL_DBG_OPCODE_OR;
4873 if (opcode & QL_DBG_OPCODE_POLL) {
4875 opcode &= ~QL_DBG_OPCODE_POLL;
4876 timeout = crbEntry->poll_timeout;
4879 ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4883 while ((read_value & crbEntry->value_2)
4884 != crbEntry->value_1) {
4887 qla_mdelay(__func__, 1);
4892 ret = ql_rdwr_indreg32(ha, addr,
4900 * Report a timeout error:
4901 * the core dump capture failed.
4902 * Skip the remaining entries,
4903 * write the buffer out to a file, and
4904 * use driver specific fields in the template header
4905 * to report this error.
4911 if (opcode & QL_DBG_OPCODE_RDSTATE) {
4913 * decide which address to use.
4915 if (crbEntry->state_index_a) {
4916 addr = template_hdr->saved_state_array[
4917 crbEntry->state_index_a];
4922 ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4926 template_hdr->saved_state_array[crbEntry->state_index_v]
4928 opcode &= ~QL_DBG_OPCODE_RDSTATE;
4931 if (opcode & QL_DBG_OPCODE_WRSTATE) {
4933 * decide which value to use.
4935 if (crbEntry->state_index_v) {
4936 read_value = template_hdr->saved_state_array[
4937 crbEntry->state_index_v];
4939 read_value = crbEntry->value_1;
4942 * decide which address to use.
4944 if (crbEntry->state_index_a) {
4945 addr = template_hdr->saved_state_array[
4946 crbEntry->state_index_a];
4951 ret = ql_rdwr_indreg32(ha, addr, &read_value, 0);
4955 opcode &= ~QL_DBG_OPCODE_WRSTATE;
4958 if (opcode & QL_DBG_OPCODE_MDSTATE) {
4959 /* Read value from saved state using index */
4960 read_value = template_hdr->saved_state_array[
4961 crbEntry->state_index_v];
4963 read_value <<= crbEntry->shl; /* Shift left operation */
4964 read_value >>= crbEntry->shr; /* Shift right operation */
4966 if (crbEntry->value_2) {
4967 /* check if AND mask is provided */
4968 read_value &= crbEntry->value_2;
4971 read_value |= crbEntry->value_3; /* OR operation */
4972 read_value += crbEntry->value_1; /* increment op */
4974 /* Write value back to state area. */
4976 template_hdr->saved_state_array[crbEntry->state_index_v]
4978 opcode &= ~QL_DBG_OPCODE_MDSTATE;
4981 entry_addr += crbEntry->addr_stride;
4988 * Handling rd poll entry.
4992 ql_pollrd(qla_host_t *ha, ql_minidump_entry_pollrd_t *entry,
4993 uint32_t *data_buff)
4997 uint32_t op_count, select_addr, select_value_stride, select_value;
4998 uint32_t read_addr, poll, mask, data_size, data;
4999 uint32_t wait_count = 0;
5001 select_addr = entry->select_addr;
5002 read_addr = entry->read_addr;
5003 select_value = entry->select_value;
5004 select_value_stride = entry->select_value_stride;
5005 op_count = entry->op_count;
5008 data_size = entry->data_size;
5010 for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
5012 ret = ql_rdwr_indreg32(ha, select_addr, &select_value, 0);
5018 while (wait_count < poll) {
5022 ret = ql_rdwr_indreg32(ha, select_addr, &temp, 1);
5026 if ((temp & mask) != 0) {
5032 if (wait_count == poll) {
5033 device_printf(ha->pci_dev,
5034 "%s: Error in processing entry\n", __func__);
5035 device_printf(ha->pci_dev,
5036 "%s: wait_count <0x%x> poll <0x%x>\n",
5037 __func__, wait_count, poll);
5041 ret = ql_rdwr_indreg32(ha, read_addr, &data, 1);
5045 *data_buff++ = select_value;
5046 *data_buff++ = data;
5047 select_value = select_value + select_value_stride;
5051 * for testing purposes we return the amount of data written
5053 return (loop_cnt * (2 * sizeof(uint32_t)));
5058 * Handling rd modify write poll entry.
5062 ql_pollrd_modify_write(qla_host_t *ha,
5063 ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
5064 uint32_t *data_buff)
5067 uint32_t addr_1, addr_2, value_1, value_2, data;
5068 uint32_t poll, mask, data_size, modify_mask;
5069 uint32_t wait_count = 0;
5071 addr_1 = entry->addr_1;
5072 addr_2 = entry->addr_2;
5073 value_1 = entry->value_1;
5074 value_2 = entry->value_2;
5078 modify_mask = entry->modify_mask;
5079 data_size = entry->data_size;
5082 ret = ql_rdwr_indreg32(ha, addr_1, &value_1, 0);
5087 while (wait_count < poll) {
5091 ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
5095 if ((temp & mask) != 0) {
5101 if (wait_count == poll) {
5102 device_printf(ha->pci_dev, "%s: Error in processing entry\n",
5106 ret = ql_rdwr_indreg32(ha, addr_2, &data, 1);
5110 data = (data & modify_mask);
5112 ret = ql_rdwr_indreg32(ha, addr_2, &data, 0);
5116 ret = ql_rdwr_indreg32(ha, addr_1, &value_2, 0);
5122 while (wait_count < poll) {
5126 ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
5130 if ((temp & mask) != 0) {
5135 *data_buff++ = addr_2;
5136 *data_buff++ = data;
5140 * for testing purposes we return the amount of data written
5142 return (2 * sizeof(uint32_t));