2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2013-2014 Qlogic Corporation
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
21 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 * POSSIBILITY OF SUCH DAMAGE.
32 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
33 * Content: Contains Hardware dependent functions
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
43 #include "qls_inline.h"
51 static int qls_wait_for_mac_proto_idx_ready(qla_host_t *ha, uint32_t op);
52 static int qls_config_unicast_mac_addr(qla_host_t *ha, uint32_t add_mac);
53 static int qls_config_mcast_mac_addr(qla_host_t *ha, uint8_t *mac_addr,
54 uint32_t add_mac, uint32_t index);
56 static int qls_init_rss(qla_host_t *ha);
57 static int qls_init_comp_queue(qla_host_t *ha, int cid);
58 static int qls_init_work_queue(qla_host_t *ha, int wid);
59 static int qls_init_fw_routing_table(qla_host_t *ha);
60 static int qls_hw_add_all_mcast(qla_host_t *ha);
61 static int qls_hw_add_mcast(qla_host_t *ha, uint8_t *mta);
62 static int qls_hw_del_mcast(qla_host_t *ha, uint8_t *mta);
63 static int qls_wait_for_flash_ready(qla_host_t *ha);
65 static int qls_sem_lock(qla_host_t *ha, uint32_t mask, uint32_t value);
66 static void qls_sem_unlock(qla_host_t *ha, uint32_t mask);
68 static void qls_free_tx_dma(qla_host_t *ha);
69 static int qls_alloc_tx_dma(qla_host_t *ha);
70 static void qls_free_rx_dma(qla_host_t *ha);
71 static int qls_alloc_rx_dma(qla_host_t *ha);
72 static void qls_free_mpi_dma(qla_host_t *ha);
73 static int qls_alloc_mpi_dma(qla_host_t *ha);
74 static void qls_free_rss_dma(qla_host_t *ha);
75 static int qls_alloc_rss_dma(qla_host_t *ha);
77 static int qls_flash_validate(qla_host_t *ha, const char *signature);
80 static int qls_wait_for_proc_addr_ready(qla_host_t *ha);
81 static int qls_proc_addr_rd_reg(qla_host_t *ha, uint32_t addr_module,
82 uint32_t reg, uint32_t *data);
83 static int qls_proc_addr_wr_reg(qla_host_t *ha, uint32_t addr_module,
84 uint32_t reg, uint32_t data);
86 static int qls_hw_reset(qla_host_t *ha);
89 * MPI Related Functions
91 static int qls_mbx_cmd(qla_host_t *ha, uint32_t *in_mbx, uint32_t i_count,
92 uint32_t *out_mbx, uint32_t o_count);
93 static int qls_mbx_set_mgmt_ctrl(qla_host_t *ha, uint32_t t_ctrl);
94 static int qls_mbx_get_mgmt_ctrl(qla_host_t *ha, uint32_t *t_status);
95 static void qls_mbx_get_link_status(qla_host_t *ha);
96 static void qls_mbx_about_fw(qla_host_t *ha);
/*
 * qls_get_msix_count
 *  Returns the number of MSI-X vectors the driver needs: one per
 *  completion (Rx) ring.
 */
99 qls_get_msix_count(qla_host_t *ha)
101 return (ha->num_rx_rings);
/*
 * qls_syctl_mpi_dump
 *  Sysctl handler: a write to the "mpi_dump" node triggers an MPI core
 *  dump via qls_mpi_core_dump().  arg1 carries the per-adapter softc.
 */
105 qls_syctl_mpi_dump(SYSCTL_HANDLER_ARGS)
110 err = sysctl_handle_int(oidp, &ret, 0, req);
/* Bail out on error or on a read-only access (no new value supplied). */
112 if (err || !req->newptr)
117 ha = (qla_host_t *)arg1;
118 qls_mpi_core_dump(ha);
/*
 * qls_syctl_link_status
 *  Sysctl handler: a write to the "link_status" node queries the firmware
 *  for link status and firmware version via the mailbox interface.
 */
124 qls_syctl_link_status(SYSCTL_HANDLER_ARGS)
129 err = sysctl_handle_int(oidp, &ret, 0, req);
/* Bail out on error or on a read-only access (no new value supplied). */
131 if (err || !req->newptr)
136 ha = (qla_host_t *)arg1;
137 qls_mbx_get_link_status(ha);
138 qls_mbx_about_fw(ha);
/*
 * qls_hw_add_sysctls
 *  Fixes the ring counts at their compile-time maxima and publishes the
 *  hardware-related sysctl nodes for this device: two read-only ring-count
 *  values and two write-triggered procedure nodes (MPI dump, link status).
 */
144 qls_hw_add_sysctls(qla_host_t *ha)
150 ha->num_rx_rings = MAX_RX_RINGS; ha->num_tx_rings = MAX_TX_RINGS;
152 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
153 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
154 OID_AUTO, "num_rx_rings", CTLFLAG_RD, &ha->num_rx_rings,
155 ha->num_rx_rings, "Number of Completion Queues");
157 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
158 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
159 OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->num_tx_rings,
160 ha->num_tx_rings, "Number of Transmit Rings");
162 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
163 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
164 OID_AUTO, "mpi_dump", CTLTYPE_INT | CTLFLAG_RW,
166 qls_syctl_mpi_dump, "I", "MPI Dump");
168 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
169 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
170 OID_AUTO, "link_status", CTLTYPE_INT | CTLFLAG_RW,
172 qls_syctl_link_status, "I", "Link Status");
177 * Function: Frees the DMA'able memory allocated in qls_alloc_dma()
/*
 * NOTE(review): only the RSS and MPI frees are visible here; the Tx/Rx
 * DMA presumably gets freed as well — confirm against the full source.
 */
180 qls_free_dma(qla_host_t *ha)
182 qls_free_rss_dma(ha);
183 qls_free_mpi_dma(ha);
190 * Name: qls_alloc_dma
191 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
/*
 * Allocates in order: Rx, Tx, MPI, RSS.  On a later failure the earlier
 * allocations are unwound (the visible unwind frees mpi on rss failure).
 */
194 qls_alloc_dma(qla_host_t *ha)
196 if (qls_alloc_rx_dma(ha))
199 if (qls_alloc_tx_dma(ha)) {
204 if (qls_alloc_mpi_dma(ha)) {
210 if (qls_alloc_rss_dma(ha)) {
211 qls_free_mpi_dma(ha);
/*
 * qls_wait_for_mac_proto_idx_ready
 *  Polls the MAC-protocol address-index register until the requested
 *  status bit ('op') is reflected.  On timeout it flags the adapter for
 *  recovery (qla_initiate_recovery).
 */
222 qls_wait_for_mac_proto_idx_ready(qla_host_t *ha, uint32_t op)
228 data32 = READ_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX);
/* Poll loop exhausted: request hardware recovery. */
235 ha->qla_initiate_recovery = 1;
240 * Name: qls_config_unicast_mac_addr
241 * Function: binds/unbinds a unicast MAC address to the interface.
/*
 * qls_config_unicast_mac_addr
 *  Programs the port's unicast MAC address into the CAM through the
 *  MAC-protocol address-index/data register pair, under the MAC/SerDes
 *  semaphore.  A CAM entry is written as three words: MAC lower 32 bits,
 *  MAC upper 16 bits, and an output/routing word.
 *  NOTE(review): 'add_mac' presumably selects bind vs. unbind; the branch
 *  that uses it is not visible in this extract — confirm.
 */
244 qls_config_unicast_mac_addr(qla_host_t *ha, uint32_t add_mac)
247 uint32_t mac_upper = 0;
248 uint32_t mac_lower = 0;
249 uint32_t value = 0, index;
/* Serialize MAC/SerDes register access across PCI functions. */
251 if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_MAC_SERDES,
252 Q81_CTL_SEM_SET_MAC_SERDES)) {
253 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
/* Split the 6-byte MAC into a 16-bit upper and a 32-bit lower word. */
258 mac_upper = (ha->mac_addr[0] << 8) | ha->mac_addr[1];
259 mac_lower = (ha->mac_addr[2] << 24) | (ha->mac_addr[3] << 16) |
260 (ha->mac_addr[4] << 8) | ha->mac_addr[5];
262 ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
264 goto qls_config_unicast_mac_addr_exit;
/* Each PCI function owns its own 128-entry slice of the CAM. */
266 index = 128 * (ha->pci_func & 0x1); /* index */
268 value = (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
269 Q81_CTL_MAC_PROTO_AI_TYPE_CAM_MAC;
/* Word 0: lower 32 bits of the MAC address. */
271 WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
272 WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_lower);
274 ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
276 goto qls_config_unicast_mac_addr_exit;
/* Word 1 (offset 0x1): upper 16 bits of the MAC address. */
278 value = (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
279 Q81_CTL_MAC_PROTO_AI_TYPE_CAM_MAC | 0x1;
281 WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
282 WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_upper);
284 ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
286 goto qls_config_unicast_mac_addr_exit;
/* Word 2 (offset 0x2): route CAM hits to the NIC for this function, CQ 0. */
288 value = (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
289 Q81_CTL_MAC_PROTO_AI_TYPE_CAM_MAC | 0x2;
291 WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
293 value = Q81_CAM_MAC_OFF2_ROUTE_NIC |
294 ((ha->pci_func & 0x1) << Q81_CAM_MAC_OFF2_FUNC_SHIFT) |
295 (0 << Q81_CAM_MAC_OFF2_CQID_SHIFT);
297 WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, value);
/* Common exit: always release the semaphore taken above. */
299 qls_config_unicast_mac_addr_exit:
300 qls_sem_unlock(ha, Q81_CTL_SEM_MASK_MAC_SERDES);
305 * Name: qls_config_mcast_mac_addr
306 * Function: binds/unbinds a multicast MAC address to the interface.
/*
 * qls_config_mcast_mac_addr
 *  Writes (or clears) one multicast MAC entry at 'index' in the hardware
 *  multicast table via the MAC-protocol address-index/data registers,
 *  under the MAC/SerDes semaphore.  The entry takes two data words
 *  (MAC lower 32 bits, MAC upper 16 bits).
 *  NOTE(review): 'add_mac' presumably distinguishes add vs. delete; the
 *  branch using it is not visible in this extract — confirm.
 */
309 qls_config_mcast_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac,
313 uint32_t mac_upper = 0;
314 uint32_t mac_lower = 0;
/* Serialize MAC/SerDes register access across PCI functions. */
317 if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_MAC_SERDES,
318 Q81_CTL_SEM_SET_MAC_SERDES)) {
319 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
/* Split the 6-byte MAC into a 16-bit upper and a 32-bit lower word. */
324 mac_upper = (mac_addr[0] << 8) | mac_addr[1];
325 mac_lower = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
326 (mac_addr[4] << 8) | mac_addr[5];
328 ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
330 goto qls_config_mcast_mac_addr_exit;
/* Word 0: lower 32 bits of the multicast MAC, entry enabled (AI_E). */
332 value = Q81_CTL_MAC_PROTO_AI_E |
333 (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
334 Q81_CTL_MAC_PROTO_AI_TYPE_MCAST ;
336 WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
337 WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_lower);
339 ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
341 goto qls_config_mcast_mac_addr_exit;
/* Word 1 (offset 0x1): upper 16 bits of the multicast MAC. */
343 value = Q81_CTL_MAC_PROTO_AI_E |
344 (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
345 Q81_CTL_MAC_PROTO_AI_TYPE_MCAST | 0x1;
347 WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
348 WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_upper);
/* Common exit: always release the semaphore taken above. */
350 qls_config_mcast_mac_addr_exit:
351 qls_sem_unlock(ha, Q81_CTL_SEM_MASK_MAC_SERDES);
357 * Name: qls_set_mac_rcv_mode
358 * Function: Enable/Disable AllMulticast and Promiscuous Modes.
/*
 * qls_wait_for_route_idx_ready
 *  Polls the routing-index register until the requested status bit ('op')
 *  is reflected.  On timeout it flags the adapter for recovery.
 *  (The header comment above names qls_set_mac_rcv_mode; that function is
 *  not visible in this extract.)
 */
361 qls_wait_for_route_idx_ready(qla_host_t *ha, uint32_t op)
367 data32 = READ_REG32(ha, Q81_CTL_ROUTING_INDEX);
/* Poll loop exhausted: request hardware recovery. */
374 ha->qla_initiate_recovery = 1;
/*
 * qls_load_route_idx_reg
 *  Writes one routing-table entry: waits for the routing-index register to
 *  accept a write (Q81_CTL_RI_MW), then programs the index and data
 *  registers.  Caller must already hold the routing-index semaphore.
 */
379 qls_load_route_idx_reg(qla_host_t *ha, uint32_t index, uint32_t data)
383 ret = qls_wait_for_route_idx_ready(ha, Q81_CTL_RI_MW);
386 device_printf(ha->pci_dev, "%s: [0x%08x, 0x%08x] failed\n",
387 __func__, index, data);
388 goto qls_load_route_idx_reg_exit;
392 WRITE_REG32(ha, Q81_CTL_ROUTING_INDEX, index);
393 WRITE_REG32(ha, Q81_CTL_ROUTING_DATA, data);
395 qls_load_route_idx_reg_exit:
/*
 * qls_load_route_idx_reg_locked
 *  Semaphore-taking wrapper around qls_load_route_idx_reg(): acquires the
 *  routing-index/data register semaphore, writes the entry, releases it.
 */
400 qls_load_route_idx_reg_locked(qla_host_t *ha, uint32_t index, uint32_t data)
404 if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG,
405 Q81_CTL_SEM_SET_RIDX_DATAREG)) {
406 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
410 ret = qls_load_route_idx_reg(ha, index, data);
412 qls_sem_unlock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG);
/*
 * qls_clear_routing_table
 *  Zeroes all 16 NIC-queue-mask routing entries (indices 0..15, data 0)
 *  under the routing-index/data register semaphore.
 */
418 qls_clear_routing_table(qla_host_t *ha)
422 if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG,
423 Q81_CTL_SEM_SET_RIDX_DATAREG)) {
424 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
/* Slot index lives in bits 15:8 of the routing-index word. */
428 for (i = 0; i < 16; i++) {
429 ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_TYPE_NICQMASK|
430 (i << 8) | Q81_CTL_RI_DST_DFLTQ), 0);
435 qls_sem_unlock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG);
/*
 * qls_set_promisc
 *  Enables promiscuous mode: programs the promiscuous routing slot to send
 *  every valid packet to the default completion queue.
 */
441 qls_set_promisc(qla_host_t *ha)
445 ret = qls_load_route_idx_reg_locked(ha,
446 (Q81_CTL_RI_E | Q81_CTL_RI_TYPE_NICQMASK |
447 Q81_CTL_RI_IDX_PROMISCUOUS | Q81_CTL_RI_DST_DFLTQ),
448 Q81_CTL_RD_VALID_PKT);
/*
 * qls_reset_promisc
 *  Disables promiscuous mode by clearing the promiscuous routing slot
 *  (enable bit off, data 0).
 */
453 qls_reset_promisc(qla_host_t *ha)
457 ret = qls_load_route_idx_reg_locked(ha, (Q81_CTL_RI_TYPE_NICQMASK |
458 Q81_CTL_RI_IDX_PROMISCUOUS | Q81_CTL_RI_DST_DFLTQ), 0);
/*
 * qls_set_allmulti
 *  Enables all-multicast reception: programs the all-multi routing slot to
 *  route to the default completion queue.
 */
463 qls_set_allmulti(qla_host_t *ha)
467 ret = qls_load_route_idx_reg_locked(ha,
468 (Q81_CTL_RI_E | Q81_CTL_RI_TYPE_NICQMASK |
469 Q81_CTL_RI_IDX_ALLMULTI | Q81_CTL_RI_DST_DFLTQ),
/*
 * qls_reset_allmulti
 *  Disables all-multicast reception by clearing the all-multi routing slot
 *  (enable bit off, data 0).
 */
475 qls_reset_allmulti(qla_host_t *ha)
479 ret = qls_load_route_idx_reg_locked(ha, (Q81_CTL_RI_TYPE_NICQMASK |
480 Q81_CTL_RI_IDX_ALLMULTI | Q81_CTL_RI_DST_DFLTQ), 0);
/*
 * qls_init_fw_routing_table
 *  Builds the firmware packet-routing table: clears it, then installs
 *  entries for (1) errored frames -> drop, (2) broadcast -> default CQ,
 *  (3) RSS match -> RSS (only when multiple Rx rings exist),
 *  (4) registered multicast match -> default CQ, (5) CAM hit -> default CQ.
 *  All entries are written under the routing-index/data semaphore.
 */
486 qls_init_fw_routing_table(qla_host_t *ha)
490 ret = qls_clear_routing_table(ha);
494 if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG,
495 Q81_CTL_SEM_SET_RIDX_DATAREG)) {
496 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
/* Drop frames that arrive with errors. */
500 ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DROP |
501 Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_ALL_ERROR),
502 Q81_CTL_RD_ERROR_PKT);
504 goto qls_init_fw_routing_table_exit;
/* Broadcast frames go to the default completion queue. */
506 ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DFLTQ |
507 Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_BCAST),
510 goto qls_init_fw_routing_table_exit;
/* With more than one Rx ring, RSS-matched frames are spread via RSS. */
512 if (ha->num_rx_rings > 1 ) {
513 ret = qls_load_route_idx_reg(ha,
514 (Q81_CTL_RI_E | Q81_CTL_RI_DST_RSS |
515 Q81_CTL_RI_TYPE_NICQMASK |
516 Q81_CTL_RI_IDX_RSS_MATCH),
517 Q81_CTL_RD_RSS_MATCH);
519 goto qls_init_fw_routing_table_exit;
/* Frames matching a registered multicast address -> default CQ. */
522 ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DFLTQ |
523 Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_MCAST_MATCH),
524 Q81_CTL_RD_MCAST_REG_MATCH);
526 goto qls_init_fw_routing_table_exit;
/* Frames matching the unicast CAM entry -> default CQ. */
528 ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DFLTQ |
529 Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_CAM_HIT),
532 goto qls_init_fw_routing_table_exit;
/* Common exit: always release the semaphore taken above. */
534 qls_init_fw_routing_table_exit:
535 qls_sem_unlock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG);
/*
 * qls_tx_tso_chksum
 *  Fills in the TSO/checksum-offload fields of a transmit IOCB for the
 *  given mbuf: parses the Ethernet (and optional VLAN) header, then the
 *  IPv4 or IPv6 header, and sets opcode, flags, pseudo-header offsets,
 *  and (for TSO) the MSS and pseudo-header checksum.
 */
540 qls_tx_tso_chksum(qla_host_t *ha, struct mbuf *mp, q81_tx_tso_t *tx_mac)
542 struct ether_vlan_header *eh;
546 uint32_t ehdrlen, ip_hlen;
/* Scratch buffer large enough for either an IPv4 or IPv6 header copy. */
550 uint8_t buf[sizeof(struct ip6_hdr)];
554 eh = mtod(mp, struct ether_vlan_header *);
/* Account for an 802.1Q tag when computing the L3 header offset. */
556 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
557 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
558 etype = ntohs(eh->evl_proto);
560 ehdrlen = ETHER_HDR_LEN;
561 etype = ntohs(eh->evl_encap_proto);
/* --- IPv4 path --- */
566 ip = (struct ip *)(mp->m_data + ehdrlen);
568 ip_hlen = sizeof (struct ip);
/* If the IP header is not contiguous in the first mbuf, copy it out. */
570 if (mp->m_len < (ehdrlen + ip_hlen)) {
571 m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
572 ip = (struct ip *)buf;
574 tx_mac->opcode = Q81_IOCB_TX_TSO;
575 tx_mac->flags |= Q81_TX_TSO_FLAGS_IPV4 ;
/* phdr_offsets packs the L3 start and L4 start offsets. */
577 tx_mac->phdr_offsets = ehdrlen;
579 tx_mac->phdr_offsets |= ((ehdrlen + ip_hlen) <<
580 Q81_TX_TSO_PHDR_SHIFT);
584 if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
585 tx_mac->flags |= Q81_TX_TSO_FLAGS_LSO;
587 th = (struct tcphdr *)(ip + 1);
/* Seed the TCP checksum with the pseudo-header sum for LSO. */
589 th->th_sum = in_pseudo(ip->ip_src.s_addr,
592 tx_mac->mss = mp->m_pkthdr.tso_segsz;
593 tx_mac->phdr_length = ip_hlen + ehdrlen +
597 tx_mac->vlan_off |= Q81_TX_TSO_VLAN_OFF_IC ;
/* Plain checksum offload: mark TCP or UDP checksum insertion. */
600 if (ip->ip_p == IPPROTO_TCP) {
601 tx_mac->flags |= Q81_TX_TSO_FLAGS_TC;
602 } else if (ip->ip_p == IPPROTO_UDP) {
603 tx_mac->flags |= Q81_TX_TSO_FLAGS_UC;
/* --- IPv6 path --- */
608 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
610 ip_hlen = sizeof(struct ip6_hdr);
/* If the IPv6 header is not contiguous in the first mbuf, copy it out. */
612 if (mp->m_len < (ehdrlen + ip_hlen)) {
613 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
615 ip6 = (struct ip6_hdr *)buf;
618 tx_mac->opcode = Q81_IOCB_TX_TSO;
619 tx_mac->flags |= Q81_TX_TSO_FLAGS_IPV6 ;
620 tx_mac->vlan_off |= Q81_TX_TSO_VLAN_OFF_IC ;
622 tx_mac->phdr_offsets = ehdrlen;
623 tx_mac->phdr_offsets |= ((ehdrlen + ip_hlen) <<
624 Q81_TX_TSO_PHDR_SHIFT);
/* Checksum offload on the next header: TCP or UDP. */
626 if (ip6->ip6_nxt == IPPROTO_TCP) {
627 tx_mac->flags |= Q81_TX_TSO_FLAGS_TC;
628 } else if (ip6->ip6_nxt == IPPROTO_UDP) {
629 tx_mac->flags |= Q81_TX_TSO_FLAGS_UC;
/* Minimum free Tx descriptors below which the ring is considered full. */
641 #define QLA_TX_MIN_FREE 2
/*
 * qls_hw_tx_done
 *  Recomputes the free-descriptor count of a Tx ring from its done
 *  (consumer) and next (producer) indices, handling ring wrap-around.
 *  The tail (not fully visible here) appears to report whether the ring
 *  has dropped to QLA_TX_MIN_FREE or fewer descriptors.
 */
643 qls_hw_tx_done(qla_host_t *ha, uint32_t txr_idx)
645 uint32_t txr_done, txr_next;
647 txr_done = ha->tx_ring[txr_idx].txr_done;
648 txr_next = ha->tx_ring[txr_idx].txr_next;
/* done == next means the ring is completely empty (all free). */
650 if (txr_done == txr_next) {
651 ha->tx_ring[txr_idx].txr_free = NUM_TX_DESCRIPTORS;
652 } else if (txr_done > txr_next) {
653 ha->tx_ring[txr_idx].txr_free = txr_done - txr_next;
/* Producer has wrapped past the end of the ring. */
655 ha->tx_ring[txr_idx].txr_free = NUM_TX_DESCRIPTORS +
659 if (ha->tx_ring[txr_idx].txr_free <= QLA_TX_MIN_FREE)
667 * Function: Transmits a packet. It first checks if the packet is a
668 * candidate for Large TCP Segment Offload and then for UDP/TCP checksum
669 * offload. If either of these creteria are not met, it is transmitted
670 * as a regular ethernet frame.
/*
 * qls_hw_send
 *  Builds and posts one transmit IOCB for the DMA-mapped mbuf 'mp' at ring
 *  slot 'txr_next' of Tx ring 'txr_idx'.  Applies TSO/checksum offload
 *  fields when requested, embeds up to MAX_TX_MAC_DESC segments directly
 *  in the IOCB, and spills larger scatter lists into the per-slot OAL
 *  (outbound address list) buffer.  Finally advances the producer index
 *  and rings the work-queue doorbell.
 */
673 qls_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
674 uint32_t txr_next, struct mbuf *mp, uint32_t txr_idx)
676 q81_tx_mac_t *tx_mac;
677 q81_txb_desc_t *tx_desc;
678 uint32_t total_length = 0;
685 total_length = mp->m_pkthdr.len;
/* Hardware limit: reject frames larger than the max TSO frame size. */
687 if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
688 device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
689 __func__, total_length);
/* Reclaim completed descriptors when free space drops below 1/4 ring. */
693 if (ha->tx_ring[txr_idx].txr_free <= (NUM_TX_DESCRIPTORS >> 2)) {
694 if (qls_hw_tx_done(ha, txr_idx)) {
695 device_printf(dev, "%s: tx_free[%d] = %d\n",
697 ha->tx_ring[txr_idx].txr_free);
702 tx_mac = (q81_tx_mac_t *)&ha->tx_ring[txr_idx].wq_vaddr[txr_next];
704 bzero(tx_mac, sizeof(q81_tx_mac_t));
/* Offload path: any checksum/TSO flag selects the TSO-style IOCB. */
706 if ((mp->m_pkthdr.csum_flags &
707 (CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO)) != 0) {
709 ret = qls_tx_tso_chksum(ha, mp, (q81_tx_tso_t *)tx_mac);
713 if (mp->m_pkthdr.csum_flags & CSUM_TSO)
714 ha->tx_ring[txr_idx].tx_tso_frames++;
716 ha->tx_ring[txr_idx].tx_frames++;
/* Non-offload path: plain MAC transmit IOCB. */
719 tx_mac->opcode = Q81_IOCB_TX_MAC;
/* Insert the VLAN tag from the mbuf packet header if present. */
722 if (mp->m_flags & M_VLANTAG) {
724 tx_mac->vlan_tci = mp->m_pkthdr.ether_vtag;
725 tx_mac->vlan_off |= Q81_TX_MAC_VLAN_OFF_V;
727 ha->tx_ring[txr_idx].tx_vlan_frames++;
730 tx_mac->frame_length = total_length;
/* Tag the IOCB with its ring slot so completions can be matched. */
732 tx_mac->tid_lo = txr_next;
/* Small scatter list: segments fit directly inside the IOCB. */
734 if (nsegs <= MAX_TX_MAC_DESC) {
736 QL_DPRINT2((dev, "%s: 1 [%d, %d]\n", __func__, total_length,
739 for (i = 0; i < nsegs; i++) {
740 tx_mac->txd[i].baddr = segs->ds_addr;
741 tx_mac->txd[i].length = segs->ds_len;
/* Mark the last embedded descriptor as end-of-list. */
744 tx_mac->txd[(nsegs - 1)].flags = Q81_RXB_DESC_FLAGS_E;
/* Large scatter list: descriptor 0 points at the external OAL. */
747 QL_DPRINT2((dev, "%s: 2 [%d, %d]\n", __func__, total_length,
750 tx_mac->txd[0].baddr =
751 ha->tx_ring[txr_idx].tx_buf[txr_next].oal_paddr;
752 tx_mac->txd[0].length =
753 nsegs * (sizeof(q81_txb_desc_t));
754 tx_mac->txd[0].flags = Q81_RXB_DESC_FLAGS_C;
756 tx_desc = ha->tx_ring[txr_idx].tx_buf[txr_next].oal_vaddr;
758 for (i = 0; i < nsegs; i++) {
759 tx_desc->baddr = segs->ds_addr;
760 tx_desc->length = segs->ds_len;
/* Mark the last OAL descriptor as end-of-list. */
763 tx_desc->flags = Q81_RXB_DESC_FLAGS_E;
/* Advance producer (ring size is a power of two), then ring doorbell. */
771 txr_next = (txr_next + 1) & (NUM_TX_DESCRIPTORS - 1);
772 ha->tx_ring[txr_idx].txr_next = txr_next;
774 ha->tx_ring[txr_idx].txr_free--;
776 Q81_WR_WQ_PROD_IDX(txr_idx, txr_next);
782 * Name: qls_del_hw_if
783 * Function: Destroys the hardware specific entities corresponding to an
/*
 * qls_del_hw_if
 *  Tears down the hardware interface: invalidates all work and completion
 *  queues, disables per-ring MSI-X interrupts, then clears the
 *  interrupt-handshake-disable and global-enable bits.  No-op when the
 *  hardware was never initialized.
 */
787 qls_del_hw_if(qla_host_t *ha)
793 if (ha->hw_init == 0) {
798 for (i = 0; i < ha->num_tx_rings; i++) {
799 Q81_SET_WQ_INVALID(i);
801 for (i = 0; i < ha->num_rx_rings; i++) {
802 Q81_SET_CQ_INVALID(i);
805 for (i = 0; i < ha->num_rx_rings; i++) {
806 Q81_DISABLE_INTR(ha, i); /* MSI-x i */
/* Clear IHD: mask written without the matching enable bit clears it. */
809 value = (Q81_CTL_INTRE_IHD << Q81_CTL_INTRE_MASK_SHIFT);
810 WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);
/* Clear the global interrupt-enable bit the same way. */
812 value = (Q81_CTL_INTRE_EI << Q81_CTL_INTRE_MASK_SHIFT);
813 WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);
814 ha->flags.intr_enable = 0;
822 * Name: qls_init_hw_if
823 * Function: Creates the hardware specific entities corresponding to an
824 * Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
825 * corresponding to the interface. Enables LRO if allowed.
/*
 * qls_init_hw_if
 *  Brings up the hardware interface end to end: resets the chip, programs
 *  the system/NIC-receive/function-specific control registers, loads the
 *  completion queues, RSS (if multiple Rx rings), and work queues,
 *  programs the CAM MAC address, multicast list and firmware routing
 *  table, then enables interrupts and primes the ring producer/consumer
 *  doorbells.
 */
828 qls_init_hw_if(qla_host_t *ha)
836 QL_DPRINT2((ha->pci_dev, "%s:enter\n", __func__));
840 ret = qls_hw_reset(ha);
842 goto qls_init_hw_if_exit;
844 ha->vm_pgsize = 4096;
846 /* Enable FAE and EFE bits in System Register */
847 value = Q81_CTL_SYSTEM_ENABLE_FAE | Q81_CTL_SYSTEM_ENABLE_EFE;
/* Upper 16 bits are a write-mask selecting which bits take effect. */
848 value = (value << Q81_CTL_SYSTEM_MASK_SHIFT) | value;
850 WRITE_REG32(ha, Q81_CTL_SYSTEM, value);
852 /* Set Default Completion Queue_ID in NIC Rcv Configuration Register */
853 value = (Q81_CTL_NIC_RCVC_DCQ_MASK << Q81_CTL_NIC_RCVC_MASK_SHIFT);
854 WRITE_REG32(ha, Q81_CTL_NIC_RCV_CONFIG, value);
856 /* Function Specific Control Register - Set Page Size and Enable NIC */
857 value = Q81_CTL_FUNC_SPECIFIC_FE |
858 Q81_CTL_FUNC_SPECIFIC_VM_PGSIZE_MASK |
859 Q81_CTL_FUNC_SPECIFIC_EPC_O |
860 Q81_CTL_FUNC_SPECIFIC_EPC_I |
861 Q81_CTL_FUNC_SPECIFIC_EC;
862 value = (value << Q81_CTL_FUNC_SPECIFIC_MASK_SHIFT) |
863 Q81_CTL_FUNC_SPECIFIC_FE |
864 Q81_CTL_FUNC_SPECIFIC_VM_PGSIZE_4K |
865 Q81_CTL_FUNC_SPECIFIC_EPC_O |
866 Q81_CTL_FUNC_SPECIFIC_EPC_I |
867 Q81_CTL_FUNC_SPECIFIC_EC;
869 WRITE_REG32(ha, Q81_CTL_FUNC_SPECIFIC, value);
871 /* Interrupt Mask Register */
872 value = Q81_CTL_INTRM_PI;
873 value = (value << Q81_CTL_INTRM_MASK_SHIFT) | value;
875 WRITE_REG32(ha, Q81_CTL_INTR_MASK, value);
877 /* Initialiatize Completion Queue */
878 for (i = 0; i < ha->num_rx_rings; i++) {
879 ret = qls_init_comp_queue(ha, i);
881 goto qls_init_hw_if_exit;
/* RSS is only configured when there is more than one Rx ring. */
884 if (ha->num_rx_rings > 1 ) {
885 ret = qls_init_rss(ha);
887 goto qls_init_hw_if_exit;
890 /* Initialize Work Queue */
892 for (i = 0; i < ha->num_tx_rings; i++) {
893 ret = qls_init_work_queue(ha, i);
895 goto qls_init_hw_if_exit;
899 goto qls_init_hw_if_exit;
901 /* Set up CAM RAM with MAC Address */
902 ret = qls_config_unicast_mac_addr(ha, 1);
904 goto qls_init_hw_if_exit;
906 ret = qls_hw_add_all_mcast(ha);
908 goto qls_init_hw_if_exit;
910 /* Initialize Firmware Routing Table */
911 ret = qls_init_fw_routing_table(ha);
913 goto qls_init_hw_if_exit;
915 /* Get Chip Revision ID */
916 ha->rev_id = READ_REG32(ha, Q81_CTL_REV_ID);
918 /* Enable Global Interrupt */
919 value = Q81_CTL_INTRE_EI;
920 value = (value << Q81_CTL_INTRE_MASK_SHIFT) | value;
922 WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);
924 /* Enable Interrupt Handshake Disable */
925 value = Q81_CTL_INTRE_IHD;
926 value = (value << Q81_CTL_INTRE_MASK_SHIFT) | value;
928 WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);
930 /* Enable Completion Interrupt */
932 ha->flags.intr_enable = 1;
934 for (i = 0; i < ha->num_rx_rings; i++) {
935 Q81_ENABLE_INTR(ha, i); /* MSI-x i */
940 qls_mbx_get_link_status(ha);
942 QL_DPRINT2((ha->pci_dev, "%s:rxr [0x%08x]\n", __func__,
943 ha->rx_ring[0].cq_db_offset));
944 QL_DPRINT2((ha->pci_dev, "%s:txr [0x%08x]\n", __func__,
945 ha->tx_ring[0].wq_db_offset));
/* Prime each Rx ring's consumer index and buffer-queue producers. */
947 for (i = 0; i < ha->num_rx_rings; i++) {
949 Q81_WR_CQ_CONS_IDX(i, 0);
950 Q81_WR_LBQ_PROD_IDX(i, ha->rx_ring[i].lbq_in);
951 Q81_WR_SBQ_PROD_IDX(i, ha->rx_ring[i].sbq_in);
953 QL_DPRINT2((dev, "%s: [wq_idx, cq_idx, lbq_idx, sbq_idx]"
954 "[0x%08x, 0x%08x, 0x%08x, 0x%08x]\n", __func__,
955 Q81_RD_WQ_IDX(i), Q81_RD_CQ_IDX(i), Q81_RD_LBQ_IDX(i),
959 for (i = 0; i < ha->num_rx_rings; i++) {
964 QL_DPRINT2((ha->pci_dev, "%s:exit\n", __func__));
/*
 * qls_wait_for_config_reg_bits
 *  Polls the CONFIG register until (reg & bits) == value.  On timeout it
 *  flags the adapter for recovery and reports failure.
 */
969 qls_wait_for_config_reg_bits(qla_host_t *ha, uint32_t bits, uint32_t value)
976 data32 = READ_REG32(ha, Q81_CTL_CONFIG);
978 if ((data32 & bits) == value)
/* Poll loop exhausted: request hardware recovery. */
983 ha->qla_initiate_recovery = 1;
984 device_printf(ha->pci_dev, "%s: failed\n", __func__);
/*
 * 40-byte RSS hash key loaded into the RSS ICB: the first 16 bytes are
 * also used as the IPv4 key, all 40 as the IPv6 key (see qls_init_rss()).
 */
988 static uint8_t q81_hash_key[] = {
989 0xda, 0x56, 0x5a, 0x6d,
990 0xc2, 0x0e, 0x5b, 0x25,
991 0x3d, 0x25, 0x67, 0x41,
992 0xb0, 0x8f, 0xa3, 0x43,
993 0xcb, 0x2b, 0xca, 0xd0,
994 0xb4, 0x30, 0x7b, 0xae,
995 0xa3, 0x2d, 0xcb, 0x77,
996 0x0c, 0xf2, 0x30, 0x80,
997 0x3b, 0xb7, 0x42, 0x6a,
998 0xfa, 0x01, 0xac, 0xbe };
/*
 * qls_init_rss
 *  Builds the RSS initialization control block (ICB) in DMA memory —
 *  flags, indirection table spread across the Rx rings, and hash keys —
 *  then points the chip's ICB-access registers at it (under the ICB
 *  semaphore) and kicks the CONFIG register to load it, waiting for the
 *  load-RSS bit to clear before and after.
 */
1001 qls_init_rss(qla_host_t *ha)
1003 q81_rss_icb_t *rss_icb;
1008 rss_icb = ha->rss_dma.dma_b;
1010 bzero(rss_icb, sizeof (q81_rss_icb_t));
1012 rss_icb->flags_base_cq_num = Q81_RSS_ICB_FLAGS_L4K |
1013 Q81_RSS_ICB_FLAGS_L6K | Q81_RSS_ICB_FLAGS_LI |
1014 Q81_RSS_ICB_FLAGS_LB | Q81_RSS_ICB_FLAGS_LM |
1015 Q81_RSS_ICB_FLAGS_RT4 | Q81_RSS_ICB_FLAGS_RT6;
/* 10-bit hash mask: 1024 indirection-table entries. */
1017 rss_icb->mask = 0x3FF;
/* Spread table entries round-robin across Rx rings (count is 2^n). */
1019 for (i = 0; i < Q81_RSS_ICB_NUM_INDTBL_ENTRIES; i++) {
1020 rss_icb->cq_id[i] = (i & (ha->num_rx_rings - 1));
1023 memcpy(rss_icb->ipv6_rss_hash_key, q81_hash_key, 40);
1024 memcpy(rss_icb->ipv4_rss_hash_key, q81_hash_key, 16);
/* Ensure no RSS load is already in flight. */
1026 ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LR, 0);
1029 goto qls_init_rss_exit;
1031 ret = qls_sem_lock(ha, Q81_CTL_SEM_MASK_ICB, Q81_CTL_SEM_SET_ICB);
1034 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
1035 goto qls_init_rss_exit;
/* Hand the chip the 64-bit bus address of the RSS ICB. */
1038 value = (uint32_t)ha->rss_dma.dma_addr;
1039 WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_LO, value);
1041 value = (uint32_t)(ha->rss_dma.dma_addr >> 32);
1042 WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_HI, value);
1044 qls_sem_unlock(ha, Q81_CTL_SEM_MASK_ICB);
/* Trigger the load and wait for the LR bit to clear again. */
1046 value = (Q81_CTL_CONFIG_LR << Q81_CTL_CONFIG_MASK_SHIFT) |
1049 WRITE_REG32(ha, Q81_CTL_CONFIG, value);
1051 ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LR, 0);
/*
 * qls_init_comp_queue
 *  Builds the completion-queue ICB for Rx ring 'cid' — CQ base/consumer
 *  addresses, interrupt coalescing delays, and the large/small buffer
 *  queue tables — then loads it through the ICB-access registers (under
 *  the ICB semaphore) and the CONFIG register's load-CQ command.  Finally
 *  resets the ring's software indices.
 */
1058 qls_init_comp_queue(qla_host_t *ha, int cid)
1060 q81_cq_icb_t *cq_icb;
1065 rxr = &ha->rx_ring[cid];
/* Doorbell pages for CQs start at page 128 of the register BAR. */
1067 rxr->cq_db_offset = ha->vm_pgsize * (128 + cid);
1069 cq_icb = rxr->cq_icb_vaddr;
1071 bzero(cq_icb, sizeof (q81_cq_icb_t));
/* One MSI-X vector per completion queue. */
1073 cq_icb->msix_vector = cid;
1074 cq_icb->flags = Q81_CQ_ICB_FLAGS_LC |
1075 Q81_CQ_ICB_FLAGS_LI |
1076 Q81_CQ_ICB_FLAGS_LL |
1077 Q81_CQ_ICB_FLAGS_LS |
1078 Q81_CQ_ICB_FLAGS_LV;
1080 cq_icb->length_v = NUM_CQ_ENTRIES;
1082 cq_icb->cq_baddr_lo = (rxr->cq_base_paddr & 0xFFFFFFFF);
1083 cq_icb->cq_baddr_hi = (rxr->cq_base_paddr >> 32) & 0xFFFFFFFF;
1085 cq_icb->cqi_addr_lo = (rxr->cqi_paddr & 0xFFFFFFFF);
1086 cq_icb->cqi_addr_hi = (rxr->cqi_paddr >> 32) & 0xFFFFFFFF;
/* Interrupt coalescing: packet delay and absolute delay. */
1088 cq_icb->pkt_idelay = 10;
1089 cq_icb->idelay = 100;
/* Large-buffer queue address table, buffer size and entry count. */
1091 cq_icb->lbq_baddr_lo = (rxr->lbq_addr_tbl_paddr & 0xFFFFFFFF);
1092 cq_icb->lbq_baddr_hi = (rxr->lbq_addr_tbl_paddr >> 32) & 0xFFFFFFFF;
1094 cq_icb->lbq_bsize = QLA_LGB_SIZE;
1095 cq_icb->lbq_length = QLA_NUM_LGB_ENTRIES;
/* Small-buffer queue address table, buffer size and entry count. */
1097 cq_icb->sbq_baddr_lo = (rxr->sbq_addr_tbl_paddr & 0xFFFFFFFF);
1098 cq_icb->sbq_baddr_hi = (rxr->sbq_addr_tbl_paddr >> 32) & 0xFFFFFFFF;
1100 cq_icb->sbq_bsize = (uint16_t)ha->msize;
1101 cq_icb->sbq_length = QLA_NUM_SMB_ENTRIES;
/* Ensure no CQ load is already in flight. */
1105 ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LCQ, 0);
1108 goto qls_init_comp_queue_exit;
1110 ret = qls_sem_lock(ha, Q81_CTL_SEM_MASK_ICB, Q81_CTL_SEM_SET_ICB);
1113 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
1114 goto qls_init_comp_queue_exit;
/* Hand the chip the 64-bit bus address of this CQ ICB. */
1117 value = (uint32_t)rxr->cq_icb_paddr;
1118 WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_LO, value);
1120 value = (uint32_t)(rxr->cq_icb_paddr >> 32);
1121 WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_HI, value);
1123 qls_sem_unlock(ha, Q81_CTL_SEM_MASK_ICB);
/* Issue load-CQ for queue 'cid' and wait for the bit to clear. */
1125 value = Q81_CTL_CONFIG_LCQ | Q81_CTL_CONFIG_Q_NUM_MASK;
1126 value = (value << Q81_CTL_CONFIG_MASK_SHIFT) | Q81_CTL_CONFIG_LCQ;
1127 value |= (cid << Q81_CTL_CONFIG_Q_NUM_SHIFT);
1128 WRITE_REG32(ha, Q81_CTL_CONFIG, value);
1130 ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LCQ, 0);
/* Reset software ring state; producers start 16-aligned below the top. */
1133 rxr->lbq_next = rxr->lbq_free = 0;
1134 rxr->sbq_next = rxr->sbq_free = 0;
1135 rxr->rx_free = rxr->rx_next = 0;
1136 rxr->lbq_in = (QLA_NUM_LGB_ENTRIES - 1) & ~0xF;
1137 rxr->sbq_in = (QLA_NUM_SMB_ENTRIES - 1) & ~0xF;
1139 qls_init_comp_queue_exit:
/*
 * qls_init_work_queue
 *  Builds the work-queue (Tx ring) ICB for ring 'wid' — doorbell address,
 *  ring base and consumer-index addresses, completion-queue binding —
 *  then loads it through the ICB-access registers (under the ICB
 *  semaphore) and the CONFIG register's load-WQ command.  Resets the
 *  ring's free count on success.
 */
1144 qls_init_work_queue(qla_host_t *ha, int wid)
1146 q81_wq_icb_t *wq_icb;
1151 txr = &ha->tx_ring[wid];
/* WQ doorbell pages start at the base of BAR1, one page per ring. */
1153 txr->wq_db_addr = (struct resource *)((uint8_t *)ha->pci_reg1
1154 + (ha->vm_pgsize * wid));
1156 txr->wq_db_offset = (ha->vm_pgsize * wid);
1158 wq_icb = txr->wq_icb_vaddr;
1159 bzero(wq_icb, sizeof (q81_wq_icb_t));
1161 wq_icb->length_v = NUM_TX_DESCRIPTORS |
1164 wq_icb->flags = Q81_WQ_ICB_FLAGS_LO | Q81_WQ_ICB_FLAGS_LI |
1165 Q81_WQ_ICB_FLAGS_LB | Q81_WQ_ICB_FLAGS_LC;
/* Bind this work queue's completions to CQ 'wid'. */
1167 wq_icb->wqcqid_rss = wid;
1169 wq_icb->baddr_lo = txr->wq_paddr & 0xFFFFFFFF;
1170 wq_icb->baddr_hi = (txr->wq_paddr >> 32)& 0xFFFFFFFF;
/* Address where the chip writes back the ring consumer index. */
1172 wq_icb->ci_addr_lo = txr->txr_cons_paddr & 0xFFFFFFFF;
1173 wq_icb->ci_addr_hi = (txr->txr_cons_paddr >> 32)& 0xFFFFFFFF;
/* Ensure no WQ load is already in flight. */
1175 ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LRQ, 0);
1178 goto qls_init_wq_exit;
1180 ret = qls_sem_lock(ha, Q81_CTL_SEM_MASK_ICB, Q81_CTL_SEM_SET_ICB);
1183 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
1184 goto qls_init_wq_exit;
/* Hand the chip the 64-bit bus address of this WQ ICB. */
1187 value = (uint32_t)txr->wq_icb_paddr;
1188 WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_LO, value);
1190 value = (uint32_t)(txr->wq_icb_paddr >> 32);
1191 WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_HI, value);
1193 qls_sem_unlock(ha, Q81_CTL_SEM_MASK_ICB);
/* Issue load-WQ for queue 'wid' and wait for the bit to clear. */
1195 value = Q81_CTL_CONFIG_LRQ | Q81_CTL_CONFIG_Q_NUM_MASK;
1196 value = (value << Q81_CTL_CONFIG_MASK_SHIFT) | Q81_CTL_CONFIG_LRQ;
1197 value |= (wid << Q81_CTL_CONFIG_Q_NUM_SHIFT);
1198 WRITE_REG32(ha, Q81_CTL_CONFIG, value);
1200 ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LRQ, 0);
1202 txr->txr_free = NUM_TX_DESCRIPTORS;
/*
 * qls_hw_add_all_mcast
 *  Re-programs every non-zero entry of the software multicast table into
 *  the hardware (used after a hardware (re)initialization).  An all-zero
 *  MAC marks an unused slot.
 */
1211 qls_hw_add_all_mcast(qla_host_t *ha)
1215 nmcast = ha->nmcast;
1217 for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
1218 if ((ha->mcast[i].addr[0] != 0) ||
1219 (ha->mcast[i].addr[1] != 0) ||
1220 (ha->mcast[i].addr[2] != 0) ||
1221 (ha->mcast[i].addr[3] != 0) ||
1222 (ha->mcast[i].addr[4] != 0) ||
1223 (ha->mcast[i].addr[5] != 0)) {
1225 if (qls_config_mcast_mac_addr(ha, ha->mcast[i].addr,
1227 device_printf(ha->pci_dev, "%s: failed\n",
/*
 * qls_hw_add_mcast
 *  Adds one multicast address: returns success if it is already present,
 *  otherwise finds the first free (all-zero) slot, programs the hardware
 *  entry, and records the address in the software table.
 */
1239 qls_hw_add_mcast(qla_host_t *ha, uint8_t *mta)
1243 for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
1245 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0)
1246 return 0; /* its been already added */
1249 for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
/* An all-zero MAC marks an unused slot. */
1251 if ((ha->mcast[i].addr[0] == 0) &&
1252 (ha->mcast[i].addr[1] == 0) &&
1253 (ha->mcast[i].addr[2] == 0) &&
1254 (ha->mcast[i].addr[3] == 0) &&
1255 (ha->mcast[i].addr[4] == 0) &&
1256 (ha->mcast[i].addr[5] == 0)) {
1258 if (qls_config_mcast_mac_addr(ha, mta, 1, i))
/* Mirror the hardware entry into the software table. */
1261 bcopy(mta, ha->mcast[i].addr, Q8_MAC_ADDR_LEN);
/*
 * qls_hw_del_mcast
 *  Deletes one multicast address: locates it in the software table,
 *  clears the hardware entry, then zeroes the software slot to mark it
 *  free again.
 */
1271 qls_hw_del_mcast(qla_host_t *ha, uint8_t *mta)
1275 for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
1276 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) {
1278 if (qls_config_mcast_mac_addr(ha, mta, 0, i))
1281 ha->mcast[i].addr[0] = 0;
1282 ha->mcast[i].addr[1] = 0;
1283 ha->mcast[i].addr[2] = 0;
1284 ha->mcast[i].addr[3] = 0;
1285 ha->mcast[i].addr[4] = 0;
1286 ha->mcast[i].addr[5] = 0;
1297 * Name: qls_hw_set_multi
1298 * Function: Sets the Multicast Addresses provided the host O.S into the
1299 * hardware (for the given interface)
/*
 * qls_hw_set_multi
 *  Walks 'mcnt' six-byte MAC addresses in the packed array 'mta' and adds
 *  or deletes each one (the add/delete selector parameter is elided in
 *  this extract — presumably the fourth argument; confirm).
 */
1302 qls_hw_set_multi(qla_host_t *ha, uint8_t *mta, uint32_t mcnt,
1307 for (i = 0; i < mcnt; i++) {
1309 if (qls_hw_add_mcast(ha, mta))
1312 if (qls_hw_del_mcast(ha, mta))
/* Advance to the next packed 6-byte address. */
1316 mta += Q8_MAC_ADDR_LEN;
/*
 * qls_update_link_state
 *  Reads the chip STATUS register, derives link-up from the per-port
 *  physical-link bit (PL0 for even PCI functions, PL1 for odd), and on a
 *  state change notifies the network stack via if_link_state_change().
 *  Does nothing unless the interface is running.
 */
1322 qls_update_link_state(qla_host_t *ha)
1324 uint32_t link_state;
1325 uint32_t prev_link_state;
1327 if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1331 link_state = READ_REG32(ha, Q81_CTL_STATUS);
1333 prev_link_state = ha->link_up;
/* Each PCI function has its own physical-link status bit. */
1335 if ((ha->pci_func & 0x1) == 0)
1336 ha->link_up = ((link_state & Q81_CTL_STATUS_PL0)? 1 : 0);
1338 ha->link_up = ((link_state & Q81_CTL_STATUS_PL1)? 1 : 0);
1340 if (prev_link_state != ha->link_up) {
1344 if_link_state_change(ha->ifp, LINK_STATE_UP);
1346 if_link_state_change(ha->ifp, LINK_STATE_DOWN);
/*
 * qls_free_tx_ring_dma
 *  Frees the two DMA buffers of one Tx ring (the work queue itself and
 *  the private ICB/OAL buffer), clearing each ownership flag so a repeat
 *  call is a no-op.
 */
1353 qls_free_tx_ring_dma(qla_host_t *ha, int r_idx)
1355 if (ha->tx_ring[r_idx].flags.wq_dma) {
1356 qls_free_dmabuf(ha, &ha->tx_ring[r_idx].wq_dma);
1357 ha->tx_ring[r_idx].flags.wq_dma = 0;
1360 if (ha->tx_ring[r_idx].flags.privb_dma) {
1361 qls_free_dmabuf(ha, &ha->tx_ring[r_idx].privb_dma);
1362 ha->tx_ring[r_idx].flags.privb_dma = 0;
/*
 * qls_free_tx_dma
 *  Releases all transmit DMA resources: per-ring DMA buffers, every Tx
 *  buffer's DMA map, and finally the Tx DMA tag itself.
 */
1368 qls_free_tx_dma(qla_host_t *ha)
1373 for (i = 0; i < ha->num_tx_rings; i++) {
1375 qls_free_tx_ring_dma(ha, i);
1377 for (j = 0; j < NUM_TX_DESCRIPTORS; j++) {
1379 txb = &ha->tx_ring[i].tx_buf[j];
1382 bus_dmamap_destroy(ha->tx_tag, txb->map);
/* Destroy the shared Tx tag only if it was ever created. */
1387 if (ha->tx_tag != NULL) {
1388 bus_dma_tag_destroy(ha->tx_tag);
/*
 * qls_alloc_tx_ring_dma
 *  Allocates the DMA memory for one Tx ring: the work queue (one
 *  q81_tx_cmd_t per descriptor) and a private buffer that is carved up
 *  into the WQ ICB (first half page), the consumer-index writeback word
 *  (second half page), and one OAL block per descriptor after that.
 */
1396 qls_alloc_tx_ring_dma(qla_host_t *ha, int ridx)
1402 device_t dev = ha->pci_dev;
1404 ha->tx_ring[ridx].wq_dma.alignment = 8;
1405 ha->tx_ring[ridx].wq_dma.size =
1406 NUM_TX_DESCRIPTORS * (sizeof (q81_tx_cmd_t));
1408 ret = qls_alloc_dmabuf(ha, &ha->tx_ring[ridx].wq_dma);
1411 device_printf(dev, "%s: [%d] txr failed\n", __func__, ridx);
1412 goto qls_alloc_tx_ring_dma_exit;
1414 ha->tx_ring[ridx].flags.wq_dma = 1;
1416 ha->tx_ring[ridx].privb_dma.alignment = 8;
1417 ha->tx_ring[ridx].privb_dma.size = QLA_TX_PRIVATE_BSIZE;
1419 ret = qls_alloc_dmabuf(ha, &ha->tx_ring[ridx].privb_dma);
1422 device_printf(dev, "%s: [%d] oalb failed\n", __func__, ridx);
1423 goto qls_alloc_tx_ring_dma_exit;
1426 ha->tx_ring[ridx].flags.privb_dma = 1;
1428 ha->tx_ring[ridx].wq_vaddr = ha->tx_ring[ridx].wq_dma.dma_b;
1429 ha->tx_ring[ridx].wq_paddr = ha->tx_ring[ridx].wq_dma.dma_addr;
/* Carve the private buffer: ICB first, consumer index half a page in. */
1431 v_addr = ha->tx_ring[ridx].privb_dma.dma_b;
1432 p_addr = ha->tx_ring[ridx].privb_dma.dma_addr;
1434 ha->tx_ring[ridx].wq_icb_vaddr = v_addr;
1435 ha->tx_ring[ridx].wq_icb_paddr = p_addr;
1437 ha->tx_ring[ridx].txr_cons_vaddr =
1438 (uint32_t *)(v_addr + (PAGE_SIZE >> 1));
1439 ha->tx_ring[ridx].txr_cons_paddr = p_addr + (PAGE_SIZE >> 1);
1441 v_addr = v_addr + (PAGE_SIZE >> 1);
1442 p_addr = p_addr + (PAGE_SIZE >> 1);
/* Give each Tx descriptor its own OAL block for large scatter lists. */
1444 txb = ha->tx_ring[ridx].tx_buf;
1446 for (i = 0; i < NUM_TX_DESCRIPTORS; i++) {
1448 txb[i].oal_vaddr = v_addr;
1449 txb[i].oal_paddr = p_addr;
1451 v_addr = v_addr + QLA_OAL_BLK_SIZE;
1452 p_addr = p_addr + QLA_OAL_BLK_SIZE;
1455 qls_alloc_tx_ring_dma_exit:
/*
 * qls_alloc_tx_dma
 *  Creates the shared Tx bus_dma tag (max frame = QLA_MAX_TSO_FRAME_SIZE,
 *  up to QLA_MAX_SEGMENTS segments of at most PAGE_SIZE each), then for
 *  every Tx ring allocates its ring DMA and one DMA map per descriptor.
 *  On any failure the partial allocations are released via
 *  qls_free_tx_dma().
 */
1460 qls_alloc_tx_dma(qla_host_t *ha)
1466 if (bus_dma_tag_create(NULL, /* parent */
1467 1, 0, /* alignment, bounds */
1468 BUS_SPACE_MAXADDR, /* lowaddr */
1469 BUS_SPACE_MAXADDR, /* highaddr */
1470 NULL, NULL, /* filter, filterarg */
1471 QLA_MAX_TSO_FRAME_SIZE, /* maxsize */
1472 QLA_MAX_SEGMENTS, /* nsegments */
1473 PAGE_SIZE, /* maxsegsize */
1474 BUS_DMA_ALLOCNOW, /* flags */
1475 NULL, /* lockfunc */
1476 NULL, /* lockfuncarg */
1478 device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n",
1483 for (i = 0; i < ha->num_tx_rings; i++) {
1485 ret = qls_alloc_tx_ring_dma(ha, i);
/* Unwind everything allocated so far on a ring failure. */
1488 qls_free_tx_dma(ha);
1492 for (j = 0; j < NUM_TX_DESCRIPTORS; j++) {
1494 txb = &ha->tx_ring[i].tx_buf[j];
1496 ret = bus_dmamap_create(ha->tx_tag,
1497 BUS_DMA_NOWAIT, &txb->map);
1499 ha->err_tx_dmamap_create++;
1500 device_printf(ha->pci_dev,
1501 "%s: bus_dmamap_create failed[%d, %d, %d]\n",
1502 __func__, ret, i, j);
1504 qls_free_tx_dma(ha);
/*
 * Release the RSS (receive-side scaling) indirection/key DMA buffer
 * and clear its ownership flag.
 */
1515 qls_free_rss_dma(qla_host_t *ha)
1517 qls_free_dmabuf(ha, &ha->rss_dma);
1518 ha->flags.rss_dma = 0;
/*
 * Allocate one page of 4-byte-aligned DMA memory for the RSS ICB and
 * mark it owned on success; logs and (per the elided path) returns the
 * qls_alloc_dmabuf() status on failure.
 */
1522 qls_alloc_rss_dma(qla_host_t *ha)
1526 ha->rss_dma.alignment = 4;
1527 ha->rss_dma.size = PAGE_SIZE;
1529 ret = qls_alloc_dmabuf(ha, &ha->rss_dma);
1532 device_printf(ha->pci_dev, "%s: failed\n", __func__);
1534 ha->flags.rss_dma = 1;
/*
 * Release the MPI (management processor) DMA scratch buffer used by
 * RISC RAM dumps, and clear its ownership flag.
 */
1540 qls_free_mpi_dma(qla_host_t *ha)
1542 qls_free_dmabuf(ha, &ha->mpi_dma);
1543 ha->flags.mpi_dma = 0;
/*
 * Allocate the MPI scratch DMA buffer (0x4000 * 4 bytes, 4-byte aligned)
 * used as the bounce area for Q81_MBX_DUMP_RISC_RAM; marks ownership on
 * success.
 */
1547 qls_alloc_mpi_dma(qla_host_t *ha)
1551 ha->mpi_dma.alignment = 4;
/* 16K words — sized for the largest RISC RAM dump request. */
1552 ha->mpi_dma.size = (0x4000 * 4);
1554 ret = qls_alloc_dmabuf(ha, &ha->mpi_dma);
1556 device_printf(ha->pci_dev, "%s: failed\n", __func__);
1558 ha->flags.mpi_dma = 1;
/*
 * Free the four DMA regions of one RX ring — completion queue (cq),
 * large-buffer queue (lbq), small-buffer queue (sbq) and large-buffer
 * pool (lb) — guarded by the per-region ownership flags so a partially
 * allocated ring can be torn down safely.
 */
1564 qls_free_rx_ring_dma(qla_host_t *ha, int ridx)
1566 if (ha->rx_ring[ridx].flags.cq_dma) {
1567 qls_free_dmabuf(ha, &ha->rx_ring[ridx].cq_dma);
1568 ha->rx_ring[ridx].flags.cq_dma = 0;
1571 if (ha->rx_ring[ridx].flags.lbq_dma) {
1572 qls_free_dmabuf(ha, &ha->rx_ring[ridx].lbq_dma);
1573 ha->rx_ring[ridx].flags.lbq_dma = 0;
1576 if (ha->rx_ring[ridx].flags.sbq_dma) {
1577 qls_free_dmabuf(ha, &ha->rx_ring[ridx].sbq_dma);
1578 ha->rx_ring[ridx].flags.sbq_dma = 0;
1581 if (ha->rx_ring[ridx].flags.lb_dma) {
1582 qls_free_dmabuf(ha, &ha->rx_ring[ridx].lb_dma);
1583 ha->rx_ring[ridx].flags.lb_dma = 0;
/*
 * Free every RX ring's DMA memory, then destroy the RX bus_dma tag
 * (the elided lines presumably NULL out ha->rx_tag afterwards —
 * TODO confirm against the full source).
 */
1589 qls_free_rx_dma(qla_host_t *ha)
1593 for (i = 0; i < ha->num_rx_rings; i++) {
1594 qls_free_rx_ring_dma(ha, i);
1597 if (ha->rx_tag != NULL) {
1598 bus_dma_tag_destroy(ha->rx_tag);
/*
 * Allocate and lay out the DMA memory for one RX ring:
 *  - cq_dma:  completion queue entries + one page for the CQ ICB and
 *             completion-index register shadow
 *  - lbq_dma: large-buffer queue address table + the queue itself
 *  - sbq_dma: small-buffer queue address table + the queue itself
 *  - lb_dma:  the large-buffer data pool
 * After allocation the buffers are zeroed, carved into the per-ring
 * vaddr/paddr fields, and the LBQ/SBQ address tables and the LBQ entries
 * themselves are pre-populated with 64-bit bus addresses split into
 * lo/hi 32-bit words.
 * NOTE(review): excerpt is elided — return type, locals (ret, i, v_addr,
 * p_addr), if-headers around the gotos and the final return are missing.
 */
1606 qls_alloc_rx_ring_dma(qla_host_t *ha, int ridx)
/* volatile: bq_e points at DMA memory the hardware also reads. */
1611 volatile q81_bq_addr_e_t *bq_e;
1612 device_t dev = ha->pci_dev;
/* CQ: 128-byte aligned; extra PAGE_SIZE holds ICB + index shadow. */
1614 ha->rx_ring[ridx].cq_dma.alignment = 128;
1615 ha->rx_ring[ridx].cq_dma.size =
1616 (NUM_CQ_ENTRIES * (sizeof (q81_cq_e_t))) + PAGE_SIZE;
1618 ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].cq_dma);
1621 device_printf(dev, "%s: [%d] cq failed\n", __func__, ridx);
1622 goto qls_alloc_rx_ring_dma_exit;
1624 ha->rx_ring[ridx].flags.cq_dma = 1;
1626 ha->rx_ring[ridx].lbq_dma.alignment = 8;
1627 ha->rx_ring[ridx].lbq_dma.size = QLA_LGBQ_AND_TABLE_SIZE;
1629 ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].lbq_dma);
1632 device_printf(dev, "%s: [%d] lbq failed\n", __func__, ridx);
1633 goto qls_alloc_rx_ring_dma_exit;
1635 ha->rx_ring[ridx].flags.lbq_dma = 1;
1637 ha->rx_ring[ridx].sbq_dma.alignment = 8;
1638 ha->rx_ring[ridx].sbq_dma.size = QLA_SMBQ_AND_TABLE_SIZE;
1640 ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].sbq_dma);
1643 device_printf(dev, "%s: [%d] sbq failed\n", __func__, ridx);
1644 goto qls_alloc_rx_ring_dma_exit;
1646 ha->rx_ring[ridx].flags.sbq_dma = 1;
1648 ha->rx_ring[ridx].lb_dma.alignment = 8;
1649 ha->rx_ring[ridx].lb_dma.size = (QLA_LGB_SIZE * QLA_NUM_LGB_ENTRIES);
1651 ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].lb_dma);
1653 device_printf(dev, "%s: [%d] lb failed\n", __func__, ridx);
1654 goto qls_alloc_rx_ring_dma_exit;
1656 ha->rx_ring[ridx].flags.lb_dma = 1;
/* Start from a clean slate before the hardware sees any of it. */
1658 bzero(ha->rx_ring[ridx].cq_dma.dma_b, ha->rx_ring[ridx].cq_dma.size);
1659 bzero(ha->rx_ring[ridx].lbq_dma.dma_b, ha->rx_ring[ridx].lbq_dma.size);
1660 bzero(ha->rx_ring[ridx].sbq_dma.dma_b, ha->rx_ring[ridx].sbq_dma.size);
1661 bzero(ha->rx_ring[ridx].lb_dma.dma_b, ha->rx_ring[ridx].lb_dma.size);
1663 /* completion queue */
1664 ha->rx_ring[ridx].cq_base_vaddr = ha->rx_ring[ridx].cq_dma.dma_b;
1665 ha->rx_ring[ridx].cq_base_paddr = ha->rx_ring[ridx].cq_dma.dma_addr;
1667 v_addr = ha->rx_ring[ridx].cq_dma.dma_b;
1668 p_addr = ha->rx_ring[ridx].cq_dma.dma_addr;
/* Skip past the CQ entries to the trailing page. */
1670 v_addr = v_addr + (NUM_CQ_ENTRIES * (sizeof (q81_cq_e_t)));
1671 p_addr = p_addr + (NUM_CQ_ENTRIES * (sizeof (q81_cq_e_t)));
1673 /* completion queue icb */
1674 ha->rx_ring[ridx].cq_icb_vaddr = v_addr;
1675 ha->rx_ring[ridx].cq_icb_paddr = p_addr;
/* Index register shadow sits a quarter page past the ICB. */
1677 v_addr = v_addr + (PAGE_SIZE >> 2);
1678 p_addr = p_addr + (PAGE_SIZE >> 2);
1680 /* completion queue index register */
1681 ha->rx_ring[ridx].cqi_vaddr = (uint32_t *)v_addr;
1682 ha->rx_ring[ridx].cqi_paddr = p_addr;
1684 v_addr = ha->rx_ring[ridx].lbq_dma.dma_b;
1685 p_addr = ha->rx_ring[ridx].lbq_dma.dma_addr;
1687 /* large buffer queue address table */
1688 ha->rx_ring[ridx].lbq_addr_tbl_vaddr = v_addr;
1689 ha->rx_ring[ridx].lbq_addr_tbl_paddr = p_addr;
1691 /* large buffer queue */
1692 ha->rx_ring[ridx].lbq_vaddr = v_addr + PAGE_SIZE;
1693 ha->rx_ring[ridx].lbq_paddr = p_addr + PAGE_SIZE;
1695 v_addr = ha->rx_ring[ridx].sbq_dma.dma_b;
1696 p_addr = ha->rx_ring[ridx].sbq_dma.dma_addr;
1698 /* small buffer queue address table */
1699 ha->rx_ring[ridx].sbq_addr_tbl_vaddr = v_addr;
1700 ha->rx_ring[ridx].sbq_addr_tbl_paddr = p_addr;
1702 /* small buffer queue */
1703 ha->rx_ring[ridx].sbq_vaddr = v_addr + PAGE_SIZE;
1704 ha->rx_ring[ridx].sbq_paddr = p_addr + PAGE_SIZE;
1706 ha->rx_ring[ridx].lb_vaddr = ha->rx_ring[ridx].lb_dma.dma_b;
1707 ha->rx_ring[ridx].lb_paddr = ha->rx_ring[ridx].lb_dma.dma_addr;
1709 /* Initialize Large Buffer Queue Table */
/* First table entry points at the LBQ itself (lo/hi 32-bit split). */
1711 p_addr = ha->rx_ring[ridx].lbq_paddr;
1712 bq_e = ha->rx_ring[ridx].lbq_addr_tbl_vaddr;
1714 bq_e->addr_lo = p_addr & 0xFFFFFFFF;
1715 bq_e->addr_hi = (p_addr >> 32) & 0xFFFFFFFF;
/* Fill LBQ entries with bus addresses of the large-buffer pool. */
1717 p_addr = ha->rx_ring[ridx].lb_paddr;
1718 bq_e = ha->rx_ring[ridx].lbq_vaddr;
1720 for (i = 0; i < QLA_NUM_LGB_ENTRIES; i++) {
1721 bq_e->addr_lo = p_addr & 0xFFFFFFFF;
1722 bq_e->addr_hi = (p_addr >> 32) & 0xFFFFFFFF;
1724 p_addr = p_addr + QLA_LGB_SIZE;
1728 /* Initialize Small Buffer Queue Table */
/* SBQ address table: one entry per page of the small-buffer queue. */
1730 p_addr = ha->rx_ring[ridx].sbq_paddr;
1731 bq_e = ha->rx_ring[ridx].sbq_addr_tbl_vaddr;
1733 for (i =0; i < (QLA_SBQ_SIZE/QLA_PAGE_SIZE); i++) {
1734 bq_e->addr_lo = p_addr & 0xFFFFFFFF;
1735 bq_e->addr_hi = (p_addr >> 32) & 0xFFFFFFFF;
1737 p_addr = p_addr + QLA_PAGE_SIZE;
1741 qls_alloc_rx_ring_dma_exit:
/*
 * Allocate all RX DMA resources: one bus_dma tag sized for a 9K jumbo
 * mbuf (MJUM9BYTES), then the per-ring DMA memory for each RX ring.
 * On a per-ring failure, qls_free_rx_dma() unwinds everything allocated
 * so far.
 * NOTE(review): excerpt is elided — return type, the nsegments tag
 * argument line, if-headers and the final return are not visible.
 */
1746 qls_alloc_rx_dma(qla_host_t *ha)
1751 if (bus_dma_tag_create(NULL, /* parent */
1752 1, 0, /* alignment, bounds */
1753 BUS_SPACE_MAXADDR, /* lowaddr */
1754 BUS_SPACE_MAXADDR, /* highaddr */
1755 NULL, NULL, /* filter, filterarg */
1756 MJUM9BYTES, /* maxsize */
1758 MJUM9BYTES, /* maxsegsize */
1759 BUS_DMA_ALLOCNOW, /* flags */
1760 NULL, /* lockfunc */
1761 NULL, /* lockfuncarg */
1764 device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
1770 for (i = 0; i < ha->num_rx_rings; i++) {
1771 ret = qls_alloc_rx_ring_dma(ha, i);
1774 qls_free_rx_dma(ha);
/*
 * Poll the flash address register until the RDY bit is set, bailing out
 * early on the ERR bit; delays 100us between polls.  The loop bound and
 * success/failure return values are in elided lines — presumably a
 * bounded retry count returning 0 on ready — TODO confirm.
 */
1783 qls_wait_for_flash_ready(qla_host_t *ha)
1790 data32 = READ_REG32(ha, Q81_CTL_FLASH_ADDR);
1792 if (data32 & Q81_CTL_FLASH_ADDR_ERR)
1793 goto qls_wait_for_flash_ready_exit;
1795 if (data32 & Q81_CTL_FLASH_ADDR_RDY)
1798 QLA_USEC_DELAY(100);
1801 qls_wait_for_flash_ready_exit:
1802 QL_DPRINT1((ha->pci_dev, "%s: failed\n", __func__));
1808 * Name: qls_rd_flash32
1809 * Function: Read Flash Memory
/*
 * Read one 32-bit word from flash at 'addr': wait for flash ready,
 * issue the read by writing addr | R-bit to the address register, wait
 * for completion, then fetch the word from the data register into *data.
 * Error-return paths between the waits are elided from this excerpt.
 */
1812 qls_rd_flash32(qla_host_t *ha, uint32_t addr, uint32_t *data)
1816 ret = qls_wait_for_flash_ready(ha);
1821 WRITE_REG32(ha, Q81_CTL_FLASH_ADDR, (addr | Q81_CTL_FLASH_ADDR_R));
1823 ret = qls_wait_for_flash_ready(ha);
1828 *data = READ_REG32(ha, Q81_CTL_FLASH_DATA);
/*
 * Validate the cached flash image in ha->flash: the 4-byte id field must
 * match 'signature', and the 16-bit ones'-style checksum over the whole
 * q81_flash_t (summed as uint16 words) must come out right — the
 * comparison itself is in an elided line; presumably csum16 == 0 —
 * TODO confirm.
 */
1834 qls_flash_validate(qla_host_t *ha, const char *signature)
1836 uint16_t csum16 = 0;
1840 if (bcmp(ha->flash.id, signature, 4)) {
1841 QL_DPRINT1((ha->pci_dev, "%s: invalid signature "
1842 "%x:%x:%x:%x %s\n", __func__, ha->flash.id[0],
1843 ha->flash.id[1], ha->flash.id[2], ha->flash.id[3],
/* Sum the structure as 16-bit words for the checksum test. */
1848 data16 = (uint16_t *)&ha->flash;
1850 for (i = 0; i < (sizeof (q81_flash_t) >> 1); i++) {
1851 csum16 += *data16++;
1855 QL_DPRINT1((ha->pci_dev, "%s: invalid checksum\n", __func__));
/*
 * Read this function's NIC parameter block out of flash: take the flash
 * hardware semaphore, pick the per-PCI-function flash offset (F0 for
 * even functions, F1 for odd), read the q81_flash_t word by word via
 * qls_rd_flash32(), validate signature/checksum, and copy the burned-in
 * MAC address into ha->mac_addr.  The semaphore is always released at
 * the exit label.
 */
1862 qls_rd_nic_params(qla_host_t *ha)
1868 if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_FLASH, Q81_CTL_SEM_SET_FLASH)) {
1869 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
/* Flash offsets are word addresses, hence the >> 2. */
1873 if ((ha->pci_func & 0x1) == 0)
1874 faddr = Q81_F0_FLASH_OFFSET >> 2;
1876 faddr = Q81_F1_FLASH_OFFSET >> 2;
1878 qflash = (uint32_t *)&ha->flash;
1880 for (i = 0; i < (sizeof(q81_flash_t) >> 2) ; i++) {
1882 ret = qls_rd_flash32(ha, faddr, qflash);
1885 goto qls_rd_flash_data_exit;
1891 QL_DUMP_BUFFER8(ha, __func__, (&ha->flash), (sizeof (q81_flash_t)));
1893 ret = qls_flash_validate(ha, Q81_FLASH_ID);
1896 goto qls_rd_flash_data_exit;
1898 bcopy(ha->flash.mac_addr0, ha->mac_addr, ETHER_ADDR_LEN);
1900 QL_DPRINT1((ha->pci_dev, "%s: mac %02x:%02x:%02x:%02x:%02x:%02x\n",
1901 __func__, ha->mac_addr[0], ha->mac_addr[1], ha->mac_addr[2],
1902 ha->mac_addr[3], ha->mac_addr[4], ha->mac_addr[5]));
1904 qls_rd_flash_data_exit:
/* Always drop the flash semaphore, success or failure. */
1906 qls_sem_unlock(ha, Q81_CTL_SEM_MASK_FLASH);
/*
 * Acquire a hardware semaphore: write mask|value to the semaphore
 * register, read it back, and retry (up to 'count' times, 100us apart —
 * the read-back comparison is elided) until ownership is observed.
 * On timeout, flags the adapter for recovery.
 */
1912 qls_sem_lock(qla_host_t *ha, uint32_t mask, uint32_t value)
1914 uint32_t count = 30;
1918 WRITE_REG32(ha, Q81_CTL_SEMAPHORE, (mask|value));
1920 data = READ_REG32(ha, Q81_CTL_SEMAPHORE);
1925 QLA_USEC_DELAY(100);
/* Never got the semaphore — request adapter recovery. */
1928 ha->qla_initiate_recovery = 1;
/*
 * Release a hardware semaphore by writing just the mask bits (no value)
 * to the semaphore register.
 */
1933 qls_sem_unlock(qla_host_t *ha, uint32_t mask)
1935 WRITE_REG32(ha, Q81_CTL_SEMAPHORE, mask);
/*
 * Poll the processor-address register until RDY is set, exiting early
 * on ERR; 100us between polls.  Falls through to the failure label on
 * timeout, which logs and flags the adapter for recovery.  Loop bound
 * and return values are in elided lines.
 */
1939 qls_wait_for_proc_addr_ready(qla_host_t *ha)
1946 data32 = READ_REG32(ha, Q81_CTL_PROC_ADDR);
1948 if (data32 & Q81_CTL_PROC_ADDR_ERR)
1949 goto qls_wait_for_proc_addr_ready_exit;
1951 if (data32 & Q81_CTL_PROC_ADDR_RDY)
1954 QLA_USEC_DELAY(100);
1957 qls_wait_for_proc_addr_ready_exit:
1958 QL_DPRINT1((ha->pci_dev, "%s: failed\n", __func__));
1960 ha->qla_initiate_recovery = 1;
/*
 * Indirect register read through the processor-address interface:
 * wait ready, write (module | reg | READ) to the address register,
 * wait ready again, then fetch the result from the data register
 * into *data.  Returns the wait status on failure.
 */
1965 qls_proc_addr_rd_reg(qla_host_t *ha, uint32_t addr_module, uint32_t reg,
1971 ret = qls_wait_for_proc_addr_ready(ha);
1974 goto qls_proc_addr_rd_reg_exit;
1976 value = addr_module | reg | Q81_CTL_PROC_ADDR_READ;
1978 WRITE_REG32(ha, Q81_CTL_PROC_ADDR, value);
1980 ret = qls_wait_for_proc_addr_ready(ha);
1983 goto qls_proc_addr_rd_reg_exit;
1985 *data = READ_REG32(ha, Q81_CTL_PROC_DATA);
1987 qls_proc_addr_rd_reg_exit:
/*
 * Indirect register write through the processor-address interface:
 * wait ready, load the data register, then write (module | reg) —
 * without the READ bit — to the address register to commit, and wait
 * for completion.
 */
1992 qls_proc_addr_wr_reg(qla_host_t *ha, uint32_t addr_module, uint32_t reg,
1998 ret = qls_wait_for_proc_addr_ready(ha);
2001 goto qls_proc_addr_wr_reg_exit;
/* Data must be staged before the address write triggers the operation. */
2003 WRITE_REG32(ha, Q81_CTL_PROC_DATA, data);
2005 value = addr_module | reg;
2007 WRITE_REG32(ha, Q81_CTL_PROC_ADDR, value);
2009 ret = qls_wait_for_proc_addr_ready(ha);
2011 qls_proc_addr_wr_reg_exit:
/*
 * Reset the NIC function: write FUNC into both the mask and trigger
 * positions of the reset register, then poll until the FUNC bit clears.
 * Logs if the bit never clears (loop bound elided from this excerpt).
 */
2016 qls_hw_nic_reset(qla_host_t *ha)
2020 device_t dev = ha->pci_dev;
/* Mask half selects which reset bits the write actually affects. */
2024 data = (Q81_CTL_RESET_FUNC << Q81_CTL_RESET_MASK_SHIFT) |
2026 WRITE_REG32(ha, Q81_CTL_RESET, data);
2030 data = READ_REG32(ha, Q81_CTL_RESET);
2031 if ((data & Q81_CTL_RESET_FUNC) == 0)
2036 device_printf(dev, "%s: Bit 15 not cleared after Reset\n",
/*
 * Full hardware reset sequence.  On first-time init (hw_init == 0) a
 * bare NIC function reset suffices.  Otherwise: clear the routing
 * table, ask management firmware to pause (SET_MGMT_CTL_STOP), wait for
 * the NIC FIFO to drain (STATUS_NFE) and for the management firmware to
 * confirm FIFO-empty, reset the NIC function, then resume management
 * control.  Polling loop bounds are in elided lines.
 */
2044 qls_hw_reset(qla_host_t *ha)
2046 device_t dev = ha->pci_dev;
2051 QL_DPRINT2((ha->pci_dev, "%s:enter[%d]\n", __func__, ha->hw_init));
2053 if (ha->hw_init == 0) {
/* First init: no firmware/routing state to quiesce yet. */
2054 ret = qls_hw_nic_reset(ha);
2055 goto qls_hw_reset_exit;
2058 ret = qls_clear_routing_table(ha);
2060 goto qls_hw_reset_exit;
/* Tell management firmware to stop before we yank the function. */
2062 ret = qls_mbx_set_mgmt_ctrl(ha, Q81_MBX_SET_MGMT_CTL_STOP);
2064 goto qls_hw_reset_exit;
2067 * Wait for FIFO to empty
2071 data = READ_REG32(ha, Q81_CTL_STATUS);
2072 if (data & Q81_CTL_STATUS_NFE)
2074 qls_mdelay(__func__, 100);
2077 device_printf(dev, "%s: NFE bit not set\n", __func__);
2078 goto qls_hw_reset_exit;
/* Poll mgmt firmware until it reports FIFO empty + mgmt set. */
2083 (void)qls_mbx_get_mgmt_ctrl(ha, &data);
2085 if ((data & Q81_MBX_GET_MGMT_CTL_FIFO_EMPTY) &&
2086 (data & Q81_MBX_GET_MGMT_CTL_SET_MGMT))
2088 qls_mdelay(__func__, 100);
2091 goto qls_hw_reset_exit;
2094 * Reset the NIC function
2096 ret = qls_hw_nic_reset(ha);
2098 goto qls_hw_reset_exit;
/* Let management firmware resume now that the reset is complete. */
2100 ret = qls_mbx_set_mgmt_ctrl(ha, Q81_MBX_SET_MGMT_CTL_RESUME);
2104 device_printf(dev, "%s: failed\n", __func__);
2110 * MPI Related Functions
/*
 * Read an MPI RISC register: thin wrapper around qls_proc_addr_rd_reg()
 * with the MPI_RISC address module selected.
 */
2113 qls_mpi_risc_rd_reg(qla_host_t *ha, uint32_t reg, uint32_t *data)
2117 ret = qls_proc_addr_rd_reg(ha, Q81_CTL_PROC_ADDR_MPI_RISC,
/*
 * Write an MPI RISC register: thin wrapper around qls_proc_addr_wr_reg()
 * with the MPI_RISC address module selected.
 */
2123 qls_mpi_risc_wr_reg(qla_host_t *ha, uint32_t reg, uint32_t data)
2127 ret = qls_proc_addr_wr_reg(ha, Q81_CTL_PROC_ADDR_MPI_RISC,
/*
 * Read a firmware mailbox-out register, offsetting by this PCI
 * function's mailbox bank (FUNC0 for even functions, FUNC1 for odd)
 * before going through the MPI RISC read path.
 */
2133 qls_mbx_rd_reg(qla_host_t *ha, uint32_t reg, uint32_t *data)
2137 if ((ha->pci_func & 0x1) == 0)
2138 reg += Q81_FUNC0_MBX_OUT_REG0;
2140 reg += Q81_FUNC1_MBX_OUT_REG0;
2142 ret = qls_mpi_risc_rd_reg(ha, reg, data);
/*
 * Write a firmware mailbox-in register, offsetting by this PCI
 * function's mailbox bank (FUNC0 for even functions, FUNC1 for odd)
 * before going through the MPI RISC write path.
 */
2148 qls_mbx_wr_reg(qla_host_t *ha, uint32_t reg, uint32_t data)
2152 if ((ha->pci_func & 0x1) == 0)
2153 reg += Q81_FUNC0_MBX_IN_REG0;
2155 reg += Q81_FUNC1_MBX_IN_REG0;
2157 ret = qls_mpi_risc_wr_reg(ha, reg, data);
/*
 * Issue a mailbox command to the firmware and collect the response.
 * Sequence: verify no host-to-RISC interrupt is already pending, take
 * the NIC-receive proc-addr semaphore, write i_count input words into
 * the mailbox-in registers, ring the HTR interrupt, release the
 * semaphore, then wait for completion.  With interrupts disabled the
 * response is polled directly (STATUS_PI + mailbox-out reads, where
 * 0x4xxx status codes are completions and 0x8xxx are async events that
 * clear the RTH interrupt); with interrupts enabled the ISR fills
 * ha->mbox[] and this path copies it out.  Timeout flags recovery.
 * NOTE(review): excerpt is elided — locals, loop bounds, several
 * if-headers and the success/failure return values are not visible.
 */
2164 qls_mbx_cmd(qla_host_t *ha, uint32_t *in_mbx, uint32_t i_count,
2165 uint32_t *out_mbx, uint32_t o_count)
2168 uint32_t data32, mbx_cmd = 0;
2169 uint32_t count = 50;
2171 QL_DPRINT2((ha->pci_dev, "%s: enter[0x%08x 0x%08x 0x%08x]\n",
2172 __func__, *in_mbx, *(in_mbx + 1), *(in_mbx + 2)));
/* A pending host-to-RISC interrupt means a command is in flight. */
2174 data32 = READ_REG32(ha, Q81_CTL_HOST_CMD_STATUS);
2176 if (data32 & Q81_CTL_HCS_HTR_INTR) {
2177 device_printf(ha->pci_dev, "%s: cmd_status[0x%08x]\n",
2179 goto qls_mbx_cmd_exit;
2182 if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_PROC_ADDR_NIC_RCV,
2183 Q81_CTL_SEM_SET_PROC_ADDR_NIC_RCV)) {
2184 device_printf(ha->pci_dev, "%s: semlock failed\n", __func__);
2185 goto qls_mbx_cmd_exit;
/* Load the input mailbox registers with the command words. */
2192 for (i = 0; i < i_count; i++) {
2194 ret = qls_mbx_wr_reg(ha, i, *in_mbx);
2197 device_printf(ha->pci_dev,
2198 "%s: mbx_wr[%d, 0x%08x] failed\n", __func__,
2200 qls_sem_unlock(ha, Q81_CTL_SEM_MASK_PROC_ADDR_NIC_RCV);
2201 goto qls_mbx_cmd_exit;
/* Ring the doorbell: host-to-RISC interrupt starts the command. */
2206 WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS, Q81_CTL_HCS_CMD_SET_HTR_INTR);
2208 qls_sem_unlock(ha, Q81_CTL_SEM_MASK_PROC_ADDR_NIC_RCV);
/* Polled completion path — used when interrupts are not enabled. */
2215 if (ha->flags.intr_enable == 0) {
2216 data32 = READ_REG32(ha, Q81_CTL_STATUS);
2218 if (!(data32 & Q81_CTL_STATUS_PI)) {
2219 qls_mdelay(__func__, 100);
2223 ret = qls_mbx_rd_reg(ha, 0, &data32);
/* 0x4xxx in mbox0 = command completion status family. */
2226 if ((data32 & 0xF000) == 0x4000) {
2228 out_mbx[0] = data32;
2230 for (i = 1; i < o_count; i++) {
2231 ret = qls_mbx_rd_reg(ha, i,
2241 out_mbx[i] = data32;
/* 0x8xxx = async event; ack it and keep waiting. */
2244 } else if ((data32 & 0xF000) == 0x8000) {
2247 Q81_CTL_HOST_CMD_STATUS,\
2248 Q81_CTL_HCS_CMD_CLR_RTH_INTR);
/* Interrupt path: ISR already copied the response into ha->mbox[]. */
2253 for (i = 1; i < o_count; i++) {
2254 out_mbx[i] = ha->mbox[i];
2260 qls_mdelay(__func__, 1000);
2265 if (ha->flags.intr_enable == 0) {
2266 WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS,\
2267 Q81_CTL_HCS_CMD_CLR_RTH_INTR);
/* Command never completed — request adapter recovery. */
2271 ha->qla_initiate_recovery = 1;
2274 QL_DPRINT2((ha->pci_dev, "%s: exit[%d]\n", __func__, ret));
/*
 * Issue the SET_MGMT_CTL mailbox command with control word t_ctrl
 * (STOP/RESUME).  Success is CMD_COMPLETE; for a STOP request a
 * CMD_ERROR response is also tolerated (firmware may already be
 * stopped).  Anything else is logged as failure.
 */
2279 qls_mbx_set_mgmt_ctrl(qla_host_t *ha, uint32_t t_ctrl)
2282 device_t dev = ha->pci_dev;
2285 bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
2287 mbox[0] = Q81_MBX_SET_MGMT_CTL;
2290 if (qls_mbx_cmd(ha, mbox, 2, mbox, 1)) {
2291 device_printf(dev, "%s failed\n", __func__);
2295 if ((mbox[0] == Q81_MBX_CMD_COMPLETE) ||
2296 ((t_ctrl == Q81_MBX_SET_MGMT_CTL_STOP) &&
2297 (mbox[0] == Q81_MBX_CMD_ERROR))){
2300 device_printf(dev, "%s failed [0x%08x]\n", __func__, mbox[0]);
/*
 * Issue the GET_MGMT_CTL mailbox command and return the management
 * control status word (mbox[1]) through *t_status.
 */
2306 qls_mbx_get_mgmt_ctrl(qla_host_t *ha, uint32_t *t_status)
2309 device_t dev = ha->pci_dev;
2314 bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
2316 mbox[0] = Q81_MBX_GET_MGMT_CTL;
2318 if (qls_mbx_cmd(ha, mbox, 1, mbox, 2)) {
2319 device_printf(dev, "%s failed\n", __func__);
2323 *t_status = mbox[1];
/*
 * Issue the GET_LNK_STATUS mailbox command and cache the five response
 * words into the qla_host_t link-state fields; also logs the raw
 * response for diagnostics.
 */
2329 qls_mbx_get_link_status(qla_host_t *ha)
2332 device_t dev = ha->pci_dev;
2335 bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
2337 mbox[0] = Q81_MBX_GET_LNK_STATUS;
2339 if (qls_mbx_cmd(ha, mbox, 1, mbox, 6)) {
2340 device_printf(dev, "%s failed\n", __func__);
2344 ha->link_status = mbox[1];
2345 ha->link_down_info = mbox[2];
2346 ha->link_hw_info = mbox[3];
2347 ha->link_dcbx_counters = mbox[4];
2348 ha->link_change_counters = mbox[5];
2350 device_printf(dev, "%s 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
2351 __func__, mbox[0],mbox[1],mbox[2],mbox[3],mbox[4],mbox[5]);
/*
 * Issue the ABOUT_FW mailbox command and log the six response words
 * (firmware identification/version data) for diagnostics.
 */
2357 qls_mbx_about_fw(qla_host_t *ha)
2360 device_t dev = ha->pci_dev;
2363 bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
2365 mbox[0] = Q81_MBX_ABOUT_FW;
2367 if (qls_mbx_cmd(ha, mbox, 1, mbox, 6)) {
2368 device_printf(dev, "%s failed\n", __func__);
2372 device_printf(dev, "%s 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
2373 __func__, mbox[0],mbox[1],mbox[2],mbox[3],mbox[4],mbox[5]);
/*
 * Dump r_size 32-bit words of MPI RISC RAM starting at r_addr into
 * 'buf'.  The firmware DMAs the dump into the pre-allocated mpi_dma
 * bounce buffer (address split across mbox[2,3,6,7] as four 16-bit
 * chunks of the 64-bit bus address); on a 0x4000 completion status the
 * data is synced and copied out to the caller's buffer.
 */
2377 qls_mbx_dump_risc_ram(qla_host_t *ha, void *buf, uint32_t r_addr,
2382 device_t dev = ha->pci_dev;
2385 bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
/* r_size is a word count; clear r_size*4 bytes of bounce buffer. */
2387 bzero(ha->mpi_dma.dma_b,(r_size << 2));
2388 b_paddr = ha->mpi_dma.dma_addr;
2390 mbox[0] = Q81_MBX_DUMP_RISC_RAM;
/* 32-bit RISC address and size split into 16-bit mailbox halves. */
2391 mbox[1] = r_addr & 0xFFFF;
2392 mbox[2] = ((uint32_t)(b_paddr >> 16)) & 0xFFFF;
2393 mbox[3] = ((uint32_t)b_paddr) & 0xFFFF;
2394 mbox[4] = (r_size >> 16) & 0xFFFF;
2395 mbox[5] = r_size & 0xFFFF;
2396 mbox[6] = ((uint32_t)(b_paddr >> 48)) & 0xFFFF;
2397 mbox[7] = ((uint32_t)(b_paddr >> 32)) & 0xFFFF;
2398 mbox[8] = (r_addr >> 16) & 0xFFFF;
/* Hand the buffer to the device before the command runs. */
2400 bus_dmamap_sync(ha->mpi_dma.dma_tag, ha->mpi_dma.dma_map,
2401 BUS_DMASYNC_PREREAD);
2403 if (qls_mbx_cmd(ha, mbox, 9, mbox, 1)) {
2404 device_printf(dev, "%s failed\n", __func__);
2407 if (mbox[0] != 0x4000) {
2408 device_printf(ha->pci_dev, "%s: failed!\n", __func__);
/* Reclaim the buffer from the device, then copy the dump out. */
2411 bus_dmamap_sync(ha->mpi_dma.dma_tag, ha->mpi_dma.dma_map,
2412 BUS_DMASYNC_POSTREAD);
2413 bcopy(ha->mpi_dma.dma_b, buf, (r_size << 2));
2420 qls_mpi_reset(qla_host_t *ha)
2424 device_t dev = ha->pci_dev;
2426 WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS,\
2427 Q81_CTL_HCS_CMD_SET_RISC_RESET);
2431 data = READ_REG32(ha, Q81_CTL_HOST_CMD_STATUS);
2432 if (data & Q81_CTL_HCS_RISC_RESET) {
2433 WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS,\
2434 Q81_CTL_HCS_CMD_CLR_RISC_RESET);
2437 qls_mdelay(__func__, 10);
2440 device_printf(dev, "%s: failed\n", __func__);