2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2013-2014 Qlogic Corporation
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
21 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 * POSSIBILITY OF SUCH DAMAGE.
32 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
33 * Content: Contains Hardware dependent functions
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
41 #include "qls_inline.h"
49 static int qls_wait_for_mac_proto_idx_ready(qla_host_t *ha, uint32_t op);
50 static int qls_config_unicast_mac_addr(qla_host_t *ha, uint32_t add_mac);
51 static int qls_config_mcast_mac_addr(qla_host_t *ha, uint8_t *mac_addr,
52 uint32_t add_mac, uint32_t index);
54 static int qls_init_rss(qla_host_t *ha);
55 static int qls_init_comp_queue(qla_host_t *ha, int cid);
56 static int qls_init_work_queue(qla_host_t *ha, int wid);
57 static int qls_init_fw_routing_table(qla_host_t *ha);
58 static int qls_hw_add_all_mcast(qla_host_t *ha);
59 static int qls_hw_add_mcast(qla_host_t *ha, uint8_t *mta);
60 static int qls_hw_del_mcast(qla_host_t *ha, uint8_t *mta);
61 static int qls_wait_for_flash_ready(qla_host_t *ha);
63 static int qls_sem_lock(qla_host_t *ha, uint32_t mask, uint32_t value);
64 static void qls_sem_unlock(qla_host_t *ha, uint32_t mask);
66 static void qls_free_tx_dma(qla_host_t *ha);
67 static int qls_alloc_tx_dma(qla_host_t *ha);
68 static void qls_free_rx_dma(qla_host_t *ha);
69 static int qls_alloc_rx_dma(qla_host_t *ha);
70 static void qls_free_mpi_dma(qla_host_t *ha);
71 static int qls_alloc_mpi_dma(qla_host_t *ha);
72 static void qls_free_rss_dma(qla_host_t *ha);
73 static int qls_alloc_rss_dma(qla_host_t *ha);
75 static int qls_flash_validate(qla_host_t *ha, const char *signature);
77 static int qls_wait_for_proc_addr_ready(qla_host_t *ha);
78 static int qls_proc_addr_rd_reg(qla_host_t *ha, uint32_t addr_module,
79 uint32_t reg, uint32_t *data);
80 static int qls_proc_addr_wr_reg(qla_host_t *ha, uint32_t addr_module,
81 uint32_t reg, uint32_t data);
83 static int qls_hw_reset(qla_host_t *ha);
86 * MPI Related Functions
88 static int qls_mbx_cmd(qla_host_t *ha, uint32_t *in_mbx, uint32_t i_count,
89 uint32_t *out_mbx, uint32_t o_count);
90 static int qls_mbx_set_mgmt_ctrl(qla_host_t *ha, uint32_t t_ctrl);
91 static int qls_mbx_get_mgmt_ctrl(qla_host_t *ha, uint32_t *t_status);
92 static void qls_mbx_get_link_status(qla_host_t *ha);
93 static void qls_mbx_about_fw(qla_host_t *ha);
/*
 * Returns the MSI-X vector count: one vector per RX (completion) ring.
 * NOTE(review): sparse listing — return type/braces elided between lines.
 */
96 qls_get_msix_count(qla_host_t *ha)
98 	return (ha->num_rx_rings);
/*
 * sysctl handler: writing any value to dev.qlxge.N.mpi_dump triggers an
 * MPI core dump via qls_mpi_core_dump(). Read-only access returns early.
 */
102 qls_syctl_mpi_dump(SYSCTL_HANDLER_ARGS)
107 	err = sysctl_handle_int(oidp, &ret, 0, req);
	/* Not a write request (or error): nothing to do. */
109 	if (err || !req->newptr)
113 	ha = (qla_host_t *)arg1;
114 	qls_mpi_core_dump(ha);
/*
 * sysctl handler: a write to dev.qlxge.N.link_status queries the firmware
 * for link status and firmware version info via mailbox commands.
 */
120 qls_syctl_link_status(SYSCTL_HANDLER_ARGS)
125 	err = sysctl_handle_int(oidp, &ret, 0, req);
127 	if (err || !req->newptr)
131 	ha = (qla_host_t *)arg1;
132 	qls_mbx_get_link_status(ha);
133 	qls_mbx_about_fw(ha);
/*
 * Registers the per-device sysctl tree: ring-count read-only knobs plus
 * the mpi_dump and link_status procedure handlers defined above.
 */
139 qls_hw_add_sysctls(qla_host_t *ha)
	/* Ring counts are fixed at the compile-time maxima. */
145 	ha->num_rx_rings = MAX_RX_RINGS; ha->num_tx_rings = MAX_TX_RINGS;
147 	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
148 	SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
149 	OID_AUTO, "num_rx_rings", CTLFLAG_RD, &ha->num_rx_rings,
150 	ha->num_rx_rings, "Number of Completion Queues");
152 	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
153 	SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
154 	OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->num_tx_rings,
155 	ha->num_tx_rings, "Number of Transmit Rings");
157 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
158 	SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
159 	OID_AUTO, "mpi_dump",
160 	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)ha, 0,
161 	qls_syctl_mpi_dump, "I", "MPI Dump");
163 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
164 	SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
165 	OID_AUTO, "link_status",
166 	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)ha, 0,
167 	qls_syctl_link_status, "I", "Link Status");
172 * Function: Frees the DMA'able memory allocated in qls_alloc_dma()
/*
 * Releases DMA memory allocated by qls_alloc_dma(). Only the RSS and MPI
 * frees are visible in this listing; tx/rx frees are presumably elided —
 * TODO(review): confirm against full source.
 */
175 qls_free_dma(qla_host_t *ha)
177 	qls_free_rss_dma(ha);
178 	qls_free_mpi_dma(ha);
185 * Name: qls_alloc_dma
186 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
/*
 * Allocates all DMA regions in order: rx, tx, MPI, RSS. On failure of a
 * later stage, earlier allocations are unwound (mpi free visible here;
 * other unwind paths elided in this listing).
 */
189 qls_alloc_dma(qla_host_t *ha)
191 	if (qls_alloc_rx_dma(ha))
194 	if (qls_alloc_tx_dma(ha)) {
199 	if (qls_alloc_mpi_dma(ha)) {
205 	if (qls_alloc_rss_dma(ha)) {
206 		qls_free_mpi_dma(ha);
/*
 * Polls the MAC protocol address-index register until the requested op
 * bit is ready; on timeout flags chip recovery (qla_initiate_recovery).
 * NOTE(review): poll loop body/timeout elided in this listing.
 */
216 qls_wait_for_mac_proto_idx_ready(qla_host_t *ha, uint32_t op)
222 		data32 = READ_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX);
229 	ha->qla_initiate_recovery = 1;
234 * Name: qls_config_unicast_mac_addr
235 * Function: binds/unbinds a unicast MAC address to the interface.
/*
 * Programs the interface unicast MAC address into the chip's CAM via the
 * MAC protocol address index/data register pair, under the MAC/SerDes
 * hardware semaphore. Three writes per entry: lower 32 bits of the MAC,
 * upper 16 bits, then the routing/output word (offset 2).
 */
238 qls_config_unicast_mac_addr(qla_host_t *ha, uint32_t add_mac)
241 	uint32_t mac_upper = 0;
242 	uint32_t mac_lower = 0;
243 	uint32_t value = 0, index;
	/* Serialize CAM access against the other PCI function / firmware. */
245 	if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_MAC_SERDES,
246 	Q81_CTL_SEM_SET_MAC_SERDES)) {
247 	QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
	/* Split the 6-byte MAC into 16-bit upper and 32-bit lower words. */
252 	mac_upper = (ha->mac_addr[0] << 8) | ha->mac_addr[1];
253 	mac_lower = (ha->mac_addr[2] << 24) | (ha->mac_addr[3] << 16) |
254 	(ha->mac_addr[4] << 8) | ha->mac_addr[5];
256 	ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
258 	goto qls_config_unicast_mac_addr_exit;
	/* Each PCI function owns its own half of the CAM (128 entries). */
260 	index = 128 * (ha->pci_func & 0x1); /* index */
262 	value = (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
263 	Q81_CTL_MAC_PROTO_AI_TYPE_CAM_MAC;
265 	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
266 	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_lower);
268 	ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
270 	goto qls_config_unicast_mac_addr_exit;
	/* CAM entry word 1: upper 16 bits of the MAC. */
272 	value = (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
273 	Q81_CTL_MAC_PROTO_AI_TYPE_CAM_MAC | 0x1;
275 	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
276 	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_upper);
278 	ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
280 	goto qls_config_unicast_mac_addr_exit;
	/* CAM entry word 2: route matches to the NIC, this function, CQ 0. */
282 	value = (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
283 	Q81_CTL_MAC_PROTO_AI_TYPE_CAM_MAC | 0x2;
285 	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
287 	value = Q81_CAM_MAC_OFF2_ROUTE_NIC |
288 	((ha->pci_func & 0x1) << Q81_CAM_MAC_OFF2_FUNC_SHIFT) |
289 	(0 << Q81_CAM_MAC_OFF2_CQID_SHIFT);
291 	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, value);
293 qls_config_unicast_mac_addr_exit:
294 	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_MAC_SERDES);
299 * Name: qls_config_mcast_mac_addr
300 * Function: binds/unbinds a multicast MAC address to the interface.
/*
 * Adds/removes one multicast MAC at the given index of the chip's
 * multicast table, under the MAC/SerDes semaphore. Two register writes:
 * lower 32 bits then upper 16 bits, each gated by an index-ready poll.
 */
303 qls_config_mcast_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac,
307 	uint32_t mac_upper = 0;
308 	uint32_t mac_lower = 0;
311 	if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_MAC_SERDES,
312 	Q81_CTL_SEM_SET_MAC_SERDES)) {
313 	QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
318 	mac_upper = (mac_addr[0] << 8) | mac_addr[1];
319 	mac_lower = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
320 	(mac_addr[4] << 8) | mac_addr[5];
322 	ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
324 	goto qls_config_mcast_mac_addr_exit;
	/* AI_E presumably reflects add_mac (enable) — verify in full source. */
326 	value = Q81_CTL_MAC_PROTO_AI_E |
327 	(index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
328 	Q81_CTL_MAC_PROTO_AI_TYPE_MCAST ;
330 	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
331 	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_lower);
333 	ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
335 	goto qls_config_mcast_mac_addr_exit;
337 	value = Q81_CTL_MAC_PROTO_AI_E |
338 	(index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
339 	Q81_CTL_MAC_PROTO_AI_TYPE_MCAST | 0x1;
341 	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
342 	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_upper);
344 qls_config_mcast_mac_addr_exit:
345 	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_MAC_SERDES);
351 * Name: qls_set_mac_rcv_mode
352 * Function: Enable/Disable AllMulticast and Promiscuous Modes.
/*
 * Polls the routing-index register for the requested op bit; on timeout
 * flags chip recovery. (Loop/timeout lines elided in this listing.)
 */
355 qls_wait_for_route_idx_ready(qla_host_t *ha, uint32_t op)
361 		data32 = READ_REG32(ha, Q81_CTL_ROUTING_INDEX);
368 	ha->qla_initiate_recovery = 1;
/*
 * Writes one entry of the firmware routing table: waits for the routing
 * index register to accept a master write, then writes index and data.
 * Caller must hold the routing-index semaphore.
 */
373 qls_load_route_idx_reg(qla_host_t *ha, uint32_t index, uint32_t data)
377 	ret = qls_wait_for_route_idx_ready(ha, Q81_CTL_RI_MW);
380 	device_printf(ha->pci_dev, "%s: [0x%08x, 0x%08x] failed\n",
381 	__func__, index, data);
382 	goto qls_load_route_idx_reg_exit;
385 	WRITE_REG32(ha, Q81_CTL_ROUTING_INDEX, index);
386 	WRITE_REG32(ha, Q81_CTL_ROUTING_DATA, data);
388 qls_load_route_idx_reg_exit:
/*
 * Semaphore-acquiring wrapper around qls_load_route_idx_reg(): takes the
 * routing index/data register semaphore for the single write.
 */
393 qls_load_route_idx_reg_locked(qla_host_t *ha, uint32_t index, uint32_t data)
397 	if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG,
398 	Q81_CTL_SEM_SET_RIDX_DATAREG)) {
399 	QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
403 	ret = qls_load_route_idx_reg(ha, index, data);
405 	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG);
/*
 * Zeroes all 16 NIC-queue routing-table slots (mask written as 0),
 * holding the routing register semaphore for the whole sweep.
 */
411 qls_clear_routing_table(qla_host_t *ha)
415 	if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG,
416 	Q81_CTL_SEM_SET_RIDX_DATAREG)) {
417 	QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
421 	for (i = 0; i < 16; i++) {
422 	ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_TYPE_NICQMASK|
423 	(i << 8) | Q81_CTL_RI_DST_DFLTQ), 0);
428 	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG);
/*
 * Promiscuous / all-multicast routing-slot helpers. "Set" writes the slot
 * enabled (RI_E) with a match mask directing packets to the default CQ;
 * "reset" rewrites the same slot with data 0 to disable it.
 */
434 qls_set_promisc(qla_host_t *ha)
438 	ret = qls_load_route_idx_reg_locked(ha,
439 	(Q81_CTL_RI_E | Q81_CTL_RI_TYPE_NICQMASK |
440 	Q81_CTL_RI_IDX_PROMISCUOUS | Q81_CTL_RI_DST_DFLTQ),
441 	Q81_CTL_RD_VALID_PKT);
446 qls_reset_promisc(qla_host_t *ha)
450 	ret = qls_load_route_idx_reg_locked(ha, (Q81_CTL_RI_TYPE_NICQMASK |
451 	Q81_CTL_RI_IDX_PROMISCUOUS | Q81_CTL_RI_DST_DFLTQ), 0);
456 qls_set_allmulti(qla_host_t *ha)
460 	ret = qls_load_route_idx_reg_locked(ha,
461 	(Q81_CTL_RI_E | Q81_CTL_RI_TYPE_NICQMASK |
462 	Q81_CTL_RI_IDX_ALLMULTI | Q81_CTL_RI_DST_DFLTQ),
468 qls_reset_allmulti(qla_host_t *ha)
472 	ret = qls_load_route_idx_reg_locked(ha, (Q81_CTL_RI_TYPE_NICQMASK |
473 	Q81_CTL_RI_IDX_ALLMULTI | Q81_CTL_RI_DST_DFLTQ), 0);
/*
 * Builds the default firmware routing table: clear everything, then
 * install slots for (1) error packets -> drop, (2) broadcast -> default
 * CQ, (3) RSS match -> RSS (only with >1 RX ring), (4) registered
 * multicast -> default CQ, (5) unicast CAM hit -> default CQ.
 */
478 qls_init_fw_routing_table(qla_host_t *ha)
482 	ret = qls_clear_routing_table(ha);
486 	if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG,
487 	Q81_CTL_SEM_SET_RIDX_DATAREG)) {
488 	QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
	/* Errored frames are dropped outright. */
492 	ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DROP |
493 	Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_ALL_ERROR),
494 	Q81_CTL_RD_ERROR_PKT);
496 	goto qls_init_fw_routing_table_exit;
498 	ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DFLTQ |
499 	Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_BCAST),
502 	goto qls_init_fw_routing_table_exit;
	/* RSS spreading only makes sense with multiple completion queues. */
504 	if (ha->num_rx_rings > 1 ) {
505 	ret = qls_load_route_idx_reg(ha,
506 	(Q81_CTL_RI_E | Q81_CTL_RI_DST_RSS |
507 	Q81_CTL_RI_TYPE_NICQMASK |
508 	Q81_CTL_RI_IDX_RSS_MATCH),
509 	Q81_CTL_RD_RSS_MATCH);
511 	goto qls_init_fw_routing_table_exit;
514 	ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DFLTQ |
515 	Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_MCAST_MATCH),
516 	Q81_CTL_RD_MCAST_REG_MATCH);
518 	goto qls_init_fw_routing_table_exit;
520 	ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DFLTQ |
521 	Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_CAM_HIT),
524 	goto qls_init_fw_routing_table_exit;
526 qls_init_fw_routing_table_exit:
527 	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG);
/*
 * Fills in the TSO/checksum fields of a TX IOCB from the mbuf's headers.
 * Parses the Ethernet (and optional VLAN) header, then branches on IPv4
 * vs IPv6; for TSO it precomputes the TCP pseudo-header checksum and the
 * MSS. If the header straddles the first mbuf, it is copied into a local
 * buffer first (buf is sized for the larger ip6_hdr).
 */
532 qls_tx_tso_chksum(qla_host_t *ha, struct mbuf *mp, q81_tx_tso_t *tx_mac)
534 	struct ether_vlan_header *eh;
538 	uint32_t ehdrlen, ip_hlen;
542 	uint8_t buf[sizeof(struct ip6_hdr)];
546 	eh = mtod(mp, struct ether_vlan_header *);
	/* VLAN-tagged frames have a 4-byte longer Ethernet header. */
548 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
549 	ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
550 	etype = ntohs(eh->evl_proto);
552 	ehdrlen = ETHER_HDR_LEN;
553 	etype = ntohs(eh->evl_encap_proto);
	/* --- IPv4 path --- */
558 	ip = (struct ip *)(mp->m_data + ehdrlen);
560 	ip_hlen = sizeof (struct ip);
	/* Header not contiguous in first mbuf: copy to stack buffer. */
562 	if (mp->m_len < (ehdrlen + ip_hlen)) {
563 	m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
564 	ip = (struct ip *)buf;
566 	tx_mac->opcode = Q81_IOCB_TX_TSO;
567 	tx_mac->flags |= Q81_TX_TSO_FLAGS_IPV4 ;
569 	tx_mac->phdr_offsets = ehdrlen;
571 	tx_mac->phdr_offsets |= ((ehdrlen + ip_hlen) <<
572 	Q81_TX_TSO_PHDR_SHIFT);
576 	if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
577 	tx_mac->flags |= Q81_TX_TSO_FLAGS_LSO;
579 	th = (struct tcphdr *)(ip + 1);
	/* Seed TCP checksum with the pseudo-header for hardware LSO. */
581 	th->th_sum = in_pseudo(ip->ip_src.s_addr,
584 	tx_mac->mss = mp->m_pkthdr.tso_segsz;
585 	tx_mac->phdr_length = ip_hlen + ehdrlen +
589 	tx_mac->vlan_off |= Q81_TX_TSO_VLAN_OFF_IC ;
591 	if (ip->ip_p == IPPROTO_TCP) {
592 	tx_mac->flags |= Q81_TX_TSO_FLAGS_TC;
593 	} else if (ip->ip_p == IPPROTO_UDP) {
594 	tx_mac->flags |= Q81_TX_TSO_FLAGS_UC;
	/* --- IPv6 path --- */
599 	ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
601 	ip_hlen = sizeof(struct ip6_hdr);
603 	if (mp->m_len < (ehdrlen + ip_hlen)) {
604 	m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
606 	ip6 = (struct ip6_hdr *)buf;
609 	tx_mac->opcode = Q81_IOCB_TX_TSO;
610 	tx_mac->flags |= Q81_TX_TSO_FLAGS_IPV6 ;
611 	tx_mac->vlan_off |= Q81_TX_TSO_VLAN_OFF_IC ;
613 	tx_mac->phdr_offsets = ehdrlen;
614 	tx_mac->phdr_offsets |= ((ehdrlen + ip_hlen) <<
615 	Q81_TX_TSO_PHDR_SHIFT);
617 	if (ip6->ip6_nxt == IPPROTO_TCP) {
618 	tx_mac->flags |= Q81_TX_TSO_FLAGS_TC;
619 	} else if (ip6->ip6_nxt == IPPROTO_UDP) {
620 	tx_mac->flags |= Q81_TX_TSO_FLAGS_UC;
/* Below this many free descriptors the ring is treated as full. */
632 #define QLA_TX_MIN_FREE 2
/*
 * Recomputes the free-descriptor count of TX ring txr_idx from the
 * consumer (txr_done) and producer (txr_next) indices of the circular
 * ring; returns non-zero (busy) when free drops to QLA_TX_MIN_FREE.
 */
634 qls_hw_tx_done(qla_host_t *ha, uint32_t txr_idx)
636 	uint32_t txr_done, txr_next;
638 	txr_done = ha->tx_ring[txr_idx].txr_done;
639 	txr_next = ha->tx_ring[txr_idx].txr_next;
641 	if (txr_done == txr_next) {
642 	ha->tx_ring[txr_idx].txr_free = NUM_TX_DESCRIPTORS;
643 	} else if (txr_done > txr_next) {
644 	ha->tx_ring[txr_idx].txr_free = txr_done - txr_next;
	/* Producer wrapped past the end of the ring. */
646 	ha->tx_ring[txr_idx].txr_free = NUM_TX_DESCRIPTORS +
650 	if (ha->tx_ring[txr_idx].txr_free <= QLA_TX_MIN_FREE)
658 * Function: Transmits a packet. It first checks if the packet is a
659 * candidate for Large TCP Segment Offload and then for UDP/TCP checksum
660 * offload. If either of these creteria are not met, it is transmitted
661 * as a regular ethernet frame.
/*
 * Builds and posts one TX IOCB for the mbuf's DMA segments at slot
 * txr_next of ring txr_idx. Small segment lists go inline in the IOCB;
 * larger ones spill into the per-descriptor OAL (outbound address list).
 * Finally advances the producer index and rings the doorbell.
 */
664 qls_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
665 	uint32_t txr_next, struct mbuf *mp, uint32_t txr_idx)
667 	q81_tx_mac_t *tx_mac;
668 	q81_txb_desc_t *tx_desc;
669 	uint32_t total_length = 0;
676 	total_length = mp->m_pkthdr.len;
678 	if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
679 	device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
680 	__func__, total_length);
	/* Running low: refresh free count from the hardware consumer index. */
684 	if (ha->tx_ring[txr_idx].txr_free <= (NUM_TX_DESCRIPTORS >> 2)) {
685 	if (qls_hw_tx_done(ha, txr_idx)) {
686 	device_printf(dev, "%s: tx_free[%d] = %d\n",
688 	ha->tx_ring[txr_idx].txr_free);
693 	tx_mac = (q81_tx_mac_t *)&ha->tx_ring[txr_idx].wq_vaddr[txr_next];
695 	bzero(tx_mac, sizeof(q81_tx_mac_t));
697 	if ((mp->m_pkthdr.csum_flags &
698 	(CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO)) != 0) {
699 	ret = qls_tx_tso_chksum(ha, mp, (q81_tx_tso_t *)tx_mac);
703 	if (mp->m_pkthdr.csum_flags & CSUM_TSO)
704 	ha->tx_ring[txr_idx].tx_tso_frames++;
706 	ha->tx_ring[txr_idx].tx_frames++;
	/* No offload requested: plain MAC transmit IOCB. */
709 	tx_mac->opcode = Q81_IOCB_TX_MAC;
712 	if (mp->m_flags & M_VLANTAG) {
713 	tx_mac->vlan_tci = mp->m_pkthdr.ether_vtag;
714 	tx_mac->vlan_off |= Q81_TX_MAC_VLAN_OFF_V;
716 	ha->tx_ring[txr_idx].tx_vlan_frames++;
719 	tx_mac->frame_length = total_length;
721 	tx_mac->tid_lo = txr_next;
	/* Segment list fits inline in the IOCB. */
723 	if (nsegs <= MAX_TX_MAC_DESC) {
724 	QL_DPRINT2((dev, "%s: 1 [%d, %d]\n", __func__, total_length,
727 	for (i = 0; i < nsegs; i++) {
728 	tx_mac->txd[i].baddr = segs->ds_addr;
729 	tx_mac->txd[i].length = segs->ds_len;
732 	tx_mac->txd[(nsegs - 1)].flags = Q81_RXB_DESC_FLAGS_E;
	/* Too many segments: chain through the per-slot OAL buffer. */
735 	QL_DPRINT2((dev, "%s: 2 [%d, %d]\n", __func__, total_length,
738 	tx_mac->txd[0].baddr =
739 	ha->tx_ring[txr_idx].tx_buf[txr_next].oal_paddr;
740 	tx_mac->txd[0].length =
741 	nsegs * (sizeof(q81_txb_desc_t));
742 	tx_mac->txd[0].flags = Q81_RXB_DESC_FLAGS_C;
744 	tx_desc = ha->tx_ring[txr_idx].tx_buf[txr_next].oal_vaddr;
746 	for (i = 0; i < nsegs; i++) {
747 	tx_desc->baddr = segs->ds_addr;
748 	tx_desc->length = segs->ds_len;
751 	tx_desc->flags = Q81_RXB_DESC_FLAGS_E;
	/* Advance producer (ring size is a power of two) and ring doorbell. */
759 	txr_next = (txr_next + 1) & (NUM_TX_DESCRIPTORS - 1);
760 	ha->tx_ring[txr_idx].txr_next = txr_next;
762 	ha->tx_ring[txr_idx].txr_free--;
764 	Q81_WR_WQ_PROD_IDX(txr_idx, txr_next);
770 * Name: qls_del_hw_if
771 * Function: Destroys the hardware specific entities corresponding to an
/*
 * Tears down hardware state for the interface: invalidates all work and
 * completion queues, disables each per-ring MSI-X interrupt, then clears
 * the interrupt-handshake-disable and global-enable bits. No-op if the
 * hardware was never initialized.
 */
775 qls_del_hw_if(qla_host_t *ha)
781 	if (ha->hw_init == 0) {
786 	for (i = 0; i < ha->num_tx_rings; i++) {
787 	Q81_SET_WQ_INVALID(i);
789 	for (i = 0; i < ha->num_rx_rings; i++) {
790 	Q81_SET_CQ_INVALID(i);
793 	for (i = 0; i < ha->num_rx_rings; i++) {
794 	Q81_DISABLE_INTR(ha, i); /* MSI-x i */
797 	value = (Q81_CTL_INTRE_IHD << Q81_CTL_INTRE_MASK_SHIFT);
798 	WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);
800 	value = (Q81_CTL_INTRE_EI << Q81_CTL_INTRE_MASK_SHIFT);
801 	WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);
802 	ha->flags.intr_enable = 0;
810 * Name: qls_init_hw_if
811 * Function: Creates the hardware specific entities corresponding to an
812 * Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
813 * corresponding to the interface. Enables LRO if allowed.
/*
 * Full hardware bring-up for the interface: chip reset, global/system
 * register setup, completion-queue + (optional) RSS + work-queue init,
 * MAC/CAM programming, multicast restore, routing table, interrupt
 * enable, and initial producer/consumer doorbell writes.
 */
816 qls_init_hw_if(qla_host_t *ha)
823 	QL_DPRINT2((ha->pci_dev, "%s:enter\n", __func__));
827 	ret = qls_hw_reset(ha);
829 	goto qls_init_hw_if_exit;
831 	ha->vm_pgsize = 4096;
833 	/* Enable FAE and EFE bits in System Register */
834 	value = Q81_CTL_SYSTEM_ENABLE_FAE | Q81_CTL_SYSTEM_ENABLE_EFE;
	/* These control registers take a write-mask in the upper half-word. */
835 	value = (value << Q81_CTL_SYSTEM_MASK_SHIFT) | value;
837 	WRITE_REG32(ha, Q81_CTL_SYSTEM, value);
839 	/* Set Default Completion Queue_ID in NIC Rcv Configuration Register */
840 	value = (Q81_CTL_NIC_RCVC_DCQ_MASK << Q81_CTL_NIC_RCVC_MASK_SHIFT);
841 	WRITE_REG32(ha, Q81_CTL_NIC_RCV_CONFIG, value);
843 	/* Function Specific Control Register - Set Page Size and Enable NIC */
844 	value = Q81_CTL_FUNC_SPECIFIC_FE |
845 	Q81_CTL_FUNC_SPECIFIC_VM_PGSIZE_MASK |
846 	Q81_CTL_FUNC_SPECIFIC_EPC_O |
847 	Q81_CTL_FUNC_SPECIFIC_EPC_I |
848 	Q81_CTL_FUNC_SPECIFIC_EC;
849 	value = (value << Q81_CTL_FUNC_SPECIFIC_MASK_SHIFT) |
850 	Q81_CTL_FUNC_SPECIFIC_FE |
851 	Q81_CTL_FUNC_SPECIFIC_VM_PGSIZE_4K |
852 	Q81_CTL_FUNC_SPECIFIC_EPC_O |
853 	Q81_CTL_FUNC_SPECIFIC_EPC_I |
854 	Q81_CTL_FUNC_SPECIFIC_EC;
856 	WRITE_REG32(ha, Q81_CTL_FUNC_SPECIFIC, value);
858 	/* Interrupt Mask Register */
859 	value = Q81_CTL_INTRM_PI;
860 	value = (value << Q81_CTL_INTRM_MASK_SHIFT) | value;
862 	WRITE_REG32(ha, Q81_CTL_INTR_MASK, value);
864 	/* Initialiatize Completion Queue */
865 	for (i = 0; i < ha->num_rx_rings; i++) {
866 	ret = qls_init_comp_queue(ha, i);
868 	goto qls_init_hw_if_exit;
	/* RSS is only configured when more than one RX ring exists. */
871 	if (ha->num_rx_rings > 1 ) {
872 	ret = qls_init_rss(ha);
874 	goto qls_init_hw_if_exit;
877 	/* Initialize Work Queue */
879 	for (i = 0; i < ha->num_tx_rings; i++) {
880 	ret = qls_init_work_queue(ha, i);
882 	goto qls_init_hw_if_exit;
886 	goto qls_init_hw_if_exit;
888 	/* Set up CAM RAM with MAC Address */
889 	ret = qls_config_unicast_mac_addr(ha, 1);
891 	goto qls_init_hw_if_exit;
893 	ret = qls_hw_add_all_mcast(ha);
895 	goto qls_init_hw_if_exit;
897 	/* Initialize Firmware Routing Table */
898 	ret = qls_init_fw_routing_table(ha);
900 	goto qls_init_hw_if_exit;
902 	/* Get Chip Revision ID */
903 	ha->rev_id = READ_REG32(ha, Q81_CTL_REV_ID);
905 	/* Enable Global Interrupt */
906 	value = Q81_CTL_INTRE_EI;
907 	value = (value << Q81_CTL_INTRE_MASK_SHIFT) | value;
909 	WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);
911 	/* Enable Interrupt Handshake Disable */
912 	value = Q81_CTL_INTRE_IHD;
913 	value = (value << Q81_CTL_INTRE_MASK_SHIFT) | value;
915 	WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);
917 	/* Enable Completion Interrupt */
919 	ha->flags.intr_enable = 1;
921 	for (i = 0; i < ha->num_rx_rings; i++) {
922 	Q81_ENABLE_INTR(ha, i); /* MSI-x i */
927 	qls_mbx_get_link_status(ha);
929 	QL_DPRINT2((ha->pci_dev, "%s:rxr [0x%08x]\n", __func__,
930 	ha->rx_ring[0].cq_db_offset));
931 	QL_DPRINT2((ha->pci_dev, "%s:txr [0x%08x]\n", __func__,
932 	ha->tx_ring[0].wq_db_offset));
	/* Prime consumer index and buffer-queue producer doorbells. */
934 	for (i = 0; i < ha->num_rx_rings; i++) {
935 	Q81_WR_CQ_CONS_IDX(i, 0);
936 	Q81_WR_LBQ_PROD_IDX(i, ha->rx_ring[i].lbq_in);
937 	Q81_WR_SBQ_PROD_IDX(i, ha->rx_ring[i].sbq_in);
939 	QL_DPRINT2((dev, "%s: [wq_idx, cq_idx, lbq_idx, sbq_idx]"
940 	"[0x%08x, 0x%08x, 0x%08x, 0x%08x]\n", __func__,
941 	Q81_RD_WQ_IDX(i), Q81_RD_CQ_IDX(i), Q81_RD_LBQ_IDX(i),
945 	for (i = 0; i < ha->num_rx_rings; i++) {
950 	QL_DPRINT2((ha->pci_dev, "%s:exit\n", __func__));
/*
 * Polls the CONFIG register until (reg & bits) == value; on timeout logs
 * the failure and requests chip recovery.
 */
955 qls_wait_for_config_reg_bits(qla_host_t *ha, uint32_t bits, uint32_t value)
961 		data32 = READ_REG32(ha, Q81_CTL_CONFIG);
963 		if ((data32 & bits) == value)
968 	ha->qla_initiate_recovery = 1;
969 	device_printf(ha->pci_dev, "%s: failed\n", __func__);
/*
 * 40-byte RSS hash key. qls_init_rss() uses all 40 bytes for the IPv6
 * key and the first 16 bytes for the IPv4 key.
 */
973 static uint8_t q81_hash_key[] = {
974 	0xda, 0x56, 0x5a, 0x6d,
975 	0xc2, 0x0e, 0x5b, 0x25,
976 	0x3d, 0x25, 0x67, 0x41,
977 	0xb0, 0x8f, 0xa3, 0x43,
978 	0xcb, 0x2b, 0xca, 0xd0,
979 	0xb4, 0x30, 0x7b, 0xae,
980 	0xa3, 0x2d, 0xcb, 0x77,
981 	0x0c, 0xf2, 0x30, 0x80,
982 	0x3b, 0xb7, 0x42, 0x6a,
983 	0xfa, 0x01, 0xac, 0xbe };
/*
 * Builds the RSS initialization control block (indirection table spread
 * round-robin across RX rings, hash keys from q81_hash_key) in the
 * pre-allocated rss_dma buffer, then loads it into the chip via the ICB
 * access registers and the CONFIG register's LR (load RSS) bit.
 */
986 qls_init_rss(qla_host_t *ha)
988 	q81_rss_icb_t *rss_icb;
993 	rss_icb = ha->rss_dma.dma_b;
995 	bzero(rss_icb, sizeof (q81_rss_icb_t));
997 	rss_icb->flags_base_cq_num = Q81_RSS_ICB_FLAGS_L4K |
998 	Q81_RSS_ICB_FLAGS_L6K | Q81_RSS_ICB_FLAGS_LI |
999 	Q81_RSS_ICB_FLAGS_LB | Q81_RSS_ICB_FLAGS_LM |
1000 	Q81_RSS_ICB_FLAGS_RT4 | Q81_RSS_ICB_FLAGS_RT6;
	/* 10-bit hash mask -> 1024 indirection entries. */
1002 	rss_icb->mask = 0x3FF;
	/* Round-robin CQ assignment; assumes num_rx_rings is a power of two. */
1004 	for (i = 0; i < Q81_RSS_ICB_NUM_INDTBL_ENTRIES; i++) {
1005 	rss_icb->cq_id[i] = (i & (ha->num_rx_rings - 1));
1008 	memcpy(rss_icb->ipv6_rss_hash_key, q81_hash_key, 40);
1009 	memcpy(rss_icb->ipv4_rss_hash_key, q81_hash_key, 16);
	/* Wait for any previous RSS load to finish before starting ours. */
1011 	ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LR, 0);
1014 	goto qls_init_rss_exit;
1016 	ret = qls_sem_lock(ha, Q81_CTL_SEM_MASK_ICB, Q81_CTL_SEM_SET_ICB);
1019 	QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
1020 	goto qls_init_rss_exit;
1023 	value = (uint32_t)ha->rss_dma.dma_addr;
1024 	WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_LO, value);
1026 	value = (uint32_t)(ha->rss_dma.dma_addr >> 32);
1027 	WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_HI, value);
1029 	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_ICB);
1031 	value = (Q81_CTL_CONFIG_LR << Q81_CTL_CONFIG_MASK_SHIFT) |
1034 	WRITE_REG32(ha, Q81_CTL_CONFIG, value);
	/* Hardware clears LR when the ICB load completes. */
1036 	ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LR, 0);
/*
 * Builds and loads the completion-queue ICB for ring `cid`: CQ base and
 * consumer-index addresses, interrupt coalescing parameters, and the
 * large/small buffer queue (LBQ/SBQ) address tables, then triggers the
 * load through the CONFIG register's LCQ bit. On success resets the
 * ring's software indices.
 */
1043 qls_init_comp_queue(qla_host_t *ha, int cid)
1045 	q81_cq_icb_t *cq_icb;
1050 	rxr = &ha->rx_ring[cid];
	/* Per-CQ doorbell pages start 128 pages into the doorbell BAR. */
1052 	rxr->cq_db_offset = ha->vm_pgsize * (128 + cid);
1054 	cq_icb = rxr->cq_icb_vaddr;
1056 	bzero(cq_icb, sizeof (q81_cq_icb_t));
1058 	cq_icb->msix_vector = cid;
1059 	cq_icb->flags = Q81_CQ_ICB_FLAGS_LC |
1060 	Q81_CQ_ICB_FLAGS_LI |
1061 	Q81_CQ_ICB_FLAGS_LL |
1062 	Q81_CQ_ICB_FLAGS_LS |
1063 	Q81_CQ_ICB_FLAGS_LV;
1065 	cq_icb->length_v = NUM_CQ_ENTRIES;
1067 	cq_icb->cq_baddr_lo = (rxr->cq_base_paddr & 0xFFFFFFFF);
1068 	cq_icb->cq_baddr_hi = (rxr->cq_base_paddr >> 32) & 0xFFFFFFFF;
1070 	cq_icb->cqi_addr_lo = (rxr->cqi_paddr & 0xFFFFFFFF);
1071 	cq_icb->cqi_addr_hi = (rxr->cqi_paddr >> 32) & 0xFFFFFFFF;
	/* Interrupt coalescing: packet count and delay thresholds. */
1073 	cq_icb->pkt_idelay = 10;
1074 	cq_icb->idelay = 100;
1076 	cq_icb->lbq_baddr_lo = (rxr->lbq_addr_tbl_paddr & 0xFFFFFFFF);
1077 	cq_icb->lbq_baddr_hi = (rxr->lbq_addr_tbl_paddr >> 32) & 0xFFFFFFFF;
1079 	cq_icb->lbq_bsize = QLA_LGB_SIZE;
1080 	cq_icb->lbq_length = QLA_NUM_LGB_ENTRIES;
1082 	cq_icb->sbq_baddr_lo = (rxr->sbq_addr_tbl_paddr & 0xFFFFFFFF);
1083 	cq_icb->sbq_baddr_hi = (rxr->sbq_addr_tbl_paddr >> 32) & 0xFFFFFFFF;
1085 	cq_icb->sbq_bsize = (uint16_t)ha->msize;
1086 	cq_icb->sbq_length = QLA_NUM_SMB_ENTRIES;
1090 	ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LCQ, 0);
1093 	goto qls_init_comp_queue_exit;
1095 	ret = qls_sem_lock(ha, Q81_CTL_SEM_MASK_ICB, Q81_CTL_SEM_SET_ICB);
1098 	QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
1099 	goto qls_init_comp_queue_exit;
1102 	value = (uint32_t)rxr->cq_icb_paddr;
1103 	WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_LO, value);
1105 	value = (uint32_t)(rxr->cq_icb_paddr >> 32);
1106 	WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_HI, value);
1108 	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_ICB);
	/* Kick the LCQ load, targeting queue number `cid`. */
1110 	value = Q81_CTL_CONFIG_LCQ | Q81_CTL_CONFIG_Q_NUM_MASK;
1111 	value = (value << Q81_CTL_CONFIG_MASK_SHIFT) | Q81_CTL_CONFIG_LCQ;
1112 	value |= (cid << Q81_CTL_CONFIG_Q_NUM_SHIFT);
1113 	WRITE_REG32(ha, Q81_CTL_CONFIG, value);
1115 	ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LCQ, 0);
1118 	rxr->lbq_next = rxr->lbq_free = 0;
1119 	rxr->sbq_next = rxr->sbq_free = 0;
1120 	rxr->rx_free = rxr->rx_next = 0;
	/* Initial producer indices, aligned down to a 16-entry boundary. */
1121 	rxr->lbq_in = (QLA_NUM_LGB_ENTRIES - 1) & ~0xF;
1122 	rxr->sbq_in = (QLA_NUM_SMB_ENTRIES - 1) & ~0xF;
1124 qls_init_comp_queue_exit:
/*
 * Builds and loads the work-queue (TX ring) ICB for ring `wid`: doorbell
 * address/offset, WQ base and consumer-index DMA addresses, then triggers
 * the load via the CONFIG register's LRQ bit and resets the free count.
 */
1131 qls_init_work_queue(qla_host_t *ha, int wid)
1133 	q81_wq_icb_t *wq_icb;
1136 	txr = &ha->tx_ring[wid];
	/* Each WQ gets its own doorbell page within BAR1. */
1138 	txr->wq_db_addr = (struct resource *)((uint8_t *)ha->pci_reg1
1139 	+ (ha->vm_pgsize * wid));
1141 	txr->wq_db_offset = (ha->vm_pgsize * wid);
1143 	wq_icb = txr->wq_icb_vaddr;
1144 	bzero(wq_icb, sizeof (q81_wq_icb_t));
1146 	wq_icb->length_v = NUM_TX_DESCRIPTORS |
1149 	wq_icb->flags = Q81_WQ_ICB_FLAGS_LO | Q81_WQ_ICB_FLAGS_LI |
1150 	Q81_WQ_ICB_FLAGS_LB | Q81_WQ_ICB_FLAGS_LC;
	/* Completions for this WQ are steered to the same-numbered CQ. */
1152 	wq_icb->wqcqid_rss = wid;
1154 	wq_icb->baddr_lo = txr->wq_paddr & 0xFFFFFFFF;
1155 	wq_icb->baddr_hi = (txr->wq_paddr >> 32)& 0xFFFFFFFF;
1157 	wq_icb->ci_addr_lo = txr->txr_cons_paddr & 0xFFFFFFFF;
1158 	wq_icb->ci_addr_hi = (txr->txr_cons_paddr >> 32)& 0xFFFFFFFF;
1160 	ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LRQ, 0);
1163 	goto qls_init_wq_exit;
1165 	ret = qls_sem_lock(ha, Q81_CTL_SEM_MASK_ICB, Q81_CTL_SEM_SET_ICB);
1168 	QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
1169 	goto qls_init_wq_exit;
1172 	value = (uint32_t)txr->wq_icb_paddr;
1173 	WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_LO, value);
1175 	value = (uint32_t)(txr->wq_icb_paddr >> 32);
1176 	WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_HI, value);
1178 	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_ICB);
	/* Kick the LRQ load, targeting work queue `wid`. */
1180 	value = Q81_CTL_CONFIG_LRQ | Q81_CTL_CONFIG_Q_NUM_MASK;
1181 	value = (value << Q81_CTL_CONFIG_MASK_SHIFT) | Q81_CTL_CONFIG_LRQ;
1182 	value |= (wid << Q81_CTL_CONFIG_Q_NUM_SHIFT);
1183 	WRITE_REG32(ha, Q81_CTL_CONFIG, value);
1185 	ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LRQ, 0);
1187 	txr->txr_free = NUM_TX_DESCRIPTORS;
/*
 * Re-programs every non-zero entry of the software multicast table into
 * the hardware (used after reset/reinit). An all-zero MAC marks a free
 * slot and is skipped.
 */
1196 qls_hw_add_all_mcast(qla_host_t *ha)
1200 	nmcast = ha->nmcast;
1202 	for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
1203 	if ((ha->mcast[i].addr[0] != 0) ||
1204 	(ha->mcast[i].addr[1] != 0) ||
1205 	(ha->mcast[i].addr[2] != 0) ||
1206 	(ha->mcast[i].addr[3] != 0) ||
1207 	(ha->mcast[i].addr[4] != 0) ||
1208 	(ha->mcast[i].addr[5] != 0)) {
1209 	if (qls_config_mcast_mac_addr(ha, ha->mcast[i].addr,
1211 	device_printf(ha->pci_dev, "%s: failed\n",
/*
 * Adds one multicast MAC: no-op if already present, otherwise programs
 * it into the first free (all-zero) software/hardware table slot and
 * records it in ha->mcast[].
 */
1223 qls_hw_add_mcast(qla_host_t *ha, uint8_t *mta)
1227 	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
1228 	if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0)
1229 	return 0; /* its been already added */
1232 	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
1233 	if ((ha->mcast[i].addr[0] == 0) &&
1234 	(ha->mcast[i].addr[1] == 0) &&
1235 	(ha->mcast[i].addr[2] == 0) &&
1236 	(ha->mcast[i].addr[3] == 0) &&
1237 	(ha->mcast[i].addr[4] == 0) &&
1238 	(ha->mcast[i].addr[5] == 0)) {
1239 	if (qls_config_mcast_mac_addr(ha, mta, 1, i))
1242 	bcopy(mta, ha->mcast[i].addr, Q8_MAC_ADDR_LEN);
/*
 * Removes one multicast MAC: finds its table slot, unbinds it from the
 * hardware (add_mac = 0), and zeroes the software entry to free the slot.
 */
1252 qls_hw_del_mcast(qla_host_t *ha, uint8_t *mta)
1256 	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
1257 	if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) {
1258 	if (qls_config_mcast_mac_addr(ha, mta, 0, i))
1261 	ha->mcast[i].addr[0] = 0;
1262 	ha->mcast[i].addr[1] = 0;
1263 	ha->mcast[i].addr[2] = 0;
1264 	ha->mcast[i].addr[3] = 0;
1265 	ha->mcast[i].addr[4] = 0;
1266 	ha->mcast[i].addr[5] = 0;
1277 * Name: qls_hw_set_multi
1278 * Function: Sets the Multicast Addresses provided the host O.S into the
1279 * hardware (for the given interface)
/*
 * Walks the caller-supplied flat list of mcnt MACs (Q8_MAC_ADDR_LEN bytes
 * apiece) adding or deleting each, per the (elided) add_multi flag.
 */
1282 qls_hw_set_multi(qla_host_t *ha, uint8_t *mta, uint32_t mcnt,
1287 	for (i = 0; i < mcnt; i++) {
1289 	if (qls_hw_add_mcast(ha, mta))
1292 	if (qls_hw_del_mcast(ha, mta))
1296 	mta += Q8_MAC_ADDR_LEN;
/*
 * Samples the per-function physical-link bit from the STATUS register
 * (PL0 for even PCI functions, PL1 for odd) and reports transitions to
 * the network stack via if_link_state_change(). Skipped when the
 * interface is not running.
 */
1302 qls_update_link_state(qla_host_t *ha)
1304 	uint32_t link_state;
1305 	uint32_t prev_link_state;
1307 	if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1311 	link_state = READ_REG32(ha, Q81_CTL_STATUS);
1313 	prev_link_state = ha->link_up;
1315 	if ((ha->pci_func & 0x1) == 0)
1316 	ha->link_up = ((link_state & Q81_CTL_STATUS_PL0)? 1 : 0);
1318 	ha->link_up = ((link_state & Q81_CTL_STATUS_PL1)? 1 : 0);
	/* Only notify the stack on an actual transition. */
1320 	if (prev_link_state != ha->link_up) {
1322 	if_link_state_change(ha->ifp, LINK_STATE_UP);
1324 	if_link_state_change(ha->ifp, LINK_STATE_DOWN);
/*
 * Frees the work-queue and private-buffer DMA regions of one TX ring,
 * guarded by per-region ownership flags that are cleared after free.
 */
1331 qls_free_tx_ring_dma(qla_host_t *ha, int r_idx)
1333 	if (ha->tx_ring[r_idx].flags.wq_dma) {
1334 	qls_free_dmabuf(ha, &ha->tx_ring[r_idx].wq_dma);
1335 	ha->tx_ring[r_idx].flags.wq_dma = 0;
1338 	if (ha->tx_ring[r_idx].flags.privb_dma) {
1339 	qls_free_dmabuf(ha, &ha->tx_ring[r_idx].privb_dma);
1340 	ha->tx_ring[r_idx].flags.privb_dma = 0;
/*
 * Tears down all TX DMA state: per-ring DMA regions, every per-descriptor
 * busdma map, and finally the shared TX DMA tag.
 */
1346 qls_free_tx_dma(qla_host_t *ha)
1351 	for (i = 0; i < ha->num_tx_rings; i++) {
1352 	qls_free_tx_ring_dma(ha, i);
1354 	for (j = 0; j < NUM_TX_DESCRIPTORS; j++) {
1355 	txb = &ha->tx_ring[i].tx_buf[j];
1358 	bus_dmamap_destroy(ha->tx_tag, txb->map);
1363 	if (ha->tx_tag != NULL) {
1364 	bus_dma_tag_destroy(ha->tx_tag);
/*
 * Allocates one TX ring's DMA memory: the work queue itself plus a
 * private buffer that is carved into the WQ ICB (first half-page), the
 * consumer-index word (second half-page), and per-descriptor OAL blocks
 * laid out contiguously after it.
 */
1372 qls_alloc_tx_ring_dma(qla_host_t *ha, int ridx)
1378 	device_t dev = ha->pci_dev;
1380 	ha->tx_ring[ridx].wq_dma.alignment = 8;
1381 	ha->tx_ring[ridx].wq_dma.size =
1382 	NUM_TX_DESCRIPTORS * (sizeof (q81_tx_cmd_t));
1384 	ret = qls_alloc_dmabuf(ha, &ha->tx_ring[ridx].wq_dma);
1387 	device_printf(dev, "%s: [%d] txr failed\n", __func__, ridx);
1388 	goto qls_alloc_tx_ring_dma_exit;
1390 	ha->tx_ring[ridx].flags.wq_dma = 1;
1392 	ha->tx_ring[ridx].privb_dma.alignment = 8;
1393 	ha->tx_ring[ridx].privb_dma.size = QLA_TX_PRIVATE_BSIZE;
1395 	ret = qls_alloc_dmabuf(ha, &ha->tx_ring[ridx].privb_dma);
1398 	device_printf(dev, "%s: [%d] oalb failed\n", __func__, ridx);
1399 	goto qls_alloc_tx_ring_dma_exit;
1402 	ha->tx_ring[ridx].flags.privb_dma = 1;
1404 	ha->tx_ring[ridx].wq_vaddr = ha->tx_ring[ridx].wq_dma.dma_b;
1405 	ha->tx_ring[ridx].wq_paddr = ha->tx_ring[ridx].wq_dma.dma_addr;
	/* Carve the private buffer: ICB at offset 0, consumer index at +2K. */
1407 	v_addr = ha->tx_ring[ridx].privb_dma.dma_b;
1408 	p_addr = ha->tx_ring[ridx].privb_dma.dma_addr;
1410 	ha->tx_ring[ridx].wq_icb_vaddr = v_addr;
1411 	ha->tx_ring[ridx].wq_icb_paddr = p_addr;
1413 	ha->tx_ring[ridx].txr_cons_vaddr =
1414 	(uint32_t *)(v_addr + (PAGE_SIZE >> 1));
1415 	ha->tx_ring[ridx].txr_cons_paddr = p_addr + (PAGE_SIZE >> 1);
1417 	v_addr = v_addr + (PAGE_SIZE >> 1);
1418 	p_addr = p_addr + (PAGE_SIZE >> 1);
1420 	txb = ha->tx_ring[ridx].tx_buf;
	/* One OAL block per TX descriptor for overflow segment lists. */
1422 	for (i = 0; i < NUM_TX_DESCRIPTORS; i++) {
1423 	txb[i].oal_vaddr = v_addr;
1424 	txb[i].oal_paddr = p_addr;
1426 	v_addr = v_addr + QLA_OAL_BLK_SIZE;
1427 	p_addr = p_addr + QLA_OAL_BLK_SIZE;
1430 qls_alloc_tx_ring_dma_exit:
/*
 * Creates the shared TX busdma tag (sized for the largest TSO frame),
 * then for each ring allocates its DMA regions and a busdma map per TX
 * descriptor. Any failure unwinds everything via qls_free_tx_dma().
 */
1435 qls_alloc_tx_dma(qla_host_t *ha)
1441 	if (bus_dma_tag_create(NULL,	/* parent */
1442 	1, 0,	/* alignment, bounds */
1443 	BUS_SPACE_MAXADDR,	/* lowaddr */
1444 	BUS_SPACE_MAXADDR,	/* highaddr */
1445 	NULL, NULL,	/* filter, filterarg */
1446 	QLA_MAX_TSO_FRAME_SIZE,	/* maxsize */
1447 	QLA_MAX_SEGMENTS,	/* nsegments */
1448 	PAGE_SIZE,	/* maxsegsize */
1449 	BUS_DMA_ALLOCNOW,	/* flags */
1450 	NULL,	/* lockfunc */
1451 	NULL,	/* lockfuncarg */
1453 	device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n",
1458 	for (i = 0; i < ha->num_tx_rings; i++) {
1459 	ret = qls_alloc_tx_ring_dma(ha, i);
1462 	qls_free_tx_dma(ha);
1466 	for (j = 0; j < NUM_TX_DESCRIPTORS; j++) {
1467 	txb = &ha->tx_ring[i].tx_buf[j];
1469 	ret = bus_dmamap_create(ha->tx_tag,
1470 	BUS_DMA_NOWAIT, &txb->map);
1472 	ha->err_tx_dmamap_create++;
1473 	device_printf(ha->pci_dev,
1474 	"%s: bus_dmamap_create failed[%d, %d, %d]\n",
1475 	__func__, ret, i, j);
1477 	qls_free_tx_dma(ha);
/*
 * qls_free_rss_dma
 *	Release the RSS DMA buffer and clear its allocated flag.
 */
1488 qls_free_rss_dma(qla_host_t *ha)
1490 qls_free_dmabuf(ha, &ha->rss_dma);
1491 ha->flags.rss_dma = 0;
/*
 * qls_alloc_rss_dma
 *	Allocate one page of 4-byte-aligned DMA memory for the RSS
 *	configuration block and mark it allocated on success.
 */
1495 qls_alloc_rss_dma(qla_host_t *ha)
1499 ha->rss_dma.alignment = 4;
1500 ha->rss_dma.size = PAGE_SIZE;
1502 ret = qls_alloc_dmabuf(ha, &ha->rss_dma);
1505 device_printf(ha->pci_dev, "%s: failed\n", __func__);
1507 ha->flags.rss_dma = 1;
/*
 * qls_free_mpi_dma
 *	Release the MPI DMA buffer and clear its allocated flag.
 */
1513 qls_free_mpi_dma(qla_host_t *ha)
1515 qls_free_dmabuf(ha, &ha->mpi_dma);
1516 ha->flags.mpi_dma = 0;
/*
 * qls_alloc_mpi_dma
 *	Allocate the MPI DMA buffer (0x4000 * 4 = 64KB, 4-byte aligned).
 *	This buffer is used by qls_mbx_dump_risc_ram() as the bounce area
 *	for RISC RAM dumps (see that function below).
 */
1520 qls_alloc_mpi_dma(qla_host_t *ha)
1524 ha->mpi_dma.alignment = 4;
1525 ha->mpi_dma.size = (0x4000 * 4);
1527 ret = qls_alloc_dmabuf(ha, &ha->mpi_dma);
1529 device_printf(ha->pci_dev, "%s: failed\n", __func__);
1531 ha->flags.mpi_dma = 1;
/*
 * qls_free_rx_ring_dma
 *	Free all DMA buffers belonging to RX ring 'ridx': completion queue,
 *	large buffer queue, small buffer queue and large buffer area.  Each
 *	is released only if its allocated flag is set, and the flag is then
 *	cleared — safe to call on a partially-allocated ring.
 */
1537 qls_free_rx_ring_dma(qla_host_t *ha, int ridx)
1539 if (ha->rx_ring[ridx].flags.cq_dma) {
1540 qls_free_dmabuf(ha, &ha->rx_ring[ridx].cq_dma);
1541 ha->rx_ring[ridx].flags.cq_dma = 0;
1544 if (ha->rx_ring[ridx].flags.lbq_dma) {
1545 qls_free_dmabuf(ha, &ha->rx_ring[ridx].lbq_dma);
1546 ha->rx_ring[ridx].flags.lbq_dma = 0;
1549 if (ha->rx_ring[ridx].flags.sbq_dma) {
1550 qls_free_dmabuf(ha, &ha->rx_ring[ridx].sbq_dma);
1551 ha->rx_ring[ridx].flags.sbq_dma = 0;
1554 if (ha->rx_ring[ridx].flags.lb_dma) {
1555 qls_free_dmabuf(ha, &ha->rx_ring[ridx].lb_dma);
1556 ha->rx_ring[ridx].flags.lb_dma = 0;
/*
 * qls_free_rx_dma
 *	Free the per-ring RX DMA memory for every RX ring, then destroy
 *	the RX DMA tag if one was created.
 */
1562 qls_free_rx_dma(qla_host_t *ha)
1566 for (i = 0; i < ha->num_rx_rings; i++) {
1567 qls_free_rx_ring_dma(ha, i);
1570 if (ha->rx_tag != NULL) {
1571 bus_dma_tag_destroy(ha->rx_tag);
/*
 * qls_alloc_rx_ring_dma
 *	Allocate and lay out the DMA memory for one RX ring (index 'ridx'):
 *	completion queue (+ ICB and index register carved from its tail),
 *	large buffer queue (address table + queue), small buffer queue
 *	(address table + queue) and the large buffer area itself.  Finally
 *	the LBQ/SBQ address tables and queues are populated with the
 *	64-bit physical addresses split into lo/hi 32-bit halves.
 *	NOTE(review): elided chunk — locals, error checks and returns
 *	between the visible lines are not shown here.
 */
1579 qls_alloc_rx_ring_dma(qla_host_t *ha, int ridx)
1584 volatile q81_bq_addr_e_t *bq_e;
1585 device_t dev = ha->pci_dev;
/* CQ: entries plus one extra page used for the ICB and index word. */
1587 ha->rx_ring[ridx].cq_dma.alignment = 128;
1588 ha->rx_ring[ridx].cq_dma.size =
1589 (NUM_CQ_ENTRIES * (sizeof (q81_cq_e_t))) + PAGE_SIZE;
1591 ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].cq_dma);
1594 device_printf(dev, "%s: [%d] cq failed\n", __func__, ridx);
1595 goto qls_alloc_rx_ring_dma_exit;
1597 ha->rx_ring[ridx].flags.cq_dma = 1;
1599 ha->rx_ring[ridx].lbq_dma.alignment = 8;
1600 ha->rx_ring[ridx].lbq_dma.size = QLA_LGBQ_AND_TABLE_SIZE;
1602 ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].lbq_dma);
1605 device_printf(dev, "%s: [%d] lbq failed\n", __func__, ridx);
1606 goto qls_alloc_rx_ring_dma_exit;
1608 ha->rx_ring[ridx].flags.lbq_dma = 1;
1610 ha->rx_ring[ridx].sbq_dma.alignment = 8;
1611 ha->rx_ring[ridx].sbq_dma.size = QLA_SMBQ_AND_TABLE_SIZE;
1613 ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].sbq_dma);
1616 device_printf(dev, "%s: [%d] sbq failed\n", __func__, ridx);
1617 goto qls_alloc_rx_ring_dma_exit;
1619 ha->rx_ring[ridx].flags.sbq_dma = 1;
1621 ha->rx_ring[ridx].lb_dma.alignment = 8;
1622 ha->rx_ring[ridx].lb_dma.size = (QLA_LGB_SIZE * QLA_NUM_LGB_ENTRIES);
1624 ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].lb_dma);
1626 device_printf(dev, "%s: [%d] lb failed\n", __func__, ridx);
1627 goto qls_alloc_rx_ring_dma_exit;
1629 ha->rx_ring[ridx].flags.lb_dma = 1;
/* Zero everything before handing addresses to the hardware. */
1631 bzero(ha->rx_ring[ridx].cq_dma.dma_b, ha->rx_ring[ridx].cq_dma.size);
1632 bzero(ha->rx_ring[ridx].lbq_dma.dma_b, ha->rx_ring[ridx].lbq_dma.size);
1633 bzero(ha->rx_ring[ridx].sbq_dma.dma_b, ha->rx_ring[ridx].sbq_dma.size);
1634 bzero(ha->rx_ring[ridx].lb_dma.dma_b, ha->rx_ring[ridx].lb_dma.size);
1636 /* completion queue */
1637 ha->rx_ring[ridx].cq_base_vaddr = ha->rx_ring[ridx].cq_dma.dma_b;
1638 ha->rx_ring[ridx].cq_base_paddr = ha->rx_ring[ridx].cq_dma.dma_addr;
1640 v_addr = ha->rx_ring[ridx].cq_dma.dma_b;
1641 p_addr = ha->rx_ring[ridx].cq_dma.dma_addr;
/* Skip past the CQ entries to the extra page allocated above. */
1643 v_addr = v_addr + (NUM_CQ_ENTRIES * (sizeof (q81_cq_e_t)));
1644 p_addr = p_addr + (NUM_CQ_ENTRIES * (sizeof (q81_cq_e_t)));
1646 /* completion queue icb */
1647 ha->rx_ring[ridx].cq_icb_vaddr = v_addr;
1648 ha->rx_ring[ridx].cq_icb_paddr = p_addr;
/* Index register sits a quarter page past the ICB. */
1650 v_addr = v_addr + (PAGE_SIZE >> 2);
1651 p_addr = p_addr + (PAGE_SIZE >> 2);
1653 /* completion queue index register */
1654 ha->rx_ring[ridx].cqi_vaddr = (uint32_t *)v_addr;
1655 ha->rx_ring[ridx].cqi_paddr = p_addr;
1657 v_addr = ha->rx_ring[ridx].lbq_dma.dma_b;
1658 p_addr = ha->rx_ring[ridx].lbq_dma.dma_addr;
1660 /* large buffer queue address table */
1661 ha->rx_ring[ridx].lbq_addr_tbl_vaddr = v_addr;
1662 ha->rx_ring[ridx].lbq_addr_tbl_paddr = p_addr;
1664 /* large buffer queue */
1665 ha->rx_ring[ridx].lbq_vaddr = v_addr + PAGE_SIZE;
1666 ha->rx_ring[ridx].lbq_paddr = p_addr + PAGE_SIZE;
1668 v_addr = ha->rx_ring[ridx].sbq_dma.dma_b;
1669 p_addr = ha->rx_ring[ridx].sbq_dma.dma_addr;
1671 /* small buffer queue address table */
1672 ha->rx_ring[ridx].sbq_addr_tbl_vaddr = v_addr;
1673 ha->rx_ring[ridx].sbq_addr_tbl_paddr = p_addr;
1675 /* small buffer queue */
1676 ha->rx_ring[ridx].sbq_vaddr = v_addr + PAGE_SIZE;
1677 ha->rx_ring[ridx].sbq_paddr = p_addr + PAGE_SIZE;
1679 ha->rx_ring[ridx].lb_vaddr = ha->rx_ring[ridx].lb_dma.dma_b;
1680 ha->rx_ring[ridx].lb_paddr = ha->rx_ring[ridx].lb_dma.dma_addr;
1682 /* Initialize Large Buffer Queue Table */
1684 p_addr = ha->rx_ring[ridx].lbq_paddr;
1685 bq_e = ha->rx_ring[ridx].lbq_addr_tbl_vaddr;
/* Table entries hold 64-bit addresses as lo/hi 32-bit pairs. */
1687 bq_e->addr_lo = p_addr & 0xFFFFFFFF;
1688 bq_e->addr_hi = (p_addr >> 32) & 0xFFFFFFFF;
1690 p_addr = ha->rx_ring[ridx].lb_paddr;
1691 bq_e = ha->rx_ring[ridx].lbq_vaddr;
/* Point each LBQ element at its QLA_LGB_SIZE slice of lb_dma. */
1693 for (i = 0; i < QLA_NUM_LGB_ENTRIES; i++) {
1694 bq_e->addr_lo = p_addr & 0xFFFFFFFF;
1695 bq_e->addr_hi = (p_addr >> 32) & 0xFFFFFFFF;
1697 p_addr = p_addr + QLA_LGB_SIZE;
1701 /* Initialize Small Buffer Queue Table */
1703 p_addr = ha->rx_ring[ridx].sbq_paddr;
1704 bq_e = ha->rx_ring[ridx].sbq_addr_tbl_vaddr;
/* One table entry per page of the small buffer queue. */
1706 for (i =0; i < (QLA_SBQ_SIZE/QLA_PAGE_SIZE); i++) {
1707 bq_e->addr_lo = p_addr & 0xFFFFFFFF;
1708 bq_e->addr_hi = (p_addr >> 32) & 0xFFFFFFFF;
1710 p_addr = p_addr + QLA_PAGE_SIZE;
1714 qls_alloc_rx_ring_dma_exit:
/*
 * qls_alloc_rx_dma
 *	Create the RX DMA tag (single segment up to MJUM9BYTES, i.e.
 *	9K jumbo cluster sized) and allocate per-ring RX DMA memory,
 *	unwinding everything via qls_free_rx_dma() on failure.
 *	NOTE(review): elided chunk — locals, success/return paths and
 *	some error branches are not visible here.
 */
1719 qls_alloc_rx_dma(qla_host_t *ha)
1724 if (bus_dma_tag_create(NULL, /* parent */
1725 1, 0, /* alignment, bounds */
1726 BUS_SPACE_MAXADDR, /* lowaddr */
1727 BUS_SPACE_MAXADDR, /* highaddr */
1728 NULL, NULL, /* filter, filterarg */
1729 MJUM9BYTES, /* maxsize */
1731 MJUM9BYTES, /* maxsegsize */
1732 BUS_DMA_ALLOCNOW, /* flags */
1733 NULL, /* lockfunc */
1734 NULL, /* lockfuncarg */
1736 device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
1742 for (i = 0; i < ha->num_rx_rings; i++) {
1743 ret = qls_alloc_rx_ring_dma(ha, i);
1746 qls_free_rx_dma(ha);
/*
 * qls_wait_for_flash_ready
 *	Poll the flash address register until the RDY bit is set, bailing
 *	out early if the ERR bit is seen; delays 100us between polls.
 *	NOTE(review): elided chunk — the loop header, retry count and
 *	return values are not visible here.
 */
1755 qls_wait_for_flash_ready(qla_host_t *ha)
1761 data32 = READ_REG32(ha, Q81_CTL_FLASH_ADDR);
/* Hardware reported an error — give up immediately. */
1763 if (data32 & Q81_CTL_FLASH_ADDR_ERR)
1764 goto qls_wait_for_flash_ready_exit;
/* Ready — success path (elided) follows. */
1766 if (data32 & Q81_CTL_FLASH_ADDR_RDY)
1769 QLA_USEC_DELAY(100);
1772 qls_wait_for_flash_ready_exit:
1773 QL_DPRINT1((ha->pci_dev, "%s: failed\n", __func__));
1779 * Name: qls_rd_flash32
1780 * Function: Read Flash Memory
/*
 * Reads one 32-bit word at 'addr' from flash into *data: wait for the
 * flash interface to be ready, issue the read via the address register,
 * wait again, then fetch the word from the data register.
 */
1783 qls_rd_flash32(qla_host_t *ha, uint32_t addr, uint32_t *data)
1787 ret = qls_wait_for_flash_ready(ha);
/* Issue the read command for 'addr'. */
1792 WRITE_REG32(ha, Q81_CTL_FLASH_ADDR, (addr | Q81_CTL_FLASH_ADDR_R));
1794 ret = qls_wait_for_flash_ready(ha);
1799 *data = READ_REG32(ha, Q81_CTL_FLASH_DATA);
/*
 * qls_flash_validate
 *	Validate the flash image previously read into ha->flash: the
 *	4-byte id must match 'signature', and the 16-bit sum over the whole
 *	q81_flash_t structure must check out (the summed value an elided
 *	comparison verifies — presumably it must be zero; TODO confirm).
 */
1805 qls_flash_validate(qla_host_t *ha, const char *signature)
1807 uint16_t csum16 = 0;
1811 if (bcmp(ha->flash.id, signature, 4)) {
1812 QL_DPRINT1((ha->pci_dev, "%s: invalid signature "
1813 "%x:%x:%x:%x %s\n", __func__, ha->flash.id[0],
1814 ha->flash.id[1], ha->flash.id[2], ha->flash.id[3],
/* Sum the structure as 16-bit words. */
1819 data16 = (uint16_t *)&ha->flash;
1821 for (i = 0; i < (sizeof (q81_flash_t) >> 1); i++) {
1822 csum16 += *data16++;
1826 QL_DPRINT1((ha->pci_dev, "%s: invalid checksum\n", __func__));
1833 qls_rd_nic_params(qla_host_t *ha)
1839 if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_FLASH, Q81_CTL_SEM_SET_FLASH)) {
1840 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
1844 if ((ha->pci_func & 0x1) == 0)
1845 faddr = Q81_F0_FLASH_OFFSET >> 2;
1847 faddr = Q81_F1_FLASH_OFFSET >> 2;
1849 qflash = (uint32_t *)&ha->flash;
1851 for (i = 0; i < (sizeof(q81_flash_t) >> 2) ; i++) {
1852 ret = qls_rd_flash32(ha, faddr, qflash);
1855 goto qls_rd_flash_data_exit;
1861 QL_DUMP_BUFFER8(ha, __func__, (&ha->flash), (sizeof (q81_flash_t)));
1863 ret = qls_flash_validate(ha, Q81_FLASH_ID);
1866 goto qls_rd_flash_data_exit;
1868 bcopy(ha->flash.mac_addr0, ha->mac_addr, ETHER_ADDR_LEN);
1870 QL_DPRINT1((ha->pci_dev, "%s: mac %02x:%02x:%02x:%02x:%02x:%02x\n",
1871 __func__, ha->mac_addr[0], ha->mac_addr[1], ha->mac_addr[2],
1872 ha->mac_addr[3], ha->mac_addr[4], ha->mac_addr[5]));
1874 qls_rd_flash_data_exit:
1876 qls_sem_unlock(ha, Q81_CTL_SEM_MASK_FLASH);
/*
 * qls_sem_lock
 *	Try to acquire a hardware semaphore by writing (mask|value) and
 *	reading back; retries up to 'count' times with a 100us delay
 *	(loop body elided).  On exhaustion, flags recovery.
 */
1882 qls_sem_lock(qla_host_t *ha, uint32_t mask, uint32_t value)
1884 uint32_t count = 30;
1888 WRITE_REG32(ha, Q81_CTL_SEMAPHORE, (mask|value));
1890 data = READ_REG32(ha, Q81_CTL_SEMAPHORE);
1895 QLA_USEC_DELAY(100);
/* Could not get the semaphore — ask for adapter recovery. */
1898 ha->qla_initiate_recovery = 1;
/*
 * qls_sem_unlock
 *	Release a hardware semaphore by writing just the mask bits.
 */
1903 qls_sem_unlock(qla_host_t *ha, uint32_t mask)
1905 WRITE_REG32(ha, Q81_CTL_SEMAPHORE, mask);
/*
 * qls_wait_for_proc_addr_ready
 *	Poll the processor address register for RDY, aborting on ERR;
 *	100us between polls.  On failure, flags adapter recovery.
 *	NOTE(review): loop header and return values are elided.
 */
1909 qls_wait_for_proc_addr_ready(qla_host_t *ha)
1915 data32 = READ_REG32(ha, Q81_CTL_PROC_ADDR);
1917 if (data32 & Q81_CTL_PROC_ADDR_ERR)
1918 goto qls_wait_for_proc_addr_ready_exit;
1920 if (data32 & Q81_CTL_PROC_ADDR_RDY)
1923 QLA_USEC_DELAY(100);
1926 qls_wait_for_proc_addr_ready_exit:
1927 QL_DPRINT1((ha->pci_dev, "%s: failed\n", __func__));
/* Interface wedged — request adapter recovery. */
1929 ha->qla_initiate_recovery = 1;
/*
 * qls_proc_addr_rd_reg
 *	Read an indirect processor register: wait for ready, write the
 *	(module | reg | READ) command to the address register, wait again,
 *	then fetch the result from the data register into *data.
 */
1934 qls_proc_addr_rd_reg(qla_host_t *ha, uint32_t addr_module, uint32_t reg,
1940 ret = qls_wait_for_proc_addr_ready(ha);
1943 goto qls_proc_addr_rd_reg_exit;
1945 value = addr_module | reg | Q81_CTL_PROC_ADDR_READ;
1947 WRITE_REG32(ha, Q81_CTL_PROC_ADDR, value);
1949 ret = qls_wait_for_proc_addr_ready(ha);
1952 goto qls_proc_addr_rd_reg_exit;
1954 *data = READ_REG32(ha, Q81_CTL_PROC_DATA);
1956 qls_proc_addr_rd_reg_exit:
/*
 * qls_proc_addr_wr_reg
 *	Write an indirect processor register: wait for ready, stage the
 *	data word, then write the (module | reg) address to trigger the
 *	write and wait for completion.
 */
1961 qls_proc_addr_wr_reg(qla_host_t *ha, uint32_t addr_module, uint32_t reg,
1967 ret = qls_wait_for_proc_addr_ready(ha);
1970 goto qls_proc_addr_wr_reg_exit;
/* Data must be staged before the address/command write. */
1972 WRITE_REG32(ha, Q81_CTL_PROC_DATA, data);
1974 value = addr_module | reg;
1976 WRITE_REG32(ha, Q81_CTL_PROC_ADDR, value);
1978 ret = qls_wait_for_proc_addr_ready(ha);
1980 qls_proc_addr_wr_reg_exit:
/*
 * qls_hw_nic_reset
 *	Reset the NIC function by writing the function-reset bit (with its
 *	mask) to the reset register, then polling until the hardware clears
 *	the bit; prints a diagnostic if it never clears.
 *	NOTE(review): loop header, timeout count and returns are elided.
 */
1985 qls_hw_nic_reset(qla_host_t *ha)
1989 device_t dev = ha->pci_dev;
1993 data = (Q81_CTL_RESET_FUNC << Q81_CTL_RESET_MASK_SHIFT) |
1995 WRITE_REG32(ha, Q81_CTL_RESET, data);
1999 data = READ_REG32(ha, Q81_CTL_RESET);
/* Hardware clears the function-reset bit when reset completes. */
2000 if ((data & Q81_CTL_RESET_FUNC) == 0)
2005 device_printf(dev, "%s: Bit 15 not cleared after Reset\n",
/*
 * qls_hw_reset
 *	Full hardware reset sequence.  If the hardware was never
 *	initialized, a bare NIC function reset suffices.  Otherwise:
 *	clear the routing table, tell management firmware to stop, wait
 *	for the FIFO-empty status, reset the NIC function, and finally
 *	resume management control.
 *	NOTE(review): loop headers, counters and returns are elided.
 */
2013 qls_hw_reset(qla_host_t *ha)
2015 device_t dev = ha->pci_dev;
2020 QL_DPRINT2((ha->pci_dev, "%s:enter[%d]\n", __func__, ha->hw_init));
/* Never initialized: plain function reset is enough. */
2022 if (ha->hw_init == 0) {
2023 ret = qls_hw_nic_reset(ha);
2024 goto qls_hw_reset_exit;
2027 ret = qls_clear_routing_table(ha);
2029 goto qls_hw_reset_exit;
/* Hand control to management firmware while we reset. */
2031 ret = qls_mbx_set_mgmt_ctrl(ha, Q81_MBX_SET_MGMT_CTL_STOP);
2033 goto qls_hw_reset_exit;
2036 * Wait for FIFO to empty
2040 data = READ_REG32(ha, Q81_CTL_STATUS);
2041 if (data & Q81_CTL_STATUS_NFE)
2043 qls_mdelay(__func__, 100);
2046 device_printf(dev, "%s: NFE bit not set\n", __func__);
2047 goto qls_hw_reset_exit;
/* Poll management control until FIFO empty + mgmt set. */
2052 (void)qls_mbx_get_mgmt_ctrl(ha, &data);
2054 if ((data & Q81_MBX_GET_MGMT_CTL_FIFO_EMPTY) &&
2055 (data & Q81_MBX_GET_MGMT_CTL_SET_MGMT))
2057 qls_mdelay(__func__, 100);
2060 goto qls_hw_reset_exit;
2063 * Reset the NIC function
2065 ret = qls_hw_nic_reset(ha);
2067 goto qls_hw_reset_exit;
/* Give control back to the driver. */
2069 ret = qls_mbx_set_mgmt_ctrl(ha, Q81_MBX_SET_MGMT_CTL_RESUME);
2073 device_printf(dev, "%s: failed\n", __func__);
2079 * MPI Related Functions
/*
 * qls_mpi_risc_rd_reg
 *	Read an MPI RISC register via the indirect processor-address
 *	interface (thin wrapper over qls_proc_addr_rd_reg).
 */
2082 qls_mpi_risc_rd_reg(qla_host_t *ha, uint32_t reg, uint32_t *data)
2086 ret = qls_proc_addr_rd_reg(ha, Q81_CTL_PROC_ADDR_MPI_RISC,
/*
 * qls_mpi_risc_wr_reg
 *	Write an MPI RISC register via the indirect processor-address
 *	interface (thin wrapper over qls_proc_addr_wr_reg).
 */
2092 qls_mpi_risc_wr_reg(qla_host_t *ha, uint32_t reg, uint32_t data)
2096 ret = qls_proc_addr_wr_reg(ha, Q81_CTL_PROC_ADDR_MPI_RISC,
/*
 * qls_mbx_rd_reg
 *	Read outgoing mailbox register 'reg', offsetting by this PCI
 *	function's mailbox-out base register before the RISC read.
 */
2102 qls_mbx_rd_reg(qla_host_t *ha, uint32_t reg, uint32_t *data)
2106 if ((ha->pci_func & 0x1) == 0)
2107 reg += Q81_FUNC0_MBX_OUT_REG0;
2109 reg += Q81_FUNC1_MBX_OUT_REG0;
2111 ret = qls_mpi_risc_rd_reg(ha, reg, data);
/*
 * qls_mbx_wr_reg
 *	Write incoming mailbox register 'reg', offsetting by this PCI
 *	function's mailbox-in base register before the RISC write.
 */
2117 qls_mbx_wr_reg(qla_host_t *ha, uint32_t reg, uint32_t data)
2121 if ((ha->pci_func & 0x1) == 0)
2122 reg += Q81_FUNC0_MBX_IN_REG0;
2124 reg += Q81_FUNC1_MBX_IN_REG0;
2126 ret = qls_mpi_risc_wr_reg(ha, reg, data);
/*
 * qls_mbx_cmd
 *	Issue a mailbox command to the MPI firmware and collect the reply.
 *	Writes i_count words from in_mbx into the mailbox-in registers
 *	(under the NIC_RCV proc-addr semaphore), rings the host-to-RISC
 *	interrupt, then polls (interrupts disabled path) or consumes the
 *	interrupt-handler-captured ha->mbox[] (interrupts enabled path)
 *	for o_count reply words into out_mbx.
 *	NOTE(review): elided chunk — loop headers, error assignments and
 *	returns between the visible lines are not shown here.
 */
2132 qls_mbx_cmd(qla_host_t *ha, uint32_t *in_mbx, uint32_t i_count,
2133 uint32_t *out_mbx, uint32_t o_count)
2136 uint32_t data32, mbx_cmd = 0;
2137 uint32_t count = 50;
2139 QL_DPRINT2((ha->pci_dev, "%s: enter[0x%08x 0x%08x 0x%08x]\n",
2140 __func__, *in_mbx, *(in_mbx + 1), *(in_mbx + 2)));
/* A pending host-to-RISC interrupt means a command is in flight. */
2142 data32 = READ_REG32(ha, Q81_CTL_HOST_CMD_STATUS);
2144 if (data32 & Q81_CTL_HCS_HTR_INTR) {
2145 device_printf(ha->pci_dev, "%s: cmd_status[0x%08x]\n",
2147 goto qls_mbx_cmd_exit;
2150 if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_PROC_ADDR_NIC_RCV,
2151 Q81_CTL_SEM_SET_PROC_ADDR_NIC_RCV)) {
2152 device_printf(ha->pci_dev, "%s: semlock failed\n", __func__);
2153 goto qls_mbx_cmd_exit;
/* Load the request into the mailbox-in registers. */
2160 for (i = 0; i < i_count; i++) {
2161 ret = qls_mbx_wr_reg(ha, i, *in_mbx);
2164 device_printf(ha->pci_dev,
2165 "%s: mbx_wr[%d, 0x%08x] failed\n", __func__,
2167 qls_sem_unlock(ha, Q81_CTL_SEM_MASK_PROC_ADDR_NIC_RCV);
2168 goto qls_mbx_cmd_exit;
/* Ring the doorbell: host-to-RISC interrupt. */
2173 WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS, Q81_CTL_HCS_CMD_SET_HTR_INTR);
2175 qls_sem_unlock(ha, Q81_CTL_SEM_MASK_PROC_ADDR_NIC_RCV);
/* Polled path: interrupts not yet enabled. */
2181 if (ha->flags.intr_enable == 0) {
2182 data32 = READ_REG32(ha, Q81_CTL_STATUS);
2184 if (!(data32 & Q81_CTL_STATUS_PI)) {
2185 qls_mdelay(__func__, 100);
2189 ret = qls_mbx_rd_reg(ha, 0, &data32);
/* 0x4xxx in mbox0 = command status codes; read remaining words. */
2192 if ((data32 & 0xF000) == 0x4000) {
2193 out_mbx[0] = data32;
2195 for (i = 1; i < o_count; i++) {
2196 ret = qls_mbx_rd_reg(ha, i,
2206 out_mbx[i] = data32;
/* 0x8xxx = async event; acknowledge and keep waiting (elided). */
2209 } else if ((data32 & 0xF000) == 0x8000) {
2212 Q81_CTL_HOST_CMD_STATUS,\
2213 Q81_CTL_HCS_CMD_CLR_RTH_INTR);
/* Interrupt path: ISR already copied the reply into ha->mbox[]. */
2218 for (i = 1; i < o_count; i++) {
2219 out_mbx[i] = ha->mbox[i];
2225 qls_mdelay(__func__, 1000);
2230 if (ha->flags.intr_enable == 0) {
/* Clear the RISC-to-host interrupt we consumed by polling. */
2231 WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS,\
2232 Q81_CTL_HCS_CMD_CLR_RTH_INTR);
/* Timed out — request adapter recovery. */
2236 ha->qla_initiate_recovery = 1;
2239 QL_DPRINT2((ha->pci_dev, "%s: exit[%d]\n", __func__, ret));
/*
 * qls_mbx_set_mgmt_ctrl
 *	Issue the SET_MGMT_CTL mailbox command with control value t_ctrl.
 *	Success is CMD_COMPLETE; for the STOP control, CMD_ERROR is also
 *	accepted (firmware may already be stopped).
 */
2244 qls_mbx_set_mgmt_ctrl(qla_host_t *ha, uint32_t t_ctrl)
2247 device_t dev = ha->pci_dev;
2250 bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
2252 mbox[0] = Q81_MBX_SET_MGMT_CTL;
2255 if (qls_mbx_cmd(ha, mbox, 2, mbox, 1)) {
2256 device_printf(dev, "%s failed\n", __func__);
2260 if ((mbox[0] == Q81_MBX_CMD_COMPLETE) ||
2261 ((t_ctrl == Q81_MBX_SET_MGMT_CTL_STOP) &&
2262 (mbox[0] == Q81_MBX_CMD_ERROR))){
2265 device_printf(dev, "%s failed [0x%08x]\n", __func__, mbox[0]);
/*
 * qls_mbx_get_mgmt_ctrl
 *	Issue the GET_MGMT_CTL mailbox command and return the reported
 *	management control status (reply mbox[1]) in *t_status.
 */
2271 qls_mbx_get_mgmt_ctrl(qla_host_t *ha, uint32_t *t_status)
2274 device_t dev = ha->pci_dev;
2279 bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
2281 mbox[0] = Q81_MBX_GET_MGMT_CTL;
2283 if (qls_mbx_cmd(ha, mbox, 1, mbox, 2)) {
2284 device_printf(dev, "%s failed\n", __func__);
2288 *t_status = mbox[1];
/*
 * qls_mbx_get_link_status
 *	Issue the GET_LNK_STATUS mailbox command and cache the five reply
 *	words (link status, link-down info, hw info, DCBX counters, link
 *	change counters) in the softc, logging all six mailbox words.
 */
2294 qls_mbx_get_link_status(qla_host_t *ha)
2297 device_t dev = ha->pci_dev;
2300 bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
2302 mbox[0] = Q81_MBX_GET_LNK_STATUS;
2304 if (qls_mbx_cmd(ha, mbox, 1, mbox, 6)) {
2305 device_printf(dev, "%s failed\n", __func__);
2309 ha->link_status = mbox[1];
2310 ha->link_down_info = mbox[2];
2311 ha->link_hw_info = mbox[3];
2312 ha->link_dcbx_counters = mbox[4];
2313 ha->link_change_counters = mbox[5];
2315 device_printf(dev, "%s 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
2316 __func__, mbox[0],mbox[1],mbox[2],mbox[3],mbox[4],mbox[5]);
/*
 * qls_mbx_about_fw
 *	Issue the ABOUT_FW mailbox command and log the six reply words
 *	(firmware identification/version data; values only printed here,
 *	not stored).
 */
2322 qls_mbx_about_fw(qla_host_t *ha)
2325 device_t dev = ha->pci_dev;
2328 bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
2330 mbox[0] = Q81_MBX_ABOUT_FW;
2332 if (qls_mbx_cmd(ha, mbox, 1, mbox, 6)) {
2333 device_printf(dev, "%s failed\n", __func__);
2337 device_printf(dev, "%s 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
2338 __func__, mbox[0],mbox[1],mbox[2],mbox[3],mbox[4],mbox[5]);
/*
 * qls_mbx_dump_risc_ram
 *	Dump r_size 32-bit words of RISC RAM starting at r_addr into
 *	'buf'.  Uses the preallocated mpi_dma buffer as a DMA bounce area:
 *	the 64-bit bus address and the 32-bit RAM address/size are split
 *	into 16-bit fields across mailbox registers 1-8, then the result
 *	is synced and copied out.  Reply mbox[0] must be 0x4000
 *	(command complete).
 */
2342 qls_mbx_dump_risc_ram(qla_host_t *ha, void *buf, uint32_t r_addr,
2347 device_t dev = ha->pci_dev;
2350 bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
/* r_size is in 32-bit words; clear the bounce buffer first. */
2352 bzero(ha->mpi_dma.dma_b,(r_size << 2));
2353 b_paddr = ha->mpi_dma.dma_addr;
/* Addresses/sizes are passed 16 bits per mailbox register. */
2355 mbox[0] = Q81_MBX_DUMP_RISC_RAM;
2356 mbox[1] = r_addr & 0xFFFF;
2357 mbox[2] = ((uint32_t)(b_paddr >> 16)) & 0xFFFF;
2358 mbox[3] = ((uint32_t)b_paddr) & 0xFFFF;
2359 mbox[4] = (r_size >> 16) & 0xFFFF;
2360 mbox[5] = r_size & 0xFFFF;
2361 mbox[6] = ((uint32_t)(b_paddr >> 48)) & 0xFFFF;
2362 mbox[7] = ((uint32_t)(b_paddr >> 32)) & 0xFFFF;
2363 mbox[8] = (r_addr >> 16) & 0xFFFF;
2365 bus_dmamap_sync(ha->mpi_dma.dma_tag, ha->mpi_dma.dma_map,
2366 BUS_DMASYNC_PREREAD);
2368 if (qls_mbx_cmd(ha, mbox, 9, mbox, 1)) {
2369 device_printf(dev, "%s failed\n", __func__);
/* 0x4000 = mailbox command complete. */
2372 if (mbox[0] != 0x4000) {
2373 device_printf(ha->pci_dev, "%s: failed!\n", __func__);
2376 bus_dmamap_sync(ha->mpi_dma.dma_tag, ha->mpi_dma.dma_map,
2377 BUS_DMASYNC_POSTREAD);
/* Copy the device-written dump out of the bounce buffer. */
2378 bcopy(ha->mpi_dma.dma_b, buf, (r_size << 2));
2385 qls_mpi_reset(qla_host_t *ha)
2389 device_t dev = ha->pci_dev;
2391 WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS,\
2392 Q81_CTL_HCS_CMD_SET_RISC_RESET);
2396 data = READ_REG32(ha, Q81_CTL_HOST_CMD_STATUS);
2397 if (data & Q81_CTL_HCS_RISC_RESET) {
2398 WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS,\
2399 Q81_CTL_HCS_CMD_CLR_RISC_RESET);
2402 qls_mdelay(__func__, 10);
2405 device_printf(dev, "%s: failed\n", __func__);