2 * Copyright (c) 2013-2014 Qlogic Corporation
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
30 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
31 * Content: Contains Hardware dependant functions
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
41 #include "qls_inline.h"
/*
 * Forward declarations of file-local (static) helpers: MAC/CAM
 * programming, queue/RSS initialization, semaphore and flash access,
 * per-resource DMA alloc/free pairs, and MPI mailbox primitives.
 * NOTE(review): this view is an elided extraction (original line
 * numbers are fused into each line); some declarations in between may
 * be missing here.
 */
49 static int qls_wait_for_mac_proto_idx_ready(qla_host_t *ha, uint32_t op);
50 static int qls_config_unicast_mac_addr(qla_host_t *ha, uint32_t add_mac);
51 static int qls_config_mcast_mac_addr(qla_host_t *ha, uint8_t *mac_addr,
52 	uint32_t add_mac, uint32_t index);
54 static int qls_init_rss(qla_host_t *ha);
55 static int qls_init_comp_queue(qla_host_t *ha, int cid);
56 static int qls_init_work_queue(qla_host_t *ha, int wid);
57 static int qls_init_fw_routing_table(qla_host_t *ha);
58 static int qls_hw_add_all_mcast(qla_host_t *ha);
59 static int qls_hw_add_mcast(qla_host_t *ha, uint8_t *mta);
60 static int qls_hw_del_mcast(qla_host_t *ha, uint8_t *mta);
61 static int qls_wait_for_flash_ready(qla_host_t *ha);
63 static int qls_sem_lock(qla_host_t *ha, uint32_t mask, uint32_t value);
64 static void qls_sem_unlock(qla_host_t *ha, uint32_t mask);
/* alloc/free pairs for the driver's DMA-backed resources */
66 static void qls_free_tx_dma(qla_host_t *ha);
67 static int qls_alloc_tx_dma(qla_host_t *ha);
68 static void qls_free_rx_dma(qla_host_t *ha);
69 static int qls_alloc_rx_dma(qla_host_t *ha);
70 static void qls_free_mpi_dma(qla_host_t *ha);
71 static int qls_alloc_mpi_dma(qla_host_t *ha);
72 static void qls_free_rss_dma(qla_host_t *ha);
73 static int qls_alloc_rss_dma(qla_host_t *ha);
75 static int qls_flash_validate(qla_host_t *ha, const char *signature);
78 static int qls_wait_for_proc_addr_ready(qla_host_t *ha);
79 static int qls_proc_addr_rd_reg(qla_host_t *ha, uint32_t addr_module,
80 	uint32_t reg, uint32_t *data);
81 static int qls_proc_addr_wr_reg(qla_host_t *ha, uint32_t addr_module,
82 	uint32_t reg, uint32_t data);
84 static int qls_hw_reset(qla_host_t *ha);
87  * MPI Related Functions
89 static int qls_mbx_cmd(qla_host_t *ha, uint32_t *in_mbx, uint32_t i_count,
90 	uint32_t *out_mbx, uint32_t o_count);
91 static int qls_mbx_set_mgmt_ctrl(qla_host_t *ha, uint32_t t_ctrl);
92 static int qls_mbx_get_mgmt_ctrl(qla_host_t *ha, uint32_t *t_status);
93 static void qls_mbx_get_link_status(qla_host_t *ha);
94 static void qls_mbx_about_fw(qla_host_t *ha);
/*
 * qls_get_msix_count
 * Returns the number of MSI-X vectors needed: one per RX (completion)
 * ring.
 */
97 qls_get_msix_count(qla_host_t *ha)
99 	return (ha->num_rx_rings);
/*
 * qls_syctl_mpi_dump
 * Sysctl handler for the "mpi_dump" node: a write to the node triggers
 * an MPI core dump via qls_mpi_core_dump(); reads are a no-op.
 */
103 qls_syctl_mpi_dump(SYSCTL_HANDLER_ARGS)
108 	err = sysctl_handle_int(oidp, &ret, 0, req);
	/* return early on error or when no new value was written */
110 	if (err || !req->newptr)
115 	ha = (qla_host_t *)arg1;
116 	qls_mpi_core_dump(ha);
/*
 * qls_syctl_link_status
 * Sysctl handler for the "link_status" node: a write to the node
 * queries the MPI firmware for link status and firmware information;
 * reads are a no-op.
 */
122 qls_syctl_link_status(SYSCTL_HANDLER_ARGS)
127 	err = sysctl_handle_int(oidp, &ret, 0, req);
	/* return early on error or when no new value was written */
129 	if (err || !req->newptr)
134 	ha = (qla_host_t *)arg1;
135 	qls_mbx_get_link_status(ha);
136 	qls_mbx_about_fw(ha);
/*
 * qls_hw_add_sysctls
 * Sets the default ring counts and registers the hardware-related
 * sysctl nodes under the device's sysctl tree: read-only ring counts
 * plus the "mpi_dump" and "link_status" procedure nodes.
 */
142 qls_hw_add_sysctls(qla_host_t *ha)
148 	ha->num_rx_rings = MAX_RX_RINGS; ha->num_tx_rings = MAX_TX_RINGS;
150 	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
151 		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
152 		OID_AUTO, "num_rx_rings", CTLFLAG_RD, &ha->num_rx_rings,
153 		ha->num_rx_rings, "Number of Completion Queues");
155 	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
156 		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
157 		OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->num_tx_rings,
158 		ha->num_tx_rings, "Number of Transmit Rings");
160 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
161 		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
162 		OID_AUTO, "mpi_dump", CTLTYPE_INT | CTLFLAG_RW,
164 		qls_syctl_mpi_dump, "I", "MPI Dump");
166 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
167 		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
168 		OID_AUTO, "link_status", CTLTYPE_INT | CTLFLAG_RW,
170 		qls_syctl_link_status, "I", "Link Status");
175  * Function: Frees the DMA'able memory allocated in qls_alloc_dma()
/*
 * Releases every DMA area in the reverse order of allocation
 * (RSS, MPI, and — presumably in elided lines — TX/RX as well).
 */
178 qls_free_dma(qla_host_t *ha)
180 	qls_free_rss_dma(ha);
181 	qls_free_mpi_dma(ha);
188  * Name: qls_alloc_dma
189  * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
192 qls_alloc_dma(qla_host_t *ha)
	/* allocate in order RX, TX, MPI, RSS; on failure unwind the
	 * previously-successful allocations before returning */
194 	if (qls_alloc_rx_dma(ha))
197 	if (qls_alloc_tx_dma(ha)) {
202 	if (qls_alloc_mpi_dma(ha)) {
208 	if (qls_alloc_rss_dma(ha)) {
209 		qls_free_mpi_dma(ha);
/*
 * qls_wait_for_mac_proto_idx_ready
 * Polls the MAC-protocol address-index register until the requested
 * operation bit ('op') indicates ready; on timeout flags the adapter
 * for recovery (qla_initiate_recovery = 1).
 */
220 qls_wait_for_mac_proto_idx_ready(qla_host_t *ha, uint32_t op)
226 		data32 = READ_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX);
	/* timed out: request adapter recovery */
233 	ha->qla_initiate_recovery = 1;
238  * Name: qls_config_unicast_mac_addr
239  * Function: binds/unbinds a unicast MAC address to the interface.
/*
 * Programs the interface MAC address into the CAM: three index/data
 * register writes (MAC lower 32 bits, MAC upper 16 bits, then a
 * routing word selecting NIC function and completion queue 0), each
 * gated by a ready-wait on the MAC-protocol index register. The whole
 * sequence runs under the MAC/SERDES hardware semaphore.
 */
242 qls_config_unicast_mac_addr(qla_host_t *ha, uint32_t add_mac)
245 	uint32_t mac_upper = 0;
246 	uint32_t mac_lower = 0;
247 	uint32_t value = 0, index;
249 	if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_MAC_SERDES,
250 		Q81_CTL_SEM_SET_MAC_SERDES)) {
251 		QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
	/* split the 6-byte MAC into upper 16 bits and lower 32 bits */
256 	mac_upper = (ha->mac_addr[0] << 8) | ha->mac_addr[1];
257 	mac_lower = (ha->mac_addr[2] << 24) | (ha->mac_addr[3] << 16) |
258 		(ha->mac_addr[4] << 8) | ha->mac_addr[5];
260 	ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
262 		goto qls_config_unicast_mac_addr_exit;
	/* CAM index is per PCI function (128 entries per function) */
264 	index = 128 * (ha->pci_func & 0x1); /* index */
266 	value = (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
267 		Q81_CTL_MAC_PROTO_AI_TYPE_CAM_MAC;
269 	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
270 	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_lower);
272 	ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
274 		goto qls_config_unicast_mac_addr_exit;
	/* second CAM word (offset 0x1): upper 16 bits of the MAC */
276 	value = (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
277 		Q81_CTL_MAC_PROTO_AI_TYPE_CAM_MAC | 0x1;
279 	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
280 	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_upper);
282 	ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
284 		goto qls_config_unicast_mac_addr_exit;
	/* third CAM word (offset 0x2): routing — NIC, function, CQ 0 */
286 	value = (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
287 		Q81_CTL_MAC_PROTO_AI_TYPE_CAM_MAC | 0x2;
289 	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
291 	value = Q81_CAM_MAC_OFF2_ROUTE_NIC |
292 		((ha->pci_func & 0x1) << Q81_CAM_MAC_OFF2_FUNC_SHIFT) |
293 		(0 << Q81_CAM_MAC_OFF2_CQID_SHIFT);
295 	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, value);
297 qls_config_unicast_mac_addr_exit:
298 	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_MAC_SERDES);
303  * Name: qls_config_mcast_mac_addr
304  * Function: binds/unbinds a multicast MAC address to the interface.
/*
 * Writes (or clears — presumably when add_mac == 0; the elided lines
 * likely select that) a multicast entry at 'index' in the MAC-protocol
 * table: two index/data writes (lower 32 bits, then upper 16 bits),
 * each gated by a ready-wait, all under the MAC/SERDES semaphore.
 */
307 qls_config_mcast_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac,
311 	uint32_t mac_upper = 0;
312 	uint32_t mac_lower = 0;
315 	if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_MAC_SERDES,
316 		Q81_CTL_SEM_SET_MAC_SERDES)) {
317 		QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
	/* split the 6-byte MAC into upper 16 bits and lower 32 bits */
322 	mac_upper = (mac_addr[0] << 8) | mac_addr[1];
323 	mac_lower = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
324 		(mac_addr[4] << 8) | mac_addr[5];
326 	ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
328 		goto qls_config_mcast_mac_addr_exit;
330 	value = Q81_CTL_MAC_PROTO_AI_E |
331 		(index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
332 		Q81_CTL_MAC_PROTO_AI_TYPE_MCAST ;
334 	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
335 	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_lower);
337 	ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
339 		goto qls_config_mcast_mac_addr_exit;
	/* second word (offset 0x1): upper 16 bits of the MAC */
341 	value = Q81_CTL_MAC_PROTO_AI_E |
342 		(index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
343 		Q81_CTL_MAC_PROTO_AI_TYPE_MCAST | 0x1;
345 	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
346 	WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_upper);
348 qls_config_mcast_mac_addr_exit:
349 	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_MAC_SERDES);
355  * Name: qls_set_mac_rcv_mode
356  * Function: Enable/Disable AllMulticast and Promiscous Modes.
/*
 * NOTE(review): the comment above appears to describe the section
 * (receive-mode configuration), not this helper specifically.
 *
 * qls_wait_for_route_idx_ready
 * Polls the routing-index register until the requested operation bit
 * ('op') indicates ready; on timeout flags the adapter for recovery.
 */
359 qls_wait_for_route_idx_ready(qla_host_t *ha, uint32_t op)
365 		data32 = READ_REG32(ha, Q81_CTL_ROUTING_INDEX);
	/* timed out: request adapter recovery */
372 	ha->qla_initiate_recovery = 1;
/*
 * qls_load_route_idx_reg
 * Writes one routing-table entry: waits for the routing-index register
 * to be ready for a write, then programs the index and data registers.
 * Caller must hold the routing-index/data-register semaphore.
 */
377 qls_load_route_idx_reg(qla_host_t *ha, uint32_t index, uint32_t data)
381 	ret = qls_wait_for_route_idx_ready(ha, Q81_CTL_RI_MW);
384 		device_printf(ha->pci_dev, "%s: [0x%08x, 0x%08x] failed\n",
385 			__func__, index, data);
386 		goto qls_load_route_idx_reg_exit;
390 	WRITE_REG32(ha, Q81_CTL_ROUTING_INDEX, index);
391 	WRITE_REG32(ha, Q81_CTL_ROUTING_DATA, data);
393 qls_load_route_idx_reg_exit:
/*
 * qls_load_route_idx_reg_locked
 * Semaphore-acquiring wrapper around qls_load_route_idx_reg(): takes
 * the routing-index/data-register semaphore, writes the entry, and
 * releases the semaphore.
 */
398 qls_load_route_idx_reg_locked(qla_host_t *ha, uint32_t index, uint32_t data)
402 	if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG,
403 		Q81_CTL_SEM_SET_RIDX_DATAREG)) {
404 		QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
408 	ret = qls_load_route_idx_reg(ha, index, data);
410 	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG);
/*
 * qls_clear_routing_table
 * Zeroes all 16 firmware routing-table slots (index in bits 8..11,
 * NIC-queue mask type, default-queue destination) under the
 * routing-index/data-register semaphore.
 */
416 qls_clear_routing_table(qla_host_t *ha)
420 	if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG,
421 		Q81_CTL_SEM_SET_RIDX_DATAREG)) {
422 		QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
426 	for (i = 0; i < 16; i++) {
427 		ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_TYPE_NICQMASK|
428 				(i << 8) | Q81_CTL_RI_DST_DFLTQ), 0);
433 	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG);
/*
 * qls_set_promisc
 * Enables promiscuous mode by programming an enabled routing entry
 * that steers all valid packets to the default completion queue.
 */
439 qls_set_promisc(qla_host_t *ha)
443 	ret = qls_load_route_idx_reg_locked(ha,
444 			(Q81_CTL_RI_E | Q81_CTL_RI_TYPE_NICQMASK |
445 			Q81_CTL_RI_IDX_PROMISCUOUS | Q81_CTL_RI_DST_DFLTQ),
446 			Q81_CTL_RD_VALID_PKT);
/*
 * qls_reset_promisc
 * Disables promiscuous mode by clearing the promiscuous routing entry
 * (enable bit off, data 0).
 */
451 qls_reset_promisc(qla_host_t *ha)
455 	ret = qls_load_route_idx_reg_locked(ha, (Q81_CTL_RI_TYPE_NICQMASK |
456 			Q81_CTL_RI_IDX_PROMISCUOUS | Q81_CTL_RI_DST_DFLTQ), 0);
/*
 * qls_set_allmulti
 * Enables all-multicast reception by programming an enabled routing
 * entry for the all-multi index, destined to the default queue.
 */
461 qls_set_allmulti(qla_host_t *ha)
465 	ret = qls_load_route_idx_reg_locked(ha,
466 			(Q81_CTL_RI_E | Q81_CTL_RI_TYPE_NICQMASK |
467 			Q81_CTL_RI_IDX_ALLMULTI | Q81_CTL_RI_DST_DFLTQ),
/*
 * qls_reset_allmulti
 * Disables all-multicast reception by clearing the all-multi routing
 * entry (enable bit off, data 0).
 */
473 qls_reset_allmulti(qla_host_t *ha)
477 	ret = qls_load_route_idx_reg_locked(ha, (Q81_CTL_RI_TYPE_NICQMASK |
478 			Q81_CTL_RI_IDX_ALLMULTI | Q81_CTL_RI_DST_DFLTQ), 0);
/*
 * qls_init_fw_routing_table
 * Clears and then repopulates the firmware routing table: drop errored
 * packets, broadcast to default queue, RSS-match to the RSS rings
 * (only when more than one RX ring exists), registered-multicast match
 * and CAM hit to the default queue. All writes run under the
 * routing-index/data-register semaphore.
 */
484 qls_init_fw_routing_table(qla_host_t *ha)
488 	ret = qls_clear_routing_table(ha);
492 	if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG,
493 		Q81_CTL_SEM_SET_RIDX_DATAREG)) {
494 		QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
	/* drop packets with errors */
498 	ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DROP |
499 			Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_ALL_ERROR),
500 			Q81_CTL_RD_ERROR_PKT);
502 		goto qls_init_fw_routing_table_exit;
	/* broadcast packets go to the default completion queue */
504 	ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DFLTQ |
505 			Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_BCAST),
508 		goto qls_init_fw_routing_table_exit;
	/* spread RSS-matched traffic only when multiple RX rings exist */
510 	if (ha->num_rx_rings > 1 ) {
511 		ret = qls_load_route_idx_reg(ha,
512 				(Q81_CTL_RI_E | Q81_CTL_RI_DST_RSS |
513 				Q81_CTL_RI_TYPE_NICQMASK |
514 				Q81_CTL_RI_IDX_RSS_MATCH),
515 				Q81_CTL_RD_RSS_MATCH);
517 			goto qls_init_fw_routing_table_exit;
	/* registered multicast matches go to the default queue */
520 	ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DFLTQ |
521 			Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_MCAST_MATCH),
522 			Q81_CTL_RD_MCAST_REG_MATCH);
524 		goto qls_init_fw_routing_table_exit;
	/* unicast CAM hits go to the default queue */
526 	ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DFLTQ |
527 			Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_CAM_HIT),
530 		goto qls_init_fw_routing_table_exit;
532 qls_init_fw_routing_table_exit:
533 	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG);
/*
 * qls_tx_tso_chksum
 * Fills in a TSO/checksum-offload transmit IOCB for the given mbuf:
 * parses the Ethernet (optionally VLAN-tagged) header, then the IPv4
 * or IPv6 header, and sets the opcode, flags, pseudo-header offsets,
 * and (for TSO) MSS and pseudo-header checksum.  When the L3 header
 * is not contiguous in the first mbuf it is copied into a local
 * buffer first.
 */
538 qls_tx_tso_chksum(qla_host_t *ha, struct mbuf *mp, q81_tx_tso_t *tx_mac)
540 	struct ether_vlan_header *eh;
544 	uint32_t ehdrlen, ip_hlen;
	/* large enough for either an IPv4 or IPv6 fixed header */
548 	uint8_t buf[sizeof(struct ip6_hdr)];
552 	eh = mtod(mp, struct ether_vlan_header *);
	/* determine ethertype and header length, accounting for a VLAN tag */
554 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
555 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
556 		etype = ntohs(eh->evl_proto);
558 		ehdrlen = ETHER_HDR_LEN;
559 		etype = ntohs(eh->evl_encap_proto);
	/* --- IPv4 path --- */
564 		ip = (struct ip *)(mp->m_data + ehdrlen);
566 		ip_hlen = sizeof (struct ip);
	/* header not contiguous: copy it into the local buffer */
568 		if (mp->m_len < (ehdrlen + ip_hlen)) {
569 			m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
570 			ip = (struct ip *)buf;
572 		tx_mac->opcode = Q81_IOCB_TX_TSO;
573 		tx_mac->flags |= Q81_TX_TSO_FLAGS_IPV4 ;
575 		tx_mac->phdr_offsets = ehdrlen;
577 		tx_mac->phdr_offsets |= ((ehdrlen + ip_hlen) <<
578 						Q81_TX_TSO_PHDR_SHIFT);
	/* TSO: program MSS and the TCP pseudo-header checksum */
582 		if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
583 			tx_mac->flags |= Q81_TX_TSO_FLAGS_LSO;
585 			th = (struct tcphdr *)(ip + 1);
587 			th->th_sum = in_pseudo(ip->ip_src.s_addr,
590 			tx_mac->mss = mp->m_pkthdr.tso_segsz;
591 			tx_mac->phdr_length = ip_hlen + ehdrlen +
595 			tx_mac->vlan_off |= Q81_TX_TSO_VLAN_OFF_IC ;
	/* plain checksum offload: mark TCP or UDP */
598 			if (ip->ip_p == IPPROTO_TCP) {
599 				tx_mac->flags |= Q81_TX_TSO_FLAGS_TC;
600 			} else if (ip->ip_p == IPPROTO_UDP) {
601 				tx_mac->flags |= Q81_TX_TSO_FLAGS_UC;
	/* --- IPv6 path --- */
606 		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
608 		ip_hlen = sizeof(struct ip6_hdr);
610 		if (mp->m_len < (ehdrlen + ip_hlen)) {
611 			m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
613 			ip6 = (struct ip6_hdr *)buf;
616 		tx_mac->opcode = Q81_IOCB_TX_TSO;
617 		tx_mac->flags |= Q81_TX_TSO_FLAGS_IPV6 ;
618 		tx_mac->vlan_off |= Q81_TX_TSO_VLAN_OFF_IC ;
620 		tx_mac->phdr_offsets = ehdrlen;
621 		tx_mac->phdr_offsets |= ((ehdrlen + ip_hlen) <<
622 						Q81_TX_TSO_PHDR_SHIFT);
	/* NOTE(review): next-header check only handles the fixed header;
	 * extension headers would not be walked here — confirm upstream */
624 		if (ip6->ip6_nxt == IPPROTO_TCP) {
625 			tx_mac->flags |= Q81_TX_TSO_FLAGS_TC;
626 		} else if (ip6->ip6_nxt == IPPROTO_UDP) {
627 			tx_mac->flags |= Q81_TX_TSO_FLAGS_UC;
/* minimum free TX descriptors below which the ring is considered full */
639 #define QLA_TX_MIN_FREE 2
/*
 * qls_hw_tx_done
 * Recomputes the free-descriptor count for TX ring 'txr_idx' from the
 * hardware consumer ('txr_done') and software producer ('txr_next')
 * indices of the circular ring, handling producer wrap-around.
 */
641 qls_hw_tx_done(qla_host_t *ha, uint32_t txr_idx)
643 	uint32_t txr_done, txr_next;
645 	txr_done = ha->tx_ring[txr_idx].txr_done;
646 	txr_next = ha->tx_ring[txr_idx].txr_next;
648 	if (txr_done == txr_next) {
		/* consumer caught up with producer: ring is empty */
649 		ha->tx_ring[txr_idx].txr_free = NUM_TX_DESCRIPTORS;
650 	} else if (txr_done > txr_next) {
651 		ha->tx_ring[txr_idx].txr_free = txr_done - txr_next;
		/* producer wrapped past the end of the ring */
653 		ha->tx_ring[txr_idx].txr_free = NUM_TX_DESCRIPTORS +
657 	if (ha->tx_ring[txr_idx].txr_free <= QLA_TX_MIN_FREE)
665  * Function: Transmits a packet. It first checks if the packet is a
666  * candidate for Large TCP Segment Offload and then for UDP/TCP checksum
667  * offload. If either of these creteria are not met, it is transmitted
668  * as a regular ethernet frame.
/*
 * Builds a TX MAC (or TSO) IOCB at slot 'txr_next' of ring 'txr_idx'
 * from the DMA segment list, applies VLAN tagging and checksum/TSO
 * offload flags, chains segments inline (<= MAX_TX_MAC_DESC) or via
 * the per-descriptor OAL area, then advances the producer index and
 * rings the work-queue doorbell.
 */
671 qls_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
672 	uint32_t txr_next, struct mbuf *mp, uint32_t txr_idx)
674 	q81_tx_mac_t *tx_mac;
675 	q81_txb_desc_t *tx_desc;
676 	uint32_t total_length = 0;
683 	total_length = mp->m_pkthdr.len;
685 	if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
686 		device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
687 			__func__, total_length);
	/* refresh the free count when the ring is getting low */
691 	if (ha->tx_ring[txr_idx].txr_free <= (NUM_TX_DESCRIPTORS >> 2)) {
692 		if (qls_hw_tx_done(ha, txr_idx)) {
693 			device_printf(dev, "%s: tx_free[%d] = %d\n",
695 				ha->tx_ring[txr_idx].txr_free);
700 	tx_mac = (q81_tx_mac_t *)&ha->tx_ring[txr_idx].wq_vaddr[txr_next];
702 	bzero(tx_mac, sizeof(q81_tx_mac_t));
	/* checksum/TSO offload requested: build a TSO IOCB instead */
704 	if ((mp->m_pkthdr.csum_flags &
705 		(CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO)) != 0) {
707 		ret = qls_tx_tso_chksum(ha, mp, (q81_tx_tso_t *)tx_mac);
711 		if (mp->m_pkthdr.csum_flags & CSUM_TSO)
712 			ha->tx_ring[txr_idx].tx_tso_frames++;
714 			ha->tx_ring[txr_idx].tx_frames++;
	/* plain ethernet frame */
717 		tx_mac->opcode = Q81_IOCB_TX_MAC;
720 		if (mp->m_flags & M_VLANTAG) {
722 			tx_mac->vlan_tci = mp->m_pkthdr.ether_vtag;
723 			tx_mac->vlan_off |= Q81_TX_MAC_VLAN_OFF_V;
725 			ha->tx_ring[txr_idx].tx_vlan_frames++;
728 	tx_mac->frame_length = total_length;
730 	tx_mac->tid_lo = txr_next;
	/* few segments: embed them directly in the IOCB */
732 	if (nsegs <= MAX_TX_MAC_DESC) {
734 		QL_DPRINT2((dev, "%s: 1 [%d, %d]\n", __func__, total_length,
737 		for (i = 0; i < nsegs; i++) {
738 			tx_mac->txd[i].baddr = segs->ds_addr;
739 			tx_mac->txd[i].length = segs->ds_len;
742 		tx_mac->txd[(nsegs - 1)].flags = Q81_RXB_DESC_FLAGS_E;
	/* many segments: point the IOCB at the OAL continuation list */
745 		QL_DPRINT2((dev, "%s: 2 [%d, %d]\n", __func__, total_length,
748 		tx_mac->txd[0].baddr =
749 			ha->tx_ring[txr_idx].tx_buf[txr_next].oal_paddr;
750 		tx_mac->txd[0].length =
751 			nsegs * (sizeof(q81_txb_desc_t));
752 		tx_mac->txd[0].flags = Q81_RXB_DESC_FLAGS_C;
754 		tx_desc = ha->tx_ring[txr_idx].tx_buf[txr_next].oal_vaddr;
756 		for (i = 0; i < nsegs; i++) {
757 			tx_desc->baddr = segs->ds_addr;
758 			tx_desc->length = segs->ds_len;
761 		tx_desc->flags = Q81_RXB_DESC_FLAGS_E;
	/* advance the producer (ring size is a power of two) and notify hw */
769 	txr_next = (txr_next + 1) & (NUM_TX_DESCRIPTORS - 1);
770 	ha->tx_ring[txr_idx].txr_next = txr_next;
772 	ha->tx_ring[txr_idx].txr_free--;
774 	Q81_WR_WQ_PROD_IDX(txr_idx, txr_next);
780  * Name: qls_del_hw_if
781  * Function: Destroys the hardware specific entities corresponding to an
/*
 * Tears down the interface's hardware state: invalidates every work
 * and completion queue, disables per-ring MSI-X interrupts, then turns
 * off interrupt-handshake-disable and the global interrupt enable.
 * No-op when the hardware was never initialized (hw_init == 0).
 */
785 qls_del_hw_if(qla_host_t *ha)
791 	if (ha->hw_init == 0) {
796 	for (i = 0; i < ha->num_tx_rings; i++) {
797 		Q81_SET_WQ_INVALID(i);
799 	for (i = 0; i < ha->num_rx_rings; i++) {
800 		Q81_SET_CQ_INVALID(i);
803 	for (i = 0; i < ha->num_rx_rings; i++) {
804 		Q81_DISABLE_INTR(ha, i); /* MSI-x i */
	/* clear IHD: mask written in the high half, value bits left 0 */
807 	value = (Q81_CTL_INTRE_IHD << Q81_CTL_INTRE_MASK_SHIFT);
808 	WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);
	/* clear the global interrupt-enable bit the same way */
810 	value = (Q81_CTL_INTRE_EI << Q81_CTL_INTRE_MASK_SHIFT);
811 	WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);
812 	ha->flags.intr_enable = 0;
820  * Name: qls_init_hw_if
821  * Function: Creates the hardware specific entities corresponding to an
822  * Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
823  * corresponding to the interface. Enables LRO if allowed.
/*
 * Full bring-up sequence: reset the chip, program the global control
 * registers (system enable, default CQ, function-specific page
 * size/NIC enable, interrupt mask), load every completion and work
 * queue context, configure RSS when multiple RX rings exist, program
 * the CAM MAC address and multicast list, install the firmware routing
 * table, then enable interrupts and prime the producer/consumer
 * doorbells.
 */
826 qls_init_hw_if(qla_host_t *ha)
834 	QL_DPRINT2((ha->pci_dev, "%s:enter\n", __func__));
838 	ret = qls_hw_reset(ha);
840 		goto qls_init_hw_if_exit;
842 	ha->vm_pgsize = 4096;
844 	/* Enable FAE and EFE bits in System Register */
845 	value = Q81_CTL_SYSTEM_ENABLE_FAE | Q81_CTL_SYSTEM_ENABLE_EFE;
	/* mask bits go in the upper half, value bits in the lower half */
846 	value = (value << Q81_CTL_SYSTEM_MASK_SHIFT) | value;
848 	WRITE_REG32(ha, Q81_CTL_SYSTEM, value);
850 	/* Set Default Completion Queue_ID in NIC Rcv Configuration Register */
851 	value = (Q81_CTL_NIC_RCVC_DCQ_MASK << Q81_CTL_NIC_RCVC_MASK_SHIFT);
852 	WRITE_REG32(ha, Q81_CTL_NIC_RCV_CONFIG, value);
854 	/* Function Specific Control Register - Set Page Size and Enable NIC */
855 	value = Q81_CTL_FUNC_SPECIFIC_FE |
856 			Q81_CTL_FUNC_SPECIFIC_VM_PGSIZE_MASK |
857 			Q81_CTL_FUNC_SPECIFIC_EPC_O |
858 			Q81_CTL_FUNC_SPECIFIC_EPC_I |
859 			Q81_CTL_FUNC_SPECIFIC_EC;
860 	value = (value << Q81_CTL_FUNC_SPECIFIC_MASK_SHIFT) |
861 			Q81_CTL_FUNC_SPECIFIC_FE |
862 			Q81_CTL_FUNC_SPECIFIC_VM_PGSIZE_4K |
863 			Q81_CTL_FUNC_SPECIFIC_EPC_O |
864 			Q81_CTL_FUNC_SPECIFIC_EPC_I |
865 			Q81_CTL_FUNC_SPECIFIC_EC;
867 	WRITE_REG32(ha, Q81_CTL_FUNC_SPECIFIC, value);
869 	/* Interrupt Mask Register */
870 	value = Q81_CTL_INTRM_PI;
871 	value = (value << Q81_CTL_INTRM_MASK_SHIFT) | value;
873 	WRITE_REG32(ha, Q81_CTL_INTR_MASK, value);
875 	/* Initialiatize Completion Queue */
876 	for (i = 0; i < ha->num_rx_rings; i++) {
877 		ret = qls_init_comp_queue(ha, i);
879 			goto qls_init_hw_if_exit;
	/* RSS only makes sense with more than one completion queue */
882 	if (ha->num_rx_rings > 1 ) {
883 		ret = qls_init_rss(ha);
885 			goto qls_init_hw_if_exit;
888 	/* Initialize Work Queue */
890 	for (i = 0; i < ha->num_tx_rings; i++) {
891 		ret = qls_init_work_queue(ha, i);
893 			goto qls_init_hw_if_exit;
897 		goto qls_init_hw_if_exit;
899 	/* Set up CAM RAM with MAC Address */
900 	ret = qls_config_unicast_mac_addr(ha, 1);
902 		goto qls_init_hw_if_exit;
904 	ret = qls_hw_add_all_mcast(ha);
906 		goto qls_init_hw_if_exit;
908 	/* Initialize Firmware Routing Table */
909 	ret = qls_init_fw_routing_table(ha);
911 		goto qls_init_hw_if_exit;
913 	/* Get Chip Revision ID */
914 	ha->rev_id = READ_REG32(ha, Q81_CTL_REV_ID);
916 	/* Enable Global Interrupt */
917 	value = Q81_CTL_INTRE_EI;
918 	value = (value << Q81_CTL_INTRE_MASK_SHIFT) | value;
920 	WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);
922 	/* Enable Interrupt Handshake Disable */
923 	value = Q81_CTL_INTRE_IHD;
924 	value = (value << Q81_CTL_INTRE_MASK_SHIFT) | value;
926 	WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);
928 	/* Enable Completion Interrupt */
930 	ha->flags.intr_enable = 1;
932 	for (i = 0; i < ha->num_rx_rings; i++) {
933 		Q81_ENABLE_INTR(ha, i); /* MSI-x i */
938 	qls_mbx_get_link_status(ha);
940 	QL_DPRINT2((ha->pci_dev, "%s:rxr [0x%08x]\n", __func__,
941 		ha->rx_ring[0].cq_db_offset));
942 	QL_DPRINT2((ha->pci_dev, "%s:txr [0x%08x]\n", __func__,
943 		ha->tx_ring[0].wq_db_offset));
	/* prime per-ring doorbells: CQ consumer and LBQ/SBQ producers */
945 	for (i = 0; i < ha->num_rx_rings; i++) {
947 		Q81_WR_CQ_CONS_IDX(i, 0);
948 		Q81_WR_LBQ_PROD_IDX(i, ha->rx_ring[i].lbq_in);
949 		Q81_WR_SBQ_PROD_IDX(i, ha->rx_ring[i].sbq_in);
951 		QL_DPRINT2((dev, "%s: [wq_idx, cq_idx, lbq_idx, sbq_idx]"
952 			"[0x%08x, 0x%08x, 0x%08x, 0x%08x]\n", __func__,
953 			Q81_RD_WQ_IDX(i), Q81_RD_CQ_IDX(i), Q81_RD_LBQ_IDX(i),
957 	for (i = 0; i < ha->num_rx_rings; i++) {
962 	QL_DPRINT2((ha->pci_dev, "%s:exit\n", __func__));
/*
 * qls_wait_for_config_reg_bits
 * Polls the CONFIG register until (reg & bits) == value; on timeout
 * logs the failure and flags the adapter for recovery.
 */
967 qls_wait_for_config_reg_bits(qla_host_t *ha, uint32_t bits, uint32_t value)
974 		data32 = READ_REG32(ha, Q81_CTL_CONFIG);
976 		if ((data32 & bits) == value)
	/* timed out: request adapter recovery */
981 	ha->qla_initiate_recovery = 1;
982 	device_printf(ha->pci_dev, "%s: failed\n", __func__);
/*
 * 40-byte RSS hash key; the first 16 bytes double as the IPv4 key
 * (see qls_init_rss()).
 */
986 static uint8_t q81_hash_key[] = {
987 			0xda, 0x56, 0x5a, 0x6d,
988 			0xc2, 0x0e, 0x5b, 0x25,
989 			0x3d, 0x25, 0x67, 0x41,
990 			0xb0, 0x8f, 0xa3, 0x43,
991 			0xcb, 0x2b, 0xca, 0xd0,
992 			0xb4, 0x30, 0x7b, 0xae,
993 			0xa3, 0x2d, 0xcb, 0x77,
994 			0x0c, 0xf2, 0x30, 0x80,
995 			0x3b, 0xb7, 0x42, 0x6a,
996 			0xfa, 0x01, 0xac, 0xbe };
/*
 * qls_init_rss
 * Builds the RSS initialization control block (hash flags, 10-bit
 * indirection mask, CQ indirection table spread across the RX rings,
 * IPv4/IPv6 hash keys) in the pre-allocated rss_dma area, then points
 * the chip at it via the ICB access registers (under the ICB
 * semaphore) and kicks the Load-RSS (LR) bit in CONFIG, waiting for
 * it to clear.
 */
999 qls_init_rss(qla_host_t *ha)
1001 	q81_rss_icb_t *rss_icb;
1006 	rss_icb = ha->rss_dma.dma_b;
1008 	bzero(rss_icb, sizeof (q81_rss_icb_t));
1010 	rss_icb->flags_base_cq_num = Q81_RSS_ICB_FLAGS_L4K |
1011 				Q81_RSS_ICB_FLAGS_L6K | Q81_RSS_ICB_FLAGS_LI |
1012 				Q81_RSS_ICB_FLAGS_LB | Q81_RSS_ICB_FLAGS_LM |
1013 				Q81_RSS_ICB_FLAGS_RT4 | Q81_RSS_ICB_FLAGS_RT6;
	/* 10-bit indirection-table mask (1024 entries) */
1015 	rss_icb->mask = 0x3FF;
	/* round-robin the indirection table over the RX rings
	 * (num_rx_rings is assumed to be a power of two here) */
1017 	for (i = 0; i < Q81_RSS_ICB_NUM_INDTBL_ENTRIES; i++) {
1018 		rss_icb->cq_id[i] = (i & (ha->num_rx_rings - 1));
1021 	memcpy(rss_icb->ipv6_rss_hash_key, q81_hash_key, 40);
1022 	memcpy(rss_icb->ipv4_rss_hash_key, q81_hash_key, 16);
	/* make sure no previous RSS load is still pending */
1024 	ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LR, 0);
1027 		goto qls_init_rss_exit;
1029 	ret = qls_sem_lock(ha, Q81_CTL_SEM_MASK_ICB, Q81_CTL_SEM_SET_ICB);
1032 		QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
1033 		goto qls_init_rss_exit;
	/* hand the chip the physical address of the RSS ICB */
1036 	value = (uint32_t)ha->rss_dma.dma_addr;
1037 	WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_LO, value);
1039 	value = (uint32_t)(ha->rss_dma.dma_addr >> 32);
1040 	WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_HI, value);
1042 	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_ICB);
1044 	value = (Q81_CTL_CONFIG_LR << Q81_CTL_CONFIG_MASK_SHIFT) |
1047 	WRITE_REG32(ha, Q81_CTL_CONFIG, value);
	/* wait for the chip to finish consuming the ICB */
1049 	ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LR, 0);
/*
 * qls_init_comp_queue
 * Builds the completion-queue initialization control block for ring
 * 'cid' (MSI-X vector, ring/lbq/sbq base addresses and sizes,
 * interrupt coalescing delays), points the chip at it via the ICB
 * access registers (under the ICB semaphore), kicks the Load-CQ (LCQ)
 * bit in CONFIG with the queue number, and resets the ring's software
 * indices.
 */
1056 qls_init_comp_queue(qla_host_t *ha, int cid)
1058 	q81_cq_icb_t *cq_icb;
1063 	rxr = &ha->rx_ring[cid];
	/* doorbell pages start at page 128, one page per CQ */
1065 	rxr->cq_db_offset = ha->vm_pgsize * (128 + cid);
1067 	cq_icb = rxr->cq_icb_vaddr;
1069 	bzero(cq_icb, sizeof (q81_cq_icb_t));
1071 	cq_icb->msix_vector = cid;
1072 	cq_icb->flags = Q81_CQ_ICB_FLAGS_LC |
1073 			Q81_CQ_ICB_FLAGS_LI |
1074 			Q81_CQ_ICB_FLAGS_LL |
1075 			Q81_CQ_ICB_FLAGS_LS |
1076 			Q81_CQ_ICB_FLAGS_LV;
1078 	cq_icb->length_v = NUM_CQ_ENTRIES;
1080 	cq_icb->cq_baddr_lo = (rxr->cq_base_paddr & 0xFFFFFFFF);
1081 	cq_icb->cq_baddr_hi = (rxr->cq_base_paddr >> 32) & 0xFFFFFFFF;
1083 	cq_icb->cqi_addr_lo = (rxr->cqi_paddr & 0xFFFFFFFF);
1084 	cq_icb->cqi_addr_hi = (rxr->cqi_paddr >> 32) & 0xFFFFFFFF;
	/* interrupt coalescing: packet delay and general delay */
1086 	cq_icb->pkt_idelay = 10;
1087 	cq_icb->idelay = 100;
	/* large-buffer queue */
1089 	cq_icb->lbq_baddr_lo = (rxr->lbq_addr_tbl_paddr & 0xFFFFFFFF);
1090 	cq_icb->lbq_baddr_hi = (rxr->lbq_addr_tbl_paddr >> 32) & 0xFFFFFFFF;
1092 	cq_icb->lbq_bsize = QLA_LGB_SIZE;
1093 	cq_icb->lbq_length = QLA_NUM_LGB_ENTRIES;
	/* small-buffer queue */
1095 	cq_icb->sbq_baddr_lo = (rxr->sbq_addr_tbl_paddr & 0xFFFFFFFF);
1096 	cq_icb->sbq_baddr_hi = (rxr->sbq_addr_tbl_paddr >> 32) & 0xFFFFFFFF;
1098 	cq_icb->sbq_bsize = (uint16_t)ha->msize;
1099 	cq_icb->sbq_length = QLA_NUM_SMB_ENTRIES;
	/* make sure no previous CQ load is still pending */
1103 	ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LCQ, 0);
1106 		goto qls_init_comp_queue_exit;
1108 	ret = qls_sem_lock(ha, Q81_CTL_SEM_MASK_ICB, Q81_CTL_SEM_SET_ICB);
1111 		QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
1112 		goto qls_init_comp_queue_exit;
	/* hand the chip the physical address of the CQ ICB */
1115 	value = (uint32_t)rxr->cq_icb_paddr;
1116 	WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_LO, value);
1118 	value = (uint32_t)(rxr->cq_icb_paddr >> 32);
1119 	WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_HI, value);
1121 	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_ICB);
	/* trigger the load with the queue number in the Q_NUM field */
1123 	value = Q81_CTL_CONFIG_LCQ | Q81_CTL_CONFIG_Q_NUM_MASK;
1124 	value = (value << Q81_CTL_CONFIG_MASK_SHIFT) | Q81_CTL_CONFIG_LCQ;
1125 	value |= (cid << Q81_CTL_CONFIG_Q_NUM_SHIFT);
1126 	WRITE_REG32(ha, Q81_CTL_CONFIG, value);
1128 	ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LCQ, 0);
	/* reset software ring state; lbq/sbq producers start 16-aligned */
1131 	rxr->lbq_next = rxr->lbq_free = 0;
1132 	rxr->sbq_next = rxr->sbq_free = 0;
1133 	rxr->rx_free = rxr->rx_next = 0;
1134 	rxr->lbq_in = (QLA_NUM_LGB_ENTRIES - 1) & ~0xF;
1135 	rxr->sbq_in = (QLA_NUM_SMB_ENTRIES - 1) & ~0xF;
1137 qls_init_comp_queue_exit:
/*
 * qls_init_work_queue
 * Builds the work-queue (TX) initialization control block for ring
 * 'wid' (length, flags, associated CQ id, ring and consumer-index
 * physical addresses), computes the ring's doorbell address in BAR1,
 * loads the ICB into the chip via the ICB access registers (under the
 * ICB semaphore) using the Load-Request-Queue (LRQ) CONFIG bit, and
 * resets the software free count.
 */
1142 qls_init_work_queue(qla_host_t *ha, int wid)
1144 	q81_wq_icb_t *wq_icb;
1149 	txr = &ha->tx_ring[wid];
	/* doorbell register: one page per work queue in BAR1 */
1151 	txr->wq_db_addr = (struct resource *)((uint8_t *)ha->pci_reg1
1152 						+ (ha->vm_pgsize * wid));
1154 	txr->wq_db_offset = (ha->vm_pgsize * wid);
1156 	wq_icb = txr->wq_icb_vaddr;
1157 	bzero(wq_icb, sizeof (q81_wq_icb_t));
1159 	wq_icb->length_v = NUM_TX_DESCRIPTORS |
1162 	wq_icb->flags = Q81_WQ_ICB_FLAGS_LO | Q81_WQ_ICB_FLAGS_LI |
1163 			Q81_WQ_ICB_FLAGS_LB | Q81_WQ_ICB_FLAGS_LC;
	/* completions for this WQ go to the same-numbered CQ */
1165 	wq_icb->wqcqid_rss = wid;
1167 	wq_icb->baddr_lo = txr->wq_paddr & 0xFFFFFFFF;
1168 	wq_icb->baddr_hi = (txr->wq_paddr >> 32)& 0xFFFFFFFF;
1170 	wq_icb->ci_addr_lo = txr->txr_cons_paddr & 0xFFFFFFFF;
1171 	wq_icb->ci_addr_hi = (txr->txr_cons_paddr >> 32)& 0xFFFFFFFF;
	/* make sure no previous WQ load is still pending */
1173 	ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LRQ, 0);
1176 		goto qls_init_wq_exit;
1178 	ret = qls_sem_lock(ha, Q81_CTL_SEM_MASK_ICB, Q81_CTL_SEM_SET_ICB);
1181 		QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
1182 		goto qls_init_wq_exit;
	/* hand the chip the physical address of the WQ ICB */
1185 	value = (uint32_t)txr->wq_icb_paddr;
1186 	WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_LO, value);
1188 	value = (uint32_t)(txr->wq_icb_paddr >> 32);
1189 	WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_HI, value);
1191 	qls_sem_unlock(ha, Q81_CTL_SEM_MASK_ICB);
	/* trigger the load with the queue number in the Q_NUM field */
1193 	value = Q81_CTL_CONFIG_LRQ | Q81_CTL_CONFIG_Q_NUM_MASK;
1194 	value = (value << Q81_CTL_CONFIG_MASK_SHIFT) | Q81_CTL_CONFIG_LRQ;
1195 	value |= (wid << Q81_CTL_CONFIG_Q_NUM_SHIFT);
1196 	WRITE_REG32(ha, Q81_CTL_CONFIG, value);
1198 	ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LRQ, 0);
1200 	txr->txr_free = NUM_TX_DESCRIPTORS;
/*
 * qls_hw_add_all_mcast
 * Re-programs every non-zero entry of the software multicast table
 * into the hardware; used after (re)initialization to restore the
 * multicast filter. Stops scanning once 'nmcast' entries have been
 * accounted for (decrement presumably happens in elided lines).
 */
1209 qls_hw_add_all_mcast(qla_host_t *ha)
1213 	nmcast = ha->nmcast;
1215 	for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
		/* all-zero address marks an unused slot */
1216 		if ((ha->mcast[i].addr[0] != 0) ||
1217 			(ha->mcast[i].addr[1] != 0) ||
1218 			(ha->mcast[i].addr[2] != 0) ||
1219 			(ha->mcast[i].addr[3] != 0) ||
1220 			(ha->mcast[i].addr[4] != 0) ||
1221 			(ha->mcast[i].addr[5] != 0)) {
1223 			if (qls_config_mcast_mac_addr(ha, ha->mcast[i].addr,
1225 				device_printf(ha->pci_dev, "%s: failed\n",
/*
 * qls_hw_add_mcast
 * Adds one multicast address: returns success immediately if it is
 * already present, otherwise finds the first free (all-zero) slot,
 * programs the hardware, and records the address in the software
 * table.
 */
1237 qls_hw_add_mcast(qla_host_t *ha, uint8_t *mta)
	/* duplicate check */
1241 	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
1243 		if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0)
1244 			return 0; /* its been already added */
	/* find a free slot (all-zero address) */
1247 	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
1249 		if ((ha->mcast[i].addr[0] == 0) &&
1250 			(ha->mcast[i].addr[1] == 0) &&
1251 			(ha->mcast[i].addr[2] == 0) &&
1252 			(ha->mcast[i].addr[3] == 0) &&
1253 			(ha->mcast[i].addr[4] == 0) &&
1254 			(ha->mcast[i].addr[5] == 0)) {
1256 			if (qls_config_mcast_mac_addr(ha, mta, 1, i))
1259 			bcopy(mta, ha->mcast[i].addr, Q8_MAC_ADDR_LEN);
/*
 * qls_hw_del_mcast
 * Removes one multicast address: locates it in the software table,
 * unbinds it in hardware, and zeroes the slot to mark it free.
 */
1269 qls_hw_del_mcast(qla_host_t *ha, uint8_t *mta)
1273 	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
1274 		if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) {
1276 			if (qls_config_mcast_mac_addr(ha, mta, 0, i))
			/* mark the slot free */
1279 			ha->mcast[i].addr[0] = 0;
1280 			ha->mcast[i].addr[1] = 0;
1281 			ha->mcast[i].addr[2] = 0;
1282 			ha->mcast[i].addr[3] = 0;
1283 			ha->mcast[i].addr[4] = 0;
1284 			ha->mcast[i].addr[5] = 0;
1295  * Name: qls_hw_set_multi
1296  * Function: Sets the Multicast Addresses provided the host O.S into the
1297  * hardware (for the given interface)
/*
 * Walks 'mcnt' consecutive 6-byte addresses in 'mta', adding or
 * deleting each (the add/delete selector is presumably the elided
 * parameter), stopping at the first failure.
 */
1300 qls_hw_set_multi(qla_host_t *ha, uint8_t *mta, uint32_t mcnt,
1305 	for (i = 0; i < mcnt; i++) {
1307 			if (qls_hw_add_mcast(ha, mta))
1310 			if (qls_hw_del_mcast(ha, mta))
		/* advance to the next 6-byte address */
1314 		mta += Q8_MAC_ADDR_LEN;
/*
 * qls_update_link_state
 * Reads the per-function link bit from the STATUS register (PL0 for
 * even PCI functions, PL1 for odd) and, when the state changed,
 * notifies the network stack via if_link_state_change(). No-op while
 * the interface is not running.
 */
1320 qls_update_link_state(qla_host_t *ha)
1322 	uint32_t link_state;
1323 	uint32_t prev_link_state;
1325 	if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1329 	link_state = READ_REG32(ha, Q81_CTL_STATUS);
1331 	prev_link_state =  ha->link_up;
	/* link bit depends on which PCI function this port is */
1333 	if ((ha->pci_func & 0x1) == 0)
1334 		ha->link_up = ((link_state & Q81_CTL_STATUS_PL0)? 1 : 0);
1336 		ha->link_up = ((link_state & Q81_CTL_STATUS_PL1)? 1 : 0);
1338 	if (prev_link_state !=  ha->link_up) {
1342 			if_link_state_change(ha->ifp, LINK_STATE_UP);
1344 			if_link_state_change(ha->ifp, LINK_STATE_DOWN);
/*
 * qls_free_tx_ring_dma
 * Frees the DMA areas of one TX ring (the work-queue ring and the
 * private buffer area), guarded and tracked by per-resource flags so
 * the call is idempotent.
 */
1351 qls_free_tx_ring_dma(qla_host_t *ha, int r_idx)
1353 	if (ha->tx_ring[r_idx].flags.wq_dma) {
1354 		qls_free_dmabuf(ha, &ha->tx_ring[r_idx].wq_dma);
1355 		ha->tx_ring[r_idx].flags.wq_dma = 0;
1358 	if (ha->tx_ring[r_idx].flags.privb_dma) {
1359 		qls_free_dmabuf(ha, &ha->tx_ring[r_idx].privb_dma);
1360 		ha->tx_ring[r_idx].flags.privb_dma = 0;
/*
 * qls_free_tx_dma
 * Releases all TX DMA state: per-ring DMA areas, every descriptor's
 * DMA map, and finally the TX DMA tag itself.
 */
1366 qls_free_tx_dma(qla_host_t *ha)
1371 	for (i = 0; i < ha->num_tx_rings; i++) {
1373 		qls_free_tx_ring_dma(ha, i);
1375 		for (j = 0; j < NUM_TX_DESCRIPTORS; j++) {
1377 			txb = &ha->tx_ring[i].tx_buf[j];
1380 				bus_dmamap_destroy(ha->tx_tag, txb->map);
1385 	if (ha->tx_tag != NULL) {
1386 		bus_dma_tag_destroy(ha->tx_tag);
/*
 * qls_alloc_tx_ring_dma
 * Allocates the DMA memory for one TX ring: the work-queue descriptor
 * ring and a private buffer area that is carved up into the WQ ICB,
 * the consumer-index word (second half of the first page), and one
 * OAL (overflow address list) block per descriptor.
 */
1394 qls_alloc_tx_ring_dma(qla_host_t *ha, int ridx)
1400 	device_t dev = ha->pci_dev;
1402 	ha->tx_ring[ridx].wq_dma.alignment = 8;
1403 	ha->tx_ring[ridx].wq_dma.size =
1404 		NUM_TX_DESCRIPTORS * (sizeof (q81_tx_cmd_t));
1406 	ret = qls_alloc_dmabuf(ha, &ha->tx_ring[ridx].wq_dma);
1409 		device_printf(dev, "%s: [%d] txr failed\n", __func__, ridx);
1410 		goto qls_alloc_tx_ring_dma_exit;
1412 	ha->tx_ring[ridx].flags.wq_dma = 1;
1414 	ha->tx_ring[ridx].privb_dma.alignment = 8;
1415 	ha->tx_ring[ridx].privb_dma.size = QLA_TX_PRIVATE_BSIZE;
1417 	ret = qls_alloc_dmabuf(ha, &ha->tx_ring[ridx].privb_dma);
1420 		device_printf(dev, "%s: [%d] oalb failed\n", __func__, ridx);
1421 		goto qls_alloc_tx_ring_dma_exit;
1424 	ha->tx_ring[ridx].flags.privb_dma = 1;
1426 	ha->tx_ring[ridx].wq_vaddr = ha->tx_ring[ridx].wq_dma.dma_b;
1427 	ha->tx_ring[ridx].wq_paddr = ha->tx_ring[ridx].wq_dma.dma_addr;
	/* carve up the private buffer: ICB first, then consumer index */
1429 	v_addr = ha->tx_ring[ridx].privb_dma.dma_b;
1430 	p_addr = ha->tx_ring[ridx].privb_dma.dma_addr;
1432 	ha->tx_ring[ridx].wq_icb_vaddr = v_addr;
1433 	ha->tx_ring[ridx].wq_icb_paddr = p_addr;
1435 	ha->tx_ring[ridx].txr_cons_vaddr =
1436 		(uint32_t *)(v_addr + (PAGE_SIZE >> 1));
1437 	ha->tx_ring[ridx].txr_cons_paddr = p_addr + (PAGE_SIZE >> 1);
1439 	v_addr = v_addr + (PAGE_SIZE >> 1);
1440 	p_addr = p_addr + (PAGE_SIZE >> 1);
	/* one OAL block per TX descriptor for segment overflow lists */
1442 	txb = ha->tx_ring[ridx].tx_buf;
1444 	for (i = 0; i < NUM_TX_DESCRIPTORS; i++) {
1446 		txb[i].oal_vaddr = v_addr;
1447 		txb[i].oal_paddr = p_addr;
1449 		v_addr = v_addr + QLA_OAL_BLK_SIZE;
1450 		p_addr = p_addr + QLA_OAL_BLK_SIZE;
1453 qls_alloc_tx_ring_dma_exit:
/*
 * qls_alloc_tx_dma
 * Creates the TX bus_dma tag (up to QLA_MAX_SEGMENTS segments of at
 * most PAGE_SIZE each, total QLA_MAX_TSO_FRAME_SIZE), then allocates
 * each ring's DMA areas and a DMA map per descriptor. On any failure
 * everything allocated so far is torn down via qls_free_tx_dma().
 */
1458 qls_alloc_tx_dma(qla_host_t *ha)
1464 	if (bus_dma_tag_create(NULL,    /* parent */
1465 		1, 0,    /* alignment, bounds */
1466 		BUS_SPACE_MAXADDR,       /* lowaddr */
1467 		BUS_SPACE_MAXADDR,       /* highaddr */
1468 		NULL, NULL,      /* filter, filterarg */
1469 		QLA_MAX_TSO_FRAME_SIZE,     /* maxsize */
1470 		QLA_MAX_SEGMENTS,        /* nsegments */
1471 		PAGE_SIZE,        /* maxsegsize */
1472 		BUS_DMA_ALLOCNOW,        /* flags */
1473 		NULL,    /* lockfunc */
1474 		NULL,    /* lockfuncarg */
1476 		device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n",
1481 	for (i = 0; i < ha->num_tx_rings; i++) {
1483 		ret = qls_alloc_tx_ring_dma(ha, i);
1486 			qls_free_tx_dma(ha);
1490 		for (j = 0; j < NUM_TX_DESCRIPTORS; j++) {
1492 			txb = &ha->tx_ring[i].tx_buf[j];
1494 			ret = bus_dmamap_create(ha->tx_tag,
1495 				BUS_DMA_NOWAIT, &txb->map);
1497 				ha->err_tx_dmamap_create++;
1498 				device_printf(ha->pci_dev,
1499 				"%s: bus_dmamap_create failed[%d, %d, %d]\n",
1500 				__func__, ret, i, j);
1502 				qls_free_tx_dma(ha);
1513 qls_free_rss_dma(qla_host_t *ha)
1515 qls_free_dmabuf(ha, &ha->rss_dma);
1516 ha->flags.rss_dma = 0;
1520 qls_alloc_rss_dma(qla_host_t *ha)
1524 ha->rss_dma.alignment = 4;
1525 ha->rss_dma.size = PAGE_SIZE;
1527 ret = qls_alloc_dmabuf(ha, &ha->rss_dma);
1530 device_printf(ha->pci_dev, "%s: failed\n", __func__);
1532 ha->flags.rss_dma = 1;
1538 qls_free_mpi_dma(qla_host_t *ha)
1540 qls_free_dmabuf(ha, &ha->mpi_dma);
1541 ha->flags.mpi_dma = 0;
1545 qls_alloc_mpi_dma(qla_host_t *ha)
1549 ha->mpi_dma.alignment = 4;
1550 ha->mpi_dma.size = (0x4000 * 4);
1552 ret = qls_alloc_dmabuf(ha, &ha->mpi_dma);
1554 device_printf(ha->pci_dev, "%s: failed\n", __func__);
1556 ha->flags.mpi_dma = 1;
1562 qls_free_rx_ring_dma(qla_host_t *ha, int ridx)
1564 if (ha->rx_ring[ridx].flags.cq_dma) {
1565 qls_free_dmabuf(ha, &ha->rx_ring[ridx].cq_dma);
1566 ha->rx_ring[ridx].flags.cq_dma = 0;
1569 if (ha->rx_ring[ridx].flags.lbq_dma) {
1570 qls_free_dmabuf(ha, &ha->rx_ring[ridx].lbq_dma);
1571 ha->rx_ring[ridx].flags.lbq_dma = 0;
1574 if (ha->rx_ring[ridx].flags.sbq_dma) {
1575 qls_free_dmabuf(ha, &ha->rx_ring[ridx].sbq_dma);
1576 ha->rx_ring[ridx].flags.sbq_dma = 0;
1579 if (ha->rx_ring[ridx].flags.lb_dma) {
1580 qls_free_dmabuf(ha, &ha->rx_ring[ridx].lb_dma);
1581 ha->rx_ring[ridx].flags.lb_dma = 0;
1587 qls_free_rx_dma(qla_host_t *ha)
1591 for (i = 0; i < ha->num_rx_rings; i++) {
1592 qls_free_rx_ring_dma(ha, i);
1595 if (ha->rx_tag != NULL) {
1596 bus_dma_tag_destroy(ha->rx_tag);
/*
 * qls_alloc_rx_ring_dma
 *	Allocate and lay out all DMA memory for one receive ring:
 *	 - completion queue (cq_dma): CQ entries + one extra page carved into
 *	   the CQ ICB and the completion-queue index register area;
 *	 - large buffer queue (lbq_dma): address table page + queue entries;
 *	 - small buffer queue (sbq_dma): address table page + queue entries;
 *	 - large buffer pool (lb_dma): the large receive buffers themselves.
 *	Then populates the lbq/sbq address tables with the physical addresses
 *	the hardware will DMA into.
 *	Returns 0 on success; sets per-buffer flags so teardown can free
 *	exactly what was allocated.
 *	NOTE(review): declarations/guards are elided in this extract; comments
 *	describe only the visible statements.
 */
1604 qls_alloc_rx_ring_dma(qla_host_t *ha, int ridx)
1609 volatile q81_bq_addr_e_t *bq_e;
1610 device_t dev = ha->pci_dev;
/* completion queue: entries + a page for ICB and index register */
1612 ha->rx_ring[ridx].cq_dma.alignment = 128;
1613 ha->rx_ring[ridx].cq_dma.size =
1614 (NUM_CQ_ENTRIES * (sizeof (q81_cq_e_t))) + PAGE_SIZE;
1616 ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].cq_dma);
1619 device_printf(dev, "%s: [%d] cq failed\n", __func__, ridx);
1620 goto qls_alloc_rx_ring_dma_exit;
1622 ha->rx_ring[ridx].flags.cq_dma = 1;
/* large buffer queue + its address table */
1624 ha->rx_ring[ridx].lbq_dma.alignment = 8;
1625 ha->rx_ring[ridx].lbq_dma.size = QLA_LGBQ_AND_TABLE_SIZE;
1627 ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].lbq_dma);
1630 device_printf(dev, "%s: [%d] lbq failed\n", __func__, ridx);
1631 goto qls_alloc_rx_ring_dma_exit;
1633 ha->rx_ring[ridx].flags.lbq_dma = 1;
/* small buffer queue + its address table */
1635 ha->rx_ring[ridx].sbq_dma.alignment = 8;
1636 ha->rx_ring[ridx].sbq_dma.size = QLA_SMBQ_AND_TABLE_SIZE;
1638 ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].sbq_dma);
1641 device_printf(dev, "%s: [%d] sbq failed\n", __func__, ridx);
1642 goto qls_alloc_rx_ring_dma_exit;
1644 ha->rx_ring[ridx].flags.sbq_dma = 1;
/* large buffer pool backing the lbq entries */
1646 ha->rx_ring[ridx].lb_dma.alignment = 8;
1647 ha->rx_ring[ridx].lb_dma.size = (QLA_LGB_SIZE * QLA_NUM_LGB_ENTRIES);
1649 ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].lb_dma);
1651 device_printf(dev, "%s: [%d] lb failed\n", __func__, ridx);
1652 goto qls_alloc_rx_ring_dma_exit;
1654 ha->rx_ring[ridx].flags.lb_dma = 1;
/* start from clean memory before handing anything to the hardware */
1656 bzero(ha->rx_ring[ridx].cq_dma.dma_b, ha->rx_ring[ridx].cq_dma.size);
1657 bzero(ha->rx_ring[ridx].lbq_dma.dma_b, ha->rx_ring[ridx].lbq_dma.size);
1658 bzero(ha->rx_ring[ridx].sbq_dma.dma_b, ha->rx_ring[ridx].sbq_dma.size);
1659 bzero(ha->rx_ring[ridx].lb_dma.dma_b, ha->rx_ring[ridx].lb_dma.size);
1661 /* completion queue */
1662 ha->rx_ring[ridx].cq_base_vaddr = ha->rx_ring[ridx].cq_dma.dma_b;
1663 ha->rx_ring[ridx].cq_base_paddr = ha->rx_ring[ridx].cq_dma.dma_addr;
1665 v_addr = ha->rx_ring[ridx].cq_dma.dma_b;
1666 p_addr = ha->rx_ring[ridx].cq_dma.dma_addr;
/* skip past the CQ entries; the trailing page holds ICB + index reg */
1668 v_addr = v_addr + (NUM_CQ_ENTRIES * (sizeof (q81_cq_e_t)));
1669 p_addr = p_addr + (NUM_CQ_ENTRIES * (sizeof (q81_cq_e_t)));
1671 /* completion queue icb */
1672 ha->rx_ring[ridx].cq_icb_vaddr = v_addr;
1673 ha->rx_ring[ridx].cq_icb_paddr = p_addr;
/* index register lives a quarter page after the ICB */
1675 v_addr = v_addr + (PAGE_SIZE >> 2);
1676 p_addr = p_addr + (PAGE_SIZE >> 2);
1678 /* completion queue index register */
1679 ha->rx_ring[ridx].cqi_vaddr = (uint32_t *)v_addr;
1680 ha->rx_ring[ridx].cqi_paddr = p_addr;
1682 v_addr = ha->rx_ring[ridx].lbq_dma.dma_b;
1683 p_addr = ha->rx_ring[ridx].lbq_dma.dma_addr;
1685 /* large buffer queue address table */
1686 ha->rx_ring[ridx].lbq_addr_tbl_vaddr = v_addr;
1687 ha->rx_ring[ridx].lbq_addr_tbl_paddr = p_addr;
1689 /* large buffer queue */
1690 ha->rx_ring[ridx].lbq_vaddr = v_addr + PAGE_SIZE;
1691 ha->rx_ring[ridx].lbq_paddr = p_addr + PAGE_SIZE;
1693 v_addr = ha->rx_ring[ridx].sbq_dma.dma_b;
1694 p_addr = ha->rx_ring[ridx].sbq_dma.dma_addr;
1696 /* small buffer queue address table */
1697 ha->rx_ring[ridx].sbq_addr_tbl_vaddr = v_addr;
1698 ha->rx_ring[ridx].sbq_addr_tbl_paddr = p_addr;
1700 /* small buffer queue */
1701 ha->rx_ring[ridx].sbq_vaddr = v_addr + PAGE_SIZE;
1702 ha->rx_ring[ridx].sbq_paddr = p_addr + PAGE_SIZE;
1704 ha->rx_ring[ridx].lb_vaddr = ha->rx_ring[ridx].lb_dma.dma_b;
1705 ha->rx_ring[ridx].lb_paddr = ha->rx_ring[ridx].lb_dma.dma_addr;
1707 /* Initialize Large Buffer Queue Table */
/* the table's first entry points at the lbq itself (split into lo/hi) */
1709 p_addr = ha->rx_ring[ridx].lbq_paddr;
1710 bq_e = ha->rx_ring[ridx].lbq_addr_tbl_vaddr;
1712 bq_e->addr_lo = p_addr & 0xFFFFFFFF;
1713 bq_e->addr_hi = (p_addr >> 32) & 0xFFFFFFFF;
/* fill the lbq with the physical address of each large buffer */
1715 p_addr = ha->rx_ring[ridx].lb_paddr;
1716 bq_e = ha->rx_ring[ridx].lbq_vaddr;
1718 for (i = 0; i < QLA_NUM_LGB_ENTRIES; i++) {
1719 bq_e->addr_lo = p_addr & 0xFFFFFFFF;
1720 bq_e->addr_hi = (p_addr >> 32) & 0xFFFFFFFF;
1722 p_addr = p_addr + QLA_LGB_SIZE;
1726 /* Initialize Small Buffer Queue Table */
/* sbq address table: one entry per page of the small buffer queue */
1728 p_addr = ha->rx_ring[ridx].sbq_paddr;
1729 bq_e = ha->rx_ring[ridx].sbq_addr_tbl_vaddr;
1731 for (i =0; i < (QLA_SBQ_SIZE/QLA_PAGE_SIZE); i++) {
1732 bq_e->addr_lo = p_addr & 0xFFFFFFFF;
1733 bq_e->addr_hi = (p_addr >> 32) & 0xFFFFFFFF;
1735 p_addr = p_addr + QLA_PAGE_SIZE;
1739 qls_alloc_rx_ring_dma_exit:
/*
 * qls_alloc_rx_dma
 *	Create the receive DMA tag (sized for 9K jumbo clusters) and then
 *	allocate the per-ring DMA memory for every rx ring; on any per-ring
 *	failure all rx DMA resources are released via qls_free_rx_dma().
 *	NOTE(review): the nsegments argument line of the tag-create call and
 *	the loop/exit lines are elided in this extract.
 */
1744 qls_alloc_rx_dma(qla_host_t *ha)
1749 if (bus_dma_tag_create(NULL, /* parent */
1750 1, 0, /* alignment, bounds */
1751 BUS_SPACE_MAXADDR, /* lowaddr */
1752 BUS_SPACE_MAXADDR, /* highaddr */
1753 NULL, NULL, /* filter, filterarg */
1754 MJUM9BYTES, /* maxsize */
1756 MJUM9BYTES, /* maxsegsize */
1757 BUS_DMA_ALLOCNOW, /* flags */
1758 NULL, /* lockfunc */
1759 NULL, /* lockfuncarg */
1762 device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
/* per-ring cq/lbq/sbq/lb DMA allocations */
1768 for (i = 0; i < ha->num_rx_rings; i++) {
1769 ret = qls_alloc_rx_ring_dma(ha, i);
/* unwind everything allocated so far */
1772 qls_free_rx_dma(ha);
/*
 * qls_wait_for_flash_ready
 *	Poll the flash address register until the controller reports ready,
 *	an error, or the retry budget is exhausted; delays 100us between
 *	polls.  Returns via the exit label on error/timeout.
 *	NOTE(review): the retry-count initialization and loop header are
 *	elided in this extract.
 */
1781 qls_wait_for_flash_ready(qla_host_t *ha)
1788 data32 = READ_REG32(ha, Q81_CTL_FLASH_ADDR);
/* hard error from the flash controller — give up immediately */
1790 if (data32 & Q81_CTL_FLASH_ADDR_ERR)
1791 goto qls_wait_for_flash_ready_exit;
/* ready bit set — success path (return is elided in this extract) */
1793 if (data32 & Q81_CTL_FLASH_ADDR_RDY)
1796 QLA_USEC_DELAY(100);
1799 qls_wait_for_flash_ready_exit:
1800 QL_DPRINT1((ha->pci_dev, "%s: failed\n", __func__));
1806 * Name: qls_rd_flash32
1807 * Function: Read Flash Memory
1810 qls_rd_flash32(qla_host_t *ha, uint32_t addr, uint32_t *data)
1814 ret = qls_wait_for_flash_ready(ha);
1819 WRITE_REG32(ha, Q81_CTL_FLASH_ADDR, (addr | Q81_CTL_FLASH_ADDR_R));
1821 ret = qls_wait_for_flash_ready(ha);
1826 *data = READ_REG32(ha, Q81_CTL_FLASH_DATA);
1832 qls_flash_validate(qla_host_t *ha, const char *signature)
1834 uint16_t csum16 = 0;
1838 if (bcmp(ha->flash.id, signature, 4)) {
1839 QL_DPRINT1((ha->pci_dev, "%s: invalid signature "
1840 "%x:%x:%x:%x %s\n", __func__, ha->flash.id[0],
1841 ha->flash.id[1], ha->flash.id[2], ha->flash.id[3],
1846 data16 = (uint16_t *)&ha->flash;
1848 for (i = 0; i < (sizeof (q81_flash_t) >> 1); i++) {
1849 csum16 += *data16++;
1853 QL_DPRINT1((ha->pci_dev, "%s: invalid checksum\n", __func__));
/*
 * qls_rd_nic_params
 *	Read this PCI function's parameter region out of flash (offset
 *	selected by the function number), validate it, and copy the MAC
 *	address into ha->mac_addr.  The flash semaphore is held for the
 *	duration and released on all paths.
 *	NOTE(review): loop increments and returns are elided in this extract.
 */
1860 qls_rd_nic_params(qla_host_t *ha)
/* serialize flash access against the other function / firmware */
1866 if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_FLASH, Q81_CTL_SEM_SET_FLASH)) {
1867 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
/* even/odd PCI functions keep their parameters at different offsets */
1871 if ((ha->pci_func & 0x1) == 0)
1872 faddr = Q81_F0_FLASH_OFFSET >> 2;
1874 faddr = Q81_F1_FLASH_OFFSET >> 2;
1876 qflash = (uint32_t *)&ha->flash;
/* read the whole q81_flash_t, one 32-bit word at a time */
1878 for (i = 0; i < (sizeof(q81_flash_t) >> 2) ; i++) {
1880 ret = qls_rd_flash32(ha, faddr, qflash);
1883 goto qls_rd_flash_data_exit;
1889 QL_DUMP_BUFFER8(ha, __func__, (&ha->flash), (sizeof (q81_flash_t)));
/* signature + checksum must be good before trusting any field */
1891 ret = qls_flash_validate(ha, Q81_FLASH_ID);
1894 goto qls_rd_flash_data_exit;
1896 bcopy(ha->flash.mac_addr0, ha->mac_addr, ETHER_ADDR_LEN);
1898 QL_DPRINT1((ha->pci_dev, "%s: mac %02x:%02x:%02x:%02x:%02x:%02x\n",
1899 __func__, ha->mac_addr[0], ha->mac_addr[1], ha->mac_addr[2],
1900 ha->mac_addr[3], ha->mac_addr[4], ha->mac_addr[5]));
1902 qls_rd_flash_data_exit:
/* always drop the flash semaphore, success or failure */
1904 qls_sem_unlock(ha, Q81_CTL_SEM_MASK_FLASH);
1910 qls_sem_lock(qla_host_t *ha, uint32_t mask, uint32_t value)
1912 uint32_t count = 30;
1916 WRITE_REG32(ha, Q81_CTL_SEMAPHORE, (mask|value));
1918 data = READ_REG32(ha, Q81_CTL_SEMAPHORE);
1923 QLA_USEC_DELAY(100);
1926 ha->qla_initiate_recovery = 1;
1931 qls_sem_unlock(qla_host_t *ha, uint32_t mask)
1933 WRITE_REG32(ha, Q81_CTL_SEMAPHORE, mask);
/*
 * qls_wait_for_proc_addr_ready
 *	Poll the processor-address register until it reports ready, an
 *	error, or the retry budget is exhausted (100us between polls).  On
 *	failure the recovery flag is raised before returning via the exit
 *	label.
 *	NOTE(review): the retry-count initialization and loop header are
 *	elided in this extract.
 */
1937 qls_wait_for_proc_addr_ready(qla_host_t *ha)
1944 data32 = READ_REG32(ha, Q81_CTL_PROC_ADDR);
/* hard error — bail out immediately */
1946 if (data32 & Q81_CTL_PROC_ADDR_ERR)
1947 goto qls_wait_for_proc_addr_ready_exit;
/* ready bit set — success path (return is elided in this extract) */
1949 if (data32 & Q81_CTL_PROC_ADDR_RDY)
1952 QLA_USEC_DELAY(100);
1955 qls_wait_for_proc_addr_ready_exit:
1956 QL_DPRINT1((ha->pci_dev, "%s: failed\n", __func__));
/* proc-address interface wedged — request adapter recovery */
1958 ha->qla_initiate_recovery = 1;
1963 qls_proc_addr_rd_reg(qla_host_t *ha, uint32_t addr_module, uint32_t reg,
1969 ret = qls_wait_for_proc_addr_ready(ha);
1972 goto qls_proc_addr_rd_reg_exit;
1974 value = addr_module | reg | Q81_CTL_PROC_ADDR_READ;
1976 WRITE_REG32(ha, Q81_CTL_PROC_ADDR, value);
1978 ret = qls_wait_for_proc_addr_ready(ha);
1981 goto qls_proc_addr_rd_reg_exit;
1983 *data = READ_REG32(ha, Q81_CTL_PROC_DATA);
1985 qls_proc_addr_rd_reg_exit:
1990 qls_proc_addr_wr_reg(qla_host_t *ha, uint32_t addr_module, uint32_t reg,
1996 ret = qls_wait_for_proc_addr_ready(ha);
1999 goto qls_proc_addr_wr_reg_exit;
2001 WRITE_REG32(ha, Q81_CTL_PROC_DATA, data);
2003 value = addr_module | reg;
2005 WRITE_REG32(ha, Q81_CTL_PROC_ADDR, value);
2007 ret = qls_wait_for_proc_addr_ready(ha);
2009 qls_proc_addr_wr_reg_exit:
/*
 * qls_hw_nic_reset
 *	Reset this NIC function by writing the function-reset bit (with its
 *	mask) to the reset register, then polling until the hardware clears
 *	the bit.  Fails if bit 15 never clears.
 *	NOTE(review): the poll-count initialization, loop header and return
 *	statements are elided in this extract.
 */
2014 qls_hw_nic_reset(qla_host_t *ha)
2018 device_t dev = ha->pci_dev;
/* write mask + function-reset bit to kick off the reset */
2022 data = (Q81_CTL_RESET_FUNC << Q81_CTL_RESET_MASK_SHIFT) |
2024 WRITE_REG32(ha, Q81_CTL_RESET, data);
/* poll for hardware to acknowledge by clearing the reset bit */
2028 data = READ_REG32(ha, Q81_CTL_RESET);
2029 if ((data & Q81_CTL_RESET_FUNC) == 0)
2034 device_printf(dev, "%s: Bit 15 not cleared after Reset\n",
/*
 * qls_hw_reset
 *	Full reset sequence for an already-initialized adapter: clear the
 *	routing table, tell firmware management traffic is stopping, wait
 *	for the NIC FIFO to drain and for firmware to confirm, reset the NIC
 *	function, then resume management control.  On a never-initialized
 *	adapter (hw_init == 0) only the NIC function reset is performed.
 *	NOTE(review): loop bounds, guards and returns are elided in this
 *	extract; comments describe only the visible statements.
 */
2042 qls_hw_reset(qla_host_t *ha)
2044 device_t dev = ha->pci_dev;
2049 QL_DPRINT2((ha->pci_dev, "%s:enter[%d]\n", __func__, ha->hw_init));
/* first-time path: nothing to quiesce, just reset the function */
2051 if (ha->hw_init == 0) {
2052 ret = qls_hw_nic_reset(ha);
2053 goto qls_hw_reset_exit;
2056 ret = qls_clear_routing_table(ha);
2058 goto qls_hw_reset_exit;
/* tell firmware to stop management pass-through before the reset */
2060 ret = qls_mbx_set_mgmt_ctrl(ha, Q81_MBX_SET_MGMT_CTL_STOP);
2062 goto qls_hw_reset_exit;
2065 * Wait for FIFO to empty
2069 data = READ_REG32(ha, Q81_CTL_STATUS);
2070 if (data & Q81_CTL_STATUS_NFE)
2072 qls_mdelay(__func__, 100);
2075 device_printf(dev, "%s: NFE bit not set\n", __func__);
2076 goto qls_hw_reset_exit;
/* poll firmware until it confirms FIFO-empty and mgmt-stopped */
2081 (void)qls_mbx_get_mgmt_ctrl(ha, &data);
2083 if ((data & Q81_MBX_GET_MGMT_CTL_FIFO_EMPTY) &&
2084 (data & Q81_MBX_GET_MGMT_CTL_SET_MGMT))
2086 qls_mdelay(__func__, 100);
2089 goto qls_hw_reset_exit;
2092 * Reset the NIC function
2094 ret = qls_hw_nic_reset(ha);
2096 goto qls_hw_reset_exit;
/* hand management traffic back to firmware */
2098 ret = qls_mbx_set_mgmt_ctrl(ha, Q81_MBX_SET_MGMT_CTL_RESUME);
2102 device_printf(dev, "%s: failed\n", __func__);
2108 * MPI Related Functions
2111 qls_mpi_risc_rd_reg(qla_host_t *ha, uint32_t reg, uint32_t *data)
2115 ret = qls_proc_addr_rd_reg(ha, Q81_CTL_PROC_ADDR_MPI_RISC,
2121 qls_mpi_risc_wr_reg(qla_host_t *ha, uint32_t reg, uint32_t data)
2125 ret = qls_proc_addr_wr_reg(ha, Q81_CTL_PROC_ADDR_MPI_RISC,
2131 qls_mbx_rd_reg(qla_host_t *ha, uint32_t reg, uint32_t *data)
2135 if ((ha->pci_func & 0x1) == 0)
2136 reg += Q81_FUNC0_MBX_OUT_REG0;
2138 reg += Q81_FUNC1_MBX_OUT_REG0;
2140 ret = qls_mpi_risc_rd_reg(ha, reg, data);
2146 qls_mbx_wr_reg(qla_host_t *ha, uint32_t reg, uint32_t data)
2150 if ((ha->pci_func & 0x1) == 0)
2151 reg += Q81_FUNC0_MBX_IN_REG0;
2153 reg += Q81_FUNC1_MBX_IN_REG0;
2155 ret = qls_mpi_risc_wr_reg(ha, reg, data);
/*
 * qls_mbx_cmd
 *	Issue a mailbox command to the firmware: write i_count words into
 *	the mailbox-in registers (under the NIC-receive proc-addr
 *	semaphore), raise the host-to-RISC interrupt, then collect o_count
 *	response words.  Two completion paths exist: with interrupts
 *	disabled the status register is polled and the mailbox-out
 *	registers are read directly; with interrupts enabled the ISR fills
 *	ha->mbox and this routine copies from there.
 *	NOTE(review): loop bounds, some guards and returns are elided in
 *	this extract; comments describe only the visible statements.
 */
2162 qls_mbx_cmd(qla_host_t *ha, uint32_t *in_mbx, uint32_t i_count,
2163 uint32_t *out_mbx, uint32_t o_count)
2166 uint32_t data32, mbx_cmd = 0;
2167 uint32_t count = 50;
2169 QL_DPRINT2((ha->pci_dev, "%s: enter[0x%08x 0x%08x 0x%08x]\n",
2170 __func__, *in_mbx, *(in_mbx + 1), *(in_mbx + 2)));
/* a pending host-to-RISC interrupt means a prior command is in flight */
2172 data32 = READ_REG32(ha, Q81_CTL_HOST_CMD_STATUS);
2174 if (data32 & Q81_CTL_HCS_HTR_INTR) {
2175 device_printf(ha->pci_dev, "%s: cmd_status[0x%08x]\n",
2177 goto qls_mbx_cmd_exit;
/* serialize proc-addr access while loading the mailbox-in registers */
2180 if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_PROC_ADDR_NIC_RCV,
2181 Q81_CTL_SEM_SET_PROC_ADDR_NIC_RCV)) {
2182 device_printf(ha->pci_dev, "%s: semlock failed\n", __func__);
2183 goto qls_mbx_cmd_exit;
2190 for (i = 0; i < i_count; i++) {
2192 ret = qls_mbx_wr_reg(ha, i, *in_mbx);
2195 device_printf(ha->pci_dev,
2196 "%s: mbx_wr[%d, 0x%08x] failed\n", __func__,
2198 qls_sem_unlock(ha, Q81_CTL_SEM_MASK_PROC_ADDR_NIC_RCV);
2199 goto qls_mbx_cmd_exit;
/* kick the firmware: set the host-to-RISC interrupt */
2204 WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS, Q81_CTL_HCS_CMD_SET_HTR_INTR);
2206 qls_sem_unlock(ha, Q81_CTL_SEM_MASK_PROC_ADDR_NIC_RCV);
/* polled path: interrupts disabled, read mailbox-out directly */
2213 if (ha->flags.intr_enable == 0) {
2214 data32 = READ_REG32(ha, Q81_CTL_STATUS);
2216 if (!(data32 & Q81_CTL_STATUS_PI)) {
2217 qls_mdelay(__func__, 100);
2221 ret = qls_mbx_rd_reg(ha, 0, &data32);
/* 0x4xxx in mbox0 = command status codes; collect full response */
2224 if ((data32 & 0xF000) == 0x4000) {
2226 out_mbx[0] = data32;
2228 for (i = 1; i < o_count; i++) {
2229 ret = qls_mbx_rd_reg(ha, i,
2239 out_mbx[i] = data32;
/* 0x8xxx = async event, not our completion: ack and keep waiting */
2242 } else if ((data32 & 0xF000) == 0x8000) {
2245 Q81_CTL_HOST_CMD_STATUS,\
2246 Q81_CTL_HCS_CMD_CLR_RTH_INTR);
/* interrupt path: the ISR has already copied the response to ha->mbox */
2251 for (i = 1; i < o_count; i++) {
2252 out_mbx[i] = ha->mbox[i];
2258 qls_mdelay(__func__, 1000);
/* polled path must explicitly clear the RISC-to-host interrupt */
2263 if (ha->flags.intr_enable == 0) {
2264 WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS,\
2265 Q81_CTL_HCS_CMD_CLR_RTH_INTR);
/* command never completed — request adapter recovery */
2269 ha->qla_initiate_recovery = 1;
2272 QL_DPRINT2((ha->pci_dev, "%s: exit[%d]\n", __func__, ret));
/*
 * qls_mbx_set_mgmt_ctrl
 *	Issue the SET_MGMT_CTL mailbox command with the given control word
 *	(e.g. STOP or RESUME).  For a STOP request, CMD_ERROR from firmware
 *	is also treated as success.
 *	NOTE(review): the mbox declaration/assignment and return statements
 *	are elided in this extract.
 */
2277 qls_mbx_set_mgmt_ctrl(qla_host_t *ha, uint32_t t_ctrl)
2280 device_t dev = ha->pci_dev;
2283 bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
2285 mbox[0] = Q81_MBX_SET_MGMT_CTL;
/* 2 words in (command + t_ctrl), 1 status word back */
2288 if (qls_mbx_cmd(ha, mbox, 2, mbox, 1)) {
2289 device_printf(dev, "%s failed\n", __func__);
/* STOP may legitimately return CMD_ERROR (already stopped) */
2293 if ((mbox[0] == Q81_MBX_CMD_COMPLETE) ||
2294 ((t_ctrl == Q81_MBX_SET_MGMT_CTL_STOP) &&
2295 (mbox[0] == Q81_MBX_CMD_ERROR))){
2298 device_printf(dev, "%s failed [0x%08x]\n", __func__, mbox[0]);
/*
 * qls_mbx_get_mgmt_ctrl
 *	Issue the GET_MGMT_CTL mailbox command and return the firmware's
 *	management-control status word through *t_status.
 *	NOTE(review): the mbox declaration and return statements are elided
 *	in this extract.
 */
2304 qls_mbx_get_mgmt_ctrl(qla_host_t *ha, uint32_t *t_status)
2307 device_t dev = ha->pci_dev;
2312 bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS))<;
2314 mbox[0] = Q81_MBX_GET_MGMT_CTL;
/* 1 word in, 2 words back: status code + management-control state */
2316 if (qls_mbx_cmd(ha, mbox, 1, mbox, 2)) {
2317 device_printf(dev, "%s failed\n", __func__);
2321 *t_status = mbox[1];
/*
 * qls_mbx_get_link_status
 *	Issue the GET_LNK_STATUS mailbox command and cache the five
 *	returned words (link status, link-down info, hw info, DCBX and
 *	link-change counters) in the softc.
 *	NOTE(review): the mbox declaration and return statements are elided
 *	in this extract.
 */
2327 qls_mbx_get_link_status(qla_host_t *ha)
2330 device_t dev = ha->pci_dev;
2333 bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
2335 mbox[0] = Q81_MBX_GET_LNK_STATUS;
/* 1 word in, 6 words back */
2337 if (qls_mbx_cmd(ha, mbox, 1, mbox, 6)) {
2338 device_printf(dev, "%s failed\n", __func__);
2342 ha->link_status = mbox[1];
2343 ha->link_down_info = mbox[2];
2344 ha->link_hw_info = mbox[3];
2345 ha->link_dcbx_counters = mbox[4];
2346 ha->link_change_counters = mbox[5];
2348 device_printf(dev, "%s 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
2349 __func__, mbox[0],mbox[1],mbox[2],mbox[3],mbox[4],mbox[5]);
/*
 * qls_mbx_about_fw
 *	Issue the ABOUT_FW mailbox command and log the six words of
 *	firmware-version information it returns.
 *	NOTE(review): the mbox declaration and return statements are elided
 *	in this extract.
 */
2355 qls_mbx_about_fw(qla_host_t *ha)
2358 device_t dev = ha->pci_dev;
2361 bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
2363 mbox[0] = Q81_MBX_ABOUT_FW;
/* 1 word in, 6 words back */
2365 if (qls_mbx_cmd(ha, mbox, 1, mbox, 6)) {
2366 device_printf(dev, "%s failed\n", __func__);
2370 device_printf(dev, "%s 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
2371 __func__, mbox[0],mbox[1],mbox[2],mbox[3],mbox[4],mbox[5]);
/*
 * qls_mbx_dump_risc_ram
 *	Ask firmware to DMA r_size 32-bit words of RISC RAM starting at
 *	r_addr into the host's mpi_dma bounce buffer, then copy the result
 *	into buf.  The 64-bit bounce-buffer physical address and the 32-bit
 *	address/size are split across mailbox words in 16-bit pieces.
 *	NOTE(review): the mbox/b_paddr declarations and return statements
 *	are elided in this extract.
 */
2375 qls_mbx_dump_risc_ram(qla_host_t *ha, void *buf, uint32_t r_addr,
2380 device_t dev = ha->pci_dev;
2383 bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
/* clear the bounce buffer so stale data is never copied out */
2385 bzero(ha->mpi_dma.dma_b,(r_size << 2));
2386 b_paddr = ha->mpi_dma.dma_addr;
/* address/size/paddr are packed 16 bits per mailbox word */
2388 mbox[0] = Q81_MBX_DUMP_RISC_RAM;
2389 mbox[1] = r_addr & 0xFFFF;
2390 mbox[2] = ((uint32_t)(b_paddr >> 16)) & 0xFFFF;
2391 mbox[3] = ((uint32_t)b_paddr) & 0xFFFF;
2392 mbox[4] = (r_size >> 16) & 0xFFFF;
2393 mbox[5] = r_size & 0xFFFF;
2394 mbox[6] = ((uint32_t)(b_paddr >> 48)) & 0xFFFF;
2395 mbox[7] = ((uint32_t)(b_paddr >> 32)) & 0xFFFF;
2396 mbox[8] = (r_addr >> 16) & 0xFFFF;
/* make the buffer coherent for the device before it DMAs into it */
2398 bus_dmamap_sync(ha->mpi_dma.dma_tag, ha->mpi_dma.dma_map,
2399 BUS_DMASYNC_PREREAD);
2401 if (qls_mbx_cmd(ha, mbox, 9, mbox, 1)) {
2402 device_printf(dev, "%s failed\n", __func__);
2405 if (mbox[0] != 0x4000) {
2406 device_printf(ha->pci_dev, "%s: failed!\n", __func__);
/* make the device's writes visible to the CPU before copying out */
2409 bus_dmamap_sync(ha->mpi_dma.dma_tag, ha->mpi_dma.dma_map,
2410 BUS_DMASYNC_POSTREAD);
2411 bcopy(ha->mpi_dma.dma_b, buf, (r_size << 2));
/*
 * qls_mpi_reset
 *	Reset the MPI RISC processor: set the RISC-reset command bit, then
 *	poll the host command/status register until the hardware reflects
 *	the reset, at which point the bit is cleared again.  Logs a failure
 *	if the reset is never observed.
 *	NOTE(review): the poll-count initialization, loop header and return
 *	statements are elided in this extract.
 */
2418 qls_mpi_reset(qla_host_t *ha)
2422 device_t dev = ha->pci_dev;
2424 WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS,\
2425 Q81_CTL_HCS_CMD_SET_RISC_RESET);
/* wait for hardware to acknowledge, then clear the reset bit */
2429 data = READ_REG32(ha, Q81_CTL_HOST_CMD_STATUS);
2430 if (data & Q81_CTL_HCS_RISC_RESET) {
2431 WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS,\
2432 Q81_CTL_HCS_CMD_CLR_RISC_RESET);
2435 qls_mdelay(__func__, 10);
2438 device_printf(dev, "%s: failed\n", __func__);