2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2013-2014 Qlogic Corporation
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
21 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 * POSSIBILITY OF SUCH DAMAGE.
32 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
33 * Content: Contains Hardware dependent functions
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
41 #include "qls_inline.h"
49 static int qls_wait_for_mac_proto_idx_ready(qla_host_t *ha, uint32_t op);
50 static int qls_config_unicast_mac_addr(qla_host_t *ha, uint32_t add_mac);
51 static int qls_config_mcast_mac_addr(qla_host_t *ha, uint8_t *mac_addr,
52 uint32_t add_mac, uint32_t index);
54 static int qls_init_rss(qla_host_t *ha);
55 static int qls_init_comp_queue(qla_host_t *ha, int cid);
56 static int qls_init_work_queue(qla_host_t *ha, int wid);
57 static int qls_init_fw_routing_table(qla_host_t *ha);
58 static int qls_hw_add_all_mcast(qla_host_t *ha);
59 static int qls_hw_add_mcast(qla_host_t *ha, uint8_t *mta);
60 static int qls_hw_del_mcast(qla_host_t *ha, uint8_t *mta);
61 static int qls_wait_for_flash_ready(qla_host_t *ha);
63 static int qls_sem_lock(qla_host_t *ha, uint32_t mask, uint32_t value);
64 static void qls_sem_unlock(qla_host_t *ha, uint32_t mask);
66 static void qls_free_tx_dma(qla_host_t *ha);
67 static int qls_alloc_tx_dma(qla_host_t *ha);
68 static void qls_free_rx_dma(qla_host_t *ha);
69 static int qls_alloc_rx_dma(qla_host_t *ha);
70 static void qls_free_mpi_dma(qla_host_t *ha);
71 static int qls_alloc_mpi_dma(qla_host_t *ha);
72 static void qls_free_rss_dma(qla_host_t *ha);
73 static int qls_alloc_rss_dma(qla_host_t *ha);
75 static int qls_flash_validate(qla_host_t *ha, const char *signature);
77 static int qls_wait_for_proc_addr_ready(qla_host_t *ha);
78 static int qls_proc_addr_rd_reg(qla_host_t *ha, uint32_t addr_module,
79 uint32_t reg, uint32_t *data);
80 static int qls_proc_addr_wr_reg(qla_host_t *ha, uint32_t addr_module,
81 uint32_t reg, uint32_t data);
83 static int qls_hw_reset(qla_host_t *ha);
86 * MPI Related Functions
88 static int qls_mbx_cmd(qla_host_t *ha, uint32_t *in_mbx, uint32_t i_count,
89 uint32_t *out_mbx, uint32_t o_count);
90 static int qls_mbx_set_mgmt_ctrl(qla_host_t *ha, uint32_t t_ctrl);
91 static int qls_mbx_get_mgmt_ctrl(qla_host_t *ha, uint32_t *t_status);
92 static void qls_mbx_get_link_status(qla_host_t *ha);
93 static void qls_mbx_about_fw(qla_host_t *ha);
/*
 * qls_get_msix_count
 * Returns the number of MSI-X vectors to request: one per RX
 * (completion) ring.
 */
96 qls_get_msix_count(qla_host_t *ha)
98 return (ha->num_rx_rings);
/*
 * Sysctl handler for "mpi_dump": writing any value triggers an MPI core
 * dump via qls_mpi_core_dump().  A read (no new value) is a no-op.
 */
102 qls_syctl_mpi_dump(SYSCTL_HANDLER_ARGS)
107 err = sysctl_handle_int(oidp, &ret, 0, req);
/* Bail out on error or when this is a read rather than a write. */
109 if (err || !req->newptr)
113 ha = (qla_host_t *)arg1;
114 qls_mpi_core_dump(ha);
/*
 * Sysctl handler for "link_status": writing any value queries the MPI
 * firmware for link status and firmware version information.
 */
120 qls_syctl_link_status(SYSCTL_HANDLER_ARGS)
125 err = sysctl_handle_int(oidp, &ret, 0, req);
/* Bail out on error or when this is a read rather than a write. */
127 if (err || !req->newptr)
131 ha = (qla_host_t *)arg1;
132 qls_mbx_get_link_status(ha);
133 qls_mbx_about_fw(ha);
/*
 * qls_hw_add_sysctls
 * Registers per-device sysctl nodes: read-only ring counts plus the
 * "mpi_dump" and "link_status" trigger handlers defined above.
 * NOTE(review): ring counts are fixed to the MAX_* compile-time values
 * here; there is no visible tunable to lower them.
 */
139 qls_hw_add_sysctls(qla_host_t *ha)
145 ha->num_rx_rings = MAX_RX_RINGS; ha->num_tx_rings = MAX_TX_RINGS;
147 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
148 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
149 OID_AUTO, "num_rx_rings", CTLFLAG_RD, &ha->num_rx_rings,
150 ha->num_rx_rings, "Number of Completion Queues");
152 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
153 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
154 OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->num_tx_rings,
155 ha->num_tx_rings, "Number of Transmit Rings");
157 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
158 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
159 OID_AUTO, "mpi_dump",
160 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)ha, 0,
161 qls_syctl_mpi_dump, "I", "MPI Dump");
163 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
164 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
165 OID_AUTO, "link_status",
166 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)ha, 0,
167 qls_syctl_link_status, "I", "Link Status");
172 * Function: Frees the DMA'able memory allocated in qls_alloc_dma()
175 qls_free_dma(qla_host_t *ha)
/*
 * Frees in reverse order of allocation (RSS, then MPI).
 * NOTE(review): this view does not show the tx/rx DMA free calls that
 * qls_alloc_dma() would require — confirm against the full file.
 */
177 qls_free_rss_dma(ha);
178 qls_free_mpi_dma(ha);
185 * Name: qls_alloc_dma
186 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
189 qls_alloc_dma(qla_host_t *ha)
/* Allocate in order rx -> tx -> mpi -> rss, unwinding on failure. */
191 if (qls_alloc_rx_dma(ha))
194 if (qls_alloc_tx_dma(ha)) {
199 if (qls_alloc_mpi_dma(ha)) {
205 if (qls_alloc_rss_dma(ha)) {
/* Unwind the MPI allocation when the RSS allocation fails. */
206 qls_free_mpi_dma(ha);
/*
 * qls_wait_for_mac_proto_idx_ready
 * Polls the MAC/protocol address-index register until the bit named by
 * 'op' indicates the engine is ready.  On timeout it flags the host for
 * recovery (qla_initiate_recovery).
 */
216 qls_wait_for_mac_proto_idx_ready(qla_host_t *ha, uint32_t op)
222 data32 = READ_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX);
/* Timed out waiting — request a chip recovery. */
229 ha->qla_initiate_recovery = 1;
234 * Name: qls_config_unicast_mac_addr
235 * Function: binds/unbinds a unicast MAC address to the interface.
238 qls_config_unicast_mac_addr(qla_host_t *ha, uint32_t add_mac)
241 uint32_t mac_upper = 0;
242 uint32_t mac_lower = 0;
243 uint32_t value = 0, index;
/* The CAM/MAC engine is shared; serialize access via the hw semaphore. */
245 if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_MAC_SERDES,
246 Q81_CTL_SEM_SET_MAC_SERDES)) {
247 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
/* Split the 6-byte MAC into a 16-bit upper and 32-bit lower word. */
252 mac_upper = (ha->mac_addr[0] << 8) | ha->mac_addr[1];
253 mac_lower = (ha->mac_addr[2] << 24) | (ha->mac_addr[3] << 16) |
254 (ha->mac_addr[4] << 8) | ha->mac_addr[5];
256 ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
258 goto qls_config_unicast_mac_addr_exit;
/* CAM slot: 128 entries per PCI function (function 0 or 1). */
260 index = 128 * (ha->pci_func & 0x1); /* index */
/* Word 0 of the CAM entry: lower 32 bits of the MAC. */
262 value = (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
263 Q81_CTL_MAC_PROTO_AI_TYPE_CAM_MAC;
265 WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
266 WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_lower);
268 ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
270 goto qls_config_unicast_mac_addr_exit;
/* Word 1 (offset 0x1): upper 16 bits of the MAC. */
272 value = (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
273 Q81_CTL_MAC_PROTO_AI_TYPE_CAM_MAC | 0x1;
275 WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
276 WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_upper);
278 ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
280 goto qls_config_unicast_mac_addr_exit;
/* Word 2 (offset 0x2): routing — steer to NIC, this function, CQ 0. */
282 value = (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
283 Q81_CTL_MAC_PROTO_AI_TYPE_CAM_MAC | 0x2;
285 WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
287 value = Q81_CAM_MAC_OFF2_ROUTE_NIC |
288 ((ha->pci_func & 0x1) << Q81_CAM_MAC_OFF2_FUNC_SHIFT) |
289 (0 << Q81_CAM_MAC_OFF2_CQID_SHIFT);
291 WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, value);
293 qls_config_unicast_mac_addr_exit:
294 qls_sem_unlock(ha, Q81_CTL_SEM_MASK_MAC_SERDES);
299 * Name: qls_config_mcast_mac_addr
300 * Function: binds/unbinds a multicast MAC address to the interface.
303 qls_config_mcast_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac,
307 uint32_t mac_upper = 0;
308 uint32_t mac_lower = 0;
/* Serialize access to the shared MAC/protocol engine. */
311 if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_MAC_SERDES,
312 Q81_CTL_SEM_SET_MAC_SERDES)) {
313 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
/* Split the 6-byte MAC into a 16-bit upper and 32-bit lower word. */
318 mac_upper = (mac_addr[0] << 8) | mac_addr[1];
319 mac_lower = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
320 (mac_addr[4] << 8) | mac_addr[5];
322 ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
324 goto qls_config_mcast_mac_addr_exit;
/* Multicast entry word 0: lower 32 bits of the address. */
326 value = Q81_CTL_MAC_PROTO_AI_E |
327 (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
328 Q81_CTL_MAC_PROTO_AI_TYPE_MCAST ;
330 WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
331 WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_lower);
333 ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
335 goto qls_config_mcast_mac_addr_exit;
/* Multicast entry word 1 (offset 0x1): upper 16 bits. */
337 value = Q81_CTL_MAC_PROTO_AI_E |
338 (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
339 Q81_CTL_MAC_PROTO_AI_TYPE_MCAST | 0x1;
341 WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
342 WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_upper);
344 qls_config_mcast_mac_addr_exit:
345 qls_sem_unlock(ha, Q81_CTL_SEM_MASK_MAC_SERDES);
351 * Name: qls_set_mac_rcv_mode
352 * Function: Enable/Disable AllMulticast and Promiscuous Modes.
/*
 * NOTE(review): the header comment above names qls_set_mac_rcv_mode but
 * the next visible definition is qls_wait_for_route_idx_ready — either
 * the comment is stale or intervening code was elided from this view.
 *
 * qls_wait_for_route_idx_ready: polls the routing-index register until
 * the bit named by 'op' reports ready; on timeout requests recovery.
 */
355 qls_wait_for_route_idx_ready(qla_host_t *ha, uint32_t op)
361 data32 = READ_REG32(ha, Q81_CTL_ROUTING_INDEX);
/* Timed out waiting — request a chip recovery. */
368 ha->qla_initiate_recovery = 1;
/*
 * qls_load_route_idx_reg
 * Writes one routing-table entry (index register, then data register)
 * after waiting for the routing engine to become writable.  Caller must
 * already hold the RIDX/DATA semaphore.
 */
373 qls_load_route_idx_reg(qla_host_t *ha, uint32_t index, uint32_t data)
377 ret = qls_wait_for_route_idx_ready(ha, Q81_CTL_RI_MW);
380 device_printf(ha->pci_dev, "%s: [0x%08x, 0x%08x] failed\n",
381 __func__, index, data);
382 goto qls_load_route_idx_reg_exit;
385 WRITE_REG32(ha, Q81_CTL_ROUTING_INDEX, index);
386 WRITE_REG32(ha, Q81_CTL_ROUTING_DATA, data);
388 qls_load_route_idx_reg_exit:
/*
 * qls_load_route_idx_reg_locked
 * Semaphore-acquiring wrapper around qls_load_route_idx_reg() for
 * callers that do not already hold the RIDX/DATA semaphore.
 */
393 qls_load_route_idx_reg_locked(qla_host_t *ha, uint32_t index, uint32_t data)
397 if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG,
398 Q81_CTL_SEM_SET_RIDX_DATAREG)) {
399 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
403 ret = qls_load_route_idx_reg(ha, index, data);
405 qls_sem_unlock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG);
/*
 * qls_clear_routing_table
 * Zeroes all 16 NIC-queue routing-table slots under the RIDX/DATA
 * semaphore, leaving the firmware routing table empty.
 */
411 qls_clear_routing_table(qla_host_t *ha)
415 if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG,
416 Q81_CTL_SEM_SET_RIDX_DATAREG)) {
417 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
/* Slot number lives in bits 8..11 of the index word. */
421 for (i = 0; i < 16; i++) {
422 ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_TYPE_NICQMASK|
423 (i << 8) | Q81_CTL_RI_DST_DFLTQ), 0);
428 qls_sem_unlock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG);
/*
 * qls_set_promisc: route all valid packets to the default completion
 * queue (promiscuous mode on).
 */
434 qls_set_promisc(qla_host_t *ha)
438 ret = qls_load_route_idx_reg_locked(ha,
439 (Q81_CTL_RI_E | Q81_CTL_RI_TYPE_NICQMASK |
440 Q81_CTL_RI_IDX_PROMISCUOUS | Q81_CTL_RI_DST_DFLTQ),
441 Q81_CTL_RD_VALID_PKT);
/*
 * qls_reset_promisc: clear the promiscuous routing slot (entry written
 * without the enable bit and with a zero match mask).
 */
446 qls_reset_promisc(qla_host_t *ha)
448 qls_load_route_idx_reg_locked(ha, (Q81_CTL_RI_TYPE_NICQMASK |
449 Q81_CTL_RI_IDX_PROMISCUOUS | Q81_CTL_RI_DST_DFLTQ), 0);
/*
 * qls_set_allmulti: route all multicast packets to the default
 * completion queue (allmulti mode on).
 */
454 qls_set_allmulti(qla_host_t *ha)
458 ret = qls_load_route_idx_reg_locked(ha,
459 (Q81_CTL_RI_E | Q81_CTL_RI_TYPE_NICQMASK |
460 Q81_CTL_RI_IDX_ALLMULTI | Q81_CTL_RI_DST_DFLTQ),
/*
 * qls_reset_allmulti: clear the allmulti routing slot.
 */
466 qls_reset_allmulti(qla_host_t *ha)
468 qls_load_route_idx_reg_locked(ha, (Q81_CTL_RI_TYPE_NICQMASK |
469 Q81_CTL_RI_IDX_ALLMULTI | Q81_CTL_RI_DST_DFLTQ), 0);
/*
 * qls_init_fw_routing_table
 * Programs the firmware packet-routing table: drop errored frames,
 * send broadcast/multicast/CAM-hit frames to the default CQ, and (when
 * more than one RX ring exists) route RSS matches to the RSS queues.
 * All entries are written under the RIDX/DATA semaphore after first
 * clearing the table.
 */
474 qls_init_fw_routing_table(qla_host_t *ha)
478 ret = qls_clear_routing_table(ha);
482 if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG,
483 Q81_CTL_SEM_SET_RIDX_DATAREG)) {
484 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
/* Errored packets: drop. */
488 ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DROP |
489 Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_ALL_ERROR),
490 Q81_CTL_RD_ERROR_PKT);
492 goto qls_init_fw_routing_table_exit;
/* Broadcast: default completion queue. */
494 ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DFLTQ |
495 Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_BCAST),
498 goto qls_init_fw_routing_table_exit;
/* RSS spreading only makes sense with more than one RX ring. */
500 if (ha->num_rx_rings > 1 ) {
501 ret = qls_load_route_idx_reg(ha,
502 (Q81_CTL_RI_E | Q81_CTL_RI_DST_RSS |
503 Q81_CTL_RI_TYPE_NICQMASK |
504 Q81_CTL_RI_IDX_RSS_MATCH),
505 Q81_CTL_RD_RSS_MATCH);
507 goto qls_init_fw_routing_table_exit;
/* Registered multicast matches: default completion queue. */
510 ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DFLTQ |
511 Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_MCAST_MATCH),
512 Q81_CTL_RD_MCAST_REG_MATCH);
514 goto qls_init_fw_routing_table_exit;
/* Unicast CAM hits: default completion queue. */
516 ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DFLTQ |
517 Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_CAM_HIT),
520 goto qls_init_fw_routing_table_exit;
522 qls_init_fw_routing_table_exit:
523 qls_sem_unlock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG);
/*
 * qls_tx_tso_chksum
 * Fills in the TSO/checksum-offload fields of a TX IOCB from the
 * mbuf's Ethernet/IP(v6) headers: opcode, IPv4/IPv6 flags, pseudo-header
 * offsets, MSS (for TSO) and TCP/UDP checksum flags.  Compiled only
 * when INET or INET6 is configured.
 */
528 qls_tx_tso_chksum(qla_host_t *ha, struct mbuf *mp, q81_tx_tso_t *tx_mac)
530 #if defined(INET) || defined(INET6)
531 struct ether_vlan_header *eh;
537 uint32_t ehdrlen, ip_hlen;
/* Scratch buffer large enough for either an IPv4 or IPv6 header. */
540 uint8_t buf[sizeof(struct ip6_hdr)];
542 eh = mtod(mp, struct ether_vlan_header *);
/* Account for an 802.1Q tag when computing the L2 header length. */
544 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
545 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
546 etype = ntohs(eh->evl_proto);
548 ehdrlen = ETHER_HDR_LEN;
549 etype = ntohs(eh->evl_encap_proto);
/* --- IPv4 path --- */
555 ip = (struct ip *)(mp->m_data + ehdrlen);
557 ip_hlen = sizeof (struct ip);
/* Header may straddle mbufs; copy it out to the stack buffer if so. */
559 if (mp->m_len < (ehdrlen + ip_hlen)) {
560 m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
561 ip = (struct ip *)buf;
563 tx_mac->opcode = Q81_IOCB_TX_TSO;
564 tx_mac->flags |= Q81_TX_TSO_FLAGS_IPV4 ;
/* Offsets of the IP and transport headers within the frame. */
566 tx_mac->phdr_offsets = ehdrlen;
568 tx_mac->phdr_offsets |= ((ehdrlen + ip_hlen) <<
569 Q81_TX_TSO_PHDR_SHIFT);
573 if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
574 tx_mac->flags |= Q81_TX_TSO_FLAGS_LSO;
/* Seed the TCP checksum with the pseudo-header sum for LSO. */
576 th = (struct tcphdr *)(ip + 1);
578 th->th_sum = in_pseudo(ip->ip_src.s_addr,
581 tx_mac->mss = mp->m_pkthdr.tso_segsz;
582 tx_mac->phdr_length = ip_hlen + ehdrlen +
586 tx_mac->vlan_off |= Q81_TX_TSO_VLAN_OFF_IC ;
/* Non-TSO checksum offload: mark TCP vs UDP. */
588 if (ip->ip_p == IPPROTO_TCP) {
589 tx_mac->flags |= Q81_TX_TSO_FLAGS_TC;
590 } else if (ip->ip_p == IPPROTO_UDP) {
591 tx_mac->flags |= Q81_TX_TSO_FLAGS_UC;
/* --- IPv6 path --- */
598 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
600 ip_hlen = sizeof(struct ip6_hdr);
/* Header may straddle mbufs; copy it out to the stack buffer if so. */
602 if (mp->m_len < (ehdrlen + ip_hlen)) {
603 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
605 ip6 = (struct ip6_hdr *)buf;
608 tx_mac->opcode = Q81_IOCB_TX_TSO;
609 tx_mac->flags |= Q81_TX_TSO_FLAGS_IPV6 ;
610 tx_mac->vlan_off |= Q81_TX_TSO_VLAN_OFF_IC ;
612 tx_mac->phdr_offsets = ehdrlen;
613 tx_mac->phdr_offsets |= ((ehdrlen + ip_hlen) <<
614 Q81_TX_TSO_PHDR_SHIFT);
/* NOTE(review): ip6_nxt check ignores extension headers — confirm. */
616 if (ip6->ip6_nxt == IPPROTO_TCP) {
617 tx_mac->flags |= Q81_TX_TSO_FLAGS_TC;
618 } else if (ip6->ip6_nxt == IPPROTO_UDP) {
619 tx_mac->flags |= Q81_TX_TSO_FLAGS_UC;
635 #define QLA_TX_MIN_FREE 2
/*
 * qls_hw_tx_done
 * Recomputes the free-descriptor count of a TX ring from its producer
 * (txr_next) and consumer (txr_done) indices, handling ring wrap.
 * Returns non-zero (per the comparison below) when the ring is still
 * effectively full, i.e. free count is at or below QLA_TX_MIN_FREE.
 */
637 qls_hw_tx_done(qla_host_t *ha, uint32_t txr_idx)
639 uint32_t txr_done, txr_next;
641 txr_done = ha->tx_ring[txr_idx].txr_done;
642 txr_next = ha->tx_ring[txr_idx].txr_next;
/* done == next means the ring is completely drained. */
644 if (txr_done == txr_next) {
645 ha->tx_ring[txr_idx].txr_free = NUM_TX_DESCRIPTORS;
646 } else if (txr_done > txr_next) {
647 ha->tx_ring[txr_idx].txr_free = txr_done - txr_next;
/* Producer has wrapped past the consumer. */
649 ha->tx_ring[txr_idx].txr_free = NUM_TX_DESCRIPTORS +
653 if (ha->tx_ring[txr_idx].txr_free <= QLA_TX_MIN_FREE)
661 * Function: Transmits a packet. It first checks if the packet is a
662 * candidate for Large TCP Segment Offload and then for UDP/TCP checksum
663 * offload. If either of these creteria are not met, it is transmitted
664 * as a regular ethernet frame.
667 qls_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
668 uint32_t txr_next, struct mbuf *mp, uint32_t txr_idx)
670 q81_tx_mac_t *tx_mac;
671 q81_txb_desc_t *tx_desc;
672 uint32_t total_length = 0;
679 total_length = mp->m_pkthdr.len;
/* Reject frames larger than the TSO maximum outright. */
681 if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
682 device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
683 __func__, total_length);
/* Ring low on space: reclaim completed descriptors before queueing. */
687 if (ha->tx_ring[txr_idx].txr_free <= (NUM_TX_DESCRIPTORS >> 2)) {
688 if (qls_hw_tx_done(ha, txr_idx)) {
689 device_printf(dev, "%s: tx_free[%d] = %d\n",
691 ha->tx_ring[txr_idx].txr_free);
696 tx_mac = (q81_tx_mac_t *)&ha->tx_ring[txr_idx].wq_vaddr[txr_next];
698 bzero(tx_mac, sizeof(q81_tx_mac_t));
/* Offload requested: build a TSO/checksum IOCB instead of plain MAC. */
700 if ((mp->m_pkthdr.csum_flags &
701 (CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO)) != 0) {
702 ret = qls_tx_tso_chksum(ha, mp, (q81_tx_tso_t *)tx_mac);
706 if (mp->m_pkthdr.csum_flags & CSUM_TSO)
707 ha->tx_ring[txr_idx].tx_tso_frames++;
709 ha->tx_ring[txr_idx].tx_frames++;
712 tx_mac->opcode = Q81_IOCB_TX_MAC;
/* Hardware VLAN tag insertion. */
715 if (mp->m_flags & M_VLANTAG) {
716 tx_mac->vlan_tci = mp->m_pkthdr.ether_vtag;
717 tx_mac->vlan_off |= Q81_TX_MAC_VLAN_OFF_V;
719 ha->tx_ring[txr_idx].tx_vlan_frames++;
722 tx_mac->frame_length = total_length;
724 tx_mac->tid_lo = txr_next;
/* Small scatter lists fit inline in the IOCB descriptors... */
726 if (nsegs <= MAX_TX_MAC_DESC) {
727 QL_DPRINT2((dev, "%s: 1 [%d, %d]\n", __func__, total_length,
730 for (i = 0; i < nsegs; i++) {
731 tx_mac->txd[i].baddr = segs->ds_addr;
732 tx_mac->txd[i].length = segs->ds_len;
735 tx_mac->txd[(nsegs - 1)].flags = Q81_RXB_DESC_FLAGS_E;
/* ...larger lists go through the per-descriptor OAL continuation. */
738 QL_DPRINT2((dev, "%s: 2 [%d, %d]\n", __func__, total_length,
741 tx_mac->txd[0].baddr =
742 ha->tx_ring[txr_idx].tx_buf[txr_next].oal_paddr;
743 tx_mac->txd[0].length =
744 nsegs * (sizeof(q81_txb_desc_t));
745 tx_mac->txd[0].flags = Q81_RXB_DESC_FLAGS_C;
747 tx_desc = ha->tx_ring[txr_idx].tx_buf[txr_next].oal_vaddr;
749 for (i = 0; i < nsegs; i++) {
750 tx_desc->baddr = segs->ds_addr;
751 tx_desc->length = segs->ds_len;
754 tx_desc->flags = Q81_RXB_DESC_FLAGS_E;
/* Advance the producer index (power-of-two ring) and kick hardware. */
762 txr_next = (txr_next + 1) & (NUM_TX_DESCRIPTORS - 1);
763 ha->tx_ring[txr_idx].txr_next = txr_next;
765 ha->tx_ring[txr_idx].txr_free--;
767 Q81_WR_WQ_PROD_IDX(txr_idx, txr_next);
773 * Name: qls_del_hw_if
774 * Function: Destroys the hardware specific entities corresponding to an
778 qls_del_hw_if(qla_host_t *ha)
/* Nothing to tear down if the hardware was never initialized. */
784 if (ha->hw_init == 0) {
/* Invalidate all work (TX) and completion (RX) queues. */
789 for (i = 0; i < ha->num_tx_rings; i++) {
790 Q81_SET_WQ_INVALID(i);
792 for (i = 0; i < ha->num_rx_rings; i++) {
793 Q81_SET_CQ_INVALID(i);
796 for (i = 0; i < ha->num_rx_rings; i++) {
797 Q81_DISABLE_INTR(ha, i); /* MSI-x i */
/* Mask-only writes (set bit in mask field, clear in value field)
 * turn off interrupt-handshake-disable and the global enable. */
800 value = (Q81_CTL_INTRE_IHD << Q81_CTL_INTRE_MASK_SHIFT);
801 WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);
803 value = (Q81_CTL_INTRE_EI << Q81_CTL_INTRE_MASK_SHIFT);
804 WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);
805 ha->flags.intr_enable = 0;
813 * Name: qls_init_hw_if
814 * Function: Creates the hardware specific entities corresponding to an
815 * Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
816 * corresponding to the interface. Enables LRO if allowed.
819 qls_init_hw_if(qla_host_t *ha)
825 QL_DPRINT2((ha->pci_dev, "%s:enter\n", __func__));
/* Start from a clean chip state. */
827 ret = qls_hw_reset(ha);
829 goto qls_init_hw_if_exit;
831 ha->vm_pgsize = 4096;
833 /* Enable FAE and EFE bits in System Register */
834 value = Q81_CTL_SYSTEM_ENABLE_FAE | Q81_CTL_SYSTEM_ENABLE_EFE;
/* These control registers take a mask in the upper half and the new
 * bit values in the lower half; duplicating enables the write. */
835 value = (value << Q81_CTL_SYSTEM_MASK_SHIFT) | value;
837 WRITE_REG32(ha, Q81_CTL_SYSTEM, value);
839 /* Set Default Completion Queue_ID in NIC Rcv Configuration Register */
840 value = (Q81_CTL_NIC_RCVC_DCQ_MASK << Q81_CTL_NIC_RCVC_MASK_SHIFT);
841 WRITE_REG32(ha, Q81_CTL_NIC_RCV_CONFIG, value);
843 /* Function Specific Control Register - Set Page Size and Enable NIC */
844 value = Q81_CTL_FUNC_SPECIFIC_FE |
845 Q81_CTL_FUNC_SPECIFIC_VM_PGSIZE_MASK |
846 Q81_CTL_FUNC_SPECIFIC_EPC_O |
847 Q81_CTL_FUNC_SPECIFIC_EPC_I |
848 Q81_CTL_FUNC_SPECIFIC_EC;
849 value = (value << Q81_CTL_FUNC_SPECIFIC_MASK_SHIFT) |
850 Q81_CTL_FUNC_SPECIFIC_FE |
851 Q81_CTL_FUNC_SPECIFIC_VM_PGSIZE_4K |
852 Q81_CTL_FUNC_SPECIFIC_EPC_O |
853 Q81_CTL_FUNC_SPECIFIC_EPC_I |
854 Q81_CTL_FUNC_SPECIFIC_EC;
856 WRITE_REG32(ha, Q81_CTL_FUNC_SPECIFIC, value);
858 /* Interrupt Mask Register */
859 value = Q81_CTL_INTRM_PI;
860 value = (value << Q81_CTL_INTRM_MASK_SHIFT) | value;
862 WRITE_REG32(ha, Q81_CTL_INTR_MASK, value);
864 /* Initialiatize Completion Queue */
865 for (i = 0; i < ha->num_rx_rings; i++) {
866 ret = qls_init_comp_queue(ha, i);
868 goto qls_init_hw_if_exit;
/* RSS only applies when there is more than one completion queue. */
871 if (ha->num_rx_rings > 1 ) {
872 ret = qls_init_rss(ha);
874 goto qls_init_hw_if_exit;
877 /* Initialize Work Queue */
879 for (i = 0; i < ha->num_tx_rings; i++) {
880 ret = qls_init_work_queue(ha, i);
882 goto qls_init_hw_if_exit;
886 goto qls_init_hw_if_exit;
888 /* Set up CAM RAM with MAC Address */
889 ret = qls_config_unicast_mac_addr(ha, 1);
891 goto qls_init_hw_if_exit;
893 ret = qls_hw_add_all_mcast(ha);
895 goto qls_init_hw_if_exit;
897 /* Initialize Firmware Routing Table */
898 ret = qls_init_fw_routing_table(ha);
900 goto qls_init_hw_if_exit;
902 /* Get Chip Revision ID */
903 ha->rev_id = READ_REG32(ha, Q81_CTL_REV_ID);
905 /* Enable Global Interrupt */
906 value = Q81_CTL_INTRE_EI;
907 value = (value << Q81_CTL_INTRE_MASK_SHIFT) | value;
909 WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);
911 /* Enable Interrupt Handshake Disable */
912 value = Q81_CTL_INTRE_IHD;
913 value = (value << Q81_CTL_INTRE_MASK_SHIFT) | value;
915 WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);
917 /* Enable Completion Interrupt */
919 ha->flags.intr_enable = 1;
921 for (i = 0; i < ha->num_rx_rings; i++) {
922 Q81_ENABLE_INTR(ha, i); /* MSI-x i */
927 qls_mbx_get_link_status(ha);
929 QL_DPRINT2((ha->pci_dev, "%s:rxr [0x%08x]\n", __func__,
930 ha->rx_ring[0].cq_db_offset));
931 QL_DPRINT2((ha->pci_dev, "%s:txr [0x%08x]\n", __func__,
932 ha->tx_ring[0].wq_db_offset));
/* Prime the doorbells: consumer index 0, buffer-queue producers. */
934 for (i = 0; i < ha->num_rx_rings; i++) {
935 Q81_WR_CQ_CONS_IDX(i, 0);
936 Q81_WR_LBQ_PROD_IDX(i, ha->rx_ring[i].lbq_in);
937 Q81_WR_SBQ_PROD_IDX(i, ha->rx_ring[i].sbq_in);
939 QL_DPRINT2((ha->pci_dev,
940 "%s: [wq_idx, cq_idx, lbq_idx, sbq_idx]"
941 "[0x%08x, 0x%08x, 0x%08x, 0x%08x]\n", __func__,
942 Q81_RD_WQ_IDX(i), Q81_RD_CQ_IDX(i), Q81_RD_LBQ_IDX(i),
946 for (i = 0; i < ha->num_rx_rings; i++) {
951 QL_DPRINT2((ha->pci_dev, "%s:exit\n", __func__));
/*
 * qls_wait_for_config_reg_bits
 * Polls the CONFIG register until (reg & bits) == value; on timeout
 * logs the failure and requests a chip recovery.
 */
956 qls_wait_for_config_reg_bits(qla_host_t *ha, uint32_t bits, uint32_t value)
962 data32 = READ_REG32(ha, Q81_CTL_CONFIG);
964 if ((data32 & bits) == value)
/* Timed out — flag recovery and report. */
969 ha->qla_initiate_recovery = 1;
970 device_printf(ha->pci_dev, "%s: failed\n", __func__);
/*
 * 40-byte RSS hash key loaded into the RSS ICB below (full key for
 * IPv6 hashing; the first 16 bytes are reused for IPv4).
 */
974 static uint8_t q81_hash_key[] = {
975 0xda, 0x56, 0x5a, 0x6d,
976 0xc2, 0x0e, 0x5b, 0x25,
977 0x3d, 0x25, 0x67, 0x41,
978 0xb0, 0x8f, 0xa3, 0x43,
979 0xcb, 0x2b, 0xca, 0xd0,
980 0xb4, 0x30, 0x7b, 0xae,
981 0xa3, 0x2d, 0xcb, 0x77,
982 0x0c, 0xf2, 0x30, 0x80,
983 0x3b, 0xb7, 0x42, 0x6a,
984 0xfa, 0x01, 0xac, 0xbe };
/*
 * qls_init_rss
 * Builds the RSS initialization control block (hash flags, indirection
 * table mapping hash buckets onto the RX rings, hash keys) in the
 * pre-allocated rss_dma buffer and loads it into the chip through the
 * ICB-access registers under the ICB semaphore, then kicks the load via
 * the CONFIG register LR bit and waits for it to clear.
 */
987 qls_init_rss(qla_host_t *ha)
989 q81_rss_icb_t *rss_icb;
994 rss_icb = ha->rss_dma.dma_b;
996 bzero(rss_icb, sizeof (q81_rss_icb_t));
998 rss_icb->flags_base_cq_num = Q81_RSS_ICB_FLAGS_L4K |
999 Q81_RSS_ICB_FLAGS_L6K | Q81_RSS_ICB_FLAGS_LI |
1000 Q81_RSS_ICB_FLAGS_LB | Q81_RSS_ICB_FLAGS_LM |
1001 Q81_RSS_ICB_FLAGS_RT4 | Q81_RSS_ICB_FLAGS_RT6;
/* 1024-entry indirection table (mask 0x3FF). */
1003 rss_icb->mask = 0x3FF;
/* Spread buckets round-robin over the RX rings (power-of-two count). */
1005 for (i = 0; i < Q81_RSS_ICB_NUM_INDTBL_ENTRIES; i++) {
1006 rss_icb->cq_id[i] = (i & (ha->num_rx_rings - 1));
1009 memcpy(rss_icb->ipv6_rss_hash_key, q81_hash_key, 40);
1010 memcpy(rss_icb->ipv4_rss_hash_key, q81_hash_key, 16);
/* Make sure no prior RSS load is still pending. */
1012 ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LR, 0);
1015 goto qls_init_rss_exit;
1017 ret = qls_sem_lock(ha, Q81_CTL_SEM_MASK_ICB, Q81_CTL_SEM_SET_ICB);
1020 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
1021 goto qls_init_rss_exit;
/* Point the ICB-access window at the RSS ICB's physical address. */
1024 value = (uint32_t)ha->rss_dma.dma_addr;
1025 WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_LO, value);
1027 value = (uint32_t)(ha->rss_dma.dma_addr >> 32);
1028 WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_HI, value);
1030 qls_sem_unlock(ha, Q81_CTL_SEM_MASK_ICB);
/* Trigger the load (mask LR in upper half, set LR in lower). */
1032 value = (Q81_CTL_CONFIG_LR << Q81_CTL_CONFIG_MASK_SHIFT) |
1035 WRITE_REG32(ha, Q81_CTL_CONFIG, value);
1037 ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LR, 0);
/*
 * qls_init_comp_queue
 * Builds the completion-queue ICB for RX ring 'cid' (CQ base/consumer
 * addresses, interrupt coalescing, large/small buffer queue tables) and
 * loads it into the chip via the ICB-access window, then resets the
 * ring's software indices.
 */
1044 qls_init_comp_queue(qla_host_t *ha, int cid)
1046 q81_cq_icb_t *cq_icb;
1051 rxr = &ha->rx_ring[cid];
/* Doorbell pages 128.. are the per-CQ doorbells. */
1053 rxr->cq_db_offset = ha->vm_pgsize * (128 + cid);
1055 cq_icb = rxr->cq_icb_vaddr;
1057 bzero(cq_icb, sizeof (q81_cq_icb_t));
/* One MSI-X vector per completion queue. */
1059 cq_icb->msix_vector = cid;
1060 cq_icb->flags = Q81_CQ_ICB_FLAGS_LC |
1061 Q81_CQ_ICB_FLAGS_LI |
1062 Q81_CQ_ICB_FLAGS_LL |
1063 Q81_CQ_ICB_FLAGS_LS |
1064 Q81_CQ_ICB_FLAGS_LV;
1066 cq_icb->length_v = NUM_CQ_ENTRIES;
1068 cq_icb->cq_baddr_lo = (rxr->cq_base_paddr & 0xFFFFFFFF);
1069 cq_icb->cq_baddr_hi = (rxr->cq_base_paddr >> 32) & 0xFFFFFFFF;
1071 cq_icb->cqi_addr_lo = (rxr->cqi_paddr & 0xFFFFFFFF);
1072 cq_icb->cqi_addr_hi = (rxr->cqi_paddr >> 32) & 0xFFFFFFFF;
/* Interrupt coalescing: packet count and delay thresholds. */
1074 cq_icb->pkt_idelay = 10;
1075 cq_icb->idelay = 100;
1077 cq_icb->lbq_baddr_lo = (rxr->lbq_addr_tbl_paddr & 0xFFFFFFFF);
1078 cq_icb->lbq_baddr_hi = (rxr->lbq_addr_tbl_paddr >> 32) & 0xFFFFFFFF;
1080 cq_icb->lbq_bsize = QLA_LGB_SIZE;
1081 cq_icb->lbq_length = QLA_NUM_LGB_ENTRIES;
1083 cq_icb->sbq_baddr_lo = (rxr->sbq_addr_tbl_paddr & 0xFFFFFFFF);
1084 cq_icb->sbq_baddr_hi = (rxr->sbq_addr_tbl_paddr >> 32) & 0xFFFFFFFF;
1086 cq_icb->sbq_bsize = (uint16_t)ha->msize;
1087 cq_icb->sbq_length = QLA_NUM_SMB_ENTRIES;
/* Make sure no prior CQ load is still pending. */
1091 ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LCQ, 0);
1094 goto qls_init_comp_queue_exit;
1096 ret = qls_sem_lock(ha, Q81_CTL_SEM_MASK_ICB, Q81_CTL_SEM_SET_ICB);
1099 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
1100 goto qls_init_comp_queue_exit;
/* Point the ICB-access window at this CQ ICB's physical address. */
1103 value = (uint32_t)rxr->cq_icb_paddr;
1104 WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_LO, value);
1106 value = (uint32_t)(rxr->cq_icb_paddr >> 32);
1107 WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_HI, value);
1109 qls_sem_unlock(ha, Q81_CTL_SEM_MASK_ICB);
/* Trigger the CQ load for queue number 'cid'. */
1111 value = Q81_CTL_CONFIG_LCQ | Q81_CTL_CONFIG_Q_NUM_MASK;
1112 value = (value << Q81_CTL_CONFIG_MASK_SHIFT) | Q81_CTL_CONFIG_LCQ;
1113 value |= (cid << Q81_CTL_CONFIG_Q_NUM_SHIFT);
1114 WRITE_REG32(ha, Q81_CTL_CONFIG, value);
1116 ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LCQ, 0);
/* Reset software ring state; buffer producers start 16-aligned. */
1119 rxr->lbq_next = rxr->lbq_free = 0;
1120 rxr->sbq_next = rxr->sbq_free = 0;
1121 rxr->rx_free = rxr->rx_next = 0;
1122 rxr->lbq_in = (QLA_NUM_LGB_ENTRIES - 1) & ~0xF;
1123 rxr->sbq_in = (QLA_NUM_SMB_ENTRIES - 1) & ~0xF;
1125 qls_init_comp_queue_exit:
/*
 * qls_init_work_queue
 * Builds the work-queue (TX ring) ICB for ring 'wid' — doorbell
 * address, ring base and consumer-index addresses, completion-queue
 * binding — and loads it into the chip via the ICB-access window.
 */
1130 qls_init_work_queue(qla_host_t *ha, int wid)
1132 q81_wq_icb_t *wq_icb;
1137 txr = &ha->tx_ring[wid];
/* Per-WQ doorbell lives at page 'wid' of BAR1. */
1139 txr->wq_db_addr = (struct resource *)((uint8_t *)ha->pci_reg1
1140 + (ha->vm_pgsize * wid));
1142 txr->wq_db_offset = (ha->vm_pgsize * wid);
1144 wq_icb = txr->wq_icb_vaddr;
1145 bzero(wq_icb, sizeof (q81_wq_icb_t));
1147 wq_icb->length_v = NUM_TX_DESCRIPTORS |
1150 wq_icb->flags = Q81_WQ_ICB_FLAGS_LO | Q81_WQ_ICB_FLAGS_LI |
1151 Q81_WQ_ICB_FLAGS_LB | Q81_WQ_ICB_FLAGS_LC;
/* TX completions for WQ 'wid' go to CQ 'wid'. */
1153 wq_icb->wqcqid_rss = wid;
1155 wq_icb->baddr_lo = txr->wq_paddr & 0xFFFFFFFF;
1156 wq_icb->baddr_hi = (txr->wq_paddr >> 32)& 0xFFFFFFFF;
1158 wq_icb->ci_addr_lo = txr->txr_cons_paddr & 0xFFFFFFFF;
1159 wq_icb->ci_addr_hi = (txr->txr_cons_paddr >> 32)& 0xFFFFFFFF;
/* Make sure no prior WQ load is still pending. */
1161 ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LRQ, 0);
1164 goto qls_init_wq_exit;
1166 ret = qls_sem_lock(ha, Q81_CTL_SEM_MASK_ICB, Q81_CTL_SEM_SET_ICB);
1169 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
1170 goto qls_init_wq_exit;
/* Point the ICB-access window at this WQ ICB's physical address. */
1173 value = (uint32_t)txr->wq_icb_paddr;
1174 WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_LO, value);
1176 value = (uint32_t)(txr->wq_icb_paddr >> 32);
1177 WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_HI, value);
1179 qls_sem_unlock(ha, Q81_CTL_SEM_MASK_ICB);
/* Trigger the WQ load for queue number 'wid'. */
1181 value = Q81_CTL_CONFIG_LRQ | Q81_CTL_CONFIG_Q_NUM_MASK;
1182 value = (value << Q81_CTL_CONFIG_MASK_SHIFT) | Q81_CTL_CONFIG_LRQ;
1183 value |= (wid << Q81_CTL_CONFIG_Q_NUM_SHIFT);
1184 WRITE_REG32(ha, Q81_CTL_CONFIG, value);
1186 ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LRQ, 0);
/* Fresh ring: all descriptors free. */
1188 txr->txr_free = NUM_TX_DESCRIPTORS;
/*
 * qls_hw_add_all_mcast
 * Re-programs every non-zero entry of the software multicast table into
 * the hardware — used to restore multicast filtering after a (re)init.
 */
1197 qls_hw_add_all_mcast(qla_host_t *ha)
1201 nmcast = ha->nmcast;
/* An all-zero MAC marks an unused slot in ha->mcast[]. */
1203 for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
1204 if ((ha->mcast[i].addr[0] != 0) ||
1205 (ha->mcast[i].addr[1] != 0) ||
1206 (ha->mcast[i].addr[2] != 0) ||
1207 (ha->mcast[i].addr[3] != 0) ||
1208 (ha->mcast[i].addr[4] != 0) ||
1209 (ha->mcast[i].addr[5] != 0)) {
1210 if (qls_config_mcast_mac_addr(ha, ha->mcast[i].addr,
1212 device_printf(ha->pci_dev, "%s: failed\n",
/*
 * qls_hw_add_mcast
 * Adds one multicast address: no-op when already present, otherwise
 * programs it into the first free (all-zero) software table slot and
 * mirrors it into hardware.
 */
1224 qls_hw_add_mcast(qla_host_t *ha, uint8_t *mta)
1228 for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
1229 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0)
1230 return 0; /* its been already added */
/* Find a free slot (all-zero MAC marks unused). */
1233 for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
1234 if ((ha->mcast[i].addr[0] == 0) &&
1235 (ha->mcast[i].addr[1] == 0) &&
1236 (ha->mcast[i].addr[2] == 0) &&
1237 (ha->mcast[i].addr[3] == 0) &&
1238 (ha->mcast[i].addr[4] == 0) &&
1239 (ha->mcast[i].addr[5] == 0)) {
1240 if (qls_config_mcast_mac_addr(ha, mta, 1, i))
1243 bcopy(mta, ha->mcast[i].addr, Q8_MAC_ADDR_LEN);
/*
 * qls_hw_del_mcast
 * Removes one multicast address: unbinds it from hardware and clears
 * its software table slot back to all-zero (the "unused" marker).
 */
1253 qls_hw_del_mcast(qla_host_t *ha, uint8_t *mta)
1257 for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
1258 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) {
1259 if (qls_config_mcast_mac_addr(ha, mta, 0, i))
1262 ha->mcast[i].addr[0] = 0;
1263 ha->mcast[i].addr[1] = 0;
1264 ha->mcast[i].addr[2] = 0;
1265 ha->mcast[i].addr[3] = 0;
1266 ha->mcast[i].addr[4] = 0;
1267 ha->mcast[i].addr[5] = 0;
1278 * Name: qls_hw_set_multi
1279 * Function: Sets the Multicast Addresses provided the host O.S into the
1280 * hardware (for the given interface)
1283 qls_hw_set_multi(qla_host_t *ha, uint8_t *mta, uint32_t mcnt,
/* Walk the packed 6-byte address list, adding or deleting each one. */
1288 for (i = 0; i < mcnt; i++) {
1290 if (qls_hw_add_mcast(ha, mta))
1293 if (qls_hw_del_mcast(ha, mta))
1297 mta += Q8_MAC_ADDR_LEN;
/*
 * qls_update_link_state
 * Reads the per-port link bit from the STATUS register (PL0 for PCI
 * function 0, PL1 for function 1) and notifies the network stack when
 * the state changed.  Skipped while the interface is not running.
 */
1303 qls_update_link_state(qla_host_t *ha)
1305 uint32_t link_state;
1306 uint32_t prev_link_state;
1308 if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1312 link_state = READ_REG32(ha, Q81_CTL_STATUS);
1314 prev_link_state = ha->link_up;
1316 if ((ha->pci_func & 0x1) == 0)
1317 ha->link_up = ((link_state & Q81_CTL_STATUS_PL0)? 1 : 0);
1319 ha->link_up = ((link_state & Q81_CTL_STATUS_PL1)? 1 : 0);
/* Only propagate actual transitions to the stack. */
1321 if (prev_link_state != ha->link_up) {
1323 if_link_state_change(ha->ifp, LINK_STATE_UP);
1325 if_link_state_change(ha->ifp, LINK_STATE_DOWN);
/*
 * qls_free_tx_ring_dma
 * Releases the work-queue and private-buffer DMA areas of one TX ring;
 * flags track which allocations are live so this is idempotent.
 */
1332 qls_free_tx_ring_dma(qla_host_t *ha, int r_idx)
1334 if (ha->tx_ring[r_idx].flags.wq_dma) {
1335 qls_free_dmabuf(ha, &ha->tx_ring[r_idx].wq_dma);
1336 ha->tx_ring[r_idx].flags.wq_dma = 0;
1339 if (ha->tx_ring[r_idx].flags.privb_dma) {
1340 qls_free_dmabuf(ha, &ha->tx_ring[r_idx].privb_dma);
1341 ha->tx_ring[r_idx].flags.privb_dma = 0;
/*
 * qls_free_tx_dma
 * Tears down all TX DMA state: per-ring DMA areas, per-descriptor
 * busdma maps, and finally the TX DMA tag itself.
 */
1347 qls_free_tx_dma(qla_host_t *ha)
1352 for (i = 0; i < ha->num_tx_rings; i++) {
1353 qls_free_tx_ring_dma(ha, i);
1355 for (j = 0; j < NUM_TX_DESCRIPTORS; j++) {
1356 txb = &ha->tx_ring[i].tx_buf[j];
1359 bus_dmamap_destroy(ha->tx_tag, txb->map);
1364 if (ha->tx_tag != NULL) {
1365 bus_dma_tag_destroy(ha->tx_tag);
/*
 * qls_alloc_tx_ring_dma
 * Allocates the DMA areas for one TX ring: the work queue itself and a
 * private buffer that is carved up into the WQ ICB, the hardware
 * consumer-index word, and one OAL (address-list) block per descriptor.
 */
1373 qls_alloc_tx_ring_dma(qla_host_t *ha, int ridx)
1379 device_t dev = ha->pci_dev;
1381 ha->tx_ring[ridx].wq_dma.alignment = 8;
1382 ha->tx_ring[ridx].wq_dma.size =
1383 NUM_TX_DESCRIPTORS * (sizeof (q81_tx_cmd_t));
1385 ret = qls_alloc_dmabuf(ha, &ha->tx_ring[ridx].wq_dma);
1388 device_printf(dev, "%s: [%d] txr failed\n", __func__, ridx);
1389 goto qls_alloc_tx_ring_dma_exit;
1391 ha->tx_ring[ridx].flags.wq_dma = 1;
1393 ha->tx_ring[ridx].privb_dma.alignment = 8;
1394 ha->tx_ring[ridx].privb_dma.size = QLA_TX_PRIVATE_BSIZE;
1396 ret = qls_alloc_dmabuf(ha, &ha->tx_ring[ridx].privb_dma);
1399 device_printf(dev, "%s: [%d] oalb failed\n", __func__, ridx);
1400 goto qls_alloc_tx_ring_dma_exit;
1403 ha->tx_ring[ridx].flags.privb_dma = 1;
1405 ha->tx_ring[ridx].wq_vaddr = ha->tx_ring[ridx].wq_dma.dma_b;
1406 ha->tx_ring[ridx].wq_paddr = ha->tx_ring[ridx].wq_dma.dma_addr;
/* Carve up the private buffer: first half-page holds the WQ ICB... */
1408 v_addr = ha->tx_ring[ridx].privb_dma.dma_b;
1409 p_addr = ha->tx_ring[ridx].privb_dma.dma_addr;
1411 ha->tx_ring[ridx].wq_icb_vaddr = v_addr;
1412 ha->tx_ring[ridx].wq_icb_paddr = p_addr;
/* ...the second half-page starts with the HW consumer index... */
1414 ha->tx_ring[ridx].txr_cons_vaddr =
1415 (uint32_t *)(v_addr + (PAGE_SIZE >> 1));
1416 ha->tx_ring[ridx].txr_cons_paddr = p_addr + (PAGE_SIZE >> 1);
1418 v_addr = v_addr + (PAGE_SIZE >> 1);
1419 p_addr = p_addr + (PAGE_SIZE >> 1);
/* ...followed by one OAL block per TX descriptor. */
1421 txb = ha->tx_ring[ridx].tx_buf;
1423 for (i = 0; i < NUM_TX_DESCRIPTORS; i++) {
1424 txb[i].oal_vaddr = v_addr;
1425 txb[i].oal_paddr = p_addr;
1427 v_addr = v_addr + QLA_OAL_BLK_SIZE;
1428 p_addr = p_addr + QLA_OAL_BLK_SIZE;
1431 qls_alloc_tx_ring_dma_exit:
/*
 * qls_alloc_tx_dma
 * Creates the TX busdma tag (sized for the largest TSO frame), then for
 * each TX ring allocates its DMA areas and a busdma map per descriptor.
 * On any failure everything already allocated is released via
 * qls_free_tx_dma().
 */
1436 qls_alloc_tx_dma(qla_host_t *ha)
1442 if (bus_dma_tag_create(NULL, /* parent */
1443 1, 0, /* alignment, bounds */
1444 BUS_SPACE_MAXADDR, /* lowaddr */
1445 BUS_SPACE_MAXADDR, /* highaddr */
1446 NULL, NULL, /* filter, filterarg */
1447 QLA_MAX_TSO_FRAME_SIZE, /* maxsize */
1448 QLA_MAX_SEGMENTS, /* nsegments */
1449 PAGE_SIZE, /* maxsegsize */
1450 BUS_DMA_ALLOCNOW, /* flags */
1451 NULL, /* lockfunc */
1452 NULL, /* lockfuncarg */
1454 device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n",
1459 for (i = 0; i < ha->num_tx_rings; i++) {
1460 ret = qls_alloc_tx_ring_dma(ha, i);
1463 qls_free_tx_dma(ha);
1467 for (j = 0; j < NUM_TX_DESCRIPTORS; j++) {
1468 txb = &ha->tx_ring[i].tx_buf[j];
1470 ret = bus_dmamap_create(ha->tx_tag,
1471 BUS_DMA_NOWAIT, &txb->map);
1473 ha->err_tx_dmamap_create++;
1474 device_printf(ha->pci_dev,
1475 "%s: bus_dmamap_create failed[%d, %d, %d]\n",
1476 __func__, ret, i, j);
1478 qls_free_tx_dma(ha);
/*
 * qls_free_rss_dma
 *	Release the RSS table DMA buffer and clear its allocated flag.
 */
1489 qls_free_rss_dma(qla_host_t *ha)
1491 qls_free_dmabuf(ha, &ha->rss_dma);
1492 ha->flags.rss_dma = 0;
/*
 * qls_alloc_rss_dma
 *	Allocate one page of DMA memory for the RSS table
 *	(4-byte aligned) and mark it allocated on success.
 */
1496 qls_alloc_rss_dma(qla_host_t *ha)
1500 ha->rss_dma.alignment = 4;
1501 ha->rss_dma.size = PAGE_SIZE;
1503 ret = qls_alloc_dmabuf(ha, &ha->rss_dma);
1506 device_printf(ha->pci_dev, "%s: failed\n", __func__);
/* flag consulted by qls_free_rss_dma() */
1508 ha->flags.rss_dma = 1;
/*
 * qls_free_mpi_dma
 *	Release the MPI (management processor) DMA buffer and clear
 *	its allocated flag.
 */
1514 qls_free_mpi_dma(qla_host_t *ha)
1516 qls_free_dmabuf(ha, &ha->mpi_dma);
1517 ha->flags.mpi_dma = 0;
/*
 * qls_alloc_mpi_dma
 *	Allocate the MPI DMA buffer (0x4000 * 4 bytes; used as the
 *	bounce area for RISC RAM dumps — see qls_mbx_dump_risc_ram)
 *	and mark it allocated on success.
 */
1521 qls_alloc_mpi_dma(qla_host_t *ha)
1525 ha->mpi_dma.alignment = 4;
1526 ha->mpi_dma.size = (0x4000 * 4);
1528 ret = qls_alloc_dmabuf(ha, &ha->mpi_dma);
1530 device_printf(ha->pci_dev, "%s: failed\n", __func__);
1532 ha->flags.mpi_dma = 1;
/*
 * qls_free_rx_ring_dma
 *	Free the four DMA areas of one RX ring — completion queue,
 *	large buffer queue, small buffer queue, and large buffer pool —
 *	each guarded by its allocated flag so partial allocations can
 *	be unwound safely.
 */
1538 qls_free_rx_ring_dma(qla_host_t *ha, int ridx)
1540 if (ha->rx_ring[ridx].flags.cq_dma) {
1541 qls_free_dmabuf(ha, &ha->rx_ring[ridx].cq_dma);
1542 ha->rx_ring[ridx].flags.cq_dma = 0;
1545 if (ha->rx_ring[ridx].flags.lbq_dma) {
1546 qls_free_dmabuf(ha, &ha->rx_ring[ridx].lbq_dma);
1547 ha->rx_ring[ridx].flags.lbq_dma = 0;
1550 if (ha->rx_ring[ridx].flags.sbq_dma) {
1551 qls_free_dmabuf(ha, &ha->rx_ring[ridx].sbq_dma);
1552 ha->rx_ring[ridx].flags.sbq_dma = 0;
1555 if (ha->rx_ring[ridx].flags.lb_dma) {
1556 qls_free_dmabuf(ha, &ha->rx_ring[ridx].lb_dma);
1557 ha->rx_ring[ridx].flags.lb_dma = 0;
/*
 * qls_free_rx_dma
 *	Free the DMA areas of every RX ring, then destroy the RX
 *	mbuf DMA tag if it was created.
 */
1563 qls_free_rx_dma(qla_host_t *ha)
1567 for (i = 0; i < ha->num_rx_rings; i++) {
1568 qls_free_rx_ring_dma(ha, i);
1571 if (ha->rx_tag != NULL) {
1572 bus_dma_tag_destroy(ha->rx_tag);
/*
 * qls_alloc_rx_ring_dma
 *	Allocate and lay out the DMA areas for one RX ring:
 *	 - completion queue (CQ) plus an extra page for the CQ ICB and
 *	   the hardware-updated completion index,
 *	 - large buffer queue (LBQ) with its address table,
 *	 - small buffer queue (SBQ) with its address table,
 *	 - the large buffer pool itself.
 *	After allocation the LBQ/SBQ address tables are seeded with the
 *	bus addresses of their queue pages and buffers.
 */
1580 qls_alloc_rx_ring_dma(qla_host_t *ha, int ridx)
1585 volatile q81_bq_addr_e_t *bq_e;
1586 device_t dev = ha->pci_dev;
/* CQ: NUM_CQ_ENTRIES entries + 1 page for ICB/index, 128-byte aligned */
1588 ha->rx_ring[ridx].cq_dma.alignment = 128;
1589 ha->rx_ring[ridx].cq_dma.size =
1590 (NUM_CQ_ENTRIES * (sizeof (q81_cq_e_t))) + PAGE_SIZE;
1592 ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].cq_dma);
1595 device_printf(dev, "%s: [%d] cq failed\n", __func__, ridx);
1596 goto qls_alloc_rx_ring_dma_exit;
1598 ha->rx_ring[ridx].flags.cq_dma = 1;
/* large buffer queue + its address table */
1600 ha->rx_ring[ridx].lbq_dma.alignment = 8;
1601 ha->rx_ring[ridx].lbq_dma.size = QLA_LGBQ_AND_TABLE_SIZE;
1603 ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].lbq_dma);
1606 device_printf(dev, "%s: [%d] lbq failed\n", __func__, ridx);
1607 goto qls_alloc_rx_ring_dma_exit;
1609 ha->rx_ring[ridx].flags.lbq_dma = 1;
/* small buffer queue + its address table */
1611 ha->rx_ring[ridx].sbq_dma.alignment = 8;
1612 ha->rx_ring[ridx].sbq_dma.size = QLA_SMBQ_AND_TABLE_SIZE;
1614 ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].sbq_dma);
1617 device_printf(dev, "%s: [%d] sbq failed\n", __func__, ridx);
1618 goto qls_alloc_rx_ring_dma_exit;
1620 ha->rx_ring[ridx].flags.sbq_dma = 1;
/* large buffer pool: QLA_NUM_LGB_ENTRIES buffers of QLA_LGB_SIZE */
1622 ha->rx_ring[ridx].lb_dma.alignment = 8;
1623 ha->rx_ring[ridx].lb_dma.size = (QLA_LGB_SIZE * QLA_NUM_LGB_ENTRIES);
1625 ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].lb_dma);
1627 device_printf(dev, "%s: [%d] lb failed\n", __func__, ridx);
1628 goto qls_alloc_rx_ring_dma_exit;
1630 ha->rx_ring[ridx].flags.lb_dma = 1;
/* start from known-zero contents before handing memory to hardware */
1632 bzero(ha->rx_ring[ridx].cq_dma.dma_b, ha->rx_ring[ridx].cq_dma.size);
1633 bzero(ha->rx_ring[ridx].lbq_dma.dma_b, ha->rx_ring[ridx].lbq_dma.size);
1634 bzero(ha->rx_ring[ridx].sbq_dma.dma_b, ha->rx_ring[ridx].sbq_dma.size);
1635 bzero(ha->rx_ring[ridx].lb_dma.dma_b, ha->rx_ring[ridx].lb_dma.size);
1637 /* completion queue */
1638 ha->rx_ring[ridx].cq_base_vaddr = ha->rx_ring[ridx].cq_dma.dma_b;
1639 ha->rx_ring[ridx].cq_base_paddr = ha->rx_ring[ridx].cq_dma.dma_addr;
1641 v_addr = ha->rx_ring[ridx].cq_dma.dma_b;
1642 p_addr = ha->rx_ring[ridx].cq_dma.dma_addr;
/* skip past the CQ entries into the extra trailing page */
1644 v_addr = v_addr + (NUM_CQ_ENTRIES * (sizeof (q81_cq_e_t)));
1645 p_addr = p_addr + (NUM_CQ_ENTRIES * (sizeof (q81_cq_e_t)));
1647 /* completion queue icb */
1648 ha->rx_ring[ridx].cq_icb_vaddr = v_addr;
1649 ha->rx_ring[ridx].cq_icb_paddr = p_addr;
/* quarter page past the ICB */
1651 v_addr = v_addr + (PAGE_SIZE >> 2);
1652 p_addr = p_addr + (PAGE_SIZE >> 2);
1654 /* completion queue index register */
1655 ha->rx_ring[ridx].cqi_vaddr = (uint32_t *)v_addr;
1656 ha->rx_ring[ridx].cqi_paddr = p_addr;
1658 v_addr = ha->rx_ring[ridx].lbq_dma.dma_b;
1659 p_addr = ha->rx_ring[ridx].lbq_dma.dma_addr;
1661 /* large buffer queue address table */
1662 ha->rx_ring[ridx].lbq_addr_tbl_vaddr = v_addr;
1663 ha->rx_ring[ridx].lbq_addr_tbl_paddr = p_addr;
1665 /* large buffer queue */
1666 ha->rx_ring[ridx].lbq_vaddr = v_addr + PAGE_SIZE;
1667 ha->rx_ring[ridx].lbq_paddr = p_addr + PAGE_SIZE;
1669 v_addr = ha->rx_ring[ridx].sbq_dma.dma_b;
1670 p_addr = ha->rx_ring[ridx].sbq_dma.dma_addr;
1672 /* small buffer queue address table */
1673 ha->rx_ring[ridx].sbq_addr_tbl_vaddr = v_addr;
1674 ha->rx_ring[ridx].sbq_addr_tbl_paddr = p_addr;
1676 /* small buffer queue */
1677 ha->rx_ring[ridx].sbq_vaddr = v_addr + PAGE_SIZE;
1678 ha->rx_ring[ridx].sbq_paddr = p_addr + PAGE_SIZE;
1680 ha->rx_ring[ridx].lb_vaddr = ha->rx_ring[ridx].lb_dma.dma_b;
1681 ha->rx_ring[ridx].lb_paddr = ha->rx_ring[ridx].lb_dma.dma_addr;
1683 /* Initialize Large Buffer Queue Table */
/* table entry 0 points at the LBQ page (lo/hi 32-bit split) */
1685 p_addr = ha->rx_ring[ridx].lbq_paddr;
1686 bq_e = ha->rx_ring[ridx].lbq_addr_tbl_vaddr;
1688 bq_e->addr_lo = p_addr & 0xFFFFFFFF;
1689 bq_e->addr_hi = (p_addr >> 32) & 0xFFFFFFFF;
/* fill the LBQ with the bus address of each large buffer */
1691 p_addr = ha->rx_ring[ridx].lb_paddr;
1692 bq_e = ha->rx_ring[ridx].lbq_vaddr;
1694 for (i = 0; i < QLA_NUM_LGB_ENTRIES; i++) {
1695 bq_e->addr_lo = p_addr & 0xFFFFFFFF;
1696 bq_e->addr_hi = (p_addr >> 32) & 0xFFFFFFFF;
1698 p_addr = p_addr + QLA_LGB_SIZE;
1702 /* Initialize Small Buffer Queue Table */
/* one table entry per page of the SBQ */
1704 p_addr = ha->rx_ring[ridx].sbq_paddr;
1705 bq_e = ha->rx_ring[ridx].sbq_addr_tbl_vaddr;
1707 for (i =0; i < (QLA_SBQ_SIZE/QLA_PAGE_SIZE); i++) {
1708 bq_e->addr_lo = p_addr & 0xFFFFFFFF;
1709 bq_e->addr_hi = (p_addr >> 32) & 0xFFFFFFFF;
1711 p_addr = p_addr + QLA_PAGE_SIZE;
1715 qls_alloc_rx_ring_dma_exit:
/*
 * qls_alloc_rx_dma
 *	Create the RX mbuf DMA tag (single segment up to MJUM9BYTES,
 *	i.e. 9K jumbo clusters), then allocate the DMA areas of every
 *	RX ring; on a per-ring failure all RX DMA state is released
 *	via qls_free_rx_dma().
 */
1720 qls_alloc_rx_dma(qla_host_t *ha)
1725 if (bus_dma_tag_create(NULL, /* parent */
1726 1, 0, /* alignment, bounds */
1727 BUS_SPACE_MAXADDR, /* lowaddr */
1728 BUS_SPACE_MAXADDR, /* highaddr */
1729 NULL, NULL, /* filter, filterarg */
1730 MJUM9BYTES, /* maxsize */
1732 MJUM9BYTES, /* maxsegsize */
1733 BUS_DMA_ALLOCNOW, /* flags */
1734 NULL, /* lockfunc */
1735 NULL, /* lockfuncarg */
1737 device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
1743 for (i = 0; i < ha->num_rx_rings; i++) {
1744 ret = qls_alloc_rx_ring_dma(ha, i);
/* ring allocation failed: unwind everything */
1747 qls_free_rx_dma(ha);
/*
 * qls_wait_for_flash_ready
 *	Poll the flash address register until the Ready bit is set,
 *	bailing out early on the Error bit; delays 100us between polls.
 */
1756 qls_wait_for_flash_ready(qla_host_t *ha)
1762 data32 = READ_REG32(ha, Q81_CTL_FLASH_ADDR);
/* hardware reported a flash access error — give up */
1764 if (data32 & Q81_CTL_FLASH_ADDR_ERR)
1765 goto qls_wait_for_flash_ready_exit;
/* ready: the pending flash operation has completed */
1767 if (data32 & Q81_CTL_FLASH_ADDR_RDY)
1770 QLA_USEC_DELAY(100);
1773 qls_wait_for_flash_ready_exit:
1774 QL_DPRINT1((ha->pci_dev, "%s: failed\n", __func__));
1780 * Name: qls_rd_flash32
1781 * Function: Read Flash Memory
/*
 * Read one 32-bit word of flash at word address `addr` into *data.
 * Each step waits for the flash controller to report ready.
 */
1784 qls_rd_flash32(qla_host_t *ha, uint32_t addr, uint32_t *data)
/* controller must be idle before posting the address */
1788 ret = qls_wait_for_flash_ready(ha);
/* post the read: flash word address with the Read bit set */
1793 WRITE_REG32(ha, Q81_CTL_FLASH_ADDR, (addr | Q81_CTL_FLASH_ADDR_R));
/* wait for completion before sampling the data register */
1795 ret = qls_wait_for_flash_ready(ha);
1800 *data = READ_REG32(ha, Q81_CTL_FLASH_DATA);
/*
 * qls_flash_validate
 *	Validate the flash image previously read into ha->flash:
 *	the 4-byte id must match `signature`, and the 16-bit one's
 *	additive checksum over the whole q81_flash_t must come out
 *	to zero (checked after the loop; guard not visible here).
 */
1806 qls_flash_validate(qla_host_t *ha, const char *signature)
1808 uint16_t csum16 = 0;
1812 if (bcmp(ha->flash.id, signature, 4)) {
1813 QL_DPRINT1((ha->pci_dev, "%s: invalid signature "
1814 "%x:%x:%x:%x %s\n", __func__, ha->flash.id[0],
1815 ha->flash.id[1], ha->flash.id[2], ha->flash.id[3],
/* sum the image as 16-bit words */
1820 data16 = (uint16_t *)&ha->flash;
1822 for (i = 0; i < (sizeof (q81_flash_t) >> 1); i++) {
1823 csum16 += *data16++;
1827 QL_DPRINT1((ha->pci_dev, "%s: invalid checksum\n", __func__));
/*
 * qls_rd_nic_params
 *	Read this function's parameter block from flash (under the
 *	flash hardware semaphore), validate it, and extract the MAC
 *	address into ha->mac_addr. Even PCI functions read from the
 *	F0 flash region, odd functions from F1.
 */
1834 qls_rd_nic_params(qla_host_t *ha)
/* serialize flash access across PCI functions */
1840 if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_FLASH, Q81_CTL_SEM_SET_FLASH)) {
1841 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
/* choose per-function flash region; >>2 converts byte to word offset */
1845 if ((ha->pci_func & 0x1) == 0)
1846 faddr = Q81_F0_FLASH_OFFSET >> 2;
1848 faddr = Q81_F1_FLASH_OFFSET >> 2;
/* read the whole q81_flash_t one 32-bit word at a time */
1850 qflash = (uint32_t *)&ha->flash;
1852 for (i = 0; i < (sizeof(q81_flash_t) >> 2) ; i++) {
1853 ret = qls_rd_flash32(ha, faddr, qflash);
1856 goto qls_rd_flash_data_exit;
1862 QL_DUMP_BUFFER8(ha, __func__, (&ha->flash), (sizeof (q81_flash_t)));
/* reject images whose signature/checksum don't match */
1864 ret = qls_flash_validate(ha, Q81_FLASH_ID);
1867 goto qls_rd_flash_data_exit;
1869 bcopy(ha->flash.mac_addr0, ha->mac_addr, ETHER_ADDR_LEN);
1871 QL_DPRINT1((ha->pci_dev, "%s: mac %02x:%02x:%02x:%02x:%02x:%02x\n",
1872 __func__, ha->mac_addr[0], ha->mac_addr[1], ha->mac_addr[2],
1873 ha->mac_addr[3], ha->mac_addr[4], ha->mac_addr[5]));
1875 qls_rd_flash_data_exit:
/* always drop the flash semaphore on the way out */
1877 qls_sem_unlock(ha, Q81_CTL_SEM_MASK_FLASH);
/*
 * qls_sem_lock
 *	Acquire a hardware semaphore: write mask|value to the
 *	semaphore register and read it back to check ownership,
 *	retrying up to `count` times with 100us delays. On timeout,
 *	request driver-level recovery.
 */
1883 qls_sem_lock(qla_host_t *ha, uint32_t mask, uint32_t value)
1885 uint32_t count = 30;
1889 WRITE_REG32(ha, Q81_CTL_SEMAPHORE, (mask|value));
/* read back: semaphore granted only if our value sticks */
1891 data = READ_REG32(ha, Q81_CTL_SEMAPHORE);
1896 QLA_USEC_DELAY(100);
/* could not obtain the semaphore — trigger recovery */
1899 ha->qla_initiate_recovery = 1;
/*
 * qls_sem_unlock
 *	Release a hardware semaphore by writing only its mask
 *	(value bits clear) to the semaphore register.
 */
1904 qls_sem_unlock(qla_host_t *ha, uint32_t mask)
1906 WRITE_REG32(ha, Q81_CTL_SEMAPHORE, mask);
/*
 * qls_wait_for_proc_addr_ready
 *	Poll the processor address register until Ready, bailing out
 *	on the Error bit; 100us between polls. On failure, request
 *	driver-level recovery.
 */
1910 qls_wait_for_proc_addr_ready(qla_host_t *ha)
1916 data32 = READ_REG32(ha, Q81_CTL_PROC_ADDR);
1918 if (data32 & Q81_CTL_PROC_ADDR_ERR)
1919 goto qls_wait_for_proc_addr_ready_exit;
1921 if (data32 & Q81_CTL_PROC_ADDR_RDY)
1924 QLA_USEC_DELAY(100);
1927 qls_wait_for_proc_addr_ready_exit:
1928 QL_DPRINT1((ha->pci_dev, "%s: failed\n", __func__));
/* hardware not responding — trigger recovery */
1930 ha->qla_initiate_recovery = 1;
/*
 * qls_proc_addr_rd_reg
 *	Read an indirect processor register: post addr_module|reg with
 *	the Read bit via Q81_CTL_PROC_ADDR, then fetch the result from
 *	Q81_CTL_PROC_DATA. Both steps wait for the interface to be ready.
 */
1935 qls_proc_addr_rd_reg(qla_host_t *ha, uint32_t addr_module, uint32_t reg,
1941 ret = qls_wait_for_proc_addr_ready(ha);
1944 goto qls_proc_addr_rd_reg_exit;
1946 value = addr_module | reg | Q81_CTL_PROC_ADDR_READ;
1948 WRITE_REG32(ha, Q81_CTL_PROC_ADDR, value);
/* wait for the indirect read to complete */
1950 ret = qls_wait_for_proc_addr_ready(ha);
1953 goto qls_proc_addr_rd_reg_exit;
1955 *data = READ_REG32(ha, Q81_CTL_PROC_DATA);
1957 qls_proc_addr_rd_reg_exit:
/*
 * qls_proc_addr_wr_reg
 *	Write an indirect processor register: stage `data` in
 *	Q81_CTL_PROC_DATA, then post addr_module|reg (no Read bit)
 *	to Q81_CTL_PROC_ADDR and wait for completion.
 */
1962 qls_proc_addr_wr_reg(qla_host_t *ha, uint32_t addr_module, uint32_t reg,
1968 ret = qls_wait_for_proc_addr_ready(ha);
1971 goto qls_proc_addr_wr_reg_exit;
/* data must be staged before the address/command write */
1973 WRITE_REG32(ha, Q81_CTL_PROC_DATA, data);
1975 value = addr_module | reg;
1977 WRITE_REG32(ha, Q81_CTL_PROC_ADDR, value);
1979 ret = qls_wait_for_proc_addr_ready(ha);
1981 qls_proc_addr_wr_reg_exit:
/*
 * qls_hw_nic_reset
 *	Reset this NIC function by writing the function-reset command
 *	to Q81_CTL_RESET, then polling until the function-reset bit
 *	(bit 15) clears; logs a failure if it never does.
 */
1986 qls_hw_nic_reset(qla_host_t *ha)
1990 device_t dev = ha->pci_dev;
/* mask bits select which reset bits the write affects */
1994 data = (Q81_CTL_RESET_FUNC << Q81_CTL_RESET_MASK_SHIFT) |
1996 WRITE_REG32(ha, Q81_CTL_RESET, data);
2000 data = READ_REG32(ha, Q81_CTL_RESET);
/* reset complete once hardware clears the function-reset bit */
2001 if ((data & Q81_CTL_RESET_FUNC) == 0)
2006 device_printf(dev, "%s: Bit 15 not cleared after Reset\n",
/*
 * qls_hw_reset
 *	Full reset sequence. On first-time init (hw_init == 0) just
 *	reset the NIC function. Otherwise: clear the routing table,
 *	ask the management firmware to pause, wait for the NIC FIFO
 *	to drain (Q81_CTL_STATUS_NFE) and for management to confirm
 *	FIFO-empty, reset the NIC function, then resume management.
 */
2014 qls_hw_reset(qla_host_t *ha)
2016 device_t dev = ha->pci_dev;
2021 QL_DPRINT2((ha->pci_dev, "%s:enter[%d]\n", __func__, ha->hw_init));
/* first init: hardware was never started, a bare function reset suffices */
2023 if (ha->hw_init == 0) {
2024 ret = qls_hw_nic_reset(ha);
2025 goto qls_hw_reset_exit;
2028 ret = qls_clear_routing_table(ha);
2030 goto qls_hw_reset_exit;
/* tell management firmware to stop before we reset */
2032 ret = qls_mbx_set_mgmt_ctrl(ha, Q81_MBX_SET_MGMT_CTL_STOP);
2034 goto qls_hw_reset_exit;
2037 * Wait for FIFO to empty
2041 data = READ_REG32(ha, Q81_CTL_STATUS);
2042 if (data & Q81_CTL_STATUS_NFE)
2044 qls_mdelay(__func__, 100);
2047 device_printf(dev, "%s: NFE bit not set\n", __func__);
2048 goto qls_hw_reset_exit;
/* poll management status until it reports FIFO empty + mgmt set */
2053 (void)qls_mbx_get_mgmt_ctrl(ha, &data);
2055 if ((data & Q81_MBX_GET_MGMT_CTL_FIFO_EMPTY) &&
2056 (data & Q81_MBX_GET_MGMT_CTL_SET_MGMT))
2058 qls_mdelay(__func__, 100);
2061 goto qls_hw_reset_exit;
2064 * Reset the NIC function
2066 ret = qls_hw_nic_reset(ha);
2068 goto qls_hw_reset_exit;
/* let management firmware run again after the reset */
2070 ret = qls_mbx_set_mgmt_ctrl(ha, Q81_MBX_SET_MGMT_CTL_RESUME);
2074 device_printf(dev, "%s: failed\n", __func__);
2080 * MPI Related Functions
/*
 * qls_mpi_risc_rd_reg: read an MPI RISC register via the indirect
 * processor-address interface.
 */
2083 qls_mpi_risc_rd_reg(qla_host_t *ha, uint32_t reg, uint32_t *data)
2087 ret = qls_proc_addr_rd_reg(ha, Q81_CTL_PROC_ADDR_MPI_RISC,
/*
 * qls_mpi_risc_wr_reg: write an MPI RISC register via the indirect
 * processor-address interface.
 */
2093 qls_mpi_risc_wr_reg(qla_host_t *ha, uint32_t reg, uint32_t data)
2097 ret = qls_proc_addr_wr_reg(ha, Q81_CTL_PROC_ADDR_MPI_RISC,
/*
 * qls_mbx_rd_reg
 *	Read outbound mailbox register `reg`, offset by the per-PCI-
 *	function mailbox base (F0 for even functions, F1 for odd).
 */
2103 qls_mbx_rd_reg(qla_host_t *ha, uint32_t reg, uint32_t *data)
2107 if ((ha->pci_func & 0x1) == 0)
2108 reg += Q81_FUNC0_MBX_OUT_REG0;
2110 reg += Q81_FUNC1_MBX_OUT_REG0;
2112 ret = qls_mpi_risc_rd_reg(ha, reg, data);
/*
 * qls_mbx_wr_reg
 *	Write inbound mailbox register `reg`, offset by the per-PCI-
 *	function mailbox base (F0 for even functions, F1 for odd).
 */
2118 qls_mbx_wr_reg(qla_host_t *ha, uint32_t reg, uint32_t data)
2122 if ((ha->pci_func & 0x1) == 0)
2123 reg += Q81_FUNC0_MBX_IN_REG0;
2125 reg += Q81_FUNC1_MBX_IN_REG0;
2127 ret = qls_mpi_risc_wr_reg(ha, reg, data);
/*
 * qls_mbx_cmd
 *	Issue a mailbox command to the MPI firmware: load i_count
 *	input registers (under the NIC-RCV processor-address
 *	semaphore), raise the host-to-RISC interrupt, then collect
 *	o_count output registers. Completion is detected either by
 *	polling (interrupts disabled) or by the interrupt handler
 *	filling ha->mbox. Mailbox status codes: 0x4xxx = completion,
 *	0x8xxx = handled via the interrupt-clear path below.
 */
2133 qls_mbx_cmd(qla_host_t *ha, uint32_t *in_mbx, uint32_t i_count,
2134 uint32_t *out_mbx, uint32_t o_count)
2138 uint32_t count = 50;
2140 QL_DPRINT2((ha->pci_dev, "%s: enter[0x%08x 0x%08x 0x%08x]\n",
2141 __func__, *in_mbx, *(in_mbx + 1), *(in_mbx + 2)));
/* a previous host-to-RISC command is still pending — refuse */
2143 data32 = READ_REG32(ha, Q81_CTL_HOST_CMD_STATUS);
2145 if (data32 & Q81_CTL_HCS_HTR_INTR) {
2146 device_printf(ha->pci_dev, "%s: cmd_status[0x%08x]\n",
2148 goto qls_mbx_cmd_exit;
/* serialize processor-address access with other functions */
2151 if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_PROC_ADDR_NIC_RCV,
2152 Q81_CTL_SEM_SET_PROC_ADDR_NIC_RCV)) {
2153 device_printf(ha->pci_dev, "%s: semlock failed\n", __func__);
2154 goto qls_mbx_cmd_exit;
/* load the input mailbox registers */
2159 for (i = 0; i < i_count; i++) {
2160 ret = qls_mbx_wr_reg(ha, i, *in_mbx);
2163 device_printf(ha->pci_dev,
2164 "%s: mbx_wr[%d, 0x%08x] failed\n", __func__,
2166 qls_sem_unlock(ha, Q81_CTL_SEM_MASK_PROC_ADDR_NIC_RCV);
2167 goto qls_mbx_cmd_exit;
/* kick the firmware: set the host-to-RISC interrupt */
2172 WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS, Q81_CTL_HCS_CMD_SET_HTR_INTR);
2174 qls_sem_unlock(ha, Q81_CTL_SEM_MASK_PROC_ADDR_NIC_RCV);
/* polled completion path (interrupts disabled) */
2180 if (ha->flags.intr_enable == 0) {
2181 data32 = READ_REG32(ha, Q81_CTL_STATUS);
/* no processor interrupt yet — wait and re-poll */
2183 if (!(data32 & Q81_CTL_STATUS_PI)) {
2184 qls_mdelay(__func__, 100);
2188 ret = qls_mbx_rd_reg(ha, 0, &data32);
/* 0x4xxx in mbox0 = command completion status group */
2191 if ((data32 & 0xF000) == 0x4000) {
2192 out_mbx[0] = data32;
2194 for (i = 1; i < o_count; i++) {
2195 ret = qls_mbx_rd_reg(ha, i,
2205 out_mbx[i] = data32;
/* 0x8xxx = async event: ack it and keep waiting */
2208 } else if ((data32 & 0xF000) == 0x8000) {
2211 Q81_CTL_HOST_CMD_STATUS,\
2212 Q81_CTL_HCS_CMD_CLR_RTH_INTR);
/* interrupt-driven path: ISR deposited results in ha->mbox */
2217 for (i = 1; i < o_count; i++) {
2218 out_mbx[i] = ha->mbox[i];
2224 qls_mdelay(__func__, 1000);
/* done (or timed out): clear RISC-to-host interrupt when polling */
2229 if (ha->flags.intr_enable == 0) {
2230 WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS,\
2231 Q81_CTL_HCS_CMD_CLR_RTH_INTR);
/* NOTE(review): recovery is requested on the failure path here */
2235 ha->qla_initiate_recovery = 1;
2238 QL_DPRINT2((ha->pci_dev, "%s: exit[%d]\n", __func__, ret));
/*
 * qls_mbx_set_mgmt_ctrl
 *	Issue the SET_MGMT_CTL mailbox command with control word
 *	t_ctrl (e.g. STOP/RESUME). A CMD_ERROR reply is tolerated
 *	for STOP requests (management may already be stopped).
 */
2243 qls_mbx_set_mgmt_ctrl(qla_host_t *ha, uint32_t t_ctrl)
2246 device_t dev = ha->pci_dev;
2249 bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
2251 mbox[0] = Q81_MBX_SET_MGMT_CTL;
/* 2 inputs (cmd + ctrl), 1 output (status) */
2254 if (qls_mbx_cmd(ha, mbox, 2, mbox, 1)) {
2255 device_printf(dev, "%s failed\n", __func__);
2259 if ((mbox[0] == Q81_MBX_CMD_COMPLETE) ||
2260 ((t_ctrl == Q81_MBX_SET_MGMT_CTL_STOP) &&
2261 (mbox[0] == Q81_MBX_CMD_ERROR))){
2264 device_printf(dev, "%s failed [0x%08x]\n", __func__, mbox[0]);
/*
 * qls_mbx_get_mgmt_ctrl
 *	Issue the GET_MGMT_CTL mailbox command and return the
 *	management status word (mbox[1]) in *t_status.
 */
2270 qls_mbx_get_mgmt_ctrl(qla_host_t *ha, uint32_t *t_status)
2273 device_t dev = ha->pci_dev;
2278 bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
2280 mbox[0] = Q81_MBX_GET_MGMT_CTL;
/* 1 input (cmd), 2 outputs (status code + mgmt status) */
2282 if (qls_mbx_cmd(ha, mbox, 1, mbox, 2)) {
2283 device_printf(dev, "%s failed\n", __func__);
2287 *t_status = mbox[1];
/*
 * qls_mbx_get_link_status
 *	Issue the GET_LNK_STATUS mailbox command and cache the five
 *	returned words (link status, link-down info, hw info, DCBX
 *	counters, link-change counters) in the softc.
 */
2293 qls_mbx_get_link_status(qla_host_t *ha)
2296 device_t dev = ha->pci_dev;
2299 bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
2301 mbox[0] = Q81_MBX_GET_LNK_STATUS;
/* 1 input (cmd), 6 outputs (status + 5 data words) */
2303 if (qls_mbx_cmd(ha, mbox, 1, mbox, 6)) {
2304 device_printf(dev, "%s failed\n", __func__);
2308 ha->link_status = mbox[1];
2309 ha->link_down_info = mbox[2];
2310 ha->link_hw_info = mbox[3];
2311 ha->link_dcbx_counters = mbox[4];
2312 ha->link_change_counters = mbox[5];
2314 device_printf(dev, "%s 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
2315 __func__, mbox[0],mbox[1],mbox[2],mbox[3],mbox[4],mbox[5]);
/*
 * qls_mbx_about_fw
 *	Issue the ABOUT_FW mailbox command and log the six returned
 *	firmware-information words.
 */
2321 qls_mbx_about_fw(qla_host_t *ha)
2324 device_t dev = ha->pci_dev;
2327 bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
2329 mbox[0] = Q81_MBX_ABOUT_FW;
2331 if (qls_mbx_cmd(ha, mbox, 1, mbox, 6)) {
2332 device_printf(dev, "%s failed\n", __func__);
2336 device_printf(dev, "%s 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
2337 __func__, mbox[0],mbox[1],mbox[2],mbox[3],mbox[4],mbox[5]);
/*
 * qls_mbx_dump_risc_ram
 *	Dump r_size 32-bit words of MPI RISC RAM starting at word
 *	address r_addr into `buf`. The firmware DMAs the dump into
 *	the pre-allocated mpi_dma bounce buffer; its 64-bit bus
 *	address and the address/size arguments are split into the
 *	16-bit mailbox registers as laid out below, then the result
 *	is copied out after a POSTREAD sync.
 */
2341 qls_mbx_dump_risc_ram(qla_host_t *ha, void *buf, uint32_t r_addr,
2346 device_t dev = ha->pci_dev;
2349 bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
/* clear the bounce buffer; r_size is in 32-bit words, hence << 2 */
2351 bzero(ha->mpi_dma.dma_b,(r_size << 2));
2352 b_paddr = ha->mpi_dma.dma_addr;
/* mbox layout: addr[15:0], paddr[31:16], paddr[15:0], size[31:16],
 * size[15:0], paddr[63:48], paddr[47:32], addr[31:16] */
2354 mbox[0] = Q81_MBX_DUMP_RISC_RAM;
2355 mbox[1] = r_addr & 0xFFFF;
2356 mbox[2] = ((uint32_t)(b_paddr >> 16)) & 0xFFFF;
2357 mbox[3] = ((uint32_t)b_paddr) & 0xFFFF;
2358 mbox[4] = (r_size >> 16) & 0xFFFF;
2359 mbox[5] = r_size & 0xFFFF;
2360 mbox[6] = ((uint32_t)(b_paddr >> 48)) & 0xFFFF;
2361 mbox[7] = ((uint32_t)(b_paddr >> 32)) & 0xFFFF;
2362 mbox[8] = (r_addr >> 16) & 0xFFFF;
/* make the buffer coherent before the device writes into it */
2364 bus_dmamap_sync(ha->mpi_dma.dma_tag, ha->mpi_dma.dma_map,
2365 BUS_DMASYNC_PREREAD);
2367 if (qls_mbx_cmd(ha, mbox, 9, mbox, 1)) {
2368 device_printf(dev, "%s failed\n", __func__);
/* 0x4000 = mailbox command-complete status */
2371 if (mbox[0] != 0x4000) {
2372 device_printf(ha->pci_dev, "%s: failed!\n", __func__);
2375 bus_dmamap_sync(ha->mpi_dma.dma_tag, ha->mpi_dma.dma_map,
2376 BUS_DMASYNC_POSTREAD);
2377 bcopy(ha->mpi_dma.dma_b, buf, (r_size << 2));
2384 qls_mpi_reset(qla_host_t *ha)
2388 device_t dev = ha->pci_dev;
2390 WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS,\
2391 Q81_CTL_HCS_CMD_SET_RISC_RESET);
2395 data = READ_REG32(ha, Q81_CTL_HOST_CMD_STATUS);
2396 if (data & Q81_CTL_HCS_RISC_RESET) {
2397 WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS,\
2398 Q81_CTL_HCS_CMD_CLR_RISC_RESET);
2401 qls_mdelay(__func__, 10);
2404 device_printf(dev, "%s: failed\n", __func__);