/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013-2014 Qlogic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 * Content: Contains Hardware dependent functions
 */
35 #include <sys/cdefs.h>
39 #include "qls_inline.h"
/*
 * Prototypes for functions local to this file.
 * NOTE(review): this excerpt elides some blank/prototype lines in between.
 */

/* MAC/protocol CAM index helpers. */
static int qls_wait_for_mac_proto_idx_ready(qla_host_t *ha, uint32_t op);
static int qls_config_unicast_mac_addr(qla_host_t *ha, uint32_t add_mac);
static int qls_config_mcast_mac_addr(qla_host_t *ha, uint8_t *mac_addr,
        uint32_t add_mac, uint32_t index);

/* Queue/context initialization. */
static int qls_init_rss(qla_host_t *ha);
static int qls_init_comp_queue(qla_host_t *ha, int cid);
static int qls_init_work_queue(qla_host_t *ha, int wid);
static int qls_init_fw_routing_table(qla_host_t *ha);

/* Multicast filter maintenance. */
static int qls_hw_add_all_mcast(qla_host_t *ha);
static int qls_hw_add_mcast(qla_host_t *ha, uint8_t *mta);
static int qls_hw_del_mcast(qla_host_t *ha, uint8_t *mta);

/* Flash access helper. */
static int qls_wait_for_flash_ready(qla_host_t *ha);

/* Inter-function hardware semaphore lock/unlock. */
static int qls_sem_lock(qla_host_t *ha, uint32_t mask, uint32_t value);
static void qls_sem_unlock(qla_host_t *ha, uint32_t mask);

/* DMA memory setup/teardown for Tx/Rx rings, MPI dump and RSS regions. */
static void qls_free_tx_dma(qla_host_t *ha);
static int qls_alloc_tx_dma(qla_host_t *ha);
static void qls_free_rx_dma(qla_host_t *ha);
static int qls_alloc_rx_dma(qla_host_t *ha);
static void qls_free_mpi_dma(qla_host_t *ha);
static int qls_alloc_mpi_dma(qla_host_t *ha);
static void qls_free_rss_dma(qla_host_t *ha);
static int qls_alloc_rss_dma(qla_host_t *ha);

static int qls_flash_validate(qla_host_t *ha, const char *signature);

/* Indirect processor-address register access. */
static int qls_wait_for_proc_addr_ready(qla_host_t *ha);
static int qls_proc_addr_rd_reg(qla_host_t *ha, uint32_t addr_module,
                uint32_t reg, uint32_t *data);
static int qls_proc_addr_wr_reg(qla_host_t *ha, uint32_t addr_module,
                uint32_t reg, uint32_t data);

static int qls_hw_reset(qla_host_t *ha);

/*
 * MPI Related Functions
 */
static int qls_mbx_cmd(qla_host_t *ha, uint32_t *in_mbx, uint32_t i_count,
                uint32_t *out_mbx, uint32_t o_count);
static int qls_mbx_set_mgmt_ctrl(qla_host_t *ha, uint32_t t_ctrl);
static int qls_mbx_get_mgmt_ctrl(qla_host_t *ha, uint32_t *t_status);
static void qls_mbx_get_link_status(qla_host_t *ha);
static void qls_mbx_about_fw(qla_host_t *ha);
94 qls_get_msix_count(qla_host_t *ha)
96 return (ha->num_rx_rings);
100 qls_syctl_mpi_dump(SYSCTL_HANDLER_ARGS)
105 err = sysctl_handle_int(oidp, &ret, 0, req);
107 if (err || !req->newptr)
111 ha = (qla_host_t *)arg1;
112 qls_mpi_core_dump(ha);
118 qls_syctl_link_status(SYSCTL_HANDLER_ARGS)
123 err = sysctl_handle_int(oidp, &ret, 0, req);
125 if (err || !req->newptr)
129 ha = (qla_host_t *)arg1;
130 qls_mbx_get_link_status(ha);
131 qls_mbx_about_fw(ha);
137 qls_hw_add_sysctls(qla_host_t *ha)
143 ha->num_rx_rings = MAX_RX_RINGS; ha->num_tx_rings = MAX_TX_RINGS;
145 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
146 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
147 OID_AUTO, "num_rx_rings", CTLFLAG_RD, &ha->num_rx_rings,
148 ha->num_rx_rings, "Number of Completion Queues");
150 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
151 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
152 OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->num_tx_rings,
153 ha->num_tx_rings, "Number of Transmit Rings");
155 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
156 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
157 OID_AUTO, "mpi_dump",
158 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)ha, 0,
159 qls_syctl_mpi_dump, "I", "MPI Dump");
161 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
162 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
163 OID_AUTO, "link_status",
164 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)ha, 0,
165 qls_syctl_link_status, "I", "Link Status");
170 * Function: Frees the DMA'able memory allocated in qls_alloc_dma()
173 qls_free_dma(qla_host_t *ha)
175 qls_free_rss_dma(ha);
176 qls_free_mpi_dma(ha);
183 * Name: qls_alloc_dma
184 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
187 qls_alloc_dma(qla_host_t *ha)
189 if (qls_alloc_rx_dma(ha))
192 if (qls_alloc_tx_dma(ha)) {
197 if (qls_alloc_mpi_dma(ha)) {
203 if (qls_alloc_rss_dma(ha)) {
204 qls_free_mpi_dma(ha);
214 qls_wait_for_mac_proto_idx_ready(qla_host_t *ha, uint32_t op)
220 data32 = READ_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX);
227 ha->qla_initiate_recovery = 1;
232 * Name: qls_config_unicast_mac_addr
233 * Function: binds/unbinds a unicast MAC address to the interface.
236 qls_config_unicast_mac_addr(qla_host_t *ha, uint32_t add_mac)
239 uint32_t mac_upper = 0;
240 uint32_t mac_lower = 0;
241 uint32_t value = 0, index;
243 if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_MAC_SERDES,
244 Q81_CTL_SEM_SET_MAC_SERDES)) {
245 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
250 mac_upper = (ha->mac_addr[0] << 8) | ha->mac_addr[1];
251 mac_lower = (ha->mac_addr[2] << 24) | (ha->mac_addr[3] << 16) |
252 (ha->mac_addr[4] << 8) | ha->mac_addr[5];
254 ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
256 goto qls_config_unicast_mac_addr_exit;
258 index = 128 * (ha->pci_func & 0x1); /* index */
260 value = (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
261 Q81_CTL_MAC_PROTO_AI_TYPE_CAM_MAC;
263 WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
264 WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_lower);
266 ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
268 goto qls_config_unicast_mac_addr_exit;
270 value = (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
271 Q81_CTL_MAC_PROTO_AI_TYPE_CAM_MAC | 0x1;
273 WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
274 WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_upper);
276 ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
278 goto qls_config_unicast_mac_addr_exit;
280 value = (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
281 Q81_CTL_MAC_PROTO_AI_TYPE_CAM_MAC | 0x2;
283 WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
285 value = Q81_CAM_MAC_OFF2_ROUTE_NIC |
286 ((ha->pci_func & 0x1) << Q81_CAM_MAC_OFF2_FUNC_SHIFT) |
287 (0 << Q81_CAM_MAC_OFF2_CQID_SHIFT);
289 WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, value);
291 qls_config_unicast_mac_addr_exit:
292 qls_sem_unlock(ha, Q81_CTL_SEM_MASK_MAC_SERDES);
297 * Name: qls_config_mcast_mac_addr
298 * Function: binds/unbinds a multicast MAC address to the interface.
301 qls_config_mcast_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac,
305 uint32_t mac_upper = 0;
306 uint32_t mac_lower = 0;
309 if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_MAC_SERDES,
310 Q81_CTL_SEM_SET_MAC_SERDES)) {
311 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
316 mac_upper = (mac_addr[0] << 8) | mac_addr[1];
317 mac_lower = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
318 (mac_addr[4] << 8) | mac_addr[5];
320 ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
322 goto qls_config_mcast_mac_addr_exit;
324 value = Q81_CTL_MAC_PROTO_AI_E |
325 (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
326 Q81_CTL_MAC_PROTO_AI_TYPE_MCAST ;
328 WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
329 WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_lower);
331 ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
333 goto qls_config_mcast_mac_addr_exit;
335 value = Q81_CTL_MAC_PROTO_AI_E |
336 (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
337 Q81_CTL_MAC_PROTO_AI_TYPE_MCAST | 0x1;
339 WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
340 WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_upper);
342 qls_config_mcast_mac_addr_exit:
343 qls_sem_unlock(ha, Q81_CTL_SEM_MASK_MAC_SERDES);
349 * Name: qls_set_mac_rcv_mode
350 * Function: Enable/Disable AllMulticast and Promiscuous Modes.
353 qls_wait_for_route_idx_ready(qla_host_t *ha, uint32_t op)
359 data32 = READ_REG32(ha, Q81_CTL_ROUTING_INDEX);
366 ha->qla_initiate_recovery = 1;
371 qls_load_route_idx_reg(qla_host_t *ha, uint32_t index, uint32_t data)
375 ret = qls_wait_for_route_idx_ready(ha, Q81_CTL_RI_MW);
378 device_printf(ha->pci_dev, "%s: [0x%08x, 0x%08x] failed\n",
379 __func__, index, data);
380 goto qls_load_route_idx_reg_exit;
383 WRITE_REG32(ha, Q81_CTL_ROUTING_INDEX, index);
384 WRITE_REG32(ha, Q81_CTL_ROUTING_DATA, data);
386 qls_load_route_idx_reg_exit:
391 qls_load_route_idx_reg_locked(qla_host_t *ha, uint32_t index, uint32_t data)
395 if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG,
396 Q81_CTL_SEM_SET_RIDX_DATAREG)) {
397 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
401 ret = qls_load_route_idx_reg(ha, index, data);
403 qls_sem_unlock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG);
409 qls_clear_routing_table(qla_host_t *ha)
413 if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG,
414 Q81_CTL_SEM_SET_RIDX_DATAREG)) {
415 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
419 for (i = 0; i < 16; i++) {
420 ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_TYPE_NICQMASK|
421 (i << 8) | Q81_CTL_RI_DST_DFLTQ), 0);
426 qls_sem_unlock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG);
432 qls_set_promisc(qla_host_t *ha)
436 ret = qls_load_route_idx_reg_locked(ha,
437 (Q81_CTL_RI_E | Q81_CTL_RI_TYPE_NICQMASK |
438 Q81_CTL_RI_IDX_PROMISCUOUS | Q81_CTL_RI_DST_DFLTQ),
439 Q81_CTL_RD_VALID_PKT);
444 qls_reset_promisc(qla_host_t *ha)
446 qls_load_route_idx_reg_locked(ha, (Q81_CTL_RI_TYPE_NICQMASK |
447 Q81_CTL_RI_IDX_PROMISCUOUS | Q81_CTL_RI_DST_DFLTQ), 0);
452 qls_set_allmulti(qla_host_t *ha)
456 ret = qls_load_route_idx_reg_locked(ha,
457 (Q81_CTL_RI_E | Q81_CTL_RI_TYPE_NICQMASK |
458 Q81_CTL_RI_IDX_ALLMULTI | Q81_CTL_RI_DST_DFLTQ),
464 qls_reset_allmulti(qla_host_t *ha)
466 qls_load_route_idx_reg_locked(ha, (Q81_CTL_RI_TYPE_NICQMASK |
467 Q81_CTL_RI_IDX_ALLMULTI | Q81_CTL_RI_DST_DFLTQ), 0);
472 qls_init_fw_routing_table(qla_host_t *ha)
476 ret = qls_clear_routing_table(ha);
480 if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG,
481 Q81_CTL_SEM_SET_RIDX_DATAREG)) {
482 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
486 ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DROP |
487 Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_ALL_ERROR),
488 Q81_CTL_RD_ERROR_PKT);
490 goto qls_init_fw_routing_table_exit;
492 ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DFLTQ |
493 Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_BCAST),
496 goto qls_init_fw_routing_table_exit;
498 if (ha->num_rx_rings > 1 ) {
499 ret = qls_load_route_idx_reg(ha,
500 (Q81_CTL_RI_E | Q81_CTL_RI_DST_RSS |
501 Q81_CTL_RI_TYPE_NICQMASK |
502 Q81_CTL_RI_IDX_RSS_MATCH),
503 Q81_CTL_RD_RSS_MATCH);
505 goto qls_init_fw_routing_table_exit;
508 ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DFLTQ |
509 Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_MCAST_MATCH),
510 Q81_CTL_RD_MCAST_REG_MATCH);
512 goto qls_init_fw_routing_table_exit;
514 ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DFLTQ |
515 Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_CAM_HIT),
518 goto qls_init_fw_routing_table_exit;
520 qls_init_fw_routing_table_exit:
521 qls_sem_unlock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG);
/*
 * Name: qls_tx_tso_chksum
 * Parses the Ethernet and IP headers of *mp and fills in the TSO /
 * checksum-offload fields of the transmit IOCB (*tx_mac).
 *
 * NOTE(review): this excerpt elides interior lines (local declarations,
 * if/else and switch framing, braces, returns); only comments were added.
 */
qls_tx_tso_chksum(qla_host_t *ha, struct mbuf *mp, q81_tx_tso_t *tx_mac)
#if defined(INET) || defined(INET6)
        struct ether_vlan_header *eh;
        uint32_t ehdrlen, ip_hlen;
        /* Scratch buffer large enough for an IPv4 or IPv6 header copy. */
        uint8_t buf[sizeof(struct ip6_hdr)];

        eh = mtod(mp, struct ether_vlan_header *);

        /* L2 header length: an 802.1Q tag adds ETHER_VLAN_ENCAP_LEN bytes. */
        if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
                ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
                etype = ntohs(eh->evl_proto);
        /* (else branch — untagged frame; framing elided in excerpt) */
                ehdrlen = ETHER_HDR_LEN;
                etype = ntohs(eh->evl_encap_proto);

        /* ---- IPv4 path ---- */
        ip = (struct ip *)(mp->m_data + ehdrlen);

        ip_hlen = sizeof (struct ip);

        /* Header not contiguous in the first mbuf: copy it to the stack. */
        if (mp->m_len < (ehdrlen + ip_hlen)) {
                m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
                ip = (struct ip *)buf;

        tx_mac->opcode = Q81_IOCB_TX_TSO;
        tx_mac->flags |= Q81_TX_TSO_FLAGS_IPV4 ;

        /* Offsets of the L3 header and L4 pseudo-header for the hardware. */
        tx_mac->phdr_offsets = ehdrlen;
        tx_mac->phdr_offsets |= ((ehdrlen + ip_hlen) <<
                                        Q81_TX_TSO_PHDR_SHIFT);

        if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
                tx_mac->flags |= Q81_TX_TSO_FLAGS_LSO;

                th = (struct tcphdr *)(ip + 1);

                /* Seed the TCP checksum with the pseudo-header sum
                 * (argument list continues on an elided line). */
                th->th_sum = in_pseudo(ip->ip_src.s_addr,

                tx_mac->mss = mp->m_pkthdr.tso_segsz;
                tx_mac->phdr_length = ip_hlen + ehdrlen +

                tx_mac->vlan_off |= Q81_TX_TSO_VLAN_OFF_IC ;

                /* Tell the hardware which L4 checksum to insert. */
                if (ip->ip_p == IPPROTO_TCP) {
                        tx_mac->flags |= Q81_TX_TSO_FLAGS_TC;
                } else if (ip->ip_p == IPPROTO_UDP) {
                        tx_mac->flags |= Q81_TX_TSO_FLAGS_UC;

        /* ---- IPv6 path ---- */
        ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);

        ip_hlen = sizeof(struct ip6_hdr);

        if (mp->m_len < (ehdrlen + ip_hlen)) {
                m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
                ip6 = (struct ip6_hdr *)buf;

        tx_mac->opcode = Q81_IOCB_TX_TSO;
        tx_mac->flags |= Q81_TX_TSO_FLAGS_IPV6 ;
        tx_mac->vlan_off |= Q81_TX_TSO_VLAN_OFF_IC ;

        tx_mac->phdr_offsets = ehdrlen;
        tx_mac->phdr_offsets |= ((ehdrlen + ip_hlen) <<
                                        Q81_TX_TSO_PHDR_SHIFT);

        if (ip6->ip6_nxt == IPPROTO_TCP) {
                tx_mac->flags |= Q81_TX_TSO_FLAGS_TC;
        } else if (ip6->ip6_nxt == IPPROTO_UDP) {
                tx_mac->flags |= Q81_TX_TSO_FLAGS_UC;
633 #define QLA_TX_MIN_FREE 2
635 qls_hw_tx_done(qla_host_t *ha, uint32_t txr_idx)
637 uint32_t txr_done, txr_next;
639 txr_done = ha->tx_ring[txr_idx].txr_done;
640 txr_next = ha->tx_ring[txr_idx].txr_next;
642 if (txr_done == txr_next) {
643 ha->tx_ring[txr_idx].txr_free = NUM_TX_DESCRIPTORS;
644 } else if (txr_done > txr_next) {
645 ha->tx_ring[txr_idx].txr_free = txr_done - txr_next;
647 ha->tx_ring[txr_idx].txr_free = NUM_TX_DESCRIPTORS +
651 if (ha->tx_ring[txr_idx].txr_free <= QLA_TX_MIN_FREE)
/*
 * Name: qls_hw_send
 * Function: Transmits a packet. It first checks if the packet is a
 * candidate for Large TCP Segment Offload and then for UDP/TCP checksum
 * offload. If either of these creteria are not met, it is transmitted
 * as a regular ethernet frame.
 *
 * NOTE(review): this excerpt elides interior lines (declarations, error
 * returns, `segs++`/`tx_desc++` advances, braces); only comments added.
 */
qls_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
        uint32_t txr_next, struct mbuf *mp, uint32_t txr_idx)
        q81_tx_mac_t *tx_mac;
        q81_txb_desc_t *tx_desc;
        uint32_t total_length = 0;

        total_length = mp->m_pkthdr.len;

        /* Reject frames larger than the hardware can segment. */
        if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
                device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
                        __func__, total_length);

        /* Ring low on space: refresh the free count from hardware. */
        if (ha->tx_ring[txr_idx].txr_free <= (NUM_TX_DESCRIPTORS >> 2)) {
                if (qls_hw_tx_done(ha, txr_idx)) {
                        device_printf(dev, "%s: tx_free[%d] = %d\n",
                                ha->tx_ring[txr_idx].txr_free);

        /* Claim the next IOCB slot in the work queue and clear it. */
        tx_mac = (q81_tx_mac_t *)&ha->tx_ring[txr_idx].wq_vaddr[txr_next];

        bzero(tx_mac, sizeof(q81_tx_mac_t));

        /* Offload path: let qls_tx_tso_chksum() fill in the TSO IOCB. */
        if ((mp->m_pkthdr.csum_flags &
                (CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO)) != 0) {
                ret = qls_tx_tso_chksum(ha, mp, (q81_tx_tso_t *)tx_mac);

                if (mp->m_pkthdr.csum_flags & CSUM_TSO)
                        ha->tx_ring[txr_idx].tx_tso_frames++;
                /* (else branch elided) */
                        ha->tx_ring[txr_idx].tx_frames++;

        /* Plain ethernet frame (no offloads). */
                tx_mac->opcode = Q81_IOCB_TX_MAC;

        /* Pass the VLAN tag to the hardware for insertion. */
        if (mp->m_flags & M_VLANTAG) {
                tx_mac->vlan_tci = mp->m_pkthdr.ether_vtag;
                tx_mac->vlan_off |= Q81_TX_MAC_VLAN_OFF_V;

                ha->tx_ring[txr_idx].tx_vlan_frames++;

        tx_mac->frame_length = total_length;

        tx_mac->tid_lo = txr_next;

        if (nsegs <= MAX_TX_MAC_DESC) {
                /* All DMA segments fit directly in the IOCB. */
                QL_DPRINT2((dev, "%s: 1 [%d, %d]\n", __func__, total_length,

                for (i = 0; i < nsegs; i++) {
                        tx_mac->txd[i].baddr = segs->ds_addr;
                        tx_mac->txd[i].length = segs->ds_len;

                tx_mac->txd[(nsegs - 1)].flags = Q81_RXB_DESC_FLAGS_E;

                /* Too many segments: chain to the per-slot OAL buffer. */
                QL_DPRINT2((dev, "%s: 2 [%d, %d]\n", __func__, total_length,

                tx_mac->txd[0].baddr =
                        ha->tx_ring[txr_idx].tx_buf[txr_next].oal_paddr;
                tx_mac->txd[0].length =
                        nsegs * (sizeof(q81_txb_desc_t));
                tx_mac->txd[0].flags = Q81_RXB_DESC_FLAGS_C;

                tx_desc = ha->tx_ring[txr_idx].tx_buf[txr_next].oal_vaddr;

                for (i = 0; i < nsegs; i++) {
                        tx_desc->baddr = segs->ds_addr;
                        tx_desc->length = segs->ds_len;

                tx_desc->flags = Q81_RXB_DESC_FLAGS_E;

        /* Advance the producer index (ring size is a power of two). */
        txr_next = (txr_next + 1) & (NUM_TX_DESCRIPTORS - 1);
        ha->tx_ring[txr_idx].txr_next = txr_next;

        ha->tx_ring[txr_idx].txr_free--;

        /* Notify the hardware of the new producer index. */
        Q81_WR_WQ_PROD_IDX(txr_idx, txr_next);
/*
 * Name: qls_del_hw_if
 * Function: Destroys the hardware specific entities corresponding to an
 * Ethernet Interface.
 *
 * NOTE(review): this excerpt elides interior lines (declarations, braces,
 * the hw_init==0 early path and the function tail); only comments added.
 */
qls_del_hw_if(qla_host_t *ha)

        if (ha->hw_init == 0) {

        /* Invalidate all work (Tx) and completion (Rx) queues. */
        for (i = 0; i < ha->num_tx_rings; i++) {
                Q81_SET_WQ_INVALID(i);
        for (i = 0; i < ha->num_rx_rings; i++) {
                Q81_SET_CQ_INVALID(i);

        /* Disable per-ring completion interrupts. */
        for (i = 0; i < ha->num_rx_rings; i++) {
                Q81_DISABLE_INTR(ha, i); /* MSI-x i */

        /* Clear Interrupt-Handshake-Disable, then the global enable bit. */
        value = (Q81_CTL_INTRE_IHD << Q81_CTL_INTRE_MASK_SHIFT);
        WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);

        value = (Q81_CTL_INTRE_EI << Q81_CTL_INTRE_MASK_SHIFT);
        WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);
        ha->flags.intr_enable = 0;
/*
 * Name: qls_init_hw_if
 * Function: Creates the hardware specific entities corresponding to an
 * Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
 * corresponding to the interface. Enables LRO if allowed.
 *
 * NOTE(review): this excerpt elides interior lines (declarations, braces,
 * `if (ret)` guards and the function tail); only comments were added.
 */
qls_init_hw_if(qla_host_t *ha)

        QL_DPRINT2((ha->pci_dev, "%s:enter\n", __func__));

        ret = qls_hw_reset(ha);
        /* (error check elided) */
                goto qls_init_hw_if_exit;

        ha->vm_pgsize = 4096;

        /* Enable FAE and EFE bits in System Register */
        value = Q81_CTL_SYSTEM_ENABLE_FAE | Q81_CTL_SYSTEM_ENABLE_EFE;
        value = (value << Q81_CTL_SYSTEM_MASK_SHIFT) | value;

        WRITE_REG32(ha, Q81_CTL_SYSTEM, value);

        /* Set Default Completion Queue_ID in NIC Rcv Configuration Register */
        value = (Q81_CTL_NIC_RCVC_DCQ_MASK << Q81_CTL_NIC_RCVC_MASK_SHIFT);
        WRITE_REG32(ha, Q81_CTL_NIC_RCV_CONFIG, value);

        /* Function Specific Control Register - Set Page Size and Enable NIC */
        value = Q81_CTL_FUNC_SPECIFIC_FE |
                Q81_CTL_FUNC_SPECIFIC_VM_PGSIZE_MASK |
                Q81_CTL_FUNC_SPECIFIC_EPC_O |
                Q81_CTL_FUNC_SPECIFIC_EPC_I |
                Q81_CTL_FUNC_SPECIFIC_EC;
        value = (value << Q81_CTL_FUNC_SPECIFIC_MASK_SHIFT) |
                        Q81_CTL_FUNC_SPECIFIC_FE |
                        Q81_CTL_FUNC_SPECIFIC_VM_PGSIZE_4K |
                        Q81_CTL_FUNC_SPECIFIC_EPC_O |
                        Q81_CTL_FUNC_SPECIFIC_EPC_I |
                        Q81_CTL_FUNC_SPECIFIC_EC;

        WRITE_REG32(ha, Q81_CTL_FUNC_SPECIFIC, value);

        /* Interrupt Mask Register */
        value = Q81_CTL_INTRM_PI;
        value = (value << Q81_CTL_INTRM_MASK_SHIFT) | value;

        WRITE_REG32(ha, Q81_CTL_INTR_MASK, value);

        /* Initialiatize Completion Queue */
        for (i = 0; i < ha->num_rx_rings; i++) {
                ret = qls_init_comp_queue(ha, i);
                /* (error check elided) */
                        goto qls_init_hw_if_exit;

        /* RSS only matters with more than one completion queue. */
        if (ha->num_rx_rings > 1 ) {
                ret = qls_init_rss(ha);
                /* (error check elided) */
                        goto qls_init_hw_if_exit;

        /* Initialize Work Queue */

        for (i = 0; i < ha->num_tx_rings; i++) {
                ret = qls_init_work_queue(ha, i);
                /* (error check elided) */
                        goto qls_init_hw_if_exit;

        /* (preceding statement and error check elided in excerpt) */
                goto qls_init_hw_if_exit;

        /* Set up CAM RAM with MAC Address */
        ret = qls_config_unicast_mac_addr(ha, 1);
        /* (error check elided) */
                goto qls_init_hw_if_exit;

        ret = qls_hw_add_all_mcast(ha);
        /* (error check elided) */
                goto qls_init_hw_if_exit;

        /* Initialize Firmware Routing Table */
        ret = qls_init_fw_routing_table(ha);
        /* (error check elided) */
                goto qls_init_hw_if_exit;

        /* Get Chip Revision ID */
        ha->rev_id = READ_REG32(ha, Q81_CTL_REV_ID);

        /* Enable Global Interrupt */
        value = Q81_CTL_INTRE_EI;
        value = (value << Q81_CTL_INTRE_MASK_SHIFT) | value;

        WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);

        /* Enable Interrupt Handshake Disable */
        value = Q81_CTL_INTRE_IHD;
        value = (value << Q81_CTL_INTRE_MASK_SHIFT) | value;

        WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);

        /* Enable Completion Interrupt */

        ha->flags.intr_enable = 1;

        for (i = 0; i < ha->num_rx_rings; i++) {
                Q81_ENABLE_INTR(ha, i); /* MSI-x i */

        qls_mbx_get_link_status(ha);

        QL_DPRINT2((ha->pci_dev, "%s:rxr [0x%08x]\n", __func__,
                ha->rx_ring[0].cq_db_offset));
        QL_DPRINT2((ha->pci_dev, "%s:txr [0x%08x]\n", __func__,
                ha->tx_ring[0].wq_db_offset));

        /* Prime the consumer/producer doorbells for each Rx ring. */
        for (i = 0; i < ha->num_rx_rings; i++) {
                Q81_WR_CQ_CONS_IDX(i, 0);
                Q81_WR_LBQ_PROD_IDX(i, ha->rx_ring[i].lbq_in);
                Q81_WR_SBQ_PROD_IDX(i, ha->rx_ring[i].sbq_in);

                QL_DPRINT2((ha->pci_dev,
                        "%s: [wq_idx, cq_idx, lbq_idx, sbq_idx]"
                        "[0x%08x, 0x%08x, 0x%08x, 0x%08x]\n", __func__,
                        Q81_RD_WQ_IDX(i), Q81_RD_CQ_IDX(i), Q81_RD_LBQ_IDX(i),

        for (i = 0; i < ha->num_rx_rings; i++) {

        QL_DPRINT2((ha->pci_dev, "%s:exit\n", __func__));
954 qls_wait_for_config_reg_bits(qla_host_t *ha, uint32_t bits, uint32_t value)
960 data32 = READ_REG32(ha, Q81_CTL_CONFIG);
962 if ((data32 & bits) == value)
967 ha->qla_initiate_recovery = 1;
968 device_printf(ha->pci_dev, "%s: failed\n", __func__);
/*
 * 40-byte RSS hash key: all 40 bytes seed the IPv6 hash key and the
 * first 16 bytes seed the IPv4 hash key (see qls_init_rss()).
 */
static uint8_t q81_hash_key[] = {
        0xda, 0x56, 0x5a, 0x6d,
        0xc2, 0x0e, 0x5b, 0x25,
        0x3d, 0x25, 0x67, 0x41,
        0xb0, 0x8f, 0xa3, 0x43,
        0xcb, 0x2b, 0xca, 0xd0,
        0xb4, 0x30, 0x7b, 0xae,
        0xa3, 0x2d, 0xcb, 0x77,
        0x0c, 0xf2, 0x30, 0x80,
        0x3b, 0xb7, 0x42, 0x6a,
        0xfa, 0x01, 0xac, 0xbe };
985 qls_init_rss(qla_host_t *ha)
987 q81_rss_icb_t *rss_icb;
992 rss_icb = ha->rss_dma.dma_b;
994 bzero(rss_icb, sizeof (q81_rss_icb_t));
996 rss_icb->flags_base_cq_num = Q81_RSS_ICB_FLAGS_L4K |
997 Q81_RSS_ICB_FLAGS_L6K | Q81_RSS_ICB_FLAGS_LI |
998 Q81_RSS_ICB_FLAGS_LB | Q81_RSS_ICB_FLAGS_LM |
999 Q81_RSS_ICB_FLAGS_RT4 | Q81_RSS_ICB_FLAGS_RT6;
1001 rss_icb->mask = 0x3FF;
1003 for (i = 0; i < Q81_RSS_ICB_NUM_INDTBL_ENTRIES; i++) {
1004 rss_icb->cq_id[i] = (i & (ha->num_rx_rings - 1));
1007 memcpy(rss_icb->ipv6_rss_hash_key, q81_hash_key, 40);
1008 memcpy(rss_icb->ipv4_rss_hash_key, q81_hash_key, 16);
1010 ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LR, 0);
1013 goto qls_init_rss_exit;
1015 ret = qls_sem_lock(ha, Q81_CTL_SEM_MASK_ICB, Q81_CTL_SEM_SET_ICB);
1018 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
1019 goto qls_init_rss_exit;
1022 value = (uint32_t)ha->rss_dma.dma_addr;
1023 WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_LO, value);
1025 value = (uint32_t)(ha->rss_dma.dma_addr >> 32);
1026 WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_HI, value);
1028 qls_sem_unlock(ha, Q81_CTL_SEM_MASK_ICB);
1030 value = (Q81_CTL_CONFIG_LR << Q81_CTL_CONFIG_MASK_SHIFT) |
1033 WRITE_REG32(ha, Q81_CTL_CONFIG, value);
1035 ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LR, 0);
1042 qls_init_comp_queue(qla_host_t *ha, int cid)
1044 q81_cq_icb_t *cq_icb;
1049 rxr = &ha->rx_ring[cid];
1051 rxr->cq_db_offset = ha->vm_pgsize * (128 + cid);
1053 cq_icb = rxr->cq_icb_vaddr;
1055 bzero(cq_icb, sizeof (q81_cq_icb_t));
1057 cq_icb->msix_vector = cid;
1058 cq_icb->flags = Q81_CQ_ICB_FLAGS_LC |
1059 Q81_CQ_ICB_FLAGS_LI |
1060 Q81_CQ_ICB_FLAGS_LL |
1061 Q81_CQ_ICB_FLAGS_LS |
1062 Q81_CQ_ICB_FLAGS_LV;
1064 cq_icb->length_v = NUM_CQ_ENTRIES;
1066 cq_icb->cq_baddr_lo = (rxr->cq_base_paddr & 0xFFFFFFFF);
1067 cq_icb->cq_baddr_hi = (rxr->cq_base_paddr >> 32) & 0xFFFFFFFF;
1069 cq_icb->cqi_addr_lo = (rxr->cqi_paddr & 0xFFFFFFFF);
1070 cq_icb->cqi_addr_hi = (rxr->cqi_paddr >> 32) & 0xFFFFFFFF;
1072 cq_icb->pkt_idelay = 10;
1073 cq_icb->idelay = 100;
1075 cq_icb->lbq_baddr_lo = (rxr->lbq_addr_tbl_paddr & 0xFFFFFFFF);
1076 cq_icb->lbq_baddr_hi = (rxr->lbq_addr_tbl_paddr >> 32) & 0xFFFFFFFF;
1078 cq_icb->lbq_bsize = QLA_LGB_SIZE;
1079 cq_icb->lbq_length = QLA_NUM_LGB_ENTRIES;
1081 cq_icb->sbq_baddr_lo = (rxr->sbq_addr_tbl_paddr & 0xFFFFFFFF);
1082 cq_icb->sbq_baddr_hi = (rxr->sbq_addr_tbl_paddr >> 32) & 0xFFFFFFFF;
1084 cq_icb->sbq_bsize = (uint16_t)ha->msize;
1085 cq_icb->sbq_length = QLA_NUM_SMB_ENTRIES;
1089 ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LCQ, 0);
1092 goto qls_init_comp_queue_exit;
1094 ret = qls_sem_lock(ha, Q81_CTL_SEM_MASK_ICB, Q81_CTL_SEM_SET_ICB);
1097 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
1098 goto qls_init_comp_queue_exit;
1101 value = (uint32_t)rxr->cq_icb_paddr;
1102 WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_LO, value);
1104 value = (uint32_t)(rxr->cq_icb_paddr >> 32);
1105 WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_HI, value);
1107 qls_sem_unlock(ha, Q81_CTL_SEM_MASK_ICB);
1109 value = Q81_CTL_CONFIG_LCQ | Q81_CTL_CONFIG_Q_NUM_MASK;
1110 value = (value << Q81_CTL_CONFIG_MASK_SHIFT) | Q81_CTL_CONFIG_LCQ;
1111 value |= (cid << Q81_CTL_CONFIG_Q_NUM_SHIFT);
1112 WRITE_REG32(ha, Q81_CTL_CONFIG, value);
1114 ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LCQ, 0);
1117 rxr->lbq_next = rxr->lbq_free = 0;
1118 rxr->sbq_next = rxr->sbq_free = 0;
1119 rxr->rx_free = rxr->rx_next = 0;
1120 rxr->lbq_in = (QLA_NUM_LGB_ENTRIES - 1) & ~0xF;
1121 rxr->sbq_in = (QLA_NUM_SMB_ENTRIES - 1) & ~0xF;
1123 qls_init_comp_queue_exit:
1128 qls_init_work_queue(qla_host_t *ha, int wid)
1130 q81_wq_icb_t *wq_icb;
1135 txr = &ha->tx_ring[wid];
1137 txr->wq_db_addr = (struct resource *)((uint8_t *)ha->pci_reg1
1138 + (ha->vm_pgsize * wid));
1140 txr->wq_db_offset = (ha->vm_pgsize * wid);
1142 wq_icb = txr->wq_icb_vaddr;
1143 bzero(wq_icb, sizeof (q81_wq_icb_t));
1145 wq_icb->length_v = NUM_TX_DESCRIPTORS |
1148 wq_icb->flags = Q81_WQ_ICB_FLAGS_LO | Q81_WQ_ICB_FLAGS_LI |
1149 Q81_WQ_ICB_FLAGS_LB | Q81_WQ_ICB_FLAGS_LC;
1151 wq_icb->wqcqid_rss = wid;
1153 wq_icb->baddr_lo = txr->wq_paddr & 0xFFFFFFFF;
1154 wq_icb->baddr_hi = (txr->wq_paddr >> 32)& 0xFFFFFFFF;
1156 wq_icb->ci_addr_lo = txr->txr_cons_paddr & 0xFFFFFFFF;
1157 wq_icb->ci_addr_hi = (txr->txr_cons_paddr >> 32)& 0xFFFFFFFF;
1159 ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LRQ, 0);
1162 goto qls_init_wq_exit;
1164 ret = qls_sem_lock(ha, Q81_CTL_SEM_MASK_ICB, Q81_CTL_SEM_SET_ICB);
1167 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
1168 goto qls_init_wq_exit;
1171 value = (uint32_t)txr->wq_icb_paddr;
1172 WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_LO, value);
1174 value = (uint32_t)(txr->wq_icb_paddr >> 32);
1175 WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_HI, value);
1177 qls_sem_unlock(ha, Q81_CTL_SEM_MASK_ICB);
1179 value = Q81_CTL_CONFIG_LRQ | Q81_CTL_CONFIG_Q_NUM_MASK;
1180 value = (value << Q81_CTL_CONFIG_MASK_SHIFT) | Q81_CTL_CONFIG_LRQ;
1181 value |= (wid << Q81_CTL_CONFIG_Q_NUM_SHIFT);
1182 WRITE_REG32(ha, Q81_CTL_CONFIG, value);
1184 ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LRQ, 0);
1186 txr->txr_free = NUM_TX_DESCRIPTORS;
1195 qls_hw_add_all_mcast(qla_host_t *ha)
1199 nmcast = ha->nmcast;
1201 for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
1202 if ((ha->mcast[i].addr[0] != 0) ||
1203 (ha->mcast[i].addr[1] != 0) ||
1204 (ha->mcast[i].addr[2] != 0) ||
1205 (ha->mcast[i].addr[3] != 0) ||
1206 (ha->mcast[i].addr[4] != 0) ||
1207 (ha->mcast[i].addr[5] != 0)) {
1208 if (qls_config_mcast_mac_addr(ha, ha->mcast[i].addr,
1210 device_printf(ha->pci_dev, "%s: failed\n",
1222 qls_hw_add_mcast(qla_host_t *ha, uint8_t *mta)
1226 for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
1227 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0)
1228 return 0; /* its been already added */
1231 for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
1232 if ((ha->mcast[i].addr[0] == 0) &&
1233 (ha->mcast[i].addr[1] == 0) &&
1234 (ha->mcast[i].addr[2] == 0) &&
1235 (ha->mcast[i].addr[3] == 0) &&
1236 (ha->mcast[i].addr[4] == 0) &&
1237 (ha->mcast[i].addr[5] == 0)) {
1238 if (qls_config_mcast_mac_addr(ha, mta, 1, i))
1241 bcopy(mta, ha->mcast[i].addr, Q8_MAC_ADDR_LEN);
1251 qls_hw_del_mcast(qla_host_t *ha, uint8_t *mta)
1255 for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
1256 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) {
1257 if (qls_config_mcast_mac_addr(ha, mta, 0, i))
1260 ha->mcast[i].addr[0] = 0;
1261 ha->mcast[i].addr[1] = 0;
1262 ha->mcast[i].addr[2] = 0;
1263 ha->mcast[i].addr[3] = 0;
1264 ha->mcast[i].addr[4] = 0;
1265 ha->mcast[i].addr[5] = 0;
1276 * Name: qls_hw_set_multi
1277 * Function: Sets the Multicast Addresses provided the host O.S into the
1278 * hardware (for the given interface)
1281 qls_hw_set_multi(qla_host_t *ha, uint8_t *mta, uint32_t mcnt,
1286 for (i = 0; i < mcnt; i++) {
1288 if (qls_hw_add_mcast(ha, mta))
1291 if (qls_hw_del_mcast(ha, mta))
1295 mta += Q8_MAC_ADDR_LEN;
1301 qls_update_link_state(qla_host_t *ha)
1303 uint32_t link_state;
1304 uint32_t prev_link_state;
1306 if (!(if_getdrvflags(ha->ifp) & IFF_DRV_RUNNING)) {
1310 link_state = READ_REG32(ha, Q81_CTL_STATUS);
1312 prev_link_state = ha->link_up;
1314 if ((ha->pci_func & 0x1) == 0)
1315 ha->link_up = ((link_state & Q81_CTL_STATUS_PL0)? 1 : 0);
1317 ha->link_up = ((link_state & Q81_CTL_STATUS_PL1)? 1 : 0);
1319 if (prev_link_state != ha->link_up) {
1321 if_link_state_change(ha->ifp, LINK_STATE_UP);
1323 if_link_state_change(ha->ifp, LINK_STATE_DOWN);
1330 qls_free_tx_ring_dma(qla_host_t *ha, int r_idx)
1332 if (ha->tx_ring[r_idx].flags.wq_dma) {
1333 qls_free_dmabuf(ha, &ha->tx_ring[r_idx].wq_dma);
1334 ha->tx_ring[r_idx].flags.wq_dma = 0;
1337 if (ha->tx_ring[r_idx].flags.privb_dma) {
1338 qls_free_dmabuf(ha, &ha->tx_ring[r_idx].privb_dma);
1339 ha->tx_ring[r_idx].flags.privb_dma = 0;
1345 qls_free_tx_dma(qla_host_t *ha)
1350 for (i = 0; i < ha->num_tx_rings; i++) {
1351 qls_free_tx_ring_dma(ha, i);
1353 for (j = 0; j < NUM_TX_DESCRIPTORS; j++) {
1354 txb = &ha->tx_ring[i].tx_buf[j];
1357 bus_dmamap_destroy(ha->tx_tag, txb->map);
1362 if (ha->tx_tag != NULL) {
1363 bus_dma_tag_destroy(ha->tx_tag);
1371 qls_alloc_tx_ring_dma(qla_host_t *ha, int ridx)
1377 device_t dev = ha->pci_dev;
1379 ha->tx_ring[ridx].wq_dma.alignment = 8;
1380 ha->tx_ring[ridx].wq_dma.size =
1381 NUM_TX_DESCRIPTORS * (sizeof (q81_tx_cmd_t));
1383 ret = qls_alloc_dmabuf(ha, &ha->tx_ring[ridx].wq_dma);
1386 device_printf(dev, "%s: [%d] txr failed\n", __func__, ridx);
1387 goto qls_alloc_tx_ring_dma_exit;
1389 ha->tx_ring[ridx].flags.wq_dma = 1;
1391 ha->tx_ring[ridx].privb_dma.alignment = 8;
1392 ha->tx_ring[ridx].privb_dma.size = QLA_TX_PRIVATE_BSIZE;
1394 ret = qls_alloc_dmabuf(ha, &ha->tx_ring[ridx].privb_dma);
1397 device_printf(dev, "%s: [%d] oalb failed\n", __func__, ridx);
1398 goto qls_alloc_tx_ring_dma_exit;
1401 ha->tx_ring[ridx].flags.privb_dma = 1;
1403 ha->tx_ring[ridx].wq_vaddr = ha->tx_ring[ridx].wq_dma.dma_b;
1404 ha->tx_ring[ridx].wq_paddr = ha->tx_ring[ridx].wq_dma.dma_addr;
1406 v_addr = ha->tx_ring[ridx].privb_dma.dma_b;
1407 p_addr = ha->tx_ring[ridx].privb_dma.dma_addr;
1409 ha->tx_ring[ridx].wq_icb_vaddr = v_addr;
1410 ha->tx_ring[ridx].wq_icb_paddr = p_addr;
1412 ha->tx_ring[ridx].txr_cons_vaddr =
1413 (uint32_t *)(v_addr + (PAGE_SIZE >> 1));
1414 ha->tx_ring[ridx].txr_cons_paddr = p_addr + (PAGE_SIZE >> 1);
1416 v_addr = v_addr + (PAGE_SIZE >> 1);
1417 p_addr = p_addr + (PAGE_SIZE >> 1);
1419 txb = ha->tx_ring[ridx].tx_buf;
1421 for (i = 0; i < NUM_TX_DESCRIPTORS; i++) {
1422 txb[i].oal_vaddr = v_addr;
1423 txb[i].oal_paddr = p_addr;
1425 v_addr = v_addr + QLA_OAL_BLK_SIZE;
1426 p_addr = p_addr + QLA_OAL_BLK_SIZE;
1429 qls_alloc_tx_ring_dma_exit:
/*
 * qls_alloc_tx_dma
 *	Create the TX bus_dma tag (sized for the largest TSO frame, up to
 *	QLA_MAX_SEGMENTS page-sized segments), then allocate per-ring DMA
 *	memory and a bus_dmamap per TX descriptor.
 *	On any failure everything allocated so far is released via
 *	qls_free_tx_dma() before returning.
 */
1434 qls_alloc_tx_dma(qla_host_t *ha)
1440 if (bus_dma_tag_create(NULL, /* parent */
1441 1, 0, /* alignment, bounds */
1442 BUS_SPACE_MAXADDR, /* lowaddr */
1443 BUS_SPACE_MAXADDR, /* highaddr */
1444 NULL, NULL, /* filter, filterarg */
1445 QLA_MAX_TSO_FRAME_SIZE, /* maxsize */
1446 QLA_MAX_SEGMENTS, /* nsegments */
1447 PAGE_SIZE, /* maxsegsize */
1448 BUS_DMA_ALLOCNOW, /* flags */
1449 NULL, /* lockfunc */
1450 NULL, /* lockfuncarg */
1452 device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n",
/* Per-ring descriptor/OAL memory */
1457 for (i = 0; i < ha->num_tx_rings; i++) {
1458 ret = qls_alloc_tx_ring_dma(ha, i);
1461 qls_free_tx_dma(ha);
/* One DMA map per TX descriptor so mbufs can be loaded at send time */
1465 for (j = 0; j < NUM_TX_DESCRIPTORS; j++) {
1466 txb = &ha->tx_ring[i].tx_buf[j];
1468 ret = bus_dmamap_create(ha->tx_tag,
1469 BUS_DMA_NOWAIT, &txb->map);
1471 ha->err_tx_dmamap_create++;
1472 device_printf(ha->pci_dev,
1473 "%s: bus_dmamap_create failed[%d, %d, %d]\n",
1474 __func__, ret, i, j);
1476 qls_free_tx_dma(ha);
/*
 * qls_free_rss_dma
 *	Release the RSS DMA buffer and clear its allocated flag.
 */
1487 qls_free_rss_dma(qla_host_t *ha)
1489 qls_free_dmabuf(ha, &ha->rss_dma);
1490 ha->flags.rss_dma = 0;
/*
 * qls_alloc_rss_dma
 *	Allocate one page (4-byte aligned) of DMA memory for the RSS
 *	configuration and mark it allocated on success.
 *	Returns the qls_alloc_dmabuf() status.
 */
1494 qls_alloc_rss_dma(qla_host_t *ha)
1498 ha->rss_dma.alignment = 4;
1499 ha->rss_dma.size = PAGE_SIZE;
1501 ret = qls_alloc_dmabuf(ha, &ha->rss_dma);
1504 device_printf(ha->pci_dev, "%s: failed\n", __func__);
1506 ha->flags.rss_dma = 1;
/*
 * qls_free_mpi_dma
 *	Release the MPI (management processor) DMA buffer and clear its flag.
 */
1512 qls_free_mpi_dma(qla_host_t *ha)
1514 qls_free_dmabuf(ha, &ha->mpi_dma);
1515 ha->flags.mpi_dma = 0;
/*
 * qls_alloc_mpi_dma
 *	Allocate the MPI DMA buffer (0x4000 * 4 bytes, 4-byte aligned),
 *	used later by qls_mbx_dump_risc_ram() as the RISC RAM dump target.
 *	Returns the qls_alloc_dmabuf() status.
 */
1519 qls_alloc_mpi_dma(qla_host_t *ha)
1523 ha->mpi_dma.alignment = 4;
1524 ha->mpi_dma.size = (0x4000 * 4);
1526 ret = qls_alloc_dmabuf(ha, &ha->mpi_dma);
1528 device_printf(ha->pci_dev, "%s: failed\n", __func__);
1530 ha->flags.mpi_dma = 1;
/*
 * qls_free_rx_ring_dma
 *	Release the four DMA regions of one receive ring — completion queue,
 *	large buffer queue, small buffer queue and large buffer area — each
 *	guarded by its allocated flag so partial allocations free cleanly.
 */
1536 qls_free_rx_ring_dma(qla_host_t *ha, int ridx)
1538 if (ha->rx_ring[ridx].flags.cq_dma) {
1539 qls_free_dmabuf(ha, &ha->rx_ring[ridx].cq_dma);
1540 ha->rx_ring[ridx].flags.cq_dma = 0;
1543 if (ha->rx_ring[ridx].flags.lbq_dma) {
1544 qls_free_dmabuf(ha, &ha->rx_ring[ridx].lbq_dma);
1545 ha->rx_ring[ridx].flags.lbq_dma = 0;
1548 if (ha->rx_ring[ridx].flags.sbq_dma) {
1549 qls_free_dmabuf(ha, &ha->rx_ring[ridx].sbq_dma);
1550 ha->rx_ring[ridx].flags.sbq_dma = 0;
1553 if (ha->rx_ring[ridx].flags.lb_dma) {
1554 qls_free_dmabuf(ha, &ha->rx_ring[ridx].lb_dma);
1555 ha->rx_ring[ridx].flags.lb_dma = 0;
/*
 * qls_free_rx_dma
 *	Free DMA memory for every receive ring, then destroy the RX
 *	bus_dma tag if one was created.
 */
1561 qls_free_rx_dma(qla_host_t *ha)
1565 for (i = 0; i < ha->num_rx_rings; i++) {
1566 qls_free_rx_ring_dma(ha, i);
1569 if (ha->rx_tag != NULL) {
1570 bus_dma_tag_destroy(ha->rx_tag);
/*
 * qls_alloc_rx_ring_dma
 *	Allocate and lay out the DMA memory for one receive ring:
 *	  - completion queue (plus a trailing page holding the CQ ICB and
 *	    the CQ index register),
 *	  - large buffer queue and its address table,
 *	  - small buffer queue and its address table,
 *	  - the large buffer area itself,
 *	then populate the large/small buffer queue address tables with the
 *	physical addresses of their backing regions.
 *	Returns 0 on success, non-zero qls_alloc_dmabuf() status otherwise.
 *	NOTE(review): chunk is elided; cleanup of a partially allocated ring
 *	on the error path is not visible here — presumably the caller uses
 *	the per-region flags via qls_free_rx_ring_dma().
 */
1578 qls_alloc_rx_ring_dma(qla_host_t *ha, int ridx)
1583 volatile q81_bq_addr_e_t *bq_e;
1584 device_t dev = ha->pci_dev;
/* Completion queue: entries + one extra page for ICB / index register */
1586 ha->rx_ring[ridx].cq_dma.alignment = 128;
1587 ha->rx_ring[ridx].cq_dma.size =
1588 (NUM_CQ_ENTRIES * (sizeof (q81_cq_e_t))) + PAGE_SIZE;
1590 ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].cq_dma);
1593 device_printf(dev, "%s: [%d] cq failed\n", __func__, ridx);
1594 goto qls_alloc_rx_ring_dma_exit;
1596 ha->rx_ring[ridx].flags.cq_dma = 1;
/* Large buffer queue + its address table */
1598 ha->rx_ring[ridx].lbq_dma.alignment = 8;
1599 ha->rx_ring[ridx].lbq_dma.size = QLA_LGBQ_AND_TABLE_SIZE;
1601 ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].lbq_dma);
1604 device_printf(dev, "%s: [%d] lbq failed\n", __func__, ridx);
1605 goto qls_alloc_rx_ring_dma_exit;
1607 ha->rx_ring[ridx].flags.lbq_dma = 1;
/* Small buffer queue + its address table */
1609 ha->rx_ring[ridx].sbq_dma.alignment = 8;
1610 ha->rx_ring[ridx].sbq_dma.size = QLA_SMBQ_AND_TABLE_SIZE;
1612 ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].sbq_dma);
1615 device_printf(dev, "%s: [%d] sbq failed\n", __func__, ridx);
1616 goto qls_alloc_rx_ring_dma_exit;
1618 ha->rx_ring[ridx].flags.sbq_dma = 1;
/* Large receive buffers themselves */
1620 ha->rx_ring[ridx].lb_dma.alignment = 8;
1621 ha->rx_ring[ridx].lb_dma.size = (QLA_LGB_SIZE * QLA_NUM_LGB_ENTRIES);
1623 ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].lb_dma);
1625 device_printf(dev, "%s: [%d] lb failed\n", __func__, ridx);
1626 goto qls_alloc_rx_ring_dma_exit;
1628 ha->rx_ring[ridx].flags.lb_dma = 1;
/* Start from a clean slate before the hardware sees any of it */
1630 bzero(ha->rx_ring[ridx].cq_dma.dma_b, ha->rx_ring[ridx].cq_dma.size);
1631 bzero(ha->rx_ring[ridx].lbq_dma.dma_b, ha->rx_ring[ridx].lbq_dma.size);
1632 bzero(ha->rx_ring[ridx].sbq_dma.dma_b, ha->rx_ring[ridx].sbq_dma.size);
1633 bzero(ha->rx_ring[ridx].lb_dma.dma_b, ha->rx_ring[ridx].lb_dma.size);
1635 /* completion queue */
1636 ha->rx_ring[ridx].cq_base_vaddr = ha->rx_ring[ridx].cq_dma.dma_b;
1637 ha->rx_ring[ridx].cq_base_paddr = ha->rx_ring[ridx].cq_dma.dma_addr;
1639 v_addr = ha->rx_ring[ridx].cq_dma.dma_b;
1640 p_addr = ha->rx_ring[ridx].cq_dma.dma_addr;
/* Skip past the CQ entries to the trailing page */
1642 v_addr = v_addr + (NUM_CQ_ENTRIES * (sizeof (q81_cq_e_t)));
1643 p_addr = p_addr + (NUM_CQ_ENTRIES * (sizeof (q81_cq_e_t)));
1645 /* completion queue icb */
1646 ha->rx_ring[ridx].cq_icb_vaddr = v_addr;
1647 ha->rx_ring[ridx].cq_icb_paddr = p_addr;
/* Index register placed a quarter page after the ICB */
1649 v_addr = v_addr + (PAGE_SIZE >> 2);
1650 p_addr = p_addr + (PAGE_SIZE >> 2);
1652 /* completion queue index register */
1653 ha->rx_ring[ridx].cqi_vaddr = (uint32_t *)v_addr;
1654 ha->rx_ring[ridx].cqi_paddr = p_addr;
1656 v_addr = ha->rx_ring[ridx].lbq_dma.dma_b;
1657 p_addr = ha->rx_ring[ridx].lbq_dma.dma_addr;
1659 /* large buffer queue address table */
1660 ha->rx_ring[ridx].lbq_addr_tbl_vaddr = v_addr;
1661 ha->rx_ring[ridx].lbq_addr_tbl_paddr = p_addr;
1663 /* large buffer queue */
1664 ha->rx_ring[ridx].lbq_vaddr = v_addr + PAGE_SIZE;
1665 ha->rx_ring[ridx].lbq_paddr = p_addr + PAGE_SIZE;
1667 v_addr = ha->rx_ring[ridx].sbq_dma.dma_b;
1668 p_addr = ha->rx_ring[ridx].sbq_dma.dma_addr;
1670 /* small buffer queue address table */
1671 ha->rx_ring[ridx].sbq_addr_tbl_vaddr = v_addr;
1672 ha->rx_ring[ridx].sbq_addr_tbl_paddr = p_addr;
1674 /* small buffer queue */
1675 ha->rx_ring[ridx].sbq_vaddr = v_addr + PAGE_SIZE;
1676 ha->rx_ring[ridx].sbq_paddr = p_addr + PAGE_SIZE;
1678 ha->rx_ring[ridx].lb_vaddr = ha->rx_ring[ridx].lb_dma.dma_b;
1679 ha->rx_ring[ridx].lb_paddr = ha->rx_ring[ridx].lb_dma.dma_addr;
1681 /* Initialize Large Buffer Queue Table */
1683 p_addr = ha->rx_ring[ridx].lbq_paddr;
1684 bq_e = ha->rx_ring[ridx].lbq_addr_tbl_vaddr;
/* Table entries are split into 32-bit low/high address halves */
1686 bq_e->addr_lo = p_addr & 0xFFFFFFFF;
1687 bq_e->addr_hi = (p_addr >> 32) & 0xFFFFFFFF;
1689 p_addr = ha->rx_ring[ridx].lb_paddr;
1690 bq_e = ha->rx_ring[ridx].lbq_vaddr;
/* Point each LBQ entry at its QLA_LGB_SIZE slice of the buffer area */
1692 for (i = 0; i < QLA_NUM_LGB_ENTRIES; i++) {
1693 bq_e->addr_lo = p_addr & 0xFFFFFFFF;
1694 bq_e->addr_hi = (p_addr >> 32) & 0xFFFFFFFF;
1696 p_addr = p_addr + QLA_LGB_SIZE;
1700 /* Initialize Small Buffer Queue Table */
1702 p_addr = ha->rx_ring[ridx].sbq_paddr;
1703 bq_e = ha->rx_ring[ridx].sbq_addr_tbl_vaddr;
1705 for (i =0; i < (QLA_SBQ_SIZE/QLA_PAGE_SIZE); i++) {
1706 bq_e->addr_lo = p_addr & 0xFFFFFFFF;
1707 bq_e->addr_hi = (p_addr >> 32) & 0xFFFFFFFF;
1709 p_addr = p_addr + QLA_PAGE_SIZE;
1713 qls_alloc_rx_ring_dma_exit:
/*
 * qls_alloc_rx_dma
 *	Create the RX bus_dma tag (single segment up to MJUM9BYTES, i.e.
 *	9K jumbo cluster), then allocate per-ring receive DMA memory.
 *	On a ring allocation failure, everything allocated so far is
 *	released via qls_free_rx_dma().
 */
1718 qls_alloc_rx_dma(qla_host_t *ha)
1723 if (bus_dma_tag_create(NULL, /* parent */
1724 1, 0, /* alignment, bounds */
1725 BUS_SPACE_MAXADDR, /* lowaddr */
1726 BUS_SPACE_MAXADDR, /* highaddr */
1727 NULL, NULL, /* filter, filterarg */
1728 MJUM9BYTES, /* maxsize */
1730 MJUM9BYTES, /* maxsegsize */
1731 BUS_DMA_ALLOCNOW, /* flags */
1732 NULL, /* lockfunc */
1733 NULL, /* lockfuncarg */
1735 device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
1741 for (i = 0; i < ha->num_rx_rings; i++) {
1742 ret = qls_alloc_rx_ring_dma(ha, i);
1745 qls_free_rx_dma(ha);
/*
 * qls_wait_for_flash_ready
 *	Poll the flash address register until the RDY bit is set, bailing
 *	out early if the ERR bit is seen; delays 100us between polls.
 *	NOTE(review): the loop bound/counter and the success/failure return
 *	values are elided from this chunk — presumably a bounded retry loop
 *	returning 0 on ready, non-zero on error/timeout.
 */
1754 qls_wait_for_flash_ready(qla_host_t *ha)
1760 data32 = READ_REG32(ha, Q81_CTL_FLASH_ADDR);
1762 if (data32 & Q81_CTL_FLASH_ADDR_ERR)
1763 goto qls_wait_for_flash_ready_exit;
1765 if (data32 & Q81_CTL_FLASH_ADDR_RDY)
1768 QLA_USEC_DELAY(100);
1771 qls_wait_for_flash_ready_exit:
1772 QL_DPRINT1((ha->pci_dev, "%s: failed\n", __func__));
1778 * Name: qls_rd_flash32
1779 * Function: Read Flash Memory
/*
 * Reads one 32-bit word from flash at 'addr' into '*data': wait for the
 * flash interface to be ready, issue the read by writing the address with
 * the read-command bit set, wait for completion, then latch the data
 * register.  Returns non-zero if either ready-wait fails.
 */
1782 qls_rd_flash32(qla_host_t *ha, uint32_t addr, uint32_t *data)
1786 ret = qls_wait_for_flash_ready(ha);
1791 WRITE_REG32(ha, Q81_CTL_FLASH_ADDR, (addr | Q81_CTL_FLASH_ADDR_R));
1793 ret = qls_wait_for_flash_ready(ha);
1798 *data = READ_REG32(ha, Q81_CTL_FLASH_DATA);
/*
 * qls_flash_validate
 *	Validate the flash image previously read into ha->flash:
 *	the first 4 ID bytes must match 'signature', and the 16-bit
 *	word-wise sum of the whole q81_flash_t must check out (the
 *	comparison against the expected value is elided in this chunk;
 *	presumably the sum must equal zero).
 */
1804 qls_flash_validate(qla_host_t *ha, const char *signature)
1806 uint16_t csum16 = 0;
1810 if (bcmp(ha->flash.id, signature, 4)) {
1811 QL_DPRINT1((ha->pci_dev, "%s: invalid signature "
1812 "%x:%x:%x:%x %s\n", __func__, ha->flash.id[0],
1813 ha->flash.id[1], ha->flash.id[2], ha->flash.id[3],
/* Sum the structure as 16-bit words */
1818 data16 = (uint16_t *)&ha->flash;
1820 for (i = 0; i < (sizeof (q81_flash_t) >> 1); i++) {
1821 csum16 += *data16++;
1825 QL_DPRINT1((ha->pci_dev, "%s: invalid checksum\n", __func__));
/*
 * qls_rd_nic_params
 *	Read this function's NIC parameter block from flash under the flash
 *	hardware semaphore: pick the per-PCI-function flash offset, read the
 *	q81_flash_t word by word, validate it against Q81_FLASH_ID, and on
 *	success copy the MAC address into ha->mac_addr.
 *	The semaphore is always released before returning.
 */
1832 qls_rd_nic_params(qla_host_t *ha)
1838 if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_FLASH, Q81_CTL_SEM_SET_FLASH)) {
1839 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
/* Even/odd PCI function selects which flash region to read */
1843 if ((ha->pci_func & 0x1) == 0)
1844 faddr = Q81_F0_FLASH_OFFSET >> 2;
1846 faddr = Q81_F1_FLASH_OFFSET >> 2;
1848 qflash = (uint32_t *)&ha->flash;
1850 for (i = 0; i < (sizeof(q81_flash_t) >> 2) ; i++) {
1851 ret = qls_rd_flash32(ha, faddr, qflash);
1854 goto qls_rd_flash_data_exit;
1860 QL_DUMP_BUFFER8(ha, __func__, (&ha->flash), (sizeof (q81_flash_t)));
1862 ret = qls_flash_validate(ha, Q81_FLASH_ID);
1865 goto qls_rd_flash_data_exit;
1867 bcopy(ha->flash.mac_addr0, ha->mac_addr, ETHER_ADDR_LEN);
1869 QL_DPRINT1((ha->pci_dev, "%s: mac %02x:%02x:%02x:%02x:%02x:%02x\n",
1870 __func__, ha->mac_addr[0], ha->mac_addr[1], ha->mac_addr[2],
1871 ha->mac_addr[3], ha->mac_addr[4], ha->mac_addr[5]));
1873 qls_rd_flash_data_exit:
1875 qls_sem_unlock(ha, Q81_CTL_SEM_MASK_FLASH);
/*
 * qls_sem_lock
 *	Acquire a hardware semaphore by writing (mask|value) to the
 *	semaphore register and reading it back, retrying with a 100us delay
 *	(count starts at 30; the success check and loop structure are elided
 *	in this chunk).  On timeout the driver flags recovery via
 *	ha->qla_initiate_recovery.
 */
1881 qls_sem_lock(qla_host_t *ha, uint32_t mask, uint32_t value)
1883 uint32_t count = 30;
1887 WRITE_REG32(ha, Q81_CTL_SEMAPHORE, (mask|value));
1889 data = READ_REG32(ha, Q81_CTL_SEMAPHORE);
1894 QLA_USEC_DELAY(100);
1897 ha->qla_initiate_recovery = 1;
/*
 * qls_sem_unlock
 *	Release a hardware semaphore by writing just the mask (no set bits)
 *	to the semaphore register.
 */
1902 qls_sem_unlock(qla_host_t *ha, uint32_t mask)
1904 WRITE_REG32(ha, Q81_CTL_SEMAPHORE, mask);
/*
 * qls_wait_for_proc_addr_ready
 *	Poll the processor-address register until RDY is set, bail on ERR,
 *	100us between polls (loop bound elided in this chunk).  On failure
 *	it requests adapter recovery before returning.
 */
1908 qls_wait_for_proc_addr_ready(qla_host_t *ha)
1914 data32 = READ_REG32(ha, Q81_CTL_PROC_ADDR);
1916 if (data32 & Q81_CTL_PROC_ADDR_ERR)
1917 goto qls_wait_for_proc_addr_ready_exit;
1919 if (data32 & Q81_CTL_PROC_ADDR_RDY)
1922 QLA_USEC_DELAY(100);
1925 qls_wait_for_proc_addr_ready_exit:
1926 QL_DPRINT1((ha->pci_dev, "%s: failed\n", __func__));
1928 ha->qla_initiate_recovery = 1;
/*
 * qls_proc_addr_rd_reg
 *	Indirect register read through the processor-address interface:
 *	wait ready, write (module | reg | READ) to the address register,
 *	wait ready again, then read the data register into '*data'.
 *	Returns non-zero if either ready-wait fails.
 */
1933 qls_proc_addr_rd_reg(qla_host_t *ha, uint32_t addr_module, uint32_t reg,
1939 ret = qls_wait_for_proc_addr_ready(ha);
1942 goto qls_proc_addr_rd_reg_exit;
1944 value = addr_module | reg | Q81_CTL_PROC_ADDR_READ;
1946 WRITE_REG32(ha, Q81_CTL_PROC_ADDR, value);
1948 ret = qls_wait_for_proc_addr_ready(ha);
1951 goto qls_proc_addr_rd_reg_exit;
1953 *data = READ_REG32(ha, Q81_CTL_PROC_DATA);
1955 qls_proc_addr_rd_reg_exit:
/*
 * qls_proc_addr_wr_reg
 *	Indirect register write through the processor-address interface:
 *	wait ready, load the data register, then write (module | reg) to
 *	the address register (no READ bit = write) and wait for completion.
 */
1960 qls_proc_addr_wr_reg(qla_host_t *ha, uint32_t addr_module, uint32_t reg,
1966 ret = qls_wait_for_proc_addr_ready(ha);
1969 goto qls_proc_addr_wr_reg_exit;
1971 WRITE_REG32(ha, Q81_CTL_PROC_DATA, data);
1973 value = addr_module | reg;
1975 WRITE_REG32(ha, Q81_CTL_PROC_ADDR, value);
1977 ret = qls_wait_for_proc_addr_ready(ha);
1979 qls_proc_addr_wr_reg_exit:
/*
 * qls_hw_nic_reset
 *	Reset this NIC function: write the function-reset command (mask +
 *	bit) to the reset register, then poll until the function-reset bit
 *	clears (loop bound elided in this chunk).  Prints a diagnostic if
 *	the bit never clears.
 */
1984 qls_hw_nic_reset(qla_host_t *ha)
1988 device_t dev = ha->pci_dev;
1992 data = (Q81_CTL_RESET_FUNC << Q81_CTL_RESET_MASK_SHIFT) |
1994 WRITE_REG32(ha, Q81_CTL_RESET, data);
1998 data = READ_REG32(ha, Q81_CTL_RESET);
1999 if ((data & Q81_CTL_RESET_FUNC) == 0)
2004 device_printf(dev, "%s: Bit 15 not cleared after Reset\n",
/*
 * qls_hw_reset
 *	Full reset sequence for an already-initialized adapter:
 *	  1. first-time init (hw_init == 0) short-circuits to a bare
 *	     NIC-function reset;
 *	  2. otherwise clear the routing table, ask the management firmware
 *	     to pause (MGMT_CTL_STOP), wait for the NIC FIFO to drain
 *	     (STATUS_NFE) and for the firmware to confirm FIFO-empty +
 *	     management-stopped, then reset the NIC function and resume
 *	     management (MGMT_CTL_RESUME).
 *	Polling loop bounds are elided in this chunk.
 */
2012 qls_hw_reset(qla_host_t *ha)
2014 device_t dev = ha->pci_dev;
2019 QL_DPRINT2((ha->pci_dev, "%s:enter[%d]\n", __func__, ha->hw_init));
/* First-time init: a plain function reset is sufficient */
2021 if (ha->hw_init == 0) {
2022 ret = qls_hw_nic_reset(ha);
2023 goto qls_hw_reset_exit;
2026 ret = qls_clear_routing_table(ha);
2028 goto qls_hw_reset_exit;
/* Pause the management firmware before touching the NIC function */
2030 ret = qls_mbx_set_mgmt_ctrl(ha, Q81_MBX_SET_MGMT_CTL_STOP);
2032 goto qls_hw_reset_exit;
2035 * Wait for FIFO to empty
2039 data = READ_REG32(ha, Q81_CTL_STATUS);
2040 if (data & Q81_CTL_STATUS_NFE)
2042 qls_mdelay(__func__, 100);
2045 device_printf(dev, "%s: NFE bit not set\n", __func__);
2046 goto qls_hw_reset_exit;
/* Confirm firmware reports FIFO empty and management stopped */
2051 (void)qls_mbx_get_mgmt_ctrl(ha, &data);
2053 if ((data & Q81_MBX_GET_MGMT_CTL_FIFO_EMPTY) &&
2054 (data & Q81_MBX_GET_MGMT_CTL_SET_MGMT))
2056 qls_mdelay(__func__, 100);
2059 goto qls_hw_reset_exit;
2062 * Reset the NIC function
2064 ret = qls_hw_nic_reset(ha);
2066 goto qls_hw_reset_exit;
/* Let the management firmware run again */
2068 ret = qls_mbx_set_mgmt_ctrl(ha, Q81_MBX_SET_MGMT_CTL_RESUME);
2072 device_printf(dev, "%s: failed\n", __func__);
2078 * MPI Related Functions
/*
 * qls_mpi_risc_rd_reg
 *	Read an MPI RISC register via the indirect processor-address
 *	interface (thin wrapper around qls_proc_addr_rd_reg()).
 */
2081 qls_mpi_risc_rd_reg(qla_host_t *ha, uint32_t reg, uint32_t *data)
2085 ret = qls_proc_addr_rd_reg(ha, Q81_CTL_PROC_ADDR_MPI_RISC,
/*
 * qls_mpi_risc_wr_reg
 *	Write an MPI RISC register via the indirect processor-address
 *	interface (thin wrapper around qls_proc_addr_wr_reg()).
 */
2091 qls_mpi_risc_wr_reg(qla_host_t *ha, uint32_t reg, uint32_t data)
2095 ret = qls_proc_addr_wr_reg(ha, Q81_CTL_PROC_ADDR_MPI_RISC,
/*
 * qls_mbx_rd_reg
 *	Read outbound mailbox register 'reg' for this PCI function
 *	(function 0 and function 1 have separate mailbox-out banks).
 */
2101 qls_mbx_rd_reg(qla_host_t *ha, uint32_t reg, uint32_t *data)
2105 if ((ha->pci_func & 0x1) == 0)
2106 reg += Q81_FUNC0_MBX_OUT_REG0;
2108 reg += Q81_FUNC1_MBX_OUT_REG0;
2110 ret = qls_mpi_risc_rd_reg(ha, reg, data);
/*
 * qls_mbx_wr_reg
 *	Write inbound mailbox register 'reg' for this PCI function
 *	(function 0 and function 1 have separate mailbox-in banks).
 */
2116 qls_mbx_wr_reg(qla_host_t *ha, uint32_t reg, uint32_t data)
2120 if ((ha->pci_func & 0x1) == 0)
2121 reg += Q81_FUNC0_MBX_IN_REG0;
2123 reg += Q81_FUNC1_MBX_IN_REG0;
2125 ret = qls_mpi_risc_wr_reg(ha, reg, data);
/*
 * qls_mbx_cmd
 *	Issue a mailbox command to the MPI firmware and collect the reply.
 *	  - Refuses to start if a host-to-RISC interrupt is still pending.
 *	  - Writes i_count input mailboxes under the NIC-RCV proc-addr
 *	    semaphore, then raises the HTR interrupt to kick the firmware.
 *	  - With interrupts disabled, polls the status register for the
 *	    completion (PI) bit and reads the reply mailboxes directly:
 *	    mbox0 0x4xxx means command status, 0x8xxx means an async event
 *	    (acknowledged by clearing the RTH interrupt).  With interrupts
 *	    enabled, the reply is taken from ha->mbox[] filled by the ISR.
 *	  - On overall timeout (count starts at 50, 1s steps) the RTH
 *	    interrupt is cleared if polled, and recovery is requested.
 *	NOTE(review): several loop/exit lines are elided in this chunk;
 *	exact retry structure and return values are not visible here.
 */
2131 qls_mbx_cmd(qla_host_t *ha, uint32_t *in_mbx, uint32_t i_count,
2132 uint32_t *out_mbx, uint32_t o_count)
2136 uint32_t count = 50;
2138 QL_DPRINT2((ha->pci_dev, "%s: enter[0x%08x 0x%08x 0x%08x]\n",
2139 __func__, *in_mbx, *(in_mbx + 1), *(in_mbx + 2)));
/* A previous command must have been consumed before starting a new one */
2141 data32 = READ_REG32(ha, Q81_CTL_HOST_CMD_STATUS);
2143 if (data32 & Q81_CTL_HCS_HTR_INTR) {
2144 device_printf(ha->pci_dev, "%s: cmd_status[0x%08x]\n",
2146 goto qls_mbx_cmd_exit;
2149 if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_PROC_ADDR_NIC_RCV,
2150 Q81_CTL_SEM_SET_PROC_ADDR_NIC_RCV)) {
2151 device_printf(ha->pci_dev, "%s: semlock failed\n", __func__);
2152 goto qls_mbx_cmd_exit;
/* Load the input mailboxes */
2157 for (i = 0; i < i_count; i++) {
2158 ret = qls_mbx_wr_reg(ha, i, *in_mbx);
2161 device_printf(ha->pci_dev,
2162 "%s: mbx_wr[%d, 0x%08x] failed\n", __func__,
2164 qls_sem_unlock(ha, Q81_CTL_SEM_MASK_PROC_ADDR_NIC_RCV);
2165 goto qls_mbx_cmd_exit;
/* Kick the firmware: host-to-RISC interrupt */
2170 WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS, Q81_CTL_HCS_CMD_SET_HTR_INTR);
2172 qls_sem_unlock(ha, Q81_CTL_SEM_MASK_PROC_ADDR_NIC_RCV);
/* Polled completion path (interrupts disabled) */
2178 if (ha->flags.intr_enable == 0) {
2179 data32 = READ_REG32(ha, Q81_CTL_STATUS);
2181 if (!(data32 & Q81_CTL_STATUS_PI)) {
2182 qls_mdelay(__func__, 100);
2186 ret = qls_mbx_rd_reg(ha, 0, &data32);
/* mbox0 0x4xxx = command completion status */
2189 if ((data32 & 0xF000) == 0x4000) {
2190 out_mbx[0] = data32;
2192 for (i = 1; i < o_count; i++) {
2193 ret = qls_mbx_rd_reg(ha, i,
2203 out_mbx[i] = data32;
/* mbox0 0x8xxx = async event; ack by clearing RTH interrupt */
2206 } else if ((data32 & 0xF000) == 0x8000) {
2209 Q81_CTL_HOST_CMD_STATUS,\
2210 Q81_CTL_HCS_CMD_CLR_RTH_INTR);
/* Interrupt-driven path: ISR has stashed the reply in ha->mbox[] */
2215 for (i = 1; i < o_count; i++) {
2216 out_mbx[i] = ha->mbox[i];
2222 qls_mdelay(__func__, 1000);
2227 if (ha->flags.intr_enable == 0) {
2228 WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS,\
2229 Q81_CTL_HCS_CMD_CLR_RTH_INTR);
2233 ha->qla_initiate_recovery = 1;
2236 QL_DPRINT2((ha->pci_dev, "%s: exit[%d]\n", __func__, ret));
/*
 * qls_mbx_set_mgmt_ctrl
 *	Send the SET_MGMT_CTL mailbox command with control word t_ctrl
 *	(e.g. STOP/RESUME).  Success is CMD_COMPLETE; for a STOP request a
 *	CMD_ERROR reply is also tolerated (presumably "already stopped").
 */
2241 qls_mbx_set_mgmt_ctrl(qla_host_t *ha, uint32_t t_ctrl)
2244 device_t dev = ha->pci_dev;
2247 bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
2249 mbox[0] = Q81_MBX_SET_MGMT_CTL;
2252 if (qls_mbx_cmd(ha, mbox, 2, mbox, 1)) {
2253 device_printf(dev, "%s failed\n", __func__);
2257 if ((mbox[0] == Q81_MBX_CMD_COMPLETE) ||
2258 ((t_ctrl == Q81_MBX_SET_MGMT_CTL_STOP) &&
2259 (mbox[0] == Q81_MBX_CMD_ERROR))){
2262 device_printf(dev, "%s failed [0x%08x]\n", __func__, mbox[0]);
/*
 * qls_mbx_get_mgmt_ctrl
 *	Query management-control status via the GET_MGMT_CTL mailbox
 *	command; the status word from mbox[1] is returned in '*t_status'.
 */
2268 qls_mbx_get_mgmt_ctrl(qla_host_t *ha, uint32_t *t_status)
2271 device_t dev = ha->pci_dev;
2276 bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
2278 mbox[0] = Q81_MBX_GET_MGMT_CTL;
2280 if (qls_mbx_cmd(ha, mbox, 1, mbox, 2)) {
2281 device_printf(dev, "%s failed\n", __func__);
2285 *t_status = mbox[1];
/*
 * qls_mbx_get_link_status
 *	Issue the GET_LNK_STATUS mailbox command and cache the five reply
 *	words (link status, link-down info, hw info, DCBX counters, link
 *	change counters) in the softc; also prints all six mailboxes.
 */
2291 qls_mbx_get_link_status(qla_host_t *ha)
2294 device_t dev = ha->pci_dev;
2297 bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
2299 mbox[0] = Q81_MBX_GET_LNK_STATUS;
2301 if (qls_mbx_cmd(ha, mbox, 1, mbox, 6)) {
2302 device_printf(dev, "%s failed\n", __func__);
2306 ha->link_status = mbox[1];
2307 ha->link_down_info = mbox[2];
2308 ha->link_hw_info = mbox[3];
2309 ha->link_dcbx_counters = mbox[4];
2310 ha->link_change_counters = mbox[5];
2312 device_printf(dev, "%s 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
2313 __func__, mbox[0],mbox[1],mbox[2],mbox[3],mbox[4],mbox[5]);
/*
 * qls_mbx_about_fw
 *	Issue the ABOUT_FW mailbox command and print the six reply words
 *	(firmware identification/version information).
 */
2319 qls_mbx_about_fw(qla_host_t *ha)
2322 device_t dev = ha->pci_dev;
2325 bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
2327 mbox[0] = Q81_MBX_ABOUT_FW;
2329 if (qls_mbx_cmd(ha, mbox, 1, mbox, 6)) {
2330 device_printf(dev, "%s failed\n", __func__);
2334 device_printf(dev, "%s 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
2335 __func__, mbox[0],mbox[1],mbox[2],mbox[3],mbox[4],mbox[5]);
/*
 * qls_mbx_dump_risc_ram
 *	Dump r_size 32-bit words of MPI RISC RAM starting at r_addr into
 *	'buf', using the preallocated ha->mpi_dma buffer as the DMA target.
 *	The 64-bit DMA physical address and the 32-bit RAM address/size are
 *	split into 16-bit mailbox fields (mbox1..mbox8), with DMA syncs
 *	bracketing the command.  A reply mbox0 other than 0x4000
 *	(command-complete) is treated as failure.
 */
2339 qls_mbx_dump_risc_ram(qla_host_t *ha, void *buf, uint32_t r_addr,
2344 device_t dev = ha->pci_dev;
2347 bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
2349 bzero(ha->mpi_dma.dma_b,(r_size << 2));
2350 b_paddr = ha->mpi_dma.dma_addr;
/* Scatter the 64-bit buffer address and 32-bit addr/size across
 * 16-bit mailbox registers per the firmware's expected layout */
2352 mbox[0] = Q81_MBX_DUMP_RISC_RAM;
2353 mbox[1] = r_addr & 0xFFFF;
2354 mbox[2] = ((uint32_t)(b_paddr >> 16)) & 0xFFFF;
2355 mbox[3] = ((uint32_t)b_paddr) & 0xFFFF;
2356 mbox[4] = (r_size >> 16) & 0xFFFF;
2357 mbox[5] = r_size & 0xFFFF;
2358 mbox[6] = ((uint32_t)(b_paddr >> 48)) & 0xFFFF;
2359 mbox[7] = ((uint32_t)(b_paddr >> 32)) & 0xFFFF;
2360 mbox[8] = (r_addr >> 16) & 0xFFFF;
2362 bus_dmamap_sync(ha->mpi_dma.dma_tag, ha->mpi_dma.dma_map,
2363 BUS_DMASYNC_PREREAD);
2365 if (qls_mbx_cmd(ha, mbox, 9, mbox, 1)) {
2366 device_printf(dev, "%s failed\n", __func__);
2369 if (mbox[0] != 0x4000) {
2370 device_printf(ha->pci_dev, "%s: failed!\n", __func__);
/* Firmware has DMA'd the dump; sync before copying it out */
2373 bus_dmamap_sync(ha->mpi_dma.dma_tag, ha->mpi_dma.dma_map,
2374 BUS_DMASYNC_POSTREAD);
2375 bcopy(ha->mpi_dma.dma_b, buf, (r_size << 2));
2382 qls_mpi_reset(qla_host_t *ha)
2386 device_t dev = ha->pci_dev;
2388 WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS,\
2389 Q81_CTL_HCS_CMD_SET_RISC_RESET);
2393 data = READ_REG32(ha, Q81_CTL_HOST_CMD_STATUS);
2394 if (data & Q81_CTL_HCS_RISC_RESET) {
2395 WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS,\
2396 Q81_CTL_HCS_CMD_CLR_RISC_RESET);
2399 qls_mdelay(__func__, 10);
2402 device_printf(dev, "%s: failed\n", __func__);