]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/dev/qlxge/qls_hw.c
LinuxKPI: sk_buff: implement skb_queue_splice_tail_init()
[FreeBSD/FreeBSD.git] / sys / dev / qlxge / qls_hw.c
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2013-2014 Qlogic Corporation
5  * All rights reserved.
6  *
7  *  Redistribution and use in source and binary forms, with or without
8  *  modification, are permitted provided that the following conditions
9  *  are met:
10  *
11  *  1. Redistributions of source code must retain the above copyright
12  *     notice, this list of conditions and the following disclaimer.
13  *  2. Redistributions in binary form must reproduce the above copyright
14  *     notice, this list of conditions and the following disclaimer in the
15  *     documentation and/or other materials provided with the distribution.
16  *
17  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
21  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27  *  POSSIBILITY OF SUCH DAMAGE.
28  */
29
30 /*
31  * File: qls_hw.c
32  * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
33  * Content: Contains Hardware dependent functions
34  */
35 #include <sys/cdefs.h>
36 #include "qls_os.h"
37 #include "qls_hw.h"
38 #include "qls_def.h"
39 #include "qls_inline.h"
40 #include "qls_ver.h"
41 #include "qls_glbl.h"
42 #include "qls_dbg.h"
43
44 /*
45  * Static Functions
46  */
47 static int qls_wait_for_mac_proto_idx_ready(qla_host_t *ha, uint32_t op);
48 static int qls_config_unicast_mac_addr(qla_host_t *ha, uint32_t add_mac);
49 static int qls_config_mcast_mac_addr(qla_host_t *ha, uint8_t *mac_addr,
50                 uint32_t add_mac, uint32_t index);
51
52 static int qls_init_rss(qla_host_t *ha);
53 static int qls_init_comp_queue(qla_host_t *ha, int cid);
54 static int qls_init_work_queue(qla_host_t *ha, int wid);
55 static int qls_init_fw_routing_table(qla_host_t *ha);
56 static int qls_hw_add_all_mcast(qla_host_t *ha);
57 static int qls_hw_add_mcast(qla_host_t *ha, uint8_t *mta);
58 static int qls_hw_del_mcast(qla_host_t *ha, uint8_t *mta);
59 static int qls_wait_for_flash_ready(qla_host_t *ha);
60
61 static int qls_sem_lock(qla_host_t *ha, uint32_t mask, uint32_t value);
62 static void qls_sem_unlock(qla_host_t *ha, uint32_t mask);
63
64 static void qls_free_tx_dma(qla_host_t *ha);
65 static int qls_alloc_tx_dma(qla_host_t *ha);
66 static void qls_free_rx_dma(qla_host_t *ha);
67 static int qls_alloc_rx_dma(qla_host_t *ha);
68 static void qls_free_mpi_dma(qla_host_t *ha);
69 static int qls_alloc_mpi_dma(qla_host_t *ha);
70 static void qls_free_rss_dma(qla_host_t *ha);
71 static int qls_alloc_rss_dma(qla_host_t *ha);
72
73 static int qls_flash_validate(qla_host_t *ha, const char *signature);
74
75 static int qls_wait_for_proc_addr_ready(qla_host_t *ha);
76 static int qls_proc_addr_rd_reg(qla_host_t *ha, uint32_t addr_module,
77                 uint32_t reg, uint32_t *data);
78 static int qls_proc_addr_wr_reg(qla_host_t *ha, uint32_t addr_module,
79                 uint32_t reg, uint32_t data);
80
81 static int qls_hw_reset(qla_host_t *ha);
82
83 /*
84  * MPI Related Functions
85  */
86 static int qls_mbx_cmd(qla_host_t *ha, uint32_t *in_mbx, uint32_t i_count,
87                 uint32_t *out_mbx, uint32_t o_count);
88 static int qls_mbx_set_mgmt_ctrl(qla_host_t *ha, uint32_t t_ctrl);
89 static int qls_mbx_get_mgmt_ctrl(qla_host_t *ha, uint32_t *t_status);
90 static void qls_mbx_get_link_status(qla_host_t *ha);
91 static void qls_mbx_about_fw(qla_host_t *ha);
92
93 int
94 qls_get_msix_count(qla_host_t *ha)
95 {
96         return (ha->num_rx_rings);
97 }
98
99 static int
100 qls_syctl_mpi_dump(SYSCTL_HANDLER_ARGS)
101 {
102         int err = 0, ret;
103         qla_host_t *ha;
104
105         err = sysctl_handle_int(oidp, &ret, 0, req);
106
107         if (err || !req->newptr)
108                 return (err);
109
110         if (ret == 1) {
111                 ha = (qla_host_t *)arg1;
112                 qls_mpi_core_dump(ha);
113         }
114         return (err);
115 }
116
117 static int
118 qls_syctl_link_status(SYSCTL_HANDLER_ARGS)
119 {
120         int err = 0, ret;
121         qla_host_t *ha;
122
123         err = sysctl_handle_int(oidp, &ret, 0, req);
124
125         if (err || !req->newptr)
126                 return (err);
127
128         if (ret == 1) {
129                 ha = (qla_host_t *)arg1;
130                 qls_mbx_get_link_status(ha);
131                 qls_mbx_about_fw(ha);
132         }
133         return (err);
134 }
135
136 void
137 qls_hw_add_sysctls(qla_host_t *ha)
138 {
139         device_t        dev;
140
141         dev = ha->pci_dev;
142
143         ha->num_rx_rings = MAX_RX_RINGS; ha->num_tx_rings = MAX_TX_RINGS;
144
145         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
146                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
147                 OID_AUTO, "num_rx_rings", CTLFLAG_RD, &ha->num_rx_rings,
148                 ha->num_rx_rings, "Number of Completion Queues");
149
150         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
151                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
152                 OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->num_tx_rings,
153                 ha->num_tx_rings, "Number of Transmit Rings");
154
155         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
156             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
157             OID_AUTO, "mpi_dump",
158             CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)ha, 0,
159             qls_syctl_mpi_dump, "I", "MPI Dump");
160
161         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
162             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
163             OID_AUTO, "link_status",
164             CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)ha, 0,
165             qls_syctl_link_status, "I", "Link Status");
166 }
167
168 /*
169  * Name: qls_free_dma
170  * Function: Frees the DMA'able memory allocated in qls_alloc_dma()
171  */
172 void
173 qls_free_dma(qla_host_t *ha)
174 {
175         qls_free_rss_dma(ha);
176         qls_free_mpi_dma(ha);
177         qls_free_tx_dma(ha);
178         qls_free_rx_dma(ha);
179         return;
180 }
181
182 /*
183  * Name: qls_alloc_dma
184  * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
185  */
186 int
187 qls_alloc_dma(qla_host_t *ha)
188 {
189         if (qls_alloc_rx_dma(ha))
190                 return (-1);
191
192         if (qls_alloc_tx_dma(ha)) {
193                 qls_free_rx_dma(ha);
194                 return (-1);
195         }
196
197         if (qls_alloc_mpi_dma(ha)) {
198                 qls_free_tx_dma(ha);
199                 qls_free_rx_dma(ha);
200                 return (-1);
201         }
202
203         if (qls_alloc_rss_dma(ha)) {
204                 qls_free_mpi_dma(ha);
205                 qls_free_tx_dma(ha);
206                 qls_free_rx_dma(ha);
207                 return (-1);
208         }
209
210         return (0);
211 }
212
213 static int
214 qls_wait_for_mac_proto_idx_ready(qla_host_t *ha, uint32_t op)
215 {
216         uint32_t data32;
217         uint32_t count = 3;
218
219         while (count--) {
220                 data32 = READ_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX);
221
222                 if (data32 & op)
223                         return (0);
224
225                 QLA_USEC_DELAY(100);
226         }
227         ha->qla_initiate_recovery = 1;
228         return (-1);
229 }
230
231 /*
232  * Name: qls_config_unicast_mac_addr
233  * Function: binds/unbinds a unicast MAC address to the interface.
234  */
235 static int
236 qls_config_unicast_mac_addr(qla_host_t *ha, uint32_t add_mac)
237 {
238         int ret = 0;
239         uint32_t mac_upper = 0;
240         uint32_t mac_lower = 0;
241         uint32_t value = 0, index;
242
243         if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_MAC_SERDES,
244                 Q81_CTL_SEM_SET_MAC_SERDES)) {
245                 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
246                 return(-1);
247         }
248
249         if (add_mac) {
250                 mac_upper = (ha->mac_addr[0] << 8) | ha->mac_addr[1];
251                 mac_lower = (ha->mac_addr[2] << 24) | (ha->mac_addr[3] << 16) |
252                                 (ha->mac_addr[4] << 8) | ha->mac_addr[5];
253         }
254         ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
255         if (ret)
256                 goto qls_config_unicast_mac_addr_exit;
257
258         index = 128 * (ha->pci_func & 0x1); /* index */
259
260         value = (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
261                 Q81_CTL_MAC_PROTO_AI_TYPE_CAM_MAC;
262
263         WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
264         WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_lower);
265
266         ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
267         if (ret)
268                 goto qls_config_unicast_mac_addr_exit;
269
270         value = (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
271                 Q81_CTL_MAC_PROTO_AI_TYPE_CAM_MAC | 0x1;
272
273         WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
274         WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_upper);
275
276         ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
277         if (ret)
278                 goto qls_config_unicast_mac_addr_exit;
279
280         value = (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
281                 Q81_CTL_MAC_PROTO_AI_TYPE_CAM_MAC | 0x2;
282
283         WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
284
285         value = Q81_CAM_MAC_OFF2_ROUTE_NIC |
286                         ((ha->pci_func & 0x1) << Q81_CAM_MAC_OFF2_FUNC_SHIFT) |
287                         (0 << Q81_CAM_MAC_OFF2_CQID_SHIFT);
288
289         WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, value);
290
291 qls_config_unicast_mac_addr_exit:
292         qls_sem_unlock(ha, Q81_CTL_SEM_MASK_MAC_SERDES);
293         return (ret);
294 }
295
296 /*
297  * Name: qls_config_mcast_mac_addr
298  * Function: binds/unbinds a multicast MAC address to the interface.
299  */
300 static int
301 qls_config_mcast_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac,
302         uint32_t index)
303 {
304         int ret = 0;
305         uint32_t mac_upper = 0;
306         uint32_t mac_lower = 0;
307         uint32_t value = 0;
308
309         if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_MAC_SERDES,
310                 Q81_CTL_SEM_SET_MAC_SERDES)) {
311                 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
312                 return(-1);
313         }
314
315         if (add_mac) {
316                 mac_upper = (mac_addr[0] << 8) | mac_addr[1];
317                 mac_lower = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
318                                 (mac_addr[4] << 8) | mac_addr[5];
319         }
320         ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
321         if (ret)
322                 goto qls_config_mcast_mac_addr_exit;
323
324         value = Q81_CTL_MAC_PROTO_AI_E |
325                         (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
326                         Q81_CTL_MAC_PROTO_AI_TYPE_MCAST ;
327
328         WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
329         WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_lower);
330
331         ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
332         if (ret)
333                 goto qls_config_mcast_mac_addr_exit;
334
335         value = Q81_CTL_MAC_PROTO_AI_E |
336                         (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
337                         Q81_CTL_MAC_PROTO_AI_TYPE_MCAST | 0x1;
338
339         WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
340         WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_upper);
341
342 qls_config_mcast_mac_addr_exit:
343         qls_sem_unlock(ha, Q81_CTL_SEM_MASK_MAC_SERDES);
344
345         return (ret);
346 }
347
348 /*
349  * Name: qls_set_mac_rcv_mode
350  * Function: Enable/Disable AllMulticast and Promiscuous Modes.
351  */
352 static int
353 qls_wait_for_route_idx_ready(qla_host_t *ha, uint32_t op)
354 {
355         uint32_t data32;
356         uint32_t count = 3;
357
358         while (count--) {
359                 data32 = READ_REG32(ha, Q81_CTL_ROUTING_INDEX);
360
361                 if (data32 & op)
362                         return (0);
363
364                 QLA_USEC_DELAY(100);
365         }
366         ha->qla_initiate_recovery = 1;
367         return (-1);
368 }
369
370 static int
371 qls_load_route_idx_reg(qla_host_t *ha, uint32_t index, uint32_t data)
372 {
373         int ret = 0;
374
375         ret = qls_wait_for_route_idx_ready(ha, Q81_CTL_RI_MW);
376
377         if (ret) {
378                 device_printf(ha->pci_dev, "%s: [0x%08x, 0x%08x] failed\n",
379                         __func__, index, data);
380                 goto qls_load_route_idx_reg_exit;
381         }
382
383         WRITE_REG32(ha, Q81_CTL_ROUTING_INDEX, index);
384         WRITE_REG32(ha, Q81_CTL_ROUTING_DATA, data);
385
386 qls_load_route_idx_reg_exit:
387         return (ret);
388 }
389
390 static int
391 qls_load_route_idx_reg_locked(qla_host_t *ha, uint32_t index, uint32_t data)
392 {
393         int ret = 0;
394
395         if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG,
396                 Q81_CTL_SEM_SET_RIDX_DATAREG)) {
397                 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
398                 return(-1);
399         }
400
401         ret = qls_load_route_idx_reg(ha, index, data);
402
403         qls_sem_unlock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG);
404
405         return (ret);
406 }
407
408 static int
409 qls_clear_routing_table(qla_host_t *ha)
410 {
411         int i, ret = 0;
412
413         if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG,
414                 Q81_CTL_SEM_SET_RIDX_DATAREG)) {
415                 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
416                 return(-1);
417         }
418
419         for (i = 0; i < 16; i++) {
420                 ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_TYPE_NICQMASK|
421                         (i << 8) | Q81_CTL_RI_DST_DFLTQ), 0);
422                 if (ret)
423                         break;
424         }
425
426         qls_sem_unlock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG);
427
428         return (ret);
429 }
430
431 int
432 qls_set_promisc(qla_host_t *ha)
433 {
434         int ret;
435
436         ret = qls_load_route_idx_reg_locked(ha,
437                         (Q81_CTL_RI_E | Q81_CTL_RI_TYPE_NICQMASK |
438                         Q81_CTL_RI_IDX_PROMISCUOUS | Q81_CTL_RI_DST_DFLTQ),
439                         Q81_CTL_RD_VALID_PKT);
440         return (ret);
441 }
442
443 void
444 qls_reset_promisc(qla_host_t *ha)
445 {
446         qls_load_route_idx_reg_locked(ha, (Q81_CTL_RI_TYPE_NICQMASK |
447                         Q81_CTL_RI_IDX_PROMISCUOUS | Q81_CTL_RI_DST_DFLTQ), 0);
448         return;
449 }
450
451 int
452 qls_set_allmulti(qla_host_t *ha)
453 {
454         int ret;
455
456         ret = qls_load_route_idx_reg_locked(ha,
457                         (Q81_CTL_RI_E | Q81_CTL_RI_TYPE_NICQMASK |
458                         Q81_CTL_RI_IDX_ALLMULTI | Q81_CTL_RI_DST_DFLTQ),
459                         Q81_CTL_RD_MCAST);
460         return (ret);
461 }
462
463 void
464 qls_reset_allmulti(qla_host_t *ha)
465 {
466         qls_load_route_idx_reg_locked(ha, (Q81_CTL_RI_TYPE_NICQMASK |
467                         Q81_CTL_RI_IDX_ALLMULTI | Q81_CTL_RI_DST_DFLTQ), 0);
468         return;
469 }
470
471 static int
472 qls_init_fw_routing_table(qla_host_t *ha)
473 {
474         int ret = 0;
475
476         ret = qls_clear_routing_table(ha);
477         if (ret)
478                 return (-1);
479
480         if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG,
481                 Q81_CTL_SEM_SET_RIDX_DATAREG)) {
482                 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
483                 return(-1);
484         }
485
486         ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DROP |
487                         Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_ALL_ERROR),
488                         Q81_CTL_RD_ERROR_PKT);
489         if (ret)
490                 goto qls_init_fw_routing_table_exit;
491
492         ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DFLTQ |
493                         Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_BCAST),
494                         Q81_CTL_RD_BCAST);
495         if (ret)
496                 goto qls_init_fw_routing_table_exit;
497
498         if (ha->num_rx_rings > 1 ) {
499                 ret = qls_load_route_idx_reg(ha,
500                                 (Q81_CTL_RI_E | Q81_CTL_RI_DST_RSS |
501                                 Q81_CTL_RI_TYPE_NICQMASK |
502                                 Q81_CTL_RI_IDX_RSS_MATCH),
503                                 Q81_CTL_RD_RSS_MATCH);
504                 if (ret)
505                         goto qls_init_fw_routing_table_exit;
506         }
507
508         ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DFLTQ |
509                         Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_MCAST_MATCH),
510                         Q81_CTL_RD_MCAST_REG_MATCH);
511         if (ret)
512                 goto qls_init_fw_routing_table_exit;
513
514         ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DFLTQ |
515                         Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_CAM_HIT),
516                         Q81_CTL_RD_CAM_HIT);
517         if (ret)
518                 goto qls_init_fw_routing_table_exit;
519
520 qls_init_fw_routing_table_exit:
521         qls_sem_unlock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG);
522         return (ret);
523 }
524
525 static int
526 qls_tx_tso_chksum(qla_host_t *ha, struct mbuf *mp, q81_tx_tso_t *tx_mac)
527 {
528 #if defined(INET) || defined(INET6)
529         struct ether_vlan_header *eh;
530         struct ip *ip;
531 #if defined(INET6)
532         struct ip6_hdr *ip6;
533 #endif
534         struct tcphdr *th;
535         uint32_t ehdrlen, ip_hlen;
536         int ret = 0;
537         uint16_t etype;
538         uint8_t buf[sizeof(struct ip6_hdr)];
539
540         eh = mtod(mp, struct ether_vlan_header *);
541
542         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
543                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
544                 etype = ntohs(eh->evl_proto);
545         } else {
546                 ehdrlen = ETHER_HDR_LEN;
547                 etype = ntohs(eh->evl_encap_proto);
548         }
549
550         switch (etype) {
551 #ifdef INET
552                 case ETHERTYPE_IP:
553                         ip = (struct ip *)(mp->m_data + ehdrlen);
554
555                         ip_hlen = sizeof (struct ip);
556
557                         if (mp->m_len < (ehdrlen + ip_hlen)) {
558                                 m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
559                                 ip = (struct ip *)buf;
560                         }
561                         tx_mac->opcode = Q81_IOCB_TX_TSO;
562                         tx_mac->flags |= Q81_TX_TSO_FLAGS_IPV4 ;
563
564                         tx_mac->phdr_offsets = ehdrlen;
565
566                         tx_mac->phdr_offsets |= ((ehdrlen + ip_hlen) <<
567                                                         Q81_TX_TSO_PHDR_SHIFT);
568
569                         ip->ip_sum = 0;
570
571                         if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
572                                 tx_mac->flags |= Q81_TX_TSO_FLAGS_LSO;
573                                 
574                                 th = (struct tcphdr *)(ip + 1);
575
576                                 th->th_sum = in_pseudo(ip->ip_src.s_addr,
577                                                 ip->ip_dst.s_addr,
578                                                 htons(IPPROTO_TCP));
579                                 tx_mac->mss = mp->m_pkthdr.tso_segsz;
580                                 tx_mac->phdr_length = ip_hlen + ehdrlen +
581                                                         (th->th_off << 2);
582                                 break;
583                         }
584                         tx_mac->vlan_off |= Q81_TX_TSO_VLAN_OFF_IC ;
585
586                         if (ip->ip_p == IPPROTO_TCP) {
587                                 tx_mac->flags |= Q81_TX_TSO_FLAGS_TC;
588                         } else if (ip->ip_p == IPPROTO_UDP) {
589                                 tx_mac->flags |= Q81_TX_TSO_FLAGS_UC;
590                         }
591                 break;
592 #endif
593
594 #ifdef INET6
595                 case ETHERTYPE_IPV6:
596                         ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
597
598                         ip_hlen = sizeof(struct ip6_hdr);
599
600                         if (mp->m_len < (ehdrlen + ip_hlen)) {
601                                 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
602                                         buf);
603                                 ip6 = (struct ip6_hdr *)buf;
604                         }
605
606                         tx_mac->opcode = Q81_IOCB_TX_TSO;
607                         tx_mac->flags |= Q81_TX_TSO_FLAGS_IPV6 ;
608                         tx_mac->vlan_off |= Q81_TX_TSO_VLAN_OFF_IC ;
609
610                         tx_mac->phdr_offsets = ehdrlen;
611                         tx_mac->phdr_offsets |= ((ehdrlen + ip_hlen) <<
612                                                         Q81_TX_TSO_PHDR_SHIFT);
613
614                         if (ip6->ip6_nxt == IPPROTO_TCP) {
615                                 tx_mac->flags |= Q81_TX_TSO_FLAGS_TC;
616                         } else if (ip6->ip6_nxt == IPPROTO_UDP) {
617                                 tx_mac->flags |= Q81_TX_TSO_FLAGS_UC;
618                         }
619                 break;
620 #endif
621
622                 default:
623                         ret = -1;
624                 break;
625         }
626
627         return (ret);
628 #else
629         return (-1);
630 #endif
631 }
632
633 #define QLA_TX_MIN_FREE 2
634 int
635 qls_hw_tx_done(qla_host_t *ha, uint32_t txr_idx)
636 {
637         uint32_t txr_done, txr_next;
638
639         txr_done = ha->tx_ring[txr_idx].txr_done;
640         txr_next = ha->tx_ring[txr_idx].txr_next;
641
642         if (txr_done == txr_next) {
643                 ha->tx_ring[txr_idx].txr_free = NUM_TX_DESCRIPTORS;
644         } else if (txr_done > txr_next) {
645                 ha->tx_ring[txr_idx].txr_free = txr_done - txr_next;
646         } else {
647                 ha->tx_ring[txr_idx].txr_free = NUM_TX_DESCRIPTORS +
648                         txr_done - txr_next;
649         }
650
651         if (ha->tx_ring[txr_idx].txr_free <= QLA_TX_MIN_FREE)
652                 return (-1);
653
654         return (0);
655 }
656
657 /*
658  * Name: qls_hw_send
659  * Function: Transmits a packet. It first checks if the packet is a
660  *      candidate for Large TCP Segment Offload and then for UDP/TCP checksum
661  *      offload. If either of these creteria are not met, it is transmitted
662  *      as a regular ethernet frame.
663  */
664 int
665 qls_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
666         uint32_t txr_next,  struct mbuf *mp, uint32_t txr_idx)
667 {
668         q81_tx_mac_t *tx_mac;
669         q81_txb_desc_t *tx_desc;
670         uint32_t total_length = 0;
671         uint32_t i;
672         device_t dev;
673         int ret = 0;
674
675         dev = ha->pci_dev;
676
677         total_length = mp->m_pkthdr.len;
678
679         if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
680                 device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
681                         __func__, total_length);
682                 return (-1);
683         }
684
685         if (ha->tx_ring[txr_idx].txr_free <= (NUM_TX_DESCRIPTORS >> 2)) {
686                 if (qls_hw_tx_done(ha, txr_idx)) {
687                         device_printf(dev, "%s: tx_free[%d] = %d\n",
688                                 __func__, txr_idx,
689                                 ha->tx_ring[txr_idx].txr_free);
690                         return (-1);
691                 }
692         }
693
694         tx_mac = (q81_tx_mac_t *)&ha->tx_ring[txr_idx].wq_vaddr[txr_next];
695
696         bzero(tx_mac, sizeof(q81_tx_mac_t));
697
698         if ((mp->m_pkthdr.csum_flags &
699                         (CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO)) != 0) {
700                 ret = qls_tx_tso_chksum(ha, mp, (q81_tx_tso_t *)tx_mac);
701                 if (ret) 
702                         return (EINVAL);
703
704                 if (mp->m_pkthdr.csum_flags & CSUM_TSO)
705                         ha->tx_ring[txr_idx].tx_tso_frames++;
706                 else
707                         ha->tx_ring[txr_idx].tx_frames++;
708                         
709         } else { 
710                 tx_mac->opcode = Q81_IOCB_TX_MAC;
711         }
712
713         if (mp->m_flags & M_VLANTAG) {
714                 tx_mac->vlan_tci = mp->m_pkthdr.ether_vtag;
715                 tx_mac->vlan_off |= Q81_TX_MAC_VLAN_OFF_V;
716
717                 ha->tx_ring[txr_idx].tx_vlan_frames++;
718         }
719
720         tx_mac->frame_length = total_length;
721
722         tx_mac->tid_lo = txr_next;
723
724         if (nsegs <= MAX_TX_MAC_DESC) {
725                 QL_DPRINT2((dev, "%s: 1 [%d, %d]\n", __func__, total_length,
726                         tx_mac->tid_lo));
727
728                 for (i = 0; i < nsegs; i++) {
729                         tx_mac->txd[i].baddr = segs->ds_addr;
730                         tx_mac->txd[i].length = segs->ds_len;
731                         segs++;
732                 }
733                 tx_mac->txd[(nsegs - 1)].flags = Q81_RXB_DESC_FLAGS_E;
734
735         } else {
736                 QL_DPRINT2((dev, "%s: 2 [%d, %d]\n", __func__, total_length,
737                         tx_mac->tid_lo));
738
739                 tx_mac->txd[0].baddr =
740                         ha->tx_ring[txr_idx].tx_buf[txr_next].oal_paddr;
741                 tx_mac->txd[0].length =
742                         nsegs * (sizeof(q81_txb_desc_t));
743                 tx_mac->txd[0].flags = Q81_RXB_DESC_FLAGS_C;
744
745                 tx_desc = ha->tx_ring[txr_idx].tx_buf[txr_next].oal_vaddr;
746
747                 for (i = 0; i < nsegs; i++) {
748                         tx_desc->baddr = segs->ds_addr;
749                         tx_desc->length = segs->ds_len;
750
751                         if (i == (nsegs -1))
752                                 tx_desc->flags = Q81_RXB_DESC_FLAGS_E;
753                         else
754                                 tx_desc->flags = 0;
755
756                         segs++;
757                         tx_desc++;
758                 }
759         }
760         txr_next = (txr_next + 1) & (NUM_TX_DESCRIPTORS - 1);
761         ha->tx_ring[txr_idx].txr_next = txr_next;
762
763         ha->tx_ring[txr_idx].txr_free--;
764
765         Q81_WR_WQ_PROD_IDX(txr_idx, txr_next);
766
767         return (0);
768 }
769
770 /*
771  * Name: qls_del_hw_if
772  * Function: Destroys the hardware specific entities corresponding to an
773  *      Ethernet Interface
774  */
775 void
776 qls_del_hw_if(qla_host_t *ha)
777 {
778         uint32_t value;
779         int i;
780         //int  count;
781
782         if (ha->hw_init == 0) {
783                 qls_hw_reset(ha);
784                 return;
785         }
786
787         for (i = 0;  i < ha->num_tx_rings; i++) {
788                 Q81_SET_WQ_INVALID(i); 
789         }
790         for (i = 0;  i < ha->num_rx_rings; i++) {
791                 Q81_SET_CQ_INVALID(i);
792         }
793
794         for (i = 0; i < ha->num_rx_rings; i++) {
795                 Q81_DISABLE_INTR(ha, i); /* MSI-x i */
796         }
797
798         value = (Q81_CTL_INTRE_IHD << Q81_CTL_INTRE_MASK_SHIFT);
799         WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);
800
801         value = (Q81_CTL_INTRE_EI << Q81_CTL_INTRE_MASK_SHIFT);
802         WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);
803         ha->flags.intr_enable = 0;
804
805         qls_hw_reset(ha);
806
807         return;
808 }
809
810 /*
811  * Name: qls_init_hw_if
812  * Function: Creates the hardware specific entities corresponding to an
813  *      Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
814  *      corresponding to the interface. Enables LRO if allowed.
815  */
816 int
817 qls_init_hw_if(qla_host_t *ha)
818 {
819         uint32_t        value;
820         int             ret = 0;
821         int             i;
822
823         QL_DPRINT2((ha->pci_dev, "%s:enter\n", __func__));
824
825         ret = qls_hw_reset(ha);
826         if (ret)
827                 goto qls_init_hw_if_exit;
828
829         ha->vm_pgsize = 4096;
830
831         /* Enable FAE and EFE bits in System Register */
832         value = Q81_CTL_SYSTEM_ENABLE_FAE | Q81_CTL_SYSTEM_ENABLE_EFE;
833         value = (value << Q81_CTL_SYSTEM_MASK_SHIFT) | value;
834
835         WRITE_REG32(ha, Q81_CTL_SYSTEM, value);
836
837         /* Set Default Completion Queue_ID in NIC Rcv Configuration Register */
838         value = (Q81_CTL_NIC_RCVC_DCQ_MASK << Q81_CTL_NIC_RCVC_MASK_SHIFT);
839         WRITE_REG32(ha, Q81_CTL_NIC_RCV_CONFIG, value);
840
841         /* Function Specific Control Register - Set Page Size and Enable NIC */
842         value = Q81_CTL_FUNC_SPECIFIC_FE |
843                 Q81_CTL_FUNC_SPECIFIC_VM_PGSIZE_MASK |
844                 Q81_CTL_FUNC_SPECIFIC_EPC_O |
845                 Q81_CTL_FUNC_SPECIFIC_EPC_I |
846                 Q81_CTL_FUNC_SPECIFIC_EC;
847         value = (value << Q81_CTL_FUNC_SPECIFIC_MASK_SHIFT) | 
848                         Q81_CTL_FUNC_SPECIFIC_FE |
849                         Q81_CTL_FUNC_SPECIFIC_VM_PGSIZE_4K |
850                         Q81_CTL_FUNC_SPECIFIC_EPC_O |
851                         Q81_CTL_FUNC_SPECIFIC_EPC_I |
852                         Q81_CTL_FUNC_SPECIFIC_EC;
853
854         WRITE_REG32(ha, Q81_CTL_FUNC_SPECIFIC, value);
855
856         /* Interrupt Mask Register */
857         value = Q81_CTL_INTRM_PI;
858         value = (value << Q81_CTL_INTRM_MASK_SHIFT) | value;
859
860         WRITE_REG32(ha, Q81_CTL_INTR_MASK, value);
861
862         /* Initialiatize Completion Queue */
863         for (i = 0; i < ha->num_rx_rings; i++) {
864                 ret = qls_init_comp_queue(ha, i);
865                 if (ret)
866                         goto qls_init_hw_if_exit;
867         }
868
869         if (ha->num_rx_rings > 1 ) {
870                 ret = qls_init_rss(ha);
871                 if (ret)
872                         goto qls_init_hw_if_exit;
873         }
874
875         /* Initialize Work Queue */
876
877         for (i = 0; i < ha->num_tx_rings; i++) {
878                 ret = qls_init_work_queue(ha, i);
879                 if (ret)
880                         goto qls_init_hw_if_exit;
881         }
882
883         if (ret)
884                 goto qls_init_hw_if_exit;
885
886         /* Set up CAM RAM with MAC Address */
887         ret = qls_config_unicast_mac_addr(ha, 1);
888         if (ret)
889                 goto qls_init_hw_if_exit;
890
891         ret = qls_hw_add_all_mcast(ha);
892         if (ret)
893                 goto qls_init_hw_if_exit;
894
895         /* Initialize Firmware Routing Table */
896         ret = qls_init_fw_routing_table(ha);
897         if (ret)
898                 goto qls_init_hw_if_exit;
899
900         /* Get Chip Revision ID */
901         ha->rev_id = READ_REG32(ha, Q81_CTL_REV_ID);
902
903         /* Enable Global Interrupt */
904         value = Q81_CTL_INTRE_EI;
905         value = (value << Q81_CTL_INTRE_MASK_SHIFT) | value;
906
907         WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);
908
909         /* Enable Interrupt Handshake Disable */
910         value = Q81_CTL_INTRE_IHD;
911         value = (value << Q81_CTL_INTRE_MASK_SHIFT) | value;
912
913         WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);
914
915         /* Enable Completion Interrupt */
916
917         ha->flags.intr_enable = 1;
918
919         for (i = 0; i < ha->num_rx_rings; i++) {
920                 Q81_ENABLE_INTR(ha, i); /* MSI-x i */
921         }
922
923         ha->hw_init = 1;
924
925         qls_mbx_get_link_status(ha);
926
927         QL_DPRINT2((ha->pci_dev, "%s:rxr [0x%08x]\n", __func__,
928                 ha->rx_ring[0].cq_db_offset));
929         QL_DPRINT2((ha->pci_dev, "%s:txr [0x%08x]\n", __func__,
930                 ha->tx_ring[0].wq_db_offset));
931
932         for (i = 0; i < ha->num_rx_rings; i++) {
933                 Q81_WR_CQ_CONS_IDX(i, 0);
934                 Q81_WR_LBQ_PROD_IDX(i, ha->rx_ring[i].lbq_in);
935                 Q81_WR_SBQ_PROD_IDX(i, ha->rx_ring[i].sbq_in);
936
937                 QL_DPRINT2((ha->pci_dev,
938                         "%s: [wq_idx, cq_idx, lbq_idx, sbq_idx]"
939                         "[0x%08x, 0x%08x, 0x%08x, 0x%08x]\n", __func__,
940                         Q81_RD_WQ_IDX(i), Q81_RD_CQ_IDX(i), Q81_RD_LBQ_IDX(i),
941                         Q81_RD_SBQ_IDX(i)));
942         }
943
944         for (i = 0; i < ha->num_rx_rings; i++) {
945                 Q81_SET_CQ_VALID(i);
946         }
947
948 qls_init_hw_if_exit:
949         QL_DPRINT2((ha->pci_dev, "%s:exit\n", __func__));
950         return (ret);
951 }
952
953 static int
954 qls_wait_for_config_reg_bits(qla_host_t *ha, uint32_t bits, uint32_t value)
955 {
956         uint32_t data32;
957         uint32_t count = 3;
958
959         while (count--) {
960                 data32 = READ_REG32(ha, Q81_CTL_CONFIG);
961
962                 if ((data32 & bits) == value)
963                         return (0);
964                 
965                 QLA_USEC_DELAY(100);
966         }
967         ha->qla_initiate_recovery = 1;
968         device_printf(ha->pci_dev, "%s: failed\n", __func__);
969         return (-1);
970 }
971
972 static uint8_t q81_hash_key[] = {
973                         0xda, 0x56, 0x5a, 0x6d,
974                         0xc2, 0x0e, 0x5b, 0x25,
975                         0x3d, 0x25, 0x67, 0x41,
976                         0xb0, 0x8f, 0xa3, 0x43,
977                         0xcb, 0x2b, 0xca, 0xd0,
978                         0xb4, 0x30, 0x7b, 0xae,
979                         0xa3, 0x2d, 0xcb, 0x77,
980                         0x0c, 0xf2, 0x30, 0x80,
981                         0x3b, 0xb7, 0x42, 0x6a,
982                         0xfa, 0x01, 0xac, 0xbe };
983
984 static int
985 qls_init_rss(qla_host_t *ha)
986 {
987         q81_rss_icb_t   *rss_icb;
988         int             ret = 0;
989         int             i;
990         uint32_t        value;
991
992         rss_icb = ha->rss_dma.dma_b;
993
994         bzero(rss_icb, sizeof (q81_rss_icb_t));
995
996         rss_icb->flags_base_cq_num = Q81_RSS_ICB_FLAGS_L4K |
997                                 Q81_RSS_ICB_FLAGS_L6K | Q81_RSS_ICB_FLAGS_LI |
998                                 Q81_RSS_ICB_FLAGS_LB | Q81_RSS_ICB_FLAGS_LM |
999                                 Q81_RSS_ICB_FLAGS_RT4 | Q81_RSS_ICB_FLAGS_RT6; 
1000
1001         rss_icb->mask = 0x3FF;
1002
1003         for (i = 0; i < Q81_RSS_ICB_NUM_INDTBL_ENTRIES; i++) {
1004                 rss_icb->cq_id[i] = (i & (ha->num_rx_rings - 1));
1005         }
1006
1007         memcpy(rss_icb->ipv6_rss_hash_key, q81_hash_key, 40);
1008         memcpy(rss_icb->ipv4_rss_hash_key, q81_hash_key, 16);
1009
1010         ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LR, 0);
1011
1012         if (ret)
1013                 goto qls_init_rss_exit;
1014
1015         ret = qls_sem_lock(ha, Q81_CTL_SEM_MASK_ICB, Q81_CTL_SEM_SET_ICB);
1016
1017         if (ret) {
1018                 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
1019                 goto qls_init_rss_exit;
1020         }
1021
1022         value = (uint32_t)ha->rss_dma.dma_addr;
1023         WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_LO, value);
1024
1025         value = (uint32_t)(ha->rss_dma.dma_addr >> 32);
1026         WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_HI, value);
1027
1028         qls_sem_unlock(ha, Q81_CTL_SEM_MASK_ICB);
1029
1030         value = (Q81_CTL_CONFIG_LR << Q81_CTL_CONFIG_MASK_SHIFT) |
1031                         Q81_CTL_CONFIG_LR;
1032
1033         WRITE_REG32(ha, Q81_CTL_CONFIG, value);
1034
1035         ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LR, 0);
1036
1037 qls_init_rss_exit:
1038         return (ret);
1039 }
1040
1041 static int
1042 qls_init_comp_queue(qla_host_t *ha, int cid)
1043 {
1044         q81_cq_icb_t    *cq_icb;
1045         qla_rx_ring_t   *rxr;
1046         int             ret = 0;
1047         uint32_t        value;
1048
1049         rxr = &ha->rx_ring[cid];
1050
1051         rxr->cq_db_offset = ha->vm_pgsize * (128 + cid);
1052
1053         cq_icb = rxr->cq_icb_vaddr;
1054
1055         bzero(cq_icb, sizeof (q81_cq_icb_t));
1056
1057         cq_icb->msix_vector = cid;
1058         cq_icb->flags = Q81_CQ_ICB_FLAGS_LC |
1059                         Q81_CQ_ICB_FLAGS_LI |
1060                         Q81_CQ_ICB_FLAGS_LL |
1061                         Q81_CQ_ICB_FLAGS_LS |
1062                         Q81_CQ_ICB_FLAGS_LV;
1063
1064         cq_icb->length_v = NUM_CQ_ENTRIES;
1065
1066         cq_icb->cq_baddr_lo = (rxr->cq_base_paddr & 0xFFFFFFFF);
1067         cq_icb->cq_baddr_hi = (rxr->cq_base_paddr >> 32) & 0xFFFFFFFF;
1068
1069         cq_icb->cqi_addr_lo = (rxr->cqi_paddr & 0xFFFFFFFF);
1070         cq_icb->cqi_addr_hi = (rxr->cqi_paddr >> 32) & 0xFFFFFFFF;
1071
1072         cq_icb->pkt_idelay = 10;
1073         cq_icb->idelay = 100;
1074
1075         cq_icb->lbq_baddr_lo = (rxr->lbq_addr_tbl_paddr & 0xFFFFFFFF);
1076         cq_icb->lbq_baddr_hi = (rxr->lbq_addr_tbl_paddr >> 32) & 0xFFFFFFFF;
1077
1078         cq_icb->lbq_bsize = QLA_LGB_SIZE;
1079         cq_icb->lbq_length = QLA_NUM_LGB_ENTRIES;
1080
1081         cq_icb->sbq_baddr_lo = (rxr->sbq_addr_tbl_paddr & 0xFFFFFFFF);
1082         cq_icb->sbq_baddr_hi = (rxr->sbq_addr_tbl_paddr >> 32) & 0xFFFFFFFF;
1083
1084         cq_icb->sbq_bsize = (uint16_t)ha->msize;
1085         cq_icb->sbq_length = QLA_NUM_SMB_ENTRIES;
1086
1087         QL_DUMP_CQ(ha);
1088
1089         ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LCQ, 0);
1090
1091         if (ret)
1092                 goto qls_init_comp_queue_exit;
1093
1094         ret = qls_sem_lock(ha, Q81_CTL_SEM_MASK_ICB, Q81_CTL_SEM_SET_ICB);
1095
1096         if (ret) {
1097                 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
1098                 goto qls_init_comp_queue_exit;
1099         }
1100
1101         value = (uint32_t)rxr->cq_icb_paddr;
1102         WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_LO, value);
1103
1104         value = (uint32_t)(rxr->cq_icb_paddr >> 32);
1105         WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_HI, value);
1106
1107         qls_sem_unlock(ha, Q81_CTL_SEM_MASK_ICB);
1108
1109         value = Q81_CTL_CONFIG_LCQ | Q81_CTL_CONFIG_Q_NUM_MASK;
1110         value = (value << Q81_CTL_CONFIG_MASK_SHIFT) | Q81_CTL_CONFIG_LCQ;
1111         value |= (cid << Q81_CTL_CONFIG_Q_NUM_SHIFT);
1112         WRITE_REG32(ha, Q81_CTL_CONFIG, value);
1113
1114         ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LCQ, 0);
1115
1116         rxr->cq_next = 0;
1117         rxr->lbq_next = rxr->lbq_free = 0;
1118         rxr->sbq_next = rxr->sbq_free = 0;
1119         rxr->rx_free = rxr->rx_next = 0;
1120         rxr->lbq_in = (QLA_NUM_LGB_ENTRIES - 1) & ~0xF;
1121         rxr->sbq_in = (QLA_NUM_SMB_ENTRIES - 1) & ~0xF;
1122
1123 qls_init_comp_queue_exit:
1124         return (ret);
1125 }
1126
1127 static int
1128 qls_init_work_queue(qla_host_t *ha, int wid)
1129 {
1130         q81_wq_icb_t    *wq_icb;
1131         qla_tx_ring_t   *txr;
1132         int             ret = 0;
1133         uint32_t        value;
1134
1135         txr = &ha->tx_ring[wid];
1136
1137         txr->wq_db_addr = (struct resource *)((uint8_t *)ha->pci_reg1
1138                                                 + (ha->vm_pgsize * wid));
1139
1140         txr->wq_db_offset = (ha->vm_pgsize * wid);
1141
1142         wq_icb = txr->wq_icb_vaddr;
1143         bzero(wq_icb, sizeof (q81_wq_icb_t));
1144
1145         wq_icb->length_v = NUM_TX_DESCRIPTORS  |
1146                                 Q81_WQ_ICB_VALID;
1147
1148         wq_icb->flags = Q81_WQ_ICB_FLAGS_LO | Q81_WQ_ICB_FLAGS_LI |
1149                         Q81_WQ_ICB_FLAGS_LB | Q81_WQ_ICB_FLAGS_LC;
1150
1151         wq_icb->wqcqid_rss = wid;
1152
1153         wq_icb->baddr_lo = txr->wq_paddr & 0xFFFFFFFF;
1154         wq_icb->baddr_hi = (txr->wq_paddr >> 32)& 0xFFFFFFFF;
1155
1156         wq_icb->ci_addr_lo = txr->txr_cons_paddr & 0xFFFFFFFF;
1157         wq_icb->ci_addr_hi = (txr->txr_cons_paddr >> 32)& 0xFFFFFFFF;
1158
1159         ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LRQ, 0);
1160
1161         if (ret)
1162                 goto qls_init_wq_exit;
1163
1164         ret = qls_sem_lock(ha, Q81_CTL_SEM_MASK_ICB, Q81_CTL_SEM_SET_ICB);
1165
1166         if (ret) {
1167                 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
1168                 goto qls_init_wq_exit;
1169         }
1170
1171         value = (uint32_t)txr->wq_icb_paddr;
1172         WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_LO, value);
1173
1174         value = (uint32_t)(txr->wq_icb_paddr >> 32);
1175         WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_HI, value);
1176
1177         qls_sem_unlock(ha, Q81_CTL_SEM_MASK_ICB);
1178
1179         value = Q81_CTL_CONFIG_LRQ | Q81_CTL_CONFIG_Q_NUM_MASK;
1180         value = (value << Q81_CTL_CONFIG_MASK_SHIFT) | Q81_CTL_CONFIG_LRQ;
1181         value |= (wid << Q81_CTL_CONFIG_Q_NUM_SHIFT);
1182         WRITE_REG32(ha, Q81_CTL_CONFIG, value);
1183
1184         ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LRQ, 0);
1185
1186         txr->txr_free = NUM_TX_DESCRIPTORS;
1187         txr->txr_next = 0;
1188         txr->txr_done = 0;
1189
1190 qls_init_wq_exit:
1191         return (ret);
1192 }
1193
1194 static int
1195 qls_hw_add_all_mcast(qla_host_t *ha)
1196 {
1197         int i, nmcast;
1198
1199         nmcast = ha->nmcast;
1200
1201         for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
1202                 if ((ha->mcast[i].addr[0] != 0) || 
1203                         (ha->mcast[i].addr[1] != 0) ||
1204                         (ha->mcast[i].addr[2] != 0) ||
1205                         (ha->mcast[i].addr[3] != 0) ||
1206                         (ha->mcast[i].addr[4] != 0) ||
1207                         (ha->mcast[i].addr[5] != 0)) {
1208                         if (qls_config_mcast_mac_addr(ha, ha->mcast[i].addr,
1209                                 1, i)) {
1210                                 device_printf(ha->pci_dev, "%s: failed\n",
1211                                         __func__);
1212                                 return (-1);
1213                         }
1214
1215                         nmcast--;
1216                 }
1217         }
1218         return 0;
1219 }
1220
1221 static int
1222 qls_hw_add_mcast(qla_host_t *ha, uint8_t *mta)
1223 {
1224         int i;
1225
1226         for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
1227                 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0)
1228                         return 0; /* its been already added */
1229         }
1230
1231         for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
1232                 if ((ha->mcast[i].addr[0] == 0) && 
1233                         (ha->mcast[i].addr[1] == 0) &&
1234                         (ha->mcast[i].addr[2] == 0) &&
1235                         (ha->mcast[i].addr[3] == 0) &&
1236                         (ha->mcast[i].addr[4] == 0) &&
1237                         (ha->mcast[i].addr[5] == 0)) {
1238                         if (qls_config_mcast_mac_addr(ha, mta, 1, i))
1239                                 return (-1);
1240
1241                         bcopy(mta, ha->mcast[i].addr, Q8_MAC_ADDR_LEN);
1242                         ha->nmcast++;   
1243
1244                         return 0;
1245                 }
1246         }
1247         return 0;
1248 }
1249
1250 static int
1251 qls_hw_del_mcast(qla_host_t *ha, uint8_t *mta)
1252 {
1253         int i;
1254
1255         for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
1256                 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) {
1257                         if (qls_config_mcast_mac_addr(ha, mta, 0, i))
1258                                 return (-1);
1259
1260                         ha->mcast[i].addr[0] = 0;
1261                         ha->mcast[i].addr[1] = 0;
1262                         ha->mcast[i].addr[2] = 0;
1263                         ha->mcast[i].addr[3] = 0;
1264                         ha->mcast[i].addr[4] = 0;
1265                         ha->mcast[i].addr[5] = 0;
1266
1267                         ha->nmcast--;   
1268
1269                         return 0;
1270                 }
1271         }
1272         return 0;
1273 }
1274
1275 /*
1276  * Name: qls_hw_set_multi
1277  * Function: Sets the Multicast Addresses provided the host O.S into the
1278  *      hardware (for the given interface)
1279  */
1280 void
1281 qls_hw_set_multi(qla_host_t *ha, uint8_t *mta, uint32_t mcnt,
1282         uint32_t add_mac)
1283 {
1284         int i;
1285
1286         for (i = 0; i < mcnt; i++) {
1287                 if (add_mac) {
1288                         if (qls_hw_add_mcast(ha, mta))
1289                                 break;
1290                 } else {
1291                         if (qls_hw_del_mcast(ha, mta))
1292                                 break;
1293                 }
1294                         
1295                 mta += Q8_MAC_ADDR_LEN;
1296         }
1297         return;
1298 }
1299
1300 void
1301 qls_update_link_state(qla_host_t *ha)
1302 {
1303         uint32_t link_state;
1304         uint32_t prev_link_state;
1305
1306         if (!(if_getdrvflags(ha->ifp) & IFF_DRV_RUNNING)) {
1307                 ha->link_up = 0;
1308                 return;
1309         }
1310         link_state = READ_REG32(ha, Q81_CTL_STATUS);
1311
1312         prev_link_state =  ha->link_up;
1313
1314         if ((ha->pci_func & 0x1) == 0)
1315                 ha->link_up = ((link_state & Q81_CTL_STATUS_PL0)? 1 : 0);
1316         else
1317                 ha->link_up = ((link_state & Q81_CTL_STATUS_PL1)? 1 : 0);
1318
1319         if (prev_link_state !=  ha->link_up) {
1320                 if (ha->link_up) {
1321                         if_link_state_change(ha->ifp, LINK_STATE_UP);
1322                 } else {
1323                         if_link_state_change(ha->ifp, LINK_STATE_DOWN);
1324                 }
1325         }
1326         return;
1327 }
1328
1329 static void
1330 qls_free_tx_ring_dma(qla_host_t *ha, int r_idx)
1331 {
1332         if (ha->tx_ring[r_idx].flags.wq_dma) {
1333                 qls_free_dmabuf(ha, &ha->tx_ring[r_idx].wq_dma);
1334                 ha->tx_ring[r_idx].flags.wq_dma = 0;
1335         }
1336
1337         if (ha->tx_ring[r_idx].flags.privb_dma) {
1338                 qls_free_dmabuf(ha, &ha->tx_ring[r_idx].privb_dma);
1339                 ha->tx_ring[r_idx].flags.privb_dma = 0;
1340         }
1341         return;
1342 }
1343
1344 static void
1345 qls_free_tx_dma(qla_host_t *ha)
1346 {
1347         int i, j;
1348         qla_tx_buf_t *txb;
1349
1350         for (i = 0; i < ha->num_tx_rings; i++) {
1351                 qls_free_tx_ring_dma(ha, i);
1352
1353                 for (j = 0; j < NUM_TX_DESCRIPTORS; j++) {
1354                         txb = &ha->tx_ring[i].tx_buf[j];
1355
1356                         if (txb->map) {
1357                                 bus_dmamap_destroy(ha->tx_tag, txb->map);
1358                         }
1359                 }
1360         }
1361
1362         if (ha->tx_tag != NULL) {
1363                 bus_dma_tag_destroy(ha->tx_tag);
1364                 ha->tx_tag = NULL;
1365         }
1366
1367         return;
1368 }
1369
1370 static int
1371 qls_alloc_tx_ring_dma(qla_host_t *ha, int ridx)
1372 {
1373         int             ret = 0, i;
1374         uint8_t         *v_addr;
1375         bus_addr_t      p_addr;
1376         qla_tx_buf_t    *txb;
1377         device_t        dev = ha->pci_dev;
1378
1379         ha->tx_ring[ridx].wq_dma.alignment = 8;
1380         ha->tx_ring[ridx].wq_dma.size =
1381                 NUM_TX_DESCRIPTORS * (sizeof (q81_tx_cmd_t));
1382
1383         ret = qls_alloc_dmabuf(ha, &ha->tx_ring[ridx].wq_dma);
1384
1385         if (ret) {
1386                 device_printf(dev, "%s: [%d] txr failed\n", __func__, ridx);
1387                 goto qls_alloc_tx_ring_dma_exit;
1388         }
1389         ha->tx_ring[ridx].flags.wq_dma = 1;
1390
1391         ha->tx_ring[ridx].privb_dma.alignment = 8;
1392         ha->tx_ring[ridx].privb_dma.size = QLA_TX_PRIVATE_BSIZE;
1393
1394         ret = qls_alloc_dmabuf(ha, &ha->tx_ring[ridx].privb_dma);
1395
1396         if (ret) {
1397                 device_printf(dev, "%s: [%d] oalb failed\n", __func__, ridx);
1398                 goto qls_alloc_tx_ring_dma_exit;
1399         }
1400
1401         ha->tx_ring[ridx].flags.privb_dma = 1;
1402
1403         ha->tx_ring[ridx].wq_vaddr = ha->tx_ring[ridx].wq_dma.dma_b;
1404         ha->tx_ring[ridx].wq_paddr = ha->tx_ring[ridx].wq_dma.dma_addr;
1405
1406         v_addr = ha->tx_ring[ridx].privb_dma.dma_b;
1407         p_addr = ha->tx_ring[ridx].privb_dma.dma_addr;
1408
1409         ha->tx_ring[ridx].wq_icb_vaddr = v_addr;
1410         ha->tx_ring[ridx].wq_icb_paddr = p_addr;
1411
1412         ha->tx_ring[ridx].txr_cons_vaddr =
1413                 (uint32_t *)(v_addr + (PAGE_SIZE >> 1));
1414         ha->tx_ring[ridx].txr_cons_paddr = p_addr + (PAGE_SIZE >> 1);
1415
1416         v_addr = v_addr + (PAGE_SIZE >> 1);
1417         p_addr = p_addr + (PAGE_SIZE >> 1);
1418
1419         txb = ha->tx_ring[ridx].tx_buf;
1420
1421         for (i = 0; i < NUM_TX_DESCRIPTORS; i++) {
1422                 txb[i].oal_vaddr = v_addr;
1423                 txb[i].oal_paddr = p_addr;
1424
1425                 v_addr = v_addr + QLA_OAL_BLK_SIZE;
1426                 p_addr = p_addr + QLA_OAL_BLK_SIZE;
1427         }
1428
1429 qls_alloc_tx_ring_dma_exit:
1430         return (ret);
1431 }
1432
1433 static int
1434 qls_alloc_tx_dma(qla_host_t *ha)
1435 {
1436         int     i, j;
1437         int     ret = 0;
1438         qla_tx_buf_t *txb;
1439
1440         if (bus_dma_tag_create(NULL,    /* parent */
1441                 1, 0,    /* alignment, bounds */
1442                 BUS_SPACE_MAXADDR,       /* lowaddr */
1443                 BUS_SPACE_MAXADDR,       /* highaddr */
1444                 NULL, NULL,      /* filter, filterarg */
1445                 QLA_MAX_TSO_FRAME_SIZE,     /* maxsize */
1446                 QLA_MAX_SEGMENTS,        /* nsegments */
1447                 PAGE_SIZE,        /* maxsegsize */
1448                 BUS_DMA_ALLOCNOW,        /* flags */
1449                 NULL,    /* lockfunc */
1450                 NULL,    /* lockfuncarg */
1451                 &ha->tx_tag)) {
1452                 device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n",
1453                         __func__);
1454                 return (ENOMEM);
1455         }
1456
1457         for (i = 0; i < ha->num_tx_rings; i++) {
1458                 ret = qls_alloc_tx_ring_dma(ha, i);
1459
1460                 if (ret) {
1461                         qls_free_tx_dma(ha);
1462                         break;
1463                 }
1464
1465                 for (j = 0; j < NUM_TX_DESCRIPTORS; j++) {
1466                         txb = &ha->tx_ring[i].tx_buf[j];
1467
1468                         ret = bus_dmamap_create(ha->tx_tag,
1469                                 BUS_DMA_NOWAIT, &txb->map);
1470                         if (ret) {
1471                                 ha->err_tx_dmamap_create++;
1472                                 device_printf(ha->pci_dev,
1473                                 "%s: bus_dmamap_create failed[%d, %d, %d]\n",
1474                                 __func__, ret, i, j);
1475
1476                                 qls_free_tx_dma(ha);
1477
1478                                 return (ret);
1479                         }
1480                 }
1481         }
1482
1483         return (ret);
1484 }
1485
1486 static void
1487 qls_free_rss_dma(qla_host_t *ha)
1488 {
1489         qls_free_dmabuf(ha, &ha->rss_dma);
1490         ha->flags.rss_dma = 0;
1491 }
1492
1493 static int
1494 qls_alloc_rss_dma(qla_host_t *ha)
1495 {
1496         int ret = 0;
1497
1498         ha->rss_dma.alignment = 4;
1499         ha->rss_dma.size = PAGE_SIZE;
1500
1501         ret = qls_alloc_dmabuf(ha, &ha->rss_dma);
1502
1503         if (ret)
1504                 device_printf(ha->pci_dev, "%s: failed\n", __func__);
1505         else
1506                 ha->flags.rss_dma = 1;
1507
1508         return (ret);
1509 }
1510
1511 static void
1512 qls_free_mpi_dma(qla_host_t *ha)
1513 {
1514         qls_free_dmabuf(ha, &ha->mpi_dma);
1515         ha->flags.mpi_dma = 0;
1516 }
1517
1518 static int
1519 qls_alloc_mpi_dma(qla_host_t *ha)
1520 {
1521         int ret = 0;
1522
1523         ha->mpi_dma.alignment = 4;
1524         ha->mpi_dma.size = (0x4000 * 4);
1525
1526         ret = qls_alloc_dmabuf(ha, &ha->mpi_dma);
1527         if (ret)
1528                 device_printf(ha->pci_dev, "%s: failed\n", __func__);
1529         else
1530                 ha->flags.mpi_dma = 1;
1531
1532         return (ret);
1533 }
1534
1535 static void
1536 qls_free_rx_ring_dma(qla_host_t *ha, int ridx)
1537 {
1538         if (ha->rx_ring[ridx].flags.cq_dma) {
1539                 qls_free_dmabuf(ha, &ha->rx_ring[ridx].cq_dma);
1540                 ha->rx_ring[ridx].flags.cq_dma = 0;
1541         }
1542
1543         if (ha->rx_ring[ridx].flags.lbq_dma) {
1544                 qls_free_dmabuf(ha, &ha->rx_ring[ridx].lbq_dma);
1545                 ha->rx_ring[ridx].flags.lbq_dma = 0;
1546         }
1547
1548         if (ha->rx_ring[ridx].flags.sbq_dma) {
1549                 qls_free_dmabuf(ha, &ha->rx_ring[ridx].sbq_dma);
1550                 ha->rx_ring[ridx].flags.sbq_dma = 0;
1551         }
1552
1553         if (ha->rx_ring[ridx].flags.lb_dma) {
1554                 qls_free_dmabuf(ha, &ha->rx_ring[ridx].lb_dma);
1555                 ha->rx_ring[ridx].flags.lb_dma = 0;
1556         }
1557         return;
1558 }
1559
1560 static void
1561 qls_free_rx_dma(qla_host_t *ha)
1562 {
1563         int i;
1564
1565         for (i = 0; i < ha->num_rx_rings; i++) {
1566                 qls_free_rx_ring_dma(ha, i);
1567         }
1568
1569         if (ha->rx_tag != NULL) {
1570                 bus_dma_tag_destroy(ha->rx_tag);
1571                 ha->rx_tag = NULL;
1572         }
1573
1574         return;
1575 }
1576
1577 static int
1578 qls_alloc_rx_ring_dma(qla_host_t *ha, int ridx)
1579 {
1580         int                             i, ret = 0;
1581         uint8_t                         *v_addr;
1582         bus_addr_t                      p_addr;
1583         volatile q81_bq_addr_e_t        *bq_e;
1584         device_t                        dev = ha->pci_dev;
1585
1586         ha->rx_ring[ridx].cq_dma.alignment = 128;
1587         ha->rx_ring[ridx].cq_dma.size =
1588                 (NUM_CQ_ENTRIES * (sizeof (q81_cq_e_t))) + PAGE_SIZE;
1589
1590         ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].cq_dma);
1591
1592         if (ret) {
1593                 device_printf(dev, "%s: [%d] cq failed\n", __func__, ridx);
1594                 goto qls_alloc_rx_ring_dma_exit;
1595         }
1596         ha->rx_ring[ridx].flags.cq_dma = 1;
1597
1598         ha->rx_ring[ridx].lbq_dma.alignment = 8;
1599         ha->rx_ring[ridx].lbq_dma.size = QLA_LGBQ_AND_TABLE_SIZE;
1600
1601         ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].lbq_dma);
1602
1603         if (ret) {
1604                 device_printf(dev, "%s: [%d] lbq failed\n", __func__, ridx);
1605                 goto qls_alloc_rx_ring_dma_exit;
1606         }
1607         ha->rx_ring[ridx].flags.lbq_dma = 1;
1608
1609         ha->rx_ring[ridx].sbq_dma.alignment = 8;
1610         ha->rx_ring[ridx].sbq_dma.size = QLA_SMBQ_AND_TABLE_SIZE;
1611
1612         ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].sbq_dma);
1613
1614         if (ret) {
1615                 device_printf(dev, "%s: [%d] sbq failed\n", __func__, ridx);
1616                 goto qls_alloc_rx_ring_dma_exit;
1617         }
1618         ha->rx_ring[ridx].flags.sbq_dma = 1;
1619
1620         ha->rx_ring[ridx].lb_dma.alignment = 8;
1621         ha->rx_ring[ridx].lb_dma.size = (QLA_LGB_SIZE * QLA_NUM_LGB_ENTRIES);
1622
1623         ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].lb_dma);
1624         if (ret) {
1625                 device_printf(dev, "%s: [%d] lb failed\n", __func__, ridx);
1626                 goto qls_alloc_rx_ring_dma_exit;
1627         }
1628         ha->rx_ring[ridx].flags.lb_dma = 1;
1629
1630         bzero(ha->rx_ring[ridx].cq_dma.dma_b, ha->rx_ring[ridx].cq_dma.size);
1631         bzero(ha->rx_ring[ridx].lbq_dma.dma_b, ha->rx_ring[ridx].lbq_dma.size);
1632         bzero(ha->rx_ring[ridx].sbq_dma.dma_b, ha->rx_ring[ridx].sbq_dma.size);
1633         bzero(ha->rx_ring[ridx].lb_dma.dma_b, ha->rx_ring[ridx].lb_dma.size);
1634
1635         /* completion queue */
1636         ha->rx_ring[ridx].cq_base_vaddr = ha->rx_ring[ridx].cq_dma.dma_b;
1637         ha->rx_ring[ridx].cq_base_paddr = ha->rx_ring[ridx].cq_dma.dma_addr;
1638
1639         v_addr = ha->rx_ring[ridx].cq_dma.dma_b;
1640         p_addr = ha->rx_ring[ridx].cq_dma.dma_addr;
1641
1642         v_addr = v_addr + (NUM_CQ_ENTRIES * (sizeof (q81_cq_e_t)));
1643         p_addr = p_addr + (NUM_CQ_ENTRIES * (sizeof (q81_cq_e_t)));
1644
1645         /* completion queue icb */
1646         ha->rx_ring[ridx].cq_icb_vaddr = v_addr;
1647         ha->rx_ring[ridx].cq_icb_paddr = p_addr;
1648
1649         v_addr = v_addr + (PAGE_SIZE >> 2);
1650         p_addr = p_addr + (PAGE_SIZE >> 2);
1651
1652         /* completion queue index register */
1653         ha->rx_ring[ridx].cqi_vaddr = (uint32_t *)v_addr;
1654         ha->rx_ring[ridx].cqi_paddr = p_addr;
1655
1656         v_addr = ha->rx_ring[ridx].lbq_dma.dma_b;
1657         p_addr = ha->rx_ring[ridx].lbq_dma.dma_addr;
1658
1659         /* large buffer queue address table */
1660         ha->rx_ring[ridx].lbq_addr_tbl_vaddr = v_addr;
1661         ha->rx_ring[ridx].lbq_addr_tbl_paddr = p_addr;
1662
1663         /* large buffer queue */
1664         ha->rx_ring[ridx].lbq_vaddr = v_addr + PAGE_SIZE;
1665         ha->rx_ring[ridx].lbq_paddr = p_addr + PAGE_SIZE;
1666
1667         v_addr = ha->rx_ring[ridx].sbq_dma.dma_b;
1668         p_addr = ha->rx_ring[ridx].sbq_dma.dma_addr;
1669
1670         /* small buffer queue address table */
1671         ha->rx_ring[ridx].sbq_addr_tbl_vaddr = v_addr;
1672         ha->rx_ring[ridx].sbq_addr_tbl_paddr = p_addr;
1673
1674         /* small buffer queue */
1675         ha->rx_ring[ridx].sbq_vaddr = v_addr + PAGE_SIZE;
1676         ha->rx_ring[ridx].sbq_paddr = p_addr + PAGE_SIZE;
1677
1678         ha->rx_ring[ridx].lb_vaddr = ha->rx_ring[ridx].lb_dma.dma_b;
1679         ha->rx_ring[ridx].lb_paddr = ha->rx_ring[ridx].lb_dma.dma_addr;
1680
1681         /* Initialize Large Buffer Queue Table */
1682
1683         p_addr = ha->rx_ring[ridx].lbq_paddr;
1684         bq_e = ha->rx_ring[ridx].lbq_addr_tbl_vaddr;
1685
1686         bq_e->addr_lo = p_addr & 0xFFFFFFFF;
1687         bq_e->addr_hi = (p_addr >> 32) & 0xFFFFFFFF;
1688
1689         p_addr = ha->rx_ring[ridx].lb_paddr;
1690         bq_e = ha->rx_ring[ridx].lbq_vaddr;
1691
1692         for (i = 0; i < QLA_NUM_LGB_ENTRIES; i++) {
1693                 bq_e->addr_lo = p_addr & 0xFFFFFFFF;
1694                 bq_e->addr_hi = (p_addr >> 32) & 0xFFFFFFFF;
1695
1696                 p_addr = p_addr + QLA_LGB_SIZE;
1697                 bq_e++;
1698         }
1699
1700         /* Initialize Small Buffer Queue Table */
1701
1702         p_addr = ha->rx_ring[ridx].sbq_paddr;
1703         bq_e = ha->rx_ring[ridx].sbq_addr_tbl_vaddr;
1704
1705         for (i =0; i < (QLA_SBQ_SIZE/QLA_PAGE_SIZE); i++) {
1706                 bq_e->addr_lo = p_addr & 0xFFFFFFFF;
1707                 bq_e->addr_hi = (p_addr >> 32) & 0xFFFFFFFF;
1708
1709                 p_addr = p_addr + QLA_PAGE_SIZE;
1710                 bq_e++;
1711         }
1712
1713 qls_alloc_rx_ring_dma_exit:
1714         return (ret);
1715 }
1716
1717 static int
1718 qls_alloc_rx_dma(qla_host_t *ha)
1719 {
1720         int     i;
1721         int     ret = 0;
1722
1723         if (bus_dma_tag_create(NULL,    /* parent */
1724                         1, 0,    /* alignment, bounds */
1725                         BUS_SPACE_MAXADDR,       /* lowaddr */
1726                         BUS_SPACE_MAXADDR,       /* highaddr */
1727                         NULL, NULL,      /* filter, filterarg */
1728                         MJUM9BYTES,     /* maxsize */
1729                         1,        /* nsegments */
1730                         MJUM9BYTES,        /* maxsegsize */
1731                         BUS_DMA_ALLOCNOW,        /* flags */
1732                         NULL,    /* lockfunc */
1733                         NULL,    /* lockfuncarg */
1734                         &ha->rx_tag)) {
1735                 device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
1736                         __func__);
1737
1738                 return (ENOMEM);
1739         }
1740
1741         for (i = 0; i < ha->num_rx_rings; i++) {
1742                 ret = qls_alloc_rx_ring_dma(ha, i);
1743
1744                 if (ret) {
1745                         qls_free_rx_dma(ha);
1746                         break;
1747                 }
1748         }
1749
1750         return (ret);
1751 }
1752
1753 static int
1754 qls_wait_for_flash_ready(qla_host_t *ha)
1755 {
1756         uint32_t data32;
1757         uint32_t count = 3;
1758
1759         while (count--) {
1760                 data32 = READ_REG32(ha, Q81_CTL_FLASH_ADDR);
1761
1762                 if (data32 & Q81_CTL_FLASH_ADDR_ERR)
1763                         goto qls_wait_for_flash_ready_exit;
1764                 
1765                 if (data32 & Q81_CTL_FLASH_ADDR_RDY)
1766                         return (0);
1767
1768                 QLA_USEC_DELAY(100);
1769         }
1770
1771 qls_wait_for_flash_ready_exit:
1772         QL_DPRINT1((ha->pci_dev, "%s: failed\n", __func__));
1773
1774         return (-1);
1775 }
1776
1777 /*
1778  * Name: qls_rd_flash32
1779  * Function: Read Flash Memory
1780  */
1781 int
1782 qls_rd_flash32(qla_host_t *ha, uint32_t addr, uint32_t *data)
1783 {
1784         int ret;
1785
1786         ret = qls_wait_for_flash_ready(ha);
1787
1788         if (ret)
1789                 return (ret);
1790
1791         WRITE_REG32(ha, Q81_CTL_FLASH_ADDR, (addr | Q81_CTL_FLASH_ADDR_R));
1792
1793         ret = qls_wait_for_flash_ready(ha);
1794
1795         if (ret)
1796                 return (ret);
1797
1798         *data = READ_REG32(ha, Q81_CTL_FLASH_DATA);
1799
1800         return 0;
1801 }
1802
1803 static int
1804 qls_flash_validate(qla_host_t *ha, const char *signature)
1805 {
1806         uint16_t csum16 = 0;
1807         uint16_t *data16;
1808         int i;
1809
1810         if (bcmp(ha->flash.id, signature, 4)) {
1811                 QL_DPRINT1((ha->pci_dev, "%s: invalid signature "
1812                         "%x:%x:%x:%x %s\n", __func__, ha->flash.id[0],
1813                         ha->flash.id[1], ha->flash.id[2], ha->flash.id[3],
1814                         signature));
1815                 return(-1);
1816         }
1817
1818         data16 = (uint16_t *)&ha->flash;
1819
1820         for (i = 0; i < (sizeof (q81_flash_t) >> 1); i++) {
1821                 csum16 += *data16++;
1822         }
1823
1824         if (csum16) {
1825                 QL_DPRINT1((ha->pci_dev, "%s: invalid checksum\n", __func__));
1826                 return(-1);
1827         }
1828         return(0);
1829 }
1830
1831 int
1832 qls_rd_nic_params(qla_host_t *ha)
1833 {
1834         int             i, ret = 0;
1835         uint32_t        faddr;
1836         uint32_t        *qflash;
1837
1838         if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_FLASH, Q81_CTL_SEM_SET_FLASH)) {
1839                 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
1840                 return(-1);
1841         }
1842
1843         if ((ha->pci_func & 0x1) == 0)
1844                 faddr = Q81_F0_FLASH_OFFSET >> 2;
1845         else
1846                 faddr = Q81_F1_FLASH_OFFSET >> 2;
1847
1848         qflash = (uint32_t *)&ha->flash;
1849
1850         for (i = 0; i < (sizeof(q81_flash_t) >> 2) ; i++) {
1851                 ret = qls_rd_flash32(ha, faddr, qflash);
1852
1853                 if (ret)
1854                         goto qls_rd_flash_data_exit;
1855
1856                 faddr++;
1857                 qflash++;
1858         }
1859
1860         QL_DUMP_BUFFER8(ha, __func__, (&ha->flash), (sizeof (q81_flash_t)));
1861
1862         ret = qls_flash_validate(ha, Q81_FLASH_ID);
1863
1864         if (ret)
1865                 goto qls_rd_flash_data_exit;
1866
1867         bcopy(ha->flash.mac_addr0, ha->mac_addr, ETHER_ADDR_LEN);
1868
1869         QL_DPRINT1((ha->pci_dev, "%s: mac %02x:%02x:%02x:%02x:%02x:%02x\n",
1870                 __func__, ha->mac_addr[0],  ha->mac_addr[1], ha->mac_addr[2],
1871                 ha->mac_addr[3], ha->mac_addr[4],  ha->mac_addr[5]));
1872
1873 qls_rd_flash_data_exit:
1874
1875         qls_sem_unlock(ha, Q81_CTL_SEM_MASK_FLASH);
1876
1877         return(ret);
1878 }
1879
1880 static int
1881 qls_sem_lock(qla_host_t *ha, uint32_t mask, uint32_t value)
1882 {
1883         uint32_t count = 30;
1884         uint32_t data;
1885
1886         while (count--) {
1887                 WRITE_REG32(ha, Q81_CTL_SEMAPHORE, (mask|value));
1888
1889                 data = READ_REG32(ha, Q81_CTL_SEMAPHORE);
1890
1891                 if (data & value) {
1892                         return (0);
1893                 } else {
1894                         QLA_USEC_DELAY(100);
1895                 }
1896         }
1897         ha->qla_initiate_recovery = 1;
1898         return (-1);
1899 }
1900
1901 static void
1902 qls_sem_unlock(qla_host_t *ha, uint32_t mask)
1903 {
1904         WRITE_REG32(ha, Q81_CTL_SEMAPHORE, mask);
1905 }
1906
1907 static int
1908 qls_wait_for_proc_addr_ready(qla_host_t *ha)
1909 {
1910         uint32_t data32;
1911         uint32_t count = 3;
1912
1913         while (count--) {
1914                 data32 = READ_REG32(ha, Q81_CTL_PROC_ADDR);
1915
1916                 if (data32 & Q81_CTL_PROC_ADDR_ERR)
1917                         goto qls_wait_for_proc_addr_ready_exit;
1918                 
1919                 if (data32 & Q81_CTL_PROC_ADDR_RDY)
1920                         return (0);
1921
1922                 QLA_USEC_DELAY(100);
1923         }
1924
1925 qls_wait_for_proc_addr_ready_exit:
1926         QL_DPRINT1((ha->pci_dev, "%s: failed\n", __func__));
1927
1928         ha->qla_initiate_recovery = 1;
1929         return (-1);
1930 }
1931
1932 static int
1933 qls_proc_addr_rd_reg(qla_host_t *ha, uint32_t addr_module, uint32_t reg,
1934         uint32_t *data)
1935 {
1936         int ret;
1937         uint32_t value;
1938
1939         ret = qls_wait_for_proc_addr_ready(ha);
1940
1941         if (ret)
1942                 goto qls_proc_addr_rd_reg_exit;
1943
1944         value = addr_module | reg | Q81_CTL_PROC_ADDR_READ;
1945
1946         WRITE_REG32(ha, Q81_CTL_PROC_ADDR, value);
1947
1948         ret = qls_wait_for_proc_addr_ready(ha);
1949
1950         if (ret)
1951                 goto qls_proc_addr_rd_reg_exit;
1952
1953         *data = READ_REG32(ha, Q81_CTL_PROC_DATA); 
1954
1955 qls_proc_addr_rd_reg_exit:
1956         return (ret);
1957 }
1958
1959 static int
1960 qls_proc_addr_wr_reg(qla_host_t *ha, uint32_t addr_module, uint32_t reg,
1961         uint32_t data)
1962 {
1963         int ret;
1964         uint32_t value;
1965
1966         ret = qls_wait_for_proc_addr_ready(ha);
1967
1968         if (ret)
1969                 goto qls_proc_addr_wr_reg_exit;
1970
1971         WRITE_REG32(ha, Q81_CTL_PROC_DATA, data);
1972
1973         value = addr_module | reg;
1974
1975         WRITE_REG32(ha, Q81_CTL_PROC_ADDR, value);
1976
1977         ret = qls_wait_for_proc_addr_ready(ha);
1978
1979 qls_proc_addr_wr_reg_exit:
1980         return (ret);
1981 }
1982
1983 static int
1984 qls_hw_nic_reset(qla_host_t *ha)
1985 {
1986         int             count;
1987         uint32_t        data;
1988         device_t        dev = ha->pci_dev;
1989
1990         ha->hw_init = 0;
1991
1992         data = (Q81_CTL_RESET_FUNC << Q81_CTL_RESET_MASK_SHIFT) |
1993                         Q81_CTL_RESET_FUNC;
1994         WRITE_REG32(ha, Q81_CTL_RESET, data);
1995
1996         count = 10;
1997         while (count--) {
1998                 data = READ_REG32(ha, Q81_CTL_RESET);
1999                 if ((data & Q81_CTL_RESET_FUNC) == 0)
2000                         break;
2001                 QLA_USEC_DELAY(10);
2002         }
2003         if (count == 0) {
2004                 device_printf(dev, "%s: Bit 15 not cleared after Reset\n",
2005                         __func__);
2006                 return (-1);
2007         }
2008         return (0);
2009 }
2010
2011 static int
2012 qls_hw_reset(qla_host_t *ha)
2013 {
2014         device_t        dev = ha->pci_dev;
2015         int             ret;
2016         int             count;
2017         uint32_t        data;
2018
2019         QL_DPRINT2((ha->pci_dev, "%s:enter[%d]\n", __func__, ha->hw_init));
2020
2021         if (ha->hw_init == 0) {
2022                 ret = qls_hw_nic_reset(ha);
2023                 goto qls_hw_reset_exit;
2024         }
2025
2026         ret = qls_clear_routing_table(ha);
2027         if (ret) 
2028                 goto qls_hw_reset_exit;
2029
2030         ret = qls_mbx_set_mgmt_ctrl(ha, Q81_MBX_SET_MGMT_CTL_STOP);
2031         if (ret) 
2032                 goto qls_hw_reset_exit;
2033
2034         /*
2035          * Wait for FIFO to empty
2036          */
2037         count = 5;
2038         while (count--) {
2039                 data = READ_REG32(ha, Q81_CTL_STATUS);
2040                 if (data & Q81_CTL_STATUS_NFE)
2041                         break;
2042                 qls_mdelay(__func__, 100);
2043         }
2044         if (count == 0) {
2045                 device_printf(dev, "%s: NFE bit not set\n", __func__);
2046                 goto qls_hw_reset_exit;
2047         }
2048
2049         count = 5;
2050         while (count--) {
2051                 (void)qls_mbx_get_mgmt_ctrl(ha, &data);
2052
2053                 if ((data & Q81_MBX_GET_MGMT_CTL_FIFO_EMPTY) &&
2054                         (data & Q81_MBX_GET_MGMT_CTL_SET_MGMT))
2055                         break;
2056                 qls_mdelay(__func__, 100);
2057         }
2058         if (count == 0)
2059                 goto qls_hw_reset_exit;
2060
2061         /*
2062          * Reset the NIC function
2063          */
2064         ret = qls_hw_nic_reset(ha);
2065         if (ret) 
2066                 goto qls_hw_reset_exit;
2067
2068         ret = qls_mbx_set_mgmt_ctrl(ha, Q81_MBX_SET_MGMT_CTL_RESUME);
2069
2070 qls_hw_reset_exit:
2071         if (ret)
2072                 device_printf(dev, "%s: failed\n", __func__);
2073                 
2074         return (ret);
2075 }
2076
2077 /*
2078  * MPI Related Functions
2079  */
2080 int
2081 qls_mpi_risc_rd_reg(qla_host_t *ha, uint32_t reg, uint32_t *data)
2082 {
2083         int ret;
2084
2085         ret = qls_proc_addr_rd_reg(ha, Q81_CTL_PROC_ADDR_MPI_RISC,
2086                         reg, data);
2087         return (ret);
2088 }
2089
2090 int
2091 qls_mpi_risc_wr_reg(qla_host_t *ha, uint32_t reg, uint32_t data)
2092 {
2093         int ret;
2094
2095         ret = qls_proc_addr_wr_reg(ha, Q81_CTL_PROC_ADDR_MPI_RISC,
2096                         reg, data);
2097         return (ret);
2098 }
2099
2100 int
2101 qls_mbx_rd_reg(qla_host_t *ha, uint32_t reg, uint32_t *data)
2102 {
2103         int ret;
2104
2105         if ((ha->pci_func & 0x1) == 0)
2106                 reg += Q81_FUNC0_MBX_OUT_REG0;
2107         else
2108                 reg += Q81_FUNC1_MBX_OUT_REG0;
2109
2110         ret = qls_mpi_risc_rd_reg(ha, reg, data);
2111
2112         return (ret);
2113 }
2114
2115 int
2116 qls_mbx_wr_reg(qla_host_t *ha, uint32_t reg, uint32_t data)
2117 {
2118         int ret;
2119
2120         if ((ha->pci_func & 0x1) == 0)
2121                 reg += Q81_FUNC0_MBX_IN_REG0;
2122         else
2123                 reg += Q81_FUNC1_MBX_IN_REG0;
2124
2125         ret = qls_mpi_risc_wr_reg(ha, reg, data);
2126
2127         return (ret);
2128 }
2129
2130 static int
2131 qls_mbx_cmd(qla_host_t *ha, uint32_t *in_mbx, uint32_t i_count,
2132         uint32_t *out_mbx, uint32_t o_count)
2133 {
2134         int i, ret = -1;
2135         uint32_t data32;
2136         uint32_t count = 50;
2137
2138         QL_DPRINT2((ha->pci_dev, "%s: enter[0x%08x 0x%08x 0x%08x]\n",
2139                 __func__, *in_mbx, *(in_mbx + 1), *(in_mbx + 2)));
2140
2141         data32 = READ_REG32(ha, Q81_CTL_HOST_CMD_STATUS);
2142
2143         if (data32 & Q81_CTL_HCS_HTR_INTR) {
2144                 device_printf(ha->pci_dev, "%s: cmd_status[0x%08x]\n",
2145                         __func__, data32);
2146                 goto qls_mbx_cmd_exit;
2147         }
2148
2149         if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_PROC_ADDR_NIC_RCV,
2150                 Q81_CTL_SEM_SET_PROC_ADDR_NIC_RCV)) {
2151                 device_printf(ha->pci_dev, "%s: semlock failed\n", __func__);
2152                 goto qls_mbx_cmd_exit;
2153         }
2154
2155         ha->mbx_done = 0;
2156
2157         for (i = 0; i < i_count; i++) {
2158                 ret = qls_mbx_wr_reg(ha, i, *in_mbx);
2159
2160                 if (ret) {
2161                         device_printf(ha->pci_dev,
2162                                 "%s: mbx_wr[%d, 0x%08x] failed\n", __func__,
2163                                 i, *in_mbx);
2164                         qls_sem_unlock(ha, Q81_CTL_SEM_MASK_PROC_ADDR_NIC_RCV);
2165                         goto qls_mbx_cmd_exit;
2166                 }
2167
2168                 in_mbx++;
2169         }
2170         WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS, Q81_CTL_HCS_CMD_SET_HTR_INTR);
2171
2172         qls_sem_unlock(ha, Q81_CTL_SEM_MASK_PROC_ADDR_NIC_RCV);
2173
2174         ret = -1;
2175         ha->mbx_done = 0;
2176
2177         while (count--) {
2178                 if (ha->flags.intr_enable == 0) {
2179                         data32 = READ_REG32(ha, Q81_CTL_STATUS);
2180
2181                         if (!(data32 & Q81_CTL_STATUS_PI)) {
2182                                 qls_mdelay(__func__, 100);
2183                                 continue;
2184                         }
2185
2186                         ret = qls_mbx_rd_reg(ha, 0, &data32);
2187
2188                         if (ret == 0 ) {
2189                                 if ((data32 & 0xF000) == 0x4000) {
2190                                         out_mbx[0] = data32;
2191
2192                                         for (i = 1; i < o_count; i++) {
2193                                                 ret = qls_mbx_rd_reg(ha, i,
2194                                                                 &data32);
2195                                                 if (ret) {
2196                                                         device_printf(
2197                                                                 ha->pci_dev,
2198                                                                 "%s: mbx_rd[%d]"
2199                                                                 " failed\n",
2200                                                                 __func__, i);
2201                                                         break;
2202                                                 }
2203                                                 out_mbx[i] = data32;
2204                                         }
2205                                         break;
2206                                 } else if ((data32 & 0xF000) == 0x8000) {
2207                                         count = 50;
2208                                         WRITE_REG32(ha,\
2209                                                 Q81_CTL_HOST_CMD_STATUS,\
2210                                                 Q81_CTL_HCS_CMD_CLR_RTH_INTR);
2211                                 }
2212                         }
2213                 } else {
2214                         if (ha->mbx_done) {
2215                                 for (i = 1; i < o_count; i++) {
2216                                         out_mbx[i] = ha->mbox[i];
2217                                 }
2218                                 ret = 0;
2219                                 break;
2220                         }
2221                 }
2222                 qls_mdelay(__func__, 1000);
2223         }
2224
2225 qls_mbx_cmd_exit:
2226
2227         if (ha->flags.intr_enable == 0) {
2228                 WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS,\
2229                         Q81_CTL_HCS_CMD_CLR_RTH_INTR);
2230         }
2231
2232         if (ret) {
2233                 ha->qla_initiate_recovery = 1;
2234         }
2235
2236         QL_DPRINT2((ha->pci_dev, "%s: exit[%d]\n", __func__, ret));
2237         return (ret);
2238 }
2239
2240 static int
2241 qls_mbx_set_mgmt_ctrl(qla_host_t *ha, uint32_t t_ctrl)
2242 {
2243         uint32_t *mbox;
2244         device_t dev = ha->pci_dev;
2245
2246         mbox = ha->mbox;
2247         bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
2248
2249         mbox[0] = Q81_MBX_SET_MGMT_CTL;
2250         mbox[1] = t_ctrl;
2251
2252         if (qls_mbx_cmd(ha, mbox, 2, mbox, 1)) {
2253                 device_printf(dev, "%s failed\n", __func__);
2254                 return (-1);
2255         }
2256
2257         if ((mbox[0] == Q81_MBX_CMD_COMPLETE) ||
2258                 ((t_ctrl == Q81_MBX_SET_MGMT_CTL_STOP) &&
2259                         (mbox[0] == Q81_MBX_CMD_ERROR))){
2260                 return (0);
2261         }
2262         device_printf(dev, "%s failed [0x%08x]\n", __func__, mbox[0]);
2263         return (-1);
2264
2265 }
2266
2267 static int
2268 qls_mbx_get_mgmt_ctrl(qla_host_t *ha, uint32_t *t_status)
2269 {
2270         uint32_t *mbox;
2271         device_t dev = ha->pci_dev;
2272
2273         *t_status = 0;
2274
2275         mbox = ha->mbox;
2276         bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
2277
2278         mbox[0] = Q81_MBX_GET_MGMT_CTL;
2279
2280         if (qls_mbx_cmd(ha, mbox, 1, mbox, 2)) {
2281                 device_printf(dev, "%s failed\n", __func__);
2282                 return (-1);
2283         }
2284
2285         *t_status = mbox[1];
2286
2287         return (0);
2288 }
2289
2290 static void
2291 qls_mbx_get_link_status(qla_host_t *ha)
2292 {
2293         uint32_t *mbox;
2294         device_t dev = ha->pci_dev;
2295
2296         mbox = ha->mbox;
2297         bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
2298
2299         mbox[0] = Q81_MBX_GET_LNK_STATUS;
2300
2301         if (qls_mbx_cmd(ha, mbox, 1, mbox, 6)) {
2302                 device_printf(dev, "%s failed\n", __func__);
2303                 return;
2304         }
2305
2306         ha->link_status                 = mbox[1];
2307         ha->link_down_info              = mbox[2];
2308         ha->link_hw_info                = mbox[3];
2309         ha->link_dcbx_counters          = mbox[4];
2310         ha->link_change_counters        = mbox[5];
2311
2312         device_printf(dev, "%s 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
2313                 __func__, mbox[0],mbox[1],mbox[2],mbox[3],mbox[4],mbox[5]);
2314
2315         return;
2316 }
2317
2318 static void
2319 qls_mbx_about_fw(qla_host_t *ha)
2320 {
2321         uint32_t *mbox;
2322         device_t dev = ha->pci_dev;
2323
2324         mbox = ha->mbox;
2325         bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
2326
2327         mbox[0] = Q81_MBX_ABOUT_FW;
2328
2329         if (qls_mbx_cmd(ha, mbox, 1, mbox, 6)) {
2330                 device_printf(dev, "%s failed\n", __func__);
2331                 return;
2332         }
2333
2334         device_printf(dev, "%s 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
2335                 __func__, mbox[0],mbox[1],mbox[2],mbox[3],mbox[4],mbox[5]);
2336 }
2337
2338 int
2339 qls_mbx_dump_risc_ram(qla_host_t *ha, void *buf, uint32_t r_addr,
2340         uint32_t r_size)
2341 {
2342         bus_addr_t b_paddr;
2343         uint32_t *mbox;
2344         device_t dev = ha->pci_dev;
2345
2346         mbox = ha->mbox;
2347         bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
2348
2349         bzero(ha->mpi_dma.dma_b,(r_size << 2));
2350         b_paddr = ha->mpi_dma.dma_addr;
2351
2352         mbox[0] = Q81_MBX_DUMP_RISC_RAM;
2353         mbox[1] = r_addr & 0xFFFF;
2354         mbox[2] = ((uint32_t)(b_paddr >> 16)) & 0xFFFF;
2355         mbox[3] = ((uint32_t)b_paddr) & 0xFFFF;
2356         mbox[4] = (r_size >> 16) & 0xFFFF;
2357         mbox[5] = r_size & 0xFFFF;
2358         mbox[6] = ((uint32_t)(b_paddr >> 48)) & 0xFFFF;
2359         mbox[7] = ((uint32_t)(b_paddr >> 32)) & 0xFFFF;
2360         mbox[8] = (r_addr >> 16) & 0xFFFF;
2361
2362         bus_dmamap_sync(ha->mpi_dma.dma_tag, ha->mpi_dma.dma_map,
2363                 BUS_DMASYNC_PREREAD);
2364
2365         if (qls_mbx_cmd(ha, mbox, 9, mbox, 1)) {
2366                 device_printf(dev, "%s failed\n", __func__);
2367                 return (-1);
2368         }
2369         if (mbox[0] != 0x4000) {
2370                 device_printf(ha->pci_dev, "%s: failed!\n", __func__);
2371                 return (-1);
2372         } else {
2373                 bus_dmamap_sync(ha->mpi_dma.dma_tag, ha->mpi_dma.dma_map,
2374                         BUS_DMASYNC_POSTREAD);
2375                 bcopy(ha->mpi_dma.dma_b, buf, (r_size << 2));
2376         }
2377
2378         return (0);
2379 }
2380
2381 int 
2382 qls_mpi_reset(qla_host_t *ha)
2383 {
2384         int             count;
2385         uint32_t        data;
2386         device_t        dev = ha->pci_dev;
2387
2388         WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS,\
2389                 Q81_CTL_HCS_CMD_SET_RISC_RESET);
2390
2391         count = 10;
2392         while (count--) {
2393                 data = READ_REG32(ha, Q81_CTL_HOST_CMD_STATUS);
2394                 if (data & Q81_CTL_HCS_RISC_RESET) {
2395                         WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS,\
2396                                 Q81_CTL_HCS_CMD_CLR_RISC_RESET);
2397                         break;
2398                 }
2399                 qls_mdelay(__func__, 10);
2400         }
2401         if (count == 0) {
2402                 device_printf(dev, "%s: failed\n", __func__);
2403                 return (-1);
2404         }
2405         return (0);
2406 }