/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013-2014 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: qls_hw.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 * Content: Contains Hardware dependent functions
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "qls_os.h"
#include "qls_hw.h"
#include "qls_def.h"
#include "qls_inline.h"
#include "qls_ver.h"
#include "qls_glbl.h"
#include "qls_dbg.h"

/*
 * Static Functions
 */
static int qls_wait_for_mac_proto_idx_ready(qla_host_t *ha, uint32_t op);
static int qls_config_unicast_mac_addr(qla_host_t *ha, uint32_t add_mac);
static int qls_config_mcast_mac_addr(qla_host_t *ha, uint8_t *mac_addr,
                uint32_t add_mac, uint32_t index);

static int qls_init_rss(qla_host_t *ha);
static int qls_init_comp_queue(qla_host_t *ha, int cid);
static int qls_init_work_queue(qla_host_t *ha, int wid);
static int qls_init_fw_routing_table(qla_host_t *ha);
static int qls_hw_add_all_mcast(qla_host_t *ha);
static int qls_hw_add_mcast(qla_host_t *ha, uint8_t *mta);
static int qls_hw_del_mcast(qla_host_t *ha, uint8_t *mta);
static int qls_wait_for_flash_ready(qla_host_t *ha);

static int qls_sem_lock(qla_host_t *ha, uint32_t mask, uint32_t value);
static void qls_sem_unlock(qla_host_t *ha, uint32_t mask);

static void qls_free_tx_dma(qla_host_t *ha);
static int qls_alloc_tx_dma(qla_host_t *ha);
static void qls_free_rx_dma(qla_host_t *ha);
static int qls_alloc_rx_dma(qla_host_t *ha);
static void qls_free_mpi_dma(qla_host_t *ha);
static int qls_alloc_mpi_dma(qla_host_t *ha);
static void qls_free_rss_dma(qla_host_t *ha);
static int qls_alloc_rss_dma(qla_host_t *ha);

static int qls_flash_validate(qla_host_t *ha, const char *signature);

static int qls_wait_for_proc_addr_ready(qla_host_t *ha);
static int qls_proc_addr_rd_reg(qla_host_t *ha, uint32_t addr_module,
                uint32_t reg, uint32_t *data);
static int qls_proc_addr_wr_reg(qla_host_t *ha, uint32_t addr_module,
                uint32_t reg, uint32_t data);

static int qls_hw_reset(qla_host_t *ha);

/*
 * MPI Related Functions
 */
static int qls_mbx_cmd(qla_host_t *ha, uint32_t *in_mbx, uint32_t i_count,
                uint32_t *out_mbx, uint32_t o_count);
static int qls_mbx_set_mgmt_ctrl(qla_host_t *ha, uint32_t t_ctrl);
static int qls_mbx_get_mgmt_ctrl(qla_host_t *ha, uint32_t *t_status);
static void qls_mbx_get_link_status(qla_host_t *ha);
static void qls_mbx_about_fw(qla_host_t *ha);

int
qls_get_msix_count(qla_host_t *ha)
{
        return (ha->num_rx_rings);
}

static int
qls_sysctl_mpi_dump(SYSCTL_HANDLER_ARGS)
{
        int err = 0, ret;
        qla_host_t *ha;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        if (ret == 1) {
                ha = (qla_host_t *)arg1;
                qls_mpi_core_dump(ha);
        }
        return (err);
}

static int
qls_sysctl_link_status(SYSCTL_HANDLER_ARGS)
{
        int err = 0, ret;
        qla_host_t *ha;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        if (ret == 1) {
                ha = (qla_host_t *)arg1;
                qls_mbx_get_link_status(ha);
                qls_mbx_about_fw(ha);
        }
        return (err);
}

void
qls_hw_add_sysctls(qla_host_t *ha)
{
        device_t        dev;

        dev = ha->pci_dev;

        ha->num_rx_rings = MAX_RX_RINGS;
        ha->num_tx_rings = MAX_TX_RINGS;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_rx_rings", CTLFLAG_RD, &ha->num_rx_rings,
                ha->num_rx_rings, "Number of Completion Queues");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->num_tx_rings,
                ha->num_tx_rings, "Number of Transmit Rings");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "mpi_dump",
            CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)ha, 0,
            qls_sysctl_mpi_dump, "I", "MPI Dump");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "link_status",
            CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)ha, 0,
            qls_sysctl_link_status, "I", "Link Status");
}

/*
 * Name: qls_free_dma
 * Function: Frees the DMA'able memory allocated in qls_alloc_dma()
 */
void
qls_free_dma(qla_host_t *ha)
{
        qls_free_rss_dma(ha);
        qls_free_mpi_dma(ha);
        qls_free_tx_dma(ha);
        qls_free_rx_dma(ha);
        return;
}

/*
 * Name: qls_alloc_dma
 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
 */
int
qls_alloc_dma(qla_host_t *ha)
{
        if (qls_alloc_rx_dma(ha))
                return (-1);

        if (qls_alloc_tx_dma(ha)) {
                qls_free_rx_dma(ha);
                return (-1);
        }

        if (qls_alloc_mpi_dma(ha)) {
                qls_free_tx_dma(ha);
                qls_free_rx_dma(ha);
                return (-1);
        }

        if (qls_alloc_rss_dma(ha)) {
                qls_free_mpi_dma(ha);
                qls_free_tx_dma(ha);
                qls_free_rx_dma(ha);
                return (-1);
        }

        return (0);
}

static int
qls_wait_for_mac_proto_idx_ready(qla_host_t *ha, uint32_t op)
{
        uint32_t data32;
        uint32_t count = 3;

        while (count--) {
                data32 = READ_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX);

                if (data32 & op)
                        return (0);

                QLA_USEC_DELAY(100);
        }
        ha->qla_initiate_recovery = 1;
        return (-1);
}

/*
 * Name: qls_config_unicast_mac_addr
 * Function: binds/unbinds a unicast MAC address to the interface.
 */
static int
qls_config_unicast_mac_addr(qla_host_t *ha, uint32_t add_mac)
{
        int ret = 0;
        uint32_t mac_upper = 0;
        uint32_t mac_lower = 0;
        uint32_t value = 0, index;

        if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_MAC_SERDES,
                Q81_CTL_SEM_SET_MAC_SERDES)) {
                QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
                return (-1);
        }

        if (add_mac) {
                mac_upper = (ha->mac_addr[0] << 8) | ha->mac_addr[1];
                mac_lower = (ha->mac_addr[2] << 24) | (ha->mac_addr[3] << 16) |
                                (ha->mac_addr[4] << 8) | ha->mac_addr[5];
        }
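        /*
         * A CAM MAC entry spans three 32-bit words: offset 0 holds the
         * lower 32 bits of the MAC address, offset 1 the upper 16 bits,
         * and offset 2 the routing controls (NIC queue, PCI function,
         * CQ id). Each word is written by loading the index register and
         * then the data register; the CAM appears to reserve 128 slots
         * per PCI function, hence the index computed below.
         */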
        ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
        if (ret)
                goto qls_config_unicast_mac_addr_exit;

        index = 128 * (ha->pci_func & 0x1);

        value = (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
                Q81_CTL_MAC_PROTO_AI_TYPE_CAM_MAC;

        WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
        WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_lower);

        ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
        if (ret)
                goto qls_config_unicast_mac_addr_exit;

        value = (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
                Q81_CTL_MAC_PROTO_AI_TYPE_CAM_MAC | 0x1;

        WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
        WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_upper);

        ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
        if (ret)
                goto qls_config_unicast_mac_addr_exit;

        value = (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
                Q81_CTL_MAC_PROTO_AI_TYPE_CAM_MAC | 0x2;

        WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);

        value = Q81_CAM_MAC_OFF2_ROUTE_NIC |
                        ((ha->pci_func & 0x1) << Q81_CAM_MAC_OFF2_FUNC_SHIFT) |
                        (0 << Q81_CAM_MAC_OFF2_CQID_SHIFT);

        WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, value);

qls_config_unicast_mac_addr_exit:
        qls_sem_unlock(ha, Q81_CTL_SEM_MASK_MAC_SERDES);
        return (ret);
}

/*
 * Name: qls_config_mcast_mac_addr
 * Function: binds/unbinds a multicast MAC address to the interface.
 */
static int
qls_config_mcast_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac,
        uint32_t index)
{
        int ret = 0;
        uint32_t mac_upper = 0;
        uint32_t mac_lower = 0;
        uint32_t value = 0;

        if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_MAC_SERDES,
                Q81_CTL_SEM_SET_MAC_SERDES)) {
                QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
                return (-1);
        }

        if (add_mac) {
                mac_upper = (mac_addr[0] << 8) | mac_addr[1];
                mac_lower = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
                                (mac_addr[4] << 8) | mac_addr[5];
        }
        ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
        if (ret)
                goto qls_config_mcast_mac_addr_exit;

        value = Q81_CTL_MAC_PROTO_AI_E |
                        (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
                        Q81_CTL_MAC_PROTO_AI_TYPE_MCAST;

        WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
        WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_lower);

        ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
        if (ret)
                goto qls_config_mcast_mac_addr_exit;

        value = Q81_CTL_MAC_PROTO_AI_E |
                        (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
                        Q81_CTL_MAC_PROTO_AI_TYPE_MCAST | 0x1;

        WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
        WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_upper);

qls_config_mcast_mac_addr_exit:
        qls_sem_unlock(ha, Q81_CTL_SEM_MASK_MAC_SERDES);

        return (ret);
}

/*
 * Name: qls_wait_for_route_idx_ready
 * Function: Waits until the Routing Index Register is ready for the
 *      requested operation.
 */
static int
qls_wait_for_route_idx_ready(qla_host_t *ha, uint32_t op)
{
        uint32_t data32;
        uint32_t count = 3;

        while (count--) {
                data32 = READ_REG32(ha, Q81_CTL_ROUTING_INDEX);

                if (data32 & op)
                        return (0);

                QLA_USEC_DELAY(100);
        }
        ha->qla_initiate_recovery = 1;
        return (-1);
}

static int
qls_load_route_idx_reg(qla_host_t *ha, uint32_t index, uint32_t data)
{
        int ret = 0;

        ret = qls_wait_for_route_idx_ready(ha, Q81_CTL_RI_MW);

        if (ret) {
                device_printf(ha->pci_dev, "%s: [0x%08x, 0x%08x] failed\n",
                        __func__, index, data);
                goto qls_load_route_idx_reg_exit;
        }

        WRITE_REG32(ha, Q81_CTL_ROUTING_INDEX, index);
        WRITE_REG32(ha, Q81_CTL_ROUTING_DATA, data);

qls_load_route_idx_reg_exit:
        return (ret);
}

static int
qls_load_route_idx_reg_locked(qla_host_t *ha, uint32_t index, uint32_t data)
{
        int ret = 0;

        if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG,
                Q81_CTL_SEM_SET_RIDX_DATAREG)) {
                QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
                return (-1);
        }

        ret = qls_load_route_idx_reg(ha, index, data);

        qls_sem_unlock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG);

        return (ret);
}

static int
qls_clear_routing_table(qla_host_t *ha)
{
        int i, ret = 0;

        if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG,
                Q81_CTL_SEM_SET_RIDX_DATAREG)) {
                QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
                return (-1);
        }

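        /*
         * The chip provides 16 routing-index slots (selected via bits
         * 8-11 of the index register). Load each slot's NIC-queue mask
         * with zero so that no stale routing rules survive a reinit.
         */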
        for (i = 0; i < 16; i++) {
                ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_TYPE_NICQMASK |
                        (i << 8) | Q81_CTL_RI_DST_DFLTQ), 0);
                if (ret)
                        break;
        }

        qls_sem_unlock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG);

        return (ret);
}

int
qls_set_promisc(qla_host_t *ha)
{
        int ret;

        ret = qls_load_route_idx_reg_locked(ha,
                        (Q81_CTL_RI_E | Q81_CTL_RI_TYPE_NICQMASK |
                        Q81_CTL_RI_IDX_PROMISCUOUS | Q81_CTL_RI_DST_DFLTQ),
                        Q81_CTL_RD_VALID_PKT);
        return (ret);
}

void
qls_reset_promisc(qla_host_t *ha)
{
        qls_load_route_idx_reg_locked(ha, (Q81_CTL_RI_TYPE_NICQMASK |
                        Q81_CTL_RI_IDX_PROMISCUOUS | Q81_CTL_RI_DST_DFLTQ), 0);
        return;
}

int
qls_set_allmulti(qla_host_t *ha)
{
        int ret;

        ret = qls_load_route_idx_reg_locked(ha,
                        (Q81_CTL_RI_E | Q81_CTL_RI_TYPE_NICQMASK |
                        Q81_CTL_RI_IDX_ALLMULTI | Q81_CTL_RI_DST_DFLTQ),
                        Q81_CTL_RD_MCAST);
        return (ret);
}

void
qls_reset_allmulti(qla_host_t *ha)
{
        qls_load_route_idx_reg_locked(ha, (Q81_CTL_RI_TYPE_NICQMASK |
                        Q81_CTL_RI_IDX_ALLMULTI | Q81_CTL_RI_DST_DFLTQ), 0);
        return;
}

static int
qls_init_fw_routing_table(qla_host_t *ha)
{
        int ret = 0;

        ret = qls_clear_routing_table(ha);
        if (ret)
                return (-1);

        if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG,
                Q81_CTL_SEM_SET_RIDX_DATAREG)) {
                QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
                return (-1);
        }

        ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DROP |
                        Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_ALL_ERROR),
                        Q81_CTL_RD_ERROR_PKT);
        if (ret)
                goto qls_init_fw_routing_table_exit;

        ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DFLTQ |
                        Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_BCAST),
                        Q81_CTL_RD_BCAST);
        if (ret)
                goto qls_init_fw_routing_table_exit;

        if (ha->num_rx_rings > 1) {
                ret = qls_load_route_idx_reg(ha,
                                (Q81_CTL_RI_E | Q81_CTL_RI_DST_RSS |
                                Q81_CTL_RI_TYPE_NICQMASK |
                                Q81_CTL_RI_IDX_RSS_MATCH),
                                Q81_CTL_RD_RSS_MATCH);
                if (ret)
                        goto qls_init_fw_routing_table_exit;
        }

        ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DFLTQ |
                        Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_MCAST_MATCH),
                        Q81_CTL_RD_MCAST_REG_MATCH);
        if (ret)
                goto qls_init_fw_routing_table_exit;

        ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DFLTQ |
                        Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_CAM_HIT),
                        Q81_CTL_RD_CAM_HIT);
        if (ret)
                goto qls_init_fw_routing_table_exit;

qls_init_fw_routing_table_exit:
        qls_sem_unlock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG);
        return (ret);
}

static int
qls_tx_tso_chksum(qla_host_t *ha, struct mbuf *mp, q81_tx_tso_t *tx_mac)
{
#if defined(INET) || defined(INET6)
        struct ether_vlan_header *eh;
        struct ip *ip;
#if defined(INET6)
        struct ip6_hdr *ip6;
#endif
        struct tcphdr *th;
        uint32_t ehdrlen, ip_hlen;
        int ret = 0;
        uint16_t etype;
        uint8_t buf[sizeof(struct ip6_hdr)];

        eh = mtod(mp, struct ether_vlan_header *);

        if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
                ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
                etype = ntohs(eh->evl_proto);
        } else {
                ehdrlen = ETHER_HDR_LEN;
                etype = ntohs(eh->evl_encap_proto);
        }

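        /*
         * phdr_offsets packs two header offsets into one field: the low
         * bits carry the offset of the IP header (ehdrlen) and the bits
         * above Q81_TX_TSO_PHDR_SHIFT carry the offset of the L4 header
         * (ehdrlen + ip_hlen).
         */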
        switch (etype) {
#ifdef INET
                case ETHERTYPE_IP:
                        ip = (struct ip *)(mp->m_data + ehdrlen);

                        ip_hlen = sizeof (struct ip);

                        if (mp->m_len < (ehdrlen + ip_hlen)) {
                                m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
                                ip = (struct ip *)buf;
                        }
                        tx_mac->opcode = Q81_IOCB_TX_TSO;
                        tx_mac->flags |= Q81_TX_TSO_FLAGS_IPV4;

                        tx_mac->phdr_offsets = ehdrlen;

                        tx_mac->phdr_offsets |= ((ehdrlen + ip_hlen) <<
                                                        Q81_TX_TSO_PHDR_SHIFT);

                        ip->ip_sum = 0;

                        if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
                                tx_mac->flags |= Q81_TX_TSO_FLAGS_LSO;

                                th = (struct tcphdr *)(ip + 1);

                                th->th_sum = in_pseudo(ip->ip_src.s_addr,
                                                ip->ip_dst.s_addr,
                                                htons(IPPROTO_TCP));
                                tx_mac->mss = mp->m_pkthdr.tso_segsz;
                                tx_mac->phdr_length = ip_hlen + ehdrlen +
                                                        (th->th_off << 2);
                                break;
                        }
                        tx_mac->vlan_off |= Q81_TX_TSO_VLAN_OFF_IC;

                        if (ip->ip_p == IPPROTO_TCP) {
                                tx_mac->flags |= Q81_TX_TSO_FLAGS_TC;
                        } else if (ip->ip_p == IPPROTO_UDP) {
                                tx_mac->flags |= Q81_TX_TSO_FLAGS_UC;
                        }
                break;
#endif

#ifdef INET6
                case ETHERTYPE_IPV6:
                        ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);

                        ip_hlen = sizeof(struct ip6_hdr);

                        if (mp->m_len < (ehdrlen + ip_hlen)) {
                                m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
                                        buf);
                                ip6 = (struct ip6_hdr *)buf;
                        }

                        tx_mac->opcode = Q81_IOCB_TX_TSO;
                        tx_mac->flags |= Q81_TX_TSO_FLAGS_IPV6;
                        tx_mac->vlan_off |= Q81_TX_TSO_VLAN_OFF_IC;

                        tx_mac->phdr_offsets = ehdrlen;
                        tx_mac->phdr_offsets |= ((ehdrlen + ip_hlen) <<
                                                        Q81_TX_TSO_PHDR_SHIFT);

                        if (ip6->ip6_nxt == IPPROTO_TCP) {
                                tx_mac->flags |= Q81_TX_TSO_FLAGS_TC;
                        } else if (ip6->ip6_nxt == IPPROTO_UDP) {
                                tx_mac->flags |= Q81_TX_TSO_FLAGS_UC;
                        }
                break;
#endif

                default:
                        ret = -1;
                break;
        }

        return (ret);
#else
        return (-1);
#endif
}

#define QLA_TX_MIN_FREE 2
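/*
 * txr_done tracks the consumer index reported back by the chip and
 * txr_next the driver's producer index, so the free count is the
 * circular distance between the two. For example, done = 5 and next = 3
 * leaves 2 free slots, while done = 3 and next = 5 leaves
 * (NUM_TX_DESCRIPTORS - 2).
 */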
int
qls_hw_tx_done(qla_host_t *ha, uint32_t txr_idx)
{
        uint32_t txr_done, txr_next;

        txr_done = ha->tx_ring[txr_idx].txr_done;
        txr_next = ha->tx_ring[txr_idx].txr_next;

        if (txr_done == txr_next) {
                ha->tx_ring[txr_idx].txr_free = NUM_TX_DESCRIPTORS;
        } else if (txr_done > txr_next) {
                ha->tx_ring[txr_idx].txr_free = txr_done - txr_next;
        } else {
                ha->tx_ring[txr_idx].txr_free = NUM_TX_DESCRIPTORS +
                        txr_done - txr_next;
        }

        if (ha->tx_ring[txr_idx].txr_free <= QLA_TX_MIN_FREE)
                return (-1);

        return (0);
}

/*
 * Name: qls_hw_send
 * Function: Transmits a packet. It first checks if the packet is a
 *      candidate for Large TCP Segment Offload and then for UDP/TCP checksum
 *      offload. If neither of these criteria is met, it is transmitted
 *      as a regular Ethernet frame.
 */
int
qls_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
        uint32_t txr_next, struct mbuf *mp, uint32_t txr_idx)
{
        q81_tx_mac_t *tx_mac;
        q81_txb_desc_t *tx_desc;
        uint32_t total_length = 0;
        uint32_t i;
        device_t dev;
        int ret = 0;

        dev = ha->pci_dev;

        total_length = mp->m_pkthdr.len;

        if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
                device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
                        __func__, total_length);
                return (-1);
        }

        if (ha->tx_ring[txr_idx].txr_free <= (NUM_TX_DESCRIPTORS >> 2)) {
                if (qls_hw_tx_done(ha, txr_idx)) {
                        device_printf(dev, "%s: tx_free[%d] = %d\n",
                                __func__, txr_idx,
                                ha->tx_ring[txr_idx].txr_free);
                        return (-1);
                }
        }

        tx_mac = (q81_tx_mac_t *)&ha->tx_ring[txr_idx].wq_vaddr[txr_next];

        bzero(tx_mac, sizeof(q81_tx_mac_t));

        if ((mp->m_pkthdr.csum_flags &
                        (CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO)) != 0) {
                ret = qls_tx_tso_chksum(ha, mp, (q81_tx_tso_t *)tx_mac);
                if (ret)
                        return (EINVAL);

                if (mp->m_pkthdr.csum_flags & CSUM_TSO)
                        ha->tx_ring[txr_idx].tx_tso_frames++;
                else
                        ha->tx_ring[txr_idx].tx_frames++;

        } else {
                tx_mac->opcode = Q81_IOCB_TX_MAC;
        }

        if (mp->m_flags & M_VLANTAG) {
                tx_mac->vlan_tci = mp->m_pkthdr.ether_vtag;
                tx_mac->vlan_off |= Q81_TX_MAC_VLAN_OFF_V;

                ha->tx_ring[txr_idx].tx_vlan_frames++;
        }

        tx_mac->frame_length = total_length;

        tx_mac->tid_lo = txr_next;

        if (nsegs <= MAX_TX_MAC_DESC) {
                QL_DPRINT2((dev, "%s: 1 [%d, %d]\n", __func__, total_length,
                        tx_mac->tid_lo));

                for (i = 0; i < nsegs; i++) {
                        tx_mac->txd[i].baddr = segs->ds_addr;
                        tx_mac->txd[i].length = segs->ds_len;
                        segs++;
                }
                tx_mac->txd[(nsegs - 1)].flags = Q81_RXB_DESC_FLAGS_E;

        } else {
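                /*
                 * Too many segments to describe in the IOCB itself:
                 * point the first (and only) inline descriptor at this
                 * tx_buf's OAL (outbound address list) block with the
                 * continuation flag set, and spill the complete segment
                 * list into the OAL.
                 */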
                QL_DPRINT2((dev, "%s: 2 [%d, %d]\n", __func__, total_length,
                        tx_mac->tid_lo));

                tx_mac->txd[0].baddr =
                        ha->tx_ring[txr_idx].tx_buf[txr_next].oal_paddr;
                tx_mac->txd[0].length =
                        nsegs * (sizeof(q81_txb_desc_t));
                tx_mac->txd[0].flags = Q81_RXB_DESC_FLAGS_C;

                tx_desc = ha->tx_ring[txr_idx].tx_buf[txr_next].oal_vaddr;

                for (i = 0; i < nsegs; i++) {
                        tx_desc->baddr = segs->ds_addr;
                        tx_desc->length = segs->ds_len;

                        if (i == (nsegs - 1))
                                tx_desc->flags = Q81_RXB_DESC_FLAGS_E;
                        else
                                tx_desc->flags = 0;

                        segs++;
                        tx_desc++;
                }
        }
        txr_next = (txr_next + 1) & (NUM_TX_DESCRIPTORS - 1);
        ha->tx_ring[txr_idx].txr_next = txr_next;

        ha->tx_ring[txr_idx].txr_free--;

        Q81_WR_WQ_PROD_IDX(txr_idx, txr_next);

        return (0);
}

/*
 * Name: qls_del_hw_if
 * Function: Destroys the hardware specific entities corresponding to an
 *      Ethernet Interface
 */
void
qls_del_hw_if(qla_host_t *ha)
{
        uint32_t value;
        int i;

        if (ha->hw_init == 0) {
                qls_hw_reset(ha);
                return;
        }

        for (i = 0; i < ha->num_tx_rings; i++) {
                Q81_SET_WQ_INVALID(i);
        }
        for (i = 0; i < ha->num_rx_rings; i++) {
                Q81_SET_CQ_INVALID(i);
        }

        for (i = 0; i < ha->num_rx_rings; i++) {
                Q81_DISABLE_INTR(ha, i); /* MSI-x i */
        }

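        /*
         * The interrupt-enable register treats the upper 16 bits as a
         * write mask for the lower 16: writing the mask bit alone (as
         * below) clears the corresponding control bit, while OR-ing in
         * the low bit as well would set it. Clear IHD and EI to quiesce
         * interrupts before resetting the chip.
         */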
        value = (Q81_CTL_INTRE_IHD << Q81_CTL_INTRE_MASK_SHIFT);
        WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);

        value = (Q81_CTL_INTRE_EI << Q81_CTL_INTRE_MASK_SHIFT);
        WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);
        ha->flags.intr_enable = 0;

        qls_hw_reset(ha);

        return;
}

/*
 * Name: qls_init_hw_if
 * Function: Creates the hardware specific entities corresponding to an
 *      Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
 *      corresponding to the interface. Enables LRO if allowed.
 */
int
qls_init_hw_if(qla_host_t *ha)
{
        uint32_t        value;
        int             ret = 0;
        int             i;

        QL_DPRINT2((ha->pci_dev, "%s:enter\n", __func__));

        ret = qls_hw_reset(ha);
        if (ret)
                goto qls_init_hw_if_exit;

        ha->vm_pgsize = 4096;

        /* Enable FAE and EFE bits in System Register */
        value = Q81_CTL_SYSTEM_ENABLE_FAE | Q81_CTL_SYSTEM_ENABLE_EFE;
        value = (value << Q81_CTL_SYSTEM_MASK_SHIFT) | value;

        WRITE_REG32(ha, Q81_CTL_SYSTEM, value);

        /* Set Default Completion Queue_ID in NIC Rcv Configuration Register */
        value = (Q81_CTL_NIC_RCVC_DCQ_MASK << Q81_CTL_NIC_RCVC_MASK_SHIFT);
        WRITE_REG32(ha, Q81_CTL_NIC_RCV_CONFIG, value);

        /* Function Specific Control Register - Set Page Size and Enable NIC */
        value = Q81_CTL_FUNC_SPECIFIC_FE |
                Q81_CTL_FUNC_SPECIFIC_VM_PGSIZE_MASK |
                Q81_CTL_FUNC_SPECIFIC_EPC_O |
                Q81_CTL_FUNC_SPECIFIC_EPC_I |
                Q81_CTL_FUNC_SPECIFIC_EC;
        value = (value << Q81_CTL_FUNC_SPECIFIC_MASK_SHIFT) |
                        Q81_CTL_FUNC_SPECIFIC_FE |
                        Q81_CTL_FUNC_SPECIFIC_VM_PGSIZE_4K |
                        Q81_CTL_FUNC_SPECIFIC_EPC_O |
                        Q81_CTL_FUNC_SPECIFIC_EPC_I |
                        Q81_CTL_FUNC_SPECIFIC_EC;

        WRITE_REG32(ha, Q81_CTL_FUNC_SPECIFIC, value);

        /* Interrupt Mask Register */
        value = Q81_CTL_INTRM_PI;
        value = (value << Q81_CTL_INTRM_MASK_SHIFT) | value;

        WRITE_REG32(ha, Q81_CTL_INTR_MASK, value);

        /* Initialize Completion Queues */
        for (i = 0; i < ha->num_rx_rings; i++) {
                ret = qls_init_comp_queue(ha, i);
                if (ret)
                        goto qls_init_hw_if_exit;
        }

        if (ha->num_rx_rings > 1) {
                ret = qls_init_rss(ha);
                if (ret)
                        goto qls_init_hw_if_exit;
        }

        /* Initialize Work Queues */

        for (i = 0; i < ha->num_tx_rings; i++) {
                ret = qls_init_work_queue(ha, i);
                if (ret)
                        goto qls_init_hw_if_exit;
        }

        if (ret)
                goto qls_init_hw_if_exit;

        /* Set up CAM RAM with MAC Address */
        ret = qls_config_unicast_mac_addr(ha, 1);
        if (ret)
                goto qls_init_hw_if_exit;

        ret = qls_hw_add_all_mcast(ha);
        if (ret)
                goto qls_init_hw_if_exit;

        /* Initialize Firmware Routing Table */
        ret = qls_init_fw_routing_table(ha);
        if (ret)
                goto qls_init_hw_if_exit;

        /* Get Chip Revision ID */
        ha->rev_id = READ_REG32(ha, Q81_CTL_REV_ID);

        /* Enable Global Interrupt */
        value = Q81_CTL_INTRE_EI;
        value = (value << Q81_CTL_INTRE_MASK_SHIFT) | value;

        WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);

        /* Enable Interrupt Handshake Disable */
        value = Q81_CTL_INTRE_IHD;
        value = (value << Q81_CTL_INTRE_MASK_SHIFT) | value;

        WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);

        /* Enable Completion Interrupt */

        ha->flags.intr_enable = 1;

        for (i = 0; i < ha->num_rx_rings; i++) {
                Q81_ENABLE_INTR(ha, i); /* MSI-x i */
        }

        ha->hw_init = 1;

        qls_mbx_get_link_status(ha);

        QL_DPRINT2((ha->pci_dev, "%s:rxr [0x%08x]\n", __func__,
                ha->rx_ring[0].cq_db_offset));
        QL_DPRINT2((ha->pci_dev, "%s:txr [0x%08x]\n", __func__,
                ha->tx_ring[0].wq_db_offset));

        for (i = 0; i < ha->num_rx_rings; i++) {
                Q81_WR_CQ_CONS_IDX(i, 0);
                Q81_WR_LBQ_PROD_IDX(i, ha->rx_ring[i].lbq_in);
                Q81_WR_SBQ_PROD_IDX(i, ha->rx_ring[i].sbq_in);

                QL_DPRINT2((ha->pci_dev,
                        "%s: [wq_idx, cq_idx, lbq_idx, sbq_idx]"
                        "[0x%08x, 0x%08x, 0x%08x, 0x%08x]\n", __func__,
                        Q81_RD_WQ_IDX(i), Q81_RD_CQ_IDX(i), Q81_RD_LBQ_IDX(i),
                        Q81_RD_SBQ_IDX(i)));
        }

        for (i = 0; i < ha->num_rx_rings; i++) {
                Q81_SET_CQ_VALID(i);
        }

qls_init_hw_if_exit:
        QL_DPRINT2((ha->pci_dev, "%s:exit\n", __func__));
        return (ret);
}

static int
qls_wait_for_config_reg_bits(qla_host_t *ha, uint32_t bits, uint32_t value)
{
        uint32_t data32;
        uint32_t count = 3;

        while (count--) {
                data32 = READ_REG32(ha, Q81_CTL_CONFIG);

                if ((data32 & bits) == value)
                        return (0);

                QLA_USEC_DELAY(100);
        }
        ha->qla_initiate_recovery = 1;
        device_printf(ha->pci_dev, "%s: failed\n", __func__);
        return (-1);
}

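/*
 * Default RSS hash key loaded into the chip; IPv6 hashing consumes all
 * 40 bytes, IPv4 hashing only the first 16. It appears to be a
 * byte-order variant of the widely used Toeplitz sample key.
 */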
static uint8_t q81_hash_key[] = {
                        0xda, 0x56, 0x5a, 0x6d,
                        0xc2, 0x0e, 0x5b, 0x25,
                        0x3d, 0x25, 0x67, 0x41,
                        0xb0, 0x8f, 0xa3, 0x43,
                        0xcb, 0x2b, 0xca, 0xd0,
                        0xb4, 0x30, 0x7b, 0xae,
                        0xa3, 0x2d, 0xcb, 0x77,
                        0x0c, 0xf2, 0x30, 0x80,
                        0x3b, 0xb7, 0x42, 0x6a,
                        0xfa, 0x01, 0xac, 0xbe };

static int
qls_init_rss(qla_host_t *ha)
{
        q81_rss_icb_t   *rss_icb;
        int             ret = 0;
        int             i;
        uint32_t        value;

        rss_icb = ha->rss_dma.dma_b;

        bzero(rss_icb, sizeof (q81_rss_icb_t));

        rss_icb->flags_base_cq_num = Q81_RSS_ICB_FLAGS_L4K |
                                Q81_RSS_ICB_FLAGS_L6K | Q81_RSS_ICB_FLAGS_LI |
                                Q81_RSS_ICB_FLAGS_LB | Q81_RSS_ICB_FLAGS_LM |
                                Q81_RSS_ICB_FLAGS_RT4 | Q81_RSS_ICB_FLAGS_RT6;

        rss_icb->mask = 0x3FF;

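        /*
         * Fill the indirection table round-robin across the completion
         * queues; the 0x3FF mask implies 1024 table entries, and the
         * (num_rx_rings - 1) masking assumes a power-of-two ring count.
         */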
        for (i = 0; i < Q81_RSS_ICB_NUM_INDTBL_ENTRIES; i++) {
                rss_icb->cq_id[i] = (i & (ha->num_rx_rings - 1));
        }

        memcpy(rss_icb->ipv6_rss_hash_key, q81_hash_key, 40);
        memcpy(rss_icb->ipv4_rss_hash_key, q81_hash_key, 16);

        ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LR, 0);

        if (ret)
                goto qls_init_rss_exit;

        ret = qls_sem_lock(ha, Q81_CTL_SEM_MASK_ICB, Q81_CTL_SEM_SET_ICB);

        if (ret) {
                QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
                goto qls_init_rss_exit;
        }

        value = (uint32_t)ha->rss_dma.dma_addr;
        WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_LO, value);

        value = (uint32_t)(ha->rss_dma.dma_addr >> 32);
        WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_HI, value);

        qls_sem_unlock(ha, Q81_CTL_SEM_MASK_ICB);

        value = (Q81_CTL_CONFIG_LR << Q81_CTL_CONFIG_MASK_SHIFT) |
                        Q81_CTL_CONFIG_LR;

        WRITE_REG32(ha, Q81_CTL_CONFIG, value);

        ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LR, 0);

qls_init_rss_exit:
        return (ret);
}

static int
qls_init_comp_queue(qla_host_t *ha, int cid)
{
        q81_cq_icb_t    *cq_icb;
        qla_rx_ring_t   *rxr;
        int             ret = 0;
        uint32_t        value;

        rxr = &ha->rx_ring[cid];

        rxr->cq_db_offset = ha->vm_pgsize * (128 + cid);

        cq_icb = rxr->cq_icb_vaddr;

        bzero(cq_icb, sizeof (q81_cq_icb_t));

        cq_icb->msix_vector = cid;
        cq_icb->flags = Q81_CQ_ICB_FLAGS_LC |
                        Q81_CQ_ICB_FLAGS_LI |
                        Q81_CQ_ICB_FLAGS_LL |
                        Q81_CQ_ICB_FLAGS_LS |
                        Q81_CQ_ICB_FLAGS_LV;

        cq_icb->length_v = NUM_CQ_ENTRIES;

        cq_icb->cq_baddr_lo = (rxr->cq_base_paddr & 0xFFFFFFFF);
        cq_icb->cq_baddr_hi = (rxr->cq_base_paddr >> 32) & 0xFFFFFFFF;

        cq_icb->cqi_addr_lo = (rxr->cqi_paddr & 0xFFFFFFFF);
        cq_icb->cqi_addr_hi = (rxr->cqi_paddr >> 32) & 0xFFFFFFFF;

        cq_icb->pkt_idelay = 10;
        cq_icb->idelay = 100;

        cq_icb->lbq_baddr_lo = (rxr->lbq_addr_tbl_paddr & 0xFFFFFFFF);
        cq_icb->lbq_baddr_hi = (rxr->lbq_addr_tbl_paddr >> 32) & 0xFFFFFFFF;

        cq_icb->lbq_bsize = QLA_LGB_SIZE;
        cq_icb->lbq_length = QLA_NUM_LGB_ENTRIES;

        cq_icb->sbq_baddr_lo = (rxr->sbq_addr_tbl_paddr & 0xFFFFFFFF);
        cq_icb->sbq_baddr_hi = (rxr->sbq_addr_tbl_paddr >> 32) & 0xFFFFFFFF;

        cq_icb->sbq_bsize = (uint16_t)ha->msize;
        cq_icb->sbq_length = QLA_NUM_SMB_ENTRIES;

        QL_DUMP_CQ(ha);

        ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LCQ, 0);

        if (ret)
                goto qls_init_comp_queue_exit;

        ret = qls_sem_lock(ha, Q81_CTL_SEM_MASK_ICB, Q81_CTL_SEM_SET_ICB);

        if (ret) {
                QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
                goto qls_init_comp_queue_exit;
        }

        value = (uint32_t)rxr->cq_icb_paddr;
        WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_LO, value);

        value = (uint32_t)(rxr->cq_icb_paddr >> 32);
        WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_HI, value);

        qls_sem_unlock(ha, Q81_CTL_SEM_MASK_ICB);

        value = Q81_CTL_CONFIG_LCQ | Q81_CTL_CONFIG_Q_NUM_MASK;
        value = (value << Q81_CTL_CONFIG_MASK_SHIFT) | Q81_CTL_CONFIG_LCQ;
        value |= (cid << Q81_CTL_CONFIG_Q_NUM_SHIFT);
        WRITE_REG32(ha, Q81_CTL_CONFIG, value);

        ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LCQ, 0);

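        /*
         * Prime the ring indices. The LBQ/SBQ producer indices start at
         * the largest multiple of 16 below the ring size; the hardware
         * presumably expects producer updates in 16-entry units.
         */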
        rxr->cq_next = 0;
        rxr->lbq_next = rxr->lbq_free = 0;
        rxr->sbq_next = rxr->sbq_free = 0;
        rxr->rx_free = rxr->rx_next = 0;
        rxr->lbq_in = (QLA_NUM_LGB_ENTRIES - 1) & ~0xF;
        rxr->sbq_in = (QLA_NUM_SMB_ENTRIES - 1) & ~0xF;

qls_init_comp_queue_exit:
        return (ret);
}

static int
qls_init_work_queue(qla_host_t *ha, int wid)
{
        q81_wq_icb_t    *wq_icb;
        qla_tx_ring_t   *txr;
        int             ret = 0;
        uint32_t        value;

        txr = &ha->tx_ring[wid];

        txr->wq_db_addr = (struct resource *)((uint8_t *)ha->pci_reg1
                                                + (ha->vm_pgsize * wid));

        txr->wq_db_offset = (ha->vm_pgsize * wid);

        wq_icb = txr->wq_icb_vaddr;
        bzero(wq_icb, sizeof (q81_wq_icb_t));

        wq_icb->length_v = NUM_TX_DESCRIPTORS | Q81_WQ_ICB_VALID;

        wq_icb->flags = Q81_WQ_ICB_FLAGS_LO | Q81_WQ_ICB_FLAGS_LI |
                        Q81_WQ_ICB_FLAGS_LB | Q81_WQ_ICB_FLAGS_LC;

        wq_icb->wqcqid_rss = wid;

        wq_icb->baddr_lo = txr->wq_paddr & 0xFFFFFFFF;
        wq_icb->baddr_hi = (txr->wq_paddr >> 32) & 0xFFFFFFFF;

        wq_icb->ci_addr_lo = txr->txr_cons_paddr & 0xFFFFFFFF;
        wq_icb->ci_addr_hi = (txr->txr_cons_paddr >> 32) & 0xFFFFFFFF;

        ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LRQ, 0);

        if (ret)
                goto qls_init_wq_exit;

        ret = qls_sem_lock(ha, Q81_CTL_SEM_MASK_ICB, Q81_CTL_SEM_SET_ICB);

        if (ret) {
                QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
                goto qls_init_wq_exit;
        }

        value = (uint32_t)txr->wq_icb_paddr;
        WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_LO, value);

        value = (uint32_t)(txr->wq_icb_paddr >> 32);
        WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_HI, value);

        qls_sem_unlock(ha, Q81_CTL_SEM_MASK_ICB);

        value = Q81_CTL_CONFIG_LRQ | Q81_CTL_CONFIG_Q_NUM_MASK;
        value = (value << Q81_CTL_CONFIG_MASK_SHIFT) | Q81_CTL_CONFIG_LRQ;
        value |= (wid << Q81_CTL_CONFIG_Q_NUM_SHIFT);
        WRITE_REG32(ha, Q81_CTL_CONFIG, value);

        ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LRQ, 0);

        txr->txr_free = NUM_TX_DESCRIPTORS;
        txr->txr_next = 0;
        txr->txr_done = 0;

qls_init_wq_exit:
        return (ret);
}

static int
qls_hw_add_all_mcast(qla_host_t *ha)
{
        int i, nmcast;

        nmcast = ha->nmcast;

        for (i = 0; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
                if ((ha->mcast[i].addr[0] != 0) ||
                        (ha->mcast[i].addr[1] != 0) ||
                        (ha->mcast[i].addr[2] != 0) ||
                        (ha->mcast[i].addr[3] != 0) ||
                        (ha->mcast[i].addr[4] != 0) ||
                        (ha->mcast[i].addr[5] != 0)) {
                        if (qls_config_mcast_mac_addr(ha, ha->mcast[i].addr,
                                1, i)) {
                                device_printf(ha->pci_dev, "%s: failed\n",
                                        __func__);
                                return (-1);
                        }

                        nmcast--;
                }
        }
        return (0);
}

static int
qls_hw_add_mcast(qla_host_t *ha, uint8_t *mta)
{
        int i;

        for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
                if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0)
                        return (0); /* it has already been added */
        }

        for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
                if ((ha->mcast[i].addr[0] == 0) &&
                        (ha->mcast[i].addr[1] == 0) &&
                        (ha->mcast[i].addr[2] == 0) &&
                        (ha->mcast[i].addr[3] == 0) &&
                        (ha->mcast[i].addr[4] == 0) &&
                        (ha->mcast[i].addr[5] == 0)) {
                        if (qls_config_mcast_mac_addr(ha, mta, 1, i))
                                return (-1);

                        bcopy(mta, ha->mcast[i].addr, Q8_MAC_ADDR_LEN);
                        ha->nmcast++;

                        return (0);
                }
        }
        return (0);
}

static int
qls_hw_del_mcast(qla_host_t *ha, uint8_t *mta)
{
        int i;

        for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
                if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) {
                        if (qls_config_mcast_mac_addr(ha, mta, 0, i))
                                return (-1);

                        bzero(ha->mcast[i].addr, Q8_MAC_ADDR_LEN);

                        ha->nmcast--;

                        return (0);
                }
        }
        return (0);
}

/*
 * Name: qls_hw_set_multi
 * Function: Sets the multicast addresses provided by the host O.S. into the
 *      hardware (for the given interface)
 */
void
qls_hw_set_multi(qla_host_t *ha, uint8_t *mta, uint32_t mcnt,
        uint32_t add_mac)
{
        int i;

        for (i = 0; i < mcnt; i++) {
                if (add_mac) {
                        if (qls_hw_add_mcast(ha, mta))
                                break;
                } else {
                        if (qls_hw_del_mcast(ha, mta))
                                break;
                }

                mta += Q8_MAC_ADDR_LEN;
        }
        return;
}

void
qls_update_link_state(qla_host_t *ha)
{
        uint32_t link_state;
        uint32_t prev_link_state;

        if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                ha->link_up = 0;
                return;
        }
        link_state = READ_REG32(ha, Q81_CTL_STATUS);

        prev_link_state = ha->link_up;

        if ((ha->pci_func & 0x1) == 0)
                ha->link_up = ((link_state & Q81_CTL_STATUS_PL0) ? 1 : 0);
        else
                ha->link_up = ((link_state & Q81_CTL_STATUS_PL1) ? 1 : 0);

        if (prev_link_state != ha->link_up) {
                if (ha->link_up) {
                        if_link_state_change(ha->ifp, LINK_STATE_UP);
                } else {
                        if_link_state_change(ha->ifp, LINK_STATE_DOWN);
                }
        }
        return;
}

static void
qls_free_tx_ring_dma(qla_host_t *ha, int r_idx)
{
        if (ha->tx_ring[r_idx].flags.wq_dma) {
                qls_free_dmabuf(ha, &ha->tx_ring[r_idx].wq_dma);
                ha->tx_ring[r_idx].flags.wq_dma = 0;
        }

        if (ha->tx_ring[r_idx].flags.privb_dma) {
                qls_free_dmabuf(ha, &ha->tx_ring[r_idx].privb_dma);
                ha->tx_ring[r_idx].flags.privb_dma = 0;
        }
        return;
}

static void
qls_free_tx_dma(qla_host_t *ha)
{
        int i, j;
        qla_tx_buf_t *txb;

        for (i = 0; i < ha->num_tx_rings; i++) {
                qls_free_tx_ring_dma(ha, i);

                for (j = 0; j < NUM_TX_DESCRIPTORS; j++) {
                        txb = &ha->tx_ring[i].tx_buf[j];

                        if (txb->map) {
                                bus_dmamap_destroy(ha->tx_tag, txb->map);
                        }
                }
        }

        if (ha->tx_tag != NULL) {
                bus_dma_tag_destroy(ha->tx_tag);
                ha->tx_tag = NULL;
        }

        return;
}

static int
qls_alloc_tx_ring_dma(qla_host_t *ha, int ridx)
{
        int             ret = 0, i;
        uint8_t         *v_addr;
        bus_addr_t      p_addr;
        qla_tx_buf_t    *txb;
        device_t        dev = ha->pci_dev;

        ha->tx_ring[ridx].wq_dma.alignment = 8;
        ha->tx_ring[ridx].wq_dma.size =
                NUM_TX_DESCRIPTORS * (sizeof (q81_tx_cmd_t));

        ret = qls_alloc_dmabuf(ha, &ha->tx_ring[ridx].wq_dma);

        if (ret) {
                device_printf(dev, "%s: [%d] txr failed\n", __func__, ridx);
                goto qls_alloc_tx_ring_dma_exit;
        }
        ha->tx_ring[ridx].flags.wq_dma = 1;

        ha->tx_ring[ridx].privb_dma.alignment = 8;
        ha->tx_ring[ridx].privb_dma.size = QLA_TX_PRIVATE_BSIZE;

        ret = qls_alloc_dmabuf(ha, &ha->tx_ring[ridx].privb_dma);

        if (ret) {
                device_printf(dev, "%s: [%d] oalb failed\n", __func__, ridx);
                goto qls_alloc_tx_ring_dma_exit;
        }

        ha->tx_ring[ridx].flags.privb_dma = 1;

        ha->tx_ring[ridx].wq_vaddr = ha->tx_ring[ridx].wq_dma.dma_b;
        ha->tx_ring[ridx].wq_paddr = ha->tx_ring[ridx].wq_dma.dma_addr;

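        /*
         * Carve up the private buffer in place: the WQ ICB sits at the
         * base, the consumer-index word at the half-page mark, and the
         * per-descriptor OAL blocks are laid out from there in
         * QLA_OAL_BLK_SIZE strides.
         */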
        v_addr = ha->tx_ring[ridx].privb_dma.dma_b;
        p_addr = ha->tx_ring[ridx].privb_dma.dma_addr;

        ha->tx_ring[ridx].wq_icb_vaddr = v_addr;
        ha->tx_ring[ridx].wq_icb_paddr = p_addr;

        ha->tx_ring[ridx].txr_cons_vaddr =
                (uint32_t *)(v_addr + (PAGE_SIZE >> 1));
        ha->tx_ring[ridx].txr_cons_paddr = p_addr + (PAGE_SIZE >> 1);

        v_addr = v_addr + (PAGE_SIZE >> 1);
        p_addr = p_addr + (PAGE_SIZE >> 1);

        txb = ha->tx_ring[ridx].tx_buf;

        for (i = 0; i < NUM_TX_DESCRIPTORS; i++) {
                txb[i].oal_vaddr = v_addr;
                txb[i].oal_paddr = p_addr;

                v_addr = v_addr + QLA_OAL_BLK_SIZE;
                p_addr = p_addr + QLA_OAL_BLK_SIZE;
        }

qls_alloc_tx_ring_dma_exit:
        return (ret);
}

static int
qls_alloc_tx_dma(qla_host_t *ha)
{
        int     i, j;
        int     ret = 0;
        qla_tx_buf_t *txb;

        if (bus_dma_tag_create(NULL,    /* parent */
                1, 0,    /* alignment, bounds */
                BUS_SPACE_MAXADDR,       /* lowaddr */
                BUS_SPACE_MAXADDR,       /* highaddr */
                NULL, NULL,      /* filter, filterarg */
                QLA_MAX_TSO_FRAME_SIZE,     /* maxsize */
                QLA_MAX_SEGMENTS,        /* nsegments */
                PAGE_SIZE,        /* maxsegsize */
                BUS_DMA_ALLOCNOW,        /* flags */
                NULL,    /* lockfunc */
                NULL,    /* lockfuncarg */
                &ha->tx_tag)) {
                device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n",
                        __func__);
                return (ENOMEM);
        }

        for (i = 0; i < ha->num_tx_rings; i++) {
                ret = qls_alloc_tx_ring_dma(ha, i);

                if (ret) {
                        qls_free_tx_dma(ha);
                        break;
                }

                for (j = 0; j < NUM_TX_DESCRIPTORS; j++) {
                        txb = &ha->tx_ring[i].tx_buf[j];

                        ret = bus_dmamap_create(ha->tx_tag,
                                BUS_DMA_NOWAIT, &txb->map);
                        if (ret) {
                                ha->err_tx_dmamap_create++;
                                device_printf(ha->pci_dev,
                                "%s: bus_dmamap_create failed[%d, %d, %d]\n",
                                __func__, ret, i, j);

                                qls_free_tx_dma(ha);

                                return (ret);
                        }
                }
        }

        return (ret);
}

static void
qls_free_rss_dma(qla_host_t *ha)
{
        qls_free_dmabuf(ha, &ha->rss_dma);
        ha->flags.rss_dma = 0;
}

static int
qls_alloc_rss_dma(qla_host_t *ha)
{
        int ret = 0;

        ha->rss_dma.alignment = 4;
        ha->rss_dma.size = PAGE_SIZE;

        ret = qls_alloc_dmabuf(ha, &ha->rss_dma);

        if (ret)
                device_printf(ha->pci_dev, "%s: failed\n", __func__);
        else
                ha->flags.rss_dma = 1;

        return (ret);
}

static void
qls_free_mpi_dma(qla_host_t *ha)
{
        qls_free_dmabuf(ha, &ha->mpi_dma);
        ha->flags.mpi_dma = 0;
}

static int
qls_alloc_mpi_dma(qla_host_t *ha)
{
        int ret = 0;

        ha->mpi_dma.alignment = 4;
        ha->mpi_dma.size = (0x4000 * 4);

        ret = qls_alloc_dmabuf(ha, &ha->mpi_dma);
        if (ret)
                device_printf(ha->pci_dev, "%s: failed\n", __func__);
        else
                ha->flags.mpi_dma = 1;

        return (ret);
}

static void
qls_free_rx_ring_dma(qla_host_t *ha, int ridx)
{
        if (ha->rx_ring[ridx].flags.cq_dma) {
                qls_free_dmabuf(ha, &ha->rx_ring[ridx].cq_dma);
                ha->rx_ring[ridx].flags.cq_dma = 0;
        }

        if (ha->rx_ring[ridx].flags.lbq_dma) {
                qls_free_dmabuf(ha, &ha->rx_ring[ridx].lbq_dma);
                ha->rx_ring[ridx].flags.lbq_dma = 0;
        }

        if (ha->rx_ring[ridx].flags.sbq_dma) {
                qls_free_dmabuf(ha, &ha->rx_ring[ridx].sbq_dma);
                ha->rx_ring[ridx].flags.sbq_dma = 0;
        }

        if (ha->rx_ring[ridx].flags.lb_dma) {
                qls_free_dmabuf(ha, &ha->rx_ring[ridx].lb_dma);
                ha->rx_ring[ridx].flags.lb_dma = 0;
        }
        return;
}

static void
qls_free_rx_dma(qla_host_t *ha)
{
        int i;

        for (i = 0; i < ha->num_rx_rings; i++) {
                qls_free_rx_ring_dma(ha, i);
        }

        if (ha->rx_tag != NULL) {
                bus_dma_tag_destroy(ha->rx_tag);
                ha->rx_tag = NULL;
        }

        return;
}

static int
qls_alloc_rx_ring_dma(qla_host_t *ha, int ridx)
{
        int                             i, ret = 0;
        uint8_t                         *v_addr;
        bus_addr_t                      p_addr;
        volatile q81_bq_addr_e_t        *bq_e;
        device_t                        dev = ha->pci_dev;

        ha->rx_ring[ridx].cq_dma.alignment = 128;
        ha->rx_ring[ridx].cq_dma.size =
                (NUM_CQ_ENTRIES * (sizeof (q81_cq_e_t))) + PAGE_SIZE;

        ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].cq_dma);

        if (ret) {
                device_printf(dev, "%s: [%d] cq failed\n", __func__, ridx);
                goto qls_alloc_rx_ring_dma_exit;
        }
        ha->rx_ring[ridx].flags.cq_dma = 1;

        ha->rx_ring[ridx].lbq_dma.alignment = 8;
        ha->rx_ring[ridx].lbq_dma.size = QLA_LGBQ_AND_TABLE_SIZE;

        ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].lbq_dma);

        if (ret) {
                device_printf(dev, "%s: [%d] lbq failed\n", __func__, ridx);
                goto qls_alloc_rx_ring_dma_exit;
        }
        ha->rx_ring[ridx].flags.lbq_dma = 1;

        ha->rx_ring[ridx].sbq_dma.alignment = 8;
        ha->rx_ring[ridx].sbq_dma.size = QLA_SMBQ_AND_TABLE_SIZE;

        ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].sbq_dma);

        if (ret) {
                device_printf(dev, "%s: [%d] sbq failed\n", __func__, ridx);
                goto qls_alloc_rx_ring_dma_exit;
        }
        ha->rx_ring[ridx].flags.sbq_dma = 1;

        ha->rx_ring[ridx].lb_dma.alignment = 8;
        ha->rx_ring[ridx].lb_dma.size = (QLA_LGB_SIZE * QLA_NUM_LGB_ENTRIES);

        ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].lb_dma);
        if (ret) {
                device_printf(dev, "%s: [%d] lb failed\n", __func__, ridx);
                goto qls_alloc_rx_ring_dma_exit;
        }
        ha->rx_ring[ridx].flags.lb_dma = 1;

        bzero(ha->rx_ring[ridx].cq_dma.dma_b, ha->rx_ring[ridx].cq_dma.size);
        bzero(ha->rx_ring[ridx].lbq_dma.dma_b, ha->rx_ring[ridx].lbq_dma.size);
        bzero(ha->rx_ring[ridx].sbq_dma.dma_b, ha->rx_ring[ridx].sbq_dma.size);
        bzero(ha->rx_ring[ridx].lb_dma.dma_b, ha->rx_ring[ridx].lb_dma.size);

1637         /* completion queue */
1638         ha->rx_ring[ridx].cq_base_vaddr = ha->rx_ring[ridx].cq_dma.dma_b;
1639         ha->rx_ring[ridx].cq_base_paddr = ha->rx_ring[ridx].cq_dma.dma_addr;
1640
1641         v_addr = ha->rx_ring[ridx].cq_dma.dma_b;
1642         p_addr = ha->rx_ring[ridx].cq_dma.dma_addr;
1643
1644         v_addr = v_addr + (NUM_CQ_ENTRIES * (sizeof (q81_cq_e_t)));
1645         p_addr = p_addr + (NUM_CQ_ENTRIES * (sizeof (q81_cq_e_t)));
1646
1647         /* completion queue icb */
1648         ha->rx_ring[ridx].cq_icb_vaddr = v_addr;
1649         ha->rx_ring[ridx].cq_icb_paddr = p_addr;
1650
1651         v_addr = v_addr + (PAGE_SIZE >> 2);
1652         p_addr = p_addr + (PAGE_SIZE >> 2);
1653
1654         /* completion queue index register */
1655         ha->rx_ring[ridx].cqi_vaddr = (uint32_t *)v_addr;
1656         ha->rx_ring[ridx].cqi_paddr = p_addr;
1657
1658         v_addr = ha->rx_ring[ridx].lbq_dma.dma_b;
1659         p_addr = ha->rx_ring[ridx].lbq_dma.dma_addr;
1660
1661         /* large buffer queue address table */
1662         ha->rx_ring[ridx].lbq_addr_tbl_vaddr = v_addr;
1663         ha->rx_ring[ridx].lbq_addr_tbl_paddr = p_addr;
1664
1665         /* large buffer queue */
1666         ha->rx_ring[ridx].lbq_vaddr = v_addr + PAGE_SIZE;
1667         ha->rx_ring[ridx].lbq_paddr = p_addr + PAGE_SIZE;
1668
1669         v_addr = ha->rx_ring[ridx].sbq_dma.dma_b;
1670         p_addr = ha->rx_ring[ridx].sbq_dma.dma_addr;
1671
1672         /* small buffer queue address table */
1673         ha->rx_ring[ridx].sbq_addr_tbl_vaddr = v_addr;
1674         ha->rx_ring[ridx].sbq_addr_tbl_paddr = p_addr;
1675
1676         /* small buffer queue */
1677         ha->rx_ring[ridx].sbq_vaddr = v_addr + PAGE_SIZE;
1678         ha->rx_ring[ridx].sbq_paddr = p_addr + PAGE_SIZE;
1679
1680         ha->rx_ring[ridx].lb_vaddr = ha->rx_ring[ridx].lb_dma.dma_b;
1681         ha->rx_ring[ridx].lb_paddr = ha->rx_ring[ridx].lb_dma.dma_addr;
1682
1683         /* Initialize Large Buffer Queue Table */
1684
1685         p_addr = ha->rx_ring[ridx].lbq_paddr;
1686         bq_e = ha->rx_ring[ridx].lbq_addr_tbl_vaddr;
1687
1688         bq_e->addr_lo = p_addr & 0xFFFFFFFF;
1689         bq_e->addr_hi = (p_addr >> 32) & 0xFFFFFFFF;
1690
1691         p_addr = ha->rx_ring[ridx].lb_paddr;
1692         bq_e = ha->rx_ring[ridx].lbq_vaddr;
1693
1694         for (i = 0; i < QLA_NUM_LGB_ENTRIES; i++) {
1695                 bq_e->addr_lo = p_addr & 0xFFFFFFFF;
1696                 bq_e->addr_hi = (p_addr >> 32) & 0xFFFFFFFF;
1697
1698                 p_addr = p_addr + QLA_LGB_SIZE;
1699                 bq_e++;
1700         }
1701
1702         /* Initialize Small Buffer Queue Table */
1703
1704         p_addr = ha->rx_ring[ridx].sbq_paddr;
1705         bq_e = ha->rx_ring[ridx].sbq_addr_tbl_vaddr;
1706
1707         for (i = 0; i < (QLA_SBQ_SIZE / QLA_PAGE_SIZE); i++) {
1708                 bq_e->addr_lo = p_addr & 0xFFFFFFFF;
1709                 bq_e->addr_hi = (p_addr >> 32) & 0xFFFFFFFF;
1710
1711                 p_addr = p_addr + QLA_PAGE_SIZE;
1712                 bq_e++;
1713         }
1714
1715 qls_alloc_rx_ring_dma_exit:
1716         return (ret);
1717 }
1718
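/*
 * Name: qls_alloc_rx_dma
 * Function: Creates the dma tag for receive buffers (single segment,
 *      up to MJUM9BYTES) and allocates the DMA'able memory of every
 *      rx ring; all rx dma resources are released if any ring fails
 */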
1719 static int
1720 qls_alloc_rx_dma(qla_host_t *ha)
1721 {
1722         int     i;
1723         int     ret = 0;
1724
1725         if (bus_dma_tag_create(NULL,    /* parent */
1726                         1, 0,    /* alignment, bounds */
1727                         BUS_SPACE_MAXADDR,       /* lowaddr */
1728                         BUS_SPACE_MAXADDR,       /* highaddr */
1729                         NULL, NULL,      /* filter, filterarg */
1730                         MJUM9BYTES,     /* maxsize */
1731                         1,        /* nsegments */
1732                         MJUM9BYTES,        /* maxsegsize */
1733                         BUS_DMA_ALLOCNOW,        /* flags */
1734                         NULL,    /* lockfunc */
1735                         NULL,    /* lockfuncarg */
1736                         &ha->rx_tag)) {
1737                 device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
1738                         __func__);
1739
1740                 return (ENOMEM);
1741         }
1742
1743         for (i = 0; i < ha->num_rx_rings; i++) {
1744                 ret = qls_alloc_rx_ring_dma(ha, i);
1745
1746                 if (ret) {
1747                         qls_free_rx_dma(ha);
1748                         break;
1749                 }
1750         }
1751
1752         return (ret);
1753 }
1754
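/*
 * Name: qls_wait_for_flash_ready
 * Function: Polls the Flash Address register until the ready bit is
 *      set; returns -1 if the error bit is set or the polls are
 *      exhausted
 */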
1755 static int
1756 qls_wait_for_flash_ready(qla_host_t *ha)
1757 {
1758         uint32_t data32;
1759         uint32_t count = 3;
1760
1761         while (count--) {
1762                 data32 = READ_REG32(ha, Q81_CTL_FLASH_ADDR);
1763
1764                 if (data32 & Q81_CTL_FLASH_ADDR_ERR)
1765                         goto qls_wait_for_flash_ready_exit;
1766                 
1767                 if (data32 & Q81_CTL_FLASH_ADDR_RDY)
1768                         return (0);
1769
1770                 QLA_USEC_DELAY(100);
1771         }
1772
1773 qls_wait_for_flash_ready_exit:
1774         QL_DPRINT1((ha->pci_dev, "%s: failed\n", __func__));
1775
1776         return (-1);
1777 }
1778
1779 /*
1780  * Name: qls_rd_flash32
1781  * Function: Read Flash Memory
1782  */
1783 int
1784 qls_rd_flash32(qla_host_t *ha, uint32_t addr, uint32_t *data)
1785 {
1786         int ret;
1787
1788         ret = qls_wait_for_flash_ready(ha);
1789
1790         if (ret)
1791                 return (ret);
1792
1793         WRITE_REG32(ha, Q81_CTL_FLASH_ADDR, (addr | Q81_CTL_FLASH_ADDR_R));
1794
1795         ret = qls_wait_for_flash_ready(ha);
1796
1797         if (ret)
1798                 return (ret);
1799
1800         *data = READ_REG32(ha, Q81_CTL_FLASH_DATA);
1801
1802         return (0);
1803 }
1804
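/*
 * Name: qls_flash_validate
 * Function: Validates the flash signature and verifies that the 16-bit
 *      words of the flash region sum to zero
 */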
1805 static int
1806 qls_flash_validate(qla_host_t *ha, const char *signature)
1807 {
1808         uint16_t csum16 = 0;
1809         uint16_t *data16;
1810         int i;
1811
1812         if (bcmp(ha->flash.id, signature, 4)) {
1813                 QL_DPRINT1((ha->pci_dev, "%s: invalid signature "
1814                         "%x:%x:%x:%x %s\n", __func__, ha->flash.id[0],
1815                         ha->flash.id[1], ha->flash.id[2], ha->flash.id[3],
1816                         signature));
1817                 return (-1);
1818         }
1819
1820         data16 = (uint16_t *)&ha->flash;
1821
1822         for (i = 0; i < (sizeof (q81_flash_t) >> 1); i++) {
1823                 csum16 += *data16++;
1824         }
1825
1826         if (csum16) {
1827                 QL_DPRINT1((ha->pci_dev, "%s: invalid checksum\n", __func__));
1828                 return (-1);
1829         }
1830         return (0);
1831 }
1832
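/*
 * Name: qls_rd_nic_params
 * Function: Reads this PCI function's region of the flash under the
 *      flash semaphore, validates it and retrieves the MAC address
 */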
1833 int
1834 qls_rd_nic_params(qla_host_t *ha)
1835 {
1836         int             i, ret = 0;
1837         uint32_t        faddr;
1838         uint32_t        *qflash;
1839
1840         if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_FLASH, Q81_CTL_SEM_SET_FLASH)) {
1841                 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
1842                 return (-1);
1843         }
1844
1845         if ((ha->pci_func & 0x1) == 0)
1846                 faddr = Q81_F0_FLASH_OFFSET >> 2;
1847         else
1848                 faddr = Q81_F1_FLASH_OFFSET >> 2;
1849
1850         qflash = (uint32_t *)&ha->flash;
1851
1852         for (i = 0; i < (sizeof(q81_flash_t) >> 2); i++) {
1853                 ret = qls_rd_flash32(ha, faddr, qflash);
1854
1855                 if (ret)
1856                         goto qls_rd_nic_params_exit;
1857
1858                 faddr++;
1859                 qflash++;
1860         }
1861
1862         QL_DUMP_BUFFER8(ha, __func__, (&ha->flash), (sizeof (q81_flash_t)));
1863
1864         ret = qls_flash_validate(ha, Q81_FLASH_ID);
1865
1866         if (ret)
1867                 goto qls_rd_nic_params_exit;
1868
1869         bcopy(ha->flash.mac_addr0, ha->mac_addr, ETHER_ADDR_LEN);
1870
1871         QL_DPRINT1((ha->pci_dev, "%s: mac %02x:%02x:%02x:%02x:%02x:%02x\n",
1872                 __func__, ha->mac_addr[0],  ha->mac_addr[1], ha->mac_addr[2],
1873                 ha->mac_addr[3], ha->mac_addr[4],  ha->mac_addr[5]));
1874
1875 qls_rd_nic_params_exit:
1876
1877         qls_sem_unlock(ha, Q81_CTL_SEM_MASK_FLASH);
1878
1879         return (ret);
1880 }
1881
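/*
 * Name: qls_sem_lock
 * Function: Acquires the hw semaphore identified by mask; retries up
 *      to 30 times before initiating recovery
 */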
1882 static int
1883 qls_sem_lock(qla_host_t *ha, uint32_t mask, uint32_t value)
1884 {
1885         uint32_t count = 30;
1886         uint32_t data;
1887
1888         while (count--) {
1889                 WRITE_REG32(ha, Q81_CTL_SEMAPHORE, (mask|value));
1890
1891                 data = READ_REG32(ha, Q81_CTL_SEMAPHORE);
1892
1893                 if (data & value) {
1894                         return (0);
1895                 } else {
1896                         QLA_USEC_DELAY(100);
1897                 }
1898         }
1899         ha->qla_initiate_recovery = 1;
1900         return (-1);
1901 }
1902
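/*
 * Name: qls_sem_unlock
 * Function: Releases the hw semaphore identified by mask
 */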
1903 static void
1904 qls_sem_unlock(qla_host_t *ha, uint32_t mask)
1905 {
1906         WRITE_REG32(ha, Q81_CTL_SEMAPHORE, mask);
1907 }
1908
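/*
 * Name: qls_wait_for_proc_addr_ready
 * Function: Polls the Processor Address register until the ready bit
 *      is set; initiates recovery on error or timeout
 */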
1909 static int
1910 qls_wait_for_proc_addr_ready(qla_host_t *ha)
1911 {
1912         uint32_t data32;
1913         uint32_t count = 3;
1914
1915         while (count--) {
1916                 data32 = READ_REG32(ha, Q81_CTL_PROC_ADDR);
1917
1918                 if (data32 & Q81_CTL_PROC_ADDR_ERR)
1919                         goto qls_wait_for_proc_addr_ready_exit;
1920                 
1921                 if (data32 & Q81_CTL_PROC_ADDR_RDY)
1922                         return (0);
1923
1924                 QLA_USEC_DELAY(100);
1925         }
1926
1927 qls_wait_for_proc_addr_ready_exit:
1928         QL_DPRINT1((ha->pci_dev, "%s: failed\n", __func__));
1929
1930         ha->qla_initiate_recovery = 1;
1931         return (-1);
1932 }
1933
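/*
 * Name: qls_proc_addr_rd_reg
 * Function: Reads a register of the processor module identified by
 *      addr_module via the Processor Address/Data register pair
 */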
1934 static int
1935 qls_proc_addr_rd_reg(qla_host_t *ha, uint32_t addr_module, uint32_t reg,
1936         uint32_t *data)
1937 {
1938         int ret;
1939         uint32_t value;
1940
1941         ret = qls_wait_for_proc_addr_ready(ha);
1942
1943         if (ret)
1944                 goto qls_proc_addr_rd_reg_exit;
1945
1946         value = addr_module | reg | Q81_CTL_PROC_ADDR_READ;
1947
1948         WRITE_REG32(ha, Q81_CTL_PROC_ADDR, value);
1949
1950         ret = qls_wait_for_proc_addr_ready(ha);
1951
1952         if (ret)
1953                 goto qls_proc_addr_rd_reg_exit;
1954
1955         *data = READ_REG32(ha, Q81_CTL_PROC_DATA); 
1956
1957 qls_proc_addr_rd_reg_exit:
1958         return (ret);
1959 }
1960
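/*
 * Name: qls_proc_addr_wr_reg
 * Function: Writes a register of the processor module identified by
 *      addr_module via the Processor Address/Data register pair
 */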
1961 static int
1962 qls_proc_addr_wr_reg(qla_host_t *ha, uint32_t addr_module, uint32_t reg,
1963         uint32_t data)
1964 {
1965         int ret;
1966         uint32_t value;
1967
1968         ret = qls_wait_for_proc_addr_ready(ha);
1969
1970         if (ret)
1971                 goto qls_proc_addr_wr_reg_exit;
1972
1973         WRITE_REG32(ha, Q81_CTL_PROC_DATA, data);
1974
1975         value = addr_module | reg;
1976
1977         WRITE_REG32(ha, Q81_CTL_PROC_ADDR, value);
1978
1979         ret = qls_wait_for_proc_addr_ready(ha);
1980
1981 qls_proc_addr_wr_reg_exit:
1982         return (ret);
1983 }
1984
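/*
 * Name: qls_hw_nic_reset
 * Function: Issues a function reset and waits for the function reset
 *      bit to clear
 */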
1985 static int
1986 qls_hw_nic_reset(qla_host_t *ha)
1987 {
1988         int             count;
1989         uint32_t        data;
1990         device_t        dev = ha->pci_dev;
1991
1992         ha->hw_init = 0;
1993
1994         data = (Q81_CTL_RESET_FUNC << Q81_CTL_RESET_MASK_SHIFT) |
1995                         Q81_CTL_RESET_FUNC;
1996         WRITE_REG32(ha, Q81_CTL_RESET, data);
1997
1998         count = 10;
1999         while (count--) {
2000                 data = READ_REG32(ha, Q81_CTL_RESET);
2001                 if ((data & Q81_CTL_RESET_FUNC) == 0)
2002                         break;
2003                 QLA_USEC_DELAY(10);
2004         }
2005         if (count < 0) {
2006                 device_printf(dev, "%s: Bit 15 not cleared after Reset\n",
2007                         __func__);
2008                 return (-1);
2009         }
2010         return (0);
2011 }
2012
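/*
 * Name: qls_hw_reset
 * Function: Performs an orderly reset: the routing table is cleared,
 *      the management processor is stopped, the fifo is allowed to
 *      drain and the NIC function is reset before management control
 *      is resumed. If the hardware was never initialized, only the
 *      function reset is performed
 */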
2013 static int
2014 qls_hw_reset(qla_host_t *ha)
2015 {
2016         device_t        dev = ha->pci_dev;
2017         int             ret;
2018         int             count;
2019         uint32_t        data;
2020
2021         QL_DPRINT2((ha->pci_dev, "%s:enter[%d]\n", __func__, ha->hw_init));
2022
2023         if (ha->hw_init == 0) {
2024                 ret = qls_hw_nic_reset(ha);
2025                 goto qls_hw_reset_exit;
2026         }
2027
2028         ret = qls_clear_routing_table(ha);
2029         if (ret) 
2030                 goto qls_hw_reset_exit;
2031
2032         ret = qls_mbx_set_mgmt_ctrl(ha, Q81_MBX_SET_MGMT_CTL_STOP);
2033         if (ret) 
2034                 goto qls_hw_reset_exit;
2035
2036         /*
2037          * Wait for FIFO to empty
2038          */
2039         count = 5;
2040         while (count--) {
2041                 data = READ_REG32(ha, Q81_CTL_STATUS);
2042                 if (data & Q81_CTL_STATUS_NFE)
2043                         break;
2044                 qls_mdelay(__func__, 100);
2045         }
2046         if (count < 0) {
2047                 device_printf(dev, "%s: NFE bit not set\n", __func__);
2048                 goto qls_hw_reset_exit;
2049         }
2050
2051         count = 5;
2052         while (count--) {
2053                 (void)qls_mbx_get_mgmt_ctrl(ha, &data);
2054
2055                 if ((data & Q81_MBX_GET_MGMT_CTL_FIFO_EMPTY) &&
2056                         (data & Q81_MBX_GET_MGMT_CTL_SET_MGMT))
2057                         break;
2058                 qls_mdelay(__func__, 100);
2059         }
2060         if (count < 0)
2061                 goto qls_hw_reset_exit;
2062
2063         /*
2064          * Reset the NIC function
2065          */
2066         ret = qls_hw_nic_reset(ha);
2067         if (ret) 
2068                 goto qls_hw_reset_exit;
2069
2070         ret = qls_mbx_set_mgmt_ctrl(ha, Q81_MBX_SET_MGMT_CTL_RESUME);
2071
2072 qls_hw_reset_exit:
2073         if (ret)
2074                 device_printf(dev, "%s: failed\n", __func__);
2075                 
2076         return (ret);
2077 }
2078
2079 /*
2080  * MPI Related Functions
2081  */
2082 int
2083 qls_mpi_risc_rd_reg(qla_host_t *ha, uint32_t reg, uint32_t *data)
2084 {
2085         int ret;
2086
2087         ret = qls_proc_addr_rd_reg(ha, Q81_CTL_PROC_ADDR_MPI_RISC,
2088                         reg, data);
2089         return (ret);
2090 }
2091
2092 int
2093 qls_mpi_risc_wr_reg(qla_host_t *ha, uint32_t reg, uint32_t data)
2094 {
2095         int ret;
2096
2097         ret = qls_proc_addr_wr_reg(ha, Q81_CTL_PROC_ADDR_MPI_RISC,
2098                         reg, data);
2099         return (ret);
2100 }
2101
2102 int
2103 qls_mbx_rd_reg(qla_host_t *ha, uint32_t reg, uint32_t *data)
2104 {
2105         int ret;
2106
2107         if ((ha->pci_func & 0x1) == 0)
2108                 reg += Q81_FUNC0_MBX_OUT_REG0;
2109         else
2110                 reg += Q81_FUNC1_MBX_OUT_REG0;
2111
2112         ret = qls_mpi_risc_rd_reg(ha, reg, data);
2113
2114         return (ret);
2115 }
2116
2117 int
2118 qls_mbx_wr_reg(qla_host_t *ha, uint32_t reg, uint32_t data)
2119 {
2120         int ret;
2121
2122         if ((ha->pci_func & 0x1) == 0)
2123                 reg += Q81_FUNC0_MBX_IN_REG0;
2124         else
2125                 reg += Q81_FUNC1_MBX_IN_REG0;
2126
2127         ret = qls_mpi_risc_wr_reg(ha, reg, data);
2128
2129         return (ret);
2130 }
2131
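/*
 * Name: qls_mbx_cmd
 * Function: Issues a mailbox command to the firmware. The i_count input
 *      mailboxes are written and the firmware is interrupted; the
 *      response is then either polled from the outbound mailboxes (when
 *      interrupts are disabled) or picked up from ha->mbox once the ISR
 *      sets ha->mbx_done. A typical caller loads mbox[0] with a command
 *      code (e.g. Q81_MBX_ABOUT_FW) and checks the returned mbox[0] for
 *      the 0x4000 (command complete) status
 */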
2132 static int
2133 qls_mbx_cmd(qla_host_t *ha, uint32_t *in_mbx, uint32_t i_count,
2134         uint32_t *out_mbx, uint32_t o_count)
2135 {
2136         int i, ret = -1;
2137         uint32_t data32;
2138         uint32_t count = 50;
2139
2140         QL_DPRINT2((ha->pci_dev, "%s: enter[0x%08x 0x%08x 0x%08x]\n",
2141                 __func__, *in_mbx, *(in_mbx + 1), *(in_mbx + 2)));
2142
2143         data32 = READ_REG32(ha, Q81_CTL_HOST_CMD_STATUS);
2144
2145         if (data32 & Q81_CTL_HCS_HTR_INTR) {
2146                 device_printf(ha->pci_dev, "%s: cmd_status[0x%08x]\n",
2147                         __func__, data32);
2148                 goto qls_mbx_cmd_exit;
2149         }
2150
2151         if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_PROC_ADDR_NIC_RCV,
2152                 Q81_CTL_SEM_SET_PROC_ADDR_NIC_RCV)) {
2153                 device_printf(ha->pci_dev, "%s: semlock failed\n", __func__);
2154                 goto qls_mbx_cmd_exit;
2155         }
2156
2157         ha->mbx_done = 0;
2158
2159         for (i = 0; i < i_count; i++) {
2160                 ret = qls_mbx_wr_reg(ha, i, *in_mbx);
2161
2162                 if (ret) {
2163                         device_printf(ha->pci_dev,
2164                                 "%s: mbx_wr[%d, 0x%08x] failed\n", __func__,
2165                                 i, *in_mbx);
2166                         qls_sem_unlock(ha, Q81_CTL_SEM_MASK_PROC_ADDR_NIC_RCV);
2167                         goto qls_mbx_cmd_exit;
2168                 }
2169
2170                 in_mbx++;
2171         }
2172         WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS, Q81_CTL_HCS_CMD_SET_HTR_INTR);
2173
2174         qls_sem_unlock(ha, Q81_CTL_SEM_MASK_PROC_ADDR_NIC_RCV);
2175
2176         ret = -1;
2177         ha->mbx_done = 0;
2178
2179         while (count--) {
2180                 if (ha->flags.intr_enable == 0) {
2181                         data32 = READ_REG32(ha, Q81_CTL_STATUS);
2182
2183                         if (!(data32 & Q81_CTL_STATUS_PI)) {
2184                                 qls_mdelay(__func__, 100);
2185                                 continue;
2186                         }
2187
2188                         ret = qls_mbx_rd_reg(ha, 0, &data32);
2189
2190                         if (ret == 0) {
2191                                 if ((data32 & 0xF000) == 0x4000) {
2192                                         out_mbx[0] = data32;
2193
2194                                         for (i = 1; i < o_count; i++) {
2195                                                 ret = qls_mbx_rd_reg(ha, i,
2196                                                                 &data32);
2197                                                 if (ret) {
2198                                                         device_printf(
2199                                                                 ha->pci_dev,
2200                                                                 "%s: mbx_rd[%d]"
2201                                                                 " failed\n",
2202                                                                 __func__, i);
2203                                                         break;
2204                                                 }
2205                                                 out_mbx[i] = data32;
2206                                         }
2207                                         break;
2208                                 } else if ((data32 & 0xF000) == 0x8000) {
2209                                         count = 50;
2210                                         WRITE_REG32(ha,
2211                                                 Q81_CTL_HOST_CMD_STATUS,
2212                                                 Q81_CTL_HCS_CMD_CLR_RTH_INTR);
2213                                 }
2214                         }
2215                 } else {
2216                         if (ha->mbx_done) {
2217                                 for (i = 0; i < o_count; i++) {
2218                                         out_mbx[i] = ha->mbox[i];
2219                                 }
2220                                 ret = 0;
2221                                 break;
2222                         }
2223                 }
2224                 qls_mdelay(__func__, 1000);
2225         }
2226
2227 qls_mbx_cmd_exit:
2228
2229         if (ha->flags.intr_enable == 0) {
2230                 WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS,
2231                         Q81_CTL_HCS_CMD_CLR_RTH_INTR);
2232         }
2233
2234         if (ret) {
2235                 ha->qla_initiate_recovery = 1;
2236         }
2237
2238         QL_DPRINT2((ha->pci_dev, "%s: exit[%d]\n", __func__, ret));
2239         return (ret);
2240 }
2241
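/*
 * Name: qls_mbx_set_mgmt_ctrl
 * Function: Requests the management processor to stop or resume; a
 *      stop request that fails with Q81_MBX_CMD_ERROR is treated as
 *      success
 */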
2242 static int
2243 qls_mbx_set_mgmt_ctrl(qla_host_t *ha, uint32_t t_ctrl)
2244 {
2245         uint32_t *mbox;
2246         device_t dev = ha->pci_dev;
2247
2248         mbox = ha->mbox;
2249         bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
2250
2251         mbox[0] = Q81_MBX_SET_MGMT_CTL;
2252         mbox[1] = t_ctrl;
2253
2254         if (qls_mbx_cmd(ha, mbox, 2, mbox, 1)) {
2255                 device_printf(dev, "%s failed\n", __func__);
2256                 return (-1);
2257         }
2258
2259         if ((mbox[0] == Q81_MBX_CMD_COMPLETE) ||
2260                 ((t_ctrl == Q81_MBX_SET_MGMT_CTL_STOP) &&
2261                         (mbox[0] == Q81_MBX_CMD_ERROR))) {
2262                 return (0);
2263         }
2264         device_printf(dev, "%s failed [0x%08x]\n", __func__, mbox[0]);
2265         return (-1);
2266
2267 }
2268
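/*
 * Name: qls_mbx_get_mgmt_ctrl
 * Function: Retrieves the management control status into *t_status
 */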
2269 static int
2270 qls_mbx_get_mgmt_ctrl(qla_host_t *ha, uint32_t *t_status)
2271 {
2272         uint32_t *mbox;
2273         device_t dev = ha->pci_dev;
2274
2275         *t_status = 0;
2276
2277         mbox = ha->mbox;
2278         bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
2279
2280         mbox[0] = Q81_MBX_GET_MGMT_CTL;
2281
2282         if (qls_mbx_cmd(ha, mbox, 1, mbox, 2)) {
2283                 device_printf(dev, "%s failed\n", __func__);
2284                 return (-1);
2285         }
2286
2287         *t_status = mbox[1];
2288
2289         return (0);
2290 }
2291
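/*
 * Name: qls_mbx_get_link_status
 * Function: Retrieves the link status, link down information, hardware
 *      information and the dcbx/link change counters
 */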
2292 static void
2293 qls_mbx_get_link_status(qla_host_t *ha)
2294 {
2295         uint32_t *mbox;
2296         device_t dev = ha->pci_dev;
2297
2298         mbox = ha->mbox;
2299         bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
2300
2301         mbox[0] = Q81_MBX_GET_LNK_STATUS;
2302
2303         if (qls_mbx_cmd(ha, mbox, 1, mbox, 6)) {
2304                 device_printf(dev, "%s failed\n", __func__);
2305                 return;
2306         }
2307
2308         ha->link_status                 = mbox[1];
2309         ha->link_down_info              = mbox[2];
2310         ha->link_hw_info                = mbox[3];
2311         ha->link_dcbx_counters          = mbox[4];
2312         ha->link_change_counters        = mbox[5];
2313
2314         device_printf(dev, "%s 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
2315                 __func__, mbox[0], mbox[1], mbox[2], mbox[3], mbox[4], mbox[5]);
2316
2317         return;
2318 }
2319
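/*
 * Name: qls_mbx_about_fw
 * Function: Retrieves and prints the firmware version information
 */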
2320 static void
2321 qls_mbx_about_fw(qla_host_t *ha)
2322 {
2323         uint32_t *mbox;
2324         device_t dev = ha->pci_dev;
2325
2326         mbox = ha->mbox;
2327         bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
2328
2329         mbox[0] = Q81_MBX_ABOUT_FW;
2330
2331         if (qls_mbx_cmd(ha, mbox, 1, mbox, 6)) {
2332                 device_printf(dev, "%s failed\n", __func__);
2333                 return;
2334         }
2335
2336         device_printf(dev, "%s 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
2337                 __func__, mbox[0], mbox[1], mbox[2], mbox[3], mbox[4], mbox[5]);
2338 }
2339
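/*
 * Name: qls_mbx_dump_risc_ram
 * Function: Dumps r_size dwords of MPI RISC RAM, starting at r_addr,
 *      into buf; the pre-allocated mpi_dma buffer is used for the
 *      transfer
 */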
2340 int
2341 qls_mbx_dump_risc_ram(qla_host_t *ha, void *buf, uint32_t r_addr,
2342         uint32_t r_size)
2343 {
2344         bus_addr_t b_paddr;
2345         uint32_t *mbox;
2346         device_t dev = ha->pci_dev;
2347
2348         mbox = ha->mbox;
2349         bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
2350
2351         bzero(ha->mpi_dma.dma_b, (r_size << 2));
2352         b_paddr = ha->mpi_dma.dma_addr;
2353
2354         mbox[0] = Q81_MBX_DUMP_RISC_RAM;
2355         mbox[1] = r_addr & 0xFFFF;
2356         mbox[2] = ((uint32_t)(b_paddr >> 16)) & 0xFFFF;
2357         mbox[3] = ((uint32_t)b_paddr) & 0xFFFF;
2358         mbox[4] = (r_size >> 16) & 0xFFFF;
2359         mbox[5] = r_size & 0xFFFF;
2360         mbox[6] = ((uint32_t)(b_paddr >> 48)) & 0xFFFF;
2361         mbox[7] = ((uint32_t)(b_paddr >> 32)) & 0xFFFF;
2362         mbox[8] = (r_addr >> 16) & 0xFFFF;
2363
2364         bus_dmamap_sync(ha->mpi_dma.dma_tag, ha->mpi_dma.dma_map,
2365                 BUS_DMASYNC_PREREAD);
2366
2367         if (qls_mbx_cmd(ha, mbox, 9, mbox, 1)) {
2368                 device_printf(dev, "%s failed\n", __func__);
2369                 return (-1);
2370         }
2371         if (mbox[0] != 0x4000) {
2372                 device_printf(ha->pci_dev, "%s: failed!\n", __func__);
2373                 return (-1);
2374         } else {
2375                 bus_dmamap_sync(ha->mpi_dma.dma_tag, ha->mpi_dma.dma_map,
2376                         BUS_DMASYNC_POSTREAD);
2377                 bcopy(ha->mpi_dma.dma_b, buf, (r_size << 2));
2378         }
2379
2380         return (0);
2381 }
2382
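/*
 * Name: qls_mpi_reset
 * Function: Resets the MPI RISC processor; once the reset is reflected
 *      in the host command status, the reset condition is cleared
 */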
2383 int 
2384 qls_mpi_reset(qla_host_t *ha)
2385 {
2386         int             count;
2387         uint32_t        data;
2388         device_t        dev = ha->pci_dev;
2389
2390         WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS,
2391                 Q81_CTL_HCS_CMD_SET_RISC_RESET);
2392
2393         count = 10;
2394         while (count--) {
2395                 data = READ_REG32(ha, Q81_CTL_HOST_CMD_STATUS);
2396                 if (data & Q81_CTL_HCS_RISC_RESET) {
2397                         WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS,
2398                                 Q81_CTL_HCS_CMD_CLR_RISC_RESET);
2399                         break;
2400                 }
2401                 qls_mdelay(__func__, 10);
2402         }
2403         if (count < 0) {
2404                 device_printf(dev, "%s: failed\n", __func__);
2405                 return (-1);
2406         }
2407         return (0);
2408 }