/*
 * Copyright (c) 2013-2014 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: qls_hw.c
 * Author: David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 * Content: Contains hardware-dependent functions
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "qls_os.h"
#include "qls_hw.h"
#include "qls_def.h"
#include "qls_inline.h"
#include "qls_ver.h"
#include "qls_glbl.h"
#include "qls_dbg.h"

/*
 * Static Functions
 */
static int qls_wait_for_mac_proto_idx_ready(qla_host_t *ha, uint32_t op);
static int qls_config_unicast_mac_addr(qla_host_t *ha, uint32_t add_mac);
static int qls_config_mcast_mac_addr(qla_host_t *ha, uint8_t *mac_addr,
                uint32_t add_mac, uint32_t index);

static int qls_init_rss(qla_host_t *ha);
static int qls_init_comp_queue(qla_host_t *ha, int cid);
static int qls_init_work_queue(qla_host_t *ha, int wid);
static int qls_init_fw_routing_table(qla_host_t *ha);
static int qls_hw_add_all_mcast(qla_host_t *ha);
static int qls_hw_add_mcast(qla_host_t *ha, uint8_t *mta);
static int qls_hw_del_mcast(qla_host_t *ha, uint8_t *mta);
static int qls_wait_for_flash_ready(qla_host_t *ha);

static int qls_sem_lock(qla_host_t *ha, uint32_t mask, uint32_t value);
static void qls_sem_unlock(qla_host_t *ha, uint32_t mask);

static void qls_free_tx_dma(qla_host_t *ha);
static int qls_alloc_tx_dma(qla_host_t *ha);
static void qls_free_rx_dma(qla_host_t *ha);
static int qls_alloc_rx_dma(qla_host_t *ha);
static void qls_free_mpi_dma(qla_host_t *ha);
static int qls_alloc_mpi_dma(qla_host_t *ha);
static void qls_free_rss_dma(qla_host_t *ha);
static int qls_alloc_rss_dma(qla_host_t *ha);

static int qls_flash_validate(qla_host_t *ha, const char *signature);

static int qls_wait_for_proc_addr_ready(qla_host_t *ha);
static int qls_proc_addr_rd_reg(qla_host_t *ha, uint32_t addr_module,
                uint32_t reg, uint32_t *data);
static int qls_proc_addr_wr_reg(qla_host_t *ha, uint32_t addr_module,
                uint32_t reg, uint32_t data);

static int qls_hw_reset(qla_host_t *ha);

/*
 * MPI Related Functions
 */
static int qls_mbx_cmd(qla_host_t *ha, uint32_t *in_mbx, uint32_t i_count,
                uint32_t *out_mbx, uint32_t o_count);
static int qls_mbx_set_mgmt_ctrl(qla_host_t *ha, uint32_t t_ctrl);
static int qls_mbx_get_mgmt_ctrl(qla_host_t *ha, uint32_t *t_status);
static void qls_mbx_get_link_status(qla_host_t *ha);
static void qls_mbx_about_fw(qla_host_t *ha);

int
qls_get_msix_count(qla_host_t *ha)
{
        return (ha->num_rx_rings);
}

static int
qls_syctl_mpi_dump(SYSCTL_HANDLER_ARGS)
{
        int err = 0, ret;
        qla_host_t *ha;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        if (ret == 1) {
                ha = (qla_host_t *)arg1;
                qls_mpi_core_dump(ha);
        }
        return (err);
}

static int
qls_syctl_link_status(SYSCTL_HANDLER_ARGS)
{
        int err = 0, ret;
        qla_host_t *ha;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        if (ret == 1) {
                ha = (qla_host_t *)arg1;
                qls_mbx_get_link_status(ha);
                qls_mbx_about_fw(ha);
        }
        return (err);
}

void
qls_hw_add_sysctls(qla_host_t *ha)
{
        device_t        dev;

        dev = ha->pci_dev;

        ha->num_rx_rings = MAX_RX_RINGS;
        ha->num_tx_rings = MAX_TX_RINGS;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_rx_rings", CTLFLAG_RD, &ha->num_rx_rings,
                ha->num_rx_rings, "Number of Completion Queues");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->num_tx_rings,
                ha->num_tx_rings, "Number of Transmit Rings");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "mpi_dump", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qls_syctl_mpi_dump, "I", "MPI Dump");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "link_status", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qls_syctl_link_status, "I", "Link Status");
}
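
/*
 * Usage note (editor's addition, not in the original source): the knobs
 * registered above land under the device's sysctl tree; assuming the driver
 * attaches as qlxge with unit 0, they can be exercised from userland as:
 *
 *      sysctl dev.qlxge.0.num_rx_rings         (read-only ring count)
 *      sysctl dev.qlxge.0.mpi_dump=1           (trigger an MPI core dump)
 *      sysctl dev.qlxge.0.link_status=1        (re-query link/firmware state)
 */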

/*
 * Name: qls_free_dma
 * Function: Frees the DMA'able memory allocated in qls_alloc_dma()
 */
void
qls_free_dma(qla_host_t *ha)
{
        qls_free_rss_dma(ha);
        qls_free_mpi_dma(ha);
        qls_free_tx_dma(ha);
        qls_free_rx_dma(ha);
        return;
}

/*
 * Name: qls_alloc_dma
 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
 */
int
qls_alloc_dma(qla_host_t *ha)
{
        if (qls_alloc_rx_dma(ha))
                return (-1);

        if (qls_alloc_tx_dma(ha)) {
                qls_free_rx_dma(ha);
                return (-1);
        }

        if (qls_alloc_mpi_dma(ha)) {
                qls_free_tx_dma(ha);
                qls_free_rx_dma(ha);
                return (-1);
        }

        if (qls_alloc_rss_dma(ha)) {
                qls_free_mpi_dma(ha);
                qls_free_tx_dma(ha);
                qls_free_rx_dma(ha);
                return (-1);
        }

        return (0);
}

static int
qls_wait_for_mac_proto_idx_ready(qla_host_t *ha, uint32_t op)
{
        uint32_t data32;
        uint32_t count = 3;

        while (count--) {
                data32 = READ_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX);

                if (data32 & op)
                        return (0);

                QLA_USEC_DELAY(100);
        }
        ha->qla_initiate_recovery = 1;
        return (-1);
}

/*
 * Name: qls_config_unicast_mac_addr
 * Function: binds/unbinds a unicast MAC address to the interface.
 */
static int
qls_config_unicast_mac_addr(qla_host_t *ha, uint32_t add_mac)
{
        int ret = 0;
        uint32_t mac_upper = 0;
        uint32_t mac_lower = 0;
        uint32_t value = 0, index;

        if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_MAC_SERDES,
                Q81_CTL_SEM_SET_MAC_SERDES)) {
                QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
                return (-1);
        }

        if (add_mac) {
                mac_upper = (ha->mac_addr[0] << 8) | ha->mac_addr[1];
                mac_lower = (ha->mac_addr[2] << 24) | (ha->mac_addr[3] << 16) |
                                (ha->mac_addr[4] << 8) | ha->mac_addr[5];
        }
        ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
        if (ret)
                goto qls_config_unicast_mac_addr_exit;

        index = 128 * (ha->pci_func & 0x1); /* CAM index for this PCI func */

        value = (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
                Q81_CTL_MAC_PROTO_AI_TYPE_CAM_MAC;

        WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
        WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_lower);

        ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
        if (ret)
                goto qls_config_unicast_mac_addr_exit;

        value = (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
                Q81_CTL_MAC_PROTO_AI_TYPE_CAM_MAC | 0x1;

        WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
        WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_upper);

        ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
        if (ret)
                goto qls_config_unicast_mac_addr_exit;

        value = (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
                Q81_CTL_MAC_PROTO_AI_TYPE_CAM_MAC | 0x2;

        WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);

        value = Q81_CAM_MAC_OFF2_ROUTE_NIC |
                        ((ha->pci_func & 0x1) << Q81_CAM_MAC_OFF2_FUNC_SHIFT) |
                        (0 << Q81_CAM_MAC_OFF2_CQID_SHIFT);

        WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, value);

qls_config_unicast_mac_addr_exit:
        qls_sem_unlock(ha, Q81_CTL_SEM_MASK_MAC_SERDES);
        return (ret);
}
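
/*
 * Editor's illustration (added; the example address is hypothetical): a CAM
 * MAC entry is programmed as three 32-bit writes, the low bits of the index
 * word (0x0/0x1/0x2 above) apparently selecting the entry sub-word. For an
 * example address 00:0e:1e:01:02:03 the split performed above is
 *
 *      mac_upper = 0x0000000e          (bytes 0-1)
 *      mac_lower = 0x1e010203          (bytes 2-5)
 *
 * and the third write supplies the routing word: route to the NIC, tagged
 * with the owning PCI function, default completion queue 0.
 */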

/*
 * Name: qls_config_mcast_mac_addr
 * Function: binds/unbinds a multicast MAC address to the interface.
 */
static int
qls_config_mcast_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac,
        uint32_t index)
{
        int ret = 0;
        uint32_t mac_upper = 0;
        uint32_t mac_lower = 0;
        uint32_t value = 0;

        if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_MAC_SERDES,
                Q81_CTL_SEM_SET_MAC_SERDES)) {
                QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
                return (-1);
        }

        if (add_mac) {
                mac_upper = (mac_addr[0] << 8) | mac_addr[1];
                mac_lower = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
                                (mac_addr[4] << 8) | mac_addr[5];
        }
        ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
        if (ret)
                goto qls_config_mcast_mac_addr_exit;

        value = Q81_CTL_MAC_PROTO_AI_E |
                        (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
                        Q81_CTL_MAC_PROTO_AI_TYPE_MCAST;

        WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
        WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_lower);

        ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW);
        if (ret)
                goto qls_config_mcast_mac_addr_exit;

        value = Q81_CTL_MAC_PROTO_AI_E |
                        (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) |
                        Q81_CTL_MAC_PROTO_AI_TYPE_MCAST | 0x1;

        WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value);
        WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_upper);

qls_config_mcast_mac_addr_exit:
        qls_sem_unlock(ha, Q81_CTL_SEM_MASK_MAC_SERDES);

        return (ret);
}

/*
 * Name: qls_wait_for_route_idx_ready
 * Function: Waits until the Routing Index Register is ready to accept
 *      the requested read/write operation.
 */
static int
qls_wait_for_route_idx_ready(qla_host_t *ha, uint32_t op)
{
        uint32_t data32;
        uint32_t count = 3;

        while (count--) {
                data32 = READ_REG32(ha, Q81_CTL_ROUTING_INDEX);

                if (data32 & op)
                        return (0);

                QLA_USEC_DELAY(100);
        }
        ha->qla_initiate_recovery = 1;
        return (-1);
}

static int
qls_load_route_idx_reg(qla_host_t *ha, uint32_t index, uint32_t data)
{
        int ret = 0;

        ret = qls_wait_for_route_idx_ready(ha, Q81_CTL_RI_MW);

        if (ret) {
                device_printf(ha->pci_dev, "%s: [0x%08x, 0x%08x] failed\n",
                        __func__, index, data);
                goto qls_load_route_idx_reg_exit;
        }

        WRITE_REG32(ha, Q81_CTL_ROUTING_INDEX, index);
        WRITE_REG32(ha, Q81_CTL_ROUTING_DATA, data);

qls_load_route_idx_reg_exit:
        return (ret);
}

static int
qls_load_route_idx_reg_locked(qla_host_t *ha, uint32_t index, uint32_t data)
{
        int ret = 0;

        if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG,
                Q81_CTL_SEM_SET_RIDX_DATAREG)) {
                QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
                return (-1);
        }

        ret = qls_load_route_idx_reg(ha, index, data);

        qls_sem_unlock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG);

        return (ret);
}

static int
qls_clear_routing_table(qla_host_t *ha)
{
        int i, ret = 0;

        if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG,
                Q81_CTL_SEM_SET_RIDX_DATAREG)) {
                QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
                return (-1);
        }

        for (i = 0; i < 16; i++) {
                ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_TYPE_NICQMASK |
                        (i << 8) | Q81_CTL_RI_DST_DFLTQ), 0);
                if (ret)
                        break;
        }

        qls_sem_unlock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG);

        return (ret);
}

int
qls_set_promisc(qla_host_t *ha)
{
        int ret;

        ret = qls_load_route_idx_reg_locked(ha,
                        (Q81_CTL_RI_E | Q81_CTL_RI_TYPE_NICQMASK |
                        Q81_CTL_RI_IDX_PROMISCUOUS | Q81_CTL_RI_DST_DFLTQ),
                        Q81_CTL_RD_VALID_PKT);
        return (ret);
}

void
qls_reset_promisc(qla_host_t *ha)
{
        int ret;

        ret = qls_load_route_idx_reg_locked(ha, (Q81_CTL_RI_TYPE_NICQMASK |
                        Q81_CTL_RI_IDX_PROMISCUOUS | Q81_CTL_RI_DST_DFLTQ), 0);
        return;
}

int
qls_set_allmulti(qla_host_t *ha)
{
        int ret;

        ret = qls_load_route_idx_reg_locked(ha,
                        (Q81_CTL_RI_E | Q81_CTL_RI_TYPE_NICQMASK |
                        Q81_CTL_RI_IDX_ALLMULTI | Q81_CTL_RI_DST_DFLTQ),
                        Q81_CTL_RD_MCAST);
        return (ret);
}

void
qls_reset_allmulti(qla_host_t *ha)
{
        int ret;

        ret = qls_load_route_idx_reg_locked(ha, (Q81_CTL_RI_TYPE_NICQMASK |
                        Q81_CTL_RI_IDX_ALLMULTI | Q81_CTL_RI_DST_DFLTQ), 0);
        return;
}

static int
qls_init_fw_routing_table(qla_host_t *ha)
{
        int ret = 0;

        ret = qls_clear_routing_table(ha);
        if (ret)
                return (-1);

        if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG,
                Q81_CTL_SEM_SET_RIDX_DATAREG)) {
                QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
                return (-1);
        }

        ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DROP |
                        Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_ALL_ERROR),
                        Q81_CTL_RD_ERROR_PKT);
        if (ret)
                goto qls_init_fw_routing_table_exit;

        ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DFLTQ |
                        Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_BCAST),
                        Q81_CTL_RD_BCAST);
        if (ret)
                goto qls_init_fw_routing_table_exit;

        if (ha->num_rx_rings > 1) {
                ret = qls_load_route_idx_reg(ha,
                                (Q81_CTL_RI_E | Q81_CTL_RI_DST_RSS |
                                Q81_CTL_RI_TYPE_NICQMASK |
                                Q81_CTL_RI_IDX_RSS_MATCH),
                                Q81_CTL_RD_RSS_MATCH);
                if (ret)
                        goto qls_init_fw_routing_table_exit;
        }

        ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DFLTQ |
                        Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_MCAST_MATCH),
                        Q81_CTL_RD_MCAST_REG_MATCH);
        if (ret)
                goto qls_init_fw_routing_table_exit;

        ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DFLTQ |
                        Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_CAM_HIT),
                        Q81_CTL_RD_CAM_HIT);
        if (ret)
                goto qls_init_fw_routing_table_exit;

qls_init_fw_routing_table_exit:
        qls_sem_unlock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG);
        return (ret);
}

static int
qls_tx_tso_chksum(qla_host_t *ha, struct mbuf *mp, q81_tx_tso_t *tx_mac)
{
        struct ether_vlan_header *eh;
        struct ip *ip;
        struct ip6_hdr *ip6;
        struct tcphdr *th;
        uint32_t ehdrlen, ip_hlen;
        int ret = 0;
        uint16_t etype;
        device_t dev;
        uint8_t buf[sizeof(struct ip6_hdr)];

        dev = ha->pci_dev;

        eh = mtod(mp, struct ether_vlan_header *);

        if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
                ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
                etype = ntohs(eh->evl_proto);
        } else {
                ehdrlen = ETHER_HDR_LEN;
                etype = ntohs(eh->evl_encap_proto);
        }

        switch (etype) {
                case ETHERTYPE_IP:
                        ip = (struct ip *)(mp->m_data + ehdrlen);

                        ip_hlen = sizeof (struct ip);

                        if (mp->m_len < (ehdrlen + ip_hlen)) {
                                m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
                                ip = (struct ip *)buf;
                        }
                        tx_mac->opcode = Q81_IOCB_TX_TSO;
                        tx_mac->flags |= Q81_TX_TSO_FLAGS_IPV4;

                        tx_mac->phdr_offsets = ehdrlen;

                        tx_mac->phdr_offsets |= ((ehdrlen + ip_hlen) <<
                                                        Q81_TX_TSO_PHDR_SHIFT);

                        ip->ip_sum = 0;

                        if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
                                tx_mac->flags |= Q81_TX_TSO_FLAGS_LSO;

                                th = (struct tcphdr *)(ip + 1);

                                th->th_sum = in_pseudo(ip->ip_src.s_addr,
                                                ip->ip_dst.s_addr,
                                                htons(IPPROTO_TCP));
                                tx_mac->mss = mp->m_pkthdr.tso_segsz;
                                tx_mac->phdr_length = ip_hlen + ehdrlen +
                                                        (th->th_off << 2);
                                break;
                        }
                        tx_mac->vlan_off |= Q81_TX_TSO_VLAN_OFF_IC;

                        if (ip->ip_p == IPPROTO_TCP) {
                                tx_mac->flags |= Q81_TX_TSO_FLAGS_TC;
                        } else if (ip->ip_p == IPPROTO_UDP) {
                                tx_mac->flags |= Q81_TX_TSO_FLAGS_UC;
                        }
                break;

                case ETHERTYPE_IPV6:
                        ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);

                        ip_hlen = sizeof(struct ip6_hdr);

                        if (mp->m_len < (ehdrlen + ip_hlen)) {
                                m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
                                        buf);
                                ip6 = (struct ip6_hdr *)buf;
                        }

                        tx_mac->opcode = Q81_IOCB_TX_TSO;
                        tx_mac->flags |= Q81_TX_TSO_FLAGS_IPV6;
                        tx_mac->vlan_off |= Q81_TX_TSO_VLAN_OFF_IC;

                        tx_mac->phdr_offsets = ehdrlen;
                        tx_mac->phdr_offsets |= ((ehdrlen + ip_hlen) <<
                                                        Q81_TX_TSO_PHDR_SHIFT);

                        if (ip6->ip6_nxt == IPPROTO_TCP) {
                                tx_mac->flags |= Q81_TX_TSO_FLAGS_TC;
                        } else if (ip6->ip6_nxt == IPPROTO_UDP) {
                                tx_mac->flags |= Q81_TX_TSO_FLAGS_UC;
                        }
                break;

                default:
                        ret = -1;
                break;
        }

        return (ret);
}
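
/*
 * Editor's note (added; addresses are hypothetical): for the CSUM_TSO path
 * above, th_sum is seeded with the IPv4 pseudo-header checksum *excluding*
 * the TCP length (in_pseudo(src, dst, htons(IPPROTO_TCP))); the controller
 * folds in the per-segment length as it carves the payload into MSS-sized
 * frames. Worked example for src 192.0.2.1, dst 192.0.2.2:
 *
 *      0xc000 + 0x0201 + 0xc000 + 0x0202 + 0x0006 = 0x18409
 *      fold: 0x8409 + 0x1 = 0x840a  ->  seeded th_sum
 */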

#define QLA_TX_MIN_FREE 2
int
qls_hw_tx_done(qla_host_t *ha, uint32_t txr_idx)
{
        uint32_t txr_done, txr_next;

        txr_done = ha->tx_ring[txr_idx].txr_done;
        txr_next = ha->tx_ring[txr_idx].txr_next;

        if (txr_done == txr_next) {
                ha->tx_ring[txr_idx].txr_free = NUM_TX_DESCRIPTORS;
        } else if (txr_done > txr_next) {
                ha->tx_ring[txr_idx].txr_free = txr_done - txr_next;
        } else {
                ha->tx_ring[txr_idx].txr_free = NUM_TX_DESCRIPTORS +
                        txr_done - txr_next;
        }

        if (ha->tx_ring[txr_idx].txr_free <= QLA_TX_MIN_FREE)
                return (-1);

        return (0);
}
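
/*
 * Editor's worked example (added; the descriptor count is illustrative):
 * with NUM_TX_DESCRIPTORS = 256, txr_done = 10 (consumer) and txr_next = 250
 * (producer), the ring has wrapped, so txr_free = 256 + 10 - 250 = 16.
 * The QLA_TX_MIN_FREE slack keeps the producer index from ever catching up
 * with the consumer index.
 */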

/*
 * Name: qls_hw_send
 * Function: Transmits a packet. It first checks if the packet is a
 *      candidate for Large TCP Segment Offload and then for UDP/TCP checksum
 *      offload. If it meets neither criterion, it is transmitted as a
 *      regular ethernet frame.
 */
int
qls_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
        uint32_t txr_next, struct mbuf *mp, uint32_t txr_idx)
{
        q81_tx_mac_t *tx_mac;
        q81_txb_desc_t *tx_desc;
        uint32_t total_length = 0;
        uint32_t i;
        device_t dev;
        int ret = 0;

        dev = ha->pci_dev;

        total_length = mp->m_pkthdr.len;

        if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
                device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
                        __func__, total_length);
                return (-1);
        }

        if (ha->tx_ring[txr_idx].txr_free <= (NUM_TX_DESCRIPTORS >> 2)) {
                if (qls_hw_tx_done(ha, txr_idx)) {
                        device_printf(dev, "%s: tx_free[%d] = %d\n",
                                __func__, txr_idx,
                                ha->tx_ring[txr_idx].txr_free);
                        return (-1);
                }
        }

        tx_mac = (q81_tx_mac_t *)&ha->tx_ring[txr_idx].wq_vaddr[txr_next];

        bzero(tx_mac, sizeof(q81_tx_mac_t));

        if ((mp->m_pkthdr.csum_flags &
                        (CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO)) != 0) {

                ret = qls_tx_tso_chksum(ha, mp, (q81_tx_tso_t *)tx_mac);
                if (ret)
                        return (EINVAL);

                if (mp->m_pkthdr.csum_flags & CSUM_TSO)
                        ha->tx_ring[txr_idx].tx_tso_frames++;
                else
                        ha->tx_ring[txr_idx].tx_frames++;

        } else {
                tx_mac->opcode = Q81_IOCB_TX_MAC;
        }

        if (mp->m_flags & M_VLANTAG) {

                tx_mac->vlan_tci = mp->m_pkthdr.ether_vtag;
                tx_mac->vlan_off |= Q81_TX_MAC_VLAN_OFF_V;

                ha->tx_ring[txr_idx].tx_vlan_frames++;
        }

        tx_mac->frame_length = total_length;

        tx_mac->tid_lo = txr_next;

        if (nsegs <= MAX_TX_MAC_DESC) {

                QL_DPRINT2((dev, "%s: 1 [%d, %d]\n", __func__, total_length,
                        tx_mac->tid_lo));

                for (i = 0; i < nsegs; i++) {
                        tx_mac->txd[i].baddr = segs->ds_addr;
                        tx_mac->txd[i].length = segs->ds_len;
                        segs++;
                }
                tx_mac->txd[(nsegs - 1)].flags = Q81_RXB_DESC_FLAGS_E;

        } else {
                QL_DPRINT2((dev, "%s: 2 [%d, %d]\n", __func__, total_length,
                        tx_mac->tid_lo));

                tx_mac->txd[0].baddr =
                        ha->tx_ring[txr_idx].tx_buf[txr_next].oal_paddr;
                tx_mac->txd[0].length =
                        nsegs * (sizeof(q81_txb_desc_t));
                tx_mac->txd[0].flags = Q81_RXB_DESC_FLAGS_C;

                tx_desc = ha->tx_ring[txr_idx].tx_buf[txr_next].oal_vaddr;

                for (i = 0; i < nsegs; i++) {
                        tx_desc->baddr = segs->ds_addr;
                        tx_desc->length = segs->ds_len;

                        if (i == (nsegs - 1))
                                tx_desc->flags = Q81_RXB_DESC_FLAGS_E;
                        else
                                tx_desc->flags = 0;

                        segs++;
                        tx_desc++;
                }
        }
        txr_next = (txr_next + 1) & (NUM_TX_DESCRIPTORS - 1);
        ha->tx_ring[txr_idx].txr_next = txr_next;

        ha->tx_ring[txr_idx].txr_free--;

        Q81_WR_WQ_PROD_IDX(txr_idx, txr_next);

        return (0);
}
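
/*
 * Editor's note on the two descriptor paths above (added): when a packet
 * maps to more than MAX_TX_MAC_DESC segments, the IOCB carries a single
 * descriptor flagged Q81_RXB_DESC_FLAGS_C that points at the slot's
 * pre-allocated outbound address list (oal_vaddr/oal_paddr), which holds the
 * full per-segment array; in both paths Q81_RXB_DESC_FLAGS_E marks the last
 * element.
 */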

/*
 * Name: qls_del_hw_if
 * Function: Destroys the hardware specific entities corresponding to an
 *      Ethernet Interface
 */
void
qls_del_hw_if(qla_host_t *ha)
{
        uint32_t value;
        int i;

        if (ha->hw_init == 0) {
                qls_hw_reset(ha);
                return;
        }

        for (i = 0; i < ha->num_tx_rings; i++) {
                Q81_SET_WQ_INVALID(i);
        }
        for (i = 0; i < ha->num_rx_rings; i++) {
                Q81_SET_CQ_INVALID(i);
        }

        for (i = 0; i < ha->num_rx_rings; i++) {
                Q81_DISABLE_INTR(ha, i); /* MSI-x i */
        }

        value = (Q81_CTL_INTRE_IHD << Q81_CTL_INTRE_MASK_SHIFT);
        WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);

        value = (Q81_CTL_INTRE_EI << Q81_CTL_INTRE_MASK_SHIFT);
        WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);
        ha->flags.intr_enable = 0;

        qls_hw_reset(ha);

        return;
}

/*
 * Name: qls_init_hw_if
 * Function: Creates the hardware specific entities corresponding to an
 *      Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
 *      corresponding to the interface. Enables LRO if allowed.
 */
int
qls_init_hw_if(qla_host_t *ha)
{
        device_t        dev;
        uint32_t        value;
        int             ret = 0;
        int             i;

        QL_DPRINT2((ha->pci_dev, "%s:enter\n", __func__));

        dev = ha->pci_dev;

        ret = qls_hw_reset(ha);
        if (ret)
                goto qls_init_hw_if_exit;

        ha->vm_pgsize = 4096;

        /* Enable FAE and EFE bits in System Register */
        value = Q81_CTL_SYSTEM_ENABLE_FAE | Q81_CTL_SYSTEM_ENABLE_EFE;
        value = (value << Q81_CTL_SYSTEM_MASK_SHIFT) | value;

        WRITE_REG32(ha, Q81_CTL_SYSTEM, value);

        /* Set Default Completion Queue_ID in NIC Rcv Configuration Register */
        value = (Q81_CTL_NIC_RCVC_DCQ_MASK << Q81_CTL_NIC_RCVC_MASK_SHIFT);
        WRITE_REG32(ha, Q81_CTL_NIC_RCV_CONFIG, value);

        /* Function Specific Control Register - Set Page Size and Enable NIC */
        value = Q81_CTL_FUNC_SPECIFIC_FE |
                Q81_CTL_FUNC_SPECIFIC_VM_PGSIZE_MASK |
                Q81_CTL_FUNC_SPECIFIC_EPC_O |
                Q81_CTL_FUNC_SPECIFIC_EPC_I |
                Q81_CTL_FUNC_SPECIFIC_EC;
        value = (value << Q81_CTL_FUNC_SPECIFIC_MASK_SHIFT) |
                        Q81_CTL_FUNC_SPECIFIC_FE |
                        Q81_CTL_FUNC_SPECIFIC_VM_PGSIZE_4K |
                        Q81_CTL_FUNC_SPECIFIC_EPC_O |
                        Q81_CTL_FUNC_SPECIFIC_EPC_I |
                        Q81_CTL_FUNC_SPECIFIC_EC;

        WRITE_REG32(ha, Q81_CTL_FUNC_SPECIFIC, value);

        /* Interrupt Mask Register */
        value = Q81_CTL_INTRM_PI;
        value = (value << Q81_CTL_INTRM_MASK_SHIFT) | value;

        WRITE_REG32(ha, Q81_CTL_INTR_MASK, value);

        /* Initialize Completion Queues */
        for (i = 0; i < ha->num_rx_rings; i++) {
                ret = qls_init_comp_queue(ha, i);
                if (ret)
                        goto qls_init_hw_if_exit;
        }

        if (ha->num_rx_rings > 1) {
                ret = qls_init_rss(ha);
                if (ret)
                        goto qls_init_hw_if_exit;
        }

        /* Initialize Work Queues */
        for (i = 0; i < ha->num_tx_rings; i++) {
                ret = qls_init_work_queue(ha, i);
                if (ret)
                        goto qls_init_hw_if_exit;
        }

        if (ret)
                goto qls_init_hw_if_exit;

        /* Set up CAM RAM with MAC Address */
        ret = qls_config_unicast_mac_addr(ha, 1);
        if (ret)
                goto qls_init_hw_if_exit;

        ret = qls_hw_add_all_mcast(ha);
        if (ret)
                goto qls_init_hw_if_exit;

        /* Initialize Firmware Routing Table */
        ret = qls_init_fw_routing_table(ha);
        if (ret)
                goto qls_init_hw_if_exit;

        /* Get Chip Revision ID */
        ha->rev_id = READ_REG32(ha, Q81_CTL_REV_ID);

        /* Enable Global Interrupt */
        value = Q81_CTL_INTRE_EI;
        value = (value << Q81_CTL_INTRE_MASK_SHIFT) | value;

        WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);

        /* Enable Interrupt Handshake Disable */
        value = Q81_CTL_INTRE_IHD;
        value = (value << Q81_CTL_INTRE_MASK_SHIFT) | value;

        WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value);

        /* Enable Completion Interrupts */
        ha->flags.intr_enable = 1;

        for (i = 0; i < ha->num_rx_rings; i++) {
                Q81_ENABLE_INTR(ha, i); /* MSI-x i */
        }

        ha->hw_init = 1;

        qls_mbx_get_link_status(ha);

        QL_DPRINT2((ha->pci_dev, "%s:rxr [0x%08x]\n", __func__,
                ha->rx_ring[0].cq_db_offset));
        QL_DPRINT2((ha->pci_dev, "%s:txr [0x%08x]\n", __func__,
                ha->tx_ring[0].wq_db_offset));

        for (i = 0; i < ha->num_rx_rings; i++) {

                Q81_WR_CQ_CONS_IDX(i, 0);
                Q81_WR_LBQ_PROD_IDX(i, ha->rx_ring[i].lbq_in);
                Q81_WR_SBQ_PROD_IDX(i, ha->rx_ring[i].sbq_in);

                QL_DPRINT2((dev, "%s: [wq_idx, cq_idx, lbq_idx, sbq_idx]"
                        "[0x%08x, 0x%08x, 0x%08x, 0x%08x]\n", __func__,
                        Q81_RD_WQ_IDX(i), Q81_RD_CQ_IDX(i), Q81_RD_LBQ_IDX(i),
                        Q81_RD_SBQ_IDX(i)));
        }

        for (i = 0; i < ha->num_rx_rings; i++) {
                Q81_SET_CQ_VALID(i);
        }

qls_init_hw_if_exit:
        QL_DPRINT2((ha->pci_dev, "%s:exit\n", __func__));
        return (ret);
}

static int
qls_wait_for_config_reg_bits(qla_host_t *ha, uint32_t bits, uint32_t value)
{
        uint32_t data32;
        uint32_t count = 3;

        while (count--) {

                data32 = READ_REG32(ha, Q81_CTL_CONFIG);

                if ((data32 & bits) == value)
                        return (0);

                QLA_USEC_DELAY(100);
        }
        ha->qla_initiate_recovery = 1;
        device_printf(ha->pci_dev, "%s: failed\n", __func__);
        return (-1);
}

static uint8_t q81_hash_key[] = {
                        0xda, 0x56, 0x5a, 0x6d,
                        0xc2, 0x0e, 0x5b, 0x25,
                        0x3d, 0x25, 0x67, 0x41,
                        0xb0, 0x8f, 0xa3, 0x43,
                        0xcb, 0x2b, 0xca, 0xd0,
                        0xb4, 0x30, 0x7b, 0xae,
                        0xa3, 0x2d, 0xcb, 0x77,
                        0x0c, 0xf2, 0x30, 0x80,
                        0x3b, 0xb7, 0x42, 0x6a,
                        0xfa, 0x01, 0xac, 0xbe };

static int
qls_init_rss(qla_host_t *ha)
{
        q81_rss_icb_t   *rss_icb;
        int             ret = 0;
        int             i;
        uint32_t        value;

        rss_icb = ha->rss_dma.dma_b;

        bzero(rss_icb, sizeof (q81_rss_icb_t));

        rss_icb->flags_base_cq_num = Q81_RSS_ICB_FLAGS_L4K |
                                Q81_RSS_ICB_FLAGS_L6K | Q81_RSS_ICB_FLAGS_LI |
                                Q81_RSS_ICB_FLAGS_LB | Q81_RSS_ICB_FLAGS_LM |
                                Q81_RSS_ICB_FLAGS_RT4 | Q81_RSS_ICB_FLAGS_RT6;

        rss_icb->mask = 0x3FF;

        for (i = 0; i < Q81_RSS_ICB_NUM_INDTBL_ENTRIES; i++) {
                rss_icb->cq_id[i] = (i & (ha->num_rx_rings - 1));
        }

        memcpy(rss_icb->ipv6_rss_hash_key, q81_hash_key, 40);
        memcpy(rss_icb->ipv4_rss_hash_key, q81_hash_key, 16);

        ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LR, 0);

        if (ret)
                goto qls_init_rss_exit;

        ret = qls_sem_lock(ha, Q81_CTL_SEM_MASK_ICB, Q81_CTL_SEM_SET_ICB);

        if (ret) {
                QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
                goto qls_init_rss_exit;
        }

        value = (uint32_t)ha->rss_dma.dma_addr;
        WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_LO, value);

        value = (uint32_t)(ha->rss_dma.dma_addr >> 32);
        WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_HI, value);

        qls_sem_unlock(ha, Q81_CTL_SEM_MASK_ICB);

        value = (Q81_CTL_CONFIG_LR << Q81_CTL_CONFIG_MASK_SHIFT) |
                        Q81_CTL_CONFIG_LR;

        WRITE_REG32(ha, Q81_CTL_CONFIG, value);

        ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LR, 0);

qls_init_rss_exit:
        return (ret);
}
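
/*
 * Editor's note (added; the table size is inferred from the 0x3FF hash
 * mask): the RSS indirection table is filled with (i & (num_rx_rings - 1)),
 * which spreads hash buckets evenly only when num_rx_rings is a power of
 * two; with 4 rings, for example, the table repeats 0, 1, 2, 3, 0, 1, ...
 */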

static int
qls_init_comp_queue(qla_host_t *ha, int cid)
{
        q81_cq_icb_t    *cq_icb;
        qla_rx_ring_t   *rxr;
        int             ret = 0;
        uint32_t        value;

        rxr = &ha->rx_ring[cid];

        rxr->cq_db_offset = ha->vm_pgsize * (128 + cid);

        cq_icb = rxr->cq_icb_vaddr;

        bzero(cq_icb, sizeof (q81_cq_icb_t));

        cq_icb->msix_vector = cid;
        cq_icb->flags = Q81_CQ_ICB_FLAGS_LC |
                        Q81_CQ_ICB_FLAGS_LI |
                        Q81_CQ_ICB_FLAGS_LL |
                        Q81_CQ_ICB_FLAGS_LS |
                        Q81_CQ_ICB_FLAGS_LV;

        cq_icb->length_v = NUM_CQ_ENTRIES;

        cq_icb->cq_baddr_lo = (rxr->cq_base_paddr & 0xFFFFFFFF);
        cq_icb->cq_baddr_hi = (rxr->cq_base_paddr >> 32) & 0xFFFFFFFF;

        cq_icb->cqi_addr_lo = (rxr->cqi_paddr & 0xFFFFFFFF);
        cq_icb->cqi_addr_hi = (rxr->cqi_paddr >> 32) & 0xFFFFFFFF;

        cq_icb->pkt_idelay = 10;
        cq_icb->idelay = 100;

        cq_icb->lbq_baddr_lo = (rxr->lbq_addr_tbl_paddr & 0xFFFFFFFF);
        cq_icb->lbq_baddr_hi = (rxr->lbq_addr_tbl_paddr >> 32) & 0xFFFFFFFF;

        cq_icb->lbq_bsize = QLA_LGB_SIZE;
        cq_icb->lbq_length = QLA_NUM_LGB_ENTRIES;

        cq_icb->sbq_baddr_lo = (rxr->sbq_addr_tbl_paddr & 0xFFFFFFFF);
        cq_icb->sbq_baddr_hi = (rxr->sbq_addr_tbl_paddr >> 32) & 0xFFFFFFFF;

        cq_icb->sbq_bsize = (uint16_t)ha->msize;
        cq_icb->sbq_length = QLA_NUM_SMB_ENTRIES;

        QL_DUMP_CQ(ha);

        ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LCQ, 0);

        if (ret)
                goto qls_init_comp_queue_exit;

        ret = qls_sem_lock(ha, Q81_CTL_SEM_MASK_ICB, Q81_CTL_SEM_SET_ICB);

        if (ret) {
                QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
                goto qls_init_comp_queue_exit;
        }

        value = (uint32_t)rxr->cq_icb_paddr;
        WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_LO, value);

        value = (uint32_t)(rxr->cq_icb_paddr >> 32);
        WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_HI, value);

        qls_sem_unlock(ha, Q81_CTL_SEM_MASK_ICB);

        value = Q81_CTL_CONFIG_LCQ | Q81_CTL_CONFIG_Q_NUM_MASK;
        value = (value << Q81_CTL_CONFIG_MASK_SHIFT) | Q81_CTL_CONFIG_LCQ;
        value |= (cid << Q81_CTL_CONFIG_Q_NUM_SHIFT);
        WRITE_REG32(ha, Q81_CTL_CONFIG, value);

        ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LCQ, 0);

        rxr->cq_next = 0;
        rxr->lbq_next = rxr->lbq_free = 0;
        rxr->sbq_next = rxr->sbq_free = 0;
        rxr->rx_free = rxr->rx_next = 0;
        rxr->lbq_in = (QLA_NUM_LGB_ENTRIES - 1) & ~0xF;
        rxr->sbq_in = (QLA_NUM_SMB_ENTRIES - 1) & ~0xF;

qls_init_comp_queue_exit:
        return (ret);
}
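
/*
 * Editor's note (added; inferred from the ~0xF rounding above): lbq_in and
 * sbq_in seed the initial buffer-queue producer indices rounded down to a
 * multiple of 16, i.e. buffers are handed to the chip in 16-entry batches;
 * the matching Q81_WR_LBQ_PROD_IDX/Q81_WR_SBQ_PROD_IDX doorbell writes are
 * issued later, from qls_init_hw_if().
 */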

static int
qls_init_work_queue(qla_host_t *ha, int wid)
{
        q81_wq_icb_t    *wq_icb;
        qla_tx_ring_t   *txr;
        int             ret = 0;
        uint32_t        value;

        txr = &ha->tx_ring[wid];

        txr->wq_db_addr = (struct resource *)((uint8_t *)ha->pci_reg1
                                                + (ha->vm_pgsize * wid));

        txr->wq_db_offset = (ha->vm_pgsize * wid);

        wq_icb = txr->wq_icb_vaddr;
        bzero(wq_icb, sizeof (q81_wq_icb_t));

        wq_icb->length_v = NUM_TX_DESCRIPTORS |
                                Q81_WQ_ICB_VALID;

        wq_icb->flags = Q81_WQ_ICB_FLAGS_LO | Q81_WQ_ICB_FLAGS_LI |
                        Q81_WQ_ICB_FLAGS_LB | Q81_WQ_ICB_FLAGS_LC;

        wq_icb->wqcqid_rss = wid;

        wq_icb->baddr_lo = txr->wq_paddr & 0xFFFFFFFF;
        wq_icb->baddr_hi = (txr->wq_paddr >> 32) & 0xFFFFFFFF;

        wq_icb->ci_addr_lo = txr->txr_cons_paddr & 0xFFFFFFFF;
        wq_icb->ci_addr_hi = (txr->txr_cons_paddr >> 32) & 0xFFFFFFFF;

        ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LRQ, 0);

        if (ret)
                goto qls_init_wq_exit;

        ret = qls_sem_lock(ha, Q81_CTL_SEM_MASK_ICB, Q81_CTL_SEM_SET_ICB);

        if (ret) {
                QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
                goto qls_init_wq_exit;
        }

        value = (uint32_t)txr->wq_icb_paddr;
        WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_LO, value);

        value = (uint32_t)(txr->wq_icb_paddr >> 32);
        WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_HI, value);

        qls_sem_unlock(ha, Q81_CTL_SEM_MASK_ICB);

        value = Q81_CTL_CONFIG_LRQ | Q81_CTL_CONFIG_Q_NUM_MASK;
        value = (value << Q81_CTL_CONFIG_MASK_SHIFT) | Q81_CTL_CONFIG_LRQ;
        value |= (wid << Q81_CTL_CONFIG_Q_NUM_SHIFT);
        WRITE_REG32(ha, Q81_CTL_CONFIG, value);

        ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LRQ, 0);

        txr->txr_free = NUM_TX_DESCRIPTORS;
        txr->txr_next = 0;
        txr->txr_done = 0;

qls_init_wq_exit:
        return (ret);
}

static int
qls_hw_add_all_mcast(qla_host_t *ha)
{
        int i, nmcast;

        nmcast = ha->nmcast;

        for (i = 0; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
                if ((ha->mcast[i].addr[0] != 0) ||
                        (ha->mcast[i].addr[1] != 0) ||
                        (ha->mcast[i].addr[2] != 0) ||
                        (ha->mcast[i].addr[3] != 0) ||
                        (ha->mcast[i].addr[4] != 0) ||
                        (ha->mcast[i].addr[5] != 0)) {

                        if (qls_config_mcast_mac_addr(ha, ha->mcast[i].addr,
                                1, i)) {
                                device_printf(ha->pci_dev, "%s: failed\n",
                                        __func__);
                                return (-1);
                        }

                        nmcast--;
                }
        }
        return (0);
}

static int
qls_hw_add_mcast(qla_host_t *ha, uint8_t *mta)
{
        int i;

        for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {

                if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0)
                        return (0); /* it has already been added */
        }

        for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {

                if ((ha->mcast[i].addr[0] == 0) &&
                        (ha->mcast[i].addr[1] == 0) &&
                        (ha->mcast[i].addr[2] == 0) &&
                        (ha->mcast[i].addr[3] == 0) &&
                        (ha->mcast[i].addr[4] == 0) &&
                        (ha->mcast[i].addr[5] == 0)) {

                        if (qls_config_mcast_mac_addr(ha, mta, 1, i))
                                return (-1);

                        bcopy(mta, ha->mcast[i].addr, Q8_MAC_ADDR_LEN);
                        ha->nmcast++;

                        return (0);
                }
        }
        return (0);
}

static int
qls_hw_del_mcast(qla_host_t *ha, uint8_t *mta)
{
        int i;

        for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
                if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) {

                        if (qls_config_mcast_mac_addr(ha, mta, 0, i))
                                return (-1);

                        ha->mcast[i].addr[0] = 0;
                        ha->mcast[i].addr[1] = 0;
                        ha->mcast[i].addr[2] = 0;
                        ha->mcast[i].addr[3] = 0;
                        ha->mcast[i].addr[4] = 0;
                        ha->mcast[i].addr[5] = 0;

                        ha->nmcast--;

                        return (0);
                }
        }
        return (0);
}

/*
 * Name: qls_hw_set_multi
 * Function: Sets the Multicast Addresses provided by the host O.S. into the
 *      hardware (for the given interface)
 */
void
qls_hw_set_multi(qla_host_t *ha, uint8_t *mta, uint32_t mcnt,
        uint32_t add_mac)
{
        int i;

        for (i = 0; i < mcnt; i++) {
                if (add_mac) {
                        if (qls_hw_add_mcast(ha, mta))
                                break;
                } else {
                        if (qls_hw_del_mcast(ha, mta))
                                break;
                }

                mta += Q8_MAC_ADDR_LEN;
        }
        return;
}

void
qls_update_link_state(qla_host_t *ha)
{
        uint32_t link_state;
        uint32_t prev_link_state;

        if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                ha->link_up = 0;
                return;
        }
        link_state = READ_REG32(ha, Q81_CTL_STATUS);

        prev_link_state = ha->link_up;

        if ((ha->pci_func & 0x1) == 0)
                ha->link_up = ((link_state & Q81_CTL_STATUS_PL0) ? 1 : 0);
        else
                ha->link_up = ((link_state & Q81_CTL_STATUS_PL1) ? 1 : 0);

        if (prev_link_state != ha->link_up) {
                if (ha->link_up) {
                        if_link_state_change(ha->ifp, LINK_STATE_UP);
                } else {
                        if_link_state_change(ha->ifp, LINK_STATE_DOWN);
                }
        }
        return;
}

static void
qls_free_tx_ring_dma(qla_host_t *ha, int r_idx)
{
        if (ha->tx_ring[r_idx].flags.wq_dma) {
                qls_free_dmabuf(ha, &ha->tx_ring[r_idx].wq_dma);
                ha->tx_ring[r_idx].flags.wq_dma = 0;
        }

        if (ha->tx_ring[r_idx].flags.privb_dma) {
                qls_free_dmabuf(ha, &ha->tx_ring[r_idx].privb_dma);
                ha->tx_ring[r_idx].flags.privb_dma = 0;
        }
        return;
}

static void
qls_free_tx_dma(qla_host_t *ha)
{
        int i, j;
        qla_tx_buf_t *txb;

        for (i = 0; i < ha->num_tx_rings; i++) {

                qls_free_tx_ring_dma(ha, i);

                for (j = 0; j < NUM_TX_DESCRIPTORS; j++) {

                        txb = &ha->tx_ring[i].tx_buf[j];

                        if (txb->map) {
                                bus_dmamap_destroy(ha->tx_tag, txb->map);
                        }
                }
        }

        if (ha->tx_tag != NULL) {
                bus_dma_tag_destroy(ha->tx_tag);
                ha->tx_tag = NULL;
        }

        return;
}

static int
qls_alloc_tx_ring_dma(qla_host_t *ha, int ridx)
{
        int             ret = 0, i;
        uint8_t         *v_addr;
        bus_addr_t      p_addr;
        qla_tx_buf_t    *txb;
        device_t        dev = ha->pci_dev;

        ha->tx_ring[ridx].wq_dma.alignment = 8;
        ha->tx_ring[ridx].wq_dma.size =
                NUM_TX_DESCRIPTORS * (sizeof (q81_tx_cmd_t));

        ret = qls_alloc_dmabuf(ha, &ha->tx_ring[ridx].wq_dma);

        if (ret) {
                device_printf(dev, "%s: [%d] txr failed\n", __func__, ridx);
                goto qls_alloc_tx_ring_dma_exit;
        }
        ha->tx_ring[ridx].flags.wq_dma = 1;

        ha->tx_ring[ridx].privb_dma.alignment = 8;
        ha->tx_ring[ridx].privb_dma.size = QLA_TX_PRIVATE_BSIZE;

        ret = qls_alloc_dmabuf(ha, &ha->tx_ring[ridx].privb_dma);

        if (ret) {
                device_printf(dev, "%s: [%d] oalb failed\n", __func__, ridx);
                goto qls_alloc_tx_ring_dma_exit;
        }

        ha->tx_ring[ridx].flags.privb_dma = 1;

        ha->tx_ring[ridx].wq_vaddr = ha->tx_ring[ridx].wq_dma.dma_b;
        ha->tx_ring[ridx].wq_paddr = ha->tx_ring[ridx].wq_dma.dma_addr;

        v_addr = ha->tx_ring[ridx].privb_dma.dma_b;
        p_addr = ha->tx_ring[ridx].privb_dma.dma_addr;

        ha->tx_ring[ridx].wq_icb_vaddr = v_addr;
        ha->tx_ring[ridx].wq_icb_paddr = p_addr;

        ha->tx_ring[ridx].txr_cons_vaddr =
                (uint32_t *)(v_addr + (PAGE_SIZE >> 1));
        ha->tx_ring[ridx].txr_cons_paddr = p_addr + (PAGE_SIZE >> 1);

        v_addr = v_addr + (PAGE_SIZE >> 1);
        p_addr = p_addr + (PAGE_SIZE >> 1);

        txb = ha->tx_ring[ridx].tx_buf;

        for (i = 0; i < NUM_TX_DESCRIPTORS; i++) {

                txb[i].oal_vaddr = v_addr;
                txb[i].oal_paddr = p_addr;

                v_addr = v_addr + QLA_OAL_BLK_SIZE;
                p_addr = p_addr + QLA_OAL_BLK_SIZE;
        }

qls_alloc_tx_ring_dma_exit:
        return (ret);
}

static int
qls_alloc_tx_dma(qla_host_t *ha)
{
        int     i, j;
        int     ret = 0;
        qla_tx_buf_t *txb;

        if (bus_dma_tag_create(NULL,    /* parent */
                1, 0,    /* alignment, bounds */
                BUS_SPACE_MAXADDR,       /* lowaddr */
                BUS_SPACE_MAXADDR,       /* highaddr */
                NULL, NULL,      /* filter, filterarg */
                QLA_MAX_TSO_FRAME_SIZE,     /* maxsize */
                QLA_MAX_SEGMENTS,        /* nsegments */
                PAGE_SIZE,        /* maxsegsize */
                BUS_DMA_ALLOCNOW,        /* flags */
                NULL,    /* lockfunc */
                NULL,    /* lockfuncarg */
                &ha->tx_tag)) {
                device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n",
                        __func__);
                return (ENOMEM);
        }

        for (i = 0; i < ha->num_tx_rings; i++) {

                ret = qls_alloc_tx_ring_dma(ha, i);

                if (ret) {
                        qls_free_tx_dma(ha);
                        break;
                }

                for (j = 0; j < NUM_TX_DESCRIPTORS; j++) {

                        txb = &ha->tx_ring[i].tx_buf[j];

                        ret = bus_dmamap_create(ha->tx_tag,
                                BUS_DMA_NOWAIT, &txb->map);
                        if (ret) {
                                ha->err_tx_dmamap_create++;
                                device_printf(ha->pci_dev,
                                "%s: bus_dmamap_create failed[%d, %d, %d]\n",
                                __func__, ret, i, j);

                                qls_free_tx_dma(ha);

                                return (ret);
                        }
                }
        }

        return (ret);
}
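
/*
 * Editor's note (added): the tag created above bounds every transmit mapping
 * to QLA_MAX_TSO_FRAME_SIZE bytes in at most QLA_MAX_SEGMENTS segments of
 * PAGE_SIZE each; loading an mbuf chain against ha->tx_tag in the transmit
 * path (outside this excerpt) is what produces the segs[]/nsegs pair
 * consumed by qls_hw_send().
 */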

static void
qls_free_rss_dma(qla_host_t *ha)
{
        qls_free_dmabuf(ha, &ha->rss_dma);
        ha->flags.rss_dma = 0;
}

static int
qls_alloc_rss_dma(qla_host_t *ha)
{
        int ret = 0;

        ha->rss_dma.alignment = 4;
        ha->rss_dma.size = PAGE_SIZE;

        ret = qls_alloc_dmabuf(ha, &ha->rss_dma);

        if (ret)
                device_printf(ha->pci_dev, "%s: failed\n", __func__);
        else
                ha->flags.rss_dma = 1;

        return (ret);
}

static void
qls_free_mpi_dma(qla_host_t *ha)
{
        qls_free_dmabuf(ha, &ha->mpi_dma);
        ha->flags.mpi_dma = 0;
}

static int
qls_alloc_mpi_dma(qla_host_t *ha)
{
        int ret = 0;

        ha->mpi_dma.alignment = 4;
        ha->mpi_dma.size = (0x4000 * 4);

        ret = qls_alloc_dmabuf(ha, &ha->mpi_dma);
        if (ret)
                device_printf(ha->pci_dev, "%s: failed\n", __func__);
        else
                ha->flags.mpi_dma = 1;

        return (ret);
}

static void
qls_free_rx_ring_dma(qla_host_t *ha, int ridx)
{
        if (ha->rx_ring[ridx].flags.cq_dma) {
                qls_free_dmabuf(ha, &ha->rx_ring[ridx].cq_dma);
                ha->rx_ring[ridx].flags.cq_dma = 0;
        }

        if (ha->rx_ring[ridx].flags.lbq_dma) {
                qls_free_dmabuf(ha, &ha->rx_ring[ridx].lbq_dma);
                ha->rx_ring[ridx].flags.lbq_dma = 0;
        }

        if (ha->rx_ring[ridx].flags.sbq_dma) {
                qls_free_dmabuf(ha, &ha->rx_ring[ridx].sbq_dma);
                ha->rx_ring[ridx].flags.sbq_dma = 0;
        }

        if (ha->rx_ring[ridx].flags.lb_dma) {
                qls_free_dmabuf(ha, &ha->rx_ring[ridx].lb_dma);
                ha->rx_ring[ridx].flags.lb_dma = 0;
        }
        return;
}

static void
qls_free_rx_dma(qla_host_t *ha)
{
        int i;

        for (i = 0; i < ha->num_rx_rings; i++) {
                qls_free_rx_ring_dma(ha, i);
        }

        if (ha->rx_tag != NULL) {
                bus_dma_tag_destroy(ha->rx_tag);
                ha->rx_tag = NULL;
        }

        return;
}

static int
qls_alloc_rx_ring_dma(qla_host_t *ha, int ridx)
{
        int                             i, ret = 0;
        uint8_t                         *v_addr;
        bus_addr_t                      p_addr;
        volatile q81_bq_addr_e_t        *bq_e;
        device_t                        dev = ha->pci_dev;

        ha->rx_ring[ridx].cq_dma.alignment = 128;
        ha->rx_ring[ridx].cq_dma.size =
                (NUM_CQ_ENTRIES * (sizeof (q81_cq_e_t))) + PAGE_SIZE;

        ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].cq_dma);

        if (ret) {
                device_printf(dev, "%s: [%d] cq failed\n", __func__, ridx);
                goto qls_alloc_rx_ring_dma_exit;
        }
        ha->rx_ring[ridx].flags.cq_dma = 1;

        ha->rx_ring[ridx].lbq_dma.alignment = 8;
        ha->rx_ring[ridx].lbq_dma.size = QLA_LGBQ_AND_TABLE_SIZE;

        ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].lbq_dma);

        if (ret) {
                device_printf(dev, "%s: [%d] lbq failed\n", __func__, ridx);
                goto qls_alloc_rx_ring_dma_exit;
        }
        ha->rx_ring[ridx].flags.lbq_dma = 1;

        ha->rx_ring[ridx].sbq_dma.alignment = 8;
        ha->rx_ring[ridx].sbq_dma.size = QLA_SMBQ_AND_TABLE_SIZE;

        ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].sbq_dma);

        if (ret) {
                device_printf(dev, "%s: [%d] sbq failed\n", __func__, ridx);
                goto qls_alloc_rx_ring_dma_exit;
        }
        ha->rx_ring[ridx].flags.sbq_dma = 1;

        ha->rx_ring[ridx].lb_dma.alignment = 8;
        ha->rx_ring[ridx].lb_dma.size = (QLA_LGB_SIZE * QLA_NUM_LGB_ENTRIES);

        ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].lb_dma);
        if (ret) {
                device_printf(dev, "%s: [%d] lb failed\n", __func__, ridx);
                goto qls_alloc_rx_ring_dma_exit;
        }
        ha->rx_ring[ridx].flags.lb_dma = 1;

        bzero(ha->rx_ring[ridx].cq_dma.dma_b, ha->rx_ring[ridx].cq_dma.size);
        bzero(ha->rx_ring[ridx].lbq_dma.dma_b, ha->rx_ring[ridx].lbq_dma.size);
        bzero(ha->rx_ring[ridx].sbq_dma.dma_b, ha->rx_ring[ridx].sbq_dma.size);
        bzero(ha->rx_ring[ridx].lb_dma.dma_b, ha->rx_ring[ridx].lb_dma.size);

        /* completion queue */
        ha->rx_ring[ridx].cq_base_vaddr = ha->rx_ring[ridx].cq_dma.dma_b;
        ha->rx_ring[ridx].cq_base_paddr = ha->rx_ring[ridx].cq_dma.dma_addr;

        v_addr = ha->rx_ring[ridx].cq_dma.dma_b;
        p_addr = ha->rx_ring[ridx].cq_dma.dma_addr;

        v_addr = v_addr + (NUM_CQ_ENTRIES * (sizeof (q81_cq_e_t)));
        p_addr = p_addr + (NUM_CQ_ENTRIES * (sizeof (q81_cq_e_t)));

        /* completion queue icb */
        ha->rx_ring[ridx].cq_icb_vaddr = v_addr;
        ha->rx_ring[ridx].cq_icb_paddr = p_addr;

        v_addr = v_addr + (PAGE_SIZE >> 2);
        p_addr = p_addr + (PAGE_SIZE >> 2);

        /* completion queue index register */
        ha->rx_ring[ridx].cqi_vaddr = (uint32_t *)v_addr;
        ha->rx_ring[ridx].cqi_paddr = p_addr;

        v_addr = ha->rx_ring[ridx].lbq_dma.dma_b;
        p_addr = ha->rx_ring[ridx].lbq_dma.dma_addr;

        /* large buffer queue address table */
        ha->rx_ring[ridx].lbq_addr_tbl_vaddr = v_addr;
        ha->rx_ring[ridx].lbq_addr_tbl_paddr = p_addr;

        /* large buffer queue */
        ha->rx_ring[ridx].lbq_vaddr = v_addr + PAGE_SIZE;
        ha->rx_ring[ridx].lbq_paddr = p_addr + PAGE_SIZE;

        v_addr = ha->rx_ring[ridx].sbq_dma.dma_b;
        p_addr = ha->rx_ring[ridx].sbq_dma.dma_addr;

        /* small buffer queue address table */
        ha->rx_ring[ridx].sbq_addr_tbl_vaddr = v_addr;
        ha->rx_ring[ridx].sbq_addr_tbl_paddr = p_addr;

        /* small buffer queue */
        ha->rx_ring[ridx].sbq_vaddr = v_addr + PAGE_SIZE;
        ha->rx_ring[ridx].sbq_paddr = p_addr + PAGE_SIZE;
1703
1704         ha->rx_ring[ridx].lb_vaddr = ha->rx_ring[ridx].lb_dma.dma_b;
1705         ha->rx_ring[ridx].lb_paddr = ha->rx_ring[ridx].lb_dma.dma_addr;
1706
1707         /* Initialize Large Buffer Queue Table */
1708
1709         p_addr = ha->rx_ring[ridx].lbq_paddr;
1710         bq_e = ha->rx_ring[ridx].lbq_addr_tbl_vaddr;
1711
1712         bq_e->addr_lo = p_addr & 0xFFFFFFFF;
1713         bq_e->addr_hi = (p_addr >> 32) & 0xFFFFFFFF;
1714
1715         p_addr = ha->rx_ring[ridx].lb_paddr;
1716         bq_e = ha->rx_ring[ridx].lbq_vaddr;
1717
1718         for (i = 0; i < QLA_NUM_LGB_ENTRIES; i++) {
1719                 bq_e->addr_lo = p_addr & 0xFFFFFFFF;
1720                 bq_e->addr_hi = (p_addr >> 32) & 0xFFFFFFFF;
1721
1722                 p_addr = p_addr + QLA_LGB_SIZE;
1723                 bq_e++;
1724         }
1725
1726         /* Initialize Small Buffer Queue Table */
1727
1728         p_addr = ha->rx_ring[ridx].sbq_paddr;
1729         bq_e = ha->rx_ring[ridx].sbq_addr_tbl_vaddr;
1730
1731         for (i = 0; i < (QLA_SBQ_SIZE / QLA_PAGE_SIZE); i++) {
1732                 bq_e->addr_lo = p_addr & 0xFFFFFFFF;
1733                 bq_e->addr_hi = (p_addr >> 32) & 0xFFFFFFFF;
1734
1735                 p_addr = p_addr + QLA_PAGE_SIZE;
1736                 bq_e++;
1737         }
1738
1739 qls_alloc_rx_ring_dma_exit:
1740         return (ret);
1741 }
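
/*
 * Each q81_bq_addr_e_t above splits a 64-bit bus address into two
 * 32-bit halves.  Worked example: a buffer at bus address
 * 0x123458000 is stored as addr_lo = 0x23458000 and
 * addr_hi = 0x00000001.
 */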
1742
1743 static int
1744 qls_alloc_rx_dma(qla_host_t *ha)
1745 {
1746         int     i;
1747         int     ret = 0;
1748
1749         if (bus_dma_tag_create(NULL,    /* parent */
1750                         1, 0,    /* alignment, bounds */
1751                         BUS_SPACE_MAXADDR,       /* lowaddr */
1752                         BUS_SPACE_MAXADDR,       /* highaddr */
1753                         NULL, NULL,      /* filter, filterarg */
1754                         MJUM9BYTES,     /* maxsize */
1755                         1,        /* nsegments */
1756                         MJUM9BYTES,        /* maxsegsize */
1757                         BUS_DMA_ALLOCNOW,        /* flags */
1758                         NULL,    /* lockfunc */
1759                         NULL,    /* lockfuncarg */
1760                         &ha->rx_tag)) {
1761
1762                 device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
1763                         __func__);
1764
1765                 return (ENOMEM);
1766         }
1767
1768         for (i = 0; i < ha->num_rx_rings; i++) {
1769                 ret = qls_alloc_rx_ring_dma(ha, i);
1770
1771                 if (ret) {
1772                         qls_free_rx_dma(ha);
1773                         break;
1774                 }
1775         }
1776
1777         return (ret);
1778 }
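
/*
 * The rx_tag created above maps up to MJUM9BYTES (one 9K jumbo
 * cluster) as a single segment anywhere in the 64-bit address space.
 * Passing a NULL parent tag is accepted; inheriting from
 * bus_get_dma_tag(ha->pci_dev) would pick up any platform DMA
 * restrictions and is the more common idiom in newer FreeBSD code.
 */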
1779
1780 static int
1781 qls_wait_for_flash_ready(qla_host_t *ha)
1782 {
1783         uint32_t data32;
1784         uint32_t count = 3;
1785
1786         while (count--) {
1787
1788                 data32 = READ_REG32(ha, Q81_CTL_FLASH_ADDR);
1789
1790                 if (data32 & Q81_CTL_FLASH_ADDR_ERR)
1791                         goto qls_wait_for_flash_ready_exit;
1792                 
1793                 if (data32 & Q81_CTL_FLASH_ADDR_RDY)
1794                         return (0);
1795
1796                 QLA_USEC_DELAY(100);
1797         }
1798
1799 qls_wait_for_flash_ready_exit:
1800         QL_DPRINT1((ha->pci_dev, "%s: failed\n", __func__));
1801
1802         return (-1);
1803 }
1804
1805 /*
1806  * Name: qls_rd_flash32
1807  * Function: Read Flash Memory
1808  */
1809 int
1810 qls_rd_flash32(qla_host_t *ha, uint32_t addr, uint32_t *data)
1811 {
1812         int ret;
1813
1814         ret = qls_wait_for_flash_ready(ha);
1815
1816         if (ret)
1817                 return (ret);
1818
1819         WRITE_REG32(ha, Q81_CTL_FLASH_ADDR, (addr | Q81_CTL_FLASH_ADDR_R));
1820
1821         ret = qls_wait_for_flash_ready(ha);
1822
1823         if (ret)
1824                 return (ret);
1825
1826         *data = READ_REG32(ha, Q81_CTL_FLASH_DATA);
1827
1828         return (0);
1829 }
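
/*
 * Illustrative use of qls_rd_flash32() — the caller is expected to
 * hold the flash semaphore, as qls_rd_nic_params() below does:
 *
 *      uint32_t word;
 *
 *      if (qls_rd_flash32(ha, faddr, &word) == 0) {
 *              // word now holds the 32 bits at flash word
 *              // address faddr
 *      }
 */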
1830
1831 static int
1832 qls_flash_validate(qla_host_t *ha, const char *signature)
1833 {
1834         uint16_t csum16 = 0;
1835         uint16_t *data16;
1836         int i;
1837
1838         if (bcmp(ha->flash.id, signature, 4)) {
1839                 QL_DPRINT1((ha->pci_dev, "%s: invalid signature "
1840                         "%x:%x:%x:%x %s\n", __func__, ha->flash.id[0],
1841                         ha->flash.id[1], ha->flash.id[2], ha->flash.id[3],
1842                         signature));
1843                 return (-1);
1844         }
1845
1846         data16 = (uint16_t *)&ha->flash;
1847
1848         for (i = 0; i < (sizeof (q81_flash_t) >> 1); i++) {
1849                 csum16 += *data16++;
1850         }
1851
1852         if (csum16) {
1853                 QL_DPRINT1((ha->pci_dev, "%s: invalid checksum\n", __func__));
1854                 return (-1);
1855         }
1856         return (0);
1857 }
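
/*
 * The validation rule above: every 16-bit word of the flash segment,
 * including the stored checksum field itself, must sum to zero in
 * uint16_t arithmetic.  For example, if the other words sum to
 * 0xABCD, the stored checksum must be 0x5433, since
 * 0xABCD + 0x5433 == 0x10000 == 0 modulo 2^16.
 */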
1858
1859 int
1860 qls_rd_nic_params(qla_host_t *ha)
1861 {
1862         int             i, ret = 0;
1863         uint32_t        faddr;
1864         uint32_t        *qflash;
1865
1866         if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_FLASH, Q81_CTL_SEM_SET_FLASH)) {
1867                 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__));
1868                 return (-1);
1869         }
1870
1871         if ((ha->pci_func & 0x1) == 0)
1872                 faddr = Q81_F0_FLASH_OFFSET >> 2;
1873         else
1874                 faddr = Q81_F1_FLASH_OFFSET >> 2;
1875
1876         qflash = (uint32_t *)&ha->flash;
1877
1878         for (i = 0; i < (sizeof(q81_flash_t) >> 2); i++) {
1879
1880                 ret = qls_rd_flash32(ha, faddr, qflash);
1881
1882                 if (ret)
1883                         goto qls_rd_flash_data_exit;
1884
1885                 faddr++;
1886                 qflash++;
1887         }
1888
1889         QL_DUMP_BUFFER8(ha, __func__, (&ha->flash), (sizeof (q81_flash_t)));
1890
1891         ret = qls_flash_validate(ha, Q81_FLASH_ID);
1892
1893         if (ret)
1894                 goto qls_rd_flash_data_exit;
1895
1896         bcopy(ha->flash.mac_addr0, ha->mac_addr, ETHER_ADDR_LEN);
1897
1898         QL_DPRINT1((ha->pci_dev, "%s: mac %02x:%02x:%02x:%02x:%02x:%02x\n",
1899                 __func__, ha->mac_addr[0],  ha->mac_addr[1], ha->mac_addr[2],
1900                 ha->mac_addr[3], ha->mac_addr[4],  ha->mac_addr[5]));
1901
1902 qls_rd_flash_data_exit:
1903
1904         qls_sem_unlock(ha, Q81_CTL_SEM_MASK_FLASH);
1905
1906         return (ret);
1907 }
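
/*
 * The flash appears to be word-addressed: the per-function parameter
 * block lives at byte offset Q81_F0_FLASH_OFFSET (PCI function 0) or
 * Q81_F1_FLASH_OFFSET (function 1), and the ">> 2" above converts
 * that byte offset into the 32-bit word address qls_rd_flash32()
 * expects.
 */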
1908
1909 static int
1910 qls_sem_lock(qla_host_t *ha, uint32_t mask, uint32_t value)
1911 {
1912         uint32_t count = 30;
1913         uint32_t data;
1914
1915         while (count--) {
1916                 WRITE_REG32(ha, Q81_CTL_SEMAPHORE, (mask|value));
1917         
1918                 data = READ_REG32(ha, Q81_CTL_SEMAPHORE);
1919
1920                 if (data & value) {
1921                         return (0);
1922                 } else {
1923                         QLA_USEC_DELAY(100);
1924                 }
1925         }
1926         ha->qla_initiate_recovery = 1;
1927         return (-1);
1928 }
1929
1930 static void
1931 qls_sem_unlock(qla_host_t *ha, uint32_t mask)
1932 {
1933         WRITE_REG32(ha, Q81_CTL_SEMAPHORE, mask);
1934 }
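
/*
 * Q81_CTL_SEMAPHORE implements per-resource hardware locks: writing
 * (mask | value) requests the lock, and reading "value" back means
 * this function owns it.  qls_sem_lock() retries for up to
 * 30 * 100us before flagging recovery.  Typical pairing, as in
 * qls_rd_nic_params() above:
 *
 *      if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_FLASH,
 *              Q81_CTL_SEM_SET_FLASH))
 *              return (-1);
 *      // ... access the flash ...
 *      qls_sem_unlock(ha, Q81_CTL_SEM_MASK_FLASH);
 */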
1935
1936 static int
1937 qls_wait_for_proc_addr_ready(qla_host_t *ha)
1938 {
1939         uint32_t data32;
1940         uint32_t count = 3;
1941
1942         while (count--) {
1943
1944                 data32 = READ_REG32(ha, Q81_CTL_PROC_ADDR);
1945
1946                 if (data32 & Q81_CTL_PROC_ADDR_ERR)
1947                         goto qls_wait_for_proc_addr_ready_exit;
1948                 
1949                 if (data32 & Q81_CTL_PROC_ADDR_RDY)
1950                         return (0);
1951
1952                 QLA_USEC_DELAY(100);
1953         }
1954
1955 qls_wait_for_proc_addr_ready_exit:
1956         QL_DPRINT1((ha->pci_dev, "%s: failed\n", __func__));
1957
1958         ha->qla_initiate_recovery = 1;
1959         return (-1);
1960 }
1961
1962 static int
1963 qls_proc_addr_rd_reg(qla_host_t *ha, uint32_t addr_module, uint32_t reg,
1964         uint32_t *data)
1965 {
1966         int ret;
1967         uint32_t value;
1968
1969         ret = qls_wait_for_proc_addr_ready(ha);
1970
1971         if (ret)
1972                 goto qls_proc_addr_rd_reg_exit;
1973
1974         value = addr_module | reg | Q81_CTL_PROC_ADDR_READ;
1975
1976         WRITE_REG32(ha, Q81_CTL_PROC_ADDR, value);
1977
1978         ret = qls_wait_for_proc_addr_ready(ha);
1979
1980         if (ret)
1981                 goto qls_proc_addr_rd_reg_exit;
1982
1983         *data = READ_REG32(ha, Q81_CTL_PROC_DATA);
1984
1985 qls_proc_addr_rd_reg_exit:
1986         return (ret);
1987 }
1988
1989 static int
1990 qls_proc_addr_wr_reg(qla_host_t *ha, uint32_t addr_module, uint32_t reg,
1991         uint32_t data)
1992 {
1993         int ret;
1994         uint32_t value;
1995
1996         ret = qls_wait_for_proc_addr_ready(ha);
1997
1998         if (ret)
1999                 goto qls_proc_addr_wr_reg_exit;
2000
2001         WRITE_REG32(ha, Q81_CTL_PROC_DATA, data);
2002
2003         value = addr_module | reg;
2004
2005         WRITE_REG32(ha, Q81_CTL_PROC_ADDR, value);
2006
2007         ret = qls_wait_for_proc_addr_ready(ha);
2008
2009 qls_proc_addr_wr_reg_exit:
2010         return (ret);
2011 }
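
/*
 * qls_proc_addr_rd_reg()/qls_proc_addr_wr_reg() drive the indirect
 * register window: the target register is selected through
 * Q81_CTL_PROC_ADDR (module | register, plus Q81_CTL_PROC_ADDR_READ
 * for reads), the payload moves through Q81_CTL_PROC_DATA, and a
 * ready/error poll brackets each access.
 */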
2012
2013 static int
2014 qls_hw_nic_reset(qla_host_t *ha)
2015 {
2016         int             count;
2017         uint32_t        data;
2018         device_t        dev = ha->pci_dev;
2019         
2020         ha->hw_init = 0;
2021
2022         data = (Q81_CTL_RESET_FUNC << Q81_CTL_RESET_MASK_SHIFT) |
2023                         Q81_CTL_RESET_FUNC;
2024         WRITE_REG32(ha, Q81_CTL_RESET, data);
2025
2026         count = 10;
2027         while (count--) {
2028                 data = READ_REG32(ha, Q81_CTL_RESET);
2029                 if ((data & Q81_CTL_RESET_FUNC) == 0)
2030                         break;
2031                 QLA_USEC_DELAY(10);
2032         }
2033         if (count < 0) {        /* while (count--) exits with count == -1 */
2034                 device_printf(dev, "%s: Bit 15 not cleared after Reset\n",
2035                         __func__);
2036                 return (-1);
2037         }
2038         return (0);
2039 }
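
/*
 * The function-reset handshake above writes the reset bit together
 * with its mask half (Q81_CTL_RESET_FUNC << Q81_CTL_RESET_MASK_SHIFT)
 * so the write takes effect, then polls until the hardware clears
 * Q81_CTL_RESET_FUNC to signal completion.
 */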
2040         
2041 static int
2042 qls_hw_reset(qla_host_t *ha)
2043 {
2044         device_t        dev = ha->pci_dev;
2045         int             ret;
2046         int             count;
2047         uint32_t        data;
2048
2049         QL_DPRINT2((ha->pci_dev, "%s:enter[%d]\n", __func__, ha->hw_init));
2050
2051         if (ha->hw_init == 0) {
2052                 ret = qls_hw_nic_reset(ha);
2053                 goto qls_hw_reset_exit;
2054         }
2055
2056         ret = qls_clear_routing_table(ha);
2057         if (ret)
2058                 goto qls_hw_reset_exit;
2059
2060         ret = qls_mbx_set_mgmt_ctrl(ha, Q81_MBX_SET_MGMT_CTL_STOP);
2061         if (ret)
2062                 goto qls_hw_reset_exit;
2063
2064         /*
2065          * Wait for FIFO to empty
2066          */
2067         count = 5;
2068         while (count--) {
2069                 data = READ_REG32(ha, Q81_CTL_STATUS);
2070                 if (data & Q81_CTL_STATUS_NFE)
2071                         break;
2072                 qls_mdelay(__func__, 100);
2073         }
2074         if (count < 0) {
2075                 device_printf(dev, "%s: NFE bit not set\n", __func__);
2076                 goto qls_hw_reset_exit;
2077         }
2078
2079         count = 5;
2080         while (count--) {
2081                 (void)qls_mbx_get_mgmt_ctrl(ha, &data);
2082
2083                 if ((data & Q81_MBX_GET_MGMT_CTL_FIFO_EMPTY) &&
2084                         (data & Q81_MBX_GET_MGMT_CTL_SET_MGMT))
2085                         break;
2086                 qls_mdelay(__func__, 100);
2087         }
2088         if (count < 0)
2089                 goto qls_hw_reset_exit;
2090
2091         /*
2092          * Reset the NIC function
2093          */
2094         ret = qls_hw_nic_reset(ha);
2095         if (ret)
2096                 goto qls_hw_reset_exit;
2097         
2098         ret = qls_mbx_set_mgmt_ctrl(ha, Q81_MBX_SET_MGMT_CTL_RESUME);
2099
2100 qls_hw_reset_exit:
2101         if (ret)
2102                 device_printf(dev, "%s: failed\n", __func__);
2103                 
2104         return (ret);
2105 }
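
/*
 * Full reset sequence used above once the hardware has been
 * initialized: clear the routing table, ask the management firmware
 * to pause (Q81_MBX_SET_MGMT_CTL_STOP), wait for the FIFOs to drain,
 * reset the NIC function, then resume management traffic.
 */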
2106
2107 /*
2108  * MPI Related Functions
2109  */
2110 int
2111 qls_mpi_risc_rd_reg(qla_host_t *ha, uint32_t reg, uint32_t *data)
2112 {
2113         int ret;
2114
2115         ret = qls_proc_addr_rd_reg(ha, Q81_CTL_PROC_ADDR_MPI_RISC,
2116                         reg, data);
2117         return (ret);
2118 }
2119
2120 int
2121 qls_mpi_risc_wr_reg(qla_host_t *ha, uint32_t reg, uint32_t data)
2122 {
2123         int ret;
2124
2125         ret = qls_proc_addr_wr_reg(ha, Q81_CTL_PROC_ADDR_MPI_RISC,
2126                         reg, data);
2127         return (ret);
2128 }
2129
2130 int
2131 qls_mbx_rd_reg(qla_host_t *ha, uint32_t reg, uint32_t *data)
2132 {
2133         int ret;
2134
2135         if ((ha->pci_func & 0x1) == 0)
2136                 reg += Q81_FUNC0_MBX_OUT_REG0;
2137         else
2138                 reg += Q81_FUNC1_MBX_OUT_REG0;
2139
2140         ret = qls_mpi_risc_rd_reg(ha, reg, data);
2141
2142         return (ret);
2143 }
2144
2145 int
2146 qls_mbx_wr_reg(qla_host_t *ha, uint32_t reg, uint32_t data)
2147 {
2148         int ret;
2149
2150         if ((ha->pci_func & 0x1) == 0)
2151                 reg += Q81_FUNC0_MBX_IN_REG0;
2152         else
2153                 reg += Q81_FUNC1_MBX_IN_REG0;
2154
2155         ret = qls_mpi_risc_wr_reg(ha, reg, data);
2156
2157         return (ret);
2158 }
2159
2160
2161 static int
2162 qls_mbx_cmd(qla_host_t *ha, uint32_t *in_mbx, uint32_t i_count,
2163         uint32_t *out_mbx, uint32_t o_count)
2164 {
2165         int i, ret = -1;
2166         uint32_t data32, mbx_cmd = 0;
2167         uint32_t count = 50;
2168
2169         QL_DPRINT2((ha->pci_dev, "%s: enter[0x%08x 0x%08x 0x%08x]\n",
2170                 __func__, *in_mbx, *(in_mbx + 1), *(in_mbx + 2)));
2171
2172         data32 = READ_REG32(ha, Q81_CTL_HOST_CMD_STATUS);
2173
2174         if (data32 & Q81_CTL_HCS_HTR_INTR) {
2175                 device_printf(ha->pci_dev, "%s: cmd_status[0x%08x]\n",
2176                         __func__, data32);
2177                 goto qls_mbx_cmd_exit;
2178         }
2179
2180         if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_PROC_ADDR_NIC_RCV,
2181                 Q81_CTL_SEM_SET_PROC_ADDR_NIC_RCV)) {
2182                 device_printf(ha->pci_dev, "%s: semlock failed\n", __func__);
2183                 goto qls_mbx_cmd_exit;
2184         }
2185
2186         ha->mbx_done = 0;
2187
2188         mbx_cmd = *in_mbx;
2189
2190         for (i = 0; i < i_count; i++) {
2191
2192                 ret = qls_mbx_wr_reg(ha, i, *in_mbx);
2193
2194                 if (ret) {
2195                         device_printf(ha->pci_dev,
2196                                 "%s: mbx_wr[%d, 0x%08x] failed\n", __func__,
2197                                 i, *in_mbx);
2198                         qls_sem_unlock(ha, Q81_CTL_SEM_MASK_PROC_ADDR_NIC_RCV);
2199                         goto qls_mbx_cmd_exit;
2200                 }
2201
2202                 in_mbx++;
2203         }
2204         WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS, Q81_CTL_HCS_CMD_SET_HTR_INTR);
2205
2206         qls_sem_unlock(ha, Q81_CTL_SEM_MASK_PROC_ADDR_NIC_RCV);
2207
2208         ret = -1;
2209         ha->mbx_done = 0;
2210
2211         while (count--) {
2212
2213                 if (ha->flags.intr_enable == 0) {
2214                         data32 = READ_REG32(ha, Q81_CTL_STATUS);
2215
2216                         if (!(data32 & Q81_CTL_STATUS_PI)) {
2217                                 qls_mdelay(__func__, 100);
2218                                 continue;
2219                         }
2220
2221                         ret = qls_mbx_rd_reg(ha, 0, &data32);
2222
2223                         if (ret == 0) {
2224                                 if ((data32 & 0xF000) == 0x4000) {
2225
2226                                         out_mbx[0] = data32;
2227
2228                                         for (i = 1; i < o_count; i++) {
2229                                                 ret = qls_mbx_rd_reg(ha, i,
2230                                                                 &data32);
2231                                                 if (ret) {
2232                                                         device_printf(
2233                                                                 ha->pci_dev,
2234                                                                 "%s: mbx_rd[%d]"
2235                                                                 " failed\n",
2236                                                                 __func__, i);
2237                                                         break;
2238                                                 }
2239                                                 out_mbx[i] = data32;
2240                                         }
2241                                         break;
2242                                 } else if ((data32 & 0xF000) == 0x8000) {
2243                                         count = 50;
2244                                         WRITE_REG32(ha,
2245                                                 Q81_CTL_HOST_CMD_STATUS,
2246                                                 Q81_CTL_HCS_CMD_CLR_RTH_INTR);
2247                                 }
2248                         }
2249                 } else {
2250                         if (ha->mbx_done) {
2251                                 for (i = 0; i < o_count; i++) {
2252                                         out_mbx[i] = ha->mbox[i];
2253                                 }
2254                                 ret = 0;
2255                                 break;
2256                         }
2257                 }
2258                 qls_mdelay(__func__, 1000);
2259         }
2260
2261 qls_mbx_cmd_exit:
2262
2263         if (ha->flags.intr_enable == 0) {
2264                 WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS,
2265                         Q81_CTL_HCS_CMD_CLR_RTH_INTR);
2266         }
2267
2268         if (ret) {
2269                 ha->qla_initiate_recovery = 1;
2270         }
2271
2272         QL_DPRINT2((ha->pci_dev, "%s: exit[%d]\n", __func__, ret));
2273         return (ret);
2274 }
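
/*
 * Mailbox handshake implemented by qls_mbx_cmd(): the inbound
 * registers are written under the NIC_RCV processor-address
 * semaphore, Q81_CTL_HCS_CMD_SET_HTR_INTR kicks the firmware, and
 * completion arrives either through the interrupt path (the ISR
 * fills ha->mbox and sets ha->mbx_done) or, with interrupts off, by
 * polling mailbox 0 for a 0x4xxx completion code; 0x8xxx codes are
 * asynchronous events that are acknowledged and restart the wait.
 */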
2275
2276 static int
2277 qls_mbx_set_mgmt_ctrl(qla_host_t *ha, uint32_t t_ctrl)
2278 {
2279         uint32_t *mbox;
2280         device_t dev = ha->pci_dev;
2281
2282         mbox = ha->mbox;
2283         bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
2284
2285         mbox[0] = Q81_MBX_SET_MGMT_CTL;
2286         mbox[1] = t_ctrl;
2287
2288         if (qls_mbx_cmd(ha, mbox, 2, mbox, 1)) {
2289                 device_printf(dev, "%s failed\n", __func__);
2290                 return (-1);
2291         }
2292
2293         if ((mbox[0] == Q81_MBX_CMD_COMPLETE) ||
2294                 ((t_ctrl == Q81_MBX_SET_MGMT_CTL_STOP) &&
2295                         (mbox[0] == Q81_MBX_CMD_ERROR))) {
2296                 return (0);
2297         }
2298         device_printf(dev, "%s failed [0x%08x]\n", __func__, mbox[0]);
2299         return (-1);
2301 }
2302
2303 static int
2304 qls_mbx_get_mgmt_ctrl(qla_host_t *ha, uint32_t *t_status)
2305 {
2306         uint32_t *mbox;
2307         device_t dev = ha->pci_dev;
2308
2309         *t_status = 0;
2310
2311         mbox = ha->mbox;
2312         bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
2313
2314         mbox[0] = Q81_MBX_GET_MGMT_CTL;
2315
2316         if (qls_mbx_cmd(ha, mbox, 1, mbox, 2)) {
2317                 device_printf(dev, "%s failed\n", __func__);
2318                 return (-1);
2319         }
2320
2321         *t_status = mbox[1];
2322
2323         return (0);
2324 }
2325
2326 static void
2327 qls_mbx_get_link_status(qla_host_t *ha)
2328 {
2329         uint32_t *mbox;
2330         device_t dev = ha->pci_dev;
2331
2332         mbox = ha->mbox;
2333         bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
2334
2335         mbox[0] = Q81_MBX_GET_LNK_STATUS;
2336
2337         if (qls_mbx_cmd(ha, mbox, 1, mbox, 6)) {
2338                 device_printf(dev, "%s failed\n", __func__);
2339                 return;
2340         }
2341
2342         ha->link_status                 = mbox[1];
2343         ha->link_down_info              = mbox[2];
2344         ha->link_hw_info                = mbox[3];
2345         ha->link_dcbx_counters          = mbox[4];
2346         ha->link_change_counters        = mbox[5];
2347
2348         device_printf(dev, "%s 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
2349                 __func__, mbox[0], mbox[1], mbox[2], mbox[3], mbox[4], mbox[5]);
2350
2351         return;
2352 }
2353
2354 static void
2355 qls_mbx_about_fw(qla_host_t *ha)
2356 {
2357         uint32_t *mbox;
2358         device_t dev = ha->pci_dev;
2359
2360         mbox = ha->mbox;
2361         bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
2362
2363         mbox[0] = Q81_MBX_ABOUT_FW;
2364
2365         if (qls_mbx_cmd(ha, mbox, 1, mbox, 6)) {
2366                 device_printf(dev, "%s failed\n", __func__);
2367                 return;
2368         }
2369
2370         device_printf(dev, "%s 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
2371                 __func__, mbox[0], mbox[1], mbox[2], mbox[3], mbox[4], mbox[5]);
2372 }
2373
2374 int
2375 qls_mbx_dump_risc_ram(qla_host_t *ha, void *buf, uint32_t r_addr,
2376         uint32_t r_size)
2377 {
2378         bus_addr_t b_paddr;
2379         uint32_t *mbox;
2380         device_t dev = ha->pci_dev;
2381
2382         mbox = ha->mbox;
2383         bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS));
2384
2385         bzero(ha->mpi_dma.dma_b, (r_size << 2));
2386         b_paddr = ha->mpi_dma.dma_addr;
2387
2388         mbox[0] = Q81_MBX_DUMP_RISC_RAM;
2389         mbox[1] = r_addr & 0xFFFF;
2390         mbox[2] = ((uint32_t)(b_paddr >> 16)) & 0xFFFF;
2391         mbox[3] = ((uint32_t)b_paddr) & 0xFFFF;
2392         mbox[4] = (r_size >> 16) & 0xFFFF;
2393         mbox[5] = r_size & 0xFFFF;
2394         mbox[6] = ((uint32_t)(b_paddr >> 48)) & 0xFFFF;
2395         mbox[7] = ((uint32_t)(b_paddr >> 32)) & 0xFFFF;
2396         mbox[8] = (r_addr >> 16) & 0xFFFF;
2397
2398         bus_dmamap_sync(ha->mpi_dma.dma_tag, ha->mpi_dma.dma_map,
2399                 BUS_DMASYNC_PREREAD);
2400
2401         if (qls_mbx_cmd(ha, mbox, 9, mbox, 1)) {
2402                 device_printf(dev, "%s failed\n", __func__);
2403                 return (-1);
2404         }
2405         if (mbox[0] != 0x4000) {
2406                 device_printf(dev, "%s: failed!\n", __func__);
2407                 return (-1);
2408         } else {
2409                 bus_dmamap_sync(ha->mpi_dma.dma_tag, ha->mpi_dma.dma_map,
2410                         BUS_DMASYNC_POSTREAD);
2411                 bcopy(ha->mpi_dma.dma_b, buf, (r_size << 2));
2412         }
2413
2414         return (0);
2415 }
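
/*
 * The dump command passes the 64-bit DMA address 16 bits per mailbox
 * register.  Worked example: b_paddr == 0x123458000 loads
 * mbox[6] = 0x0000 (bits 63:48), mbox[7] = 0x0001 (bits 47:32),
 * mbox[2] = 0x2345 (bits 31:16) and mbox[3] = 0x8000 (bits 15:0).
 * r_size is a count of 32-bit words, hence the (r_size << 2) byte
 * lengths used with bzero() and bcopy().
 */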
2416
2417 int 
2418 qls_mpi_reset(qla_host_t *ha)
2419 {
2420         int             count;
2421         uint32_t        data;
2422         device_t        dev = ha->pci_dev;
2423         
2424         WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS,
2425                 Q81_CTL_HCS_CMD_SET_RISC_RESET);
2426
2427         count = 10;
2428         while (count--) {
2429                 data = READ_REG32(ha, Q81_CTL_HOST_CMD_STATUS);
2430                 if (data & Q81_CTL_HCS_RISC_RESET) {
2431                         WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS,
2432                                 Q81_CTL_HCS_CMD_CLR_RISC_RESET);
2433                         break;
2434                 }
2435                 qls_mdelay(__func__, 10);
2436         }
2437         if (count < 0) {
2438                 device_printf(dev, "%s: failed\n", __func__);
2439                 return (-1);
2440         }
2441         return (0);
2442 }
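
/*
 * MPI (RISC) reset handshake: request the reset with
 * Q81_CTL_HCS_CMD_SET_RISC_RESET, poll until the status reflects
 * Q81_CTL_HCS_RISC_RESET, then acknowledge it with
 * Q81_CTL_HCS_CMD_CLR_RISC_RESET.
 */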
2443