/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011-2012 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: qla_hw.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 * Content: Contains Hardware dependent functions
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "qla_os.h"
#include "qla_reg.h"
#include "qla_hw.h"
#include "qla_def.h"
#include "qla_inline.h"
#include "qla_ver.h"
#include "qla_glbl.h"
#include "qla_dbg.h"

static uint32_t sysctl_num_rds_rings = 2;
static uint32_t sysctl_num_sds_rings = 4;

/*
 * Static Functions
 */

static void qla_init_cntxt_regions(qla_host_t *ha);
static int qla_issue_cmd(qla_host_t *ha, qla_cdrp_t *cdrp);
static int qla_fw_cmd(qla_host_t *ha, void *fw_cmd, uint32_t size);
static int qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr,
                uint16_t cntxt_id, uint32_t add_multi);
static void qla_del_rcv_cntxt(qla_host_t *ha);
static int qla_init_rcv_cntxt(qla_host_t *ha);
static void qla_del_xmt_cntxt(qla_host_t *ha);
static int qla_init_xmt_cntxt(qla_host_t *ha);
static int qla_get_max_rds(qla_host_t *ha);
static int qla_get_max_sds(qla_host_t *ha);
static int qla_get_max_rules(qla_host_t *ha);
static int qla_get_max_rcv_cntxts(qla_host_t *ha);
static int qla_get_max_tx_cntxts(qla_host_t *ha);
static int qla_get_max_mtu(qla_host_t *ha);
static int qla_get_max_lro(qla_host_t *ha);
static int qla_get_flow_control(qla_host_t *ha);
static void qla_hw_tx_done_locked(qla_host_t *ha);

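/*
 * Each status (SDS) ring is serviced by its own MSI-X vector; the
 * per-ring msi_index values are assigned when the receive context
 * request is built in qla_init_cntxt_regions() below.
 */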
int
qla_get_msix_count(qla_host_t *ha)
{
        return (sysctl_num_sds_rings);
}

/*
 * Name: qla_hw_add_sysctls
 * Function: Add P3Plus specific sysctls
 */
void
qla_hw_add_sysctls(qla_host_t *ha)
{
        device_t        dev;

        dev = ha->pci_dev;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_rds_rings", CTLFLAG_RD, &sysctl_num_rds_rings,
                sysctl_num_rds_rings, "Number of Rcv Descriptor Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_sds_rings", CTLFLAG_RD, &sysctl_num_sds_rings,
                sysctl_num_sds_rings, "Number of Status Descriptor Rings");
}

/*
 * Name: qla_free_dma
 * Function: Frees the DMA'able memory allocated in qla_alloc_dma()
 */
void
qla_free_dma(qla_host_t *ha)
{
        uint32_t i;

        if (ha->hw.dma_buf.flags.context) {
                qla_free_dmabuf(ha, &ha->hw.dma_buf.context);
                ha->hw.dma_buf.flags.context = 0;
        }

        if (ha->hw.dma_buf.flags.sds_ring) {
                for (i = 0; i < ha->hw.num_sds_rings; i++)
                        qla_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]);
                ha->hw.dma_buf.flags.sds_ring = 0;
        }

        if (ha->hw.dma_buf.flags.rds_ring) {
                for (i = 0; i < ha->hw.num_rds_rings; i++)
                        qla_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]);
                ha->hw.dma_buf.flags.rds_ring = 0;
        }

        if (ha->hw.dma_buf.flags.tx_ring) {
                qla_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring);
                ha->hw.dma_buf.flags.tx_ring = 0;
        }
}

/*
 * Name: qla_alloc_dma
 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
 */
int
qla_alloc_dma(qla_host_t *ha)
{
        device_t                dev;
        uint32_t                i, j, size;

        dev = ha->pci_dev;

        QL_DPRINT2((dev, "%s: enter\n", __func__));

        ha->hw.num_rds_rings = (uint16_t)sysctl_num_rds_rings;
        ha->hw.num_sds_rings = (uint16_t)sysctl_num_sds_rings;

        /*
         * Allocate Transmit Ring
         */

        ha->hw.dma_buf.tx_ring.alignment = 8;
        ha->hw.dma_buf.tx_ring.size =
                (sizeof(q80_tx_cmd_t)) * NUM_TX_DESCRIPTORS;

        if (qla_alloc_dmabuf(ha, &ha->hw.dma_buf.tx_ring)) {
                device_printf(dev, "%s: tx ring alloc failed\n", __func__);
                goto qla_alloc_dma_exit;
        }
        ha->hw.dma_buf.flags.tx_ring = 1;

        QL_DPRINT2((dev, "%s: tx_ring phys %p virt %p\n",
                __func__, (void *)(ha->hw.dma_buf.tx_ring.dma_addr),
                ha->hw.dma_buf.tx_ring.dma_b));
        /*
         * Allocate Receive Descriptor Rings
         */

        for (i = 0; i < ha->hw.num_rds_rings; i++) {
                ha->hw.dma_buf.rds_ring[i].alignment = 8;

                if (i == RDS_RING_INDEX_NORMAL) {
                        ha->hw.dma_buf.rds_ring[i].size =
                                (sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS;
                } else if (i == RDS_RING_INDEX_JUMBO) {
                        ha->hw.dma_buf.rds_ring[i].size =
                                (sizeof(q80_recv_desc_t)) *
                                        NUM_RX_JUMBO_DESCRIPTORS;
                } else
                        break;

                if (qla_alloc_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i])) {
                        QL_DPRINT4((dev, "%s: rds ring alloc failed\n",
                                __func__));

                        for (j = 0; j < i; j++)
                                qla_free_dmabuf(ha,
                                        &ha->hw.dma_buf.rds_ring[j]);

                        goto qla_alloc_dma_exit;
                }
                QL_DPRINT4((dev, "%s: rx_ring[%d] phys %p virt %p\n",
                        __func__, i,
                        (void *)(ha->hw.dma_buf.rds_ring[i].dma_addr),
                        ha->hw.dma_buf.rds_ring[i].dma_b));
        }
        ha->hw.dma_buf.flags.rds_ring = 1;

        /*
         * Allocate Status Descriptor Rings
         */

        for (i = 0; i < ha->hw.num_sds_rings; i++) {
                ha->hw.dma_buf.sds_ring[i].alignment = 8;
                ha->hw.dma_buf.sds_ring[i].size =
                        (sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS;

                if (qla_alloc_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i])) {
                        device_printf(dev, "%s: sds ring alloc failed\n",
                                __func__);

                        for (j = 0; j < i; j++)
                                qla_free_dmabuf(ha,
                                        &ha->hw.dma_buf.sds_ring[j]);

                        goto qla_alloc_dma_exit;
                }
                QL_DPRINT4((dev, "%s: sds_ring[%d] phys %p virt %p\n",
                        __func__, i,
                        (void *)(ha->hw.dma_buf.sds_ring[i].dma_addr),
                        ha->hw.dma_buf.sds_ring[i].dma_b));
        }
        ha->hw.dma_buf.flags.sds_ring = 1;

        /*
         * Allocate Context Area
         */
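        /*
         * The context area is a single DMA buffer which
         * qla_init_cntxt_regions() carves up as follows, each piece
         * aligned to QL_BUFFER_ALIGN and the total rounded to a page:
         *
         *   tx cntxt request | tx cntxt response | rx cntxt request |
         *   rx cntxt response | tx consumer index (uint32_t)
         */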
        size = QL_ALIGN((sizeof (q80_tx_cntxt_req_t)), QL_BUFFER_ALIGN);

        size += QL_ALIGN((sizeof (q80_tx_cntxt_rsp_t)), QL_BUFFER_ALIGN);

        size += QL_ALIGN((sizeof (q80_rcv_cntxt_req_t)), QL_BUFFER_ALIGN);

        size += QL_ALIGN((sizeof (q80_rcv_cntxt_rsp_t)), QL_BUFFER_ALIGN);

        size += sizeof (uint32_t); /* for tx consumer index */

        size = QL_ALIGN(size, PAGE_SIZE);

        ha->hw.dma_buf.context.alignment = 8;
        ha->hw.dma_buf.context.size = size;

        if (qla_alloc_dmabuf(ha, &ha->hw.dma_buf.context)) {
                device_printf(dev, "%s: context alloc failed\n", __func__);
                goto qla_alloc_dma_exit;
        }
        ha->hw.dma_buf.flags.context = 1;
        QL_DPRINT2((dev, "%s: context phys %p virt %p\n",
                __func__, (void *)(ha->hw.dma_buf.context.dma_addr),
                ha->hw.dma_buf.context.dma_b));

        qla_init_cntxt_regions(ha);

        return 0;

qla_alloc_dma_exit:
        qla_free_dma(ha);
        return -1;
}

/*
 * Name: qla_init_cntxt_regions
 * Function: Initializes Tx/Rx Contexts.
 */
static void
qla_init_cntxt_regions(qla_host_t *ha)
{
        qla_hw_t                *hw;
        q80_tx_cntxt_req_t      *tx_cntxt_req;
        q80_rcv_cntxt_req_t     *rx_cntxt_req;
        bus_addr_t              phys_addr;
        uint32_t                i;
        device_t                dev;
        uint32_t                size;

        dev = ha->pci_dev;

        hw = &ha->hw;

        hw->tx_ring_base = hw->dma_buf.tx_ring.dma_b;

        for (i = 0; i < ha->hw.num_sds_rings; i++)
                hw->sds[i].sds_ring_base =
                        (q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b;

        phys_addr = hw->dma_buf.context.dma_addr;

        memset((void *)hw->dma_buf.context.dma_b, 0,
                ha->hw.dma_buf.context.size);

        hw->tx_cntxt_req        =
                (q80_tx_cntxt_req_t *)hw->dma_buf.context.dma_b;
        hw->tx_cntxt_req_paddr  = phys_addr;

        size = QL_ALIGN((sizeof (q80_tx_cntxt_req_t)), QL_BUFFER_ALIGN);

        hw->tx_cntxt_rsp        =
                (q80_tx_cntxt_rsp_t *)((uint8_t *)hw->tx_cntxt_req + size);
        hw->tx_cntxt_rsp_paddr  = hw->tx_cntxt_req_paddr + size;

        size = QL_ALIGN((sizeof (q80_tx_cntxt_rsp_t)), QL_BUFFER_ALIGN);

        hw->rx_cntxt_req =
                (q80_rcv_cntxt_req_t *)((uint8_t *)hw->tx_cntxt_rsp + size);
        hw->rx_cntxt_req_paddr = hw->tx_cntxt_rsp_paddr + size;

        size = QL_ALIGN((sizeof (q80_rcv_cntxt_req_t)), QL_BUFFER_ALIGN);

        hw->rx_cntxt_rsp =
                (q80_rcv_cntxt_rsp_t *)((uint8_t *)hw->rx_cntxt_req + size);
        hw->rx_cntxt_rsp_paddr = hw->rx_cntxt_req_paddr + size;

        size = QL_ALIGN((sizeof (q80_rcv_cntxt_rsp_t)), QL_BUFFER_ALIGN);

        hw->tx_cons = (uint32_t *)((uint8_t *)hw->rx_cntxt_rsp + size);
        hw->tx_cons_paddr = hw->rx_cntxt_rsp_paddr + size;

        /*
         * Initialize the Transmit Context Request so that we don't need to
         * do it every time we need to create a context
         */
        tx_cntxt_req = hw->tx_cntxt_req;

        tx_cntxt_req->rsp_dma_addr = qla_host_to_le64(hw->tx_cntxt_rsp_paddr);

        tx_cntxt_req->cmd_cons_dma_addr = qla_host_to_le64(hw->tx_cons_paddr);

        tx_cntxt_req->caps[0] = qla_host_to_le32((CNTXT_CAP0_BASEFW |
                                        CNTXT_CAP0_LEGACY_MN | CNTXT_CAP0_LSO));

        tx_cntxt_req->intr_mode = qla_host_to_le32(CNTXT_INTR_MODE_SHARED);

        tx_cntxt_req->phys_addr =
                qla_host_to_le64(hw->dma_buf.tx_ring.dma_addr);

        tx_cntxt_req->num_entries = qla_host_to_le32(NUM_TX_DESCRIPTORS);

        /*
         * Initialize the Receive Context Request
         */

        rx_cntxt_req = hw->rx_cntxt_req;

        rx_cntxt_req->rx_req.rsp_dma_addr =
                qla_host_to_le64(hw->rx_cntxt_rsp_paddr);

        rx_cntxt_req->rx_req.caps[0] = qla_host_to_le32(CNTXT_CAP0_BASEFW |
                                                CNTXT_CAP0_LEGACY_MN |
                                                CNTXT_CAP0_JUMBO |
                                                CNTXT_CAP0_LRO |
                                                CNTXT_CAP0_HW_LRO);

        rx_cntxt_req->rx_req.intr_mode =
                qla_host_to_le32(CNTXT_INTR_MODE_SHARED);

        rx_cntxt_req->rx_req.rds_intr_mode =
                qla_host_to_le32(CNTXT_INTR_MODE_UNIQUE);

        rx_cntxt_req->rx_req.rds_ring_offset = 0;
        rx_cntxt_req->rx_req.sds_ring_offset = qla_host_to_le32(
                (hw->num_rds_rings * sizeof(q80_rq_rds_ring_t)));
        rx_cntxt_req->rx_req.num_rds_rings =
                qla_host_to_le16(hw->num_rds_rings);
        rx_cntxt_req->rx_req.num_sds_rings =
                qla_host_to_le16(hw->num_sds_rings);

        for (i = 0; i < hw->num_rds_rings; i++) {
                rx_cntxt_req->rds_req[i].phys_addr =
                        qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr);

                if (i == RDS_RING_INDEX_NORMAL) {
                        rx_cntxt_req->rds_req[i].buf_size =
                                qla_host_to_le64(MCLBYTES);
                        rx_cntxt_req->rds_req[i].size =
                                qla_host_to_le32(NUM_RX_DESCRIPTORS);
                } else {
                        rx_cntxt_req->rds_req[i].buf_size =
                                qla_host_to_le64(MJUM9BYTES);
                        rx_cntxt_req->rds_req[i].size =
                                qla_host_to_le32(NUM_RX_JUMBO_DESCRIPTORS);
                }
        }

        for (i = 0; i < hw->num_sds_rings; i++) {
                rx_cntxt_req->sds_req[i].phys_addr =
                        qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr);
                rx_cntxt_req->sds_req[i].size =
                        qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
                rx_cntxt_req->sds_req[i].msi_index = qla_host_to_le16(i);
        }

        QL_DPRINT2((ha->pci_dev, "%s: tx_cntxt_req = %p paddr %p\n",
                __func__, hw->tx_cntxt_req, (void *)hw->tx_cntxt_req_paddr));
        QL_DPRINT2((ha->pci_dev, "%s: tx_cntxt_rsp = %p paddr %p\n",
                __func__, hw->tx_cntxt_rsp, (void *)hw->tx_cntxt_rsp_paddr));
        QL_DPRINT2((ha->pci_dev, "%s: rx_cntxt_req = %p paddr %p\n",
                __func__, hw->rx_cntxt_req, (void *)hw->rx_cntxt_req_paddr));
        QL_DPRINT2((ha->pci_dev, "%s: rx_cntxt_rsp = %p paddr %p\n",
                __func__, hw->rx_cntxt_rsp, (void *)hw->rx_cntxt_rsp_paddr));
        QL_DPRINT2((ha->pci_dev, "%s: tx_cons      = %p paddr %p\n",
                __func__, hw->tx_cons, (void *)hw->tx_cons_paddr));
}

/*
 * Name: qla_issue_cmd
 * Function: Issues commands on the CDRP interface and returns responses.
 */
static int
qla_issue_cmd(qla_host_t *ha, qla_cdrp_t *cdrp)
{
        int     ret = 0;
        uint32_t signature;
        uint32_t count = 400; /* 4 seconds or 400 10ms intervals */
        uint32_t data;
        device_t dev;

        dev = ha->pci_dev;

        signature = 0xcafe0000 | 0x0100 | ha->pci_func;

        ret = qla_sem_lock(ha, Q8_SEM5_LOCK, 0, (uint32_t)ha->pci_func);

        if (ret) {
                device_printf(dev, "%s: SEM5_LOCK lock failed\n", __func__);
                return (ret);
        }

        WRITE_OFFSET32(ha, Q8_NX_CDRP_SIGNATURE, signature);

        WRITE_OFFSET32(ha, Q8_NX_CDRP_ARG1, (cdrp->cmd_arg1));
        WRITE_OFFSET32(ha, Q8_NX_CDRP_ARG2, (cdrp->cmd_arg2));
        WRITE_OFFSET32(ha, Q8_NX_CDRP_ARG3, (cdrp->cmd_arg3));

        WRITE_OFFSET32(ha, Q8_NX_CDRP_CMD_RSP, cdrp->cmd);

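        /*
         * The firmware owns the command/response register while bit 31
         * is set; poll (up to 4 seconds) for it to clear.  A response
         * value of 1 indicates success.
         */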
        while (count) {
                qla_mdelay(__func__, 10);

                data = READ_REG32(ha, Q8_NX_CDRP_CMD_RSP);

                if ((!(data & 0x80000000)))
                        break;
                count--;
        }
        if ((!count) || (data != 1))
                ret = -1;

        cdrp->rsp = READ_REG32(ha, Q8_NX_CDRP_CMD_RSP);
        cdrp->rsp_arg1 = READ_REG32(ha, Q8_NX_CDRP_ARG1);
        cdrp->rsp_arg2 = READ_REG32(ha, Q8_NX_CDRP_ARG2);
        cdrp->rsp_arg3 = READ_REG32(ha, Q8_NX_CDRP_ARG3);

        qla_sem_unlock(ha, Q8_SEM5_UNLOCK);

        if (ret) {
                device_printf(dev, "%s: "
                        "cmd[0x%08x] = 0x%08x\n"
                        "\tsig[0x%08x] = 0x%08x\n"
                        "\targ1[0x%08x] = 0x%08x\n"
                        "\targ2[0x%08x] = 0x%08x\n"
                        "\targ3[0x%08x] = 0x%08x\n",
                        __func__, Q8_NX_CDRP_CMD_RSP, cdrp->cmd,
                        Q8_NX_CDRP_SIGNATURE, signature,
                        Q8_NX_CDRP_ARG1, cdrp->cmd_arg1,
                        Q8_NX_CDRP_ARG2, cdrp->cmd_arg2,
                        Q8_NX_CDRP_ARG3, cdrp->cmd_arg3);

                device_printf(dev, "%s: exit (ret = 0x%x)\n"
                        "\t\t rsp = 0x%08x\n"
                        "\t\t arg1 = 0x%08x\n"
                        "\t\t arg2 = 0x%08x\n"
                        "\t\t arg3 = 0x%08x\n",
                        __func__, ret, cdrp->rsp,
                        cdrp->rsp_arg1, cdrp->rsp_arg2, cdrp->rsp_arg3);
        }

        return (ret);
}

#define QLA_TX_MIN_FREE 2
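/*
 * The transmit ring is treated as full once no more than QLA_TX_MIN_FREE
 * entries remain, which keeps the producer index from wrapping onto the
 * consumer index.
 */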

/*
 * Name: qla_fw_cmd
 * Function: Issues firmware control commands on the Tx Ring.
 */
static int
qla_fw_cmd(qla_host_t *ha, void *fw_cmd, uint32_t size)
{
        device_t dev;
        q80_tx_cmd_t *tx_cmd;
        qla_hw_t *hw = &ha->hw;
        int count = 100;

        dev = ha->pci_dev;

        QLA_TX_LOCK(ha);

        if (hw->txr_free <= QLA_TX_MIN_FREE) {
                while (count--) {
                        qla_hw_tx_done_locked(ha);
                        if (hw->txr_free > QLA_TX_MIN_FREE)
                                break;

                        QLA_TX_UNLOCK(ha);
                        qla_mdelay(__func__, 10);
                        QLA_TX_LOCK(ha);
                }
                if (hw->txr_free <= QLA_TX_MIN_FREE) {
                        QLA_TX_UNLOCK(ha);
                        device_printf(dev, "%s: xmit queue full\n", __func__);
                        return (-1);
                }
        }
        tx_cmd = &hw->tx_ring_base[hw->txr_next];

        bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));

        bcopy(fw_cmd, tx_cmd, size);

        hw->txr_next = (hw->txr_next + 1) & (NUM_TX_DESCRIPTORS - 1);
        hw->txr_free--;

        QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->txr_next);

        QLA_TX_UNLOCK(ha);

        return (0);
}

/*
 * Name: qla_config_rss
 * Function: Configure RSS for the context/interface.
 */
const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
                        0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
                        0x255b0ec26d5a56daULL };

static int
qla_config_rss(qla_host_t *ha, uint16_t cntxt_id)
{
        qla_fw_cds_config_rss_t rss_config;
        int ret, i;

        bzero(&rss_config, sizeof(qla_fw_cds_config_rss_t));

        rss_config.hdr.cmd = Q8_FWCD_CNTRL_REQ;
        rss_config.hdr.opcode = Q8_FWCD_OPCODE_CONFIG_RSS;
        rss_config.hdr.cntxt_id = cntxt_id;

        rss_config.hash_type = (Q8_FWCD_RSS_HASH_TYPE_IPV4_TCP_IP |
                                        Q8_FWCD_RSS_HASH_TYPE_IPV6_TCP_IP);
        rss_config.flags = Q8_FWCD_RSS_FLAGS_ENABLE_RSS;

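        /*
         * 3-bit mask: the low bits of the computed hash index an
         * 8-entry indirection table (assumption based on the mask
         * value; the firmware interface is not publicly documented).
         */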
        rss_config.ind_tbl_mask = 0x7;

        for (i = 0; i < 5; i++)
                rss_config.rss_key[i] = rss_key[i];

        ret = qla_fw_cmd(ha, &rss_config, sizeof(qla_fw_cds_config_rss_t));

        return ret;
}

/*
 * Name: qla_config_intr_coalesce
 * Function: Configure Interrupt Coalescing.
 */
static int
qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable)
{
        qla_fw_cds_config_intr_coalesc_t intr_coalesce;
        int ret;

        bzero(&intr_coalesce, sizeof(qla_fw_cds_config_intr_coalesc_t));

        intr_coalesce.hdr.cmd = Q8_FWCD_CNTRL_REQ;
        intr_coalesce.hdr.opcode = Q8_FWCD_OPCODE_CONFIG_INTR_COALESCING;
        intr_coalesce.hdr.cntxt_id = cntxt_id;

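        /*
         * Default coalescing parameters; presumably an interrupt is
         * raised once the packet count or the microsecond bound is
         * reached, whichever happens first.
         */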
        intr_coalesce.flags = 0x04;
        intr_coalesce.max_rcv_pkts = 256;
        intr_coalesce.max_rcv_usecs = 3;
        intr_coalesce.max_snd_pkts = 64;
        intr_coalesce.max_snd_usecs = 4;

        if (tenable) {
                intr_coalesce.usecs_to = 1000; /* 1 millisecond */
                intr_coalesce.timer_type = Q8_FWCMD_INTR_COALESC_TIMER_PERIODIC;
                intr_coalesce.sds_ring_bitmask =
                        Q8_FWCMD_INTR_COALESC_SDS_RING_0;
        }

        ret = qla_fw_cmd(ha, &intr_coalesce,
                        sizeof(qla_fw_cds_config_intr_coalesc_t));

        return ret;
}

/*
 * Name: qla_config_mac_addr
 * Function: Binds a MAC address to the context/interface.
 *      Can be unicast, multicast or broadcast.
 */
static int
qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint16_t cntxt_id,
        uint32_t add_multi)
{
        qla_fw_cds_config_mac_addr_t mac_config;
        int ret;

//      device_printf(ha->pci_dev,
//              "%s: mac_addr %02x:%02x:%02x:%02x:%02x:%02x\n", __func__,
//              mac_addr[0], mac_addr[1], mac_addr[2],
//              mac_addr[3], mac_addr[4], mac_addr[5]);

        bzero(&mac_config, sizeof(qla_fw_cds_config_mac_addr_t));

        mac_config.hdr.cmd = Q8_FWCD_CNTRL_REQ;
        mac_config.hdr.opcode = Q8_FWCD_OPCODE_CONFIG_MAC_ADDR;
        mac_config.hdr.cntxt_id = cntxt_id;

        if (add_multi)
                mac_config.cmd = Q8_FWCD_ADD_MAC_ADDR;
        else
                mac_config.cmd = Q8_FWCD_DEL_MAC_ADDR;
        bcopy(mac_addr, mac_config.mac_addr, 6);

        ret = qla_fw_cmd(ha, &mac_config, sizeof(qla_fw_cds_config_mac_addr_t));

        return ret;
}

/*
 * Name: qla_set_mac_rcv_mode
 * Function: Enable/Disable AllMulticast and Promiscuous Modes.
 */
static int
qla_set_mac_rcv_mode(qla_host_t *ha, uint16_t cntxt_id, uint32_t mode)
{
        qla_set_mac_rcv_mode_t rcv_mode;
        int ret;

        bzero(&rcv_mode, sizeof(qla_set_mac_rcv_mode_t));

        rcv_mode.hdr.cmd = Q8_FWCD_CNTRL_REQ;
        rcv_mode.hdr.opcode = Q8_FWCD_OPCODE_CONFIG_MAC_RCV_MODE;
        rcv_mode.hdr.cntxt_id = cntxt_id;

        rcv_mode.mode = mode;

        ret = qla_fw_cmd(ha, &rcv_mode, sizeof(qla_set_mac_rcv_mode_t));

        return ret;
}

void
qla_set_promisc(qla_host_t *ha)
{
        (void)qla_set_mac_rcv_mode(ha,
                (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id,
                Q8_MAC_RCV_ENABLE_PROMISCUOUS);
}

void
qla_set_allmulti(qla_host_t *ha)
{
        (void)qla_set_mac_rcv_mode(ha,
                (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id,
                Q8_MAC_RCV_ENABLE_ALLMULTI);
}

void
qla_reset_promisc_allmulti(qla_host_t *ha)
{
        (void)qla_set_mac_rcv_mode(ha,
                (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id,
                Q8_MAC_RCV_RESET_PROMISC_ALLMULTI);
}

/*
 * Name: qla_config_ipv4_addr
 * Function: Configures the Destination IP Addr for LRO.
 */
void
qla_config_ipv4_addr(qla_host_t *ha, uint32_t ipv4_addr)
{
        qla_config_ipv4_t ip_conf;

        bzero(&ip_conf, sizeof(qla_config_ipv4_t));

        ip_conf.hdr.cmd = Q8_FWCD_CNTRL_REQ;
        ip_conf.hdr.opcode = Q8_FWCD_OPCODE_CONFIG_IPADDR;
        ip_conf.hdr.cntxt_id = (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id;

        ip_conf.cmd = (uint64_t)Q8_CONFIG_CMD_IP_ENABLE;
        ip_conf.ipv4_addr = (uint64_t)ipv4_addr;

        (void)qla_fw_cmd(ha, &ip_conf, sizeof(qla_config_ipv4_t));

        return;
}

/*
 * Name: qla_tx_tso
 * Function: Checks if the packet to be transmitted is a candidate for
 *      Large TCP Segment Offload. If yes, the appropriate fields in the Tx
 *      Ring Structure are plugged in.
 */
static int
qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr)
{
        struct ether_vlan_header *eh;
        struct ip *ip = NULL;
        struct tcphdr *th = NULL;
        uint32_t ehdrlen, hdrlen = 0, ip_hlen, tcp_hlen, tcp_opt_off;
        uint16_t etype, opcode, offload = 1;
        uint8_t *tcp_opt;
        device_t dev;

        dev = ha->pci_dev;

        eh = mtod(mp, struct ether_vlan_header *);

        if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
                ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
                etype = ntohs(eh->evl_proto);
        } else {
                ehdrlen = ETHER_HDR_LEN;
                etype = ntohs(eh->evl_encap_proto);
        }

        switch (etype) {
                case ETHERTYPE_IP:

                        tcp_opt_off = ehdrlen + sizeof(struct ip) +
                                        sizeof(struct tcphdr);

                        if (mp->m_len < tcp_opt_off) {
                                m_copydata(mp, 0, tcp_opt_off, hdr);
                                ip = (struct ip *)(hdr + ehdrlen);
                        } else {
                                ip = (struct ip *)(mp->m_data + ehdrlen);
                        }

                        ip_hlen = ip->ip_hl << 2;
                        opcode = Q8_TX_CMD_OP_XMT_TCP_LSO;

                        if ((ip->ip_p != IPPROTO_TCP) ||
                                (ip_hlen != sizeof (struct ip))) {
                                offload = 0;
                        } else {
                                th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
                        }
                break;

                default:
                        QL_DPRINT8((dev, "%s: type!=ip\n", __func__));
                        offload = 0;
                break;
        }

        if (!offload)
                return (-1);

        tcp_hlen = th->th_off << 2;

        hdrlen = ehdrlen + ip_hlen + tcp_hlen;

        if (mp->m_len < hdrlen) {
                if (mp->m_len < tcp_opt_off) {
                        if (tcp_hlen > sizeof(struct tcphdr)) {
                                m_copydata(mp, tcp_opt_off,
                                        (tcp_hlen - sizeof(struct tcphdr)),
                                        &hdr[tcp_opt_off]);
                        }
                } else {
                        m_copydata(mp, 0, hdrlen, hdr);
                }
        }
        if ((mp->m_pkthdr.csum_flags & CSUM_TSO) == 0) {
                /*
                 * If TCP options are present, only the timestamp option
                 * is supported.
                 */
                if ((tcp_hlen - sizeof(struct tcphdr)) != 10)
                        return -1;
                else {
                        if (mp->m_len < hdrlen) {
                                tcp_opt = &hdr[tcp_opt_off];
                        } else {
                                tcp_opt = (uint8_t *)(mp->m_data + tcp_opt_off);
                        }

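                        /*
                         * Accept only a timestamp option preceded by
                         * two NOP pads: 0x01, 0x01, kind 8, length 10.
                         */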
                        if ((*tcp_opt != 0x01) || (*(tcp_opt + 1) != 0x01) ||
                                (*(tcp_opt + 2) != 0x08) ||
                                (*(tcp_opt + 3) != 10)) {
                                return -1;
                        }
                }

                tx_cmd->mss = ha->max_frame_size - ETHER_CRC_LEN - hdrlen;
        } else {
                tx_cmd->mss = mp->m_pkthdr.tso_segsz;
        }

        tx_cmd->flags_opcode = opcode;
        tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;
        tx_cmd->ip_hdr_off = ehdrlen;
        tx_cmd->total_hdr_len = hdrlen;

        /* Multicast: least significant bit of the first address octet is 1 */
        if (eh->evl_dhost[0] & 0x01) {
                tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST;
        }

        if (mp->m_len < hdrlen) {
                return (1);
        }

        return (0);
}

/*
 * Name: qla_tx_chksum
 * Function: Checks if the packet to be transmitted is a candidate for
 *      TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx
 *      Ring Structure are plugged in.
 */
static int
qla_tx_chksum(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd)
{
        struct ether_vlan_header *eh;
        struct ip *ip;
        struct ip6_hdr *ip6;
        uint32_t ehdrlen, ip_hlen;
        uint16_t etype, opcode, offload = 1;
        device_t dev;

        dev = ha->pci_dev;

        if ((mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) == 0)
                return (-1);

        eh = mtod(mp, struct ether_vlan_header *);

        if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
                ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
                etype = ntohs(eh->evl_proto);
        } else {
                ehdrlen = ETHER_HDR_LEN;
                etype = ntohs(eh->evl_encap_proto);
        }

        switch (etype) {
                case ETHERTYPE_IP:
                        ip = (struct ip *)(mp->m_data + ehdrlen);

                        ip_hlen = sizeof (struct ip);

                        if (mp->m_len < (ehdrlen + ip_hlen)) {
                                device_printf(dev, "%s: ipv4 mlen\n", __func__);
                                offload = 0;
                                break;
                        }

                        if (ip->ip_p == IPPROTO_TCP)
                                opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM;
                        else if (ip->ip_p == IPPROTO_UDP)
                                opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM;
                        else {
                                device_printf(dev, "%s: ipv4\n", __func__);
                                offload = 0;
                        }
                break;

                case ETHERTYPE_IPV6:
                        ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);

                        ip_hlen = sizeof(struct ip6_hdr);

                        if (mp->m_len < (ehdrlen + ip_hlen)) {
                                device_printf(dev, "%s: ipv6 mlen\n", __func__);
                                offload = 0;
                                break;
                        }

                        if (ip6->ip6_nxt == IPPROTO_TCP)
                                opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6;
                        else if (ip6->ip6_nxt == IPPROTO_UDP)
                                opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6;
                        else {
                                device_printf(dev, "%s: ipv6\n", __func__);
                                offload = 0;
                        }
                break;

                default:
                        offload = 0;
                break;
        }
        if (!offload)
                return (-1);

        tx_cmd->flags_opcode = opcode;

        tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;

        return (0);
}

/*
 * Name: qla_hw_send
 * Function: Transmits a packet. It first checks if the packet is a
 *      candidate for Large TCP Segment Offload and then for UDP/TCP checksum
 *      offload. If either of these criteria are not met, it is transmitted
 *      as a regular ethernet frame.
 */
int
qla_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
        uint32_t *tx_idx, struct mbuf *mp)
{
        struct ether_vlan_header *eh;
        qla_hw_t *hw = &ha->hw;
        q80_tx_cmd_t *tx_cmd, tso_cmd;
        bus_dma_segment_t *c_seg;
        uint32_t num_tx_cmds, hdr_len = 0;
        uint32_t total_length = 0, bytes, tx_cmd_count = 0;
        device_t dev;
        int i, ret;
        uint8_t *src = NULL, *dst = NULL;

        dev = ha->pci_dev;

        /*
         * Always make sure there is at least one empty slot in the tx_ring;
         * the tx_ring is considered full when only one entry is available.
         */
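        /*
         * Each tx command descriptor holds up to Q8_TX_CMD_MAX_SEGMENTS
         * (4) buffer pointers, so one descriptor is needed per four DMA
         * segments; the shift below hard-codes that value.
         */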
        num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2;

        total_length = mp->m_pkthdr.len;
        if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
                device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
                        __func__, total_length);
                return (-1);
        }
        eh = mtod(mp, struct ether_vlan_header *);

        if ((mp->m_pkthdr.len > ha->max_frame_size) ||
                (nsegs > Q8_TX_MAX_SEGMENTS)) {
                bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t));

                src = ha->hw.frame_hdr;
                ret = qla_tx_tso(ha, mp, &tso_cmd, src);

                if (!(ret & ~1)) {
                        /* find the additional tx_cmd descriptors required */

                        hdr_len = tso_cmd.total_hdr_len;

                        bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
                        bytes = QL_MIN(bytes, hdr_len);

                        num_tx_cmds++;
                        hdr_len -= bytes;

                        while (hdr_len) {
                                bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
                                hdr_len -= bytes;
                                num_tx_cmds++;
                        }
                        hdr_len = tso_cmd.total_hdr_len;

                        if (ret == 0)
                                src = (uint8_t *)eh;
                }
        }

        if (hw->txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
                qla_hw_tx_done_locked(ha);
                if (hw->txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
                        QL_DPRINT8((dev, "%s: (hw->txr_free <= "
                                "(num_tx_cmds + QLA_TX_MIN_FREE))\n",
                                __func__));
                        return (-1);
                }
        }

        *tx_idx = hw->txr_next;

        tx_cmd = &hw->tx_ring_base[hw->txr_next];

        if (hdr_len == 0) {
                if ((nsegs > Q8_TX_MAX_SEGMENTS) ||
                        (mp->m_pkthdr.len > ha->max_frame_size)) {
                        device_printf(dev,
                                "%s: (nsegs[%d, %d, 0x%b] > Q8_TX_MAX_SEGMENTS)\n",
                                __func__, nsegs, mp->m_pkthdr.len,
                                (int)mp->m_pkthdr.csum_flags, CSUM_BITS);
                        qla_dump_buf8(ha, "qla_hw_send: wrong pkt",
                                mtod(mp, char *), mp->m_len);
                        return (EINVAL);
                }
                bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
                if (qla_tx_chksum(ha, mp, tx_cmd) != 0)
                        tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER;
        } else {
                bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t));
        }

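        /*
         * If the frame already carries an 802.1Q header, just flag it as
         * tagged; if the tag is only in the mbuf packet header, ask the
         * hardware to insert it from vlan_tci.
         */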
        if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
                tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED;
        else if (mp->m_flags & M_VLANTAG) {
                tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED |
                                                Q8_TX_CMD_FLAGS_HW_VLAN_ID);
                tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag;
        }

        tx_cmd->n_bufs = (uint8_t)nsegs;
        tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF);
        tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8)));
        tx_cmd->port_cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func);

        c_seg = segs;

        while (1) {
                for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) {
                        switch (i) {
                        case 0:
                                tx_cmd->buf1_addr = c_seg->ds_addr;
                                tx_cmd->buf1_len = c_seg->ds_len;
                                break;

                        case 1:
                                tx_cmd->buf2_addr = c_seg->ds_addr;
                                tx_cmd->buf2_len = c_seg->ds_len;
                                break;

                        case 2:
                                tx_cmd->buf3_addr = c_seg->ds_addr;
                                tx_cmd->buf3_len = c_seg->ds_len;
                                break;

                        case 3:
                                tx_cmd->buf4_addr = c_seg->ds_addr;
                                tx_cmd->buf4_len = c_seg->ds_len;
                                break;
                        }

                        c_seg++;
                        nsegs--;
                }

                hw->txr_next = (hw->txr_next + 1) & (NUM_TX_DESCRIPTORS - 1);
                tx_cmd_count++;

                if (!nsegs)
                        break;

                tx_cmd = &hw->tx_ring_base[hw->txr_next];
                bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
        }

        if (hdr_len) {
                /* TSO : Copy the header in the following tx cmd descriptors */

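                /*
                 * For LSO the complete MAC/IP/TCP header must be copied
                 * into the descriptors that follow the command descriptor;
                 * for M_VLANTAG mbufs the 802.1Q header is synthesized
                 * inline while copying.
                 */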
                tx_cmd = &hw->tx_ring_base[hw->txr_next];
                bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));

                bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
                bytes = QL_MIN(bytes, hdr_len);

                dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN;

                if (mp->m_flags & M_VLANTAG) {
                        /* first copy the src/dst MAC addresses */
                        bcopy(src, dst, (ETHER_ADDR_LEN * 2));
                        dst += (ETHER_ADDR_LEN * 2);
                        src += (ETHER_ADDR_LEN * 2);

                        hdr_len -= (ETHER_ADDR_LEN * 2);

                        *((uint16_t *)dst) = htons(ETHERTYPE_VLAN);
                        dst += 2;
                        *((uint16_t *)dst) = mp->m_pkthdr.ether_vtag;
                        dst += 2;

                        bytes -= ((ETHER_ADDR_LEN * 2) + 4);

                        bcopy(src, dst, bytes);
                        src += bytes;
                        hdr_len -= bytes;
                } else {
                        bcopy(src, dst, bytes);
                        src += bytes;
                        hdr_len -= bytes;
                }

                hw->txr_next = (hw->txr_next + 1) & (NUM_TX_DESCRIPTORS - 1);
                tx_cmd_count++;

                while (hdr_len) {
                        tx_cmd = &hw->tx_ring_base[hw->txr_next];
                        bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));

                        bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);

                        bcopy(src, tx_cmd, bytes);
                        src += bytes;
                        hdr_len -= bytes;
                        hw->txr_next =
                                (hw->txr_next + 1) & (NUM_TX_DESCRIPTORS - 1);
                        tx_cmd_count++;
                }
        }

        hw->txr_free = hw->txr_free - tx_cmd_count;

        QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->txr_next);
        QL_DPRINT8((dev, "%s: return\n", __func__));
        return (0);
}

/*
 * Name: qla_del_hw_if
 * Function: Destroys the hardware specific entities corresponding to an
 *      Ethernet Interface
 */
void
qla_del_hw_if(qla_host_t *ha)
{
        int     i;

        for (i = 0; i < ha->hw.num_sds_rings; i++)
                QL_DISABLE_INTERRUPTS(ha, i);

        qla_del_rcv_cntxt(ha);
        qla_del_xmt_cntxt(ha);

        ha->hw.flags.lro = 0;
}

/*
 * Name: qla_init_hw_if
 * Function: Creates the hardware specific entities corresponding to an
 *      Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
 *      corresponding to the interface. Enables LRO if allowed.
 */
int
qla_init_hw_if(qla_host_t *ha)
{
        device_t        dev;
        int             i;
        uint8_t         bcast_mac[6];

        qla_get_hw_caps(ha);

        dev = ha->pci_dev;

        for (i = 0; i < ha->hw.num_sds_rings; i++) {
                bzero(ha->hw.dma_buf.sds_ring[i].dma_b,
                        ha->hw.dma_buf.sds_ring[i].size);
        }
        /*
         * Create Receive Context
         */
        if (qla_init_rcv_cntxt(ha)) {
                return (-1);
        }

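        /*
         * Start the producer indices two entries short of the ring
         * size, presumably so the producer never wraps onto the
         * consumer.
         */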
        ha->hw.rx_next = NUM_RX_DESCRIPTORS - 2;
        ha->hw.rxj_next = NUM_RX_JUMBO_DESCRIPTORS - 2;
        ha->hw.rx_in = ha->hw.rxj_in = 0;

        /* Update the RDS Producer Indices */
        QL_UPDATE_RDS_PRODUCER_INDEX(ha, 0, ha->hw.rx_next);
        QL_UPDATE_RDS_PRODUCER_INDEX(ha, 1, ha->hw.rxj_next);

        /*
         * Create Transmit Context
         */
        if (qla_init_xmt_cntxt(ha)) {
                qla_del_rcv_cntxt(ha);
                return (-1);
        }

        qla_config_mac_addr(ha, ha->hw.mac_addr,
                (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id, 1);

        bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
        bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
        qla_config_mac_addr(ha, bcast_mac,
                (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id, 1);

        qla_config_rss(ha, (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id);

        qla_config_intr_coalesce(ha, (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id, 0);

        for (i = 0; i < ha->hw.num_sds_rings; i++)
                QL_ENABLE_INTERRUPTS(ha, i);

        return (0);
}

/*
 * Name: qla_init_rcv_cntxt
 * Function: Creates the Receive Context.
 */
static int
qla_init_rcv_cntxt(qla_host_t *ha)
{
        device_t                dev;
        qla_cdrp_t              cdrp;
        q80_rcv_cntxt_rsp_t     *rsp;
        q80_stat_desc_t         *sdesc;
        bus_addr_t              phys_addr;
        int                     i, j;
        qla_hw_t                *hw = &ha->hw;

        dev = ha->pci_dev;

        /*
         * Create Receive Context
         */

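        /*
         * Hand ownership of every status descriptor to the firmware
         * before the context is created.
         */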
        for (i = 0; i < hw->num_sds_rings; i++) {
                for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++) {
                        sdesc = (q80_stat_desc_t *)&hw->sds[i].sds_ring_base[j];
                        sdesc->data[0] =
                                Q8_STAT_DESC_SET_OWNER(Q8_STAT_DESC_OWNER_FW);
                }
        }

        phys_addr = ha->hw.rx_cntxt_req_paddr;

        bzero(&cdrp, sizeof(qla_cdrp_t));

        cdrp.cmd = Q8_CMD_CREATE_RX_CNTXT;
        cdrp.cmd_arg1 = (uint32_t)(phys_addr >> 32);
        cdrp.cmd_arg2 = (uint32_t)(phys_addr);
        cdrp.cmd_arg3 = (uint32_t)(sizeof (q80_rcv_cntxt_req_t));

        if (qla_issue_cmd(ha, &cdrp)) {
                device_printf(dev, "%s: Q8_CMD_CREATE_RX_CNTXT failed\n",
                        __func__);
                return (-1);
        } else {
                rsp = ha->hw.rx_cntxt_rsp;

                QL_DPRINT2((dev, "%s: rcv cntxt successful"
                        " rds_ring_offset = 0x%08x"
                        " sds_ring_offset = 0x%08x"
                        " cntxt_state = 0x%08x"
                        " funcs_per_port = 0x%08x"
                        " num_rds_rings = 0x%04x"
                        " num_sds_rings = 0x%04x"
                        " cntxt_id = 0x%04x"
                        " phys_port = 0x%02x"
                        " virt_port = 0x%02x\n",
                        __func__,
                        rsp->rx_rsp.rds_ring_offset,
                        rsp->rx_rsp.sds_ring_offset,
                        rsp->rx_rsp.cntxt_state,
                        rsp->rx_rsp.funcs_per_port,
                        rsp->rx_rsp.num_rds_rings,
                        rsp->rx_rsp.num_sds_rings,
                        rsp->rx_rsp.cntxt_id,
                        rsp->rx_rsp.phys_port,
                        rsp->rx_rsp.virt_port));

                for (i = 0; i < ha->hw.num_rds_rings; i++) {
                        QL_DPRINT2((dev,
                                "%s: rcv cntxt rds[%i].producer_reg = 0x%08x\n",
                                __func__, i, rsp->rds_rsp[i].producer_reg));
                }
                for (i = 0; i < ha->hw.num_sds_rings; i++) {
                        QL_DPRINT2((dev,
                                "%s: rcv cntxt sds[%i].consumer_reg = 0x%08x"
                                " sds[%i].intr_mask_reg = 0x%08x\n",
                                __func__, i, rsp->sds_rsp[i].consumer_reg,
                                i, rsp->sds_rsp[i].intr_mask_reg));
                }
        }
        ha->hw.flags.init_rx_cnxt = 1;
        return (0);
}

/*
 * Name: qla_del_rcv_cntxt
 * Function: Destroys the Receive Context.
 */
static void
qla_del_rcv_cntxt(qla_host_t *ha)
{
        qla_cdrp_t      cdrp;
        device_t        dev = ha->pci_dev;

        if (!ha->hw.flags.init_rx_cnxt)
                return;

        bzero(&cdrp, sizeof(qla_cdrp_t));

        cdrp.cmd = Q8_CMD_DESTROY_RX_CNTXT;
        cdrp.cmd_arg1 = (uint32_t) (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id;

        if (qla_issue_cmd(ha, &cdrp)) {
                device_printf(dev, "%s: Q8_CMD_DESTROY_RX_CNTXT failed\n",
                        __func__);
        }
        ha->hw.flags.init_rx_cnxt = 0;
}

/*
 * Name: qla_init_xmt_cntxt
 * Function: Creates the Transmit Context.
 */
static int
qla_init_xmt_cntxt(qla_host_t *ha)
{
        bus_addr_t              phys_addr;
        device_t                dev;
        q80_tx_cntxt_rsp_t      *tx_rsp;
        qla_cdrp_t              cdrp;
        qla_hw_t                *hw = &ha->hw;

        dev = ha->pci_dev;

        /*
         * Create Transmit Context
         */
        phys_addr = ha->hw.tx_cntxt_req_paddr;
        tx_rsp = ha->hw.tx_cntxt_rsp;

        hw->txr_comp = hw->txr_next = 0;
        *(hw->tx_cons) = 0;

        bzero(&cdrp, sizeof(qla_cdrp_t));

        cdrp.cmd = Q8_CMD_CREATE_TX_CNTXT;
        cdrp.cmd_arg1 = (uint32_t)(phys_addr >> 32);
        cdrp.cmd_arg2 = (uint32_t)(phys_addr);
        cdrp.cmd_arg3 = (uint32_t)(sizeof (q80_tx_cntxt_req_t));

        if (qla_issue_cmd(ha, &cdrp)) {
                device_printf(dev, "%s: Q8_CMD_CREATE_TX_CNTXT failed\n",
                        __func__);
                return (-1);
        } else {
                ha->hw.tx_prod_reg = tx_rsp->producer_reg;

                QL_DPRINT2((dev, "%s: tx cntxt successful"
                        " cntxt_state = 0x%08x "
                        " cntxt_id = 0x%04x "
                        " phys_port_id = 0x%02x "
                        " virt_port_id = 0x%02x "
                        " producer_reg = 0x%08x "
                        " intr_mask_reg = 0x%08x\n",
                        __func__, tx_rsp->cntxt_state, tx_rsp->cntxt_id,
                        tx_rsp->phys_port_id, tx_rsp->virt_port_id,
                        tx_rsp->producer_reg, tx_rsp->intr_mask_reg));
        }
        ha->hw.txr_free = NUM_TX_DESCRIPTORS;

        ha->hw.flags.init_tx_cnxt = 1;
        return (0);
}

/*
 * Name: qla_del_xmt_cntxt
 * Function: Destroys the Transmit Context.
 */
static void
qla_del_xmt_cntxt(qla_host_t *ha)
{
        qla_cdrp_t      cdrp;
        device_t        dev = ha->pci_dev;

        if (!ha->hw.flags.init_tx_cnxt)
                return;

        bzero(&cdrp, sizeof(qla_cdrp_t));

        cdrp.cmd = Q8_CMD_DESTROY_TX_CNTXT;
        cdrp.cmd_arg1 = (uint32_t) (ha->hw.tx_cntxt_rsp)->cntxt_id;

        if (qla_issue_cmd(ha, &cdrp)) {
                device_printf(dev, "%s: Q8_CMD_DESTROY_TX_CNTXT failed\n",
                        __func__);
        }
        ha->hw.flags.init_tx_cnxt = 0;
}

/*
 * Name: qla_get_max_rds
 * Function: Returns the maximum number of Receive Descriptor Rings per context.
 */
static int
qla_get_max_rds(qla_host_t *ha)
{
        qla_cdrp_t      cdrp;
        device_t        dev;

        dev = ha->pci_dev;

        bzero(&cdrp, sizeof(qla_cdrp_t));

        cdrp.cmd = Q8_CMD_RD_MAX_RDS_PER_CNTXT;

        if (qla_issue_cmd(ha, &cdrp)) {
                device_printf(dev, "%s: Q8_CMD_RD_MAX_RDS_PER_CNTXT failed\n",
                        __func__);
                return (-1);
        } else {
                ha->hw.max_rds_per_cntxt = cdrp.rsp_arg1;
                QL_DPRINT2((dev, "%s: max_rds_per_context 0x%08x\n",
                        __func__, ha->hw.max_rds_per_cntxt));
        }
        return 0;
}

/*
 * Name: qla_get_max_sds
 * Function: Returns the maximum number of Status Descriptor Rings per context.
 */
static int
qla_get_max_sds(qla_host_t *ha)
{
        qla_cdrp_t      cdrp;
        device_t        dev;

        dev = ha->pci_dev;

        bzero(&cdrp, sizeof(qla_cdrp_t));

        cdrp.cmd = Q8_CMD_RD_MAX_SDS_PER_CNTXT;

        if (qla_issue_cmd(ha, &cdrp)) {
                device_printf(dev, "%s: Q8_CMD_RD_MAX_SDS_PER_CNTXT failed\n",
                        __func__);
                return (-1);
        } else {
                ha->hw.max_sds_per_cntxt = cdrp.rsp_arg1;
                QL_DPRINT2((dev, "%s: max_sds_per_context 0x%08x\n",
                        __func__, ha->hw.max_sds_per_cntxt));
        }
        return 0;
}
1449
1450 /*
1451  * Name: qla_get_max_rules
1452  * Function: Returns the maximum number of Rules per context.
1453  */
1454 static int
1455 qla_get_max_rules(qla_host_t *ha)
1456 {
1457         qla_cdrp_t      cdrp;
1458         device_t        dev;
1459
1460         dev = ha->pci_dev;
1461
1462         bzero(&cdrp, sizeof(qla_cdrp_t));
1463
1464         cdrp.cmd = Q8_CMD_RD_MAX_RULES_PER_CNTXT;
1465
1466         if (qla_issue_cmd(ha, &cdrp)) {
1467                 device_printf(dev, "%s: Q8_CMD_RD_MAX_RULES_PER_CNTXT failed\n",
1468                         __func__);
1469                 return (-1);
1470         } else {
1471                 ha->hw.max_rules_per_cntxt = cdrp.rsp_arg1;
1472                 QL_DPRINT2((dev, "%s: max_rules_per_cntxt 0x%08x\n",
1473                         __func__, ha->hw.max_rules_per_cntxt));
1474         }
1475         return 0;
1476 }
1477
1478 /*
1479  * Name: qla_get_max_rcv_cntxts
1480  * Function: Returns the maximum number of Receive Contexts supported.
1481  */
1482 static int
1483 qla_get_max_rcv_cntxts(qla_host_t *ha)
1484 {
1485         qla_cdrp_t      cdrp;
1486         device_t        dev;
1487
1488         dev = ha->pci_dev;
1489
1490         bzero(&cdrp, sizeof(qla_cdrp_t));
1491
1492         cdrp.cmd = Q8_CMD_RD_MAX_RX_CNTXT;
1493
1494         if (qla_issue_cmd(ha, &cdrp)) {
1495                 device_printf(dev, "%s: Q8_CMD_RD_MAX_RX_CNTXT failed\n",
1496                         __func__);
1497                 return (-1);
1498         } else {
1499                 ha->hw.max_rcv_cntxts = cdrp.rsp_arg1;
1500                 QL_DPRINT2((dev, "%s: max_rcv_cntxts 0x%08x\n",
1501                         __func__, ha->hw.max_rcv_cntxts));
1502         }
1503         return 0;
1504 }
1505
1506 /*
1507  * Name: qla_get_max_tx_cntxts
1508  * Function: Returns the maximum number of Transmit Contexts supported.
1509  */
1510 static int
1511 qla_get_max_tx_cntxts(qla_host_t *ha)
1512 {
1513         qla_cdrp_t      cdrp;
1514         device_t        dev;
1515
1516         dev = ha->pci_dev;
1517
1518         bzero(&cdrp, sizeof(qla_cdrp_t));
1519
1520         cdrp.cmd = Q8_CMD_RD_MAX_TX_CNTXT;
1521
1522         if (qla_issue_cmd(ha, &cdrp)) {
1523                 device_printf(dev, "%s: Q8_CMD_RD_MAX_TX_CNTXT failed\n",
1524                         __func__);
1525                 return (-1);
1526         } else {
1527                 ha->hw.max_xmt_cntxts = cdrp.rsp_arg1;
1528                 QL_DPRINT2((dev, "%s: max_xmt_cntxts 0x%08x\n",
1529                         __func__, ha->hw.max_xmt_cntxts));
1530         }
1531         return (0);
1532 }
1533
1534 /*
1535  * Name: qla_get_max_mtu
1536  * Function: Returns the maximum MTU supported for a context.
1537  */
1538 static int
1539 qla_get_max_mtu(qla_host_t *ha)
1540 {
1541         qla_cdrp_t      cdrp;
1542         device_t        dev;
1543
1544         dev = ha->pci_dev;
1545
1546         bzero(&cdrp, sizeof(qla_cdrp_t));
1547
1548         cdrp.cmd = Q8_CMD_RD_MAX_MTU;
1549
1550         if (qla_issue_cmd(ha, &cdrp)) {
1551                 device_printf(dev, "%s: Q8_CMD_RD_MAX_MTU failed\n", __func__);
1552                 return (-1);
1553         } else {
1554                 ha->hw.max_mtu = cdrp.rsp_arg1;
1555                 QL_DPRINT2((dev, "%s: max_mtu 0x%08x\n", __func__,
1556                         ha->hw.max_mtu));
1557         }
1558         return (0);
1559 }
1560
1561 /*
1562  * Name: qla_set_max_mtu
1563  * Function:
1564  *      Sets the maximum transfer unit size for the specified rcv context.
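 *
 * Example (hypothetical caller, for illustration only): an MTU change
 * path would reprogram the receive context along the lines of
 *
 *      if (qla_set_max_mtu(ha, new_mtu,
 *          ha->hw.rx_cntxt_rsp->rx_rsp.cntxt_id) != 0)
 *              return (EIO);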
1565  */
1566 int
1567 qla_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id)
1568 {
1569         qla_cdrp_t      cdrp;
1570         device_t        dev;
1571
1572         dev = ha->pci_dev;
1573
1574         bzero(&cdrp, sizeof(qla_cdrp_t));
1575
1576         cdrp.cmd = Q8_CMD_SET_MTU;
1577         cdrp.cmd_arg1 = (uint32_t)cntxt_id;
1578         cdrp.cmd_arg2 = mtu;
1579
1580         if (qla_issue_cmd(ha, &cdrp)) {
1581                 device_printf(dev, "%s: Q8_CMD_SET_MTU failed\n", __func__);
1582                 return (-1);
1583         } else {
1584                 ha->hw.max_mtu = cdrp.rsp_arg1;
1585         }
1586         return (0);
1587 }
1588
1589 /*
1590  * Name: qla_get_max_lro
1591  * Function: Returns the maximum number of TCP connections that can be
1592  *      supported with LRO.
1593  */
1594 static int
1595 qla_get_max_lro(qla_host_t *ha)
1596 {
1597         qla_cdrp_t      cdrp;
1598         device_t        dev;
1599
1600         dev = ha->pci_dev;
1601
1602         bzero(&cdrp, sizeof(qla_cdrp_t));
1603
1604         cdrp.cmd = Q8_CMD_RD_MAX_LRO;
1605
1606         if (qla_issue_cmd(ha, &cdrp)) {
1607                 device_printf(dev, "%s: Q8_CMD_RD_MAX_LRO failed\n", __func__);
1608                 return (-1);
1609         } else {
1610                 ha->hw.max_lro = cdrp.rsp_arg1;
1611                 QL_DPRINT2((dev, "%s: max_lro 0x%08x\n", __func__,
1612                         ha->hw.max_lro));
1613         }
1614         return (0);
1615 }
1616
1617 /*
1618  * Name: qla_get_flow_control
1619  * Function: Returns the Receive/Transmit Flow Control (PAUSE) settings for
1620  *      the PCI function.
1621  */
1622 static int
1623 qla_get_flow_control(qla_host_t *ha)
1624 {
1625         qla_cdrp_t      cdrp;
1626         device_t        dev;
1627
1628         dev = ha->pci_dev;
1629
1630         bzero(&cdrp, sizeof(qla_cdrp_t));
1631
1632         cdrp.cmd = Q8_CMD_GET_FLOW_CNTRL;
1633
1634         if (qla_issue_cmd(ha, &cdrp)) {
1635                 device_printf(dev, "%s: Q8_CMD_GET_FLOW_CNTRL failed\n",
1636                         __func__);
1637                 return (-1);
1638         } else {
1639                 QL_DPRINT2((dev, "%s: flow control 0x%08x\n", __func__,
1640                         cdrp.rsp_arg1));
1641         }
1642         return (0);
1643 }
1644
1645 /*
1646  * Name: qla_get_hw_caps
1647  * Function: Retrieves the hardware capabilities (ring, rule, context,
1648  *      MTU, LRO and flow control limits).
1648  */
1649 void
1650 qla_get_hw_caps(qla_host_t *ha)
1651 {
1652         /* qla_read_mac_addr(ha); */
1653         qla_get_max_rds(ha);
1654         qla_get_max_sds(ha);
1655         qla_get_max_rules(ha);
1656         qla_get_max_rcv_cntxts(ha);
1657         qla_get_max_tx_cntxts(ha);
1658         qla_get_max_mtu(ha);
1659         qla_get_max_lro(ha);
1660         qla_get_flow_control(ha);
1661         return;
1662 }
1663
1664 /*
1665  * Name: qla_hw_set_multi
1666  * Function: Sets the multicast addresses provided by the host O.S. into
1667  *      the hardware (for the given interface)
1668  */
1669 void
1670 qla_hw_set_multi(qla_host_t *ha, uint8_t *mta, uint32_t mcnt,
1671         uint32_t add_multi)
1672 {
1673         q80_rcv_cntxt_rsp_t     *rsp;
1674         int i;
1675
1676         rsp = ha->hw.rx_cntxt_rsp;
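        /*
         * 'mta' is a packed array of 'mcnt' Ethernet (Q8_MAC_ADDR_LEN byte)
         * MAC addresses; program each one into the receive context.
         */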
1677         for (i = 0; i < mcnt; i++) {
1678                 qla_config_mac_addr(ha, mta, rsp->rx_rsp.cntxt_id, add_multi);
1679                 mta += Q8_MAC_ADDR_LEN;
1680         }
1681         return;
1682 }
1683
1684 /*
1685  * Name: qla_hw_tx_done_locked
1686  * Function: Handle Transmit Completions
1687  */
1688 static void
1689 qla_hw_tx_done_locked(qla_host_t *ha)
1690 {
1691         qla_tx_buf_t *txb;
1692         qla_hw_t *hw = &ha->hw;
1693         uint32_t comp_idx, comp_count = 0;
1694
1695         /* retrieve index of last entry in tx ring completed */
1696         comp_idx = qla_le32_to_host(*(hw->tx_cons));
1697
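        /*
         * Walk the ring from the last entry reclaimed (txr_comp) up to the
         * hardware consumer index (comp_idx), wrapping at NUM_TX_DESCRIPTORS,
         * and release the mbuf and DMA resources of each completed transmit.
         */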
1698         while (comp_idx != hw->txr_comp) {
1699                 txb = &ha->tx_buf[hw->txr_comp];
1700
1701                 hw->txr_comp++;
1702                 if (hw->txr_comp == NUM_TX_DESCRIPTORS)
1703                         hw->txr_comp = 0;
1704
1705                 comp_count++;
1706
1707                 if (txb->m_head) {
1708                         bus_dmamap_sync(ha->tx_tag, txb->map,
1709                                 BUS_DMASYNC_POSTWRITE);
1710                         bus_dmamap_unload(ha->tx_tag, txb->map);
1711                         bus_dmamap_destroy(ha->tx_tag, txb->map);
1712                         m_freem(txb->m_head);
1713
1714                         txb->map = (bus_dmamap_t)0;
1715                         txb->m_head = NULL;
1716                 }
1717         }
1718
1719         hw->txr_free += comp_count;
1720
1721         QL_DPRINT8((ha->pci_dev, "%s: return [c,f, p, pn][%d, %d, %d, %d]\n", __func__,
1722                 hw->txr_comp, hw->txr_free, hw->txr_next, READ_REG32(ha, (ha->hw.tx_prod_reg + 0x1b2000))));
1723
1724         return;
1725 }
1726
1727 /*
1728  * Name: qla_hw_tx_done
1729  * Function: Handle Transmit Completions
1730  */
1731 void
1732 qla_hw_tx_done(qla_host_t *ha)
1733 {
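        /*
         * Use a trylock so this path never blocks; if the tx lock is busy,
         * the completions are simply reaped on a later call.
         */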
1734         if (!mtx_trylock(&ha->tx_lock)) {
1735                 QL_DPRINT8((ha->pci_dev,
1736                         "%s: !mtx_trylock(&ha->tx_lock)\n", __func__));
1737                 return;
1738         }
1739         qla_hw_tx_done_locked(ha);
1740
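        /* Resume transmits once enough descriptors have been reclaimed. */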
1741         if (ha->hw.txr_free > free_pkt_thres)
1742                 ha->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1743
1744         mtx_unlock(&ha->tx_lock);
1745         return;
1746 }
1747
1748 void
1749 qla_update_link_state(qla_host_t *ha)
1750 {
1751         uint32_t link_state;
1752         uint32_t prev_link_state;
1753
1754         if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1755                 ha->hw.flags.link_up = 0;
1756                 return;
1757         }
1758         link_state = READ_REG32(ha, Q8_LINK_STATE);
1759
1760         prev_link_state = ha->hw.flags.link_up;
1761
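        /*
         * Q8_LINK_STATE packs the per-PCI-function link state into 4-bit
         * fields (function 0 in bits 3:0, function 1 in bits 7:4); a value
         * of 1 means link up.
         */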
1762         if (ha->pci_func == 0)
1763                 ha->hw.flags.link_up = (((link_state & 0xF) == 1) ? 1 : 0);
1764         else
1765                 ha->hw.flags.link_up = ((((link_state >> 4) & 0xF) == 1) ? 1 : 0);
1766
1767         if (prev_link_state != ha->hw.flags.link_up) {
1768                 if (ha->hw.flags.link_up) {
1769                         if_link_state_change(ha->ifp, LINK_STATE_UP);
1770                 } else {
1771                         if_link_state_change(ha->ifp, LINK_STATE_DOWN);
1772                 }
1773         }
1774 }
1775
1776 int
1777 qla_config_lro(qla_host_t *ha)
1778 {
1779         int i;
1780         qla_hw_t *hw = &ha->hw;
1781         struct lro_ctrl *lro;
1782
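        /* Set up one LRO context per status descriptor (SDS) ring. */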
1783         for (i = 0; i < hw->num_sds_rings; i++) {
1784                 lro = &hw->sds[i].lro;
1785                 if (tcp_lro_init(lro)) {
1786                         device_printf(ha->pci_dev, "%s: tcp_lro_init failed\n",
1787                                 __func__);
1788                         return (-1);
1789                 }
1790                 lro->ifp = ha->ifp;
1791         }
1792         ha->flags.lro_init = 1;
1793
1794         QL_DPRINT2((ha->pci_dev, "%s: LRO initialized\n", __func__));
1795         return (0);
1796 }
1797
1798 void
1799 qla_free_lro(qla_host_t *ha)
1800 {
1801         int i;
1802         qla_hw_t *hw = &ha->hw;
1803         struct lro_ctrl *lro;
1804
1805         if (!ha->flags.lro_init)
1806                 return;
1807
1808         for (i = 0; i < hw->num_sds_rings; i++) {
1809                 lro = &hw->sds[i].lro;
1810                 tcp_lro_free(lro);
1811         }
1812         ha->flags.lro_init = 0;
1813 }
1814
1815 void
1816 qla_hw_stop_rcv(qla_host_t *ha)
1817 {
1818         int i, done, count = 100;
1819
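        /*
         * Poll for up to ~1 second (100 iterations x 10ms) for every status
         * descriptor ring to go inactive.
         */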
1820         while (count--) {
1821                 done = 1;
1822                 for (i = 0; i < ha->hw.num_sds_rings; i++) {
1823                         if (ha->hw.sds[i].rcv_active)
1824                                 done = 0;
1825                 }
1826                 if (done)
1827                         break;
1828                 else
1829                         qla_mdelay(__func__, 10);
1830         }
1831 }