/*
 * Copyright (c) 2013-2016 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: ql_os.c
 * Author: David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");


#include "ql_os.h"
#include "ql_hw.h"
#include "ql_def.h"
#include "ql_inline.h"
#include "ql_ver.h"
#include "ql_glbl.h"
#include "ql_dbg.h"
#include <sys/smp.h>

/*
 * Some PCI Configuration Space Related Defines
 */

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC       0x1077
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP8030
#define PCI_PRODUCT_QLOGIC_ISP8030      0x8030
#endif

#define PCI_QLOGIC_ISP8030 \
        ((PCI_PRODUCT_QLOGIC_ISP8030 << 16) | PCI_VENDOR_QLOGIC)

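/*
 * 32-bit probe key: device id in the upper 16 bits, vendor id in the lower
 * 16 bits. qla_pci_probe() below builds the same key from PCI config space
 * and switches on it.
 */
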
/*
 * static functions
 */
static int qla_alloc_parent_dma_tag(qla_host_t *ha);
static void qla_free_parent_dma_tag(qla_host_t *ha);
static int qla_alloc_xmt_bufs(qla_host_t *ha);
static void qla_free_xmt_bufs(qla_host_t *ha);
static int qla_alloc_rcv_bufs(qla_host_t *ha);
static void qla_free_rcv_bufs(qla_host_t *ha);
static void qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb);

static void qla_init_ifnet(device_t dev, qla_host_t *ha);
static int qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS);
static void qla_release(qla_host_t *ha);
static void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
                int error);
static void qla_stop(qla_host_t *ha);
static void qla_get_peer(qla_host_t *ha);
static void qla_error_recovery(void *context, int pending);
static void qla_async_event(void *context, int pending);
static void qla_stats(void *context, int pending);
static int qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx,
                uint32_t iscsi_pdu);

/*
 * Hooks to the Operating Systems
 */
static int qla_pci_probe (device_t);
static int qla_pci_attach (device_t);
static int qla_pci_detach (device_t);

static void qla_init(void *arg);
static int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int qla_media_change(struct ifnet *ifp);
static void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);

static int qla_transmit(struct ifnet *ifp, struct mbuf  *mp);
static void qla_qflush(struct ifnet *ifp);
static int qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp);
static void qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp);
static int qla_create_fp_taskqueues(qla_host_t *ha);
static void qla_destroy_fp_taskqueues(qla_host_t *ha);
static void qla_drain_fp_taskqueues(qla_host_t *ha);

static device_method_t qla_pci_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe, qla_pci_probe),
        DEVMETHOD(device_attach, qla_pci_attach),
        DEVMETHOD(device_detach, qla_pci_detach),
        { 0, 0 }
};

static driver_t qla_pci_driver = {
        "ql", qla_pci_methods, sizeof (qla_host_t),
};

static devclass_t qla83xx_devclass;

DRIVER_MODULE(qla83xx, pci, qla_pci_driver, qla83xx_devclass, 0, 0);

MODULE_DEPEND(qla83xx, pci, 1, 1, 1);
MODULE_DEPEND(qla83xx, ether, 1, 1, 1);

MALLOC_DEFINE(M_QLA83XXBUF, "qla83xxbuf", "Buffers for qla83xx driver");

#define QL_STD_REPLENISH_THRES          0
#define QL_JUMBO_REPLENISH_THRES        32
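
/*
 * ha->std_replenish is seeded with QL_STD_REPLENISH_THRES and bumped to
 * QL_JUMBO_REPLENISH_THRES when the MTU is raised above ETHERMTU (see the
 * SIOCSIFMTU case in qla_ioctl() below).
 */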

static char dev_str[64];
static char ver_str[64];

/*
 * Name:        qla_pci_probe
 * Function:    Validate the PCI device to be a QLA80XX device
 */
static int
qla_pci_probe(device_t dev)
{
        switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
        case PCI_QLOGIC_ISP8030:
                snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d",
                        "Qlogic ISP 83xx PCI CNA Adapter-Ethernet Function",
                        QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
                        QLA_VERSION_BUILD);
                snprintf(ver_str, sizeof(ver_str), "v%d.%d.%d",
                        QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
                        QLA_VERSION_BUILD);
                device_set_desc(dev, dev_str);
                break;
        default:
                return (ENXIO);
        }

        if (bootverbose)
                printf("%s: %s\n", __func__, dev_str);

        return (BUS_PROBE_DEFAULT);
}

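/*
 * Name:        qla_add_sysctls
 * Function:    Registers the per-device sysctl nodes that export the driver
 *              and firmware versions, the debug/minidump knobs and several
 *              Tx/LRO counters. With the default driver name these appear
 *              under dev.ql.<unit>, e.g. "sysctl dev.ql.0.debug=1".
 */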
static void
qla_add_sysctls(qla_host_t *ha)
{
        device_t dev = ha->pci_dev;

        SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "version", CTLFLAG_RD,
                ver_str, 0, "Driver Version");

        SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "fw_version", CTLFLAG_RD,
                ha->fw_ver_str, 0, "firmware version");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "link_status", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_get_link_status, "I", "Link Status");

        ha->dbg_level = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "debug", CTLFLAG_RW,
                &ha->dbg_level, ha->dbg_level, "Debug Level");

        ha->enable_minidump = 1;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "enable_minidump", CTLFLAG_RW,
                &ha->enable_minidump, ha->enable_minidump,
                "Minidump retrieval is enabled only when this is set");

        ha->std_replenish = QL_STD_REPLENISH_THRES;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "std_replenish", CTLFLAG_RW,
                &ha->std_replenish, ha->std_replenish,
                "Threshold for Replenishing Standard Frames");

        SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "ipv4_lro",
                CTLFLAG_RD, &ha->ipv4_lro,
                "number of ipv4 lro completions");

        SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "ipv6_lro",
                CTLFLAG_RD, &ha->ipv6_lro,
                "number of ipv6 lro completions");

        SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "tx_tso_frames",
                CTLFLAG_RD, &ha->tx_tso_frames,
                "number of Tx TSO Frames");

        SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "hw_vlan_tx_frames",
                CTLFLAG_RD, &ha->hw_vlan_tx_frames,
                "number of Tx VLAN Frames");

        SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "hw_lock_failed",
                CTLFLAG_RD, &ha->hw_lock_failed,
                "number of hw_lock failures");

        return;
}

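/*
 * Name:        qla_watchdog
 * Function:    Periodic callout. Checks adapter health and, on a failure,
 *              an injected error or a peer reset request, queues the
 *              error-recovery task. While the interface is up it also kicks
 *              the stats and async-event tasks. Reschedules itself every
 *              QLA_WATCHDOG_CALLOUT_TICKS unless qla_watchdog_exit is set.
 */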
static void
qla_watchdog(void *arg)
{
        qla_host_t *ha = arg;
        qla_hw_t *hw;
        struct ifnet *ifp;

        hw = &ha->hw;
        ifp = ha->ifp;

        if (ha->qla_watchdog_exit) {
                ha->qla_watchdog_exited = 1;
                return;
        }
        ha->qla_watchdog_exited = 0;

        if (!ha->qla_watchdog_pause) {
                if (ql_hw_check_health(ha) || ha->qla_initiate_recovery ||
                        (ha->msg_from_peer == QL_PEER_MSG_RESET)) {

                        if (!(ha->dbg_level & 0x8000)) {
                                ha->qla_watchdog_paused = 1;
                                ha->qla_watchdog_pause = 1;
                                ha->qla_initiate_recovery = 0;
                                ha->err_inject = 0;
                                device_printf(ha->pci_dev,
                                        "%s: taskqueue_enqueue(err_task)\n",
                                        __func__);
                                taskqueue_enqueue(ha->err_tq, &ha->err_task);
                                return;
                        }

                } else if (ha->qla_interface_up) {

                        ha->watchdog_ticks++;

                        if (ha->watchdog_ticks > 1000)
                                ha->watchdog_ticks = 0;

                        if (!ha->watchdog_ticks && QL_RUNNING(ifp)) {
                                taskqueue_enqueue(ha->stats_tq, &ha->stats_task);
                        }

                        if (ha->async_event) {
                                taskqueue_enqueue(ha->async_event_tq,
                                        &ha->async_event_task);
                        }

#if 0
                        for (i = 0; ((i < ha->hw.num_sds_rings) &&
                                        !ha->watchdog_ticks); i++) {
                                qla_tx_fp_t *fp = &ha->tx_fp[i];

                                if (fp->fp_taskqueue != NULL)
                                        taskqueue_enqueue(fp->fp_taskqueue,
                                                &fp->fp_task);
                        }
#endif
                        ha->qla_watchdog_paused = 0;
                } else {
                        ha->qla_watchdog_paused = 0;
                }
        } else {
                ha->qla_watchdog_paused = 1;
        }

        callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
                qla_watchdog, ha);
}

/*
 * Name:        qla_pci_attach
 * Function:    Attaches the device to the operating system
 */
static int
qla_pci_attach(device_t dev)
{
        qla_host_t *ha = NULL;
        uint32_t rsrc_len;
        int i;
        uint32_t num_rcvq = 0;

        if ((ha = device_get_softc(dev)) == NULL) {
                device_printf(dev, "cannot get softc\n");
                return (ENOMEM);
        }

        memset(ha, 0, sizeof (qla_host_t));

        if (pci_get_device(dev) != PCI_PRODUCT_QLOGIC_ISP8030) {
                device_printf(dev, "device is not ISP8030\n");
                return (ENXIO);
        }

        ha->pci_func = pci_get_function(dev) & 0x1;

        ha->pci_dev = dev;

        pci_enable_busmaster(dev);

        ha->reg_rid = PCIR_BAR(0);
        ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
                                RF_ACTIVE);

        if (ha->pci_reg == NULL) {
                device_printf(dev, "unable to map any ports\n");
                goto qla_pci_attach_err;
        }

        rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
                                        ha->reg_rid);

        mtx_init(&ha->hw_lock, "qla83xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
        ha->flags.lock_init = 1;

        qla_add_sysctls(ha);

        ha->hw.num_sds_rings = MAX_SDS_RINGS;
        ha->hw.num_rds_rings = MAX_RDS_RINGS;
        ha->hw.num_tx_rings = NUM_TX_RINGS;

        ha->reg_rid1 = PCIR_BAR(2);
        ha->pci_reg1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
                        &ha->reg_rid1, RF_ACTIVE);

        ha->msix_count = pci_msix_count(dev);

        if (ha->msix_count < 1) {
                device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
                        ha->msix_count);
                goto qla_pci_attach_err;
        }

        if (ha->msix_count < (ha->hw.num_sds_rings + 1)) {
                ha->hw.num_sds_rings = ha->msix_count - 1;
        }

        QL_DPRINT2(ha, (dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
                " msix_count 0x%x pci_reg %p pci_reg1 %p\n", __func__, ha,
                ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg,
                ha->pci_reg1));

        /* initialize hardware */
        if (ql_init_hw(ha)) {
                device_printf(dev, "%s: ql_init_hw failed\n", __func__);
                goto qla_pci_attach_err;
        }

        device_printf(dev, "%s: firmware[%d.%d.%d.%d]\n", __func__,
                ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
                ha->fw_ver_build);
        snprintf(ha->fw_ver_str, sizeof(ha->fw_ver_str), "%d.%d.%d.%d",
                        ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
                        ha->fw_ver_build);

        if (qla_get_nic_partition(ha, NULL, &num_rcvq)) {
                device_printf(dev, "%s: qla_get_nic_partition failed\n",
                        __func__);
                goto qla_pci_attach_err;
        }
        device_printf(dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
                " msix_count 0x%x pci_reg %p pci_reg1 %p num_rcvq = %d\n",
                __func__, ha, ha->pci_func, rsrc_len, ha->msix_count,
                ha->pci_reg, ha->pci_reg1, num_rcvq);

        if ((ha->msix_count < 64) || (num_rcvq != 32)) {
                if (ha->hw.num_sds_rings > 15) {
                        ha->hw.num_sds_rings = 15;
                }
        }

        ha->hw.num_rds_rings = ha->hw.num_sds_rings;
        ha->hw.num_tx_rings = ha->hw.num_sds_rings;

#ifdef QL_ENABLE_ISCSI_TLV
        ha->hw.num_tx_rings = ha->hw.num_sds_rings * 2;
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */

        ql_hw_add_sysctls(ha);

        ha->msix_count = ha->hw.num_sds_rings + 1;

        if (pci_alloc_msix(dev, &ha->msix_count)) {
                device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
                        ha->msix_count);
                ha->msix_count = 0;
                goto qla_pci_attach_err;
        }

        ha->mbx_irq_rid = 1;
        ha->mbx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
                                &ha->mbx_irq_rid,
                                (RF_ACTIVE | RF_SHAREABLE));
        if (ha->mbx_irq == NULL) {
                device_printf(dev, "could not allocate mbx interrupt\n");
                goto qla_pci_attach_err;
        }
        if (bus_setup_intr(dev, ha->mbx_irq, (INTR_TYPE_NET | INTR_MPSAFE),
                NULL, ql_mbx_isr, ha, &ha->mbx_handle)) {
                device_printf(dev, "could not setup mbx interrupt\n");
                goto qla_pci_attach_err;
        }

        for (i = 0; i < ha->hw.num_sds_rings; i++) {
                ha->irq_vec[i].sds_idx = i;
                ha->irq_vec[i].ha = ha;
                ha->irq_vec[i].irq_rid = 2 + i;

                ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
                                &ha->irq_vec[i].irq_rid,
                                (RF_ACTIVE | RF_SHAREABLE));

                if (ha->irq_vec[i].irq == NULL) {
                        device_printf(dev, "could not allocate interrupt\n");
                        goto qla_pci_attach_err;
                }
                if (bus_setup_intr(dev, ha->irq_vec[i].irq,
                        (INTR_TYPE_NET | INTR_MPSAFE),
                        NULL, ql_isr, &ha->irq_vec[i],
                        &ha->irq_vec[i].handle)) {
                        device_printf(dev, "could not setup interrupt\n");
                        goto qla_pci_attach_err;
                }

                ha->tx_fp[i].ha = ha;
                ha->tx_fp[i].txr_idx = i;

                if (qla_alloc_tx_br(ha, &ha->tx_fp[i])) {
                        device_printf(dev, "%s: could not allocate tx_br[%d]\n",
                                __func__, i);
                        goto qla_pci_attach_err;
                }
        }

        if (qla_create_fp_taskqueues(ha) != 0)
                goto qla_pci_attach_err;

        printf("%s: mp_ncpus %d sds %d rds %d msi-x %d\n", __func__, mp_ncpus,
                ha->hw.num_sds_rings, ha->hw.num_rds_rings, ha->msix_count);

        ql_read_mac_addr(ha);

        /* allocate parent dma tag */
        if (qla_alloc_parent_dma_tag(ha)) {
                device_printf(dev, "%s: qla_alloc_parent_dma_tag failed\n",
                        __func__);
                goto qla_pci_attach_err;
        }

        /* alloc all dma buffers */
        if (ql_alloc_dma(ha)) {
                device_printf(dev, "%s: ql_alloc_dma failed\n", __func__);
                goto qla_pci_attach_err;
        }
        qla_get_peer(ha);

        if (ql_minidump_init(ha) != 0) {
                device_printf(dev, "%s: ql_minidump_init failed\n", __func__);
                goto qla_pci_attach_err;
        }
        ql_alloc_drvr_state_buffer(ha);
        /* create the O.S. ethernet interface */
        qla_init_ifnet(dev, ha);

        ha->flags.qla_watchdog_active = 1;
        ha->qla_watchdog_pause = 0;

        callout_init(&ha->tx_callout, TRUE);
        ha->flags.qla_callout_init = 1;

        /* create ioctl device interface */
        if (ql_make_cdev(ha)) {
                device_printf(dev, "%s: ql_make_cdev failed\n", __func__);
                goto qla_pci_attach_err;
        }

        callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
                qla_watchdog, ha);

        TASK_INIT(&ha->err_task, 0, qla_error_recovery, ha);
        ha->err_tq = taskqueue_create("qla_errq", M_NOWAIT,
                        taskqueue_thread_enqueue, &ha->err_tq);
        taskqueue_start_threads(&ha->err_tq, 1, PI_NET, "%s errq",
                device_get_nameunit(ha->pci_dev));

        TASK_INIT(&ha->async_event_task, 0, qla_async_event, ha);
        ha->async_event_tq = taskqueue_create("qla_asyncq", M_NOWAIT,
                        taskqueue_thread_enqueue, &ha->async_event_tq);
        taskqueue_start_threads(&ha->async_event_tq, 1, PI_NET, "%s asyncq",
                device_get_nameunit(ha->pci_dev));

        TASK_INIT(&ha->stats_task, 0, qla_stats, ha);
        ha->stats_tq = taskqueue_create("qla_statsq", M_NOWAIT,
                        taskqueue_thread_enqueue, &ha->stats_tq);
        taskqueue_start_threads(&ha->stats_tq, 1, PI_NET, "%s statsq",
                device_get_nameunit(ha->pci_dev));

        QL_DPRINT2(ha, (dev, "%s: exit 0\n", __func__));
        return (0);

qla_pci_attach_err:

        qla_release(ha);

        if (ha->flags.lock_init) {
                mtx_destroy(&ha->hw_lock);
        }

        QL_DPRINT2(ha, (dev, "%s: exit ENXIO\n", __func__));
        return (ENXIO);
}

/*
 * Name:        qla_pci_detach
 * Function:    Unhooks the device from the operating system
 */
static int
qla_pci_detach(device_t dev)
{
        qla_host_t *ha = NULL;
        struct ifnet *ifp;

        if ((ha = device_get_softc(dev)) == NULL) {
                device_printf(dev, "cannot get softc\n");
                return (ENOMEM);
        }

        QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

        ifp = ha->ifp;

        ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
        QLA_LOCK(ha, __func__, -1, 0);

        ha->qla_detach_active = 1;
        qla_stop(ha);

        qla_release(ha);

        QLA_UNLOCK(ha, __func__);

        if (ha->flags.lock_init) {
                mtx_destroy(&ha->hw_lock);
        }

        QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));

        return (0);
}

/*
 * SYSCTL Related Callbacks
 */
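/*
 * Name:        qla_sysctl_get_link_status
 * Function:    Handler for the "link_status" node; writing 1 to it (e.g.
 *              "sysctl dev.ql.0.link_status=1" with the default driver
 *              name) refreshes the link state via ql_hw_link_status().
 */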
static int
qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        if (ret == 1) {
                ha = (qla_host_t *)arg1;
                ql_hw_link_status(ha);
        }
        return (err);
}

/*
 * Name:        qla_release
 * Function:    Releases the resources allocated for the device
 */
static void
qla_release(qla_host_t *ha)
{
        device_t dev;
        int i;

        dev = ha->pci_dev;

        if (ha->async_event_tq) {
                taskqueue_drain(ha->async_event_tq, &ha->async_event_task);
                taskqueue_free(ha->async_event_tq);
        }

        if (ha->err_tq) {
                taskqueue_drain(ha->err_tq, &ha->err_task);
                taskqueue_free(ha->err_tq);
        }

        if (ha->stats_tq) {
                taskqueue_drain(ha->stats_tq, &ha->stats_task);
                taskqueue_free(ha->stats_tq);
        }

        ql_del_cdev(ha);

        if (ha->flags.qla_watchdog_active) {
                ha->qla_watchdog_exit = 1;

                while (ha->qla_watchdog_exited == 0)
                        qla_mdelay(__func__, 1);
        }

        if (ha->flags.qla_callout_init)
                callout_stop(&ha->tx_callout);

        if (ha->ifp != NULL)
                ether_ifdetach(ha->ifp);

        ql_free_drvr_state_buffer(ha);
        ql_free_dma(ha);
        qla_free_parent_dma_tag(ha);

        if (ha->mbx_handle)
                (void)bus_teardown_intr(dev, ha->mbx_irq, ha->mbx_handle);

        if (ha->mbx_irq)
                (void) bus_release_resource(dev, SYS_RES_IRQ, ha->mbx_irq_rid,
                                ha->mbx_irq);

        for (i = 0; i < ha->hw.num_sds_rings; i++) {

                if (ha->irq_vec[i].handle) {
                        (void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
                                        ha->irq_vec[i].handle);
                }

                if (ha->irq_vec[i].irq) {
                        (void)bus_release_resource(dev, SYS_RES_IRQ,
                                ha->irq_vec[i].irq_rid,
                                ha->irq_vec[i].irq);
                }

                qla_free_tx_br(ha, &ha->tx_fp[i]);
        }
        qla_destroy_fp_taskqueues(ha);

        if (ha->msix_count)
                pci_release_msi(dev);

        /*
         * Note: ha->hw_lock is not destroyed here; the callers of
         * qla_release() (qla_pci_attach()'s error path and
         * qla_pci_detach()) destroy it after this function returns.
         */

        if (ha->pci_reg)
                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
                                ha->pci_reg);

        if (ha->pci_reg1)
                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid1,
                                ha->pci_reg1);

        return;
}

/*
 * DMA Related Functions
 */

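/*
 * Name:        qla_dmamap_callback
 * Function:    busdma load callback; records the bus address of the single
 *              segment in *arg, or leaves it zero if the load failed.
 */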
static void
qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
        *((bus_addr_t *)arg) = 0;

        if (error) {
                printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
                return;
        }

        *((bus_addr_t *)arg) = segs[0].ds_addr;

        return;
}

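/*
 * Name:        ql_alloc_dmabuf
 * Function:    Creates a DMA tag for the request, allocates coherent DMA
 *              memory and loads the map; on success dma_buf->dma_addr holds
 *              the bus address of the buffer.
 */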
int
ql_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
        int             ret = 0;
        device_t        dev;
        bus_addr_t      b_addr;

        dev = ha->pci_dev;

        QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

        ret = bus_dma_tag_create(
                        ha->parent_tag,         /* parent */
                        dma_buf->alignment,
                        ((bus_size_t)(1ULL << 32)),/* boundary */
                        BUS_SPACE_MAXADDR,      /* lowaddr */
                        BUS_SPACE_MAXADDR,      /* highaddr */
                        NULL, NULL,             /* filter, filterarg */
                        dma_buf->size,          /* maxsize */
                        1,                      /* nsegments */
                        dma_buf->size,          /* maxsegsize */
                        0,                      /* flags */
                        NULL, NULL,             /* lockfunc, lockarg */
                        &dma_buf->dma_tag);

        if (ret) {
                device_printf(dev, "%s: could not create dma tag\n", __func__);
                goto ql_alloc_dmabuf_exit;
        }
        ret = bus_dmamem_alloc(dma_buf->dma_tag,
                        (void **)&dma_buf->dma_b,
                        (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
                        &dma_buf->dma_map);
        if (ret) {
                bus_dma_tag_destroy(dma_buf->dma_tag);
                device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__);
                goto ql_alloc_dmabuf_exit;
        }

        ret = bus_dmamap_load(dma_buf->dma_tag,
                        dma_buf->dma_map,
                        dma_buf->dma_b,
                        dma_buf->size,
                        qla_dmamap_callback,
                        &b_addr, BUS_DMA_NOWAIT);

        if (ret || !b_addr) {
                bus_dma_tag_destroy(dma_buf->dma_tag);
                bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
                        dma_buf->dma_map);
                ret = -1;
                goto ql_alloc_dmabuf_exit;
        }

        dma_buf->dma_addr = b_addr;

ql_alloc_dmabuf_exit:
        QL_DPRINT2(ha, (dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n",
                __func__, ret, (void *)dma_buf->dma_tag,
                (void *)dma_buf->dma_map, (void *)dma_buf->dma_b,
                dma_buf->size));

        return (ret);
}

void
ql_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
        bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
        bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
        bus_dma_tag_destroy(dma_buf->dma_tag);
}

static int
qla_alloc_parent_dma_tag(qla_host_t *ha)
{
        int             ret;
        device_t        dev;

        dev = ha->pci_dev;

        /*
         * Allocate parent DMA Tag
         */
        ret = bus_dma_tag_create(
                        bus_get_dma_tag(dev),   /* parent */
                        1, ((bus_size_t)(1ULL << 32)),/* alignment, boundary */
                        BUS_SPACE_MAXADDR,      /* lowaddr */
                        BUS_SPACE_MAXADDR,      /* highaddr */
                        NULL, NULL,             /* filter, filterarg */
                        BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
                        0,                      /* nsegments */
                        BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
                        0,                      /* flags */
                        NULL, NULL,             /* lockfunc, lockarg */
                        &ha->parent_tag);

        if (ret) {
                device_printf(dev, "%s: could not create parent dma tag\n",
                        __func__);
                return (-1);
        }

        ha->flags.parent_tag = 1;

        return (0);
}

static void
qla_free_parent_dma_tag(qla_host_t *ha)
{
        if (ha->flags.parent_tag) {
                bus_dma_tag_destroy(ha->parent_tag);
                ha->flags.parent_tag = 0;
        }
}

/*
 * Name: qla_init_ifnet
 * Function: Creates the Network Device Interface and Registers it with the O.S.
 */

static void
qla_init_ifnet(device_t dev, qla_host_t *ha)
{
        struct ifnet *ifp;

        QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

        ifp = ha->ifp = if_alloc(IFT_ETHER);

        if (ifp == NULL)
                panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));

        if_initname(ifp, device_get_name(dev), device_get_unit(dev));

        ifp->if_baudrate = IF_Gbps(10);
        ifp->if_capabilities = IFCAP_LINKSTATE;
        ifp->if_mtu = ETHERMTU;

        ifp->if_init = qla_init;
        ifp->if_softc = ha;
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_ioctl = qla_ioctl;

        ifp->if_transmit = qla_transmit;
        ifp->if_qflush = qla_qflush;

        IFQ_SET_MAXLEN(&ifp->if_snd, qla_get_ifq_snd_maxlen(ha));
        ifp->if_snd.ifq_drv_maxlen = qla_get_ifq_snd_maxlen(ha);
        IFQ_SET_READY(&ifp->if_snd);

        ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

        ether_ifattach(ifp, qla_get_mac_addr(ha));

        ifp->if_capabilities |= IFCAP_HWCSUM |
                                IFCAP_TSO4 |
                                IFCAP_JUMBO_MTU |
                                IFCAP_VLAN_HWTAGGING |
                                IFCAP_VLAN_MTU |
                                IFCAP_VLAN_HWTSO |
                                IFCAP_LRO;

        ifp->if_capenable = ifp->if_capabilities;

        ifp->if_hdrlen = sizeof(struct ether_vlan_header);

        ifmedia_init(&ha->media, IFM_IMASK, qla_media_change, qla_media_status);

        ifmedia_add(&ha->media, (IFM_ETHER | qla_get_optics(ha) | IFM_FDX), 0,
                NULL);
        ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);

        ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));

        QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));

        return;
}

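/*
 * Name:        qla_init_locked
 * Function:    (Re)initializes the interface: stops the adapter, allocates
 *              transmit/receive buffers, programs the MAC address and brings
 *              the hardware interface up. Called with the QLA lock held.
 */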
static void
qla_init_locked(qla_host_t *ha)
{
        struct ifnet *ifp = ha->ifp;

        qla_stop(ha);

        if (qla_alloc_xmt_bufs(ha) != 0)
                return;

        qla_confirm_9kb_enable(ha);

        if (qla_alloc_rcv_bufs(ha) != 0)
                return;

        bcopy(IF_LLADDR(ha->ifp), ha->hw.mac_addr, ETHER_ADDR_LEN);

        ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_TSO;
        ifp->if_hwassist |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6;

        ha->stop_rcv = 0;
        if (ql_init_hw_if(ha) == 0) {
                ifp = ha->ifp;
                ifp->if_drv_flags |= IFF_DRV_RUNNING;
                ha->qla_watchdog_pause = 0;
                ha->hw_vlan_tx_frames = 0;
                ha->tx_tso_frames = 0;
                ha->qla_interface_up = 1;
                ql_update_link_state(ha);
        }

        return;
}

static void
qla_init(void *arg)
{
        qla_host_t *ha;

        ha = (qla_host_t *)arg;

        QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

        if (QLA_LOCK(ha, __func__, -1, 0) != 0)
                return;

        qla_init_locked(ha);

        QLA_UNLOCK(ha, __func__);

        QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
}

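/*
 * Name:        qla_set_multi
 * Function:    Harvests up to Q8_MAX_NUM_MULTICAST_ADDRS link-layer
 *              multicast addresses from the ifnet and programs (add_multi)
 *              or reprograms the hardware multicast filter.
 */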
static int
qla_set_multi(qla_host_t *ha, uint32_t add_multi)
{
        uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN];
        struct ifmultiaddr *ifma;
        int mcnt = 0;
        struct ifnet *ifp = ha->ifp;
        int ret = 0;

        if_maddr_rlock(ifp);

        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {

                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;

                if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS)
                        break;

                bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
                        &mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN);

                mcnt++;
        }

        if_maddr_runlock(ifp);

        if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
                QLA_LOCK_NO_SLEEP) != 0)
                return (-1);

        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {

                if (!add_multi) {
                        ret = qla_hw_del_all_mcast(ha);

                        if (ret)
                                device_printf(ha->pci_dev,
                                        "%s: qla_hw_del_all_mcast() failed\n",
                                        __func__);
                }

                if (!ret)
                        ret = ql_hw_set_multi(ha, mta, mcnt, 1);

        }

        QLA_UNLOCK(ha, __func__);

        return (ret);
}

static int
qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
        int ret = 0;
        struct ifreq *ifr = (struct ifreq *)data;
        struct ifaddr *ifa = (struct ifaddr *)data;
        qla_host_t *ha;

        ha = (qla_host_t *)ifp->if_softc;

        switch (cmd) {
        case SIOCSIFADDR:
                QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n",
                        __func__, cmd));

                if (ifa->ifa_addr->sa_family == AF_INET) {

                        ret = QLA_LOCK(ha, __func__,
                                        QLA_LOCK_DEFAULT_MS_TIMEOUT,
                                        QLA_LOCK_NO_SLEEP);
                        if (ret)
                                break;

                        ifp->if_flags |= IFF_UP;

                        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                                qla_init_locked(ha);
                        }

                        QLA_UNLOCK(ha, __func__);
                        QL_DPRINT4(ha, (ha->pci_dev,
                                "%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
                                __func__, cmd,
                                ntohl(IA_SIN(ifa)->sin_addr.s_addr)));

                        arp_ifinit(ifp, ifa);
                } else {
                        ether_ioctl(ifp, cmd, data);
                }
                break;

        case SIOCSIFMTU:
                QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n",
                        __func__, cmd));

                if (ifr->ifr_mtu > QLA_MAX_MTU) {
                        ret = EINVAL;
                } else {
                        ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
                                        QLA_LOCK_NO_SLEEP);

                        if (ret)
                                break;

                        ifp->if_mtu = ifr->ifr_mtu;
                        ha->max_frame_size =
                                ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

                        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                                qla_init_locked(ha);
                        }

                        if (ifp->if_mtu > ETHERMTU)
                                ha->std_replenish = QL_JUMBO_REPLENISH_THRES;
                        else
                                ha->std_replenish = QL_STD_REPLENISH_THRES;

                        QLA_UNLOCK(ha, __func__);
                }

                break;

        case SIOCSIFFLAGS:
                QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n",
                        __func__, cmd));

                ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
                                QLA_LOCK_NO_SLEEP);

                if (ret)
                        break;

                if (ifp->if_flags & IFF_UP) {

                        ha->max_frame_size = ifp->if_mtu +
                                        ETHER_HDR_LEN + ETHER_CRC_LEN;
                        qla_init_locked(ha);

                        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                                if ((ifp->if_flags ^ ha->if_flags) &
                                        IFF_PROMISC) {
                                        ret = ql_set_promisc(ha);
                                } else if ((ifp->if_flags ^ ha->if_flags) &
                                        IFF_ALLMULTI) {
                                        ret = ql_set_allmulti(ha);
                                }
                        }
                } else {
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                                qla_stop(ha);
                        ha->if_flags = ifp->if_flags;
                }

                QLA_UNLOCK(ha, __func__);
                break;

        case SIOCADDMULTI:
                QL_DPRINT4(ha, (ha->pci_dev,
                        "%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd));

                if (qla_set_multi(ha, 1))
                        ret = EINVAL;
                break;

        case SIOCDELMULTI:
                QL_DPRINT4(ha, (ha->pci_dev,
                        "%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd));

                if (qla_set_multi(ha, 0))
                        ret = EINVAL;
                break;

        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                QL_DPRINT4(ha, (ha->pci_dev,
                        "%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n",
                        __func__, cmd));
                ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
                break;

        case SIOCSIFCAP:
        {
                int mask = ifr->ifr_reqcap ^ ifp->if_capenable;

                QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n",
                        __func__, cmd));

                if (mask & IFCAP_HWCSUM)
                        ifp->if_capenable ^= IFCAP_HWCSUM;
                if (mask & IFCAP_TSO4)
                        ifp->if_capenable ^= IFCAP_TSO4;
                if (mask & IFCAP_TSO6)
                        ifp->if_capenable ^= IFCAP_TSO6;
                if (mask & IFCAP_VLAN_HWTAGGING)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
                if (mask & IFCAP_VLAN_HWTSO)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
                if (mask & IFCAP_LRO)
                        ifp->if_capenable ^= IFCAP_LRO;

                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
                                QLA_LOCK_NO_SLEEP);

                        if (ret)
                                break;

                        qla_init_locked(ha);

                        QLA_UNLOCK(ha, __func__);

                }
                VLAN_CAPABILITIES(ifp);
                break;
        }

        default:
                QL_DPRINT4(ha, (ha->pci_dev, "%s: default (0x%lx)\n",
                        __func__, cmd));
                ret = ether_ioctl(ifp, cmd, data);
                break;
        }

        return (ret);
}

static int
qla_media_change(struct ifnet *ifp)
{
        qla_host_t *ha;
        struct ifmedia *ifm;
        int ret = 0;

        ha = (qla_host_t *)ifp->if_softc;

        QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

        ifm = &ha->media;

        if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
                ret = EINVAL;

        QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));

        return (ret);
}

static void
qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
        qla_host_t *ha;

        ha = (qla_host_t *)ifp->if_softc;

        QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

        ifmr->ifm_status = IFM_AVALID;
        ifmr->ifm_active = IFM_ETHER;

        ql_update_link_state(ha);
        if (ha->hw.link_up) {
                ifmr->ifm_status |= IFM_ACTIVE;
                ifmr->ifm_active |= (IFM_FDX | qla_get_optics(ha));
        }

        QL_DPRINT2(ha, (ha->pci_dev, "%s: exit (%s)\n", __func__,
                (ha->hw.link_up ? "link_up" : "link_down")));

        return;
}

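/*
 * Name:        qla_send
 * Function:    Maps an mbuf chain for DMA and hands it to the hardware Tx
 *              ring. If the chain has too many segments (EFBIG) it is
 *              defragmented once and the load retried; on unrecoverable
 *              errors the mbuf is freed and *m_headp cleared.
 */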
static int
qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx,
        uint32_t iscsi_pdu)
{
        bus_dma_segment_t       segs[QLA_MAX_SEGMENTS];
        bus_dmamap_t            map;
        int                     nsegs;
        int                     ret = -1;
        uint32_t                tx_idx;
        struct mbuf             *m_head = *m_headp;

        QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__));

        tx_idx = ha->hw.tx_cntxt[txr_idx].txr_next;

        if (NULL != ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head) {
                QL_ASSERT(ha, 0, ("%s [%d]: txr_idx = %d tx_idx = %d "\
                        "mbuf = %p\n", __func__, __LINE__, txr_idx, tx_idx,\
                        ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head));
                if (m_head)
                        m_freem(m_head);
                *m_headp = NULL;
                return (ret);
        }

        map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map;

        ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
                        BUS_DMA_NOWAIT);

        if (ret == EFBIG) {

                struct mbuf *m;

                QL_DPRINT8(ha, (ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
                        m_head->m_pkthdr.len));

                m = m_defrag(m_head, M_NOWAIT);
                if (m == NULL) {
                        ha->err_tx_defrag++;
                        m_freem(m_head);
                        *m_headp = NULL;
                        device_printf(ha->pci_dev,
                                "%s: m_defrag() = NULL [%d]\n",
                                __func__, ret);
                        return (ENOBUFS);
                }
                m_head = m;
                *m_headp = m_head;

                if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
                                        segs, &nsegs, BUS_DMA_NOWAIT))) {

                        ha->err_tx_dmamap_load++;

                        device_printf(ha->pci_dev,
                                "%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n",
                                __func__, ret, m_head->m_pkthdr.len);

                        if (ret != ENOMEM) {
                                m_freem(m_head);
                                *m_headp = NULL;
                        }
                        return (ret);
                }

        } else if (ret) {

                ha->err_tx_dmamap_load++;

                device_printf(ha->pci_dev,
                        "%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n",
                        __func__, ret, m_head->m_pkthdr.len);

                if (ret != ENOMEM) {
                        m_freem(m_head);
                        *m_headp = NULL;
                }
                return (ret);
        }

        QL_ASSERT(ha, (nsegs != 0), ("qla_send: empty packet"));

        bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);

        if (!(ret = ql_hw_send(ha, segs, nsegs, tx_idx, m_head, txr_idx,
                                iscsi_pdu))) {
                ha->tx_ring[txr_idx].count++;
                if (iscsi_pdu)
                        ha->tx_ring[txr_idx].iscsi_pkt_count++;
                ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head = m_head;
        } else {
                bus_dmamap_unload(ha->tx_tag, map);
                if (ret == EINVAL) {
                        if (m_head)
                                m_freem(m_head);
                        *m_headp = NULL;
                }
        }

        QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__));
        return (ret);
}

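/*
 * Name:        qla_alloc_tx_br
 * Function:    Sets up the per-ring transmit mutex and buf_ring used by the
 *              multiqueue transmit path (qla_transmit()/qla_fp_taskqueue()).
 */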
static int
qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *fp)
{
        snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
                "qla%d_fp%d_tx_mq_lock", ha->pci_func, fp->txr_idx);

        mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);

        fp->tx_br = buf_ring_alloc(NUM_TX_DESCRIPTORS, M_DEVBUF,
                                M_NOWAIT, &fp->tx_mtx);
        if (fp->tx_br == NULL) {
                QL_DPRINT1(ha, (ha->pci_dev, "buf_ring_alloc failed for "
                        "fp[%d, %d]\n", ha->pci_func, fp->txr_idx));
                return (ENOMEM);
        }
        return (0);
}

static void
qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *fp)
{
        struct mbuf *mp;
        struct ifnet *ifp = ha->ifp;

        if (mtx_initialized(&fp->tx_mtx)) {

                if (fp->tx_br != NULL) {

                        mtx_lock(&fp->tx_mtx);

                        while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
                                m_freem(mp);
                        }

                        mtx_unlock(&fp->tx_mtx);

                        buf_ring_free(fp->tx_br, M_DEVBUF);
                        fp->tx_br = NULL;
                }
                mtx_destroy(&fp->tx_mtx);
        }
        return;
}

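/*
 * Name:        qla_fp_taskqueue
 * Function:    Fast-path task for one ring pair: processes up to 64 receive
 *              completions per pass via ql_rcv_isr(), reaps transmit
 *              completions and drains the buf_ring through qla_send(). The
 *              task re-enqueues itself while work remains and re-enables the
 *              ring interrupt once it runs dry.
 */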
static void
qla_fp_taskqueue(void *context, int pending)
{
        qla_tx_fp_t *fp;
        qla_host_t *ha;
        struct ifnet *ifp;
        struct mbuf  *mp = NULL;
        int ret = 0;
        uint32_t txr_idx;
        uint32_t iscsi_pdu = 0;
        uint32_t rx_pkts_left = -1;

        fp = context;

        if (fp == NULL)
                return;

        ha = (qla_host_t *)fp->ha;

        ifp = ha->ifp;

        txr_idx = fp->txr_idx;

        mtx_lock(&fp->tx_mtx);

        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING) || (!ha->hw.link_up)) {
                mtx_unlock(&fp->tx_mtx);
                goto qla_fp_taskqueue_exit;
        }

        while (rx_pkts_left && !ha->stop_rcv &&
                (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                rx_pkts_left = ql_rcv_isr(ha, fp->txr_idx, 64);

#ifdef QL_ENABLE_ISCSI_TLV
                ql_hw_tx_done_locked(ha, fp->txr_idx);
                ql_hw_tx_done_locked(ha, (fp->txr_idx + (ha->hw.num_tx_rings >> 1)));
#else
                ql_hw_tx_done_locked(ha, fp->txr_idx);
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */

                mp = drbr_peek(ifp, fp->tx_br);

                while (mp != NULL) {

                        if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) {
#ifdef QL_ENABLE_ISCSI_TLV
                                if (ql_iscsi_pdu(ha, mp) == 0) {
                                        txr_idx = txr_idx +
                                                (ha->hw.num_tx_rings >> 1);
                                        iscsi_pdu = 1;
                                } else {
                                        iscsi_pdu = 0;
                                        txr_idx = fp->txr_idx;
                                }
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
                        }

                        ret = qla_send(ha, &mp, txr_idx, iscsi_pdu);

                        if (ret) {
                                if (mp != NULL)
                                        drbr_putback(ifp, fp->tx_br, mp);
                                else
                                        drbr_advance(ifp, fp->tx_br);

                                mtx_unlock(&fp->tx_mtx);

                                goto qla_fp_taskqueue_exit0;
                        } else {
                                drbr_advance(ifp, fp->tx_br);
                        }

                        /* Send a copy of the frame to the BPF listener */
                        ETHER_BPF_MTAP(ifp, mp);
                        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
                                break;

                        mp = drbr_peek(ifp, fp->tx_br);
                }
        }
        mtx_unlock(&fp->tx_mtx);

qla_fp_taskqueue_exit0:

        if (rx_pkts_left || ((mp != NULL) && ret)) {
                taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
        } else {
                if (!ha->stop_rcv) {
                        QL_ENABLE_INTERRUPTS(ha, fp->txr_idx);
                }
        }

qla_fp_taskqueue_exit:

        QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret));
        return;
}

static int
qla_create_fp_taskqueues(qla_host_t *ha)
{
        int     i;
        char    tq_name[32];

        for (i = 0; i < ha->hw.num_sds_rings; i++) {

                qla_tx_fp_t *fp = &ha->tx_fp[i];

                bzero(tq_name, sizeof (tq_name));
                snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i);

                TASK_INIT(&fp->fp_task, 0, qla_fp_taskqueue, fp);

                fp->fp_taskqueue = taskqueue_create_fast(tq_name, M_NOWAIT,
                                        taskqueue_thread_enqueue,
                                        &fp->fp_taskqueue);

                if (fp->fp_taskqueue == NULL)
                        return (-1);

                taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
                        tq_name);

                QL_DPRINT1(ha, (ha->pci_dev, "%s: %p\n", __func__,
                        fp->fp_taskqueue));
        }

        return (0);
}

static void
qla_destroy_fp_taskqueues(qla_host_t *ha)
{
        int     i;

        for (i = 0; i < ha->hw.num_sds_rings; i++) {

                qla_tx_fp_t *fp = &ha->tx_fp[i];

                if (fp->fp_taskqueue != NULL) {
                        taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
                        taskqueue_free(fp->fp_taskqueue);
                        fp->fp_taskqueue = NULL;
                }
        }
        return;
}

static void
qla_drain_fp_taskqueues(qla_host_t *ha)
{
        int     i;

        for (i = 0; i < ha->hw.num_sds_rings; i++) {
                qla_tx_fp_t *fp = &ha->tx_fp[i];

                if (fp->fp_taskqueue != NULL) {
                        taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
                }
        }
        return;
}

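/*
 * Name:        qla_transmit
 * Function:    if_transmit entry point. Selects a Tx fast path from the
 *              mbuf's RSS flowid, enqueues the frame on that ring's buf_ring
 *              and kicks the corresponding fast-path taskqueue.
 */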
1536 static int
1537 qla_transmit(struct ifnet *ifp, struct mbuf  *mp)
1538 {
1539         qla_host_t *ha = (qla_host_t *)ifp->if_softc;
1540         qla_tx_fp_t *fp;
1541         int rss_id = 0;
1542         int ret = 0;
1543
1544         QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1545
1546 #if __FreeBSD_version >= 1100000
1547         if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE)
1548 #else
1549         if (mp->m_flags & M_FLOWID)
1550 #endif
1551                 rss_id = (mp->m_pkthdr.flowid & Q8_RSS_IND_TBL_MAX_IDX) %
1552                                         ha->hw.num_sds_rings;
1553         fp = &ha->tx_fp[rss_id];
1554
1555         if (fp->tx_br == NULL) {
1556                 ret = EINVAL;
1557                 goto qla_transmit_exit;
1558         }
1559
1560         if (mp != NULL) {
1561                 ret = drbr_enqueue(ifp, fp->tx_br, mp);
1562         }
1563
1564         if (fp->fp_taskqueue != NULL)
1565                 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
1566
1567         /* drbr_enqueue() frees the mbuf on failure; return its status. */
1568
1569 qla_transmit_exit:
1570
1571         QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret));
1572         return ret;
1573 }
1574
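/*
 * if_qflush entry point: free every mbuf still queued on the per-ring
 * buf_rings.
 */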
1575 static void
1576 qla_qflush(struct ifnet *ifp)
1577 {
1578         int                     i;
1579         qla_tx_fp_t             *fp;
1580         struct mbuf             *mp;
1581         qla_host_t              *ha;
1582
1583         ha = (qla_host_t *)ifp->if_softc;
1584
1585         QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1586
1587         for (i = 0; i < ha->hw.num_sds_rings; i++) {
1588
1589                 fp = &ha->tx_fp[i];
1590
1591                 if (fp == NULL)
1592                         continue;
1593
1594                 if (fp->tx_br) {
1595                         mtx_lock(&fp->tx_mtx);
1596
1597                         while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
1598                                 m_freem(mp);
1599                         }
1600                         mtx_unlock(&fp->tx_mtx);
1601                 }
1602         }
1603         QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
1604
1605         return;
1606 }
1607
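/*
 * Bring the interface down: clear IFF_DRV_RUNNING, pause the watchdog,
 * wait out in-flight transmits and fast-path tasks, then tear down the
 * hardware interface and release the Tx/Rx buffers.
 */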
1608 static void
1609 qla_stop(qla_host_t *ha)
1610 {
1611         struct ifnet *ifp = ha->ifp;
1612         device_t        dev;
1613         int i = 0;
1614
1615         dev = ha->pci_dev;
1616
1617         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1618         ha->qla_watchdog_pause = 1;
1619
1620         for (i = 0; i < ha->hw.num_sds_rings; i++) {
1621                 qla_tx_fp_t *fp;
1622
1623                 fp = &ha->tx_fp[i];
1624
1625                 if (fp == NULL)
1626                         continue;
1627
1628                 if (fp->tx_br != NULL) {
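                        /*
                         * Lock/unlock is used as a barrier: any transmit
                         * currently holding tx_mtx completes before the
                         * teardown continues.
                         */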
1629                         mtx_lock(&fp->tx_mtx);
1630                         mtx_unlock(&fp->tx_mtx);
1631                 }
1632         }
1633
1634         while (!ha->qla_watchdog_paused)
1635                 qla_mdelay(__func__, 1);
1636
1637         ha->qla_interface_up = 0;
1638
1639         qla_drain_fp_taskqueues(ha);
1640
1641         ql_del_hw_if(ha);
1642
1643         qla_free_xmt_bufs(ha);
1644         qla_free_rcv_bufs(ha);
1645
1646         return;
1647 }
1648
1649 /*
1650  * Buffer Management Functions for Transmit and Receive Rings
1651  */
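/*
 * Create the transmit DMA tag (frames up to QLA_MAX_TSO_FRAME_SIZE
 * bytes in at most QLA_MAX_SEGMENTS page-sized segments) and a DMA map
 * for every transmit descriptor of every transmit ring.
 */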
1652 static int
1653 qla_alloc_xmt_bufs(qla_host_t *ha)
1654 {
1655         int ret = 0;
1656         uint32_t i, j;
1657         qla_tx_buf_t *txb;
1658
1659         if (bus_dma_tag_create(NULL,    /* parent */
1660                 1, 0,    /* alignment, bounds */
1661                 BUS_SPACE_MAXADDR,       /* lowaddr */
1662                 BUS_SPACE_MAXADDR,       /* highaddr */
1663                 NULL, NULL,      /* filter, filterarg */
1664                 QLA_MAX_TSO_FRAME_SIZE,     /* maxsize */
1665                 QLA_MAX_SEGMENTS,        /* nsegments */
1666                 PAGE_SIZE,        /* maxsegsize */
1667                 BUS_DMA_ALLOCNOW,        /* flags */
1668                 NULL,    /* lockfunc */
1669                 NULL,    /* lockfuncarg */
1670                 &ha->tx_tag)) {
1671                 device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n",
1672                         __func__);
1673                 return (ENOMEM);
1674         }
1675
1676         for (i = 0; i < ha->hw.num_tx_rings; i++) {
1677                 bzero((void *)ha->tx_ring[i].tx_buf,
1678                         (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
1679         }
1680
1681         for (j = 0; j < ha->hw.num_tx_rings; j++) {
1682                 for (i = 0; i < NUM_TX_DESCRIPTORS; i++) {
1683
1684                         txb = &ha->tx_ring[j].tx_buf[i];
1685
1686                         if ((ret = bus_dmamap_create(ha->tx_tag,
1687                                         BUS_DMA_NOWAIT, &txb->map))) {
1688
1689                                 ha->err_tx_dmamap_create++;
1690                                 device_printf(ha->pci_dev,
1691                                         "%s: bus_dmamap_create failed[%d]\n",
1692                                         __func__, ret);
1693
1694                                 qla_free_xmt_bufs(ha);
1695
1696                                 return (ret);
1697                         }
1698                 }
1699         }
1700
1701         return 0;
1702 }
1703
1704 /*
1705  * Release the mbuf after it has been sent on the wire.
1706  */
1707 static void
1708 qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb)
1709 {
1710         QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1711
1712         if (txb->m_head) {
1713                 bus_dmamap_sync(ha->tx_tag, txb->map,
1714                         BUS_DMASYNC_POSTWRITE);
1715
1716                 bus_dmamap_unload(ha->tx_tag, txb->map);
1717
1718                 m_freem(txb->m_head);
1719                 txb->m_head = NULL;
1720
1721                 bus_dmamap_destroy(ha->tx_tag, txb->map);
1722                 txb->map = NULL;
1723         }
1724
1725         if (txb->map) {
1726                 bus_dmamap_unload(ha->tx_tag, txb->map);
1727                 bus_dmamap_destroy(ha->tx_tag, txb->map);
1728                 txb->map = NULL;
1729         }
1730
1731         QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
1732 }
1733
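/*
 * Release the mbuf and DMA map of every transmit buffer, then destroy
 * the transmit DMA tag.
 */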
1734 static void
1735 qla_free_xmt_bufs(qla_host_t *ha)
1736 {
1737         int             i, j;
1738
1739         for (j = 0; j < ha->hw.num_tx_rings; j++) {
1740                 for (i = 0; i < NUM_TX_DESCRIPTORS; i++)
1741                         qla_clear_tx_buf(ha, &ha->tx_ring[j].tx_buf[i]);
1742         }
1743
1744         if (ha->tx_tag != NULL) {
1745                 bus_dma_tag_destroy(ha->tx_tag);
1746                 ha->tx_tag = NULL;
1747         }
1748
1749         for (i = 0; i < ha->hw.num_tx_rings; i++) {
1750                 bzero((void *)ha->tx_ring[i].tx_buf,
1751                         (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
1752         }
1753         return;
1754 }
1755
1756
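/*
 * Create a DMA map for every descriptor of every receive (RDS) ring,
 * then attach an mbuf cluster to each descriptor and program its
 * physical address into the hardware receive ring.
 */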
1757 static int
1758 qla_alloc_rcv_std(qla_host_t *ha)
1759 {
1760         int             i, j, k, r, ret = 0;
1761         qla_rx_buf_t    *rxb;
1762         qla_rx_ring_t   *rx_ring;
1763
1764         for (r = 0; r < ha->hw.num_rds_rings; r++) {
1765
1766                 rx_ring = &ha->rx_ring[r];
1767
1768                 for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
1769
1770                         rxb = &rx_ring->rx_buf[i];
1771
1772                         ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT,
1773                                         &rxb->map);
1774
1775                         if (ret) {
1776                                 device_printf(ha->pci_dev,
1777                                         "%s: dmamap[%d, %d] failed\n",
1778                                         __func__, r, i);
1779
1780                                 for (k = 0; k < r; k++) {
1781                                         for (j = 0; j < NUM_RX_DESCRIPTORS;
1782                                                 j++) {
1783                                                 rxb = &ha->rx_ring[k].rx_buf[j];
1784                                                 bus_dmamap_destroy(ha->rx_tag,
1785                                                         rxb->map);
1786                                         }
1787                                 }
1788
1789                                 for (j = 0; j < i; j++) {
1790                                         bus_dmamap_destroy(ha->rx_tag,
1791                                                 rx_ring->rx_buf[j].map);
1792                                 }
1793                                 goto qla_alloc_rcv_std_err;
1794                         }
1795                 }
1796         }
1797
1798         qla_init_hw_rcv_descriptors(ha);
1799
1800
1801         for (r = 0; r < ha->hw.num_rds_rings; r++) {
1802
1803                 rx_ring = &ha->rx_ring[r];
1804
1805                 for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
1806                         rxb = &rx_ring->rx_buf[i];
1807                         rxb->handle = i;
1808                         if (!(ret = ql_get_mbuf(ha, rxb, NULL))) {
1809                                 /*
1810                                  * Set the physical address in the
1811                                  * corresponding descriptor entry of the
1812                                  * receive ring/queue for the HBA.
1813                                  */
1814                                 qla_set_hw_rcv_desc(ha, r, i, rxb->handle,
1815                                         rxb->paddr,
1816                                         (rxb->m_head)->m_pkthdr.len);
1817                         } else {
1818                                 device_printf(ha->pci_dev,
1819                                         "%s: ql_get_mbuf [%d, %d] failed\n",
1820                                         __func__, r, i);
1821                                 bus_dmamap_destroy(ha->rx_tag, rxb->map);
1822                                 goto qla_alloc_rcv_std_err;
1823                         }
1824                 }
1825         }
1826         return 0;
1827
1828 qla_alloc_rcv_std_err:
1829         return (-1);
1830 }
1831
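/*
 * Unload and destroy the DMA map, and free the mbuf, of every receive
 * descriptor.
 */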
1832 static void
1833 qla_free_rcv_std(qla_host_t *ha)
1834 {
1835         int             i, r;
1836         qla_rx_buf_t    *rxb;
1837
1838         for (r = 0; r < ha->hw.num_rds_rings; r++) {
1839                 for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
1840                         rxb = &ha->rx_ring[r].rx_buf[i];
1841                         if (rxb->m_head != NULL) {
1842                                 bus_dmamap_unload(ha->rx_tag, rxb->map);
1843                                 bus_dmamap_destroy(ha->rx_tag, rxb->map);
1844                                 m_freem(rxb->m_head);
1845                                 rxb->m_head = NULL;
1846                         }
1847                 }
1848         }
1849         return;
1850 }
1851
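/*
 * Create the receive DMA tag (a single segment of up to MJUM9BYTES, to
 * accommodate 9KB jumbo clusters), reset the status (SDS) ring state
 * and populate the receive rings.
 */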
1852 static int
1853 qla_alloc_rcv_bufs(qla_host_t *ha)
1854 {
1855         int             i, ret = 0;
1856
1857         if (bus_dma_tag_create(NULL,    /* parent */
1858                         1, 0,    /* alignment, bounds */
1859                         BUS_SPACE_MAXADDR,       /* lowaddr */
1860                         BUS_SPACE_MAXADDR,       /* highaddr */
1861                         NULL, NULL,      /* filter, filterarg */
1862                         MJUM9BYTES,     /* maxsize */
1863                         1,        /* nsegments */
1864                         MJUM9BYTES,        /* maxsegsize */
1865                         BUS_DMA_ALLOCNOW,        /* flags */
1866                         NULL,    /* lockfunc */
1867                         NULL,    /* lockfuncarg */
1868                         &ha->rx_tag)) {
1869
1870                 device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
1871                         __func__);
1872
1873                 return (ENOMEM);
1874         }
1875
1876         bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS));
1877
1878         for (i = 0; i < ha->hw.num_sds_rings; i++) {
1879                 ha->hw.sds[i].sdsr_next = 0;
1880                 ha->hw.sds[i].rxb_free = NULL;
1881                 ha->hw.sds[i].rx_free = 0;
1882         }
1883
1884         ret = qla_alloc_rcv_std(ha);
1885
1886         return (ret);
1887 }
1888
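/*
 * Tear down the receive buffers and DMA tag and reset the status (SDS)
 * ring state.
 */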
1889 static void
1890 qla_free_rcv_bufs(qla_host_t *ha)
1891 {
1892         int             i;
1893
1894         qla_free_rcv_std(ha);
1895
1896         if (ha->rx_tag != NULL) {
1897                 bus_dma_tag_destroy(ha->rx_tag);
1898                 ha->rx_tag = NULL;
1899         }
1900
1901         bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS));
1902
1903         for (i = 0; i < ha->hw.num_sds_rings; i++) {
1904                 ha->hw.sds[i].sdsr_next = 0;
1905                 ha->hw.sds[i].rxb_free = NULL;
1906                 ha->hw.sds[i].rx_free = 0;
1907         }
1908
1909         return;
1910 }
1911
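/*
 * Attach an mbuf cluster (9KB jumbo or standard, depending on
 * enable_9kb) to a receive buffer and DMA-map it.  If nmp is NULL a
 * fresh cluster is allocated; otherwise the caller's mbuf is recycled.
 */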
1912 int
1913 ql_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp)
1914 {
1915         register struct mbuf *mp = nmp;
1916         struct ifnet            *ifp;
1917         int                     ret = 0;
1918         uint32_t                offset;
1919         bus_dma_segment_t       segs[1];
1920         int                     nsegs, mbuf_size;
1921
1922         QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1923
1924         ifp = ha->ifp;
1925
1926         if (ha->hw.enable_9kb)
1927                 mbuf_size = MJUM9BYTES;
1928         else
1929                 mbuf_size = MCLBYTES;
1930
1931         if (mp == NULL) {
1932
1933                 if (QL_ERR_INJECT(ha, INJCT_M_GETCL_M_GETJCL_FAILURE))
1934                         return(-1);
1935
1936                 if (ha->hw.enable_9kb)
1937                         mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, mbuf_size);
1938                 else
1939                         mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1940
1941                 if (mp == NULL) {
1942                         ha->err_m_getcl++;
1943                         ret = ENOBUFS;
1944                         device_printf(ha->pci_dev,
1945                                         "%s: m_getcl failed\n", __func__);
1946                         goto exit_ql_get_mbuf;
1947                 }
1948                 mp->m_len = mp->m_pkthdr.len = mbuf_size;
1949         } else {
1950                 mp->m_len = mp->m_pkthdr.len = mbuf_size;
1951                 mp->m_data = mp->m_ext.ext_buf;
1952                 mp->m_next = NULL;
1953         }
1954
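        /*
         * Advance m_data to the next 8-byte boundary; the receive
         * hardware presumably requires 8-byte aligned buffers.
         */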
1955         offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL);
1956         if (offset) {
1957                 offset = 8 - offset;
1958                 m_adj(mp, offset);
1959         }
1960
1961         /*
1962          * Using memory from the mbuf cluster pool, invoke the bus_dma
1963          * machinery to arrange the memory mapping.
1964          */
1965         ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, rxb->map,
1966                         mp, segs, &nsegs, BUS_DMA_NOWAIT);
1967         rxb->paddr = (ret == 0) ? segs[0].ds_addr : 0; /* segs[] undefined on error */
1968
1969         if (ret || !rxb->paddr || (nsegs != 1)) {
1970                 m_free(mp);
1971                 rxb->m_head = NULL;
1972                 device_printf(ha->pci_dev,
1973                         "%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
1974                         __func__, ret, (long long unsigned int)rxb->paddr,
1975                         nsegs);
1976                 ret = -1;
1977                 goto exit_ql_get_mbuf;
1978         }
1979         rxb->m_head = mp;
1980         bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD);
1981
1982 exit_ql_get_mbuf:
1983         QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret));
1984         return (ret);
1985 }
1986
1987
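/*
 * Locate the other PCI function of this adapter: a sibling device in
 * the same slot with the same device id.  The peer is used below to
 * coordinate resets between the two ports.
 */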
1988 static void
1989 qla_get_peer(qla_host_t *ha)
1990 {
1991         device_t *peers;
1992         int count, i, slot;
1993         int my_slot = pci_get_slot(ha->pci_dev);
1994
1995         if (device_get_children(device_get_parent(ha->pci_dev), &peers, &count))
1996                 return;
1997
1998         for (i = 0; i < count; i++) {
1999                 slot = pci_get_slot(peers[i]);
2000
2001                 if ((slot >= 0) && (slot == my_slot) &&
2002                         (pci_get_device(peers[i]) ==
2003                                 pci_get_device(ha->pci_dev))) {
2004                         if (ha->pci_dev != peers[i]) 
2005                                 ha->peer_dev = peers[i];
2006                 }
2007         }
        free(peers, M_TEMP);    /* device_get_children() allocates the array */
2008 }
2009
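/*
 * Post a message (QL_PEER_MSG_RESET or QL_PEER_MSG_ACK) into the peer
 * function's softc; the peer polls msg_from_peer during recovery.
 */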
2010 static void
2011 qla_send_msg_to_peer(qla_host_t *ha, uint32_t msg_to_peer)
2012 {
2013         qla_host_t *ha_peer;
2014         
2015         if (ha->peer_dev) {
2016                 if ((ha_peer = device_get_softc(ha->peer_dev)) != NULL) {
2017
2018                         ha_peer->msg_from_peer = msg_to_peer;
2019                 }
2020         }
2021 }
2022
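/*
 * Taskqueue handler for error recovery: quiesce the interface, use the
 * peer messaging above to synchronize the chip reset with the other
 * PCI function (the even-numbered function leads the handshake and
 * takes the optional minidump), re-initialize the hardware, then
 * reallocate the Tx/Rx buffers and bring the interface back up before
 * restarting the watchdog.
 */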
2023 static void
2024 qla_error_recovery(void *context, int pending)
2025 {
2026         qla_host_t *ha = context;
2027         uint32_t msecs_100 = 100;
2028         struct ifnet *ifp = ha->ifp;
2029         int i = 0;
2030
2031         device_printf(ha->pci_dev, "%s: \n", __func__);
2032         ha->hw.imd_compl = 1;
2033
2034         if (QLA_LOCK(ha, __func__, -1, 0) != 0)
2035                 return;
2036
2037         device_printf(ha->pci_dev, "%s: enter\n", __func__);
2038
2039         if (ha->qla_interface_up) {
2040
2041                 qla_mdelay(__func__, 300);
2042
2043                 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2044
2045                 for (i = 0; i < ha->hw.num_sds_rings; i++) {
2046                         qla_tx_fp_t *fp;
2047
2048                         fp = &ha->tx_fp[i];
2049
2050                         if (fp == NULL)
2051                                 continue;
2052
2053                         if (fp->tx_br != NULL) {
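                                /*
                                 * Barrier: wait for an in-progress
                                 * transmit on this ring to drop tx_mtx.
                                 */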
2054                                 mtx_lock(&fp->tx_mtx);
2055                                 mtx_unlock(&fp->tx_mtx);
2056                         }
2057                 }
2058         }
2059
2060
2061         qla_drain_fp_taskqueues(ha);
2062
2063         if ((ha->pci_func & 0x1) == 0) {
2064
2065                 if (!ha->msg_from_peer) {
2066                         qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET);
2067
2068                         while ((ha->msg_from_peer != QL_PEER_MSG_ACK) &&
2069                                 msecs_100--)
2070                                 qla_mdelay(__func__, 100);
2071                 }
2072
2073                 ha->msg_from_peer = 0;
2074
2075                 if (ha->enable_minidump)
2076                         ql_minidump(ha);
2077
2078                 (void) ql_init_hw(ha);
2079
2080                 if (ha->qla_interface_up) {
2081                         qla_free_xmt_bufs(ha);
2082                         qla_free_rcv_bufs(ha);
2083                 }
2084
2085                 qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK);
2086
2087         } else {
2088                 if (ha->msg_from_peer == QL_PEER_MSG_RESET) {
2089
2090                         ha->msg_from_peer = 0;
2091
2092                         qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK);
2093                 } else {
2094                         qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET);
2095                 }
2096
2097                 while ((ha->msg_from_peer != QL_PEER_MSG_ACK)  && msecs_100--)
2098                         qla_mdelay(__func__, 100);
2099                 ha->msg_from_peer = 0;
2100
2101                 (void) ql_init_hw(ha);
2102
2103                 qla_mdelay(__func__, 1000);
2104
2105                 if (ha->qla_interface_up) {
2106                         qla_free_xmt_bufs(ha);
2107                         qla_free_rcv_bufs(ha);
2108                 }
2109         }
2110
2111         if (ha->qla_interface_up) {
2112
2113                 if (qla_alloc_xmt_bufs(ha) != 0) {
2114                         goto qla_error_recovery_exit;
2115                 }
2116                 qla_confirm_9kb_enable(ha);
2117
2118                 if (qla_alloc_rcv_bufs(ha) != 0) {
2119                         goto qla_error_recovery_exit;
2120                 }
2121
2122                 ha->stop_rcv = 0;
2123
2124                 if (ql_init_hw_if(ha) == 0) {
2125                         ifp = ha->ifp;
2126                         ifp->if_drv_flags |= IFF_DRV_RUNNING;
2127                         ha->qla_watchdog_pause = 0;
2128                 }
2129         } else
2130                 ha->qla_watchdog_pause = 0;
2131
2132 qla_error_recovery_exit:
2133
2134         device_printf(ha->pci_dev, "%s: exit\n", __func__);
2135
2136         QLA_UNLOCK(ha, __func__);
2137
2138         callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
2139                 qla_watchdog, ha);
2140         return;
2141 }
2142
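/*
 * Taskqueue handler: process a pending hardware async event under the
 * driver lock.
 */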
2143 static void
2144 qla_async_event(void *context, int pending)
2145 {
2146         qla_host_t *ha = context;
2147
2148         if (QLA_LOCK(ha, __func__, -1, 0) != 0)
2149                 return;
2150
2151         if (ha->async_event) {
2152                 ha->async_event = 0;
2153                 qla_hw_async_event(ha);
2154         }
2155
2156         QLA_UNLOCK(ha, __func__);
2157
2158         return;
2159 }
2160
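/*
 * Taskqueue handler: fetch the latest hardware statistics.
 */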
2161 static void
2162 qla_stats(void *context, int pending)
2163 {
2164         qla_host_t *ha;
2165
2166         ha = context;
2167
2168         ql_get_stats(ha);
2169         return;
2170 }
2171