1 /*
2  * Copyright (c) 2013-2016 Qlogic Corporation
3  * All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions
7  *  are met:
8  *
9  *  1. Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  *  2. Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  *
15  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  *  and ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  *  POSSIBILITY OF SUCH DAMAGE.
26  */
27
28 /*
29  * File: ql_os.c
30  * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
31  */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35
36
37 #include "ql_os.h"
38 #include "ql_hw.h"
39 #include "ql_def.h"
40 #include "ql_inline.h"
41 #include "ql_ver.h"
42 #include "ql_glbl.h"
43 #include "ql_dbg.h"
44 #include <sys/smp.h>
45
46 /*
47  * Some PCI Configuration Space Related Defines
48  */
49
50 #ifndef PCI_VENDOR_QLOGIC
51 #define PCI_VENDOR_QLOGIC       0x1077
52 #endif
53
54 #ifndef PCI_PRODUCT_QLOGIC_ISP8030
55 #define PCI_PRODUCT_QLOGIC_ISP8030      0x8030
56 #endif
57
58 #define PCI_QLOGIC_ISP8030 \
59         ((PCI_PRODUCT_QLOGIC_ISP8030 << 16) | PCI_VENDOR_QLOGIC)
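
/*
 * For the ISP8030 this evaluates to 0x80301077: device id 0x8030 in the
 * upper 16 bits, vendor id 0x1077 in the lower 16 bits.  qla_pci_probe()
 * below builds the same (device << 16) | vendor key from PCI config space
 * and compares it against this constant.
 */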
60
61 /*
62  * static functions
63  */
64 static int qla_alloc_parent_dma_tag(qla_host_t *ha);
65 static void qla_free_parent_dma_tag(qla_host_t *ha);
66 static int qla_alloc_xmt_bufs(qla_host_t *ha);
67 static void qla_free_xmt_bufs(qla_host_t *ha);
68 static int qla_alloc_rcv_bufs(qla_host_t *ha);
69 static void qla_free_rcv_bufs(qla_host_t *ha);
70 static void qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb);
71
72 static void qla_init_ifnet(device_t dev, qla_host_t *ha);
73 static int qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS);
74 static void qla_release(qla_host_t *ha);
75 static void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
76                 int error);
77 static void qla_stop(qla_host_t *ha);
78 static void qla_get_peer(qla_host_t *ha);
79 static void qla_error_recovery(void *context, int pending);
80 static void qla_async_event(void *context, int pending);
81 static void qla_stats(void *context, int pending);
82 static int qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx,
83                 uint32_t iscsi_pdu);
84
85 /*
86  * Hooks to the Operating Systems
87  */
88 static int qla_pci_probe (device_t);
89 static int qla_pci_attach (device_t);
90 static int qla_pci_detach (device_t);
91
92 static void qla_init(void *arg);
93 static int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
94 static int qla_media_change(struct ifnet *ifp);
95 static void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
96
97 static int qla_transmit(struct ifnet *ifp, struct mbuf  *mp);
98 static void qla_qflush(struct ifnet *ifp);
99 static int qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp);
100 static void qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp);
101 static int qla_create_fp_taskqueues(qla_host_t *ha);
102 static void qla_destroy_fp_taskqueues(qla_host_t *ha);
103 static void qla_drain_fp_taskqueues(qla_host_t *ha);
104
105 static device_method_t qla_pci_methods[] = {
106         /* Device interface */
107         DEVMETHOD(device_probe, qla_pci_probe),
108         DEVMETHOD(device_attach, qla_pci_attach),
109         DEVMETHOD(device_detach, qla_pci_detach),
110         { 0, 0 }
111 };
112
113 static driver_t qla_pci_driver = {
114         "ql", qla_pci_methods, sizeof (qla_host_t),
115 };
116
117 static devclass_t qla83xx_devclass;
118
119 DRIVER_MODULE(qla83xx, pci, qla_pci_driver, qla83xx_devclass, 0, 0);
120
121 MODULE_DEPEND(qla83xx, pci, 1, 1, 1);
122 MODULE_DEPEND(qla83xx, ether, 1, 1, 1);
123
124 MALLOC_DEFINE(M_QLA83XXBUF, "qla83xxbuf", "Buffers for qla83xx driver");
125
126 #define QL_STD_REPLENISH_THRES          0
127 #define QL_JUMBO_REPLENISH_THRES        32
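
/*
 * std_replenish starts at QL_STD_REPLENISH_THRES and is switched to
 * QL_JUMBO_REPLENISH_THRES from the SIOCSIFMTU handler whenever the
 * configured MTU exceeds ETHERMTU (see qla_ioctl() below).
 */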
128
129
130 static char dev_str[64];
131 static char ver_str[64];
132
133 /*
134  * Name:        qla_pci_probe
135  * Function:    Validate that the PCI device is a QLogic ISP 83xx (8030) device
136  */
137 static int
138 qla_pci_probe(device_t dev)
139 {
140         switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
141         case PCI_QLOGIC_ISP8030:
142                 snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d",
143                         "Qlogic ISP 83xx PCI CNA Adapter-Ethernet Function",
144                         QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
145                         QLA_VERSION_BUILD);
146                 snprintf(ver_str, sizeof(ver_str), "v%d.%d.%d",
147                         QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
148                         QLA_VERSION_BUILD);
149                 device_set_desc(dev, dev_str);
150                 break;
151         default:
152                 return (ENXIO);
153         }
154
155         if (bootverbose)
156                 printf("%s: %s\n", __func__, dev_str);
157
158         return (BUS_PROBE_DEFAULT);
159 }
160
161 static void
162 qla_add_sysctls(qla_host_t *ha)
163 {
164         device_t dev = ha->pci_dev;
165
166         SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
167                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
168                 OID_AUTO, "version", CTLFLAG_RD,
169                 ver_str, 0, "Driver Version");
170
171         SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
172                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
173                 OID_AUTO, "fw_version", CTLFLAG_RD,
174                 ha->fw_ver_str, 0, "firmware version");
175
176         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
177                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
178                 OID_AUTO, "link_status", CTLTYPE_INT | CTLFLAG_RW,
179                 (void *)ha, 0,
180                 qla_sysctl_get_link_status, "I", "Link Status");
181
182         ha->dbg_level = 0;
183         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
184                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
185                 OID_AUTO, "debug", CTLFLAG_RW,
186                 &ha->dbg_level, ha->dbg_level, "Debug Level");
187
188         ha->enable_minidump = 1;
189         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
190                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
191                 OID_AUTO, "enable_minidump", CTLFLAG_RW,
192                 &ha->enable_minidump, ha->enable_minidump,
193                 "Minidump retrieval is enabled only when this is set");
194
195         ha->std_replenish = QL_STD_REPLENISH_THRES;
196         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
197                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
198                 OID_AUTO, "std_replenish", CTLFLAG_RW,
199                 &ha->std_replenish, ha->std_replenish,
200                 "Threshold for Replenishing Standard Frames");
201
202         SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
203                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
204                 OID_AUTO, "ipv4_lro",
205                 CTLFLAG_RD, &ha->ipv4_lro,
206                 "number of ipv4 lro completions");
207
208         SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
209                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
210                 OID_AUTO, "ipv6_lro",
211                 CTLFLAG_RD, &ha->ipv6_lro,
212                 "number of ipv6 lro completions");
213
214         SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
215                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
216                 OID_AUTO, "tx_tso_frames",
217                 CTLFLAG_RD, &ha->tx_tso_frames,
218                 "number of Tx TSO Frames");
219
220         SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
221                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
222                 OID_AUTO, "hw_vlan_tx_frames",
223                 CTLFLAG_RD, &ha->hw_vlan_tx_frames,
224                 "number of Tx VLAN Frames");
225
226         SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
227                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
228                 OID_AUTO, "hw_lock_failed",
229                 CTLFLAG_RD, &ha->hw_lock_failed,
230                 "number of hw_lock failures");
231
232         return;
233 }
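
/*
 * The nodes above are created under the device's sysctl tree, so with the
 * "ql" driver name they should appear as dev.ql.<unit>.<name>.  For example
 * (illustrative only, unit number assumed):
 *
 *      sysctl dev.ql.0.debug=1          # raise the debug level
 *      sysctl dev.ql.0.link_status=1    # force a link status refresh
 */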
234
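/*
 * Name:        qla_watchdog
 * Function:    Periodic callout.  Checks hardware health and, on failure
 *              or a peer reset request, pauses itself and queues the error
 *              recovery task (unless suppressed via the 0x8000 debug bit);
 *              otherwise it periodically queues the stats and async event
 *              tasks.  Reschedules itself every QLA_WATCHDOG_CALLOUT_TICKS
 *              until qla_watchdog_exit is set.
 */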
235 static void
236 qla_watchdog(void *arg)
237 {
238         qla_host_t *ha = arg;
239         qla_hw_t *hw;
240         struct ifnet *ifp;
241
242         hw = &ha->hw;
243         ifp = ha->ifp;
244
245         if (ha->qla_watchdog_exit) {
246                 ha->qla_watchdog_exited = 1;
247                 return;
248         }
249         ha->qla_watchdog_exited = 0;
250
251         if (!ha->qla_watchdog_pause) {
252                 if (ql_hw_check_health(ha) || ha->qla_initiate_recovery ||
253                         (ha->msg_from_peer == QL_PEER_MSG_RESET)) {
254
255                         if (!(ha->dbg_level & 0x8000)) {
256                                 ha->qla_watchdog_paused = 1;
257                                 ha->qla_watchdog_pause = 1;
258                                 ha->qla_initiate_recovery = 0;
259                                 ha->err_inject = 0;
260                                 device_printf(ha->pci_dev,
261                                         "%s: taskqueue_enqueue(err_task) \n",
262                                         __func__);
263                                 taskqueue_enqueue(ha->err_tq, &ha->err_task);
264                                 return;
265                         }
266
267                 } else if (ha->qla_interface_up) {
268
269                         ha->watchdog_ticks++;
270
271                         if (ha->watchdog_ticks > 1000)
272                                 ha->watchdog_ticks = 0;
273
274                         if (!ha->watchdog_ticks && QL_RUNNING(ifp)) {
275                                 taskqueue_enqueue(ha->stats_tq, &ha->stats_task);
276                         }
277
278                         if (ha->async_event) {
279                                 taskqueue_enqueue(ha->async_event_tq,
280                                         &ha->async_event_task);
281                         }
282
283 #if 0
284                         for (i = 0; ((i < ha->hw.num_sds_rings) &&
285                                         !ha->watchdog_ticks); i++) {
286                                 qla_tx_fp_t *fp = &ha->tx_fp[i];
287
288                                 if (fp->fp_taskqueue != NULL)
289                                         taskqueue_enqueue(fp->fp_taskqueue,
290                                                 &fp->fp_task);
291                         }
292 #endif
293                         ha->qla_watchdog_paused = 0;
294                 } else {
295                         ha->qla_watchdog_paused = 0;
296                 }
297         } else {
298                 ha->qla_watchdog_paused = 1;
299         }
300
301         callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
302                 qla_watchdog, ha);
303 }
304
305 /*
306  * Name:        qla_pci_attach
307  * Function:    Attaches the device to the operating system
308  */
309 static int
310 qla_pci_attach(device_t dev)
311 {
312         qla_host_t *ha = NULL;
313         uint32_t rsrc_len;
314         int i;
315         uint32_t num_rcvq = 0;
316
317         if ((ha = device_get_softc(dev)) == NULL) {
318                 device_printf(dev, "cannot get softc\n");
319                 return (ENOMEM);
320         }
321
322         memset(ha, 0, sizeof (qla_host_t));
323
324         if (pci_get_device(dev) != PCI_PRODUCT_QLOGIC_ISP8030) {
325                 device_printf(dev, "device is not ISP8030\n");
326                 return (ENXIO);
327         }
328
329         ha->pci_func = pci_get_function(dev) & 0x1;
330
331         ha->pci_dev = dev;
332
333         pci_enable_busmaster(dev);
334
335         ha->reg_rid = PCIR_BAR(0);
336         ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
337                                 RF_ACTIVE);
338
339         if (ha->pci_reg == NULL) {
340                 device_printf(dev, "unable to map any ports\n");
341                 goto qla_pci_attach_err;
342         }
343
344         rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
345                                         ha->reg_rid);
346
347         mtx_init(&ha->hw_lock, "qla83xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
348         ha->flags.lock_init = 1;
349
350         qla_add_sysctls(ha);
351
352         ha->hw.num_sds_rings = MAX_SDS_RINGS;
353         ha->hw.num_rds_rings = MAX_RDS_RINGS;
354         ha->hw.num_tx_rings = NUM_TX_RINGS;
355
356         ha->reg_rid1 = PCIR_BAR(2);
357         ha->pci_reg1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
358                         &ha->reg_rid1, RF_ACTIVE);
359
360         ha->msix_count = pci_msix_count(dev);
361
362         if (ha->msix_count < 1 ) {
363                 device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
364                         ha->msix_count);
365                 goto qla_pci_attach_err;
366         }
367
368         if (ha->msix_count < (ha->hw.num_sds_rings + 1)) {
369                 ha->hw.num_sds_rings = ha->msix_count - 1;
370         }
371
372         QL_DPRINT2(ha, (dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
373                 " msix_count 0x%x pci_reg %p pci_reg1 %p\n", __func__, ha,
374                 ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg,
375                 ha->pci_reg1));
376
377         /* initialize hardware */
378         if (ql_init_hw(ha)) {
379                 device_printf(dev, "%s: ql_init_hw failed\n", __func__);
380                 goto qla_pci_attach_err;
381         }
382
383         device_printf(dev, "%s: firmware[%d.%d.%d.%d]\n", __func__,
384                 ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
385                 ha->fw_ver_build);
386         snprintf(ha->fw_ver_str, sizeof(ha->fw_ver_str), "%d.%d.%d.%d",
387                         ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
388                         ha->fw_ver_build);
389
390         if (qla_get_nic_partition(ha, NULL, &num_rcvq)) {
391                 device_printf(dev, "%s: qla_get_nic_partition failed\n",
392                         __func__);
393                 goto qla_pci_attach_err;
394         }
395         device_printf(dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
396                 " msix_count 0x%x pci_reg %p pci_reg1 %p num_rcvq = %d\n",
397                 __func__, ha, ha->pci_func, rsrc_len, ha->msix_count,
398                 ha->pci_reg, ha->pci_reg1, num_rcvq);
399
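
        /*
         * With fewer than 64 MSI-X vectors, or when the NIC partition
         * reports a receive queue count other than 32, limit the driver
         * to at most 15 SDS rings.
         */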
400         if ((ha->msix_count  < 64) || (num_rcvq != 32)) {
401                 if (ha->hw.num_sds_rings > 15) {
402                         ha->hw.num_sds_rings = 15;
403                 }
404         }
405
406         ha->hw.num_rds_rings = ha->hw.num_sds_rings;
407         ha->hw.num_tx_rings = ha->hw.num_sds_rings;
408
409 #ifdef QL_ENABLE_ISCSI_TLV
410         ha->hw.num_tx_rings = ha->hw.num_sds_rings * 2;
411 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
412
413         ql_hw_add_sysctls(ha);
414
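        /* One MSI-X vector for the mailbox interrupt plus one per SDS ring. */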
415         ha->msix_count = ha->hw.num_sds_rings + 1;
416
417         if (pci_alloc_msix(dev, &ha->msix_count)) {
418                 device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
419                         ha->msix_count);
420                 ha->msix_count = 0;
421                 goto qla_pci_attach_err;
422         }
423
424         ha->mbx_irq_rid = 1;
425         ha->mbx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
426                                 &ha->mbx_irq_rid,
427                                 (RF_ACTIVE | RF_SHAREABLE));
428         if (ha->mbx_irq == NULL) {
429                 device_printf(dev, "could not allocate mbx interrupt\n");
430                 goto qla_pci_attach_err;
431         }
432         if (bus_setup_intr(dev, ha->mbx_irq, (INTR_TYPE_NET | INTR_MPSAFE),
433                 NULL, ql_mbx_isr, ha, &ha->mbx_handle)) {
434                 device_printf(dev, "could not setup mbx interrupt\n");
435                 goto qla_pci_attach_err;
436         }
437
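        /*
         * Set up one interrupt and one transmit fastpath per SDS ring.
         * Resource IDs 2..(num_sds_rings + 1) follow the mailbox vector
         * at resource ID 1 above.
         */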
438         for (i = 0; i < ha->hw.num_sds_rings; i++) {
439                 ha->irq_vec[i].sds_idx = i;
440                 ha->irq_vec[i].ha = ha;
441                 ha->irq_vec[i].irq_rid = 2 + i;
442
443                 ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
444                                 &ha->irq_vec[i].irq_rid,
445                                 (RF_ACTIVE | RF_SHAREABLE));
446
447                 if (ha->irq_vec[i].irq == NULL) {
448                         device_printf(dev, "could not allocate interrupt\n");
449                         goto qla_pci_attach_err;
450                 }
451                 if (bus_setup_intr(dev, ha->irq_vec[i].irq,
452                         (INTR_TYPE_NET | INTR_MPSAFE),
453                         NULL, ql_isr, &ha->irq_vec[i],
454                         &ha->irq_vec[i].handle)) {
455                         device_printf(dev, "could not setup interrupt\n");
456                         goto qla_pci_attach_err;
457                 }
458
459                 ha->tx_fp[i].ha = ha;
460                 ha->tx_fp[i].txr_idx = i;
461
462                 if (qla_alloc_tx_br(ha, &ha->tx_fp[i])) {
463                         device_printf(dev, "%s: could not allocate tx_br[%d]\n",
464                                 __func__, i);
465                         goto qla_pci_attach_err;
466                 }
467         }
468
469         if (qla_create_fp_taskqueues(ha) != 0)
470                 goto qla_pci_attach_err;
471
472         printf("%s: mp_ncpus %d sds %d rds %d msi-x %d\n", __func__, mp_ncpus,
473                 ha->hw.num_sds_rings, ha->hw.num_rds_rings, ha->msix_count);
474
475         ql_read_mac_addr(ha);
476
477         /* allocate parent dma tag */
478         if (qla_alloc_parent_dma_tag(ha)) {
479                 device_printf(dev, "%s: qla_alloc_parent_dma_tag failed\n",
480                         __func__);
481                 goto qla_pci_attach_err;
482         }
483
484         /* alloc all dma buffers */
485         if (ql_alloc_dma(ha)) {
486                 device_printf(dev, "%s: ql_alloc_dma failed\n", __func__);
487                 goto qla_pci_attach_err;
488         }
489         qla_get_peer(ha);
490
491         if (ql_minidump_init(ha) != 0) {
492                 device_printf(dev, "%s: ql_minidump_init failed\n", __func__);
493                 goto qla_pci_attach_err;
494         }
495         /* create the o.s ethernet interface */
496         qla_init_ifnet(dev, ha);
497
498         ha->flags.qla_watchdog_active = 1;
499         ha->qla_watchdog_pause = 0;
500
501         callout_init(&ha->tx_callout, TRUE);
502         ha->flags.qla_callout_init = 1;
503
504         /* create ioctl device interface */
505         if (ql_make_cdev(ha)) {
506                 device_printf(dev, "%s: ql_make_cdev failed\n", __func__);
507                 goto qla_pci_attach_err;
508         }
509
510         callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
511                 qla_watchdog, ha);
512
513         TASK_INIT(&ha->err_task, 0, qla_error_recovery, ha);
514         ha->err_tq = taskqueue_create("qla_errq", M_NOWAIT,
515                         taskqueue_thread_enqueue, &ha->err_tq);
516         taskqueue_start_threads(&ha->err_tq, 1, PI_NET, "%s errq",
517                 device_get_nameunit(ha->pci_dev));
518
519         TASK_INIT(&ha->async_event_task, 0, qla_async_event, ha);
520         ha->async_event_tq = taskqueue_create("qla_asyncq", M_NOWAIT,
521                         taskqueue_thread_enqueue, &ha->async_event_tq);
522         taskqueue_start_threads(&ha->async_event_tq, 1, PI_NET, "%s asyncq",
523                 device_get_nameunit(ha->pci_dev));
524
525         TASK_INIT(&ha->stats_task, 0, qla_stats, ha);
526         ha->stats_tq = taskqueue_create("qla_statsq", M_NOWAIT,
527                         taskqueue_thread_enqueue, &ha->stats_tq);
528         taskqueue_start_threads(&ha->stats_tq, 1, PI_NET, "%s statsq",
529                 device_get_nameunit(ha->pci_dev));
530
531         QL_DPRINT2(ha, (dev, "%s: exit 0\n", __func__));
532         return (0);
533
534 qla_pci_attach_err:
535
536         qla_release(ha);
537
538         if (ha->flags.lock_init) {
539                 mtx_destroy(&ha->hw_lock);
540         }
541
542         QL_DPRINT2(ha, (dev, "%s: exit ENXIO\n", __func__));
543         return (ENXIO);
544 }
545
546 /*
547  * Name:        qla_pci_detach
548  * Function:    Unhooks the device from the operating system
549  */
550 static int
551 qla_pci_detach(device_t dev)
552 {
553         qla_host_t *ha = NULL;
554         struct ifnet *ifp;
555
556
557         if ((ha = device_get_softc(dev)) == NULL) {
558                 device_printf(dev, "cannot get softc\n");
559                 return (ENOMEM);
560         }
561
562         QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));
563
564         ifp = ha->ifp;
565
566         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
567         QLA_LOCK(ha, __func__, -1, 0);
568
569         ha->qla_detach_active = 1;
570         qla_stop(ha);
571
572         qla_release(ha);
573
574         QLA_UNLOCK(ha, __func__);
575
576         if (ha->flags.lock_init) {
577                 mtx_destroy(&ha->hw_lock);
578         }
579
580         QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));
581
582         return (0);
583 }
584
585 /*
586  * SYSCTL Related Callbacks
587  */
588 static int
589 qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS)
590 {
591         int err, ret = 0;
592         qla_host_t *ha;
593
594         err = sysctl_handle_int(oidp, &ret, 0, req);
595
596         if (err || !req->newptr)
597                 return (err);
598
599         if (ret == 1) {
600                 ha = (qla_host_t *)arg1;
601                 ql_hw_link_status(ha);
602         }
603         return (err);
604 }
605
606 /*
607  * Name:        qla_release
608  * Function:    Releases the resources allocated for the device
609  */
610 static void
611 qla_release(qla_host_t *ha)
612 {
613         device_t dev;
614         int i;
615
616         dev = ha->pci_dev;
617
618         if (ha->async_event_tq) {
619                 taskqueue_drain(ha->async_event_tq, &ha->async_event_task);
620                 taskqueue_free(ha->async_event_tq);
621         }
622
623         if (ha->err_tq) {
624                 taskqueue_drain(ha->err_tq, &ha->err_task);
625                 taskqueue_free(ha->err_tq);
626         }
627
628         if (ha->stats_tq) {
629                 taskqueue_drain(ha->stats_tq, &ha->stats_task);
630                 taskqueue_free(ha->stats_tq);
631         }
632
633         ql_del_cdev(ha);
634
635         if (ha->flags.qla_watchdog_active) {
636                 ha->qla_watchdog_exit = 1;
637
638                 while (ha->qla_watchdog_exited == 0)
639                         qla_mdelay(__func__, 1);
640         }
641
642         if (ha->flags.qla_callout_init)
643                 callout_stop(&ha->tx_callout);
644
645         if (ha->ifp != NULL)
646                 ether_ifdetach(ha->ifp);
647
648         ql_free_dma(ha); 
649         qla_free_parent_dma_tag(ha);
650
651         if (ha->mbx_handle)
652                 (void)bus_teardown_intr(dev, ha->mbx_irq, ha->mbx_handle);
653
654         if (ha->mbx_irq)
655                 (void) bus_release_resource(dev, SYS_RES_IRQ, ha->mbx_irq_rid,
656                                 ha->mbx_irq);
657
658         for (i = 0; i < ha->hw.num_sds_rings; i++) {
659
660                 if (ha->irq_vec[i].handle) {
661                         (void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
662                                         ha->irq_vec[i].handle);
663                 }
664                         
665                 if (ha->irq_vec[i].irq) {
666                         (void)bus_release_resource(dev, SYS_RES_IRQ,
667                                 ha->irq_vec[i].irq_rid,
668                                 ha->irq_vec[i].irq);
669                 }
670
671                 qla_free_tx_br(ha, &ha->tx_fp[i]);
672         }
673         qla_destroy_fp_taskqueues(ha);
674
675         if (ha->msix_count)
676                 pci_release_msi(dev);
677
678 //      if (ha->flags.lock_init) {
679 //              mtx_destroy(&ha->hw_lock);
680 //      }
681
682         if (ha->pci_reg)
683                 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
684                                 ha->pci_reg);
685
686         if (ha->pci_reg1)
687                 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid1,
688                                 ha->pci_reg1);
689
690         return;
691 }
692
693 /*
694  * DMA Related Functions
695  */
696
697 static void
698 qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
699 {
700         *((bus_addr_t *)arg) = 0;
701
702         if (error) {
703                 printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
704                 return;
705         }
706
707         *((bus_addr_t *)arg) = segs[0].ds_addr;
708
709         return;
710 }
711
712 int
713 ql_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
714 {
715         int             ret = 0;
716         device_t        dev;
717         bus_addr_t      b_addr;
718
719         dev = ha->pci_dev;
720
721         QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));
722
723         ret = bus_dma_tag_create(
724                         ha->parent_tag,/* parent */
725                         dma_buf->alignment,
726                         ((bus_size_t)(1ULL << 32)),/* boundary */
727                         BUS_SPACE_MAXADDR,      /* lowaddr */
728                         BUS_SPACE_MAXADDR,      /* highaddr */
729                         NULL, NULL,             /* filter, filterarg */
730                         dma_buf->size,          /* maxsize */
731                         1,                      /* nsegments */
732                         dma_buf->size,          /* maxsegsize */
733                         0,                      /* flags */
734                         NULL, NULL,             /* lockfunc, lockarg */
735                         &dma_buf->dma_tag);
736
737         if (ret) {
738                 device_printf(dev, "%s: could not create dma tag\n", __func__);
739                 goto ql_alloc_dmabuf_exit;
740         }
741         ret = bus_dmamem_alloc(dma_buf->dma_tag,
742                         (void **)&dma_buf->dma_b,
743                         (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
744                         &dma_buf->dma_map);
745         if (ret) {
746                 bus_dma_tag_destroy(dma_buf->dma_tag);
747                 device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__);
748                 goto ql_alloc_dmabuf_exit;
749         }
750
751         ret = bus_dmamap_load(dma_buf->dma_tag,
752                         dma_buf->dma_map,
753                         dma_buf->dma_b,
754                         dma_buf->size,
755                         qla_dmamap_callback,
756                         &b_addr, BUS_DMA_NOWAIT);
757
758         if (ret || !b_addr) {
759                 bus_dma_tag_destroy(dma_buf->dma_tag);
760                 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
761                         dma_buf->dma_map);
762                 ret = -1;
763                 goto ql_alloc_dmabuf_exit;
764         }
765
766         dma_buf->dma_addr = b_addr;
767
768 ql_alloc_dmabuf_exit:
769         QL_DPRINT2(ha, (dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n",
770                 __func__, ret, (void *)dma_buf->dma_tag,
771                 (void *)dma_buf->dma_map, (void *)dma_buf->dma_b,
772                 dma_buf->size));
773
774         return ret;
775 }
776
777 void
778 ql_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
779 {
780         bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map); 
781         bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
782         bus_dma_tag_destroy(dma_buf->dma_tag);
783 }
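
/*
 * Illustrative usage sketch for the two helpers above (the field values
 * below are hypothetical; real callers size and align buffers to match
 * the hardware structure being allocated):
 *
 *      qla_dma_t dma;
 *
 *      bzero(&dma, sizeof(dma));
 *      dma.alignment = 8;
 *      dma.size = 4096;
 *      if (ql_alloc_dmabuf(ha, &dma) == 0) {
 *              // dma.dma_b is the kernel virtual address,
 *              // dma.dma_addr the bus address to hand to the chip.
 *              ...
 *              ql_free_dmabuf(ha, &dma);
 *      }
 */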
784
785 static int
786 qla_alloc_parent_dma_tag(qla_host_t *ha)
787 {
788         int             ret;
789         device_t        dev;
790
791         dev = ha->pci_dev;
792
793         /*
794          * Allocate parent DMA Tag
795          */
796         ret = bus_dma_tag_create(
797                         bus_get_dma_tag(dev),   /* parent */
798                         1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
799                         BUS_SPACE_MAXADDR,      /* lowaddr */
800                         BUS_SPACE_MAXADDR,      /* highaddr */
801                         NULL, NULL,             /* filter, filterarg */
802                         BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
803                         0,                      /* nsegments */
804                         BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
805                         0,                      /* flags */
806                         NULL, NULL,             /* lockfunc, lockarg */
807                         &ha->parent_tag);
808
809         if (ret) {
810                 device_printf(dev, "%s: could not create parent dma tag\n",
811                         __func__);
812                 return (-1);
813         }
814
815         ha->flags.parent_tag = 1;
816         
817         return (0);
818 }
819
820 static void
821 qla_free_parent_dma_tag(qla_host_t *ha)
822 {
823         if (ha->flags.parent_tag) {
824                 bus_dma_tag_destroy(ha->parent_tag);
825                 ha->flags.parent_tag = 0;
826         }
827 }
828
829 /*
830  * Name: qla_init_ifnet
831  * Function: Creates the network device interface and registers it with the O.S.
832  */
833
834 static void
835 qla_init_ifnet(device_t dev, qla_host_t *ha)
836 {
837         struct ifnet *ifp;
838
839         QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));
840
841         ifp = ha->ifp = if_alloc(IFT_ETHER);
842
843         if (ifp == NULL)
844                 panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));
845
846         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
847
848         ifp->if_baudrate = IF_Gbps(10);
849         ifp->if_capabilities = IFCAP_LINKSTATE;
850         ifp->if_mtu = ETHERMTU;
851
852         ifp->if_init = qla_init;
853         ifp->if_softc = ha;
854         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
855         ifp->if_ioctl = qla_ioctl;
856
857         ifp->if_transmit = qla_transmit;
858         ifp->if_qflush = qla_qflush;
859
860         IFQ_SET_MAXLEN(&ifp->if_snd, qla_get_ifq_snd_maxlen(ha));
861         ifp->if_snd.ifq_drv_maxlen = qla_get_ifq_snd_maxlen(ha);
862         IFQ_SET_READY(&ifp->if_snd);
863
864         ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
865
866         ether_ifattach(ifp, qla_get_mac_addr(ha));
867
868         ifp->if_capabilities |= IFCAP_HWCSUM |
869                                 IFCAP_TSO4 |
870                                 IFCAP_JUMBO_MTU |
871                                 IFCAP_VLAN_HWTAGGING |
872                                 IFCAP_VLAN_MTU |
873                                 IFCAP_VLAN_HWTSO |
874                                 IFCAP_LRO;
875
876         ifp->if_capenable = ifp->if_capabilities;
877
878         ifp->if_hdrlen = sizeof(struct ether_vlan_header);
879
880         ifmedia_init(&ha->media, IFM_IMASK, qla_media_change, qla_media_status);
881
882         ifmedia_add(&ha->media, (IFM_ETHER | qla_get_optics(ha) | IFM_FDX), 0,
883                 NULL);
884         ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);
885
886         ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));
887
888         QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));
889
890         return;
891 }
892
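/*
 * Name:        qla_init_locked
 * Function:    (Re)initializes the interface with the QLA lock held:
 *              stops the port, reallocates transmit and receive buffers
 *              and brings the hardware interface back up via
 *              ql_init_hw_if(), marking the interface as running.
 */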
893 static void
894 qla_init_locked(qla_host_t *ha)
895 {
896         struct ifnet *ifp = ha->ifp;
897
898         qla_stop(ha);
899
900         if (qla_alloc_xmt_bufs(ha) != 0) 
901                 return;
902
903         qla_confirm_9kb_enable(ha);
904
905         if (qla_alloc_rcv_bufs(ha) != 0)
906                 return;
907
908         bcopy(IF_LLADDR(ha->ifp), ha->hw.mac_addr, ETHER_ADDR_LEN);
909
910         ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_TSO;
911         ifp->if_hwassist |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6;
912
913         ha->stop_rcv = 0;
914         if (ql_init_hw_if(ha) == 0) {
915                 ifp = ha->ifp;
916                 ifp->if_drv_flags |= IFF_DRV_RUNNING;
917                 ha->qla_watchdog_pause = 0;
918                 ha->hw_vlan_tx_frames = 0;
919                 ha->tx_tso_frames = 0;
920                 ha->qla_interface_up = 1;
921                 ql_update_link_state(ha);
922         }
923
924         return;
925 }
926
927 static void
928 qla_init(void *arg)
929 {
930         qla_host_t *ha;
931
932         ha = (qla_host_t *)arg;
933
934         QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
935
936         if (QLA_LOCK(ha, __func__, -1, 0) != 0)
937                 return;
938
939         qla_init_locked(ha);
940
941         QLA_UNLOCK(ha, __func__);
942
943         QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
944 }
945
946 static int
947 qla_set_multi(qla_host_t *ha, uint32_t add_multi)
948 {
949         uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN];
950         struct ifmultiaddr *ifma;
951         int mcnt = 0;
952         struct ifnet *ifp = ha->ifp;
953         int ret = 0;
954
955         if_maddr_rlock(ifp);
956
957         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
958
959                 if (ifma->ifma_addr->sa_family != AF_LINK)
960                         continue;
961
962                 if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS)
963                         break;
964
965                 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
966                         &mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN);
967
968                 mcnt++;
969         }
970
971         if_maddr_runlock(ifp);
972
973         if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
974                 QLA_LOCK_NO_SLEEP) != 0)
975                 return (-1);
976
977         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
978
979                 if (!add_multi) {
980                         ret = qla_hw_del_all_mcast(ha);
981
982                         if (ret)
983                                 device_printf(ha->pci_dev,
984                                         "%s: qla_hw_del_all_mcast() failed\n",
985                                 __func__);
986                 }
987
988                 if (!ret)
989                         ret = ql_hw_set_multi(ha, mta, mcnt, 1);
990
991         }
992
993         QLA_UNLOCK(ha, __func__);
994
995         return (ret);
996 }
997
998 static int
999 qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1000 {
1001         int ret = 0;
1002         struct ifreq *ifr = (struct ifreq *)data;
1003         struct ifaddr *ifa = (struct ifaddr *)data;
1004         qla_host_t *ha;
1005
1006         ha = (qla_host_t *)ifp->if_softc;
1007
1008         switch (cmd) {
1009         case SIOCSIFADDR:
1010                 QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n",
1011                         __func__, cmd));
1012
1013                 if (ifa->ifa_addr->sa_family == AF_INET) {
1014
1015                         ret = QLA_LOCK(ha, __func__,
1016                                         QLA_LOCK_DEFAULT_MS_TIMEOUT,
1017                                         QLA_LOCK_NO_SLEEP);
1018                         if (ret)
1019                                 break;
1020
1021                         ifp->if_flags |= IFF_UP;
1022
1023                         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1024                                 qla_init_locked(ha);
1025                         }
1026
1027                         QLA_UNLOCK(ha, __func__);
1028                         QL_DPRINT4(ha, (ha->pci_dev,
1029                                 "%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
1030                                 __func__, cmd,
1031                                 ntohl(IA_SIN(ifa)->sin_addr.s_addr)));
1032
1033                         arp_ifinit(ifp, ifa);
1034                 } else {
1035                         ether_ioctl(ifp, cmd, data);
1036                 }
1037                 break;
1038
1039         case SIOCSIFMTU:
1040                 QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n",
1041                         __func__, cmd));
1042
1043                 if (ifr->ifr_mtu > QLA_MAX_MTU) {
1044                         ret = EINVAL;
1045                 } else {
1046                         ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
1047                                         QLA_LOCK_NO_SLEEP);
1048
1049                         if (ret)
1050                                 break;
1051
1052                         ifp->if_mtu = ifr->ifr_mtu;
1053                         ha->max_frame_size =
1054                                 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1055
1056                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1057                                 qla_init_locked(ha);
1058                         }
1059
1060                         if (ifp->if_mtu > ETHERMTU)
1061                                 ha->std_replenish = QL_JUMBO_REPLENISH_THRES;
1062                         else
1063                                 ha->std_replenish = QL_STD_REPLENISH_THRES;
1064                                 
1065
1066                         QLA_UNLOCK(ha, __func__);
1067                 }
1068
1069                 break;
1070
1071         case SIOCSIFFLAGS:
1072                 QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n",
1073                         __func__, cmd));
1074
1075                 ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
1076                                 QLA_LOCK_NO_SLEEP);
1077
1078                 if (ret)
1079                         break;
1080
1081                 if (ifp->if_flags & IFF_UP) {
1082
1083                         ha->max_frame_size = ifp->if_mtu +
1084                                         ETHER_HDR_LEN + ETHER_CRC_LEN;
1085                         qla_init_locked(ha);
1086                                                 
1087                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1088                                 if ((ifp->if_flags ^ ha->if_flags) &
1089                                         IFF_PROMISC) {
1090                                         ret = ql_set_promisc(ha);
1091                                 } else if ((ifp->if_flags ^ ha->if_flags) &
1092                                         IFF_ALLMULTI) {
1093                                         ret = ql_set_allmulti(ha);
1094                                 }
1095                         }
1096                 } else {
1097                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1098                                 qla_stop(ha);
1099                         ha->if_flags = ifp->if_flags;
1100                 }
1101
1102                 QLA_UNLOCK(ha, __func__);
1103                 break;
1104
1105         case SIOCADDMULTI:
1106                 QL_DPRINT4(ha, (ha->pci_dev,
1107                         "%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd));
1108
1109                 if (qla_set_multi(ha, 1))
1110                         ret = EINVAL;
1111                 break;
1112
1113         case SIOCDELMULTI:
1114                 QL_DPRINT4(ha, (ha->pci_dev,
1115                         "%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd));
1116
1117                 if (qla_set_multi(ha, 0))
1118                         ret = EINVAL;
1119                 break;
1120
1121         case SIOCSIFMEDIA:
1122         case SIOCGIFMEDIA:
1123                 QL_DPRINT4(ha, (ha->pci_dev,
1124                         "%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n",
1125                         __func__, cmd));
1126                 ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
1127                 break;
1128
1129         case SIOCSIFCAP:
1130         {
1131                 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1132
1133                 QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n",
1134                         __func__, cmd));
1135
1136                 if (mask & IFCAP_HWCSUM)
1137                         ifp->if_capenable ^= IFCAP_HWCSUM;
1138                 if (mask & IFCAP_TSO4)
1139                         ifp->if_capenable ^= IFCAP_TSO4;
1140                 if (mask & IFCAP_TSO6)
1141                         ifp->if_capenable ^= IFCAP_TSO6;
1142                 if (mask & IFCAP_VLAN_HWTAGGING)
1143                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1144                 if (mask & IFCAP_VLAN_HWTSO)
1145                         ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1146                 if (mask & IFCAP_LRO)
1147                         ifp->if_capenable ^= IFCAP_LRO;
1148
1149                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1150                         ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
1151                                 QLA_LOCK_NO_SLEEP);
1152
1153                         if (ret)
1154                                 break;
1155
1156                         qla_init_locked(ha);
1157
1158                         QLA_UNLOCK(ha, __func__);
1159
1160                 }
1161                 VLAN_CAPABILITIES(ifp);
1162                 break;
1163         }
1164
1165         default:
1166                 QL_DPRINT4(ha, (ha->pci_dev, "%s: default (0x%lx)\n",
1167                         __func__, cmd));
1168                 ret = ether_ioctl(ifp, cmd, data);
1169                 break;
1170         }
1171
1172         return (ret);
1173 }
1174
1175 static int
1176 qla_media_change(struct ifnet *ifp)
1177 {
1178         qla_host_t *ha;
1179         struct ifmedia *ifm;
1180         int ret = 0;
1181
1182         ha = (qla_host_t *)ifp->if_softc;
1183
1184         QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1185
1186         ifm = &ha->media;
1187
1188         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1189                 ret = EINVAL;
1190
1191         QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
1192
1193         return (ret);
1194 }
1195
1196 static void
1197 qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1198 {
1199         qla_host_t *ha;
1200
1201         ha = (qla_host_t *)ifp->if_softc;
1202
1203         QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1204
1205         ifmr->ifm_status = IFM_AVALID;
1206         ifmr->ifm_active = IFM_ETHER;
1207         
1208         ql_update_link_state(ha);
1209         if (ha->hw.link_up) {
1210                 ifmr->ifm_status |= IFM_ACTIVE;
1211                 ifmr->ifm_active |= (IFM_FDX | qla_get_optics(ha));
1212         }
1213
1214         QL_DPRINT2(ha, (ha->pci_dev, "%s: exit (%s)\n", __func__,\
1215                 (ha->hw.link_up ? "link_up" : "link_down")));
1216
1217         return;
1218 }
1219
1220
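/*
 * Name:        qla_send
 * Function:    Maps a transmit mbuf chain for DMA, defragmenting it once
 *              if it has too many segments (EFBIG), and hands the mapped
 *              segments to the hardware via ql_hw_send().
 */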
1221 static int
1222 qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx,
1223         uint32_t iscsi_pdu)
1224 {
1225         bus_dma_segment_t       segs[QLA_MAX_SEGMENTS];
1226         bus_dmamap_t            map;
1227         int                     nsegs;
1228         int                     ret = -1;
1229         uint32_t                tx_idx;
1230         struct mbuf             *m_head = *m_headp;
1231
1232         QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__));
1233
1234         tx_idx = ha->hw.tx_cntxt[txr_idx].txr_next;
1235         map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map;
1236
1237         ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
1238                         BUS_DMA_NOWAIT);
1239
1240         if (ret == EFBIG) {
1241
1242                 struct mbuf *m;
1243
1244                 QL_DPRINT8(ha, (ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
1245                         m_head->m_pkthdr.len));
1246
1247                 m = m_defrag(m_head, M_NOWAIT);
1248                 if (m == NULL) {
1249                         ha->err_tx_defrag++;
1250                         m_freem(m_head);
1251                         *m_headp = NULL;
1252                         device_printf(ha->pci_dev,
1253                                 "%s: m_defrag() = NULL [%d]\n",
1254                                 __func__, ret);
1255                         return (ENOBUFS);
1256                 }
1257                 m_head = m;
1258                 *m_headp = m_head;
1259
1260                 if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
1261                                         segs, &nsegs, BUS_DMA_NOWAIT))) {
1262
1263                         ha->err_tx_dmamap_load++;
1264
1265                         device_printf(ha->pci_dev,
1266                                 "%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n",
1267                                 __func__, ret, m_head->m_pkthdr.len);
1268
1269                         if (ret != ENOMEM) {
1270                                 m_freem(m_head);
1271                                 *m_headp = NULL;
1272                         }
1273                         return (ret);
1274                 }
1275
1276         } else if (ret) {
1277
1278                 ha->err_tx_dmamap_load++;
1279
1280                 device_printf(ha->pci_dev,
1281                         "%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n",
1282                         __func__, ret, m_head->m_pkthdr.len);
1283
1284                 if (ret != ENOMEM) {
1285                         m_freem(m_head);
1286                         *m_headp = NULL;
1287                 }
1288                 return (ret);
1289         }
1290
1291         QL_ASSERT(ha, (nsegs != 0), ("qla_send: empty packet"));
1292
1293         bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);
1294
1295         if (!(ret = ql_hw_send(ha, segs, nsegs, tx_idx, m_head, txr_idx,
1296                                 iscsi_pdu))) {
1297                 ha->tx_ring[txr_idx].count++;
1298                 if (iscsi_pdu)
1299                         ha->tx_ring[txr_idx].iscsi_pkt_count++;
1300                 ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head = m_head;
1301         } else {
1302                 bus_dmamap_unload(ha->tx_tag, map); 
1303                 if (ret == EINVAL) {
1304                         if (m_head)
1305                                 m_freem(m_head);
1306                         *m_headp = NULL;
1307                 }
1308         }
1309
1310         QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__));
1311         return (ret);
1312 }
1313
1314 static int
1315 qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *fp)
1316 {
1317         snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
1318                 "qla%d_fp%d_tx_mq_lock", ha->pci_func, fp->txr_idx);
1319
1320         mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
1321
1322         fp->tx_br = buf_ring_alloc(NUM_TX_DESCRIPTORS, M_DEVBUF,
1323                                    M_NOWAIT, &fp->tx_mtx);
1324         if (fp->tx_br == NULL) {
1325             QL_DPRINT1(ha, (ha->pci_dev, "buf_ring_alloc failed for "
1326                 " fp[%d, %d]\n", ha->pci_func, fp->txr_idx));
1327             return (-ENOMEM);
1328         }
1329         return 0;
1330 }
1331
1332 static void
1333 qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *fp)
1334 {
1335         struct mbuf *mp;
1336         struct ifnet *ifp = ha->ifp;
1337
1338         if (mtx_initialized(&fp->tx_mtx)) {
1339
1340                 if (fp->tx_br != NULL) {
1341
1342                         mtx_lock(&fp->tx_mtx);
1343
1344                         while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
1345                                 m_freem(mp);
1346                         }
1347
1348                         mtx_unlock(&fp->tx_mtx);
1349
1350                         buf_ring_free(fp->tx_br, M_DEVBUF);
1351                         fp->tx_br = NULL;
1352                 }
1353                 mtx_destroy(&fp->tx_mtx);
1354         }
1355         return;
1356 }
1357
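/*
 * Name:        qla_fp_taskqueue
 * Function:    Per-ring fastpath task.  Processes receive completions via
 *              ql_rcv_isr(), reaps transmit completions and drains the
 *              ring's buf_ring through qla_send(); re-enqueues itself
 *              while work remains, otherwise re-enables the interrupt.
 */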
1358 static void
1359 qla_fp_taskqueue(void *context, int pending)
1360 {
1361         qla_tx_fp_t *fp;
1362         qla_host_t *ha;
1363         struct ifnet *ifp;
1364         struct mbuf  *mp;
1365         int ret;
1366         uint32_t txr_idx;
1367         uint32_t iscsi_pdu = 0;
1368         uint32_t rx_pkts_left = -1;
1369
1370         fp = context;
1371
1372         if (fp == NULL)
1373                 return;
1374
1375         ha = (qla_host_t *)fp->ha;
1376
1377         ifp = ha->ifp;
1378
1379         txr_idx = fp->txr_idx;
1380
1381         mtx_lock(&fp->tx_mtx);
1382
1383         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING) || (!ha->hw.link_up)) {
1384                 mtx_unlock(&fp->tx_mtx);
1385                 goto qla_fp_taskqueue_exit;
1386         }
1387
1388         while (rx_pkts_left && !ha->stop_rcv &&
1389                 (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1390                 rx_pkts_left = ql_rcv_isr(ha, fp->txr_idx, 64);
1391
1392 #ifdef QL_ENABLE_ISCSI_TLV
1393                 ql_hw_tx_done_locked(ha, fp->txr_idx);
1394                 ql_hw_tx_done_locked(ha, (fp->txr_idx + (ha->hw.num_tx_rings >> 1)));
1395 #else
1396                 ql_hw_tx_done_locked(ha, fp->txr_idx);
1397 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
1398
1399                 mp = drbr_peek(ifp, fp->tx_br);
1400
1401                 while (mp != NULL) {
1402
1403                         if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) {
1404 #ifdef QL_ENABLE_ISCSI_TLV
1405                                 if (ql_iscsi_pdu(ha, mp) == 0) {
1406                                         txr_idx = txr_idx +
1407                                                 (ha->hw.num_tx_rings >> 1);
1408                                         iscsi_pdu = 1;
1409                                 } else {
1410                                         iscsi_pdu = 0;
1411                                         txr_idx = fp->txr_idx;
1412                                 }
1413 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */
1414                         }
1415
1416                         ret = qla_send(ha, &mp, txr_idx, iscsi_pdu);
1417
1418                         if (ret) {
1419                                 if (mp != NULL)
1420                                         drbr_putback(ifp, fp->tx_br, mp);
1421                                 else {
1422                                         drbr_advance(ifp, fp->tx_br);
1423                                 }
1424
1425                                 mtx_unlock(&fp->tx_mtx);
1426
1427                                 goto qla_fp_taskqueue_exit0;
1428                         } else {
1429                                 drbr_advance(ifp, fp->tx_br);
1430                         }
1431
1432                         /* Send a copy of the frame to the BPF listener */
1433                         ETHER_BPF_MTAP(ifp, mp);
1434                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1435                                 break;
1436
1437                         mp = drbr_peek(ifp, fp->tx_br);
1438                 }
1439         }
1440         mtx_unlock(&fp->tx_mtx);
1441
1442 qla_fp_taskqueue_exit0:
1443
1444         if (rx_pkts_left || ((mp != NULL) && ret)) {
1445                 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
1446         } else {
1447                 if (!ha->stop_rcv) {
1448                         QL_ENABLE_INTERRUPTS(ha, fp->txr_idx);
1449                 }
1450         }
1451
1452 qla_fp_taskqueue_exit:
1453
1454         QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret));
1455         return;
1456 }
1457
1458 static int
1459 qla_create_fp_taskqueues(qla_host_t *ha)
1460 {
1461         int     i;
1462         uint8_t tq_name[32];
1463
1464         for (i = 0; i < ha->hw.num_sds_rings; i++) {
1465
1466                 qla_tx_fp_t *fp = &ha->tx_fp[i];
1467
1468                 bzero(tq_name, sizeof (tq_name));
1469                 snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i);
1470
1471                 TASK_INIT(&fp->fp_task, 0, qla_fp_taskqueue, fp);
1472
1473                 fp->fp_taskqueue = taskqueue_create_fast(tq_name, M_NOWAIT,
1474                                         taskqueue_thread_enqueue,
1475                                         &fp->fp_taskqueue);
1476
1477                 if (fp->fp_taskqueue == NULL)
1478                         return (-1);
1479
1480                 taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
1481                         tq_name);
1482
1483                 QL_DPRINT1(ha, (ha->pci_dev, "%s: %p\n", __func__,
1484                         fp->fp_taskqueue));
1485         }
1486
1487         return (0);
1488 }
1489
1490 static void
1491 qla_destroy_fp_taskqueues(qla_host_t *ha)
1492 {
1493         int     i;
1494
1495         for (i = 0; i < ha->hw.num_sds_rings; i++) {
1496
1497                 qla_tx_fp_t *fp = &ha->tx_fp[i];
1498
1499                 if (fp->fp_taskqueue != NULL) {
1500                         taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
1501                         taskqueue_free(fp->fp_taskqueue);
1502                         fp->fp_taskqueue = NULL;
1503                 }
1504         }
1505         return;
1506 }
1507
1508 static void
1509 qla_drain_fp_taskqueues(qla_host_t *ha)
1510 {
1511         int     i;
1512
1513         for (i = 0; i < ha->hw.num_sds_rings; i++) {
1514                 qla_tx_fp_t *fp = &ha->tx_fp[i];
1515
1516                 if (fp->fp_taskqueue != NULL) {
1517                         taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
1518                 }
1519         }
1520         return;
1521 }
1522
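/*
 * Name:        qla_transmit
 * Function:    if_transmit entry point.  Picks a transmit fastpath from
 *              the mbuf's RSS flow id, enqueues the frame on that ring's
 *              buf_ring and kicks the corresponding fastpath taskqueue.
 */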
1523 static int
1524 qla_transmit(struct ifnet *ifp, struct mbuf  *mp)
1525 {
1526         qla_host_t *ha = (qla_host_t *)ifp->if_softc;
1527         qla_tx_fp_t *fp;
1528         int rss_id = 0;
1529         int ret = 0;
1530
1531         QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1532
1533 #if __FreeBSD_version >= 1100000
1534         if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE)
1535 #else
1536         if (mp->m_flags & M_FLOWID)
1537 #endif
1538                 rss_id = (mp->m_pkthdr.flowid & Q8_RSS_IND_TBL_MAX_IDX) %
1539                                         ha->hw.num_sds_rings;
1540         fp = &ha->tx_fp[rss_id];
1541
1542         if (fp->tx_br == NULL) {
1543                 ret = EINVAL;
1544                 goto qla_transmit_exit;
1545         }
1546
1547         if (mp != NULL) {
1548                 ret = drbr_enqueue(ifp, fp->tx_br, mp);
1549         }
1550
1551         if (fp->fp_taskqueue != NULL)
1552                 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
1553
1554         ret = 0;
1555
1556 qla_transmit_exit:
1557
1558         QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret));
1559         return ret;
1560 }
1561
1562 static void
1563 qla_qflush(struct ifnet *ifp)
1564 {
1565         int                     i;
1566         qla_tx_fp_t             *fp;
1567         struct mbuf             *mp;
1568         qla_host_t              *ha;
1569
1570         ha = (qla_host_t *)ifp->if_softc;
1571
1572         QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1573
1574         for (i = 0; i < ha->hw.num_sds_rings; i++) {
1575
1576                 fp = &ha->tx_fp[i];
1577
1578                 if (fp == NULL)
1579                         continue;
1580
1581                 if (fp->tx_br) {
1582                         mtx_lock(&fp->tx_mtx);
1583
1584                         while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
1585                                 m_freem(mp);
1586                         }
1587                         mtx_unlock(&fp->tx_mtx);
1588                 }
1589         }
1590         QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
1591
1592         return;
1593 }
1594
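     /*
      * qla_stop
      *      Mark the interface down, pause the watchdog, wait for the
      *      transmit fast paths and taskqueues to go idle, then delete the
      *      hardware interface and release the transmit/receive buffers.
      */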
1595 static void
1596 qla_stop(qla_host_t *ha)
1597 {
1598         struct ifnet *ifp = ha->ifp;
1599         device_t        dev;
1600         int i = 0;
1601
1602         dev = ha->pci_dev;
1603
1604         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1605         ha->qla_watchdog_pause = 1;
1606
1607         for (i = 0; i < ha->hw.num_sds_rings; i++) {
1608                 qla_tx_fp_t *fp;
1609
1610                 fp = &ha->tx_fp[i];
1611
1612                 if (fp == NULL)
1613                         continue;
1614
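                     /*
                      * Taking and dropping tx_mtx here presumably waits for
                      * any transmit already running on this ring to finish
                      * now that IFF_DRV_RUNNING has been cleared.
                      */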
1615                 if (fp->tx_br != NULL) {
1616                         mtx_lock(&fp->tx_mtx);
1617                         mtx_unlock(&fp->tx_mtx);
1618                 }
1619         }
1620
1621         while (!ha->qla_watchdog_paused)
1622                 qla_mdelay(__func__, 1);
1623
1624         ha->qla_interface_up = 0;
1625
1626         qla_drain_fp_taskqueues(ha);
1627
1628         ql_del_hw_if(ha);
1629
1630         qla_free_xmt_bufs(ha);
1631         qla_free_rcv_bufs(ha);
1632
1633         return;
1634 }
1635
1636 /*
1637  * Buffer Management Functions for Transmit and Receive Rings
1638  */
1639 static int
1640 qla_alloc_xmt_bufs(qla_host_t *ha)
1641 {
1642         int ret = 0;
1643         uint32_t i, j;
1644         qla_tx_buf_t *txb;
1645
1646         if (bus_dma_tag_create(NULL,    /* parent */
1647                 1, 0,    /* alignment, bounds */
1648                 BUS_SPACE_MAXADDR,       /* lowaddr */
1649                 BUS_SPACE_MAXADDR,       /* highaddr */
1650                 NULL, NULL,      /* filter, filterarg */
1651                 QLA_MAX_TSO_FRAME_SIZE,     /* maxsize */
1652                 QLA_MAX_SEGMENTS,        /* nsegments */
1653                 PAGE_SIZE,        /* maxsegsize */
1654                 BUS_DMA_ALLOCNOW,        /* flags */
1655                 NULL,    /* lockfunc */
1656                 NULL,    /* lockfuncarg */
1657                 &ha->tx_tag)) {
1658                 device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n",
1659                         __func__);
1660                 return (ENOMEM);
1661         }
1662
1663         for (i = 0; i < ha->hw.num_tx_rings; i++) {
1664                 bzero((void *)ha->tx_ring[i].tx_buf,
1665                         (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
1666         }
1667
1668         for (j = 0; j < ha->hw.num_tx_rings; j++) {
1669                 for (i = 0; i < NUM_TX_DESCRIPTORS; i++) {
1670
1671                         txb = &ha->tx_ring[j].tx_buf[i];
1672
1673                         if ((ret = bus_dmamap_create(ha->tx_tag,
1674                                         BUS_DMA_NOWAIT, &txb->map))) {
1675
1676                                 ha->err_tx_dmamap_create++;
1677                                 device_printf(ha->pci_dev,
1678                                         "%s: bus_dmamap_create failed[%d]\n",
1679                                         __func__, ret);
1680
1681                                 qla_free_xmt_bufs(ha);
1682
1683                                 return (ret);
1684                         }
1685                 }
1686         }
1687
1688         return 0;
1689 }
1690
1691 /*
1692  * Release the mbuf after it has been sent on the wire
1693  */
1694 static void
1695 qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb)
1696 {
1697         QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1698
1699         if (txb->m_head) {
1700                 bus_dmamap_sync(ha->tx_tag, txb->map,
1701                         BUS_DMASYNC_POSTWRITE);
1702
1703                 bus_dmamap_unload(ha->tx_tag, txb->map);
1704
1705                 m_freem(txb->m_head);
1706                 txb->m_head = NULL;
1707
1708                 bus_dmamap_destroy(ha->tx_tag, txb->map);
1709                 txb->map = NULL;
1710         }
1711
1712         if (txb->map) {
1713                 bus_dmamap_unload(ha->tx_tag, txb->map);
1714                 bus_dmamap_destroy(ha->tx_tag, txb->map);
1715                 txb->map = NULL;
1716         }
1717
1718         QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
1719 }
1720
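     /*
      * qla_free_xmt_bufs
      *      Release all transmit buffers and their DMA maps and destroy the
      *      transmit DMA tag.
      */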
1721 static void
1722 qla_free_xmt_bufs(qla_host_t *ha)
1723 {
1724         int             i, j;
1725
1726         for (j = 0; j < ha->hw.num_tx_rings; j++) {
1727                 for (i = 0; i < NUM_TX_DESCRIPTORS; i++)
1728                         qla_clear_tx_buf(ha, &ha->tx_ring[j].tx_buf[i]);
1729         }
1730
1731         if (ha->tx_tag != NULL) {
1732                 bus_dma_tag_destroy(ha->tx_tag);
1733                 ha->tx_tag = NULL;
1734         }
1735
1736         for (i = 0; i < ha->hw.num_tx_rings; i++) {
1737                 bzero((void *)ha->tx_ring[i].tx_buf,
1738                         (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
1739         }
1740         return;
1741 }
1742
1743
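     /*
      * qla_alloc_rcv_std
      *      Create a DMA map for every receive descriptor, attach an mbuf to
      *      each one and program its physical address into the corresponding
      *      receive ring entry.
      */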
1744 static int
1745 qla_alloc_rcv_std(qla_host_t *ha)
1746 {
1747         int             i, j, k, r, ret = 0;
1748         qla_rx_buf_t    *rxb;
1749         qla_rx_ring_t   *rx_ring;
1750
1751         for (r = 0; r < ha->hw.num_rds_rings; r++) {
1752
1753                 rx_ring = &ha->rx_ring[r];
1754
1755                 for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
1756
1757                         rxb = &rx_ring->rx_buf[i];
1758
1759                         ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT,
1760                                         &rxb->map);
1761
1762                         if (ret) {
1763                                 device_printf(ha->pci_dev,
1764                                         "%s: dmamap[%d, %d] failed\n",
1765                                         __func__, r, i);
1766
1767                                 for (k = 0; k < r; k++) {
1768                                         for (j = 0; j < NUM_RX_DESCRIPTORS;
1769                                                 j++) {
1770                                                 rxb = &ha->rx_ring[k].rx_buf[j];
1771                                                 bus_dmamap_destroy(ha->rx_tag,
1772                                                         rxb->map);
1773                                         }
1774                                 }
1775
1776                                 for (j = 0; j < i; j++) {
1777                                         bus_dmamap_destroy(ha->rx_tag,
1778                                                 rx_ring->rx_buf[j].map);
1779                                 }
1780                                 goto qla_alloc_rcv_std_err;
1781                         }
1782                 }
1783         }
1784
1785         qla_init_hw_rcv_descriptors(ha);
1786
1787
1788         for (r = 0; r < ha->hw.num_rds_rings; r++) {
1789
1790                 rx_ring = &ha->rx_ring[r];
1791
1792                 for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
1793                         rxb = &rx_ring->rx_buf[i];
1794                         rxb->handle = i;
1795                         if (!(ret = ql_get_mbuf(ha, rxb, NULL))) {
1796                                 /*
1797                                  * Set the physical address in the
1798                                  * corresponding descriptor entry in the
1799                                  * receive ring/queue for the HBA.
1800                                  */
1801                                 qla_set_hw_rcv_desc(ha, r, i, rxb->handle,
1802                                         rxb->paddr,
1803                                         (rxb->m_head)->m_pkthdr.len);
1804                         } else {
1805                                 device_printf(ha->pci_dev,
1806                                         "%s: ql_get_mbuf [%d, %d] failed\n",
1807                                         __func__, r, i);
1808                                 bus_dmamap_destroy(ha->rx_tag, rxb->map);
1809                                 goto qla_alloc_rcv_std_err;
1810                         }
1811                 }
1812         }
1813         return 0;
1814
1815 qla_alloc_rcv_std_err:
1816         return (-1);
1817 }
1818
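     /*
      * qla_free_rcv_std
      *      Unload and destroy the receive DMA maps and free the attached
      *      mbufs.
      */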
1819 static void
1820 qla_free_rcv_std(qla_host_t *ha)
1821 {
1822         int             i, r;
1823         qla_rx_buf_t    *rxb;
1824
1825         for (r = 0; r < ha->hw.num_rds_rings; r++) {
1826                 for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
1827                         rxb = &ha->rx_ring[r].rx_buf[i];
1828                         if (rxb->m_head != NULL) {
1829                                 bus_dmamap_unload(ha->rx_tag, rxb->map);
1830                                 bus_dmamap_destroy(ha->rx_tag, rxb->map);
1831                                 m_freem(rxb->m_head);
1832                                 rxb->m_head = NULL;
1833                         }
1834                 }
1835         }
1836         return;
1837 }
1838
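     /*
      * qla_alloc_rcv_bufs
      *      Create the receive DMA tag, reset the status ring state and
      *      allocate the receive buffers.
      */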
1839 static int
1840 qla_alloc_rcv_bufs(qla_host_t *ha)
1841 {
1842         int             i, ret = 0;
1843
1844         if (bus_dma_tag_create(NULL,    /* parent */
1845                         1, 0,    /* alignment, bounds */
1846                         BUS_SPACE_MAXADDR,       /* lowaddr */
1847                         BUS_SPACE_MAXADDR,       /* highaddr */
1848                         NULL, NULL,      /* filter, filterarg */
1849                         MJUM9BYTES,     /* maxsize */
1850                         1,        /* nsegments */
1851                         MJUM9BYTES,        /* maxsegsize */
1852                         BUS_DMA_ALLOCNOW,        /* flags */
1853                         NULL,    /* lockfunc */
1854                         NULL,    /* lockfuncarg */
1855                         &ha->rx_tag)) {
1856
1857                 device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
1858                         __func__);
1859
1860                 return (ENOMEM);
1861         }
1862
1863         bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS));
1864
1865         for (i = 0; i < ha->hw.num_sds_rings; i++) {
1866                 ha->hw.sds[i].sdsr_next = 0;
1867                 ha->hw.sds[i].rxb_free = NULL;
1868                 ha->hw.sds[i].rx_free = 0;
1869         }
1870
1871         ret = qla_alloc_rcv_std(ha);
1872
1873         return (ret);
1874 }
1875
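     /*
      * qla_free_rcv_bufs
      *      Free the receive buffers, destroy the receive DMA tag and reset
      *      the receive/status ring state.
      */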
1876 static void
1877 qla_free_rcv_bufs(qla_host_t *ha)
1878 {
1879         int             i;
1880
1881         qla_free_rcv_std(ha);
1882
1883         if (ha->rx_tag != NULL) {
1884                 bus_dma_tag_destroy(ha->rx_tag);
1885                 ha->rx_tag = NULL;
1886         }
1887
1888         bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS));
1889
1890         for (i = 0; i < ha->hw.num_sds_rings; i++) {
1891                 ha->hw.sds[i].sdsr_next = 0;
1892                 ha->hw.sds[i].rxb_free = NULL;
1893                 ha->hw.sds[i].rx_free = 0;
1894         }
1895
1896         return;
1897 }
1898
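     /*
      * ql_get_mbuf
      *      Attach a receive mbuf to the given receive buffer. If nmp is NULL
      *      a new cluster (9K jumbo or standard, depending on enable_9kb) is
      *      allocated; the buffer is then aligned and loaded into the
      *      buffer's DMA map.
      */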
1899 int
1900 ql_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp)
1901 {
1902         register struct mbuf *mp = nmp;
1903         struct ifnet            *ifp;
1904         int                     ret = 0;
1905         uint32_t                offset;
1906         bus_dma_segment_t       segs[1];
1907         int                     nsegs, mbuf_size;
1908
1909         QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1910
1911         ifp = ha->ifp;
1912
1913         if (ha->hw.enable_9kb)
1914                 mbuf_size = MJUM9BYTES;
1915         else
1916                 mbuf_size = MCLBYTES;
1917
1918         if (mp == NULL) {
1919
1920                 if (QL_ERR_INJECT(ha, INJCT_M_GETCL_M_GETJCL_FAILURE))
1921                         return(-1);
1922
1923                 if (ha->hw.enable_9kb)
1924                         mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, mbuf_size);
1925                 else
1926                         mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1927
1928                 if (mp == NULL) {
1929                         ha->err_m_getcl++;
1930                         ret = ENOBUFS;
1931                         device_printf(ha->pci_dev,
1932                                         "%s: m_getcl failed\n", __func__);
1933                         goto exit_ql_get_mbuf;
1934                 }
1935                 mp->m_len = mp->m_pkthdr.len = mbuf_size;
1936         } else {
1937                 mp->m_len = mp->m_pkthdr.len = mbuf_size;
1938                 mp->m_data = mp->m_ext.ext_buf;
1939                 mp->m_next = NULL;
1940         }
1941
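             /*
              * The adjustment below keeps the receive buffer start 8-byte
              * aligned, presumably a requirement of the receive DMA engine.
              */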
1942         offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL);
1943         if (offset) {
1944                 offset = 8 - offset;
1945                 m_adj(mp, offset);
1946         }
1947
1948         /*
1949          * Using memory from the mbuf cluster pool, invoke the bus_dma
1950          * machinery to arrange the memory mapping.
1951          */
1952         ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, rxb->map,
1953                         mp, segs, &nsegs, BUS_DMA_NOWAIT);
1954         rxb->paddr = ret ? 0 : segs[0].ds_addr; /* segs[] is valid only on success */
1955
1956         if (ret || !rxb->paddr || (nsegs != 1)) {
1957                 m_free(mp);
1958                 rxb->m_head = NULL;
1959                 device_printf(ha->pci_dev,
1960                         "%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
1961                         __func__, ret, (long long unsigned int)rxb->paddr,
1962                         nsegs);
1963                 ret = -1;
1964                 goto exit_ql_get_mbuf;
1965         }
1966         rxb->m_head = mp;
1967         bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD);
1968
1969 exit_ql_get_mbuf:
1970         QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret));
1971         return (ret);
1972 }
1973
1974
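     /*
      * qla_get_peer
      *      Locate the other PCI function of this adapter (same slot and
      *      device id) so reset/recovery can be coordinated with it.
      */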
1975 static void
1976 qla_get_peer(qla_host_t *ha)
1977 {
1978         device_t *peers;
1979         int count, i, slot;
1980         int my_slot = pci_get_slot(ha->pci_dev);
1981
1982         if (device_get_children(device_get_parent(ha->pci_dev), &peers, &count))
1983                 return;
1984
1985         for (i = 0; i < count; i++) {
1986                 slot = pci_get_slot(peers[i]);
1987
1988                 if ((slot >= 0) && (slot == my_slot) &&
1989                         (pci_get_device(peers[i]) ==
1990                                 pci_get_device(ha->pci_dev))) {
1991                         if (ha->pci_dev != peers[i]) 
1992                                 ha->peer_dev = peers[i];
1993                 }
1994         }
1995 }
1996
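     /*
      * qla_send_msg_to_peer
      *      Post a reset/ack message in the peer function's softc; the peer
      *      polls msg_from_peer during error recovery.
      */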
1997 static void
1998 qla_send_msg_to_peer(qla_host_t *ha, uint32_t msg_to_peer)
1999 {
2000         qla_host_t *ha_peer;
2001
2002         if (ha->peer_dev) {
2003                 if ((ha_peer = device_get_softc(ha->peer_dev)) != NULL) {
2004
2005                         ha_peer->msg_from_peer = msg_to_peer;
2006                 }
2007         }
2008 }
2009
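     /*
      * qla_error_recovery
      *      Taskqueue handler for fatal error recovery: quiesce the
      *      interface, coordinate the reset with the peer PCI function,
      *      optionally capture a minidump, re-initialize the hardware and,
      *      if the interface was up, re-allocate its buffers and bring it
      *      back up.
      */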
2010 static void
2011 qla_error_recovery(void *context, int pending)
2012 {
2013         qla_host_t *ha = context;
2014         uint32_t msecs_100 = 100;
2015         struct ifnet *ifp = ha->ifp;
2016         int i = 0;
2017
2018         device_printf(ha->pci_dev, "%s: \n", __func__);
2019         ha->hw.imd_compl = 1;
2020
2021         if (QLA_LOCK(ha, __func__, -1, 0) != 0)
2022                 return;
2023
2024         device_printf(ha->pci_dev, "%s: enter\n", __func__);
2025
2026         if (ha->qla_interface_up) {
2027
2028                 qla_mdelay(__func__, 300);
2029
2030                 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2031
2032                 for (i = 0; i < ha->hw.num_sds_rings; i++) {
2033                         qla_tx_fp_t *fp;
2034
2035                         fp = &ha->tx_fp[i];
2036
2037                         if (fp == NULL)
2038                                 continue;
2039
2040                         if (fp->tx_br != NULL) {
2041                                 mtx_lock(&fp->tx_mtx);
2042                                 mtx_unlock(&fp->tx_mtx);
2043                         }
2044                 }
2045         }
2046
2047
2048         qla_drain_fp_taskqueues(ha);
2049
2050         if ((ha->pci_func & 0x1) == 0) {
2051
2052                 if (!ha->msg_from_peer) {
2053                         qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET);
2054
2055                         while ((ha->msg_from_peer != QL_PEER_MSG_ACK) &&
2056                                 msecs_100--)
2057                                 qla_mdelay(__func__, 100);
2058                 }
2059
2060                 ha->msg_from_peer = 0;
2061
2062                 if (ha->enable_minidump)
2063                         ql_minidump(ha);
2064
2065                 (void) ql_init_hw(ha);
2066
2067                 if (ha->qla_interface_up) {
2068                         qla_free_xmt_bufs(ha);
2069                         qla_free_rcv_bufs(ha);
2070                 }
2071
2072                 qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK);
2073
2074         } else {
2075                 if (ha->msg_from_peer == QL_PEER_MSG_RESET) {
2076
2077                         ha->msg_from_peer = 0;
2078
2079                         qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK);
2080                 } else {
2081                         qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET);
2082                 }
2083
2084                 while ((ha->msg_from_peer != QL_PEER_MSG_ACK)  && msecs_100--)
2085                         qla_mdelay(__func__, 100);
2086                 ha->msg_from_peer = 0;
2087
2088                 (void) ql_init_hw(ha);
2089
2090                 qla_mdelay(__func__, 1000);
2091
2092                 if (ha->qla_interface_up) {
2093                         qla_free_xmt_bufs(ha);
2094                         qla_free_rcv_bufs(ha);
2095                 }
2096         }
2097
2098         if (ha->qla_interface_up) {
2099
2100                 if (qla_alloc_xmt_bufs(ha) != 0) {
2101                         goto qla_error_recovery_exit;
2102                 }
2103                 qla_confirm_9kb_enable(ha);
2104
2105                 if (qla_alloc_rcv_bufs(ha) != 0) {
2106                         goto qla_error_recovery_exit;
2107                 }
2108
2109                 ha->stop_rcv = 0;
2110
2111                 if (ql_init_hw_if(ha) == 0) {
2112                         ifp = ha->ifp;
2113                         ifp->if_drv_flags |= IFF_DRV_RUNNING;
2114                         ha->qla_watchdog_pause = 0;
2115                 }
2116         } else
2117                 ha->qla_watchdog_pause = 0;
2118
2119 qla_error_recovery_exit:
2120
2121         device_printf(ha->pci_dev, "%s: exit\n", __func__);
2122
2123         QLA_UNLOCK(ha, __func__);
2124
2125         callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
2126                 qla_watchdog, ha);
2127         return;
2128 }
2129
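     /*
      * qla_async_event
      *      Taskqueue handler for asynchronous hardware events.
      */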
2130 static void
2131 qla_async_event(void *context, int pending)
2132 {
2133         qla_host_t *ha = context;
2134
2135         if (QLA_LOCK(ha, __func__, -1, 0) != 0)
2136                 return;
2137
2138         if (ha->async_event) {
2139                 ha->async_event = 0;
2140                 qla_hw_async_event(ha);
2141         }
2142
2143         QLA_UNLOCK(ha, __func__);
2144
2145         return;
2146 }
2147
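     /*
      * qla_stats
      *      Taskqueue handler that retrieves the hardware statistics.
      */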
2148 static void
2149 qla_stats(void *context, int pending)
2150 {
2151         qla_host_t *ha;
2152
2153         ha = context;
2154
2155         ql_get_stats(ha);
2156         return;
2157 }
2158