2 * Copyright (c) 2013-2014 Qlogic Corporation
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * and ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
30 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
40 #include "ql_inline.h"
47 * Some PCI Configuration Space Related Defines
50 #ifndef PCI_VENDOR_QLOGIC
51 #define PCI_VENDOR_QLOGIC 0x1077
54 #ifndef PCI_PRODUCT_QLOGIC_ISP8030
55 #define PCI_PRODUCT_QLOGIC_ISP8030 0x8030
58 #define PCI_QLOGIC_ISP8030 \
59 ((PCI_PRODUCT_QLOGIC_ISP8030 << 16) | PCI_VENDOR_QLOGIC)
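/*
 * Combined 32-bit match key used by qla_pci_probe(): the device id in the
 * upper 16 bits and the vendor id in the lower 16 bits, i.e.
 * (0x8030 << 16) | 0x1077 == 0x80301077.
 */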
64 static int qla_alloc_parent_dma_tag(qla_host_t *ha);
65 static void qla_free_parent_dma_tag(qla_host_t *ha);
66 static int qla_alloc_xmt_bufs(qla_host_t *ha);
67 static void qla_free_xmt_bufs(qla_host_t *ha);
68 static int qla_alloc_rcv_bufs(qla_host_t *ha);
69 static void qla_free_rcv_bufs(qla_host_t *ha);
70 static void qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb);
72 static void qla_init_ifnet(device_t dev, qla_host_t *ha);
73 static int qla_sysctl_get_stats(SYSCTL_HANDLER_ARGS);
74 static int qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS);
75 static void qla_release(qla_host_t *ha);
76 static void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
78 static void qla_stop(qla_host_t *ha);
79 static int qla_send(qla_host_t *ha, struct mbuf **m_headp);
80 static void qla_tx_done(void *context, int pending);
81 static void qla_get_peer(qla_host_t *ha);
82 static void qla_error_recovery(void *context, int pending);
85 * Hooks to the Operating System
87 static int qla_pci_probe (device_t);
88 static int qla_pci_attach (device_t);
89 static int qla_pci_detach (device_t);
91 static void qla_init(void *arg);
92 static int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
93 static int qla_media_change(struct ifnet *ifp);
94 static void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
95 static void qla_start(struct ifnet *ifp);
97 static device_method_t qla_pci_methods[] = {
98 /* Device interface */
99 DEVMETHOD(device_probe, qla_pci_probe),
100 DEVMETHOD(device_attach, qla_pci_attach),
101 DEVMETHOD(device_detach, qla_pci_detach),
105 static driver_t qla_pci_driver = {
106 "ql", qla_pci_methods, sizeof (qla_host_t),
109 static devclass_t qla83xx_devclass;
111 DRIVER_MODULE(qla83xx, pci, qla_pci_driver, qla83xx_devclass, 0, 0);
113 MODULE_DEPEND(qla83xx, pci, 1, 1, 1);
114 MODULE_DEPEND(qla83xx, ether, 1, 1, 1);
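/*
 * DRIVER_MODULE() publishes the "ql" driver (qla_pci_driver) on the pci bus
 * under the module name qla83xx using qla83xx_devclass; the MODULE_DEPEND()
 * lines record run-time dependencies on the pci and ether kernel code.
 */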
116 MALLOC_DEFINE(M_QLA83XXBUF, "qla83xxbuf", "Buffers for qla83xx driver");
118 #define QL_STD_REPLENISH_THRES 0
119 #define QL_JUMBO_REPLENISH_THRES 32
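/*
 * Receive-ring replenish thresholds.  ha->std_replenish starts at the
 * standard-MTU value and is switched to QL_JUMBO_REPLENISH_THRES by the
 * SIOCSIFMTU handler below when the MTU is raised above ETHERMTU.
 */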
122 static char dev_str[64];
125 * Name: qla_pci_probe
126 * Function: Validate the PCI device to be a QLA83XX (ISP8030) device
129 qla_pci_probe(device_t dev)
131 switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
132 case PCI_QLOGIC_ISP8030:
133 snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d",
134 "Qlogic ISP 83xx PCI CNA Adapter-Ethernet Function",
135 QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
137 device_set_desc(dev, dev_str);
144 printf("%s: %s\n", __func__, dev_str);
146 return (BUS_PROBE_DEFAULT);
150 qla_add_sysctls(qla_host_t *ha)
152 device_t dev = ha->pci_dev;
154 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
155 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
156 OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
158 qla_sysctl_get_stats, "I", "Statistics");
160 SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
161 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
162 OID_AUTO, "fw_version", CTLFLAG_RD,
163 ha->fw_ver_str, 0, "firmware version");
165 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
166 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
167 OID_AUTO, "link_status", CTLTYPE_INT | CTLFLAG_RW,
169 qla_sysctl_get_link_status, "I", "Link Status");
172 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
173 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
174 OID_AUTO, "debug", CTLFLAG_RW,
175 &ha->dbg_level, ha->dbg_level, "Debug Level");
177 ha->std_replenish = QL_STD_REPLENISH_THRES;
178 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
179 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
180 OID_AUTO, "std_replenish", CTLFLAG_RW,
181 &ha->std_replenish, ha->std_replenish,
182 "Threshold for Replenishing Standard Frames");
184 SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
185 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
186 OID_AUTO, "ipv4_lro",
187 CTLFLAG_RD, &ha->ipv4_lro,
188 "number of ipv4 lro completions");
190 SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
191 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
192 OID_AUTO, "ipv6_lro",
193 CTLFLAG_RD, &ha->ipv6_lro,
194 "number of ipv6 lro completions");
196 SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
197 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
198 OID_AUTO, "tx_tso_frames",
199 CTLFLAG_RD, &ha->tx_tso_frames,
200 "number of Tx TSO Frames");
202 SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
203 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
204 OID_AUTO, "hw_vlan_tx_frames",
205 CTLFLAG_RD, &ha->hw_vlan_tx_frames,
206 "number of Tx VLAN Frames");
212 qla_watchdog(void *arg)
214 qla_host_t *ha = arg;
218 qla_hw_tx_cntxt_t *hw_tx_cntxt;
223 if (ha->flags.qla_watchdog_exit) {
224 ha->qla_watchdog_exited = 1;
227 ha->qla_watchdog_exited = 0;
229 if (!ha->flags.qla_watchdog_pause) {
230 if (ql_hw_check_health(ha) || ha->qla_initiate_recovery ||
231 (ha->msg_from_peer == QL_PEER_MSG_RESET)) {
232 ha->qla_watchdog_paused = 1;
233 ha->flags.qla_watchdog_pause = 1;
234 ha->qla_initiate_recovery = 0;
236 taskqueue_enqueue(ha->err_tq, &ha->err_task);
238 for (i = 0; i < ha->hw.num_tx_rings; i++) {
239 hw_tx_cntxt = &hw->tx_cntxt[i];
240 if (qla_le32_to_host(*(hw_tx_cntxt->tx_cons)) !=
241 hw_tx_cntxt->txr_comp) {
242 taskqueue_enqueue(ha->tx_tq,
248 if ((ifp->if_snd.ifq_head != NULL) && QL_RUNNING(ifp)) {
249 taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
251 ha->qla_watchdog_paused = 0;
255 ha->qla_watchdog_paused = 1;
258 ha->watchdog_ticks = (ha->watchdog_ticks + 1) % 1000;
259 callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
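/*
 * The watchdog re-arms itself every QLA_WATCHDOG_CALLOUT_TICKS.  While not
 * paused it checks adapter health, kicks the error-recovery task on failure
 * or on a reset request from the peer function, and schedules the transmit
 * completion task whenever a tx ring has outstanding completions or the
 * send queue has frames pending.
 */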
264 * Name: qla_pci_attach
265 * Function: Attaches the device to the operating system
268 qla_pci_attach(device_t dev)
270 qla_host_t *ha = NULL;
274 QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));
276 if ((ha = device_get_softc(dev)) == NULL) {
277 device_printf(dev, "cannot get softc\n");
281 memset(ha, 0, sizeof (qla_host_t));
283 if (pci_get_device(dev) != PCI_PRODUCT_QLOGIC_ISP8030) {
284 device_printf(dev, "device is not ISP8030\n");
288 ha->pci_func = pci_get_function(dev);
292 pci_enable_busmaster(dev);
294 ha->reg_rid = PCIR_BAR(0);
295 ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
298 if (ha->pci_reg == NULL) {
299 device_printf(dev, "unable to map any ports\n");
300 goto qla_pci_attach_err;
303 rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
306 mtx_init(&ha->hw_lock, "qla83xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
308 mtx_init(&ha->tx_lock, "qla83xx_tx_lock", MTX_NETWORK_LOCK, MTX_DEF);
311 ql_hw_add_sysctls(ha);
313 ha->flags.lock_init = 1;
315 ha->reg_rid1 = PCIR_BAR(2);
316 ha->pci_reg1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
317 &ha->reg_rid1, RF_ACTIVE);
319 ha->msix_count = pci_msix_count(dev);
321 if (ha->msix_count < (ha->hw.num_sds_rings + 1)) {
322 device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
324 goto qla_pci_attach_err;
327 QL_DPRINT2(ha, (dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
328 " msix_count 0x%x pci_reg %p\n", __func__, ha,
329 ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg));
331 ha->msix_count = ha->hw.num_sds_rings + 1;
333 if (pci_alloc_msix(dev, &ha->msix_count)) {
334 device_printf(dev, "%s: pci_alloc_msi[%d] failed\n", __func__,
337 goto qla_pci_attach_err;
341 ha->mbx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
343 (RF_ACTIVE | RF_SHAREABLE));
344 if (ha->mbx_irq == NULL) {
345 device_printf(dev, "could not allocate mbx interrupt\n");
346 goto qla_pci_attach_err;
348 if (bus_setup_intr(dev, ha->mbx_irq, (INTR_TYPE_NET | INTR_MPSAFE),
349 NULL, ql_mbx_isr, ha, &ha->mbx_handle)) {
350 device_printf(dev, "could not setup mbx interrupt\n");
351 goto qla_pci_attach_err;
355 for (i = 0; i < ha->hw.num_sds_rings; i++) {
356 ha->irq_vec[i].sds_idx = i;
357 ha->irq_vec[i].ha = ha;
358 ha->irq_vec[i].irq_rid = 2 + i;
360 ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
361 &ha->irq_vec[i].irq_rid,
362 (RF_ACTIVE | RF_SHAREABLE));
364 if (ha->irq_vec[i].irq == NULL) {
365 device_printf(dev, "could not allocate interrupt\n");
366 goto qla_pci_attach_err;
368 if (bus_setup_intr(dev, ha->irq_vec[i].irq,
369 (INTR_TYPE_NET | INTR_MPSAFE),
370 NULL, ql_isr, &ha->irq_vec[i],
371 &ha->irq_vec[i].handle)) {
372 device_printf(dev, "could not setup interrupt\n");
373 goto qla_pci_attach_err;
377 printf("%s: mp_ncpus %d sds %d rds %d msi-x %d\n", __func__, mp_ncpus,
378 ha->hw.num_sds_rings, ha->hw.num_rds_rings, ha->msix_count);
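/*
 * Interrupt layout: one MSI-X vector is set up for mailbox completions
 * (ql_mbx_isr) and one per status/receive (SDS) ring (ql_isr); the per-ring
 * resource ids start at 2 (irq_rid = 2 + i above), which is why msix_count
 * is sized to num_sds_rings + 1.
 */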
380 /* initialize hardware */
381 if (ql_init_hw(ha)) {
382 device_printf(dev, "%s: ql_init_hw failed\n", __func__);
383 goto qla_pci_attach_err;
386 device_printf(dev, "%s: firmware[%d.%d.%d.%d]\n", __func__,
387 ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
389 snprintf(ha->fw_ver_str, sizeof(ha->fw_ver_str), "%d.%d.%d.%d",
390 ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
393 ql_read_mac_addr(ha);
395 /* allocate parent dma tag */
396 if (qla_alloc_parent_dma_tag(ha)) {
397 device_printf(dev, "%s: qla_alloc_parent_dma_tag failed\n",
399 goto qla_pci_attach_err;
402 /* alloc all dma buffers */
403 if (ql_alloc_dma(ha)) {
404 device_printf(dev, "%s: ql_alloc_dma failed\n", __func__);
405 goto qla_pci_attach_err;
409 /* create the O.S. ethernet interface */
410 qla_init_ifnet(dev, ha);
412 ha->flags.qla_watchdog_active = 1;
413 ha->flags.qla_watchdog_pause = 1;
416 TASK_INIT(&ha->tx_task, 0, qla_tx_done, ha);
417 ha->tx_tq = taskqueue_create_fast("qla_txq", M_NOWAIT,
418 taskqueue_thread_enqueue, &ha->tx_tq);
419 taskqueue_start_threads(&ha->tx_tq, 1, PI_NET, "%s txq",
420 device_get_nameunit(ha->pci_dev));
422 callout_init(&ha->tx_callout, TRUE);
423 ha->flags.qla_callout_init = 1;
425 /* create ioctl device interface */
426 if (ql_make_cdev(ha)) {
427 device_printf(dev, "%s: ql_make_cdev failed\n", __func__);
428 goto qla_pci_attach_err;
431 callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
434 TASK_INIT(&ha->err_task, 0, qla_error_recovery, ha);
435 ha->err_tq = taskqueue_create_fast("qla_errq", M_NOWAIT,
436 taskqueue_thread_enqueue, &ha->err_tq);
437 taskqueue_start_threads(&ha->err_tq, 1, PI_NET, "%s errq",
438 device_get_nameunit(ha->pci_dev));
440 QL_DPRINT2(ha, (dev, "%s: exit 0\n", __func__));
447 QL_DPRINT2(ha, (dev, "%s: exit ENXIO\n", __func__));
452 * Name: qla_pci_detach
453 * Function: Unhooks the device from the operating system
456 qla_pci_detach(device_t dev)
458 qla_host_t *ha = NULL;
461 QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));
463 if ((ha = device_get_softc(dev)) == NULL) {
464 device_printf(dev, "cannot get softc\n");
470 (void)QLA_LOCK(ha, __func__, 0);
472 QLA_UNLOCK(ha, __func__);
476 QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));
482 * SYSCTL Related Callbacks
485 qla_sysctl_get_stats(SYSCTL_HANDLER_ARGS)
490 err = sysctl_handle_int(oidp, &ret, 0, req);
492 if (err || !req->newptr)
496 ha = (qla_host_t *)arg1;
502 qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS)
507 err = sysctl_handle_int(oidp, &ret, 0, req);
509 if (err || !req->newptr)
513 ha = (qla_host_t *)arg1;
514 ql_hw_link_status(ha);
521 * Function: Releases the resources allocated for the device
524 qla_release(qla_host_t *ha)
532 taskqueue_drain(ha->err_tq, &ha->err_task);
533 taskqueue_free(ha->err_tq);
537 taskqueue_drain(ha->tx_tq, &ha->tx_task);
538 taskqueue_free(ha->tx_tq);
543 if (ha->flags.qla_watchdog_active) {
544 ha->flags.qla_watchdog_exit = 1;
546 while (ha->qla_watchdog_exited == 0)
547 qla_mdelay(__func__, 1);
550 if (ha->flags.qla_callout_init)
551 callout_stop(&ha->tx_callout);
554 ether_ifdetach(ha->ifp);
557 qla_free_parent_dma_tag(ha);
560 (void)bus_teardown_intr(dev, ha->mbx_irq, ha->mbx_handle);
563 (void) bus_release_resource(dev, SYS_RES_IRQ, ha->mbx_irq_rid,
566 for (i = 0; i < ha->hw.num_sds_rings; i++) {
568 if (ha->irq_vec[i].handle) {
569 (void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
570 ha->irq_vec[i].handle);
573 if (ha->irq_vec[i].irq) {
574 (void)bus_release_resource(dev, SYS_RES_IRQ,
575 ha->irq_vec[i].irq_rid,
581 pci_release_msi(dev);
583 if (ha->flags.lock_init) {
584 mtx_destroy(&ha->tx_lock);
585 mtx_destroy(&ha->hw_lock);
589 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
593 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid1,
598 * DMA Related Functions
602 qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
604 *((bus_addr_t *)arg) = 0;
607 printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
611 *((bus_addr_t *)arg) = segs[0].ds_addr;
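/*
 * Callback used with bus_dmamap_load(): the caller passes the address of a
 * bus_addr_t as 'arg'; it is cleared up front and only set to the first
 * segment's physical address on success, so callers such as
 * ql_alloc_dmabuf() can treat a zero address as a load failure.
 */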
617 ql_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
625 QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));
627 ret = bus_dma_tag_create(
628 ha->parent_tag,/* parent */
630 ((bus_size_t)(1ULL << 32)),/* boundary */
631 BUS_SPACE_MAXADDR, /* lowaddr */
632 BUS_SPACE_MAXADDR, /* highaddr */
633 NULL, NULL, /* filter, filterarg */
634 dma_buf->size, /* maxsize */
636 dma_buf->size, /* maxsegsize */
638 NULL, NULL, /* lockfunc, lockarg */
642 device_printf(dev, "%s: could not create dma tag\n", __func__);
643 goto ql_alloc_dmabuf_exit;
645 ret = bus_dmamem_alloc(dma_buf->dma_tag,
646 (void **)&dma_buf->dma_b,
647 (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
650 bus_dma_tag_destroy(dma_buf->dma_tag);
651 device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__);
652 goto ql_alloc_dmabuf_exit;
655 ret = bus_dmamap_load(dma_buf->dma_tag,
660 &b_addr, BUS_DMA_NOWAIT);
662 if (ret || !b_addr) {
663 bus_dma_tag_destroy(dma_buf->dma_tag);
664 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
667 goto ql_alloc_dmabuf_exit;
670 dma_buf->dma_addr = b_addr;
672 ql_alloc_dmabuf_exit:
673 QL_DPRINT2(ha, (dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n",
674 __func__, ret, (void *)dma_buf->dma_tag,
675 (void *)dma_buf->dma_map, (void *)dma_buf->dma_b,
682 ql_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
684 bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
685 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
686 bus_dma_tag_destroy(dma_buf->dma_tag);
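/*
 * Teardown above mirrors ql_alloc_dmabuf() in reverse: unload the mapping,
 * free the DMA memory, then destroy the per-buffer tag.
 */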
690 qla_alloc_parent_dma_tag(qla_host_t *ha)
698 * Allocate parent DMA Tag
700 ret = bus_dma_tag_create(
701 bus_get_dma_tag(dev), /* parent */
702 1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
703 BUS_SPACE_MAXADDR, /* lowaddr */
704 BUS_SPACE_MAXADDR, /* highaddr */
705 NULL, NULL, /* filter, filterarg */
706 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
708 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
710 NULL, NULL, /* lockfunc, lockarg */
714 device_printf(dev, "%s: could not create parent dma tag\n",
719 ha->flags.parent_tag = 1;
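/*
 * The parent tag created here is passed as the parent of every per-buffer
 * tag built in ql_alloc_dmabuf(), so its address limits and 4GB boundary
 * apply to all descriptor and buffer allocations derived from it.
 */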
725 qla_free_parent_dma_tag(qla_host_t *ha)
727 if (ha->flags.parent_tag) {
728 bus_dma_tag_destroy(ha->parent_tag);
729 ha->flags.parent_tag = 0;
734 * Name: qla_init_ifnet
735 * Function: Creates the Network Device Interface and Registers it with the O.S.
739 qla_init_ifnet(device_t dev, qla_host_t *ha)
743 QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));
745 ifp = ha->ifp = if_alloc(IFT_ETHER);
748 panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));
750 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
752 ifp->if_baudrate = IF_Gbps(10);
753 ifp->if_capabilities = IFCAP_LINKSTATE;
755 ifp->if_init = qla_init;
757 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
758 ifp->if_ioctl = qla_ioctl;
759 ifp->if_start = qla_start;
761 IFQ_SET_MAXLEN(&ifp->if_snd, qla_get_ifq_snd_maxlen(ha));
762 ifp->if_snd.ifq_drv_maxlen = qla_get_ifq_snd_maxlen(ha);
763 IFQ_SET_READY(&ifp->if_snd);
765 ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
767 ether_ifattach(ifp, qla_get_mac_addr(ha));
769 ifp->if_capabilities = IFCAP_HWCSUM |
773 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
774 ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
776 ifp->if_capenable = ifp->if_capabilities;
778 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
780 ifmedia_init(&ha->media, IFM_IMASK, qla_media_change, qla_media_status);
782 ifmedia_add(&ha->media, (IFM_ETHER | qla_get_optics(ha) | IFM_FDX), 0,
784 ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);
786 ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));
788 QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));
794 qla_init_locked(qla_host_t *ha)
796 struct ifnet *ifp = ha->ifp;
800 if (qla_alloc_xmt_bufs(ha) != 0)
803 if (qla_alloc_rcv_bufs(ha) != 0)
806 bcopy(IF_LLADDR(ha->ifp), ha->hw.mac_addr, ETHER_ADDR_LEN);
808 ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_TSO;
810 ha->flags.stop_rcv = 0;
811 if (ql_init_hw_if(ha) == 0) {
813 ifp->if_drv_flags |= IFF_DRV_RUNNING;
814 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
815 ha->flags.qla_watchdog_pause = 0;
816 ha->hw_vlan_tx_frames = 0;
817 ha->tx_tso_frames = 0;
828 ha = (qla_host_t *)arg;
830 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
832 (void)QLA_LOCK(ha, __func__, 0);
834 QLA_UNLOCK(ha, __func__);
836 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
840 qla_set_multi(qla_host_t *ha, uint32_t add_multi)
842 uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN];
843 struct ifmultiaddr *ifma;
845 struct ifnet *ifp = ha->ifp;
850 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
852 if (ifma->ifma_addr->sa_family != AF_LINK)
855 if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS)
858 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
859 &mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN);
864 if_maddr_runlock(ifp);
866 if (QLA_LOCK(ha, __func__, 1) == 0) {
867 ret = ql_hw_set_multi(ha, mta, mcnt, add_multi);
868 QLA_UNLOCK(ha, __func__);
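/*
 * qla_set_multi() walks the interface's multicast list (AF_LINK entries
 * only), copies up to Q8_MAX_NUM_MULTICAST_ADDRS MAC addresses into a flat
 * array, and then programs them into the hardware filter with a single
 * ql_hw_set_multi() call, adding or deleting them according to 'add_multi'.
 */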
875 qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
878 struct ifreq *ifr = (struct ifreq *)data;
879 struct ifaddr *ifa = (struct ifaddr *)data;
882 ha = (qla_host_t *)ifp->if_softc;
886 QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n",
889 if (ifa->ifa_addr->sa_family == AF_INET) {
890 ifp->if_flags |= IFF_UP;
891 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
892 (void)QLA_LOCK(ha, __func__, 0);
894 QLA_UNLOCK(ha, __func__);
896 QL_DPRINT4(ha, (ha->pci_dev,
897 "%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
899 ntohl(IA_SIN(ifa)->sin_addr.s_addr)));
901 arp_ifinit(ifp, ifa);
903 ether_ioctl(ifp, cmd, data);
908 QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n",
911 if (ifr->ifr_mtu > QLA_MAX_MTU) {
914 (void) QLA_LOCK(ha, __func__, 0);
915 ifp->if_mtu = ifr->ifr_mtu;
917 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
918 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
919 ret = ql_set_max_mtu(ha, ha->max_frame_size,
920 ha->hw.rcv_cntxt_id);
923 if (ifp->if_mtu > ETHERMTU)
924 ha->std_replenish = QL_JUMBO_REPLENISH_THRES;
926 ha->std_replenish = QL_STD_REPLENISH_THRES;
929 QLA_UNLOCK(ha, __func__);
938 QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n",
941 (void)QLA_LOCK(ha, __func__, 0);
943 if (ifp->if_flags & IFF_UP) {
944 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
945 if ((ifp->if_flags ^ ha->if_flags) &
947 ret = ql_set_promisc(ha);
948 } else if ((ifp->if_flags ^ ha->if_flags) &
950 ret = ql_set_allmulti(ha);
954 ha->max_frame_size = ifp->if_mtu +
955 ETHER_HDR_LEN + ETHER_CRC_LEN;
956 ret = ql_set_max_mtu(ha, ha->max_frame_size,
957 ha->hw.rcv_cntxt_id);
960 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
962 ha->if_flags = ifp->if_flags;
965 QLA_UNLOCK(ha, __func__);
969 QL_DPRINT4(ha, (ha->pci_dev,
970 "%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd));
972 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
973 if (qla_set_multi(ha, 1))
979 QL_DPRINT4(ha, (ha->pci_dev,
980 "%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd));
982 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
983 if (qla_set_multi(ha, 0))
990 QL_DPRINT4(ha, (ha->pci_dev,
991 "%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n",
993 ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
998 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1000 QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n",
1003 if (mask & IFCAP_HWCSUM)
1004 ifp->if_capenable ^= IFCAP_HWCSUM;
1005 if (mask & IFCAP_TSO4)
1006 ifp->if_capenable ^= IFCAP_TSO4;
1007 if (mask & IFCAP_VLAN_HWTAGGING)
1008 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1009 if (mask & IFCAP_VLAN_HWTSO)
1010 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1012 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1015 VLAN_CAPABILITIES(ifp);
1020 QL_DPRINT4(ha, (ha->pci_dev, "%s: default (0x%lx)\n",
1022 ret = ether_ioctl(ifp, cmd, data);
1030 qla_media_change(struct ifnet *ifp)
1033 struct ifmedia *ifm;
1036 ha = (qla_host_t *)ifp->if_softc;
1038 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1042 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1045 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
1051 qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1055 ha = (qla_host_t *)ifp->if_softc;
1057 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1059 ifmr->ifm_status = IFM_AVALID;
1060 ifmr->ifm_active = IFM_ETHER;
1062 ql_update_link_state(ha);
1063 if (ha->hw.link_up) {
1064 ifmr->ifm_status |= IFM_ACTIVE;
1065 ifmr->ifm_active |= (IFM_FDX | qla_get_optics(ha));
1068 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit (%s)\n", __func__,
1069 (ha->hw.link_up ? "link_up" : "link_down")));
1075 qla_start(struct ifnet *ifp)
1077 struct mbuf *m_head;
1078 qla_host_t *ha = (qla_host_t *)ifp->if_softc;
1080 QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__));
1082 if (!mtx_trylock(&ha->tx_lock)) {
1083 QL_DPRINT8(ha, (ha->pci_dev,
1084 "%s: mtx_trylock(&ha->tx_lock) failed\n", __func__));
1088 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1091 (ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
1096 if (!ha->watchdog_ticks)
1097 ql_update_link_state(ha);
1099 if (!ha->hw.link_up) {
1100 QL_DPRINT8(ha, (ha->pci_dev, "%s: link down\n", __func__));
1105 while (ifp->if_snd.ifq_head != NULL) {
1106 IF_DEQUEUE(&ifp->if_snd, m_head);
1108 if (m_head == NULL) {
1109 QL_DPRINT8(ha, (ha->pci_dev, "%s: m_head == NULL\n",
1114 if (qla_send(ha, &m_head)) {
1117 QL_DPRINT8(ha, (ha->pci_dev, "%s: PREPEND\n", __func__));
1118 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1119 IF_PREPEND(&ifp->if_snd, m_head);
1122 /* Send a copy of the frame to the BPF listener */
1123 ETHER_BPF_MTAP(ifp, m_head);
1126 QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__));
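/*
 * Legacy if_start transmit path: take tx_lock opportunistically (skip this
 * pass if it is contended), drain if_snd, and hand each mbuf chain to
 * qla_send().  On failure the frame is requeued at the head of if_snd and
 * IFF_DRV_OACTIVE is set so the stack stops handing us packets until
 * descriptors are reclaimed.
 */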
1131 qla_send(qla_host_t *ha, struct mbuf **m_headp)
1133 bus_dma_segment_t segs[QLA_MAX_SEGMENTS];
1138 struct mbuf *m_head = *m_headp;
1139 uint32_t txr_idx = ha->txr_idx;
1141 QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__));
1143 /* check if flowid is set */
1144 if (M_HASHTYPE_GET(m_head) != M_HASHTYPE_NONE)
1145 txr_idx = m_head->m_pkthdr.flowid & (ha->hw.num_tx_rings - 1);
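/*
 * The flowid hash is reduced to a ring index with a simple mask, which
 * assumes ha->hw.num_tx_rings is a power of two (e.g. with 4 rings,
 * flowid & 3 selects ring 0-3), keeping packets of one flow on one ring.
 */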
1147 tx_idx = ha->hw.tx_cntxt[txr_idx].txr_next;
1148 map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map;
1150 ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
1157 QL_DPRINT8(ha, (ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
1158 m_head->m_pkthdr.len));
1160 m = m_defrag(m_head, M_NOWAIT);
1162 ha->err_tx_defrag++;
1165 device_printf(ha->pci_dev,
1166 "%s: m_defrag() = NULL [%d]\n",
1173 if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
1174 segs, &nsegs, BUS_DMA_NOWAIT))) {
1176 ha->err_tx_dmamap_load++;
1178 device_printf(ha->pci_dev,
1179 "%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n",
1180 __func__, ret, m_head->m_pkthdr.len);
1182 if (ret != ENOMEM) {
1191 ha->err_tx_dmamap_load++;
1193 device_printf(ha->pci_dev,
1194 "%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n",
1195 __func__, ret, m_head->m_pkthdr.len);
1197 if (ret != ENOMEM) {
1204 QL_ASSERT(ha, (nsegs != 0), ("qla_send: empty packet"));
1206 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);
1208 if (!(ret = ql_hw_send(ha, segs, nsegs, tx_idx, m_head, txr_idx))) {
1210 ha->tx_ring[txr_idx].count++;
1211 ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head = m_head;
1213 if (ret == EINVAL) {
1220 QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__));
1225 qla_stop(qla_host_t *ha)
1227 struct ifnet *ifp = ha->ifp;
1232 ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
1234 ha->flags.qla_watchdog_pause = 1;
1236 while (!ha->qla_watchdog_paused)
1237 qla_mdelay(__func__, 1);
1239 ha->flags.stop_rcv = 1;
1244 qla_free_xmt_bufs(ha);
1245 qla_free_rcv_bufs(ha);
1251 * Buffer Management Functions for Transmit and Receive Rings
1254 qla_alloc_xmt_bufs(qla_host_t *ha)
1260 if (bus_dma_tag_create(NULL, /* parent */
1261 1, 0, /* alignment, bounds */
1262 BUS_SPACE_MAXADDR, /* lowaddr */
1263 BUS_SPACE_MAXADDR, /* highaddr */
1264 NULL, NULL, /* filter, filterarg */
1265 QLA_MAX_TSO_FRAME_SIZE, /* maxsize */
1266 QLA_MAX_SEGMENTS, /* nsegments */
1267 PAGE_SIZE, /* maxsegsize */
1268 BUS_DMA_ALLOCNOW, /* flags */
1269 NULL, /* lockfunc */
1270 NULL, /* lockfuncarg */
1272 device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n",
1277 for (i = 0; i < ha->hw.num_tx_rings; i++) {
1278 bzero((void *)ha->tx_ring[i].tx_buf,
1279 (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
1282 for (j = 0; j < ha->hw.num_tx_rings; j++) {
1283 for (i = 0; i < NUM_TX_DESCRIPTORS; i++) {
1285 txb = &ha->tx_ring[j].tx_buf[i];
1287 if ((ret = bus_dmamap_create(ha->tx_tag,
1288 BUS_DMA_NOWAIT, &txb->map))) {
1290 ha->err_tx_dmamap_create++;
1291 device_printf(ha->pci_dev,
1292 "%s: bus_dmamap_create failed[%d]\n",
1295 qla_free_xmt_bufs(ha);
1306 * Release mbuf after it is sent on the wire
1309 qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb)
1311 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1313 if (txb->m_head && txb->map) {
1315 bus_dmamap_unload(ha->tx_tag, txb->map);
1317 m_freem(txb->m_head);
1322 bus_dmamap_destroy(ha->tx_tag, txb->map);
1324 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
1328 qla_free_xmt_bufs(qla_host_t *ha)
1332 for (j = 0; j < ha->hw.num_tx_rings; j++) {
1333 for (i = 0; i < NUM_TX_DESCRIPTORS; i++)
1334 qla_clear_tx_buf(ha, &ha->tx_ring[j].tx_buf[i]);
1337 if (ha->tx_tag != NULL) {
1338 bus_dma_tag_destroy(ha->tx_tag);
1342 for (i = 0; i < ha->hw.num_tx_rings; i++) {
1343 bzero((void *)ha->tx_ring[i].tx_buf,
1344 (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
1351 qla_alloc_rcv_std(qla_host_t *ha)
1353 int i, j, k, r, ret = 0;
1355 qla_rx_ring_t *rx_ring;
1357 for (r = 0; r < ha->hw.num_rds_rings; r++) {
1359 rx_ring = &ha->rx_ring[r];
1361 for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
1363 rxb = &rx_ring->rx_buf[i];
1365 ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT,
1369 device_printf(ha->pci_dev,
1370 "%s: dmamap[%d, %d] failed\n",
1373 for (k = 0; k < r; k++) {
1374 for (j = 0; j < NUM_RX_DESCRIPTORS;
1376 rxb = &ha->rx_ring[k].rx_buf[j];
1377 bus_dmamap_destroy(ha->rx_tag,
1382 for (j = 0; j < i; j++) {
1383 bus_dmamap_destroy(ha->rx_tag,
1384 rx_ring->rx_buf[j].map);
1386 goto qla_alloc_rcv_std_err;
1391 qla_init_hw_rcv_descriptors(ha);
1394 for (r = 0; r < ha->hw.num_rds_rings; r++) {
1396 rx_ring = &ha->rx_ring[r];
1398 for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
1399 rxb = &rx_ring->rx_buf[i];
1401 if (!(ret = ql_get_mbuf(ha, rxb, NULL))) {
1403 * set the physical address in the
1404 * corresponding descriptor entry in the
1405 * receive ring/queue for the hba
1407 qla_set_hw_rcv_desc(ha, r, i, rxb->handle,
1409 (rxb->m_head)->m_pkthdr.len);
1411 device_printf(ha->pci_dev,
1412 "%s: ql_get_mbuf [%d, %d] failed\n",
1414 bus_dmamap_destroy(ha->rx_tag, rxb->map);
1415 goto qla_alloc_rcv_std_err;
1421 qla_alloc_rcv_std_err:
1426 qla_free_rcv_std(qla_host_t *ha)
1431 for (r = 0; r < ha->hw.num_rds_rings; r++) {
1432 for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
1433 rxb = &ha->rx_ring[r].rx_buf[i];
1434 if (rxb->m_head != NULL) {
1435 bus_dmamap_unload(ha->rx_tag, rxb->map);
1436 bus_dmamap_destroy(ha->rx_tag, rxb->map);
1437 m_freem(rxb->m_head);
1446 qla_alloc_rcv_bufs(qla_host_t *ha)
1450 if (bus_dma_tag_create(NULL, /* parent */
1451 1, 0, /* alignment, bounds */
1452 BUS_SPACE_MAXADDR, /* lowaddr */
1453 BUS_SPACE_MAXADDR, /* highaddr */
1454 NULL, NULL, /* filter, filterarg */
1455 MJUM9BYTES, /* maxsize */
1457 MJUM9BYTES, /* maxsegsize */
1458 BUS_DMA_ALLOCNOW, /* flags */
1459 NULL, /* lockfunc */
1460 NULL, /* lockfuncarg */
1463 device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
1469 bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS));
1471 for (i = 0; i < ha->hw.num_sds_rings; i++) {
1472 ha->hw.sds[i].sdsr_next = 0;
1473 ha->hw.sds[i].rxb_free = NULL;
1474 ha->hw.sds[i].rx_free = 0;
1477 ret = qla_alloc_rcv_std(ha);
1483 qla_free_rcv_bufs(qla_host_t *ha)
1487 qla_free_rcv_std(ha);
1489 if (ha->rx_tag != NULL) {
1490 bus_dma_tag_destroy(ha->rx_tag);
1494 bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS));
1496 for (i = 0; i < ha->hw.num_sds_rings; i++) {
1497 ha->hw.sds[i].sdsr_next = 0;
1498 ha->hw.sds[i].rxb_free = NULL;
1499 ha->hw.sds[i].rx_free = 0;
1506 ql_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp)
1508 register struct mbuf *mp = nmp;
1512 bus_dma_segment_t segs[1];
1515 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1521 mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1526 device_printf(ha->pci_dev,
1527 "%s: m_getcl failed\n", __func__);
1528 goto exit_ql_get_mbuf;
1530 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
1532 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
1533 mp->m_data = mp->m_ext.ext_buf;
1537 offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL);
1539 offset = 8 - offset;
1544 * Using memory from the mbuf cluster pool, invoke the bus_dma
1545 * machinery to arrange the memory mapping.
1547 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, rxb->map,
1548 mp, segs, &nsegs, BUS_DMA_NOWAIT);
1549 rxb->paddr = segs[0].ds_addr;
1551 if (ret || !rxb->paddr || (nsegs != 1)) {
1554 device_printf(ha->pci_dev,
1555 "%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
1556 __func__, ret, (long long unsigned int)rxb->paddr,
1559 goto exit_ql_get_mbuf;
1562 bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD);
1565 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret));
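/*
 * ql_get_mbuf() either maps the caller-supplied mbuf or allocates a fresh
 * 2KB cluster (m_getcl/MCLBYTES), loads it with bus_dmamap_load_mbuf_sg()
 * expecting exactly one segment, records the physical address in
 * rxb->paddr, and syncs the map for BUS_DMASYNC_PREREAD so the hardware
 * can DMA into it.
 */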
1570 qla_tx_done(void *context, int pending)
1572 qla_host_t *ha = context;
1580 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1581 QL_DPRINT8(ha, (ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
1590 qla_get_peer(qla_host_t *ha)
1594 int my_slot = pci_get_slot(ha->pci_dev);
1596 if (device_get_children(device_get_parent(ha->pci_dev), &peers, &count))
1599 for (i = 0; i < count; i++) {
1600 slot = pci_get_slot(peers[i]);
1602 if ((slot >= 0) && (slot == my_slot) &&
1603 (pci_get_device(peers[i]) ==
1604 pci_get_device(ha->pci_dev))) {
1605 if (ha->pci_dev != peers[i])
1606 ha->peer_dev = peers[i];
1612 qla_send_msg_to_peer(qla_host_t *ha, uint32_t msg_to_peer)
1614 qla_host_t *ha_peer;
1617 if ((ha_peer = device_get_softc(ha->peer_dev)) != NULL) {
1619 ha_peer->msg_from_peer = msg_to_peer;
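/*
 * The 83xx CNA exposes its ethernet functions as separate PCI devices in
 * the same slot; qla_get_peer() scans the siblings under the same parent
 * bus for a matching slot and device id and remembers the partner as
 * ha->peer_dev, and qla_send_msg_to_peer() posts reset/ACK messages into
 * the peer's softc for use during error recovery.
 */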
1625 qla_error_recovery(void *context, int pending)
1627 qla_host_t *ha = context;
1628 uint32_t msecs_100 = 100;
1629 struct ifnet *ifp = ha->ifp;
1631 (void)QLA_LOCK(ha, __func__, 0);
1633 ha->flags.stop_rcv = 1;
1637 ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
1639 QLA_UNLOCK(ha, __func__);
1641 if ((ha->pci_func & 0x1) == 0) {
1643 if (!ha->msg_from_peer) {
1644 qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET);
1646 while ((ha->msg_from_peer != QL_PEER_MSG_ACK) &&
1648 qla_mdelay(__func__, 100);
1651 ha->msg_from_peer = 0;
1655 (void) ql_init_hw(ha);
1656 qla_free_xmt_bufs(ha);
1657 qla_free_rcv_bufs(ha);
1659 qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK);
1662 if (ha->msg_from_peer == QL_PEER_MSG_RESET) {
1664 ha->msg_from_peer = 0;
1666 qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK);
1668 qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET);
1671 while ((ha->msg_from_peer != QL_PEER_MSG_ACK) && msecs_100--)
1672 qla_mdelay(__func__, 100);
1673 ha->msg_from_peer = 0;
1675 (void) ql_init_hw(ha);
1676 qla_free_xmt_bufs(ha);
1677 qla_free_rcv_bufs(ha);
1679 (void)QLA_LOCK(ha, __func__, 0);
1681 if (qla_alloc_xmt_bufs(ha) != 0) {
1682 QLA_UNLOCK(ha, __func__);
1686 if (qla_alloc_rcv_bufs(ha) != 0) {
1687 QLA_UNLOCK(ha, __func__);
1691 ha->flags.stop_rcv = 0;
1692 if (ql_init_hw_if(ha) == 0) {
1694 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1695 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1696 ha->flags.qla_watchdog_pause = 0;
1699 QLA_UNLOCK(ha, __func__);
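/*
 * Error recovery is coordinated between the two functions with
 * QL_PEER_MSG_RESET / QL_PEER_MSG_ACK exchanges; each side waits up to
 * msecs_100 iterations of ~100ms (roughly 10 seconds) for the peer's ACK.
 * Both functions then re-run ql_init_hw(), release and re-allocate their
 * transmit/receive buffers, and bring the interface back up through
 * ql_init_hw_if(), clearing qla_watchdog_pause so the watchdog resumes.
 */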