/*
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011-2013 Qlogic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: qla_os.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "qla_os.h"
#include "qla_reg.h"
#include "qla_hw.h"
#include "qla_def.h"
#include "qla_inline.h"
#include "qla_ver.h"
#include "qla_glbl.h"
#include "qla_dbg.h"
/*
 * Some PCI Configuration Space Related Defines
 */
#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC	0x1077
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP8020
#define PCI_PRODUCT_QLOGIC_ISP8020	0x8020
#endif

#define PCI_QLOGIC_ISP8020 \
	((PCI_PRODUCT_QLOGIC_ISP8020 << 16) | PCI_VENDOR_QLOGIC)
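
/*
 * Note: the probe routine below switches on a single 32-bit value formed as
 * (device id << 16) | vendor id, so PCI_QLOGIC_ISP8020 is pre-composed here
 * in that same layout.
 */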
static int qla_alloc_parent_dma_tag(qla_host_t *ha);
static void qla_free_parent_dma_tag(qla_host_t *ha);
static int qla_alloc_xmt_bufs(qla_host_t *ha);
static void qla_free_xmt_bufs(qla_host_t *ha);
static int qla_alloc_rcv_bufs(qla_host_t *ha);
static void qla_free_rcv_bufs(qla_host_t *ha);

static void qla_init_ifnet(device_t dev, qla_host_t *ha);
static int qla_sysctl_get_stats(SYSCTL_HANDLER_ARGS);
static void qla_release(qla_host_t *ha);
static void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
		int error);
static void qla_stop(qla_host_t *ha);
static int qla_send(qla_host_t *ha, struct mbuf **m_headp);
static void qla_tx_done(void *context, int pending);
static void qla_start(struct ifnet *ifp);
/*
 * Hooks to the Operating Systems
 */
static int qla_pci_probe (device_t);
static int qla_pci_attach (device_t);
static int qla_pci_detach (device_t);

static void qla_init(void *arg);
static int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int qla_media_change(struct ifnet *ifp);
static void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
static device_method_t qla_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, qla_pci_probe),
	DEVMETHOD(device_attach, qla_pci_attach),
	DEVMETHOD(device_detach, qla_pci_detach),
	{ 0, 0 }
};

static driver_t qla_pci_driver = {
	"ql", qla_pci_methods, sizeof (qla_host_t),
};

static devclass_t qla80xx_devclass;

DRIVER_MODULE(qla80xx, pci, qla_pci_driver, qla80xx_devclass, 0, 0);

MODULE_DEPEND(qla80xx, pci, 1, 1, 1);
MODULE_DEPEND(qla80xx, ether, 1, 1, 1);
MALLOC_DEFINE(M_QLA8XXXBUF, "qla80xxbuf", "Buffers for qla80xx driver");

uint32_t std_replenish = 8;
uint32_t jumbo_replenish = 2;
uint32_t rcv_pkt_thres = 128;
uint32_t rcv_pkt_thres_d = 32;
uint32_t snd_pkt_thres = 16;
uint32_t free_pkt_thres = (NUM_TX_DESCRIPTORS / 2);
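
/*
 * The replenish and packet-count thresholds above are plain globals so the
 * receive/transmit paths can read them directly; they are exposed as
 * read-write sysctls in qla_add_sysctls() below.
 */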
static char dev_str[64];
/*
 * Name: qla_pci_probe
 * Function: Validate the PCI device to be a QLA80XX device
 */
static int
qla_pci_probe(device_t dev)
{
	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
	case PCI_QLOGIC_ISP8020:
		snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d",
			"Qlogic ISP 80xx PCI CNA Adapter-Ethernet Function",
			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
			QLA_VERSION_BUILD);
		device_set_desc(dev, dev_str);
		break;
	default:
		return (ENXIO);
	}

	if (bootverbose)
		printf("%s: %s\n", __func__, dev_str);

	return (BUS_PROBE_DEFAULT);
}
static void
qla_add_sysctls(qla_host_t *ha)
{
	device_t dev = ha->pci_dev;

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RD,
		(void *)ha, 0,
		qla_sysctl_get_stats, "I", "Statistics");

	SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "fw_version", CTLFLAG_RD,
		ha->fw_ver_str, 0, "firmware version");

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "debug", CTLFLAG_RW,
		&dbg_level, dbg_level, "Debug Level");

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "std_replenish", CTLFLAG_RW,
		&std_replenish, std_replenish,
		"Threshold for Replenishing Standard Frames");

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "jumbo_replenish", CTLFLAG_RW,
		&jumbo_replenish, jumbo_replenish,
		"Threshold for Replenishing Jumbo Frames");

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "rcv_pkt_thres", CTLFLAG_RW,
		&rcv_pkt_thres, rcv_pkt_thres,
		"Threshold for # of rcv pkts to trigger indication isr");

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "rcv_pkt_thres_d", CTLFLAG_RW,
		&rcv_pkt_thres_d, rcv_pkt_thres_d,
		"Threshold for # of rcv pkts to trigger indication deferred");

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "snd_pkt_thres", CTLFLAG_RW,
		&snd_pkt_thres, snd_pkt_thres,
		"Threshold for # of snd packets");

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "free_pkt_thres", CTLFLAG_RW,
		&free_pkt_thres, free_pkt_thres,
		"Threshold for # of packets to free at a time");

	return;
}
static void
qla_watchdog(void *arg)
{
	qla_host_t *ha = arg;
	qla_hw_t *hw;
	struct ifnet *ifp;

	hw = &ha->hw;
	ifp = ha->ifp;

	if (ha->flags.qla_watchdog_exit)
		return;

	if (!ha->flags.qla_watchdog_pause) {
		if (qla_le32_to_host(*(hw->tx_cons)) != hw->txr_comp) {
			taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
		} else if ((ifp->if_snd.ifq_head != NULL) && QL_RUNNING(ifp)) {
			taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
		}
	}

	/* avoid the undefined "x = x++" idiom when advancing the tick count */
	ha->watchdog_ticks = (ha->watchdog_ticks + 1) % 1000;
	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qla_watchdog, ha);
}
/*
 * Name: qla_pci_attach
 * Function: attaches the device to the operating system
 */
static int
qla_pci_attach(device_t dev)
{
	qla_host_t *ha = NULL;
	uint32_t rsrc_len, i;

	QL_DPRINT2((dev, "%s: enter\n", __func__));

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "cannot get softc\n");
		return (ENOMEM);
	}

	memset(ha, 0, sizeof (qla_host_t));
	if (pci_get_device(dev) != PCI_PRODUCT_QLOGIC_ISP8020) {
		device_printf(dev, "device is not ISP8020\n");
		return (ENXIO);
	}

	ha->pci_dev = dev;
	ha->pci_func = pci_get_function(dev);
	pci_enable_busmaster(dev);

	ha->reg_rid = PCIR_BAR(0);
	ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
				RF_ACTIVE);

	if (ha->pci_reg == NULL) {
		device_printf(dev, "unable to map any ports\n");
		goto qla_pci_attach_err;
	}

	rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
					ha->reg_rid);

	mtx_init(&ha->hw_lock, "qla80xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
	mtx_init(&ha->tx_lock, "qla80xx_tx_lock", MTX_NETWORK_LOCK, MTX_DEF);
	mtx_init(&ha->rx_lock, "qla80xx_rx_lock", MTX_NETWORK_LOCK, MTX_DEF);
	mtx_init(&ha->rxj_lock, "qla80xx_rxj_lock", MTX_NETWORK_LOCK, MTX_DEF);
	ha->flags.lock_init = 1;
	ha->msix_count = pci_msix_count(dev);

	if (ha->msix_count < qla_get_msix_count(ha)) {
		device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
			ha->msix_count);
		goto qla_pci_attach_err;
	}

	QL_DPRINT2((dev, "%s: ha %p irq %p pci_func 0x%x rsrc_count 0x%08x"
		" msix_count 0x%x pci_reg %p\n", __func__, ha,
		ha->irq, ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg));

	ha->msix_count = qla_get_msix_count(ha);

	if (pci_alloc_msix(dev, &ha->msix_count)) {
		device_printf(dev, "%s: pci_alloc_msi[%d] failed\n", __func__,
			ha->msix_count);
		ha->msix_count = 0;
		goto qla_pci_attach_err;
	}
	TASK_INIT(&ha->tx_task, 0, qla_tx_done, ha);
	ha->tx_tq = taskqueue_create_fast("qla_txq", M_NOWAIT,
			taskqueue_thread_enqueue, &ha->tx_tq);
	taskqueue_start_threads(&ha->tx_tq, 1, PI_NET, "%s txq",
		device_get_nameunit(ha->pci_dev));
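
	/*
	 * One receive path per MSI-X vector: each vector gets its own IRQ
	 * resource, interrupt handler (qla_isr) and fast taskqueue for
	 * deferred receive processing (qla_rcv).
	 */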
	for (i = 0; i < ha->msix_count; i++) {
		ha->irq_vec[i].irq_rid = i+1;
		ha->irq_vec[i].ha = ha;

		ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
					&ha->irq_vec[i].irq_rid,
					(RF_ACTIVE | RF_SHAREABLE));

		if (ha->irq_vec[i].irq == NULL) {
			device_printf(dev, "could not allocate interrupt\n");
			goto qla_pci_attach_err;
		}

		if (bus_setup_intr(dev, ha->irq_vec[i].irq,
			(INTR_TYPE_NET | INTR_MPSAFE),
			NULL, qla_isr, &ha->irq_vec[i],
			&ha->irq_vec[i].handle)) {
			device_printf(dev, "could not setup interrupt\n");
			goto qla_pci_attach_err;
		}

		TASK_INIT(&ha->irq_vec[i].rcv_task, 0, qla_rcv,
			&ha->irq_vec[i]);

		ha->irq_vec[i].rcv_tq = taskqueue_create_fast("qla_rcvq",
			M_NOWAIT, taskqueue_thread_enqueue,
			&ha->irq_vec[i].rcv_tq);

		taskqueue_start_threads(&ha->irq_vec[i].rcv_tq, 1, PI_NET,
			"%s rcvq",
			device_get_nameunit(ha->pci_dev));
	}
	qla_add_sysctls(ha);

	/* add hardware specific sysctls */
	qla_hw_add_sysctls(ha);

	/* initialize hardware */
	if (qla_init_hw(ha)) {
		device_printf(dev, "%s: qla_init_hw failed\n", __func__);
		goto qla_pci_attach_err;
	}

	device_printf(dev, "%s: firmware[%d.%d.%d.%d]\n", __func__,
		ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
		ha->fw_ver_build);

	snprintf(ha->fw_ver_str, sizeof(ha->fw_ver_str), "%d.%d.%d.%d",
		ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
		ha->fw_ver_build);

	//qla_get_hw_caps(ha);
	qla_read_mac_addr(ha);
	/* allocate parent dma tag */
	if (qla_alloc_parent_dma_tag(ha)) {
		device_printf(dev, "%s: qla_alloc_parent_dma_tag failed\n",
			__func__);
		goto qla_pci_attach_err;
	}

	/* alloc all dma buffers */
	if (qla_alloc_dma(ha)) {
		device_printf(dev, "%s: qla_alloc_dma failed\n", __func__);
		goto qla_pci_attach_err;
	}

	/* create the o.s ethernet interface */
	qla_init_ifnet(dev, ha);

	ha->flags.qla_watchdog_active = 1;
	ha->flags.qla_watchdog_pause = 1;

	callout_init(&ha->tx_callout, 1);

	/* create ioctl device interface */
	if (qla_make_cdev(ha)) {
		device_printf(dev, "%s: qla_make_cdev failed\n", __func__);
		goto qla_pci_attach_err;
	}

	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qla_watchdog, ha);

	QL_DPRINT2((dev, "%s: exit 0\n", __func__));
	return (0);
qla_pci_attach_err:
	qla_release(ha);

	QL_DPRINT2((dev, "%s: exit ENXIO\n", __func__));
	return (ENXIO);
}
/*
 * Name: qla_pci_detach
 * Function: Unhooks the device from the operating system
 */
static int
qla_pci_detach(device_t dev)
{
	qla_host_t *ha = NULL;
	int i;

	QL_DPRINT2((dev, "%s: enter\n", __func__));

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "cannot get softc\n");
		return (ENOMEM);
	}

	QLA_LOCK(ha, __func__);
	qla_stop(ha);
	QLA_UNLOCK(ha, __func__);

	if (ha->tx_tq) {
		taskqueue_drain(ha->tx_tq, &ha->tx_task);
		taskqueue_free(ha->tx_tq);
	}

	for (i = 0; i < ha->msix_count; i++) {
		taskqueue_drain(ha->irq_vec[i].rcv_tq,
			&ha->irq_vec[i].rcv_task);
		taskqueue_free(ha->irq_vec[i].rcv_tq);
	}

	qla_release(ha);

	QL_DPRINT2((dev, "%s: exit\n", __func__));

	return (0);
}
/*
 * SYSCTL Related Callbacks
 */
static int
qla_sysctl_get_stats(SYSCTL_HANDLER_ARGS)
{
	int err, ret = 0;
	qla_host_t *ha;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	if (err || !req->newptr)
		return (err);

	ha = (qla_host_t *)arg1;

	QL_DPRINT2((ha->pci_dev, "%s: called ret %d\n", __func__, ret));

	return (err);
}
/*
 * Name: qla_release
 * Function: Releases the resources allocated for the device
 */
static void
qla_release(qla_host_t *ha)
{
	device_t dev;
	uint32_t i;

	dev = ha->pci_dev;

	qla_del_cdev(ha);

	if (ha->flags.qla_watchdog_active)
		ha->flags.qla_watchdog_exit = 1;

	callout_stop(&ha->tx_callout);
	qla_mdelay(__func__, 100);

	if (ha->ifp != NULL)
		ether_ifdetach(ha->ifp);

	qla_free_dma(ha);
	qla_free_parent_dma_tag(ha);

	for (i = 0; i < ha->msix_count; i++) {
		if (ha->irq_vec[i].handle)
			(void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
				ha->irq_vec[i].handle);
		if (ha->irq_vec[i].irq)
			(void) bus_release_resource(dev, SYS_RES_IRQ,
				ha->irq_vec[i].irq_rid,
				ha->irq_vec[i].irq);
	}

	pci_release_msi(dev);

	if (ha->flags.lock_init) {
		mtx_destroy(&ha->tx_lock);
		mtx_destroy(&ha->rx_lock);
		mtx_destroy(&ha->rxj_lock);
		mtx_destroy(&ha->hw_lock);
	}

	if (ha->pci_reg)
		(void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
				ha->pci_reg);
}
/*
 * DMA Related Functions
 */
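
/*
 * busdma callback: records the physical address of the (single) segment into
 * the caller-supplied bus_addr_t. Loads here are expected to yield exactly
 * one segment; a zero address left in *arg signals failure to the caller.
 */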
static void
qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	*((bus_addr_t *)arg) = 0;

	if (error) {
		printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
		return;
	}

	QL_ASSERT((nsegs == 1), ("%s: %d segments returned!", __func__, nsegs));

	*((bus_addr_t *)arg) = segs[0].ds_addr;

	return;
}
int
qla_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
	int		ret = 0;
	device_t	dev;
	bus_addr_t	b_addr;

	dev = ha->pci_dev;

	QL_DPRINT2((dev, "%s: enter\n", __func__));

	ret = bus_dma_tag_create(
			ha->parent_tag,/* parent */
			dma_buf->alignment,
			((bus_size_t)(1ULL << 32)),/* boundary */
			BUS_SPACE_MAXADDR,	/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			dma_buf->size,		/* maxsize */
			1,			/* nsegments */
			dma_buf->size,		/* maxsegsize */
			0,			/* flags */
			NULL, NULL,		/* lockfunc, lockarg */
			&dma_buf->dma_tag);

	if (ret) {
		device_printf(dev, "%s: could not create dma tag\n", __func__);
		goto qla_alloc_dmabuf_exit;
	}
	ret = bus_dmamem_alloc(dma_buf->dma_tag,
			(void **)&dma_buf->dma_b,
			(BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
			&dma_buf->dma_map);
	if (ret) {
		bus_dma_tag_destroy(dma_buf->dma_tag);
		device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__);
		goto qla_alloc_dmabuf_exit;
	}

	ret = bus_dmamap_load(dma_buf->dma_tag,
			dma_buf->dma_map,
			dma_buf->dma_b,
			dma_buf->size,
			qla_dmamap_callback,
			&b_addr, BUS_DMA_NOWAIT);

	if (ret || !b_addr) {
		bus_dma_tag_destroy(dma_buf->dma_tag);
		bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
			dma_buf->dma_map);
		ret = -1;
		goto qla_alloc_dmabuf_exit;
	}

	dma_buf->dma_addr = b_addr;

qla_alloc_dmabuf_exit:
	QL_DPRINT2((dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n",
		__func__, ret, (void *)dma_buf->dma_tag,
		(void *)dma_buf->dma_map, (void *)dma_buf->dma_b,
		dma_buf->size));

	return (ret);
}
void
qla_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
	bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
	bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
	bus_dma_tag_destroy(dma_buf->dma_tag);
}
static int
qla_alloc_parent_dma_tag(qla_host_t *ha)
{
	int		ret;
	device_t	dev;

	dev = ha->pci_dev;

	/*
	 * Allocate parent DMA Tag
	 */
	ret = bus_dma_tag_create(
			bus_get_dma_tag(dev),	/* parent */
			1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
			BUS_SPACE_MAXADDR,	/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
			0,			/* nsegments */
			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
			0,			/* flags */
			NULL, NULL,		/* lockfunc, lockarg */
			&ha->parent_tag);

	if (ret) {
		device_printf(dev, "%s: could not create parent dma tag\n",
			__func__);
		return (-1);
	}

	ha->flags.parent_tag = 1;

	return (0);
}
static void
qla_free_parent_dma_tag(qla_host_t *ha)
{
	if (ha->flags.parent_tag) {
		bus_dma_tag_destroy(ha->parent_tag);
		ha->flags.parent_tag = 0;
	}
}
/*
 * Name: qla_init_ifnet
 * Function: Creates the Network Device Interface and Registers it with the O.S.
 */
static void
qla_init_ifnet(device_t dev, qla_host_t *ha)
{
	struct ifnet *ifp;

	QL_DPRINT2((dev, "%s: enter\n", __func__));

	ifp = ha->ifp = if_alloc(IFT_ETHER);

	if (ifp == NULL)
		panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = qla_init;
	ifp->if_softc = ha;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = qla_ioctl;
	ifp->if_start = qla_start;

	IFQ_SET_MAXLEN(&ifp->if_snd, qla_get_ifq_snd_maxlen(ha));
	ifp->if_snd.ifq_drv_maxlen = qla_get_ifq_snd_maxlen(ha);
	IFQ_SET_READY(&ifp->if_snd);

	ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	ether_ifattach(ifp, qla_get_mac_addr(ha));

	ifp->if_capabilities = IFCAP_HWCSUM |
				IFCAP_TSO4 |
				IFCAP_TSO6 |
				IFCAP_JUMBO_MTU;

	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
	ifp->if_capabilities |= IFCAP_LINKSTATE;

#if defined(__FreeBSD_version) && (__FreeBSD_version < 900002)
	ifp->if_timer = 0;
	ifp->if_watchdog = NULL;
#endif /* #if defined(__FreeBSD_version) && (__FreeBSD_version < 900002) */

	ifp->if_capenable = ifp->if_capabilities;

	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	ifmedia_init(&ha->media, IFM_IMASK, qla_media_change, qla_media_status);

	ifmedia_add(&ha->media, (IFM_ETHER | qla_get_optics(ha) | IFM_FDX), 0,
		NULL);
	ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);

	ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));

	QL_DPRINT2((dev, "%s: exit\n", __func__));

	return;
}
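
/*
 * qla_init_locked() assumes the caller holds QLA_LOCK; qla_init() below and
 * the ioctl paths (SIOCSIFADDR, SIOCSIFFLAGS) acquire it before calling in.
 */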
static void
qla_init_locked(qla_host_t *ha)
{
	struct ifnet *ifp = ha->ifp;

	qla_stop(ha);

	if (qla_alloc_xmt_bufs(ha) != 0)
		return;

	if (qla_alloc_rcv_bufs(ha) != 0)
		return;

	if (qla_config_lro(ha))
		return;

	bcopy(IF_LLADDR(ha->ifp), ha->hw.mac_addr, ETHER_ADDR_LEN);

	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_TSO;

	ha->flags.stop_rcv = 0;
	if (qla_init_hw_if(ha) == 0) {
		ifp->if_drv_flags |= IFF_DRV_RUNNING;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		ha->flags.qla_watchdog_pause = 0;
	}
}
static void
qla_init(void *arg)
{
	qla_host_t *ha;

	ha = (qla_host_t *)arg;

	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

	QLA_LOCK(ha, __func__);
	qla_init_locked(ha);
	QLA_UNLOCK(ha, __func__);

	QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));
}
static void
qla_set_multi(qla_host_t *ha, uint32_t add_multi)
{
	uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN];
	struct ifmultiaddr *ifma;
	int mcnt = 0;
	struct ifnet *ifp = ha->ifp;

	if_maddr_rlock(ifp);

	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS)
			break;

		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
			&mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN);

		mcnt++;
	}

	if_maddr_runlock(ifp);

	qla_hw_set_multi(ha, mta, mcnt, add_multi);
}
static int
qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	int ret = 0;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	qla_host_t *ha;

	ha = (qla_host_t *)ifp->if_softc;

	switch (cmd) {
	case SIOCSIFADDR:
		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n",
			__func__, cmd));

		if (ifa->ifa_addr->sa_family == AF_INET) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				QLA_LOCK(ha, __func__);
				qla_init_locked(ha);
				QLA_UNLOCK(ha, __func__);
			}
			QL_DPRINT4((ha->pci_dev,
				"%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
				__func__, cmd,
				ntohl(IA_SIN(ifa)->sin_addr.s_addr)));

			arp_ifinit(ifp, ifa);
			if (ntohl(IA_SIN(ifa)->sin_addr.s_addr) != INADDR_ANY) {
				qla_config_ipv4_addr(ha,
					(IA_SIN(ifa)->sin_addr.s_addr));
			}
		} else {
			ether_ioctl(ifp, cmd, data);
		}
		break;
	case SIOCSIFMTU:
		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n",
			__func__, cmd));

		if (ifr->ifr_mtu > QLA_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
			ret = EINVAL;
		} else {
			QLA_LOCK(ha, __func__);
			ifp->if_mtu = ifr->ifr_mtu;
			ha->max_frame_size =
				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				ret = qla_set_max_mtu(ha, ha->max_frame_size,
					(ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id);
			}
			QLA_UNLOCK(ha, __func__);

			if (ret)
				ret = EINVAL;
		}
		break;
	case SIOCSIFFLAGS:
		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n",
			__func__, cmd));

		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				if ((ifp->if_flags ^ ha->if_flags) &
					IFF_PROMISC) {
					qla_set_promisc(ha);
				} else if ((ifp->if_flags ^ ha->if_flags) &
					IFF_ALLMULTI) {
					qla_set_allmulti(ha);
				}
			} else {
				QLA_LOCK(ha, __func__);
				qla_init_locked(ha);
				ha->max_frame_size = ifp->if_mtu +
					ETHER_HDR_LEN + ETHER_CRC_LEN;
				ret = qla_set_max_mtu(ha, ha->max_frame_size,
					(ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id);
				QLA_UNLOCK(ha, __func__);
			}
		} else {
			QLA_LOCK(ha, __func__);
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				qla_stop(ha);
			ha->if_flags = ifp->if_flags;
			QLA_UNLOCK(ha, __func__);
		}
		break;
	case SIOCADDMULTI:
		QL_DPRINT4((ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd));

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			qla_set_multi(ha, 1);
		}
		break;

	case SIOCDELMULTI:
		QL_DPRINT4((ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd));

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			qla_set_multi(ha, 0);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		QL_DPRINT4((ha->pci_dev,
			"%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n",
			__func__, cmd));
		ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
		break;
	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n",
			__func__, cmd));

		if (mask & IFCAP_HWCSUM)
			ifp->if_capenable ^= IFCAP_HWCSUM;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_TSO6)
			ifp->if_capenable ^= IFCAP_TSO6;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;

		if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
			qla_init(ha);

		VLAN_CAPABILITIES(ifp);
		break;
	}
	default:
		QL_DPRINT4((ha->pci_dev, "%s: default (0x%lx)\n",
			__func__, cmd));
		ret = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (ret);
}
static int
qla_media_change(struct ifnet *ifp)
{
	qla_host_t *ha;
	struct ifmedia *ifm;
	int ret = 0;

	ha = (qla_host_t *)ifp->if_softc;

	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

	ifm = &ha->media;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		ret = EINVAL;

	QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));

	return (ret);
}
static void
qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	qla_host_t *ha;

	ha = (qla_host_t *)ifp->if_softc;

	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	qla_update_link_state(ha);
	if (ha->hw.flags.link_up) {
		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= (IFM_FDX | qla_get_optics(ha));
	}

	QL_DPRINT2((ha->pci_dev, "%s: exit (%s)\n", __func__,
		(ha->hw.flags.link_up ? "link_up" : "link_down")));
}
static void
qla_start(struct ifnet *ifp)
{
	struct mbuf *m_head;
	qla_host_t *ha = (qla_host_t *)ifp->if_softc;

	QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__));

	if (!mtx_trylock(&ha->tx_lock)) {
		QL_DPRINT8((ha->pci_dev,
			"%s: mtx_trylock(&ha->tx_lock) failed\n", __func__));
		return;
	}

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
		IFF_DRV_RUNNING) {
		QL_DPRINT8((ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
		mtx_unlock(&ha->tx_lock);
		return;
	}

	if (!ha->watchdog_ticks)
		qla_update_link_state(ha);

	if (!ha->hw.flags.link_up) {
		QL_DPRINT8((ha->pci_dev, "%s: link down\n", __func__));
		mtx_unlock(&ha->tx_lock);
		return;
	}

	while (ifp->if_snd.ifq_head != NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);

		if (m_head == NULL) {
			QL_DPRINT8((ha->pci_dev, "%s: m_head == NULL\n",
				__func__));
			break;
		}

		if (qla_send(ha, &m_head)) {
			if (m_head == NULL)
				break;
			QL_DPRINT8((ha->pci_dev, "%s: PREPEND\n", __func__));
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IF_PREPEND(&ifp->if_snd, m_head);
			break;
		}
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	mtx_unlock(&ha->tx_lock);
	QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__));
	return;
}
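
/*
 * Transmit path: a packet whose scatter list overflows QLA_MAX_SEGMENTS
 * comes back from bus_dmamap_load_mbuf_sg() as EFBIG; it is then linearized
 * with m_defrag() and the map load is retried once before giving up.
 */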
static int
qla_send(qla_host_t *ha, struct mbuf **m_headp)
{
	bus_dma_segment_t	segs[QLA_MAX_SEGMENTS];
	bus_dmamap_t		map;
	int			nsegs;
	int			ret = -1;
	uint32_t		tx_idx;
	struct mbuf		*m_head = *m_headp;

	QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__));

	if ((ret = bus_dmamap_create(ha->tx_tag, BUS_DMA_NOWAIT, &map))) {
		ha->err_tx_dmamap_create++;
		device_printf(ha->pci_dev,
			"%s: bus_dmamap_create failed[%d, %d]\n",
			__func__, ret, m_head->m_pkthdr.len);
		return (ret);
	}

	ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
			BUS_DMA_NOWAIT);

	if (ret == EFBIG) {
		struct mbuf *m;

		QL_DPRINT8((ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
			m_head->m_pkthdr.len));

		m = m_defrag(m_head, M_NOWAIT);
		if (m == NULL) {
			ha->err_tx_defrag++;
			m_freem(m_head);
			*m_headp = NULL;
			device_printf(ha->pci_dev,
				"%s: m_defrag() = NULL [%d]\n",
				__func__, ret);
			return (ENOBUFS);
		}
		m_head = m;
		*m_headp = m_head;

		if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
					segs, &nsegs, BUS_DMA_NOWAIT))) {
			ha->err_tx_dmamap_load++;

			device_printf(ha->pci_dev,
				"%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n",
				__func__, ret, m_head->m_pkthdr.len);

			bus_dmamap_destroy(ha->tx_tag, map);
			if (ret != ENOMEM) {
				m_freem(m_head);
				*m_headp = NULL;
			}
			return (ret);
		}
	} else if (ret) {
		ha->err_tx_dmamap_load++;

		device_printf(ha->pci_dev,
			"%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n",
			__func__, ret, m_head->m_pkthdr.len);

		bus_dmamap_destroy(ha->tx_tag, map);

		if (ret != ENOMEM) {
			m_freem(m_head);
			*m_headp = NULL;
		}
		return (ret);
	}

	QL_ASSERT((nsegs != 0), ("qla_send: empty packet"));

	bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);

	if (!(ret = qla_hw_send(ha, segs, nsegs, &tx_idx, m_head))) {
		ha->tx_buf[tx_idx].m_head = m_head;
		ha->tx_buf[tx_idx].map = map;
	} else {
		if (ret == EINVAL) {
			m_freem(m_head);
			*m_headp = NULL;
		}
	}

	QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__));
	return (ret);
}
static void
qla_stop(qla_host_t *ha)
{
	struct ifnet *ifp = ha->ifp;

	ha->flags.qla_watchdog_pause = 1;
	qla_mdelay(__func__, 100);

	ha->flags.stop_rcv = 1;
	qla_hw_stop_rcv(ha);

	qla_free_xmt_bufs(ha);
	qla_free_rcv_bufs(ha);

	ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
}
/*
 * Buffer Management Functions for Transmit and Receive Rings
 */
static int
qla_alloc_xmt_bufs(qla_host_t *ha)
{
	if (bus_dma_tag_create(NULL,    /* parent */
		1, 0,    /* alignment, bounds */
		BUS_SPACE_MAXADDR,       /* lowaddr */
		BUS_SPACE_MAXADDR,       /* highaddr */
		NULL, NULL,      /* filter, filterarg */
		QLA_MAX_TSO_FRAME_SIZE,     /* maxsize */
		QLA_MAX_SEGMENTS,        /* nsegments */
		PAGE_SIZE,        /* maxsegsize */
		BUS_DMA_ALLOCNOW,        /* flags */
		NULL,    /* lockfunc */
		NULL,    /* lockfuncarg */
		&ha->tx_tag)) {
		device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n",
			__func__);
		return (ENOMEM);
	}

	bzero((void *)ha->tx_buf, (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));

	return (0);
}
/*
 * Release an mbuf after it has been sent on the wire
 */
static void
qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb)
{
	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

	if (txb->m_head) {
		bus_dmamap_unload(ha->tx_tag, txb->map);
		bus_dmamap_destroy(ha->tx_tag, txb->map);

		m_freem(txb->m_head);
		txb->m_head = NULL;
	}

	QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));
}
static void
qla_free_xmt_bufs(qla_host_t *ha)
{
	int i;

	for (i = 0; i < NUM_TX_DESCRIPTORS; i++)
		qla_clear_tx_buf(ha, &ha->tx_buf[i]);

	if (ha->tx_tag != NULL) {
		bus_dma_tag_destroy(ha->tx_tag);
		ha->tx_tag = NULL;
	}

	bzero((void *)ha->tx_buf, (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
}
static int
qla_alloc_rcv_bufs(qla_host_t *ha)
{
	int i, j, ret = 0;
	qla_rx_buf_t *rxb;

	if (bus_dma_tag_create(NULL,    /* parent */
			1, 0,    /* alignment, bounds */
			BUS_SPACE_MAXADDR,       /* lowaddr */
			BUS_SPACE_MAXADDR,       /* highaddr */
			NULL, NULL,      /* filter, filterarg */
			MJUM9BYTES,     /* maxsize */
			1,        /* nsegments */
			MJUM9BYTES,        /* maxsegsize */
			BUS_DMA_ALLOCNOW,        /* flags */
			NULL,    /* lockfunc */
			NULL,    /* lockfuncarg */
			&ha->rx_tag)) {

		device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
			__func__);

		return (ENOMEM);
	}

	bzero((void *)ha->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS));
	bzero((void *)ha->rx_jbuf,
		(sizeof(qla_rx_buf_t) * NUM_RX_JUMBO_DESCRIPTORS));
	for (i = 0; i < MAX_SDS_RINGS; i++) {
		ha->hw.sds[i].sdsr_next = 0;
		ha->hw.sds[i].rxb_free = NULL;
		ha->hw.sds[i].rx_free = 0;
		ha->hw.sds[i].rxjb_free = NULL;
		ha->hw.sds[i].rxj_free = 0;
	}
	for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
		rxb = &ha->rx_buf[i];

		ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT, &rxb->map);

		if (ret) {
			device_printf(ha->pci_dev,
				"%s: dmamap[%d] failed\n", __func__, i);

			for (j = 0; j < i; j++) {
				bus_dmamap_destroy(ha->rx_tag,
					ha->rx_buf[j].map);
			}
			goto qla_alloc_rcv_bufs_failed;
		}
	}
	qla_init_hw_rcv_descriptors(ha, RDS_RING_INDEX_NORMAL);

	for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
		rxb = &ha->rx_buf[i];

		if (!(ret = qla_get_mbuf(ha, rxb, NULL, 0))) {
			/*
			 * set the physical address in the corresponding
			 * descriptor entry in the receive ring/queue for the
			 * hba
			 */
			qla_set_hw_rcv_desc(ha, RDS_RING_INDEX_NORMAL, i,
				rxb->handle, rxb->paddr,
				(rxb->m_head)->m_pkthdr.len);
		} else {
			device_printf(ha->pci_dev,
				"%s: qla_get_mbuf [standard(%d)] failed\n",
				__func__, i);
			bus_dmamap_destroy(ha->rx_tag, rxb->map);
			goto qla_alloc_rcv_bufs_failed;
		}
	}
	for (i = 0; i < NUM_RX_JUMBO_DESCRIPTORS; i++) {
		rxb = &ha->rx_jbuf[i];

		ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT, &rxb->map);

		if (ret) {
			device_printf(ha->pci_dev,
				"%s: dmamap[%d] failed\n", __func__, i);

			for (j = 0; j < i; j++) {
				bus_dmamap_destroy(ha->rx_tag,
					ha->rx_jbuf[j].map);
			}
			goto qla_alloc_rcv_bufs_failed;
		}
	}
	qla_init_hw_rcv_descriptors(ha, RDS_RING_INDEX_JUMBO);

	for (i = 0; i < NUM_RX_JUMBO_DESCRIPTORS; i++) {
		rxb = &ha->rx_jbuf[i];

		if (!(ret = qla_get_mbuf(ha, rxb, NULL, 1))) {
			/*
			 * set the physical address in the corresponding
			 * descriptor entry in the receive ring/queue for the
			 * hba
			 */
			qla_set_hw_rcv_desc(ha, RDS_RING_INDEX_JUMBO, i,
				rxb->handle, rxb->paddr,
				(rxb->m_head)->m_pkthdr.len);
		} else {
			device_printf(ha->pci_dev,
				"%s: qla_get_mbuf [jumbo(%d)] failed\n",
				__func__, i);
			bus_dmamap_destroy(ha->rx_tag, rxb->map);
			goto qla_alloc_rcv_bufs_failed;
		}
	}

	return (0);

qla_alloc_rcv_bufs_failed:
	qla_free_rcv_bufs(ha);
	return (ret);
}
static void
qla_free_rcv_bufs(qla_host_t *ha)
{
	int i;
	qla_rx_buf_t *rxb;

	for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
		rxb = &ha->rx_buf[i];
		if (rxb->m_head != NULL) {
			bus_dmamap_unload(ha->rx_tag, rxb->map);
			bus_dmamap_destroy(ha->rx_tag, rxb->map);
			m_freem(rxb->m_head);
			rxb->m_head = NULL;
		}
	}

	for (i = 0; i < NUM_RX_JUMBO_DESCRIPTORS; i++) {
		rxb = &ha->rx_jbuf[i];
		if (rxb->m_head != NULL) {
			bus_dmamap_unload(ha->rx_tag, rxb->map);
			bus_dmamap_destroy(ha->rx_tag, rxb->map);
			m_freem(rxb->m_head);
			rxb->m_head = NULL;
		}
	}

	if (ha->rx_tag != NULL) {
		bus_dma_tag_destroy(ha->rx_tag);
		ha->rx_tag = NULL;
	}

	bzero((void *)ha->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS));
	bzero((void *)ha->rx_jbuf,
		(sizeof(qla_rx_buf_t) * NUM_RX_JUMBO_DESCRIPTORS));

	for (i = 0; i < MAX_SDS_RINGS; i++) {
		ha->hw.sds[i].sdsr_next = 0;
		ha->hw.sds[i].rxb_free = NULL;
		ha->hw.sds[i].rx_free = 0;
		ha->hw.sds[i].rxjb_free = NULL;
		ha->hw.sds[i].rxj_free = 0;
	}
}
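
/*
 * Refill helper: allocates (or re-arms) a standard or jumbo receive cluster,
 * aligns its data pointer to an 8-byte boundary, and DMA-maps it so the
 * physical address can be posted to the receive ring.
 */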
int
qla_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp,
	uint32_t jumbo)
{
	struct mbuf *mp = nmp;
	int ret = 0;
	uint32_t offset;

	QL_DPRINT2((ha->pci_dev, "%s: jumbo(0x%x) enter\n", __func__, jumbo));

	if (mp == NULL) {
		if (!jumbo) {
			mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);

			if (mp == NULL) {
				ret = ENOBUFS;
				device_printf(ha->pci_dev,
					"%s: m_getcl failed\n", __func__);
				goto exit_qla_get_mbuf;
			}
			mp->m_len = mp->m_pkthdr.len = MCLBYTES;
		} else {
			mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
				MJUM9BYTES);
			if (mp == NULL) {
				ret = ENOBUFS;
				device_printf(ha->pci_dev,
					"%s: m_getjcl failed\n", __func__);
				goto exit_qla_get_mbuf;
			}
			mp->m_len = mp->m_pkthdr.len = MJUM9BYTES;
		}
	} else {
		if (!jumbo)
			mp->m_len = mp->m_pkthdr.len = MCLBYTES;
		else
			mp->m_len = mp->m_pkthdr.len = MJUM9BYTES;

		mp->m_data = mp->m_ext.ext_buf;
		mp->m_next = NULL;
	}

	/* align the data pointer to an 8 byte boundary */
	offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL);
	if (offset) {
		offset = 8 - offset;
		m_adj(mp, offset);
	}

	/*
	 * Using memory from the mbuf cluster pool, invoke the bus_dma
	 * machinery to arrange the memory mapping.
	 */
	ret = bus_dmamap_load(ha->rx_tag, rxb->map,
		mtod(mp, void *), mp->m_len,
		qla_dmamap_callback, &rxb->paddr,
		BUS_DMA_NOWAIT);
	if (ret || !rxb->paddr) {
		m_freem(mp);
		device_printf(ha->pci_dev,
			"%s: bus_dmamap_load failed\n", __func__);
		ret = -1;
		goto exit_qla_get_mbuf;
	}
	rxb->m_head = mp;
	bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD);

exit_qla_get_mbuf:
	QL_DPRINT2((ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret));
	return (ret);
}
static void
qla_tx_done(void *context, int pending)
{
	qla_host_t *ha = context;

	qla_hw_tx_done(ha);
	qla_start(ha->ifp);
}