/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011-2013 Qlogic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "qla_inline.h"
/*
 * Some PCI Configuration Space Related Defines
 */

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC	0x1077
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP8020
#define PCI_PRODUCT_QLOGIC_ISP8020	0x8020
#endif

#define PCI_QLOGIC_ISP8020 \
	((PCI_PRODUCT_QLOGIC_ISP8020 << 16) | PCI_VENDOR_QLOGIC)
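
/*
 * Note: qla_pci_probe() below builds the same 32-bit key from PCI
 * configuration space as ((device id << 16) | vendor id), so an ISP8020
 * part matches PCI_QLOGIC_ISP8020 == 0x80201077.
 */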
static int qla_alloc_parent_dma_tag(qla_host_t *ha);
static void qla_free_parent_dma_tag(qla_host_t *ha);
static int qla_alloc_xmt_bufs(qla_host_t *ha);
static void qla_free_xmt_bufs(qla_host_t *ha);
static int qla_alloc_rcv_bufs(qla_host_t *ha);
static void qla_free_rcv_bufs(qla_host_t *ha);

static void qla_init_ifnet(device_t dev, qla_host_t *ha);
static int qla_sysctl_get_stats(SYSCTL_HANDLER_ARGS);
static void qla_release(qla_host_t *ha);
static void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
		int error);
static void qla_stop(qla_host_t *ha);
static int qla_send(qla_host_t *ha, struct mbuf **m_headp);
static void qla_tx_done(void *context, int pending);
/*
 * Hooks into the Operating System
 */
static int qla_pci_probe (device_t);
static int qla_pci_attach (device_t);
static int qla_pci_detach (device_t);

static void qla_init(void *arg);
static int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int qla_media_change(struct ifnet *ifp);
static void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
static device_method_t qla_pci_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe, qla_pci_probe),
        DEVMETHOD(device_attach, qla_pci_attach),
        DEVMETHOD(device_detach, qla_pci_detach),
        { 0, 0 }
};

static driver_t qla_pci_driver = {
        "ql", qla_pci_methods, sizeof (qla_host_t),
};
static devclass_t qla80xx_devclass;

DRIVER_MODULE(qla80xx, pci, qla_pci_driver, qla80xx_devclass, 0, 0);

MODULE_DEPEND(qla80xx, pci, 1, 1, 1);
MODULE_DEPEND(qla80xx, ether, 1, 1, 1);

MALLOC_DEFINE(M_QLA8XXXBUF, "qla80xxbuf", "Buffers for qla80xx driver");
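
/*
 * Driver-wide thresholds, also exported read-write through the per-device
 * sysctl tree in qla_add_sysctls() below. The replenish counts are in
 * receive descriptors; the packet thresholds control how much transmit and
 * receive work is batched before further indications are triggered.
 */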
uint32_t std_replenish = 8;
uint32_t jumbo_replenish = 2;
uint32_t rcv_pkt_thres = 128;
uint32_t rcv_pkt_thres_d = 32;
uint32_t snd_pkt_thres = 16;
uint32_t free_pkt_thres = (NUM_TX_DESCRIPTORS / 2);

static char dev_str[64];
/*
 * Name: qla_pci_probe
 * Function: Validate the PCI device to be a QLA80XX device
 */
static int
qla_pci_probe(device_t dev)
{
        switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
        case PCI_QLOGIC_ISP8020:
                snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d",
                        "Qlogic ISP 80xx PCI CNA Adapter-Ethernet Function",
                        QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
                        QLA_VERSION_BUILD);
                device_set_desc(dev, dev_str);
                break;
        default:
                return (ENXIO);
        }

        printf("%s: %s\n", __func__, dev_str);

        return (BUS_PROBE_DEFAULT);
}
static void
qla_add_sysctls(qla_host_t *ha)
{
        device_t dev = ha->pci_dev;

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
                (void *)ha, 0, qla_sysctl_get_stats, "I", "Statistics");

        SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "fw_version", CTLFLAG_RD,
                ha->fw_ver_str, 0, "firmware version");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "debug", CTLFLAG_RW,
                &dbg_level, dbg_level, "Debug Level");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "std_replenish", CTLFLAG_RW,
                &std_replenish, std_replenish,
                "Threshold for Replenishing Standard Frames");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "jumbo_replenish", CTLFLAG_RW,
                &jumbo_replenish, jumbo_replenish,
                "Threshold for Replenishing Jumbo Frames");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "rcv_pkt_thres", CTLFLAG_RW,
                &rcv_pkt_thres, rcv_pkt_thres,
                "Threshold for # of rcv pkts to trigger indication isr");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "rcv_pkt_thres_d", CTLFLAG_RW,
                &rcv_pkt_thres_d, rcv_pkt_thres_d,
                "Threshold for # of rcv pkts to trigger indication deferred");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "snd_pkt_thres", CTLFLAG_RW,
                &snd_pkt_thres, snd_pkt_thres,
                "Threshold for # of snd packets");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "free_pkt_thres", CTLFLAG_RW,
                &free_pkt_thres, free_pkt_thres,
                "Threshold for # of packets to free at a time");
}
static void
qla_watchdog(void *arg)
{
        qla_host_t *ha = arg;
        qla_hw_t *hw = &ha->hw;
        struct ifnet *ifp = ha->ifp;

        if (ha->flags.qla_watchdog_exit)
                return;

        if (!ha->flags.qla_watchdog_pause) {
                if (qla_le32_to_host(*(hw->tx_cons)) != hw->txr_comp) {
                        taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
                } else if ((ifp->if_snd.ifq_head != NULL) && QL_RUNNING(ifp)) {
                        taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
                }
        }

        ha->watchdog_ticks = (ha->watchdog_ticks + 1) % 1000;
        callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
                qla_watchdog, ha);
}
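
/*
 * The watchdog re-arms itself on every run: unless qla_watchdog_exit is
 * set, it keeps scheduling transmit-completion work onto the tx taskqueue
 * whenever the hardware transmit consumer index has moved (or packets are
 * still queued on if_snd), and bumps watchdog_ticks modulo 1000 so that
 * qla_start() can periodically refresh the link state.
 */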
/*
 * Name: qla_pci_attach
 * Function: attaches the device to the operating system
 */
static int
qla_pci_attach(device_t dev)
{
        qla_host_t *ha = NULL;
        uint32_t rsrc_len, i;

        QL_DPRINT2((dev, "%s: enter\n", __func__));

        if ((ha = device_get_softc(dev)) == NULL) {
                device_printf(dev, "cannot get softc\n");
                return (ENXIO);
        }

        memset(ha, 0, sizeof (qla_host_t));

        if (pci_get_device(dev) != PCI_PRODUCT_QLOGIC_ISP8020) {
                device_printf(dev, "device is not ISP8020\n");
                return (ENXIO);
        }

        ha->pci_func = pci_get_function(dev);

        ha->pci_dev = dev;
        pci_enable_busmaster(dev);

        ha->reg_rid = PCIR_BAR(0);
        ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
                                RF_ACTIVE);

        if (ha->pci_reg == NULL) {
                device_printf(dev, "unable to map any ports\n");
                goto qla_pci_attach_err;
        }

        rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
                                ha->reg_rid);

        mtx_init(&ha->hw_lock, "qla80xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
        mtx_init(&ha->tx_lock, "qla80xx_tx_lock", MTX_NETWORK_LOCK, MTX_DEF);
        mtx_init(&ha->rx_lock, "qla80xx_rx_lock", MTX_NETWORK_LOCK, MTX_DEF);
        mtx_init(&ha->rxj_lock, "qla80xx_rxj_lock", MTX_NETWORK_LOCK, MTX_DEF);
        ha->flags.lock_init = 1;
        ha->msix_count = pci_msix_count(dev);

        if (ha->msix_count < qla_get_msix_count(ha)) {
                device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
                        ha->msix_count);
                goto qla_pci_attach_err;
        }

        QL_DPRINT2((dev, "%s: ha %p irq %p pci_func 0x%x rsrc_count 0x%08x"
                " msix_count 0x%x pci_reg %p\n", __func__, ha,
                ha->irq, ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg));

        ha->msix_count = qla_get_msix_count(ha);

        if (pci_alloc_msix(dev, &ha->msix_count)) {
                device_printf(dev, "%s: pci_alloc_msi[%d] failed\n", __func__,
                        ha->msix_count);
                goto qla_pci_attach_err;
        }
        TASK_INIT(&ha->tx_task, 0, qla_tx_done, ha);
        ha->tx_tq = taskqueue_create_fast("qla_txq", M_NOWAIT,
                taskqueue_thread_enqueue, &ha->tx_tq);
        taskqueue_start_threads(&ha->tx_tq, 1, PI_NET, "%s txq",
                device_get_nameunit(ha->pci_dev));
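
        /*
         * Deferred-work setup: transmit completions run in qla_tx_done()
         * via this fast taskqueue rather than in interrupt context; the
         * watchdog callout above enqueues the task whenever the hardware
         * transmit consumer index moves or packets remain queued.
         */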
        for (i = 0; i < ha->msix_count; i++) {
                ha->irq_vec[i].irq_rid = i+1;
                ha->irq_vec[i].ha = ha;

                ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
                                        &ha->irq_vec[i].irq_rid,
                                        (RF_ACTIVE | RF_SHAREABLE));

                if (ha->irq_vec[i].irq == NULL) {
                        device_printf(dev, "could not allocate interrupt\n");
                        goto qla_pci_attach_err;
                }

                if (bus_setup_intr(dev, ha->irq_vec[i].irq,
                        (INTR_TYPE_NET | INTR_MPSAFE),
                        NULL, qla_isr, &ha->irq_vec[i],
                        &ha->irq_vec[i].handle)) {
                        device_printf(dev, "could not setup interrupt\n");
                        goto qla_pci_attach_err;
                }

                TASK_INIT(&ha->irq_vec[i].rcv_task, 0, qla_rcv,
                        &ha->irq_vec[i]);

                ha->irq_vec[i].rcv_tq = taskqueue_create_fast("qla_rcvq",
                        M_NOWAIT, taskqueue_thread_enqueue,
                        &ha->irq_vec[i].rcv_tq);

                taskqueue_start_threads(&ha->irq_vec[i].rcv_tq, 1, PI_NET,
                        "%s rcvq",
                        device_get_nameunit(ha->pci_dev));
        }
        /* add hardware specific sysctls */
        qla_hw_add_sysctls(ha);

        /* initialize hardware */
        if (qla_init_hw(ha)) {
                device_printf(dev, "%s: qla_init_hw failed\n", __func__);
                goto qla_pci_attach_err;
        }

        device_printf(dev, "%s: firmware[%d.%d.%d.%d]\n", __func__,
                ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
                ha->fw_ver_build);

        snprintf(ha->fw_ver_str, sizeof(ha->fw_ver_str), "%d.%d.%d.%d",
                ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
                ha->fw_ver_build);

        //qla_get_hw_caps(ha);
        qla_read_mac_addr(ha);
        /* allocate parent dma tag */
        if (qla_alloc_parent_dma_tag(ha)) {
                device_printf(dev, "%s: qla_alloc_parent_dma_tag failed\n",
                        __func__);
                goto qla_pci_attach_err;
        }

        /* alloc all dma buffers */
        if (qla_alloc_dma(ha)) {
                device_printf(dev, "%s: qla_alloc_dma failed\n", __func__);
                goto qla_pci_attach_err;
        }

        /* create the OS ethernet interface */
        qla_init_ifnet(dev, ha);
        ha->flags.qla_watchdog_active = 1;
        ha->flags.qla_watchdog_pause = 1;

        callout_init(&ha->tx_callout, 1);

        /* create ioctl device interface */
        if (qla_make_cdev(ha)) {
                device_printf(dev, "%s: qla_make_cdev failed\n", __func__);
                goto qla_pci_attach_err;
        }

        callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
                qla_watchdog, ha);

        QL_DPRINT2((dev, "%s: exit 0\n", __func__));

        return (0);

qla_pci_attach_err:
        qla_release(ha);

        QL_DPRINT2((dev, "%s: exit ENXIO\n", __func__));

        return (ENXIO);
}
/*
 * Name: qla_pci_detach
 * Function: Unhooks the device from the operating system
 */
static int
qla_pci_detach(device_t dev)
{
        qla_host_t *ha = NULL;
        int i;

        QL_DPRINT2((dev, "%s: enter\n", __func__));

        if ((ha = device_get_softc(dev)) == NULL) {
                device_printf(dev, "cannot get softc\n");
                return (ENXIO);
        }

        QLA_LOCK(ha, __func__);
        qla_stop(ha);
        QLA_UNLOCK(ha, __func__);

        if (ha->tx_tq) {
                taskqueue_drain(ha->tx_tq, &ha->tx_task);
                taskqueue_free(ha->tx_tq);
        }

        for (i = 0; i < ha->msix_count; i++) {
                taskqueue_drain(ha->irq_vec[i].rcv_tq,
                        &ha->irq_vec[i].rcv_task);
                taskqueue_free(ha->irq_vec[i].rcv_tq);
        }

        qla_release(ha);

        QL_DPRINT2((dev, "%s: exit\n", __func__));

        return (0);
}
/*
 * SYSCTL Related Callbacks
 */
static int
qla_sysctl_get_stats(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err)
                return (err);

        ha = (qla_host_t *)arg1;

        QL_DPRINT2((ha->pci_dev, "%s: called ret %d\n", __func__, ret));

        return (err);
}
/*
 * Name: qla_release
 * Function: Releases the resources allocated for the device
 */
static void
qla_release(qla_host_t *ha)
{
        device_t dev;
        int i;

        dev = ha->pci_dev;

        if (ha->flags.qla_watchdog_active)
                ha->flags.qla_watchdog_exit = 1;

        callout_stop(&ha->tx_callout);
        qla_mdelay(__func__, 100);

        if (ha->ifp)
                ether_ifdetach(ha->ifp);

        qla_free_parent_dma_tag(ha);

        for (i = 0; i < ha->msix_count; i++) {
                if (ha->irq_vec[i].handle)
                        (void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
                                ha->irq_vec[i].handle);
                if (ha->irq_vec[i].irq)
                        (void) bus_release_resource(dev, SYS_RES_IRQ,
                                ha->irq_vec[i].irq_rid,
                                ha->irq_vec[i].irq);
        }

        pci_release_msi(dev);

        if (ha->flags.lock_init) {
                mtx_destroy(&ha->tx_lock);
                mtx_destroy(&ha->rx_lock);
                mtx_destroy(&ha->rxj_lock);
                mtx_destroy(&ha->hw_lock);
        }

        if (ha->pci_reg)
                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
                                ha->pci_reg);
}
/*
 * DMA Related Functions
 */

static void
qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
        *((bus_addr_t *)arg) = 0;

        if (error) {
                printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
                return;
        }

        QL_ASSERT((nsegs == 1), ("%s: %d segments returned!", __func__, nsegs));

        *((bus_addr_t *)arg) = segs[0].ds_addr;

        return;
}
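
/*
 * Contract for the callback above: every tag used with it is created with
 * nsegments == 1, so a successful load hands back exactly one segment and
 * the callers below (qla_alloc_dmabuf, qla_get_mbuf) read the bus address
 * out of the single bus_addr_t they passed as the argument. A value of 0
 * doubles as the failure sentinel.
 */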
int
qla_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
        int             ret = 0;
        device_t        dev;
        bus_addr_t      b_addr;

        dev = ha->pci_dev;

        QL_DPRINT2((dev, "%s: enter\n", __func__));

        ret = bus_dma_tag_create(
                        ha->parent_tag,/* parent */
                        dma_buf->alignment,/* alignment */
                        ((bus_size_t)(1ULL << 32)),/* boundary */
                        BUS_SPACE_MAXADDR,      /* lowaddr */
                        BUS_SPACE_MAXADDR,      /* highaddr */
                        NULL, NULL,             /* filter, filterarg */
                        dma_buf->size,          /* maxsize */
                        1,                      /* nsegments */
                        dma_buf->size,          /* maxsegsize */
                        0,                      /* flags */
                        NULL, NULL,             /* lockfunc, lockarg */
                        &dma_buf->dma_tag);

        if (ret) {
                device_printf(dev, "%s: could not create dma tag\n", __func__);
                goto qla_alloc_dmabuf_exit;
        }

        ret = bus_dmamem_alloc(dma_buf->dma_tag,
                        (void **)&dma_buf->dma_b,
                        (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
                        &dma_buf->dma_map);
        if (ret) {
                bus_dma_tag_destroy(dma_buf->dma_tag);
                device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__);
                goto qla_alloc_dmabuf_exit;
        }

        ret = bus_dmamap_load(dma_buf->dma_tag,
                        dma_buf->dma_map,
                        dma_buf->dma_b,
                        dma_buf->size,
                        qla_dmamap_callback,
                        &b_addr, BUS_DMA_NOWAIT);

        if (ret || !b_addr) {
                bus_dma_tag_destroy(dma_buf->dma_tag);
                bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
                        dma_buf->dma_map);
                ret = -1;
                goto qla_alloc_dmabuf_exit;
        }

        dma_buf->dma_addr = b_addr;

qla_alloc_dmabuf_exit:
        QL_DPRINT2((dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n",
                __func__, ret, (void *)dma_buf->dma_tag,
                (void *)dma_buf->dma_map, (void *)dma_buf->dma_b,
                dma_buf->size));

        return (ret);
}
void
qla_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
        bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
        bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
        bus_dma_tag_destroy(dma_buf->dma_tag);
}
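
/*
 * Note the teardown order above: it is the exact reverse of allocation
 * (unload the mapping, free the memory, then destroy the tag), which is
 * the required sequence for busdma resources.
 */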
static int
qla_alloc_parent_dma_tag(qla_host_t *ha)
{
        device_t dev = ha->pci_dev;
        int ret = 0;

        /*
         * Allocate parent DMA Tag
         */
        ret = bus_dma_tag_create(
                        bus_get_dma_tag(dev),   /* parent */
                        1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
                        BUS_SPACE_MAXADDR,      /* lowaddr */
                        BUS_SPACE_MAXADDR,      /* highaddr */
                        NULL, NULL,             /* filter, filterarg */
                        BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
                        0,                      /* nsegments */
                        BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
                        0,                      /* flags */
                        NULL, NULL,             /* lockfunc, lockarg */
                        &ha->parent_tag);

        if (ret) {
                device_printf(dev, "%s: could not create parent dma tag\n",
                        __func__);
                return (-1);
        }

        ha->flags.parent_tag = 1;

        return (0);
}

static void
qla_free_parent_dma_tag(qla_host_t *ha)
{
        if (ha->flags.parent_tag) {
                bus_dma_tag_destroy(ha->parent_tag);
                ha->flags.parent_tag = 0;
        }
}
/*
 * Name: qla_init_ifnet
 * Function: Creates the Network Device Interface and Registers it with the O.S.
 */
static void
qla_init_ifnet(device_t dev, qla_host_t *ha)
{
        struct ifnet *ifp;

        QL_DPRINT2((dev, "%s: enter\n", __func__));

        ifp = ha->ifp = if_alloc(IFT_ETHER);

        if (ifp == NULL)
                panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));

        if_initname(ifp, device_get_name(dev), device_get_unit(dev));

        ifp->if_mtu = ETHERMTU;
        ifp->if_baudrate = IF_Gbps(10);
        ifp->if_init = qla_init;
        ifp->if_softc = ha;

        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_ioctl = qla_ioctl;
        ifp->if_start = qla_start;

        IFQ_SET_MAXLEN(&ifp->if_snd, qla_get_ifq_snd_maxlen(ha));
        ifp->if_snd.ifq_drv_maxlen = qla_get_ifq_snd_maxlen(ha);
        IFQ_SET_READY(&ifp->if_snd);

        ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

        ether_ifattach(ifp, qla_get_mac_addr(ha));

        ifp->if_capabilities = IFCAP_HWCSUM |
                                IFCAP_TSO4 |
                                IFCAP_JUMBO_MTU;

        ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
        ifp->if_capabilities |= IFCAP_LINKSTATE;

#if defined(__FreeBSD_version) && (__FreeBSD_version < 900002)
        ifp->if_timer = 0;
        ifp->if_watchdog = NULL;
#endif /* #if defined(__FreeBSD_version) && (__FreeBSD_version < 900002) */

        ifp->if_capenable = ifp->if_capabilities;

        ifp->if_hdrlen = sizeof(struct ether_vlan_header);

        ifmedia_init(&ha->media, IFM_IMASK, qla_media_change, qla_media_status);

        ifmedia_add(&ha->media, (IFM_ETHER | qla_get_optics(ha) | IFM_FDX), 0,
                NULL);
        ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);

        ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));

        QL_DPRINT2((dev, "%s: exit\n", __func__));
}
static void
qla_init_locked(qla_host_t *ha)
{
        struct ifnet *ifp = ha->ifp;

        qla_stop(ha);

        if (qla_alloc_xmt_bufs(ha) != 0)
                return;

        if (qla_alloc_rcv_bufs(ha) != 0)
                return;

        if (qla_config_lro(ha))
                return;

        bcopy(IF_LLADDR(ha->ifp), ha->hw.mac_addr, ETHER_ADDR_LEN);

        ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_TSO;

        ha->flags.stop_rcv = 0;
        if (qla_init_hw_if(ha) == 0) {
                ifp->if_drv_flags |= IFF_DRV_RUNNING;
                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
                ha->flags.qla_watchdog_pause = 0;
        }
}
static void
qla_init(void *arg)
{
        qla_host_t *ha;

        ha = (qla_host_t *)arg;

        QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

        QLA_LOCK(ha, __func__);
        qla_init_locked(ha);
        QLA_UNLOCK(ha, __func__);

        QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));
}
static u_int
qla_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt)
{
        uint8_t *mta = arg;

        if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS)
                return (0);

        bcopy(LLADDR(sdl), &mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN);

        return (1);
}
static void
qla_set_multi(qla_host_t *ha, uint32_t add_multi)
{
        uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN];
        struct ifnet *ifp = ha->ifp;
        int mcnt;

        mcnt = if_foreach_llmaddr(ifp, qla_copy_maddr, mta);
        qla_hw_set_multi(ha, mta, mcnt, add_multi);
}
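
/*
 * The two helpers above gather the interface's multicast filter:
 * if_foreach_llmaddr() walks the link-level multicast list, invoking
 * qla_copy_maddr() once per address to pack it into the flat mta[] array
 * (stopping at Q8_MAX_NUM_MULTICAST_ADDRS), and the packed list is then
 * handed to the hardware in a single qla_hw_set_multi() call.
 */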
static int
qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
        qla_host_t *ha;
        struct ifreq *ifr = (struct ifreq *)data;
        struct ifaddr *ifa = (struct ifaddr *)data;
        int ret = 0;

        ha = (qla_host_t *)ifp->if_softc;

        switch (cmd) {
        case SIOCSIFADDR:
                QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n",
                        __func__, cmd));

                if (ifa->ifa_addr->sa_family == AF_INET) {
                        ifp->if_flags |= IFF_UP;
                        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                                QLA_LOCK(ha, __func__);
                                qla_init_locked(ha);
                                QLA_UNLOCK(ha, __func__);
                        }
                        QL_DPRINT4((ha->pci_dev,
                                "%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
                                __func__, cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr)));

                        arp_ifinit(ifp, ifa);
                        if (ntohl(IA_SIN(ifa)->sin_addr.s_addr) != INADDR_ANY) {
                                qla_config_ipv4_addr(ha,
                                        (IA_SIN(ifa)->sin_addr.s_addr));
                        }
                } else {
                        ether_ioctl(ifp, cmd, data);
                }
                break;

        case SIOCSIFMTU:
                QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n",
                        __func__, cmd));

                if (ifr->ifr_mtu > QLA_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
                        ret = EINVAL;
                } else {
                        QLA_LOCK(ha, __func__);
                        ifp->if_mtu = ifr->ifr_mtu;
                        ha->max_frame_size =
                                ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
                        if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                                ret = qla_set_max_mtu(ha, ha->max_frame_size,
                                        (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id);
                        }
                        QLA_UNLOCK(ha, __func__);

                        if (ret)
                                ret = EINVAL;
                }
                break;

        case SIOCSIFFLAGS:
                QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n",
                        __func__, cmd));

                if (ifp->if_flags & IFF_UP) {
                        if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                                if ((ifp->if_flags ^ ha->if_flags) &
                                        IFF_PROMISC) {
                                        qla_set_promisc(ha);
                                } else if ((ifp->if_flags ^ ha->if_flags) &
                                        IFF_ALLMULTI) {
                                        qla_set_allmulti(ha);
                                }
                        } else {
                                QLA_LOCK(ha, __func__);
                                qla_init_locked(ha);
                                ha->max_frame_size = ifp->if_mtu +
                                        ETHER_HDR_LEN + ETHER_CRC_LEN;
                                ret = qla_set_max_mtu(ha, ha->max_frame_size,
                                        (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id);
                                QLA_UNLOCK(ha, __func__);
                        }
                } else {
                        QLA_LOCK(ha, __func__);
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                                qla_stop(ha);
                        ha->if_flags = ifp->if_flags;
                        QLA_UNLOCK(ha, __func__);
                }
                break;

        case SIOCADDMULTI:
                QL_DPRINT4((ha->pci_dev,
                        "%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd));

                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        qla_set_multi(ha, 1);
                }
                break;

        case SIOCDELMULTI:
                QL_DPRINT4((ha->pci_dev,
                        "%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd));

                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        qla_set_multi(ha, 0);
                }
                break;

        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                QL_DPRINT4((ha->pci_dev,
                        "%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n",
                        __func__, cmd));
                ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
                break;

        case SIOCSIFCAP:
        {
                int mask = ifr->ifr_reqcap ^ ifp->if_capenable;

                QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n",
                        __func__, cmd));

                if (mask & IFCAP_HWCSUM)
                        ifp->if_capenable ^= IFCAP_HWCSUM;
                if (mask & IFCAP_TSO4)
                        ifp->if_capenable ^= IFCAP_TSO4;
                if (mask & IFCAP_TSO6)
                        ifp->if_capenable ^= IFCAP_TSO6;
                if (mask & IFCAP_VLAN_HWTAGGING)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;

                if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
                        qla_init(ha);

                VLAN_CAPABILITIES(ifp);
                break;
        }

        default:
                QL_DPRINT4((ha->pci_dev, "%s: default (0x%lx)\n",
                        __func__, cmd));
                ret = ether_ioctl(ifp, cmd, data);
                break;
        }

        return (ret);
}
static int
qla_media_change(struct ifnet *ifp)
{
        qla_host_t *ha;
        struct ifmedia *ifm;
        int ret = 0;

        ha = (qla_host_t *)ifp->if_softc;

        QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

        ifm = &ha->media;

        if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
                ret = EINVAL;

        QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));

        return (ret);
}
static void
qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
        qla_host_t *ha;

        ha = (qla_host_t *)ifp->if_softc;

        QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

        ifmr->ifm_status = IFM_AVALID;
        ifmr->ifm_active = IFM_ETHER;

        qla_update_link_state(ha);
        if (ha->hw.flags.link_up) {
                ifmr->ifm_status |= IFM_ACTIVE;
                ifmr->ifm_active |= (IFM_FDX | qla_get_optics(ha));
        }

        QL_DPRINT2((ha->pci_dev, "%s: exit (%s)\n", __func__,
                (ha->hw.flags.link_up ? "link_up" : "link_down")));
}
static void
qla_start(struct ifnet *ifp)
{
        struct mbuf *m_head;
        qla_host_t *ha = (qla_host_t *)ifp->if_softc;

        QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__));

        if (!mtx_trylock(&ha->tx_lock)) {
                QL_DPRINT8((ha->pci_dev,
                        "%s: mtx_trylock(&ha->tx_lock) failed\n", __func__));
                return;
        }

        if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
                IFF_DRV_RUNNING) {
                QL_DPRINT8((ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
                mtx_unlock(&ha->tx_lock);
                return;
        }

        if (!ha->watchdog_ticks)
                qla_update_link_state(ha);

        if (!ha->hw.flags.link_up) {
                QL_DPRINT8((ha->pci_dev, "%s: link down\n", __func__));
                mtx_unlock(&ha->tx_lock);
                return;
        }

        while (ifp->if_snd.ifq_head != NULL) {
                IF_DEQUEUE(&ifp->if_snd, m_head);

                if (m_head == NULL) {
                        QL_DPRINT8((ha->pci_dev, "%s: m_head == NULL\n",
                                __func__));
                        break;
                }

                if (qla_send(ha, &m_head)) {
                        if (m_head == NULL)
                                break;
                        QL_DPRINT8((ha->pci_dev, "%s: PREPEND\n", __func__));
                        ifp->if_drv_flags |= IFF_DRV_OACTIVE;
                        IF_PREPEND(&ifp->if_snd, m_head);
                        break;
                }
                /* Send a copy of the frame to the BPF listener */
                ETHER_BPF_MTAP(ifp, m_head);
        }

        mtx_unlock(&ha->tx_lock);
        QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__));
}
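
/*
 * Locking note for qla_start(): the transmit path only *tries* the tx
 * mutex and simply returns when it is already held, since the watchdog /
 * tx_done path will call qla_start() again. When qla_send() fails on a
 * full ring, IFF_DRV_OACTIVE is set and the dequeued mbuf is prepended
 * back onto if_snd, so no packet is lost.
 */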
static int
qla_send(qla_host_t *ha, struct mbuf **m_headp)
{
        bus_dma_segment_t segs[QLA_MAX_SEGMENTS];
        bus_dmamap_t map;
        int nsegs;
        uint32_t tx_idx;
        int ret = 0;
        struct mbuf *m_head = *m_headp;

        QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__));

        if ((ret = bus_dmamap_create(ha->tx_tag, BUS_DMA_NOWAIT, &map))) {
                ha->err_tx_dmamap_create++;
                device_printf(ha->pci_dev,
                        "%s: bus_dmamap_create failed[%d, %d]\n",
                        __func__, ret, m_head->m_pkthdr.len);
                return (ret);
        }

        ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
                        BUS_DMA_NOWAIT);

        if (ret == EFBIG) {
                struct mbuf *m;

                QL_DPRINT8((ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
                        m_head->m_pkthdr.len));

                m = m_defrag(m_head, M_NOWAIT);
                if (m == NULL) {
                        ha->err_tx_defrag++;
                        m_freem(m_head);
                        *m_headp = NULL;
                        device_printf(ha->pci_dev,
                                "%s: m_defrag() = NULL [%d]\n",
                                __func__, ret);
                        return (ENOBUFS);
                }
                *m_headp = m_head = m;

                if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
                        segs, &nsegs, BUS_DMA_NOWAIT))) {
                        ha->err_tx_dmamap_load++;
                        device_printf(ha->pci_dev,
                                "%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n",
                                __func__, ret, m_head->m_pkthdr.len);
                        bus_dmamap_destroy(ha->tx_tag, map);
                        if (ret != ENOMEM) {
                                m_freem(m_head);
                                *m_headp = NULL;
                        }
                        return (ret);
                }
        } else if (ret) {
                ha->err_tx_dmamap_load++;
                device_printf(ha->pci_dev,
                        "%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n",
                        __func__, ret, m_head->m_pkthdr.len);
                bus_dmamap_destroy(ha->tx_tag, map);
                if (ret != ENOMEM) {
                        m_freem(m_head);
                        *m_headp = NULL;
                }
                return (ret);
        }

        QL_ASSERT((nsegs != 0), ("qla_send: empty packet"));

        bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);

        if (!(ret = qla_hw_send(ha, segs, nsegs, &tx_idx, m_head))) {
                ha->tx_buf[tx_idx].m_head = m_head;
                ha->tx_buf[tx_idx].map = map;
        } else if (ret == EINVAL) {
                m_freem(m_head);
                *m_headp = NULL;
        }

        QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__));
        return (ret);
}
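
/*
 * EFBIG handling above: when an mbuf chain has more fragments than the
 * tag's QLA_MAX_SEGMENTS, the chain is flattened with m_defrag() and the
 * DMA load is retried once; any other load error (except ENOMEM, which is
 * worth retrying later) frees the packet so the caller does not requeue a
 * chain that can never be mapped.
 */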
static void
qla_stop(qla_host_t *ha)
{
        struct ifnet *ifp = ha->ifp;

        ha->flags.qla_watchdog_pause = 1;
        qla_mdelay(__func__, 100);

        ha->flags.stop_rcv = 1;
        qla_hw_stop_rcv(ha);

        qla_free_xmt_bufs(ha);
        qla_free_rcv_bufs(ha);

        ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
}
/*
 * Buffer Management Functions for Transmit and Receive Rings
 */
static int
qla_alloc_xmt_bufs(qla_host_t *ha)
{
        if (bus_dma_tag_create(NULL,    /* parent */
                1, 0,                   /* alignment, bounds */
                BUS_SPACE_MAXADDR,      /* lowaddr */
                BUS_SPACE_MAXADDR,      /* highaddr */
                NULL, NULL,             /* filter, filterarg */
                QLA_MAX_TSO_FRAME_SIZE, /* maxsize */
                QLA_MAX_SEGMENTS,       /* nsegments */
                PAGE_SIZE,              /* maxsegsize */
                BUS_DMA_ALLOCNOW,       /* flags */
                NULL,                   /* lockfunc */
                NULL,                   /* lockfuncarg */
                &ha->tx_tag)) {
                device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n",
                        __func__);
                return (ENOMEM);
        }

        bzero((void *)ha->tx_buf, (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));

        return (0);
}
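
/*
 * Sizing note for the transmit tag above: a single TSO frame may span up
 * to QLA_MAX_TSO_FRAME_SIZE bytes in at most QLA_MAX_SEGMENTS scatter
 * entries of at most PAGE_SIZE each, matching the segs[] array that
 * qla_send() passes to bus_dmamap_load_mbuf_sg().
 */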
/*
 * Release the mbuf after it is sent on the wire
 */
static void
qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb)
{
        QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

        if (txb->m_head) {
                bus_dmamap_unload(ha->tx_tag, txb->map);
                bus_dmamap_destroy(ha->tx_tag, txb->map);

                m_freem(txb->m_head);
                txb->m_head = NULL;
        }

        QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));
}
static void
qla_free_xmt_bufs(qla_host_t *ha)
{
        int i;

        for (i = 0; i < NUM_TX_DESCRIPTORS; i++)
                qla_clear_tx_buf(ha, &ha->tx_buf[i]);

        if (ha->tx_tag != NULL) {
                bus_dma_tag_destroy(ha->tx_tag);
                ha->tx_tag = NULL;
        }

        bzero((void *)ha->tx_buf, (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
}
static int
qla_alloc_rcv_bufs(qla_host_t *ha)
{
        int i, j, ret = 0;
        qla_rx_buf_t *rxb;

        if (bus_dma_tag_create(NULL,    /* parent */
                        1, 0,           /* alignment, bounds */
                        BUS_SPACE_MAXADDR,      /* lowaddr */
                        BUS_SPACE_MAXADDR,      /* highaddr */
                        NULL, NULL,     /* filter, filterarg */
                        MJUM9BYTES,     /* maxsize */
                        1,              /* nsegments */
                        MJUM9BYTES,     /* maxsegsize */
                        BUS_DMA_ALLOCNOW,       /* flags */
                        NULL,           /* lockfunc */
                        NULL,           /* lockfuncarg */
                        &ha->rx_tag)) {
                device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
                        __func__);
                return (ENOMEM);
        }

        bzero((void *)ha->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS));
        bzero((void *)ha->rx_jbuf,
                (sizeof(qla_rx_buf_t) * NUM_RX_JUMBO_DESCRIPTORS));

        for (i = 0; i < MAX_SDS_RINGS; i++) {
                ha->hw.sds[i].sdsr_next = 0;
                ha->hw.sds[i].rxb_free = NULL;
                ha->hw.sds[i].rx_free = 0;
                ha->hw.sds[i].rxjb_free = NULL;
                ha->hw.sds[i].rxj_free = 0;
        }

        for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
                rxb = &ha->rx_buf[i];

                ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT, &rxb->map);

                if (ret) {
                        device_printf(ha->pci_dev,
                                "%s: dmamap[%d] failed\n", __func__, i);

                        for (j = 0; j < i; j++) {
                                bus_dmamap_destroy(ha->rx_tag,
                                        ha->rx_buf[j].map);
                        }
                        goto qla_alloc_rcv_bufs_failed;
                }
        }

        qla_init_hw_rcv_descriptors(ha, RDS_RING_INDEX_NORMAL);

        for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
                rxb = &ha->rx_buf[i];

                if (!(ret = qla_get_mbuf(ha, rxb, NULL, 0))) {
                        /*
                         * set the physical address in the corresponding
                         * descriptor entry in the receive ring/queue for the
                         * hw
                         */
                        qla_set_hw_rcv_desc(ha, RDS_RING_INDEX_NORMAL, i,
                                rxb->handle, rxb->paddr,
                                (rxb->m_head)->m_pkthdr.len);
                } else {
                        device_printf(ha->pci_dev,
                                "%s: qla_get_mbuf [standard(%d)] failed\n",
                                __func__, i);
                        bus_dmamap_destroy(ha->rx_tag, rxb->map);
                        goto qla_alloc_rcv_bufs_failed;
                }
        }

        for (i = 0; i < NUM_RX_JUMBO_DESCRIPTORS; i++) {
                rxb = &ha->rx_jbuf[i];

                ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT, &rxb->map);

                if (ret) {
                        device_printf(ha->pci_dev,
                                "%s: dmamap[%d] failed\n", __func__, i);

                        for (j = 0; j < i; j++) {
                                bus_dmamap_destroy(ha->rx_tag,
                                        ha->rx_jbuf[j].map);
                        }
                        goto qla_alloc_rcv_bufs_failed;
                }
        }

        qla_init_hw_rcv_descriptors(ha, RDS_RING_INDEX_JUMBO);

        for (i = 0; i < NUM_RX_JUMBO_DESCRIPTORS; i++) {
                rxb = &ha->rx_jbuf[i];

                if (!(ret = qla_get_mbuf(ha, rxb, NULL, 1))) {
                        /*
                         * set the physical address in the corresponding
                         * descriptor entry in the receive ring/queue for the
                         * hw
                         */
                        qla_set_hw_rcv_desc(ha, RDS_RING_INDEX_JUMBO, i,
                                rxb->handle, rxb->paddr,
                                (rxb->m_head)->m_pkthdr.len);
                } else {
                        device_printf(ha->pci_dev,
                                "%s: qla_get_mbuf [jumbo(%d)] failed\n",
                                __func__, i);
                        bus_dmamap_destroy(ha->rx_tag, rxb->map);
                        goto qla_alloc_rcv_bufs_failed;
                }
        }

        return (0);

qla_alloc_rcv_bufs_failed:
        qla_free_rcv_bufs(ha);
        return (ret);
}
static void
qla_free_rcv_bufs(qla_host_t *ha)
{
        int i;
        qla_rx_buf_t *rxb;

        for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
                rxb = &ha->rx_buf[i];
                if (rxb->m_head != NULL) {
                        bus_dmamap_unload(ha->rx_tag, rxb->map);
                        bus_dmamap_destroy(ha->rx_tag, rxb->map);
                        m_freem(rxb->m_head);
                        rxb->m_head = NULL;
                }
        }

        for (i = 0; i < NUM_RX_JUMBO_DESCRIPTORS; i++) {
                rxb = &ha->rx_jbuf[i];
                if (rxb->m_head != NULL) {
                        bus_dmamap_unload(ha->rx_tag, rxb->map);
                        bus_dmamap_destroy(ha->rx_tag, rxb->map);
                        m_freem(rxb->m_head);
                        rxb->m_head = NULL;
                }
        }

        if (ha->rx_tag != NULL) {
                bus_dma_tag_destroy(ha->rx_tag);
                ha->rx_tag = NULL;
        }

        bzero((void *)ha->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS));
        bzero((void *)ha->rx_jbuf,
                (sizeof(qla_rx_buf_t) * NUM_RX_JUMBO_DESCRIPTORS));

        for (i = 0; i < MAX_SDS_RINGS; i++) {
                ha->hw.sds[i].sdsr_next = 0;
                ha->hw.sds[i].rxb_free = NULL;
                ha->hw.sds[i].rx_free = 0;
                ha->hw.sds[i].rxjb_free = NULL;
                ha->hw.sds[i].rxj_free = 0;
        }
}
int
qla_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp,
        uint32_t jumbo)
{
        struct mbuf *mp = nmp;
        int ret = 0;
        uint32_t offset;

        QL_DPRINT2((ha->pci_dev, "%s: jumbo(0x%x) enter\n", __func__, jumbo));

        if (mp == NULL) {
                if (!jumbo) {
                        mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
                        if (mp == NULL) {
                                ret = ENOBUFS;
                                device_printf(ha->pci_dev,
                                        "%s: m_getcl failed\n", __func__);
                                goto exit_qla_get_mbuf;
                        }
                        mp->m_len = mp->m_pkthdr.len = MCLBYTES;
                } else {
                        mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
                                MJUM9BYTES);
                        if (mp == NULL) {
                                ret = ENOBUFS;
                                device_printf(ha->pci_dev,
                                        "%s: m_getjcl failed\n", __func__);
                                goto exit_qla_get_mbuf;
                        }
                        mp->m_len = mp->m_pkthdr.len = MJUM9BYTES;
                }
        } else {
                if (!jumbo)
                        mp->m_len = mp->m_pkthdr.len = MCLBYTES;
                else
                        mp->m_len = mp->m_pkthdr.len = MJUM9BYTES;

                mp->m_data = mp->m_ext.ext_buf;
                mp->m_next = NULL;
        }

        offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL);
        if (offset) {
                offset = 8 - offset;
                m_adj(mp, offset);
        }
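
        /*
         * The adjustment above pushes m_data forward so the receive buffer
         * starts on an 8-byte boundary: if the cluster address has any of
         * its low three bits set, the first (8 - offset) bytes are trimmed
         * before the buffer is handed to the hardware.
         */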
        /*
         * Using memory from the mbuf cluster pool, invoke the bus_dma
         * machinery to arrange the memory mapping.
         */
        ret = bus_dmamap_load(ha->rx_tag, rxb->map,
                        mtod(mp, void *), mp->m_len,
                        qla_dmamap_callback, &rxb->paddr,
                        BUS_DMA_NOWAIT);

        if (ret || !rxb->paddr) {
                m_freem(mp);
                rxb->m_head = NULL;
                device_printf(ha->pci_dev,
                        "%s: bus_dmamap_load failed\n", __func__);
                ret = -1;
                goto exit_qla_get_mbuf;
        }

        rxb->m_head = mp;
        bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD);

exit_qla_get_mbuf:
        QL_DPRINT2((ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret));
        return (ret);
}
static void
qla_tx_done(void *context, int pending)
{
        qla_host_t *ha = context;