2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2010-2016 Solarflare Communications Inc.
7 * This software was developed in part by Philip Paeps under contract for
8 * Solarflare Communications, Inc.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions are met:
13 * 1. Redistributions of source code must retain the above copyright notice,
14 * this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright notice,
16 * this list of conditions and the following disclaimer in the documentation
17 * and/or other materials provided with the distribution.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
26 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
28 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
29 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 * The views and conclusions contained in the software and documentation are
32 * those of the authors and should not be interpreted as representing official
33 * policies, either expressed or implied, of the FreeBSD Project.
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
41 #include <sys/param.h>
42 #include <sys/kernel.h>
46 #include <sys/module.h>
47 #include <sys/mutex.h>
49 #include <sys/socket.h>
50 #include <sys/taskqueue.h>
51 #include <sys/sockio.h>
52 #include <sys/sysctl.h>
54 #include <sys/syslog.h>
56 #include <dev/pci/pcireg.h>
57 #include <dev/pci/pcivar.h>
59 #include <net/ethernet.h>
61 #include <net/if_var.h>
62 #include <net/if_media.h>
63 #include <net/if_types.h>
66 #include <net/rss_config.h>
69 #include "common/efx.h"
73 #include "sfxge_ioc.h"
74 #include "sfxge_version.h"
/*
 * Interface capability sets.
 * SFXGE_CAP        - everything the driver can offer to the stack.
 * SFXGE_CAP_ENABLE - capabilities enabled by default (all of them).
 * SFXGE_CAP_FIXED  - capabilities that userland may never toggle off
 *                    (checked in the SIOCSIFCAP handler below).
 */
76 #define SFXGE_CAP (IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM | \
77 IFCAP_RXCSUM | IFCAP_TXCSUM | \
78 IFCAP_RXCSUM_IPV6 | IFCAP_TXCSUM_IPV6 | \
79 IFCAP_TSO4 | IFCAP_TSO6 | \
81 IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWSTATS)
82 #define SFXGE_CAP_ENABLE SFXGE_CAP
83 #define SFXGE_CAP_FIXED (IFCAP_VLAN_MTU | \
84 IFCAP_JUMBO_MTU | IFCAP_LINKSTATE | IFCAP_HWSTATS)
/* Driver-private malloc type for all sfxge allocations. */
86 MALLOC_DEFINE(M_SFXGE, "sfxge", "Solarflare 10GigE driver");
/* Root of the hw.sfxge sysctl/tunable tree. */
89 SYSCTL_NODE(_hw, OID_AUTO, sfxge, CTLFLAG_RD, 0,
90 "SFXGE driver parameters");
/* hw.sfxge.rx_ring: RX descriptor ring size; read-only tunable. */
92 #define SFXGE_PARAM_RX_RING SFXGE_PARAM(rx_ring)
93 static int sfxge_rx_ring_entries = SFXGE_NDESCS;
94 TUNABLE_INT(SFXGE_PARAM_RX_RING, &sfxge_rx_ring_entries);
95 SYSCTL_INT(_hw_sfxge, OID_AUTO, rx_ring, CTLFLAG_RDTUN,
96 &sfxge_rx_ring_entries, 0,
97 "Maximum number of descriptors in a receive ring");
/* hw.sfxge.tx_ring: TX descriptor ring size; read-only tunable. */
99 #define SFXGE_PARAM_TX_RING SFXGE_PARAM(tx_ring)
100 static int sfxge_tx_ring_entries = SFXGE_NDESCS;
101 TUNABLE_INT(SFXGE_PARAM_TX_RING, &sfxge_tx_ring_entries);
102 SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_ring, CTLFLAG_RDTUN,
103 &sfxge_tx_ring_entries, 0,
104 "Maximum number of descriptors in a transmit ring");
/*
 * hw.sfxge.restart_attempts: how many times sfxge_reset() retries
 * sfxge_start() before giving up and leaving the interface down.
 */
106 #define SFXGE_PARAM_RESTART_ATTEMPTS SFXGE_PARAM(restart_attempts)
107 static int sfxge_restart_attempts = 3;
108 TUNABLE_INT(SFXGE_PARAM_RESTART_ATTEMPTS, &sfxge_restart_attempts);
109 SYSCTL_INT(_hw_sfxge, OID_AUTO, restart_attempts, CTLFLAG_RDTUN,
110 &sfxge_restart_attempts, 0,
111 "Maximum number of attempts to bring interface up after reset");
/* Optional MCDI logging knob, compiled in only with EFSYS_OPT_MCDI_LOGGING. */
113 #if EFSYS_OPT_MCDI_LOGGING
114 #define SFXGE_PARAM_MCDI_LOGGING SFXGE_PARAM(mcdi_logging)
115 static int sfxge_mcdi_logging = 0;
116 TUNABLE_INT(SFXGE_PARAM_MCDI_LOGGING, &sfxge_mcdi_logging);
/* Forward declaration: reset task handler, defined near the end of the file. */
120 sfxge_reset(void *arg, int npending);
/*
 * Estimate how many event queues (and hence RX/TX queues) the NIC can
 * actually provide, before interrupts are allocated.  Initializes the NIC
 * with provisional limits, queries the allocated VI pool, and records the
 * result in sc->evq_max.  On success the NIC is left initialized so that
 * the port can be probed for media types (see comment below).
 */
123 sfxge_estimate_rsrc_limits(struct sfxge_softc *sc)
125 efx_drv_limits_t limits;
127 unsigned int evq_max;
128 uint32_t evq_allocated;
129 uint32_t rxq_allocated;
130 uint32_t txq_allocated;
133 * Limit the number of event queues to:
135 * - hardwire maximum RSS channels
136 * - administratively specified maximum RSS channels
140 * Avoid extra limitations so that the number of queues
141 * may be configured at administrator's will
/* With kernel RSS: at least one bucket, capped at EFX_MAXRSS. */
143 evq_max = MIN(MAX(rss_getnumbuckets(), 1), EFX_MAXRSS);
/* Without kernel RSS: one queue per CPU, capped at EFX_MAXRSS. */
145 evq_max = MIN(mp_ncpus, EFX_MAXRSS);
147 if (sc->max_rss_channels > 0)
148 evq_max = MIN(evq_max, sc->max_rss_channels);
150 memset(&limits, 0, sizeof(limits));
/* Provisional limits: 1..evq_max EVQs; EVQ0 hosts extra TXQs. */
152 limits.edl_min_evq_count = 1;
153 limits.edl_max_evq_count = evq_max;
154 limits.edl_min_txq_count = SFXGE_EVQ0_N_TXQ(sc);
155 limits.edl_max_txq_count = evq_max + SFXGE_EVQ0_N_TXQ(sc) - 1;
156 limits.edl_min_rxq_count = 1;
157 limits.edl_max_rxq_count = evq_max;
159 efx_nic_set_drv_limits(sc->enp, &limits);
161 if ((rc = efx_nic_init(sc->enp)) != 0)
164 rc = efx_nic_get_vi_pool(sc->enp, &evq_allocated, &rxq_allocated,
167 efx_nic_fini(sc->enp);
171 KASSERT(txq_allocated >= SFXGE_EVQ0_N_TXQ(sc),
172 ("txq_allocated < %u", SFXGE_EVQ0_N_TXQ(sc)));
/* evq_max becomes the min of requested vs. allocated EVQ/RXQ/TXQ counts. */
174 sc->evq_max = MIN(evq_allocated, evq_max);
175 sc->evq_max = MIN(rxq_allocated, sc->evq_max);
176 sc->evq_max = MIN(txq_allocated - (SFXGE_EVQ0_N_TXQ(sc) - 1),
179 KASSERT(sc->evq_max <= evq_max,
180 ("allocated more than maximum requested"));
/* Warn if fewer queues than RSS buckets: traffic spreading degrades. */
183 if (sc->evq_max < rss_getnumbuckets())
184 device_printf(sc->dev, "The number of allocated queues (%u) "
185 "is less than the number of RSS buckets (%u); "
186 "performance degradation might be observed",
187 sc->evq_max, rss_getnumbuckets())
191 * NIC is kept initialized in the case of success to be able to
192 * initialize port to find out media types.
/*
 * Program the final (strict, min == max) resource limits into the common
 * code, based on the number of interrupts actually allocated
 * (sc->intr.n_alloc).  Called from sfxge_start() before efx_nic_init().
 */
198 sfxge_set_drv_limits(struct sfxge_softc *sc)
200 efx_drv_limits_t limits;
202 memset(&limits, 0, sizeof(limits));
204 /* Limits are strict since take into account initial estimation */
205 limits.edl_min_evq_count = limits.edl_max_evq_count =
/* TXQ count: one per EVQ plus the extra queues hosted on EVQ0. */
207 limits.edl_min_txq_count = limits.edl_max_txq_count =
208 sc->intr.n_alloc + SFXGE_EVQ0_N_TXQ(sc) - 1;
209 limits.edl_min_rxq_count = limits.edl_max_rxq_count =
212 return (efx_nic_set_drv_limits(sc->enp, &limits));
/*
 * Bring the adapter up: set driver limits, initialize the NIC, then start
 * interrupts, events, port, RX and TX in order.  On success the state
 * moves to SFXGE_STARTED and the ifnet is marked running.  Must be called
 * with the adapter lock held (asserted below).  Idempotent when already
 * started.
 */
216 sfxge_start(struct sfxge_softc *sc)
220 SFXGE_ADAPTER_LOCK_ASSERT_OWNED(sc);
/* Already up: nothing to do. */
222 if (sc->init_state == SFXGE_STARTED)
/* Can only start from the REGISTERED state. */
225 if (sc->init_state != SFXGE_REGISTERED) {
230 /* Set required resource limits */
231 if ((rc = sfxge_set_drv_limits(sc)) != 0)
234 if ((rc = efx_nic_init(sc->enp)) != 0)
237 /* Start processing interrupts. */
238 if ((rc = sfxge_intr_start(sc)) != 0)
241 /* Start processing events. */
242 if ((rc = sfxge_ev_start(sc)) != 0)
245 /* Fire up the port. */
246 if ((rc = sfxge_port_start(sc)) != 0)
249 /* Start the receiver side. */
250 if ((rc = sfxge_rx_start(sc)) != 0)
253 /* Start the transmitter side. */
254 if ((rc = sfxge_tx_start(sc)) != 0)
257 sc->init_state = SFXGE_STARTED;
259 /* Tell the stack we're running. */
260 sc->ifnet->if_drv_flags |= IFF_DRV_RUNNING;
261 sc->ifnet->if_drv_flags &= ~IFF_DRV_OACTIVE;
/* Error unwind path: tear down the NIC init done above. */
278 efx_nic_fini(sc->enp);
281 device_printf(sc->dev, "sfxge_start: %d\n", rc);
/*
 * ifnet if_init callback: take the adapter lock and (re)start the
 * interface; the sfxge_start() return value is intentionally ignored
 * here since if_init returns void.
 */
287 sfxge_if_init(void *arg)
289 struct sfxge_softc *sc;
291 sc = (struct sfxge_softc *)arg;
293 SFXGE_ADAPTER_LOCK(sc);
294 (void)sfxge_start(sc);
295 SFXGE_ADAPTER_UNLOCK(sc);
/*
 * Bring the adapter down: mirror image of sfxge_start().  Stops TX, RX,
 * port, events and interrupts in reverse start order, finalizes the NIC
 * and clears IFF_DRV_RUNNING.  Caller must hold the adapter lock.
 */
299 sfxge_stop(struct sfxge_softc *sc)
301 SFXGE_ADAPTER_LOCK_ASSERT_OWNED(sc);
/* No-op unless currently started. */
303 if (sc->init_state != SFXGE_STARTED)
306 sc->init_state = SFXGE_REGISTERED;
308 /* Stop the transmitter. */
311 /* Stop the receiver. */
317 /* Stop processing events. */
320 /* Stop processing interrupts. */
323 efx_nic_fini(sc->enp);
325 sc->ifnet->if_drv_flags &= ~IFF_DRV_RUNNING;
/*
 * Handle the private VPD ioctl: get or set a single VPD keyword in the
 * cached copy (sc->vpd_data), then verify and write it back to NVRAM on
 * the set path.  User buffers are accessed via copyin/copyout.
 */
330 sfxge_vpd_ioctl(struct sfxge_softc *sc, sfxge_ioc_t *ioc)
332 efx_vpd_value_t value;
335 switch (ioc->u.vpd.op) {
336 case SFXGE_VPD_OP_GET_KEYWORD:
337 value.evv_tag = ioc->u.vpd.tag;
338 value.evv_keyword = ioc->u.vpd.keyword;
339 rc = efx_vpd_get(sc->enp, sc->vpd_data, sc->vpd_size, &value);
/* Truncate to the caller's buffer; copy out only if a buffer was given. */
342 ioc->u.vpd.len = MIN(ioc->u.vpd.len, value.evv_length);
343 if (ioc->u.vpd.payload != 0) {
344 rc = copyout(value.evv_value, ioc->u.vpd.payload,
348 case SFXGE_VPD_OP_SET_KEYWORD:
/* Reject values larger than the fixed evv_value buffer. */
349 if (ioc->u.vpd.len > sizeof(value.evv_value))
351 value.evv_tag = ioc->u.vpd.tag;
352 value.evv_keyword = ioc->u.vpd.keyword;
353 value.evv_length = ioc->u.vpd.len;
354 rc = copyin(ioc->u.vpd.payload, value.evv_value, value.evv_length);
357 rc = efx_vpd_set(sc->enp, sc->vpd_data, sc->vpd_size, &value);
/* Validate the modified image, then persist it to NVRAM. */
360 rc = efx_vpd_verify(sc->enp, sc->vpd_data, sc->vpd_size);
363 rc = efx_vpd_write(sc->enp, sc->vpd_data, sc->vpd_size);
/*
 * Dispatch a driver-private ioctl (already copied in and privilege-checked
 * by the caller) to the MCDI, NVRAM or VPD sub-handler by opcode.
 */
374 sfxge_private_ioctl(struct sfxge_softc *sc, sfxge_ioc_t *ioc)
378 return (sfxge_mcdi_ioctl(sc, ioc));
379 case SFXGE_NVRAM_IOC:
380 return (sfxge_nvram_ioctl(sc, ioc));
382 return (sfxge_vpd_ioctl(sc, ioc));
/*
 * ifnet if_ioctl callback.  Handles interface flags, MTU, multicast,
 * capability changes (SIOCSIFCAP), media, module i2c info and the
 * driver-private ioctl; everything else falls through to ether_ioctl().
 */
390 sfxge_if_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
392 struct sfxge_softc *sc;
397 ifr = (struct ifreq *)data;
/* --- interface flags (IFF_UP / promiscuity) handling --- */
403 SFXGE_ADAPTER_LOCK(sc);
404 if (ifp->if_flags & IFF_UP) {
405 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
/* Only reprogram the MAC filter when PROMISC/ALLMULTI changed. */
406 if ((ifp->if_flags ^ sc->if_flags) &
407 (IFF_PROMISC | IFF_ALLMULTI)) {
408 sfxge_mac_filter_set(sc);
413 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
415 sc->if_flags = ifp->if_flags;
416 SFXGE_ADAPTER_UNLOCK(sc);
/* --- MTU change: no-op if unchanged, reject above SFXGE_MAX_MTU --- */
419 if (ifr->ifr_mtu == ifp->if_mtu) {
422 } else if (ifr->ifr_mtu > SFXGE_MAX_MTU) {
424 } else if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
425 ifp->if_mtu = ifr->ifr_mtu;
428 /* Restart required */
429 SFXGE_ADAPTER_LOCK(sc);
431 ifp->if_mtu = ifr->ifr_mtu;
432 error = sfxge_start(sc);
433 SFXGE_ADAPTER_UNLOCK(sc);
/* Restart failed: mark the interface down. */
435 ifp->if_flags &= ~IFF_UP;
436 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
/* --- multicast list change: refresh the hardware MAC filter --- */
443 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
444 sfxge_mac_filter_set(sc);
/* --- SIOCSIFCAP: capability enable/disable --- */
448 int reqcap = ifr->ifr_reqcap;
451 SFXGE_ADAPTER_LOCK(sc);
453 /* Capabilities to be changed in accordance with request */
454 capchg_mask = ifp->if_capenable ^ reqcap;
457 * The networking core already rejects attempts to
458 * enable capabilities we don't have. We still have
459 * to reject attempts to disable capabilities that we
460 * can't (yet) disable.
462 KASSERT((reqcap & ~ifp->if_capabilities) == 0,
463 ("Unsupported capabilities 0x%x requested 0x%x vs "
465 reqcap & ~ifp->if_capabilities,
466 reqcap , ifp->if_capabilities));
/* Fixed capabilities cannot be toggled at all. */
467 if (capchg_mask & SFXGE_CAP_FIXED) {
469 SFXGE_ADAPTER_UNLOCK(sc);
473 /* Check request before any changes */
/* TSO4 requires TXCSUM; reject the combination up front. */
474 if ((capchg_mask & IFCAP_TSO4) &&
475 (reqcap & (IFCAP_TSO4 | IFCAP_TXCSUM)) == IFCAP_TSO4) {
477 SFXGE_ADAPTER_UNLOCK(sc);
478 if_printf(ifp, "enable txcsum before tso4\n");
/* Likewise TSO6 requires TXCSUM_IPV6. */
481 if ((capchg_mask & IFCAP_TSO6) &&
482 (reqcap & (IFCAP_TSO6 | IFCAP_TXCSUM_IPV6)) == IFCAP_TSO6) {
484 SFXGE_ADAPTER_UNLOCK(sc);
485 if_printf(ifp, "enable txcsum6 before tso6\n");
/* Apply TXCSUM; dropping it also forces TSO4 off. */
489 if (reqcap & IFCAP_TXCSUM) {
490 ifp->if_hwassist |= (CSUM_IP | CSUM_TCP | CSUM_UDP);
492 ifp->if_hwassist &= ~(CSUM_IP | CSUM_TCP | CSUM_UDP);
493 if (reqcap & IFCAP_TSO4) {
494 reqcap &= ~IFCAP_TSO4;
496 "tso4 disabled due to -txcsum\n");
/* Apply TXCSUM_IPV6; dropping it also forces TSO6 off. */
499 if (reqcap & IFCAP_TXCSUM_IPV6) {
500 ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);
502 ifp->if_hwassist &= ~(CSUM_TCP_IPV6 | CSUM_UDP_IPV6);
503 if (reqcap & IFCAP_TSO6) {
504 reqcap &= ~IFCAP_TSO6;
506 "tso6 disabled due to -txcsum6\n");
511 * The kernel takes both IFCAP_TSOx and CSUM_TSO into
512 * account before using TSO. So, we do not touch
513 * checksum flags when IFCAP_TSOx is modified.
514 * Note that CSUM_TSO is (CSUM_IP_TSO|CSUM_IP6_TSO),
515 * but both bits are set in IPv4 and IPv6 mbufs.
518 ifp->if_capenable = reqcap;
520 SFXGE_ADAPTER_UNLOCK(sc);
/* --- media ioctls delegated to ifmedia --- */
525 error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
/* --- transceiver module i2c info (copyin request, query PHY, copyout) --- */
532 error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
536 if (i2c.len > sizeof(i2c.data)) {
541 SFXGE_ADAPTER_LOCK(sc);
542 error = efx_phy_module_get_info(sc->enp, i2c.dev_addr,
545 SFXGE_ADAPTER_UNLOCK(sc);
547 error = copyout(&i2c, ifr_data_get_ptr(ifr),
/* --- private ioctl: privilege check, copyin, dispatch, copyout --- */
553 error = priv_check(curthread, PRIV_DRIVER);
556 error = copyin(ifr_data_get_ptr(ifr), &ioc, sizeof(ioc));
559 error = sfxge_private_ioctl(sc, &ioc);
561 error = copyout(&ioc, ifr_data_get_ptr(ifr),
/* --- everything else: generic Ethernet handling --- */
566 error = ether_ioctl(ifp, command, data);
/*
 * Undo sfxge_ifnet_init(): stop the interface under the adapter lock and
 * remove all registered media types.
 */
573 sfxge_ifnet_fini(struct ifnet *ifp)
575 struct sfxge_softc *sc = ifp->if_softc;
577 SFXGE_ADAPTER_LOCK(sc);
579 SFXGE_ADAPTER_UNLOCK(sc);
581 ifmedia_removeall(&sc->media);
/*
 * Populate the ifnet for this port: callbacks, capabilities, TSO limits
 * and hardware-assist flags; then attach it to the Ethernet layer and
 * initialize ifmedia.  On ifmedia failure the ifnet is detached again.
 */
587 sfxge_ifnet_init(struct ifnet *ifp, struct sfxge_softc *sc)
589 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sc->enp);
596 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
597 ifp->if_init = sfxge_if_init;
599 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
600 ifp->if_ioctl = sfxge_if_ioctl;
602 ifp->if_capabilities = SFXGE_CAP;
603 ifp->if_capenable = SFXGE_CAP_ENABLE;
/* TSO limits derived from the driver's DMA mapping constraints. */
604 ifp->if_hw_tsomax = SFXGE_TSO_MAX_SIZE;
605 ifp->if_hw_tsomaxsegcount = SFXGE_TX_MAPPING_MAX_SEG;
606 ifp->if_hw_tsomaxsegsize = PAGE_SIZE;
/* LRO advertised and enabled by default (conditional in full source). */
609 ifp->if_capabilities |= IFCAP_LRO;
610 ifp->if_capenable |= IFCAP_LRO;
/* VLAN tag insertion only if the firmware supports it. */
613 if (encp->enc_hw_tx_insert_vlan_enabled) {
614 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
615 ifp->if_capenable |= IFCAP_VLAN_HWTAGGING;
617 ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
618 CSUM_TCP_IPV6 | CSUM_UDP_IPV6;
620 ether_ifattach(ifp, encp->enc_mac_addr);
622 ifp->if_transmit = sfxge_if_transmit;
623 ifp->if_qflush = sfxge_if_qflush;
625 ifp->if_get_counter = sfxge_get_counter;
627 DBGPRINT(sc->dev, "ifmedia_init");
628 if ((rc = sfxge_port_ifmedia_init(sc)) != 0)
/* Error path: detach the ifnet attached above. */
634 ether_ifdetach(sc->ifnet);
/*
 * Allocate 'n' consecutive buffer-table entries by bumping a simple
 * watermark allocator; the base id is returned via *idp.  There is no
 * free operation — entries are handed out for the adapter's lifetime.
 */
639 sfxge_sram_buf_tbl_alloc(struct sfxge_softc *sc, size_t n, uint32_t *idp)
641 KASSERT(sc->buffer_table_next + n <=
642 efx_nic_cfg_get(sc->enp)->enc_buftbl_limit,
643 ("buffer table full"));
645 *idp = sc->buffer_table_next;
646 sc->buffer_table_next += n;
/*
 * Map the NIC's register BAR (index from efx_family()) and record the bus
 * tag/handle for register access; also creates the BAR lock.
 */
650 sfxge_bar_init(struct sfxge_softc *sc)
652 efsys_bar_t *esbp = &sc->bar;
654 esbp->esb_rid = PCIR_BAR(sc->mem_bar);
655 if ((esbp->esb_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
656 &esbp->esb_rid, RF_ACTIVE)) == NULL) {
657 device_printf(sc->dev, "Cannot allocate BAR region %d\n",
661 esbp->esb_tag = rman_get_bustag(esbp->esb_res);
662 esbp->esb_handle = rman_get_bushandle(esbp->esb_res);
664 SFXGE_BAR_LOCK_INIT(esbp, device_get_nameunit(sc->dev));
/* Release the register BAR mapping and destroy the BAR lock. */
670 sfxge_bar_fini(struct sfxge_softc *sc)
672 efsys_bar_t *esbp = &sc->bar;
674 bus_release_resource(sc->dev, SYS_RES_MEMORY, esbp->esb_rid,
676 SFXGE_BAR_LOCK_DESTROY(esbp);
/*
 * One-time hardware bring-up during attach: fetch per-device tunables,
 * create sysctl nodes, initialize DMA and the register BAR, create the
 * common-code NIC object, probe it, validate ring-size tunables, set up
 * NVRAM/VPD/MCDI, estimate resource limits, and initialize the interrupt,
 * event, port, RX and TX subsystems.  Error paths unwind in reverse order
 * via the fail_* labels at the bottom (most labels elided in this
 * fragment).
 */
680 sfxge_create(struct sfxge_softc *sc)
685 char rss_param_name[sizeof(SFXGE_PARAM(%d.max_rss_channels))];
686 #if EFSYS_OPT_MCDI_LOGGING
687 char mcdi_log_param_name[sizeof(SFXGE_PARAM(%d.mcdi_logging))];
692 SFXGE_ADAPTER_LOCK_INIT(sc, device_get_nameunit(sc->dev));
/* Per-unit tunable hw.sfxge.N.max_rss_channels (0 = no extra limit). */
694 sc->max_rss_channels = 0;
695 snprintf(rss_param_name, sizeof(rss_param_name),
696 SFXGE_PARAM(%d.max_rss_channels),
697 (int)device_get_unit(dev));
698 TUNABLE_INT_FETCH(rss_param_name, &sc->max_rss_channels);
699 #if EFSYS_OPT_MCDI_LOGGING
/* Per-unit MCDI logging override of the global default. */
700 sc->mcdi_logging = sfxge_mcdi_logging;
701 snprintf(mcdi_log_param_name, sizeof(mcdi_log_param_name),
702 SFXGE_PARAM(%d.mcdi_logging),
703 (int)device_get_unit(dev));
704 TUNABLE_INT_FETCH(mcdi_log_param_name, &sc->mcdi_logging);
/* Per-device "stats" sysctl node; statistics are attached under it. */
707 sc->stats_node = SYSCTL_ADD_NODE(
708 device_get_sysctl_ctx(dev),
709 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
710 OID_AUTO, "stats", CTLFLAG_RD, NULL, "Statistics");
711 if (sc->stats_node == NULL) {
716 TASK_INIT(&sc->task_reset, 0, sfxge_reset, sc);
718 (void) pci_enable_busmaster(dev);
720 /* Initialize DMA mappings. */
721 DBGPRINT(sc->dev, "dma_init...");
722 if ((error = sfxge_dma_init(sc)) != 0)
/* Family lookup cannot fail here: sfxge_probe() already filtered. */
725 error = efx_family(pci_get_vendor(dev), pci_get_device(dev),
726 &sc->family, &sc->mem_bar);
727 KASSERT(error == 0, ("Family should be filtered by sfxge_probe()"));
729 /* Map the device registers. */
730 DBGPRINT(sc->dev, "bar_init...");
731 if ((error = sfxge_bar_init(sc)) != 0)
734 DBGPRINT(sc->dev, "nic_create...");
736 /* Create the common code nic object. */
737 SFXGE_EFSYS_LOCK_INIT(&sc->enp_lock,
738 device_get_nameunit(sc->dev), "nic");
739 if ((error = efx_nic_create(sc->family, (efsys_identifier_t *)sc,
740 &sc->bar, &sc->enp_lock, &enp)) != 0)
744 /* Initialize MCDI to talk to the microcontroller. */
745 DBGPRINT(sc->dev, "mcdi_init...");
746 if ((error = sfxge_mcdi_init(sc)) != 0)
749 /* Probe the NIC and build the configuration data area. */
750 DBGPRINT(sc->dev, "nic_probe...");
751 if ((error = efx_nic_probe(enp, EFX_FW_VARIANT_DONT_CARE)) != 0)
/* Validate hw.sfxge.rx_ring: power of 2 within the NIC's RXQ range. */
754 if (!ISP2(sfxge_rx_ring_entries) ||
755 (sfxge_rx_ring_entries < EFX_RXQ_MINNDESCS) ||
756 (sfxge_rx_ring_entries > EFX_RXQ_MAXNDESCS)) {
757 log(LOG_ERR, "%s=%d must be power of 2 from %u to %u",
758 SFXGE_PARAM_RX_RING, sfxge_rx_ring_entries,
759 EFX_RXQ_MINNDESCS, EFX_RXQ_MAXNDESCS);
761 goto fail_rx_ring_entries;
763 sc->rxq_entries = sfxge_rx_ring_entries;
/* Record whether TXQ checksum option descriptors are supported. */
765 if (efx_nic_cfg_get(enp)->enc_features & EFX_FEATURE_TXQ_CKSUM_OP_DESC)
766 sc->txq_dynamic_cksum_toggle_supported = B_TRUE;
768 sc->txq_dynamic_cksum_toggle_supported = B_FALSE;
/* Validate hw.sfxge.tx_ring against the family's TXQ descriptor limit. */
770 if (!ISP2(sfxge_tx_ring_entries) ||
771 (sfxge_tx_ring_entries < EFX_TXQ_MINNDESCS) ||
772 (sfxge_tx_ring_entries > efx_nic_cfg_get(enp)->enc_txq_max_ndescs)) {
773 log(LOG_ERR, "%s=%d must be power of 2 from %u to %u",
774 SFXGE_PARAM_TX_RING, sfxge_tx_ring_entries,
775 EFX_TXQ_MINNDESCS, efx_nic_cfg_get(enp)->enc_txq_max_ndescs);
777 goto fail_tx_ring_entries;
779 sc->txq_entries = sfxge_tx_ring_entries;
/* Expose driver version and PHY type as read-only sysctls. */
781 SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
782 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
783 OID_AUTO, "version", CTLFLAG_RD,
784 SFXGE_VERSION_STRING, 0,
787 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
788 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
789 OID_AUTO, "phy_type", CTLFLAG_RD,
790 NULL, efx_nic_cfg_get(enp)->enc_phy_type,
793 /* Initialize the NVRAM. */
794 DBGPRINT(sc->dev, "nvram_init...");
795 if ((error = efx_nvram_init(enp)) != 0)
798 /* Initialize the VPD. */
799 DBGPRINT(sc->dev, "vpd_init...");
800 if ((error = efx_vpd_init(enp)) != 0)
803 efx_mcdi_new_epoch(enp);
806 DBGPRINT(sc->dev, "nic_reset...");
807 if ((error = efx_nic_reset(enp)) != 0)
810 /* Initialize buffer table allocation. */
811 sc->buffer_table_next = 0;
814 * Guarantee minimum and estimate maximum number of event queues
815 * to take it into account when MSI-X interrupts are allocated.
816 * It initializes NIC and keeps it initialized on success.
818 if ((error = sfxge_estimate_rsrc_limits(sc)) != 0)
821 /* Set up interrupts. */
822 DBGPRINT(sc->dev, "intr_init...");
823 if ((error = sfxge_intr_init(sc)) != 0)
826 /* Initialize event processing state. */
827 DBGPRINT(sc->dev, "ev_init...");
828 if ((error = sfxge_ev_init(sc)) != 0)
831 /* Initialize port state. */
832 DBGPRINT(sc->dev, "port_init...");
833 if ((error = sfxge_port_init(sc)) != 0)
836 /* Initialize receive state. */
837 DBGPRINT(sc->dev, "rx_init...");
838 if ((error = sfxge_rx_init(sc)) != 0)
841 /* Initialize transmit state. */
842 DBGPRINT(sc->dev, "tx_init...");
843 if ((error = sfxge_tx_init(sc)) != 0)
846 sc->init_state = SFXGE_INITIALIZED;
848 DBGPRINT(sc->dev, "success");
/* --- error unwind (reverse order of setup above) --- */
864 efx_nic_fini(sc->enp);
873 fail_tx_ring_entries:
874 fail_rx_ring_entries:
875 efx_nic_unprobe(enp);
882 efx_nic_destroy(enp);
883 SFXGE_EFSYS_LOCK_DESTROY(&sc->enp_lock);
887 (void) pci_disable_busmaster(sc->dev);
890 DBGPRINT(sc->dev, "failed %d", error);
892 SFXGE_ADAPTER_LOCK_DESTROY(sc);
/*
 * Full teardown, the mirror of sfxge_create(): TX, RX, port, events and
 * interrupts are cleaned up first, then the common-code subsystems (VPD,
 * NVRAM, NIC), MCDI, the NIC object, DMA, BAR, bus mastering, the reset
 * task and finally the adapter lock.
 */
897 sfxge_destroy(struct sfxge_softc *sc)
901 /* Clean up transmit state. */
904 /* Clean up receive state. */
907 /* Clean up port state. */
910 /* Clean up event processing state. */
913 /* Clean up interrupts. */
916 /* Tear down common code subsystems. */
917 efx_nic_reset(sc->enp);
918 efx_vpd_fini(sc->enp);
919 efx_nvram_fini(sc->enp);
920 efx_nic_unprobe(sc->enp);
922 /* Tear down MCDI. */
925 /* Destroy common code context. */
928 efx_nic_destroy(enp);
930 /* Free DMA memory. */
933 /* Free mapped BARs. */
936 (void) pci_disable_busmaster(sc->dev);
/* Make sure no reset task is still in flight before freeing state. */
938 taskqueue_drain(taskqueue_thread, &sc->task_reset);
940 /* Destroy the softc lock. */
941 SFXGE_ADAPTER_LOCK_DESTROY(sc);
/*
 * Sysctl handler for a single VPD keyword.  arg2 packs the tag in the
 * high 16 bits and the two-character keyword in the low 16 bits (set up
 * by sfxge_vpd_try_add()); the value is read from the cached VPD image.
 */
945 sfxge_vpd_handler(SYSCTL_HANDLER_ARGS)
947 struct sfxge_softc *sc = arg1;
948 efx_vpd_value_t value;
951 value.evv_tag = arg2 >> 16;
952 value.evv_keyword = arg2 & 0xffff;
953 if ((rc = efx_vpd_get(sc->enp, sc->vpd_data, sc->vpd_size, &value))
957 return (SYSCTL_OUT(req, value.evv_value, value.evv_length));
/*
 * Add a read-only sysctl for a VPD tag/keyword pair, but only if that
 * keyword is actually present in the cached VPD image (silently skipped
 * otherwise).  The tag and keyword are packed into arg2 for the handler.
 */
961 sfxge_vpd_try_add(struct sfxge_softc *sc, struct sysctl_oid_list *list,
962 efx_vpd_tag_t tag, const char *keyword)
964 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
965 efx_vpd_value_t value;
967 /* Check whether VPD tag/keyword is present */
969 value.evv_keyword = EFX_VPD_KEYWORD(keyword[0], keyword[1]);
970 if (efx_vpd_get(sc->enp, sc->vpd_data, sc->vpd_size, &value) != 0)
974 ctx, list, OID_AUTO, keyword, CTLTYPE_STRING|CTLFLAG_RD,
975 sc, tag << 16 | EFX_VPD_KEYWORD(keyword[0], keyword[1]),
976 sfxge_vpd_handler, "A", "");
/*
 * Read the adapter's VPD into a malloc'd cache (sc->vpd_data), use the ID
 * string as the device description, and publish the well-known read-only
 * keywords (PN/EC/SN plus vendor-defined V0-VZ) as sysctls.  VPD access
 * failure (e.g. unprivileged function) is not fatal — VPD is just skipped.
 */
980 sfxge_vpd_init(struct sfxge_softc *sc)
982 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
983 struct sysctl_oid *vpd_node;
984 struct sysctl_oid_list *vpd_list;
986 efx_vpd_value_t value;
989 if ((rc = efx_vpd_size(sc->enp, &sc->vpd_size)) != 0) {
991 * Unpriviledged functions deny VPD access.
992 * Simply skip VPD in this case.
998 sc->vpd_data = malloc(sc->vpd_size, M_SFXGE, M_WAITOK);
999 if ((rc = efx_vpd_read(sc->enp, sc->vpd_data, sc->vpd_size)) != 0)
1002 /* Copy ID (product name) into device description, and log it. */
1003 value.evv_tag = EFX_VPD_ID;
1004 if (efx_vpd_get(sc->enp, sc->vpd_data, sc->vpd_size, &value) == 0) {
/* NUL-terminate in place before using as a C string. */
1005 value.evv_value[value.evv_length] = 0;
1006 device_set_desc_copy(sc->dev, value.evv_value);
1007 device_printf(sc->dev, "%s\n", value.evv_value);
1010 vpd_node = SYSCTL_ADD_NODE(
1011 ctx, SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)),
1012 OID_AUTO, "vpd", CTLFLAG_RD, NULL, "Vital Product Data");
1013 vpd_list = SYSCTL_CHILDREN(vpd_node);
1015 /* Add sysctls for all expected and any vendor-defined keywords. */
1016 sfxge_vpd_try_add(sc, vpd_list, EFX_VPD_RO, "PN");
1017 sfxge_vpd_try_add(sc, vpd_list, EFX_VPD_RO, "EC");
1018 sfxge_vpd_try_add(sc, vpd_list, EFX_VPD_RO, "SN");
/* Vendor-defined keywords: V0..V9 and VA..VZ. */
1021 for (keyword[1] = '0'; keyword[1] <= '9'; keyword[1]++)
1022 sfxge_vpd_try_add(sc, vpd_list, EFX_VPD_RO, keyword);
1023 for (keyword[1] = 'A'; keyword[1] <= 'Z'; keyword[1]++)
1024 sfxge_vpd_try_add(sc, vpd_list, EFX_VPD_RO, keyword);
/* Error path: release the cached VPD image. */
1030 free(sc->vpd_data, M_SFXGE);
/* Release the cached VPD image allocated by sfxge_vpd_init(). */
1036 sfxge_vpd_fini(struct sfxge_softc *sc)
1038 free(sc->vpd_data, M_SFXGE);
/*
 * Taskqueue handler for adapter reset (enqueued by sfxge_schedule_reset).
 * Stops and resets the NIC, then retries sfxge_start() up to
 * sfxge_restart_attempts times; if all attempts fail the interface is
 * left stopped.  Runs with the adapter lock held.
 */
1042 sfxge_reset(void *arg, int npending)
1044 struct sfxge_softc *sc;
1050 sc = (struct sfxge_softc *)arg;
1052 SFXGE_ADAPTER_LOCK(sc);
/* Nothing to do unless the adapter was running. */
1054 if (sc->init_state != SFXGE_STARTED)
1058 efx_nic_reset(sc->enp);
1059 for (attempt = 0; attempt < sfxge_restart_attempts; ++attempt) {
1060 if ((rc = sfxge_start(sc)) == 0)
1063 device_printf(sc->dev, "start on reset failed (%d)\n", rc);
1067 device_printf(sc->dev, "reset failed; interface is now stopped\n");
1070 SFXGE_ADAPTER_UNLOCK(sc);
/*
 * Queue an asynchronous adapter reset on the system taskqueue; safe to
 * call from contexts that cannot sleep or take the adapter lock.
 */
1074 sfxge_schedule_reset(struct sfxge_softc *sc)
1076 taskqueue_enqueue(taskqueue_thread, &sc->task_reset);
/*
 * device_attach method: allocate the ifnet, bring up the hardware via
 * sfxge_create(), initialize the ifnet and VPD, then finalize the NIC
 * (it was left initialized by sfxge_create() only so the port's media
 * types could be discovered) and mark the device SFXGE_REGISTERED.
 */
1080 sfxge_attach(device_t dev)
1082 struct sfxge_softc *sc;
1086 sc = device_get_softc(dev);
1089 /* Allocate ifnet. */
1090 ifp = if_alloc(IFT_ETHER);
1092 device_printf(dev, "Couldn't allocate ifnet\n");
1098 /* Initialize hardware. */
1099 DBGPRINT(sc->dev, "create nic");
1100 if ((error = sfxge_create(sc)) != 0)
1103 /* Create the ifnet for the port. */
1104 DBGPRINT(sc->dev, "init ifnet");
1105 if ((error = sfxge_ifnet_init(ifp, sc)) != 0)
1108 DBGPRINT(sc->dev, "init vpd");
1109 if ((error = sfxge_vpd_init(sc)) != 0)
1113 * NIC is initialized inside sfxge_create() and kept inialized
1114 * to be able to initialize port to discover media types in
1115 * sfxge_ifnet_init().
1117 efx_nic_fini(sc->enp);
1119 sc->init_state = SFXGE_REGISTERED;
1121 DBGPRINT(sc->dev, "success");
/* Error unwind: undo ifnet init and NIC init in reverse order. */
1125 sfxge_ifnet_fini(ifp);
1127 efx_nic_fini(sc->enp);
1134 DBGPRINT(sc->dev, "failed %d", error);
/*
 * device_detach method: tear down the ifnet first (stops the interface),
 * then dismantle the hardware state built by sfxge_create().
 */
1139 sfxge_detach(device_t dev)
1141 struct sfxge_softc *sc;
1143 sc = device_get_softc(dev);
1147 /* Destroy the ifnet. */
1148 sfxge_ifnet_fini(sc->ifnet);
1150 /* Tear down hardware. */
/*
 * device_probe method: match the PCI vendor/device id against the efx
 * common-code family table and set a human-readable description for each
 * supported controller family (Siena / Huntington / Medford / Medford2).
 */
1157 sfxge_probe(device_t dev)
1159 uint16_t pci_vendor_id;
1160 uint16_t pci_device_id;
1161 efx_family_t family;
1162 unsigned int mem_bar;
1165 pci_vendor_id = pci_get_vendor(dev);
1166 pci_device_id = pci_get_device(dev);
1168 DBGPRINT(dev, "PCI ID %04x:%04x", pci_vendor_id, pci_device_id);
/* Non-zero rc means the device is not one of ours. */
1169 rc = efx_family(pci_vendor_id, pci_device_id, &family, &mem_bar);
1171 DBGPRINT(dev, "efx_family fail %d", rc);
1175 if (family == EFX_FAMILY_SIENA) {
1176 device_set_desc(dev, "Solarflare SFC9000 family");
1180 if (family == EFX_FAMILY_HUNTINGTON) {
1181 device_set_desc(dev, "Solarflare SFC9100 family");
1185 if (family == EFX_FAMILY_MEDFORD) {
1186 device_set_desc(dev, "Solarflare SFC9200 family");
1190 if (family == EFX_FAMILY_MEDFORD2) {
1191 device_set_desc(dev, "Solarflare SFC9250 family");
/* efx_family() succeeded but returned a family we do not handle. */
1195 DBGPRINT(dev, "impossible controller family %d", family);
/* newbus glue: device methods, driver declaration and PCI registration. */
1199 static device_method_t sfxge_methods[] = {
1200 DEVMETHOD(device_probe, sfxge_probe),
1201 DEVMETHOD(device_attach, sfxge_attach),
1202 DEVMETHOD(device_detach, sfxge_detach),
1207 static devclass_t sfxge_devclass;
1209 static driver_t sfxge_driver = {
1212 sizeof(struct sfxge_softc)
1215 DRIVER_MODULE(sfxge, pci, sfxge_driver, sfxge_devclass, 0, 0);