2 * Copyright (c) 2010-2015 Solarflare Communications Inc.
5 * This software was developed in part by Philip Paeps under contract for
6 * Solarflare Communications, Inc.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright notice,
12 * this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright notice,
14 * this list of conditions and the following disclaimer in the documentation
15 * and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
19 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
21 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
22 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
23 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
24 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
25 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
26 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
27 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 * The views and conclusions contained in the software and documentation are
30 * those of the authors and should not be interpreted as representing official
31 * policies, either expressed or implied, of the FreeBSD Project.
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
37 #include <sys/param.h>
38 #include <sys/kernel.h>
42 #include <sys/module.h>
43 #include <sys/mutex.h>
45 #include <sys/socket.h>
46 #include <sys/taskqueue.h>
47 #include <sys/sockio.h>
48 #include <sys/sysctl.h>
50 #include <sys/syslog.h>
52 #include <dev/pci/pcireg.h>
53 #include <dev/pci/pcivar.h>
55 #include <net/ethernet.h>
57 #include <net/if_media.h>
58 #include <net/if_types.h>
60 #include "common/efx.h"
64 #include "sfxge_ioc.h"
65 #include "sfxge_version.h"
/*
 * Interface capability sets.
 * SFXGE_CAP: everything the driver can advertise via if_capabilities.
 * SFXGE_CAP_ENABLE: the subset enabled by default (here: all of them).
 * SFXGE_CAP_FIXED: capabilities userland may not toggle via SIOCSIFCAP
 * (rejected in sfxge_if_ioctl()).
 */
67 #define SFXGE_CAP (IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM | \
68 IFCAP_RXCSUM | IFCAP_TXCSUM | \
69 IFCAP_RXCSUM_IPV6 | IFCAP_TXCSUM_IPV6 | \
70 IFCAP_TSO4 | IFCAP_TSO6 | \
72 IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWSTATS)
73 #define SFXGE_CAP_ENABLE SFXGE_CAP
74 #define SFXGE_CAP_FIXED (IFCAP_VLAN_MTU | \
75 IFCAP_JUMBO_MTU | IFCAP_LINKSTATE | IFCAP_HWSTATS)
/* Malloc type used for driver allocations (e.g. sc->vpd_data). */
77 MALLOC_DEFINE(M_SFXGE, "sfxge", "Solarflare 10GigE driver");
/* Root of the hw.sfxge sysctl tree for the tunables below. */
80 SYSCTL_NODE(_hw, OID_AUTO, sfxge, CTLFLAG_RD, 0,
81 "SFXGE driver parameters");
/* RX ring size; must be a power of 2 (validated in sfxge_create()). */
83 #define SFXGE_PARAM_RX_RING SFXGE_PARAM(rx_ring)
84 static int sfxge_rx_ring_entries = SFXGE_NDESCS;
85 TUNABLE_INT(SFXGE_PARAM_RX_RING, &sfxge_rx_ring_entries);
86 SYSCTL_INT(_hw_sfxge, OID_AUTO, rx_ring, CTLFLAG_RDTUN,
87 &sfxge_rx_ring_entries, 0,
88 "Maximum number of descriptors in a receive ring");
/* TX ring size; must be a power of 2 (validated in sfxge_create()). */
90 #define SFXGE_PARAM_TX_RING SFXGE_PARAM(tx_ring)
91 static int sfxge_tx_ring_entries = SFXGE_NDESCS;
92 TUNABLE_INT(SFXGE_PARAM_TX_RING, &sfxge_tx_ring_entries);
93 SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_ring, CTLFLAG_RDTUN,
94 &sfxge_tx_ring_entries, 0,
95 "Maximum number of descriptors in a transmit ring");
/* Period (in ticks) of the sfxge_tick() statistics callout. */
97 #define SFXGE_PARAM_STATS_UPDATE_PERIOD SFXGE_PARAM(stats_update_period)
98 static int sfxge_stats_update_period = SFXGE_CALLOUT_TICKS;
99 TUNABLE_INT(SFXGE_PARAM_STATS_UPDATE_PERIOD,
100 &sfxge_stats_update_period);
101 SYSCTL_INT(_hw_sfxge, OID_AUTO, stats_update_period, CTLFLAG_RDTUN,
102 &sfxge_stats_update_period, 0,
103 "netstat interface statistics update period in ticks");
/* How many sfxge_start() retries sfxge_reset() makes before giving up. */
105 #define SFXGE_PARAM_RESTART_ATTEMPTS SFXGE_PARAM(restart_attempts)
106 static int sfxge_restart_attempts = 3;
107 TUNABLE_INT(SFXGE_PARAM_RESTART_ATTEMPTS, &sfxge_restart_attempts);
108 SYSCTL_INT(_hw_sfxge, OID_AUTO, restart_attempts, CTLFLAG_RDTUN,
109 &sfxge_restart_attempts, 0,
110 "Maximum number of attempts to bring interface up after reset");
/* MCDI logging default, only compiled when the common code supports it. */
112 #if EFSYS_OPT_MCDI_LOGGING
113 #define SFXGE_PARAM_MCDI_LOGGING SFXGE_PARAM(mcdi_logging)
114 static int sfxge_mcdi_logging = 0;
115 TUNABLE_INT(SFXGE_PARAM_MCDI_LOGGING, &sfxge_mcdi_logging);
/* Forward declaration: taskqueue handler defined near the end of the file. */
119 sfxge_reset(void *arg, int npending);
/*
 * Estimate how many event queues the NIC can support so MSI-X vector
 * allocation can be sized accordingly.  The estimate is capped by CPU
 * count (mp_ncpus), EFX_MAXRSS and the per-device max_rss_channels
 * tunable; the firmware is then queried via efx_nic_get_vi_pool() and
 * the final bound is stored in sc->evq_max.  On success the NIC is
 * deliberately left initialized (see trailing comment) so the port can
 * be probed for media types.
 * NOTE(review): return type, braces and error paths are elided in this
 * extract; comments describe only the visible logic.
 */
122 sfxge_estimate_rsrc_limits(struct sfxge_softc *sc)
124 efx_drv_limits_t limits;
126 unsigned int evq_max;
127 uint32_t evq_allocated;
128 uint32_t rxq_allocated;
129 uint32_t txq_allocated;
132 * Limit the number of event queues to:
134 * - hardwire maximum RSS channels
135 * - administratively specified maximum RSS channels
137 evq_max = MIN(mp_ncpus, EFX_MAXRSS);
138 if (sc->max_rss_channels > 0)
139 evq_max = MIN(evq_max, sc->max_rss_channels);
141 memset(&limits, 0, sizeof(limits));
/* One RXQ and one EVQ per channel; TXQs need the extra per-type queues. */
143 limits.edl_min_evq_count = 1;
144 limits.edl_max_evq_count = evq_max;
145 limits.edl_min_txq_count = SFXGE_TXQ_NTYPES;
146 limits.edl_max_txq_count = evq_max + SFXGE_TXQ_NTYPES - 1;
147 limits.edl_min_rxq_count = 1;
148 limits.edl_max_rxq_count = evq_max;
150 efx_nic_set_drv_limits(sc->enp, &limits);
/* NIC must be initialized before the VI pool can be queried. */
152 if ((rc = efx_nic_init(sc->enp)) != 0)
155 rc = efx_nic_get_vi_pool(sc->enp, &evq_allocated, &rxq_allocated,
/* On failure the NIC is torn down again before returning. */
158 efx_nic_fini(sc->enp);
162 KASSERT(txq_allocated >= SFXGE_TXQ_NTYPES,
163 ("txq_allocated < SFXGE_TXQ_NTYPES"));
/* evq_max becomes the min of what was requested and each allocated pool. */
165 sc->evq_max = MIN(evq_allocated, evq_max);
166 sc->evq_max = MIN(rxq_allocated, sc->evq_max);
167 sc->evq_max = MIN(txq_allocated - (SFXGE_TXQ_NTYPES - 1),
170 KASSERT(sc->evq_max <= evq_max,
171 ("allocated more than maximum requested"));
174 * NIC is kept initialized in the case of success to be able to
175 * initialize port to find out media types.
/*
 * Pin the common-code driver limits to exactly the interrupt allocation
 * obtained earlier (sc->intr.n_alloc): min == max for EVQ, TXQ and RXQ
 * counts, since the initial estimation has already been done.
 */
181 sfxge_set_drv_limits(struct sfxge_softc *sc)
183 efx_drv_limits_t limits;
185 memset(&limits, 0, sizeof(limits));
187 /* Limits are strict since take into account initial estimation */
188 limits.edl_min_evq_count = limits.edl_max_evq_count =
/* TXQ count: one per allocated interrupt plus the extra per-type queues. */
190 limits.edl_min_txq_count = limits.edl_max_txq_count =
191 sc->intr.n_alloc + SFXGE_TXQ_NTYPES - 1;
192 limits.edl_min_rxq_count = limits.edl_max_rxq_count =
195 return (efx_nic_set_drv_limits(sc->enp, &limits));
/*
 * Bring the interface up: set driver limits, initialize the NIC, then
 * start interrupts, events, port, RX and TX in that order.  Finally
 * marks the ifnet IFF_DRV_RUNNING.  Idempotent when already started.
 * Caller must hold the adapter lock.
 * NOTE(review): the error-unwind labels between L146 and L147 are
 * elided in this extract; only efx_nic_fini() and the final printf of
 * the failure path are visible.
 */
199 sfxge_start(struct sfxge_softc *sc)
203 SFXGE_ADAPTER_LOCK_ASSERT_OWNED(sc);
/* Already running: nothing to do. */
205 if (sc->init_state == SFXGE_STARTED)
/* Can only start from the REGISTERED state. */
208 if (sc->init_state != SFXGE_REGISTERED) {
213 /* Set required resource limits */
214 if ((rc = sfxge_set_drv_limits(sc)) != 0)
217 if ((rc = efx_nic_init(sc->enp)) != 0)
220 /* Start processing interrupts. */
221 if ((rc = sfxge_intr_start(sc)) != 0)
224 /* Start processing events. */
225 if ((rc = sfxge_ev_start(sc)) != 0)
228 /* Fire up the port. */
229 if ((rc = sfxge_port_start(sc)) != 0)
232 /* Start the receiver side. */
233 if ((rc = sfxge_rx_start(sc)) != 0)
236 /* Start the transmitter side. */
237 if ((rc = sfxge_tx_start(sc)) != 0)
240 sc->init_state = SFXGE_STARTED;
242 /* Tell the stack we're running. */
243 sc->ifnet->if_drv_flags |= IFF_DRV_RUNNING;
244 sc->ifnet->if_drv_flags &= ~IFF_DRV_OACTIVE;
/* Failure path: tear the NIC back down and report the error. */
261 efx_nic_fini(sc->enp);
264 device_printf(sc->dev, "sfxge_start: %d\n", rc);
/*
 * ifnet if_init callback: start the adapter under the adapter lock.
 * The sfxge_start() return value is intentionally discarded since the
 * callback returns void.
 */
270 sfxge_if_init(void *arg)
272 struct sfxge_softc *sc;
274 sc = (struct sfxge_softc *)arg;
276 SFXGE_ADAPTER_LOCK(sc);
277 (void)sfxge_start(sc);
278 SFXGE_ADAPTER_UNLOCK(sc);
/*
 * Bring the interface down: reverse of sfxge_start().  Drops back to
 * the REGISTERED state, stops TX/RX/events/interrupts (the actual stop
 * calls between the comments are elided in this extract), finalizes
 * the NIC and clears IFF_DRV_RUNNING.  Caller must hold the adapter
 * lock; a no-op unless currently STARTED.
 */
282 sfxge_stop(struct sfxge_softc *sc)
284 SFXGE_ADAPTER_LOCK_ASSERT_OWNED(sc);
286 if (sc->init_state != SFXGE_STARTED)
289 sc->init_state = SFXGE_REGISTERED;
291 /* Stop the transmitter. */
294 /* Stop the receiver. */
300 /* Stop processing events. */
303 /* Stop processing interrupts. */
306 efx_nic_fini(sc->enp);
308 sc->ifnet->if_drv_flags &= ~IFF_DRV_RUNNING;
/*
 * Handle the private VPD ioctl.  GET_KEYWORD copies a tag/keyword value
 * out to the user payload buffer; SET_KEYWORD copies the value in,
 * updates the cached VPD image, verifies it and writes it back to the
 * NIC.  NOTE(review): error checks/returns between the visible calls
 * are elided in this extract.
 */
313 sfxge_vpd_ioctl(struct sfxge_softc *sc, sfxge_ioc_t *ioc)
315 efx_vpd_value_t value;
318 switch (ioc->u.vpd.op) {
319 case SFXGE_VPD_OP_GET_KEYWORD:
320 value.evv_tag = ioc->u.vpd.tag;
321 value.evv_keyword = ioc->u.vpd.keyword;
322 rc = efx_vpd_get(sc->enp, sc->vpd_data, sc->vpd_size, &value);
/* Never copy out more than the keyword actually holds. */
325 ioc->u.vpd.len = MIN(ioc->u.vpd.len, value.evv_length);
326 if (ioc->u.vpd.payload != 0) {
327 rc = copyout(value.evv_value, ioc->u.vpd.payload,
331 case SFXGE_VPD_OP_SET_KEYWORD:
/* Reject values that do not fit in evv_value. */
332 if (ioc->u.vpd.len > sizeof(value.evv_value))
334 value.evv_tag = ioc->u.vpd.tag;
335 value.evv_keyword = ioc->u.vpd.keyword;
336 value.evv_length = ioc->u.vpd.len;
337 rc = copyin(ioc->u.vpd.payload, value.evv_value, value.evv_length);
/* Update cached image, then verify before committing to the NIC. */
340 rc = efx_vpd_set(sc->enp, sc->vpd_data, sc->vpd_size, &value);
343 rc = efx_vpd_verify(sc->enp, sc->vpd_data, sc->vpd_size);
346 rc = efx_vpd_write(sc->enp, sc->vpd_data, sc->vpd_size);
/*
 * Dispatch a driver-private ioctl to the MCDI, NVRAM or VPD handler.
 * NOTE(review): the switch statement and remaining case labels are
 * elided in this extract.
 */
357 sfxge_private_ioctl(struct sfxge_softc *sc, sfxge_ioc_t *ioc)
361 return (sfxge_mcdi_ioctl(sc, ioc));
362 case SFXGE_NVRAM_IOC:
363 return (sfxge_nvram_ioctl(sc, ioc));
365 return (sfxge_vpd_ioctl(sc, ioc));
/*
 * ifnet ioctl entry point.  Handles interface flags, MTU changes,
 * multicast filter updates, capability changes (SIOCSIFCAP), media
 * ioctls, SFP+ module EEPROM reads and driver-private ioctls; anything
 * else falls through to ether_ioctl().
 * NOTE(review): the switch(command) and most case labels are elided in
 * this extract, so section comments below identify the intent of each
 * visible fragment rather than the exact ioctl numbers.
 */
373 sfxge_if_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
375 struct sfxge_softc *sc;
380 ifr = (struct ifreq *)data;
/* --- interface flags (IFF_UP / promisc / allmulti) handling --- */
386 SFXGE_ADAPTER_LOCK(sc);
387 if (ifp->if_flags & IFF_UP) {
388 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
/* Only re-program the MAC filter when promisc/allmulti changed. */
389 if ((ifp->if_flags ^ sc->if_flags) &
390 (IFF_PROMISC | IFF_ALLMULTI)) {
391 sfxge_mac_filter_set(sc);
396 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
/* Remember flags so the next call can detect changes. */
398 sc->if_flags = ifp->if_flags;
399 SFXGE_ADAPTER_UNLOCK(sc);
/* --- MTU change: restart only needed while running --- */
402 if (ifr->ifr_mtu == ifp->if_mtu) {
405 } else if (ifr->ifr_mtu > SFXGE_MAX_MTU) {
407 } else if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
408 ifp->if_mtu = ifr->ifr_mtu;
411 /* Restart required */
412 SFXGE_ADAPTER_LOCK(sc);
414 ifp->if_mtu = ifr->ifr_mtu;
415 error = sfxge_start(sc);
416 SFXGE_ADAPTER_UNLOCK(sc);
/* If the restart failed, mark the interface down. */
418 ifp->if_flags &= ~IFF_UP;
419 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
/* --- multicast list changed: reload the MAC filter --- */
426 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
427 sfxge_mac_filter_set(sc);
/* --- SIOCSIFCAP: capability changes --- */
431 int reqcap = ifr->ifr_reqcap;
434 SFXGE_ADAPTER_LOCK(sc);
436 /* Capabilities to be changed in accordance with request */
437 capchg_mask = ifp->if_capenable ^ reqcap;
440 * The networking core already rejects attempts to
441 * enable capabilities we don't have. We still have
442 * to reject attempts to disable capabilities that we
443 * can't (yet) disable.
445 KASSERT((reqcap & ~ifp->if_capabilities) == 0,
446 ("Unsupported capabilities 0x%x requested 0x%x vs "
448 reqcap & ~ifp->if_capabilities,
449 reqcap , ifp->if_capabilities));
/* Fixed capabilities cannot be toggled at all. */
450 if (capchg_mask & SFXGE_CAP_FIXED) {
452 SFXGE_ADAPTER_UNLOCK(sc);
456 /* Check request before any changes */
457 if ((capchg_mask & IFCAP_TSO4) &&
458 (reqcap & (IFCAP_TSO4 | IFCAP_TXCSUM)) == IFCAP_TSO4) {
460 SFXGE_ADAPTER_UNLOCK(sc);
461 if_printf(ifp, "enable txcsum before tso4\n");
464 if ((capchg_mask & IFCAP_TSO6) &&
465 (reqcap & (IFCAP_TSO6 | IFCAP_TXCSUM_IPV6)) == IFCAP_TSO6) {
467 SFXGE_ADAPTER_UNLOCK(sc);
468 if_printf(ifp, "enable txcsum6 before tso6\n");
/* Keep if_hwassist consistent with the checksum capabilities. */
472 if (reqcap & IFCAP_TXCSUM) {
473 ifp->if_hwassist |= (CSUM_IP | CSUM_TCP | CSUM_UDP);
475 ifp->if_hwassist &= ~(CSUM_IP | CSUM_TCP | CSUM_UDP);
/* Disabling txcsum implicitly disables TSO4 as well. */
476 if (reqcap & IFCAP_TSO4) {
477 reqcap &= ~IFCAP_TSO4;
479 "tso4 disabled due to -txcsum\n");
482 if (reqcap & IFCAP_TXCSUM_IPV6) {
483 ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);
485 ifp->if_hwassist &= ~(CSUM_TCP_IPV6 | CSUM_UDP_IPV6);
/* Disabling txcsum6 implicitly disables TSO6 as well. */
486 if (reqcap & IFCAP_TSO6) {
487 reqcap &= ~IFCAP_TSO6;
489 "tso6 disabled due to -txcsum6\n");
494 * The kernel takes both IFCAP_TSOx and CSUM_TSO into
495 * account before using TSO. So, we do not touch
496 * checksum flags when IFCAP_TSOx is modified.
497 * Note that CSUM_TSO is (CSUM_IP_TSO|CSUM_IP6_TSO),
498 * but both bits are set in IPv4 and IPv6 mbufs.
501 ifp->if_capenable = reqcap;
503 SFXGE_ADAPTER_UNLOCK(sc);
/* --- media ioctls delegated to ifmedia --- */
508 error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
/* --- SFP+ module EEPROM read (i2c) --- */
515 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
519 if (i2c.len > sizeof(i2c.data)) {
524 SFXGE_ADAPTER_LOCK(sc);
525 error = efx_phy_module_get_info(sc->enp, i2c.dev_addr,
528 SFXGE_ADAPTER_UNLOCK(sc);
530 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
/* --- driver-private ioctl: requires PRIV_DRIVER privilege --- */
535 error = priv_check(curthread, PRIV_DRIVER);
538 error = copyin(ifr->ifr_data, &ioc, sizeof(ioc));
541 error = sfxge_private_ioctl(sc, &ioc);
543 error = copyout(&ioc, ifr->ifr_data, sizeof(ioc));
/* --- everything else --- */
547 error = ether_ioctl(ifp, command, data);
/*
 * Periodic callout: refresh the netstat-visible port and TX statistics,
 * then reschedule itself after sfxge_stats_update_period ticks.
 */
554 sfxge_tick(void *arg)
556 struct sfxge_softc *sc = arg;
558 sfxge_port_update_stats(sc);
559 sfxge_tx_update_stats(sc);
561 callout_reset(&sc->tick_callout, sfxge_stats_update_period,
/*
 * Tear down the ifnet: drain the statistics callout (must not hold the
 * adapter lock for that), stop the adapter under the lock (the stop
 * call between lock/unlock is elided in this extract), and remove all
 * registered media entries.
 */
566 sfxge_ifnet_fini(struct ifnet *ifp)
568 struct sfxge_softc *sc = ifp->if_softc;
570 callout_drain(&sc->tick_callout);
572 SFXGE_ADAPTER_LOCK(sc);
574 SFXGE_ADAPTER_UNLOCK(sc);
576 ifmedia_removeall(&sc->media);
/*
 * Populate and attach the ifnet for this adapter: callbacks,
 * capabilities, TSO limits, hardware-dependent extras (LRO, VLAN tag
 * insertion), then ether_ifattach() and the ifmedia/statistics setup.
 * On ifmedia failure the ifnet is detached again (visible at the
 * bottom); intermediate error labels are elided in this extract.
 */
582 sfxge_ifnet_init(struct ifnet *ifp, struct sfxge_softc *sc)
584 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sc->enp);
591 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
592 ifp->if_init = sfxge_if_init;
594 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
595 ifp->if_ioctl = sfxge_if_ioctl;
597 ifp->if_capabilities = SFXGE_CAP;
598 ifp->if_capenable = SFXGE_CAP_ENABLE;
/* Advertise TSO limits matching the TX mapping constraints. */
599 ifp->if_hw_tsomax = SFXGE_TSO_MAX_SIZE;
600 ifp->if_hw_tsomaxsegcount = SFXGE_TX_MAPPING_MAX_SEG;
601 ifp->if_hw_tsomaxsegsize = PAGE_SIZE;
/* LRO enabled by default (the guarding condition is elided here). */
604 ifp->if_capabilities |= IFCAP_LRO;
605 ifp->if_capenable |= IFCAP_LRO;
/* HW VLAN tag insertion only when the NIC supports it. */
608 if (encp->enc_hw_tx_insert_vlan_enabled) {
609 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
610 ifp->if_capenable |= IFCAP_VLAN_HWTAGGING;
612 ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
613 CSUM_TCP_IPV6 | CSUM_UDP_IPV6;
615 ether_ifattach(ifp, encp->enc_mac_addr);
617 ifp->if_transmit = sfxge_if_transmit;
618 ifp->if_qflush = sfxge_if_qflush;
/* MPSAFE callout for the periodic statistics tick. */
620 callout_init(&sc->tick_callout, B_TRUE);
622 DBGPRINT(sc->dev, "ifmedia_init");
623 if ((rc = sfxge_port_ifmedia_init(sc)) != 0)
626 callout_reset(&sc->tick_callout, sfxge_stats_update_period,
/* Failure path: undo the ether_ifattach() above. */
632 ether_ifdetach(sc->ifnet);
/*
 * Bump-allocate n entries from the NIC buffer table, returning the
 * first index through *idp.  Entries are never freed individually;
 * the KASSERT guards against exceeding the NIC's enc_buftbl_limit.
 */
637 sfxge_sram_buf_tbl_alloc(struct sfxge_softc *sc, size_t n, uint32_t *idp)
639 KASSERT(sc->buffer_table_next + n <=
640 efx_nic_cfg_get(sc->enp)->enc_buftbl_limit,
641 ("buffer table full"));
643 *idp = sc->buffer_table_next;
644 sc->buffer_table_next += n;
/*
 * Map the device's PCI memory BAR (EFX_MEM_BAR) and record the bus
 * tag/handle for register access; also initializes the BAR lock.
 */
648 sfxge_bar_init(struct sfxge_softc *sc)
650 efsys_bar_t *esbp = &sc->bar;
652 esbp->esb_rid = PCIR_BAR(EFX_MEM_BAR);
653 if ((esbp->esb_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
654 &esbp->esb_rid, RF_ACTIVE)) == NULL) {
655 device_printf(sc->dev, "Cannot allocate BAR region %d\n",
659 esbp->esb_tag = rman_get_bustag(esbp->esb_res);
660 esbp->esb_handle = rman_get_bushandle(esbp->esb_res);
662 SFXGE_BAR_LOCK_INIT(esbp, device_get_nameunit(sc->dev));
/*
 * Release the mapped BAR resource and destroy the BAR lock; inverse of
 * sfxge_bar_init().
 */
668 sfxge_bar_fini(struct sfxge_softc *sc)
670 efsys_bar_t *esbp = &sc->bar;
672 bus_release_resource(sc->dev, SYS_RES_MEMORY, esbp->esb_rid,
674 SFXGE_BAR_LOCK_DESTROY(esbp);
/*
 * Attach-time hardware bring-up.  In order: read per-unit tunables,
 * create sysctl nodes, init the reset task, enable bus mastering, DMA
 * and BAR mapping, create the common-code NIC object, validate the
 * ring-size tunables, init MCDI/probe the NIC/init NVRAM and VPD,
 * reset the NIC, estimate resource limits (leaves the NIC initialized
 * on success), then init interrupts, events, port, RX and TX state.
 * NOTE(review): most failure labels and goto targets are elided in
 * this extract; only a few unwind calls remain visible at the bottom.
 */
678 sfxge_create(struct sfxge_softc *sc)
683 char rss_param_name[sizeof(SFXGE_PARAM(%d.max_rss_channels))];
684 #if EFSYS_OPT_MCDI_LOGGING
685 char mcdi_log_param_name[sizeof(SFXGE_PARAM(%d.mcdi_logging))];
690 SFXGE_ADAPTER_LOCK_INIT(sc, device_get_nameunit(sc->dev));
/* Per-unit tunable: hw.sfxge.<unit>.max_rss_channels (0 = no limit). */
692 sc->max_rss_channels = 0;
693 snprintf(rss_param_name, sizeof(rss_param_name),
694 SFXGE_PARAM(%d.max_rss_channels),
695 (int)device_get_unit(dev));
696 TUNABLE_INT_FETCH(rss_param_name, &sc->max_rss_channels);
697 #if EFSYS_OPT_MCDI_LOGGING
/* Per-unit MCDI logging override of the global default. */
698 sc->mcdi_logging = sfxge_mcdi_logging;
699 snprintf(mcdi_log_param_name, sizeof(mcdi_log_param_name),
700 SFXGE_PARAM(%d.mcdi_logging),
701 (int)device_get_unit(dev));
702 TUNABLE_INT_FETCH(mcdi_log_param_name, &sc->mcdi_logging);
/* Sysctl node under which per-queue statistics are published. */
705 sc->stats_node = SYSCTL_ADD_NODE(
706 device_get_sysctl_ctx(dev),
707 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
708 OID_AUTO, "stats", CTLFLAG_RD, NULL, "Statistics");
709 if (sc->stats_node == NULL) {
714 TASK_INIT(&sc->task_reset, 0, sfxge_reset, sc);
716 (void) pci_enable_busmaster(dev);
718 /* Initialize DMA mappings. */
719 DBGPRINT(sc->dev, "dma_init...");
720 if ((error = sfxge_dma_init(sc)) != 0)
723 /* Map the device registers. */
724 DBGPRINT(sc->dev, "bar_init...");
725 if ((error = sfxge_bar_init(sc)) != 0)
728 error = efx_family(pci_get_vendor(dev), pci_get_device(dev),
730 KASSERT(error == 0, ("Family should be filtered by sfxge_probe()"));
732 DBGPRINT(sc->dev, "nic_create...");
734 /* Create the common code nic object. */
735 SFXGE_EFSYS_LOCK_INIT(&sc->enp_lock,
736 device_get_nameunit(sc->dev), "nic");
737 if ((error = efx_nic_create(sc->family, (efsys_identifier_t *)sc,
738 &sc->bar, &sc->enp_lock, &enp)) != 0)
/* Validate the RX ring tunable: power of 2 within the HW range. */
742 if (!ISP2(sfxge_rx_ring_entries) ||
743 (sfxge_rx_ring_entries < EFX_RXQ_MINNDESCS) ||
744 (sfxge_rx_ring_entries > EFX_RXQ_MAXNDESCS)) {
745 log(LOG_ERR, "%s=%d must be power of 2 from %u to %u",
746 SFXGE_PARAM_RX_RING, sfxge_rx_ring_entries,
747 EFX_RXQ_MINNDESCS, EFX_RXQ_MAXNDESCS);
749 goto fail_rx_ring_entries;
751 sc->rxq_entries = sfxge_rx_ring_entries;
/* Validate the TX ring tunable; the max is NIC-family dependent. */
753 if (!ISP2(sfxge_tx_ring_entries) ||
754 (sfxge_tx_ring_entries < EFX_TXQ_MINNDESCS) ||
755 (sfxge_tx_ring_entries > EFX_TXQ_MAXNDESCS(efx_nic_cfg_get(enp)))) {
756 log(LOG_ERR, "%s=%d must be power of 2 from %u to %u",
757 SFXGE_PARAM_TX_RING, sfxge_tx_ring_entries,
758 EFX_TXQ_MINNDESCS, EFX_TXQ_MAXNDESCS(efx_nic_cfg_get(enp)));
760 goto fail_tx_ring_entries;
762 sc->txq_entries = sfxge_tx_ring_entries;
764 /* Initialize MCDI to talk to the microcontroller. */
765 DBGPRINT(sc->dev, "mcdi_init...");
766 if ((error = sfxge_mcdi_init(sc)) != 0)
769 /* Probe the NIC and build the configuration data area. */
770 DBGPRINT(sc->dev, "nic_probe...");
771 if ((error = efx_nic_probe(enp)) != 0)
/* Read-only sysctls exposing driver version and PHY type. */
774 SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
775 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
776 OID_AUTO, "version", CTLFLAG_RD,
777 SFXGE_VERSION_STRING, 0,
780 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
781 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
782 OID_AUTO, "phy_type", CTLFLAG_RD,
783 NULL, efx_nic_cfg_get(enp)->enc_phy_type,
786 /* Initialize the NVRAM. */
787 DBGPRINT(sc->dev, "nvram_init...");
788 if ((error = efx_nvram_init(enp)) != 0)
791 /* Initialize the VPD. */
792 DBGPRINT(sc->dev, "vpd_init...");
793 if ((error = efx_vpd_init(enp)) != 0)
/* New MCDI epoch so stale firmware state is discarded. */
796 efx_mcdi_new_epoch(enp);
799 DBGPRINT(sc->dev, "nic_reset...");
800 if ((error = efx_nic_reset(enp)) != 0)
803 /* Initialize buffer table allocation. */
804 sc->buffer_table_next = 0;
807 * Guarantee minimum and estimate maximum number of event queues
808 * to take it into account when MSI-X interrupts are allocated.
809 * It initializes NIC and keeps it initialized on success.
811 if ((error = sfxge_estimate_rsrc_limits(sc)) != 0)
814 /* Set up interrupts. */
815 DBGPRINT(sc->dev, "intr_init...");
816 if ((error = sfxge_intr_init(sc)) != 0)
819 /* Initialize event processing state. */
820 DBGPRINT(sc->dev, "ev_init...");
821 if ((error = sfxge_ev_init(sc)) != 0)
824 /* Initialize port state. */
825 DBGPRINT(sc->dev, "port_init...");
826 if ((error = sfxge_port_init(sc)) != 0)
829 /* Initialize receive state. */
830 DBGPRINT(sc->dev, "rx_init...");
831 if ((error = sfxge_rx_init(sc)) != 0)
834 /* Initialize transmit state. */
835 DBGPRINT(sc->dev, "tx_init...");
836 if ((error = sfxge_tx_init(sc)) != 0)
839 sc->init_state = SFXGE_INITIALIZED;
841 DBGPRINT(sc->dev, "success");
/* Failure unwind (most labels elided in this extract). */
857 efx_nic_fini(sc->enp);
866 efx_nic_unprobe(enp);
872 fail_tx_ring_entries:
873 fail_rx_ring_entries:
875 efx_nic_destroy(enp);
876 SFXGE_EFSYS_LOCK_DESTROY(&sc->enp_lock);
880 (void) pci_disable_busmaster(sc->dev);
883 DBGPRINT(sc->dev, "failed %d", error);
885 SFXGE_ADAPTER_LOCK_DESTROY(sc);
/*
 * Detach-time teardown; inverse of sfxge_create().  Cleans up TX, RX,
 * port, event and interrupt state (those calls are elided in this
 * extract), tears down the common-code subsystems, MCDI, the NIC
 * object, DMA and BAR mappings, disables bus mastering, drains the
 * reset task and destroys the adapter lock.
 */
890 sfxge_destroy(struct sfxge_softc *sc)
894 /* Clean up transmit state. */
897 /* Clean up receive state. */
900 /* Clean up port state. */
903 /* Clean up event processing state. */
906 /* Clean up interrupts. */
909 /* Tear down common code subsystems. */
910 efx_nic_reset(sc->enp);
911 efx_vpd_fini(sc->enp);
912 efx_nvram_fini(sc->enp);
913 efx_nic_unprobe(sc->enp);
915 /* Tear down MCDI. */
918 /* Destroy common code context. */
921 efx_nic_destroy(enp);
923 /* Free DMA memory. */
926 /* Free mapped BARs. */
929 (void) pci_disable_busmaster(sc->dev);
/* Ensure no queued reset task runs after the softc is gone. */
931 taskqueue_drain(taskqueue_thread, &sc->task_reset);
933 /* Destroy the softc lock. */
934 SFXGE_ADAPTER_LOCK_DESTROY(sc);
/*
 * Sysctl handler for a VPD keyword.  arg2 packs the VPD tag in the
 * high 16 bits and the two-character keyword in the low 16 bits (see
 * sfxge_vpd_try_add()); the value is read from the cached VPD image
 * and returned as a string.
 */
938 sfxge_vpd_handler(SYSCTL_HANDLER_ARGS)
940 struct sfxge_softc *sc = arg1;
941 efx_vpd_value_t value;
944 value.evv_tag = arg2 >> 16;
945 value.evv_keyword = arg2 & 0xffff;
946 if ((rc = efx_vpd_get(sc->enp, sc->vpd_data, sc->vpd_size, &value))
950 return (SYSCTL_OUT(req, value.evv_value, value.evv_length));
/*
 * Add a sysctl for the given VPD tag/keyword, but only if that keyword
 * is actually present in the cached VPD image; silently skip it
 * otherwise.  The tag/keyword pair is packed into arg2 for
 * sfxge_vpd_handler() to unpack.
 */
954 sfxge_vpd_try_add(struct sfxge_softc *sc, struct sysctl_oid_list *list,
955 efx_vpd_tag_t tag, const char *keyword)
957 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
958 efx_vpd_value_t value;
960 /* Check whether VPD tag/keyword is present */
962 value.evv_keyword = EFX_VPD_KEYWORD(keyword[0], keyword[1]);
963 if (efx_vpd_get(sc->enp, sc->vpd_data, sc->vpd_size, &value) != 0)
967 ctx, list, OID_AUTO, keyword, CTLTYPE_STRING|CTLFLAG_RD,
968 sc, tag << 16 | EFX_VPD_KEYWORD(keyword[0], keyword[1]),
969 sfxge_vpd_handler, "A", "");
/*
 * Read and cache the device's Vital Product Data, use the ID tag as
 * the device description, and publish the standard read-only keywords
 * (PN/EC/SN plus vendor-defined V0..VZ) under a "vpd" sysctl node.
 * VPD access failures (e.g. unprivileged functions) are tolerated by
 * skipping VPD entirely.
 */
973 sfxge_vpd_init(struct sfxge_softc *sc)
975 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
976 struct sysctl_oid *vpd_node;
977 struct sysctl_oid_list *vpd_list;
979 efx_vpd_value_t value;
982 if ((rc = efx_vpd_size(sc->enp, &sc->vpd_size)) != 0) {
984 * Unpriviledged functions deny VPD access.
985 * Simply skip VPD in this case.
/* Cached image of the whole VPD area; freed in sfxge_vpd_fini(). */
991 sc->vpd_data = malloc(sc->vpd_size, M_SFXGE, M_WAITOK);
992 if ((rc = efx_vpd_read(sc->enp, sc->vpd_data, sc->vpd_size)) != 0)
995 /* Copy ID (product name) into device description, and log it. */
996 value.evv_tag = EFX_VPD_ID;
997 if (efx_vpd_get(sc->enp, sc->vpd_data, sc->vpd_size, &value) == 0) {
/* NUL-terminate in place; assumes evv_length < sizeof(evv_value)
 * (bounds check elided in this extract — TODO confirm upstream). */
998 value.evv_value[value.evv_length] = 0;
999 device_set_desc_copy(sc->dev, value.evv_value);
1000 device_printf(sc->dev, "%s\n", value.evv_value);
1003 vpd_node = SYSCTL_ADD_NODE(
1004 ctx, SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)),
1005 OID_AUTO, "vpd", CTLFLAG_RD, NULL, "Vital Product Data");
1006 vpd_list = SYSCTL_CHILDREN(vpd_node);
1008 /* Add sysctls for all expected and any vendor-defined keywords. */
1009 sfxge_vpd_try_add(sc, vpd_list, EFX_VPD_RO, "PN");
1010 sfxge_vpd_try_add(sc, vpd_list, EFX_VPD_RO, "EC");
1011 sfxge_vpd_try_add(sc, vpd_list, EFX_VPD_RO, "SN");
/* Vendor-defined keywords V0..V9 and VA..VZ. */
1014 for (keyword[1] = '0'; keyword[1] <= '9'; keyword[1]++)
1015 sfxge_vpd_try_add(sc, vpd_list, EFX_VPD_RO, keyword);
1016 for (keyword[1] = 'A'; keyword[1] <= 'Z'; keyword[1]++)
1017 sfxge_vpd_try_add(sc, vpd_list, EFX_VPD_RO, keyword);
/* Failure path: release the cached VPD image. */
1023 free(sc->vpd_data, M_SFXGE);
/* Release the cached VPD image allocated in sfxge_vpd_init(). */
1029 sfxge_vpd_fini(struct sfxge_softc *sc)
1031 free(sc->vpd_data, M_SFXGE);
/*
 * Taskqueue handler scheduled by sfxge_schedule_reset(): stop the
 * adapter (stop call elided in this extract), reset the NIC and retry
 * sfxge_start() up to sfxge_restart_attempts times, logging each
 * failure.  If all attempts fail the interface is left stopped.
 * Runs with the adapter lock held for the whole sequence.
 */
1035 sfxge_reset(void *arg, int npending)
1037 struct sfxge_softc *sc;
1043 sc = (struct sfxge_softc *)arg;
1045 SFXGE_ADAPTER_LOCK(sc);
/* Nothing to do unless the interface was running. */
1047 if (sc->init_state != SFXGE_STARTED)
1051 efx_nic_reset(sc->enp);
1052 for (attempt = 0; attempt < sfxge_restart_attempts; ++attempt) {
1053 if ((rc = sfxge_start(sc)) == 0)
1056 device_printf(sc->dev, "start on reset failed (%d)\n", rc);
1060 device_printf(sc->dev, "reset failed; interface is now stopped\n");
1063 SFXGE_ADAPTER_UNLOCK(sc);
/*
 * Queue the reset task on the system taskqueue; safe to call from
 * contexts that cannot perform the reset synchronously.
 */
1067 sfxge_schedule_reset(struct sfxge_softc *sc)
1069 taskqueue_enqueue(taskqueue_thread, &sc->task_reset);
/*
 * device_attach method: allocate the ifnet, bring up the hardware via
 * sfxge_create(), attach the ifnet, read VPD, then finalize the NIC
 * (it was kept initialized by sfxge_create() only so that media types
 * could be discovered) and move to the REGISTERED state.
 * NOTE(review): intermediate failure labels are elided in this
 * extract; only the ifnet/NIC unwind calls remain visible.
 */
1073 sfxge_attach(device_t dev)
1075 struct sfxge_softc *sc;
1079 sc = device_get_softc(dev);
1082 /* Allocate ifnet. */
1083 ifp = if_alloc(IFT_ETHER);
1085 device_printf(dev, "Couldn't allocate ifnet\n");
1091 /* Initialize hardware. */
1092 DBGPRINT(sc->dev, "create nic");
1093 if ((error = sfxge_create(sc)) != 0)
1096 /* Create the ifnet for the port. */
1097 DBGPRINT(sc->dev, "init ifnet");
1098 if ((error = sfxge_ifnet_init(ifp, sc)) != 0)
1101 DBGPRINT(sc->dev, "init vpd");
1102 if ((error = sfxge_vpd_init(sc)) != 0)
1106 * NIC is initialized inside sfxge_create() and kept inialized
1107 * to be able to initialize port to discover media types in
1108 * sfxge_ifnet_init().
1110 efx_nic_fini(sc->enp);
1112 sc->init_state = SFXGE_REGISTERED;
1114 DBGPRINT(sc->dev, "success");
/* Failure unwind. */
1118 sfxge_ifnet_fini(ifp);
1120 efx_nic_fini(sc->enp);
1127 DBGPRINT(sc->dev, "failed %d", error);
/*
 * device_detach method: tear down the ifnet and then the hardware
 * (the VPD/destroy calls are elided in this extract).
 */
1132 sfxge_detach(device_t dev)
1134 struct sfxge_softc *sc;
1136 sc = device_get_softc(dev);
1140 /* Destroy the ifnet. */
1141 sfxge_ifnet_fini(sc->ifnet);
1143 /* Tear down hardware. */
/*
 * device_probe method: match on PCI vendor/device via efx_family()
 * and set a human-readable description for supported controller
 * families (Siena / SFC9000, Huntington / SFC9100).  Unsupported IDs
 * fall through and are rejected.
 */
1150 sfxge_probe(device_t dev)
1152 uint16_t pci_vendor_id;
1153 uint16_t pci_device_id;
1154 efx_family_t family;
1157 pci_vendor_id = pci_get_vendor(dev);
1158 pci_device_id = pci_get_device(dev);
1160 DBGPRINT(dev, "PCI ID %04x:%04x", pci_vendor_id, pci_device_id);
1161 rc = efx_family(pci_vendor_id, pci_device_id, &family);
1163 DBGPRINT(dev, "efx_family fail %d", rc);
1167 if (family == EFX_FAMILY_SIENA) {
1168 device_set_desc(dev, "Solarflare SFC9000 family");
1172 if (family == EFX_FAMILY_HUNTINGTON) {
1173 device_set_desc(dev, "Solarflare SFC9100 family");
/* efx_family() succeeded but returned a family we do not handle. */
1177 DBGPRINT(dev, "impossible controller family %d", family);
/*
 * newbus glue: device method table, devclass and driver declaration,
 * registered against the PCI bus.
 */
1181 static device_method_t sfxge_methods[] = {
1182 DEVMETHOD(device_probe, sfxge_probe),
1183 DEVMETHOD(device_attach, sfxge_attach),
1184 DEVMETHOD(device_detach, sfxge_detach),
1189 static devclass_t sfxge_devclass;
1191 static driver_t sfxge_driver = {
1194 sizeof(struct sfxge_softc)
1197 DRIVER_MODULE(sfxge, pci, sfxge_driver, sfxge_devclass, 0, 0);