2 * SPDX-License-Identifier: BSD-4-Clause
4 * Copyright (c) 1997, 1998, 1999
5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
39 * Adaptec AIC-6915 "Starfire" PCI fast ethernet driver for FreeBSD.
40 * Programming manual is available from:
41 * http://download.adaptec.com/pdfs/user_guides/aic6915_pg.pdf.
43 * Written by Bill Paul <wpaul@ctr.columbia.edu>
44 * Department of Electrical Engineering
45 * Columbia University, New York City
48 * The Adaptec AIC-6915 "Starfire" is a 64-bit 10/100 PCI ethernet
49 * controller designed with flexibility and reduced CPU load in mind.
50 * The Starfire offers high and low priority buffer queues, a
51 * producer/consumer index mechanism and several different buffer
52 * queue and completion queue descriptor types. Any one of a number
53 * of different driver designs can be used, depending on system and
54 * OS requirements. This driver makes use of type2 transmit frame
55 * descriptors to take full advantage of fragmented packet buffers
56 * and two RX buffer queues prioritized on size (one queue for small
57 * frames that will fit into a single mbuf, another with full size
58 * mbuf clusters for everything else). The producer/consumer indexes
59 * and completion queues are also used.
61 * One downside to the Starfire has to do with alignment: buffer
62 * queues must be aligned on 256-byte boundaries, and receive buffers
63 * must be aligned on longword boundaries. The receive buffer alignment
64 * causes problems on strict alignment architectures, where the
65 * packet payload should be longword aligned. There is no simple way around this other than copying received packets into properly aligned buffers.
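 *
 * On strict alignment machines the driver therefore reserves a little
 * slack in each receive buffer and slides the frame back by ETHER_ALIGN
 * bytes after reception, roughly:
 *
 *	bcopy(mtod(m, caddr_t), mtod(m, caddr_t) - ETHER_ALIGN, m->m_len);
 *	m->m_data -= ETHER_ALIGN;
 *
 * (a sketch only; the actual word-by-word copy is sf_fixup_rx() below).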
68 * For receive filtering, the Starfire offers 16 perfect filter slots
69 * and a 512-bit hash table.
71 * The Starfire has no internal transceiver, relying instead on an
72 * external MII-based transceiver. Accessing registers on external
73 * PHYs is done through a special register map rather than with the
74 * usual bitbang MDIO method.
76 * Accessing the registers on the Starfire is a little tricky. The
77 * Starfire has a 512K internal register space. When programmed for
78 * PCI memory mapped mode, the entire register space can be accessed
79 * directly. However in I/O space mode, only 256 bytes are directly
80 * mapped into PCI I/O space. The other registers can be accessed
81 * indirectly using the SF_INDIRECTIO_ADDR and SF_INDIRECTIO_DATA
82 * registers inside the 256-byte I/O window.
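 *
 * In I/O space mode, an access to a register outside that window thus
 * becomes an address write followed by a data access, roughly:
 *
 *	CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE);
 *	val = CSR_READ_4(sc, SF_INDIRECTIO_DATA);
 *
 * csr_read_4()/csr_write_4() below hide this detail from the rest of
 * the driver.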
85 #ifdef HAVE_KERNEL_OPTION_HEADERS
86 #include "opt_device_polling.h"
89 #include <sys/param.h>
90 #include <sys/systm.h>
92 #include <sys/endian.h>
93 #include <sys/kernel.h>
94 #include <sys/malloc.h>
97 #include <sys/module.h>
98 #include <sys/socket.h>
99 #include <sys/sockio.h>
100 #include <sys/sysctl.h>
104 #include <net/if_var.h>
105 #include <net/if_arp.h>
106 #include <net/ethernet.h>
107 #include <net/if_dl.h>
108 #include <net/if_media.h>
109 #include <net/if_types.h>
110 #include <net/if_vlan_var.h>
112 #include <dev/mii/mii.h>
113 #include <dev/mii/miivar.h>
115 #include <dev/pci/pcireg.h>
116 #include <dev/pci/pcivar.h>
118 #include <machine/bus.h>
120 #include <dev/sf/if_sfreg.h>
121 #include <dev/sf/starfire_rx.h>
122 #include <dev/sf/starfire_tx.h>
124 /* "device miibus" required. See GENERIC if you get errors here. */
125 #include "miibus_if.h"
127 MODULE_DEPEND(sf, pci, 1, 1, 1);
128 MODULE_DEPEND(sf, ether, 1, 1, 1);
129 MODULE_DEPEND(sf, miibus, 1, 1, 1);
132 #define SF_CSUM_FEATURES (CSUM_TCP | CSUM_UDP)
133 /* Define this to activate partial TCP/UDP checksum offload. */
134 #undef SF_PARTIAL_CSUM_SUPPORT
136 static struct sf_type sf_devs[] = {
137 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
138 AD_SUBSYSID_62011_REV0, "Adaptec ANA-62011 (rev 0) 10/100BaseTX" },
139 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
140 AD_SUBSYSID_62011_REV1, "Adaptec ANA-62011 (rev 1) 10/100BaseTX" },
141 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
142 AD_SUBSYSID_62022, "Adaptec ANA-62022 10/100BaseTX" },
143 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
144 AD_SUBSYSID_62044_REV0, "Adaptec ANA-62044 (rev 0) 10/100BaseTX" },
145 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
146 AD_SUBSYSID_62044_REV1, "Adaptec ANA-62044 (rev 1) 10/100BaseTX" },
147 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
148 AD_SUBSYSID_62020, "Adaptec ANA-62020 10/100BaseFX" },
149 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
150 AD_SUBSYSID_69011, "Adaptec ANA-69011 10/100BaseTX" },
153 static int sf_probe(device_t);
154 static int sf_attach(device_t);
155 static int sf_detach(device_t);
156 static int sf_shutdown(device_t);
157 static int sf_suspend(device_t);
158 static int sf_resume(device_t);
159 static void sf_intr(void *);
160 static void sf_tick(void *);
161 static void sf_stats_update(struct sf_softc *);
162 #ifndef __NO_STRICT_ALIGNMENT
163 static __inline void sf_fixup_rx(struct mbuf *);
165 static int sf_rxeof(struct sf_softc *);
166 static void sf_txeof(struct sf_softc *);
167 static int sf_encap(struct sf_softc *, struct mbuf **);
168 static void sf_start(struct ifnet *);
169 static void sf_start_locked(struct ifnet *);
170 static int sf_ioctl(struct ifnet *, u_long, caddr_t);
171 static void sf_download_fw(struct sf_softc *);
172 static void sf_init(void *);
173 static void sf_init_locked(struct sf_softc *);
174 static void sf_stop(struct sf_softc *);
175 static void sf_watchdog(struct sf_softc *);
176 static int sf_ifmedia_upd(struct ifnet *);
177 static int sf_ifmedia_upd_locked(struct ifnet *);
178 static void sf_ifmedia_sts(struct ifnet *, struct ifmediareq *);
179 static void sf_reset(struct sf_softc *);
180 static int sf_dma_alloc(struct sf_softc *);
181 static void sf_dma_free(struct sf_softc *);
182 static int sf_init_rx_ring(struct sf_softc *);
183 static void sf_init_tx_ring(struct sf_softc *);
184 static int sf_newbuf(struct sf_softc *, int);
185 static void sf_rxfilter(struct sf_softc *);
186 static int sf_setperf(struct sf_softc *, int, uint8_t *);
187 static int sf_sethash(struct sf_softc *, caddr_t, int);
189 static int sf_setvlan(struct sf_softc *, int, uint32_t);
192 static uint8_t sf_read_eeprom(struct sf_softc *, int);
194 static int sf_miibus_readreg(device_t, int, int);
195 static int sf_miibus_writereg(device_t, int, int, int);
196 static void sf_miibus_statchg(device_t);
197 #ifdef DEVICE_POLLING
198 static int sf_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
201 static uint32_t csr_read_4(struct sf_softc *, int);
202 static void csr_write_4(struct sf_softc *, int, uint32_t);
203 static void sf_txthresh_adjust(struct sf_softc *);
204 static int sf_sysctl_stats(SYSCTL_HANDLER_ARGS);
205 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
206 static int sysctl_hw_sf_int_mod(SYSCTL_HANDLER_ARGS);
208 static device_method_t sf_methods[] = {
209 /* Device interface */
210 DEVMETHOD(device_probe, sf_probe),
211 DEVMETHOD(device_attach, sf_attach),
212 DEVMETHOD(device_detach, sf_detach),
213 DEVMETHOD(device_shutdown, sf_shutdown),
214 DEVMETHOD(device_suspend, sf_suspend),
215 DEVMETHOD(device_resume, sf_resume),
218 DEVMETHOD(miibus_readreg, sf_miibus_readreg),
219 DEVMETHOD(miibus_writereg, sf_miibus_writereg),
220 DEVMETHOD(miibus_statchg, sf_miibus_statchg),
225 static driver_t sf_driver = {
228 sizeof(struct sf_softc),
231 static devclass_t sf_devclass;
233 DRIVER_MODULE(sf, pci, sf_driver, sf_devclass, 0, 0);
234 DRIVER_MODULE(miibus, sf, miibus_driver, miibus_devclass, 0, 0);
236 #define SF_SETBIT(sc, reg, x) \
237 csr_write_4(sc, reg, csr_read_4(sc, reg) | (x))
239 #define SF_CLRBIT(sc, reg, x) \
240 csr_write_4(sc, reg, csr_read_4(sc, reg) & ~(x))
243 csr_read_4(struct sf_softc *sc, int reg)
247 if (sc->sf_restype == SYS_RES_MEMORY)
248 val = CSR_READ_4(sc, (reg + SF_RMAP_INTREG_BASE));
250 CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE);
251 val = CSR_READ_4(sc, SF_INDIRECTIO_DATA);
258 sf_read_eeprom(struct sf_softc *sc, int reg)
262 val = (csr_read_4(sc, SF_EEADDR_BASE +
263 (reg & 0xFFFFFFFC)) >> (8 * (reg & 3))) & 0xFF;
269 csr_write_4(struct sf_softc *sc, int reg, uint32_t val)
272 if (sc->sf_restype == SYS_RES_MEMORY)
273 CSR_WRITE_4(sc, (reg + SF_RMAP_INTREG_BASE), val);
275 CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE);
276 CSR_WRITE_4(sc, SF_INDIRECTIO_DATA, val);
281 * Copy the address 'mac' into the perfect RX filter entry at
282 * offset 'idx.' The perfect filter only has 16 entries, so do some sanity checking on the index first.
286 sf_setperf(struct sf_softc *sc, int idx, uint8_t *mac)
289 if (idx < 0 || idx > SF_RXFILT_PERFECT_CNT)
295 csr_write_4(sc, SF_RXFILT_PERFECT_BASE +
296 (idx * SF_RXFILT_PERFECT_SKIP) + 0, mac[5] | (mac[4] << 8));
297 csr_write_4(sc, SF_RXFILT_PERFECT_BASE +
298 (idx * SF_RXFILT_PERFECT_SKIP) + 4, mac[3] | (mac[2] << 8));
299 csr_write_4(sc, SF_RXFILT_PERFECT_BASE +
300 (idx * SF_RXFILT_PERFECT_SKIP) + 8, mac[1] | (mac[0] << 8));
306 * Set the bit in the 512-bit hash table that corresponds to the
307 * specified mac address 'mac.' If 'prio' is nonzero, update the
308 * priority hash table instead of the filter hash table.
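 *
 * The hash is the top 9 bits of the big-endian CRC32 of the address:
 * the upper 5 bits pick one of 32 filter words and the lower 4 bits
 * pick the bit within that word, roughly:
 *
 *	h = ether_crc32_be(mac, ETHER_ADDR_LEN) >> 23;
 *	SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_ADDROFF +
 *	    (SF_RXFILT_HASH_SKIP * (h >> 4)), 1 << (h & 0xF));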
311 sf_sethash(struct sf_softc *sc, caddr_t mac, int prio)
318 h = ether_crc32_be(mac, ETHER_ADDR_LEN) >> 23;
321 SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_PRIOOFF +
322 (SF_RXFILT_HASH_SKIP * (h >> 4)), (1 << (h & 0xF)));
324 SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_ADDROFF +
325 (SF_RXFILT_HASH_SKIP * (h >> 4)), (1 << (h & 0xF)));
333 * Set a VLAN tag in the receive filter.
336 sf_setvlan(struct sf_softc *sc, int idx, uint32_t vlan)
339 if (idx < 0 || idx > SF_RXFILT_HASH_CNT)
342 csr_write_4(sc, SF_RXFILT_HASH_BASE +
343 (idx * SF_RXFILT_HASH_SKIP) + SF_RXFILT_HASH_VLANOFF, vlan);
350 sf_miibus_readreg(device_t dev, int phy, int reg)
356 sc = device_get_softc(dev);
358 for (i = 0; i < SF_TIMEOUT; i++) {
359 val = csr_read_4(sc, SF_PHY_REG(phy, reg));
360 if ((val & SF_MII_DATAVALID) != 0)
367 val &= SF_MII_DATAPORT;
375 sf_miibus_writereg(device_t dev, int phy, int reg, int val)
381 sc = device_get_softc(dev);
383 csr_write_4(sc, SF_PHY_REG(phy, reg), val);
385 for (i = 0; i < SF_TIMEOUT; i++) {
386 busy = csr_read_4(sc, SF_PHY_REG(phy, reg));
387 if ((busy & SF_MII_BUSY) == 0)
395 sf_miibus_statchg(device_t dev)
398 struct mii_data *mii;
402 sc = device_get_softc(dev);
403 mii = device_get_softc(sc->sf_miibus);
405 if (mii == NULL || ifp == NULL ||
406 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
410 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
411 (IFM_ACTIVE | IFM_AVALID)) {
412 switch (IFM_SUBTYPE(mii->mii_media_active)) {
420 if (sc->sf_link == 0)
423 val = csr_read_4(sc, SF_MACCFG_1);
424 val &= ~SF_MACCFG1_FULLDUPLEX;
425 val &= ~(SF_MACCFG1_RX_FLOWENB | SF_MACCFG1_TX_FLOWENB);
426 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
427 val |= SF_MACCFG1_FULLDUPLEX;
428 csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_FDX);
430 /* Configure flow-control bits. */
431 if ((IFM_OPTIONS(mii->mii_media_active) &
432 IFM_ETH_RXPAUSE) != 0)
433 val |= SF_MACCFG1_RX_FLOWENB;
434 if ((IFM_OPTIONS(mii->mii_media_active) &
435 IFM_ETH_TXPAUSE) != 0)
436 val |= SF_MACCFG1_TX_FLOWENB;
439 csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_HDX);
441 /* Make sure to reset MAC to take changes effect. */
442 csr_write_4(sc, SF_MACCFG_1, val | SF_MACCFG1_SOFTRESET);
444 csr_write_4(sc, SF_MACCFG_1, val);
446 val = csr_read_4(sc, SF_TIMER_CTL);
447 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
448 val |= SF_TIMER_TIMES_TEN;
450 val &= ~SF_TIMER_TIMES_TEN;
451 csr_write_4(sc, SF_TIMER_CTL, val);
455 sf_rxfilter(struct sf_softc *sc)
459 struct ifmultiaddr *ifma;
460 uint8_t dummy[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
465 /* First zot all the existing filters. */
466 for (i = 1; i < SF_RXFILT_PERFECT_CNT; i++)
467 sf_setperf(sc, i, dummy);
468 for (i = SF_RXFILT_HASH_BASE; i < (SF_RXFILT_HASH_MAX + 1);
469 i += sizeof(uint32_t))
470 csr_write_4(sc, i, 0);
472 rxfilt = csr_read_4(sc, SF_RXFILT);
473 rxfilt &= ~(SF_RXFILT_PROMISC | SF_RXFILT_ALLMULTI | SF_RXFILT_BROAD);
474 if ((ifp->if_flags & IFF_BROADCAST) != 0)
475 rxfilt |= SF_RXFILT_BROAD;
476 if ((ifp->if_flags & IFF_ALLMULTI) != 0 ||
477 (ifp->if_flags & IFF_PROMISC) != 0) {
478 if ((ifp->if_flags & IFF_PROMISC) != 0)
479 rxfilt |= SF_RXFILT_PROMISC;
480 if ((ifp->if_flags & IFF_ALLMULTI) != 0)
481 rxfilt |= SF_RXFILT_ALLMULTI;
485 /* Now program new ones. */
487 /* XXX how do we maintain reverse semantics without impl */
489 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs,
491 if (ifma->ifma_addr->sa_family != AF_LINK)
494 * Program the first 15 multicast groups
495 * into the perfect filter. For all others,
496 * use the hash table.
498 if (i < SF_RXFILT_PERFECT_CNT) {
500 LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
506 LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 0);
508 if_maddr_runlock(ifp);
511 csr_write_4(sc, SF_RXFILT, rxfilt);
518 sf_ifmedia_upd(struct ifnet *ifp)
525 error = sf_ifmedia_upd_locked(ifp);
531 sf_ifmedia_upd_locked(struct ifnet *ifp)
534 struct mii_data *mii;
535 struct mii_softc *miisc;
538 mii = device_get_softc(sc->sf_miibus);
539 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
541 return (mii_mediachg(mii));
545 * Report current media status.
548 sf_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
551 struct mii_data *mii;
555 if ((ifp->if_flags & IFF_UP) == 0) {
560 mii = device_get_softc(sc->sf_miibus);
562 ifmr->ifm_active = mii->mii_media_active;
563 ifmr->ifm_status = mii->mii_media_status;
568 sf_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
572 struct mii_data *mii;
576 ifr = (struct ifreq *)data;
582 if (ifp->if_flags & IFF_UP) {
583 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
584 if ((ifp->if_flags ^ sc->sf_if_flags) &
585 (IFF_PROMISC | IFF_ALLMULTI))
588 if (sc->sf_detach == 0)
592 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
595 sc->sf_if_flags = ifp->if_flags;
601 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
607 mii = device_get_softc(sc->sf_miibus);
608 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
611 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
612 #ifdef DEVICE_POLLING
613 if ((mask & IFCAP_POLLING) != 0) {
614 if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
615 error = ether_poll_register(sf_poll, ifp);
619 /* Disable interrupts. */
620 csr_write_4(sc, SF_IMR, 0);
621 ifp->if_capenable |= IFCAP_POLLING;
624 error = ether_poll_deregister(ifp);
625 /* Enable interrupts. */
627 csr_write_4(sc, SF_IMR, SF_INTRS);
628 ifp->if_capenable &= ~IFCAP_POLLING;
632 #endif /* DEVICE_POLLING */
633 if ((mask & IFCAP_TXCSUM) != 0) {
634 if ((IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
636 ifp->if_capenable ^= IFCAP_TXCSUM;
637 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0) {
638 ifp->if_hwassist |= SF_CSUM_FEATURES;
639 SF_SETBIT(sc, SF_GEN_ETH_CTL,
640 SF_ETHCTL_TXGFP_ENB);
642 ifp->if_hwassist &= ~SF_CSUM_FEATURES;
643 SF_CLRBIT(sc, SF_GEN_ETH_CTL,
644 SF_ETHCTL_TXGFP_ENB);
649 if ((mask & IFCAP_RXCSUM) != 0) {
650 if ((IFCAP_RXCSUM & ifp->if_capabilities) != 0) {
652 ifp->if_capenable ^= IFCAP_RXCSUM;
653 if ((IFCAP_RXCSUM & ifp->if_capenable) != 0)
654 SF_SETBIT(sc, SF_GEN_ETH_CTL,
655 SF_ETHCTL_RXGFP_ENB);
657 SF_CLRBIT(sc, SF_GEN_ETH_CTL,
658 SF_ETHCTL_RXGFP_ENB);
664 error = ether_ioctl(ifp, command, data);
672 sf_reset(struct sf_softc *sc)
676 csr_write_4(sc, SF_GEN_ETH_CTL, 0);
677 SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET);
679 SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET);
681 SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_RESET);
683 for (i = 0; i < SF_TIMEOUT; i++) {
685 if (!(csr_read_4(sc, SF_PCI_DEVCFG) & SF_PCIDEVCFG_RESET))
690 device_printf(sc->sf_dev, "reset never completed!\n");
692 /* Wait a little while for the chip to get its brains in order. */
697 * Probe for an Adaptec AIC-6915 chip. Check the PCI vendor and device
698 * IDs against our list and return a device name if we find a match.
699 * We also check the subsystem ID so that we can identify exactly which
700 * NIC has been found, if possible.
703 sf_probe(device_t dev)
711 vid = pci_get_vendor(dev);
712 did = pci_get_device(dev);
713 sdid = pci_get_subdevice(dev);
716 for (i = 0; i < nitems(sf_devs); i++, t++) {
717 if (vid == t->sf_vid && did == t->sf_did) {
718 if (sdid == t->sf_sdid) {
719 device_set_desc(dev, t->sf_sname);
720 return (BUS_PROBE_DEFAULT);
725 if (vid == AD_VENDORID && did == AD_DEVICEID_STARFIRE) {
726 /* unknown subdevice */
727 device_set_desc(dev, sf_devs[0].sf_name);
728 return (BUS_PROBE_DEFAULT);
735 * Attach the interface. Allocate softc structures, do ifmedia
736 * setup and ethernet/BPF attach.
739 sf_attach(device_t dev)
746 uint8_t eaddr[ETHER_ADDR_LEN];
748 sc = device_get_softc(dev);
751 mtx_init(&sc->sf_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
753 callout_init_mtx(&sc->sf_co, &sc->sf_mtx, 0);
756 * Map control/status registers.
758 pci_enable_busmaster(dev);
761 * Prefer memory space register mapping over I/O space, as the
762 * hardware requires lots of register accesses to fetch various
763 * producer/consumer indexes during Tx/Rx operation. However, this
764 * requires a large memory window (512K) to map the entire register space.
767 sc->sf_rid = PCIR_BAR(0);
768 sc->sf_restype = SYS_RES_MEMORY;
769 sc->sf_res = bus_alloc_resource_any(dev, sc->sf_restype, &sc->sf_rid,
771 if (sc->sf_res == NULL) {
772 reg = pci_read_config(dev, PCIR_BAR(0), 4);
773 if ((reg & PCIM_BAR_MEM_64) == PCIM_BAR_MEM_64)
774 sc->sf_rid = PCIR_BAR(2);
776 sc->sf_rid = PCIR_BAR(1);
777 sc->sf_restype = SYS_RES_IOPORT;
778 sc->sf_res = bus_alloc_resource_any(dev, sc->sf_restype,
779 &sc->sf_rid, RF_ACTIVE);
780 if (sc->sf_res == NULL) {
781 device_printf(dev, "couldn't allocate resources\n");
782 mtx_destroy(&sc->sf_mtx);
787 device_printf(dev, "using %s space register mapping\n",
788 sc->sf_restype == SYS_RES_MEMORY ? "memory" : "I/O");
790 reg = pci_read_config(dev, PCIR_CACHELNSZ, 1);
793 * If the cache line size is 0, MWI is not used at all, so set a
794 * reasonable default. The AIC-6915 supports 0, 4, 8, 16, 32
798 device_printf(dev, "setting PCI cache line size to %u\n", reg);
799 pci_write_config(dev, PCIR_CACHELNSZ, reg, 1);
802 device_printf(dev, "PCI cache line size : %u\n", reg);
805 reg = pci_read_config(dev, PCIR_COMMAND, 2);
806 reg |= PCIM_CMD_MWRICEN;
807 pci_write_config(dev, PCIR_COMMAND, reg, 2);
809 /* Allocate interrupt. */
811 sc->sf_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
812 RF_SHAREABLE | RF_ACTIVE);
814 if (sc->sf_irq == NULL) {
815 device_printf(dev, "couldn't map interrupt\n");
820 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
821 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
822 OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
823 sf_sysctl_stats, "I", "Statistics");
825 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
826 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
827 OID_AUTO, "int_mod", CTLTYPE_INT | CTLFLAG_RW,
828 &sc->sf_int_mod, 0, sysctl_hw_sf_int_mod, "I",
829 "sf interrupt moderation");
830 /* Pull in device tunables. */
831 sc->sf_int_mod = SF_IM_DEFAULT;
832 error = resource_int_value(device_get_name(dev), device_get_unit(dev),
833 "int_mod", &sc->sf_int_mod);
835 if (sc->sf_int_mod < SF_IM_MIN ||
836 sc->sf_int_mod > SF_IM_MAX) {
837 device_printf(dev, "int_mod value out of range; "
838 "using default: %d\n", SF_IM_DEFAULT);
839 sc->sf_int_mod = SF_IM_DEFAULT;
843 /* Reset the adapter. */
847 * Get station address from the EEPROM.
849 for (i = 0; i < ETHER_ADDR_LEN; i++)
851 sf_read_eeprom(sc, SF_EE_NODEADDR + ETHER_ADDR_LEN - i);
853 /* Allocate DMA resources. */
854 if (sf_dma_alloc(sc) != 0) {
859 sc->sf_txthresh = SF_MIN_TX_THRESHOLD;
861 ifp = sc->sf_ifp = if_alloc(IFT_ETHER);
863 device_printf(dev, "can not allocate ifnet structure\n");
869 error = mii_attach(dev, &sc->sf_miibus, ifp, sf_ifmedia_upd,
870 sf_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
872 device_printf(dev, "attaching PHYs failed\n");
877 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
878 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
879 ifp->if_ioctl = sf_ioctl;
880 ifp->if_start = sf_start;
881 ifp->if_init = sf_init;
882 IFQ_SET_MAXLEN(&ifp->if_snd, SF_TX_DLIST_CNT - 1);
883 ifp->if_snd.ifq_drv_maxlen = SF_TX_DLIST_CNT - 1;
884 IFQ_SET_READY(&ifp->if_snd);
886 * With the help of firmware, AIC-6915 supports
887 * Tx/Rx TCP/UDP checksum offload.
889 ifp->if_hwassist = SF_CSUM_FEATURES;
890 ifp->if_capabilities = IFCAP_HWCSUM;
893 * Call MI attach routine.
895 ether_ifattach(ifp, eaddr);
897 /* VLAN capability setup. */
898 ifp->if_capabilities |= IFCAP_VLAN_MTU;
899 ifp->if_capenable = ifp->if_capabilities;
900 #ifdef DEVICE_POLLING
901 ifp->if_capabilities |= IFCAP_POLLING;
904 * Tell the upper layer(s) we support long frames.
905 * Must appear after the call to ether_ifattach() because
906 * ether_ifattach() sets ifi_hdrlen to the default value.
908 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
910 /* Hook interrupt last to avoid having to lock softc */
911 error = bus_setup_intr(dev, sc->sf_irq, INTR_TYPE_NET | INTR_MPSAFE,
912 NULL, sf_intr, sc, &sc->sf_intrhand);
915 device_printf(dev, "couldn't set up irq\n");
928 * Shutdown hardware and free up resources. This can be called any
929 * time after the mutex has been initialized. It is called in both
930 * the error case in attach and the normal detach case so it needs
931 * to be careful about only freeing resources that have actually been
935 sf_detach(device_t dev)
940 sc = device_get_softc(dev);
943 #ifdef DEVICE_POLLING
944 if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
945 ether_poll_deregister(ifp);
948 /* These should only be active if attach succeeded */
949 if (device_is_attached(dev)) {
954 callout_drain(&sc->sf_co);
959 device_delete_child(dev, sc->sf_miibus);
960 sc->sf_miibus = NULL;
962 bus_generic_detach(dev);
964 if (sc->sf_intrhand != NULL)
965 bus_teardown_intr(dev, sc->sf_irq, sc->sf_intrhand);
966 if (sc->sf_irq != NULL)
967 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sf_irq);
968 if (sc->sf_res != NULL)
969 bus_release_resource(dev, sc->sf_restype, sc->sf_rid,
976 mtx_destroy(&sc->sf_mtx);
981 struct sf_dmamap_arg {
982 bus_addr_t sf_busaddr;
986 sf_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
988 struct sf_dmamap_arg *ctx;
993 ctx->sf_busaddr = segs[0].ds_addr;
997 sf_dma_alloc(struct sf_softc *sc)
999 struct sf_dmamap_arg ctx;
1000 struct sf_txdesc *txd;
1001 struct sf_rxdesc *rxd;
1003 bus_addr_t rx_ring_end, rx_cring_end;
1004 bus_addr_t tx_ring_end, tx_cring_end;
1007 lowaddr = BUS_SPACE_MAXADDR;
1010 /* Create parent DMA tag. */
1011 error = bus_dma_tag_create(
1012 bus_get_dma_tag(sc->sf_dev), /* parent */
1013 1, 0, /* alignment, boundary */
1014 lowaddr, /* lowaddr */
1015 BUS_SPACE_MAXADDR, /* highaddr */
1016 NULL, NULL, /* filter, filterarg */
1017 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1019 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1021 NULL, NULL, /* lockfunc, lockarg */
1022 &sc->sf_cdata.sf_parent_tag);
1024 device_printf(sc->sf_dev, "failed to create parent DMA tag\n");
1027 /* Create tag for Tx ring. */
1028 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
1029 SF_RING_ALIGN, 0, /* alignment, boundary */
1030 BUS_SPACE_MAXADDR, /* lowaddr */
1031 BUS_SPACE_MAXADDR, /* highaddr */
1032 NULL, NULL, /* filter, filterarg */
1033 SF_TX_DLIST_SIZE, /* maxsize */
1035 SF_TX_DLIST_SIZE, /* maxsegsize */
1037 NULL, NULL, /* lockfunc, lockarg */
1038 &sc->sf_cdata.sf_tx_ring_tag);
1040 device_printf(sc->sf_dev, "failed to create Tx ring DMA tag\n");
1044 /* Create tag for Tx completion ring. */
1045 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
1046 SF_RING_ALIGN, 0, /* alignment, boundary */
1047 BUS_SPACE_MAXADDR, /* lowaddr */
1048 BUS_SPACE_MAXADDR, /* highaddr */
1049 NULL, NULL, /* filter, filterarg */
1050 SF_TX_CLIST_SIZE, /* maxsize */
1052 SF_TX_CLIST_SIZE, /* maxsegsize */
1054 NULL, NULL, /* lockfunc, lockarg */
1055 &sc->sf_cdata.sf_tx_cring_tag);
1057 device_printf(sc->sf_dev,
1058 "failed to create Tx completion ring DMA tag\n");
1062 /* Create tag for Rx ring. */
1063 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
1064 SF_RING_ALIGN, 0, /* alignment, boundary */
1065 BUS_SPACE_MAXADDR, /* lowaddr */
1066 BUS_SPACE_MAXADDR, /* highaddr */
1067 NULL, NULL, /* filter, filterarg */
1068 SF_RX_DLIST_SIZE, /* maxsize */
1070 SF_RX_DLIST_SIZE, /* maxsegsize */
1072 NULL, NULL, /* lockfunc, lockarg */
1073 &sc->sf_cdata.sf_rx_ring_tag);
1075 device_printf(sc->sf_dev,
1076 "failed to create Rx ring DMA tag\n");
1080 /* Create tag for Rx completion ring. */
1081 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
1082 SF_RING_ALIGN, 0, /* alignment, boundary */
1083 BUS_SPACE_MAXADDR, /* lowaddr */
1084 BUS_SPACE_MAXADDR, /* highaddr */
1085 NULL, NULL, /* filter, filterarg */
1086 SF_RX_CLIST_SIZE, /* maxsize */
1088 SF_RX_CLIST_SIZE, /* maxsegsize */
1090 NULL, NULL, /* lockfunc, lockarg */
1091 &sc->sf_cdata.sf_rx_cring_tag);
1093 device_printf(sc->sf_dev,
1094 "failed to create Rx completion ring DMA tag\n");
1098 /* Create tag for Tx buffers. */
1099 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
1100 1, 0, /* alignment, boundary */
1101 BUS_SPACE_MAXADDR, /* lowaddr */
1102 BUS_SPACE_MAXADDR, /* highaddr */
1103 NULL, NULL, /* filter, filterarg */
1104 MCLBYTES * SF_MAXTXSEGS, /* maxsize */
1105 SF_MAXTXSEGS, /* nsegments */
1106 MCLBYTES, /* maxsegsize */
1108 NULL, NULL, /* lockfunc, lockarg */
1109 &sc->sf_cdata.sf_tx_tag);
1111 device_printf(sc->sf_dev, "failed to create Tx DMA tag\n");
1115 /* Create tag for Rx buffers. */
1116 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
1117 SF_RX_ALIGN, 0, /* alignment, boundary */
1118 BUS_SPACE_MAXADDR, /* lowaddr */
1119 BUS_SPACE_MAXADDR, /* highaddr */
1120 NULL, NULL, /* filter, filterarg */
1121 MCLBYTES, /* maxsize */
1123 MCLBYTES, /* maxsegsize */
1125 NULL, NULL, /* lockfunc, lockarg */
1126 &sc->sf_cdata.sf_rx_tag);
1128 device_printf(sc->sf_dev, "failed to create Rx DMA tag\n");
1132 /* Allocate DMA'able memory and load the DMA map for Tx ring. */
1133 error = bus_dmamem_alloc(sc->sf_cdata.sf_tx_ring_tag,
1134 (void **)&sc->sf_rdata.sf_tx_ring, BUS_DMA_WAITOK |
1135 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_tx_ring_map);
1137 device_printf(sc->sf_dev,
1138 "failed to allocate DMA'able memory for Tx ring\n");
1143 error = bus_dmamap_load(sc->sf_cdata.sf_tx_ring_tag,
1144 sc->sf_cdata.sf_tx_ring_map, sc->sf_rdata.sf_tx_ring,
1145 SF_TX_DLIST_SIZE, sf_dmamap_cb, &ctx, 0);
1146 if (error != 0 || ctx.sf_busaddr == 0) {
1147 device_printf(sc->sf_dev,
1148 "failed to load DMA'able memory for Tx ring\n");
1151 sc->sf_rdata.sf_tx_ring_paddr = ctx.sf_busaddr;
1154 * Allocate DMA'able memory and load the DMA map for Tx completion ring.
1156 error = bus_dmamem_alloc(sc->sf_cdata.sf_tx_cring_tag,
1157 (void **)&sc->sf_rdata.sf_tx_cring, BUS_DMA_WAITOK |
1158 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_tx_cring_map);
1160 device_printf(sc->sf_dev,
1161 "failed to allocate DMA'able memory for "
1162 "Tx completion ring\n");
1167 error = bus_dmamap_load(sc->sf_cdata.sf_tx_cring_tag,
1168 sc->sf_cdata.sf_tx_cring_map, sc->sf_rdata.sf_tx_cring,
1169 SF_TX_CLIST_SIZE, sf_dmamap_cb, &ctx, 0);
1170 if (error != 0 || ctx.sf_busaddr == 0) {
1171 device_printf(sc->sf_dev,
1172 "failed to load DMA'able memory for Tx completion ring\n");
1175 sc->sf_rdata.sf_tx_cring_paddr = ctx.sf_busaddr;
1177 /* Allocate DMA'able memory and load the DMA map for Rx ring. */
1178 error = bus_dmamem_alloc(sc->sf_cdata.sf_rx_ring_tag,
1179 (void **)&sc->sf_rdata.sf_rx_ring, BUS_DMA_WAITOK |
1180 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_rx_ring_map);
1182 device_printf(sc->sf_dev,
1183 "failed to allocate DMA'able memory for Rx ring\n");
1188 error = bus_dmamap_load(sc->sf_cdata.sf_rx_ring_tag,
1189 sc->sf_cdata.sf_rx_ring_map, sc->sf_rdata.sf_rx_ring,
1190 SF_RX_DLIST_SIZE, sf_dmamap_cb, &ctx, 0);
1191 if (error != 0 || ctx.sf_busaddr == 0) {
1192 device_printf(sc->sf_dev,
1193 "failed to load DMA'able memory for Rx ring\n");
1196 sc->sf_rdata.sf_rx_ring_paddr = ctx.sf_busaddr;
1199 * Allocate DMA'able memory and load the DMA map for Rx completion ring.
1201 error = bus_dmamem_alloc(sc->sf_cdata.sf_rx_cring_tag,
1202 (void **)&sc->sf_rdata.sf_rx_cring, BUS_DMA_WAITOK |
1203 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_rx_cring_map);
1205 device_printf(sc->sf_dev,
1206 "failed to allocate DMA'able memory for "
1207 "Rx completion ring\n");
1212 error = bus_dmamap_load(sc->sf_cdata.sf_rx_cring_tag,
1213 sc->sf_cdata.sf_rx_cring_map, sc->sf_rdata.sf_rx_cring,
1214 SF_RX_CLIST_SIZE, sf_dmamap_cb, &ctx, 0);
1215 if (error != 0 || ctx.sf_busaddr == 0) {
1216 device_printf(sc->sf_dev,
1217 "failed to load DMA'able memory for Rx completion ring\n");
1220 sc->sf_rdata.sf_rx_cring_paddr = ctx.sf_busaddr;
1223 * The Tx descriptor ring and Tx completion ring should be addressed in
1224 * the same 4GB space. The same rule applies to the Rx ring and Rx
1225 * completion ring. Unfortunately there is no way to specify this
1226 * boundary restriction with bus_dma(9), so just try to allocate
1227 * without the restriction and check whether the restriction was satisfied.
1228 * If not, fall back to 32bit DMA addressing mode, which always
1229 * guarantees the restriction.
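 *
 * The check below amounts to comparing the upper 32 bits of the start
 * and end addresses of each ring pair, e.g. for the Tx side:
 *
 *	if (SF_ADDR_HI(tx_ring_paddr) != SF_ADDR_HI(tx_cring_end) ||
 *	    SF_ADDR_HI(tx_cring_paddr) != SF_ADDR_HI(tx_ring_end))
 *		retry with lowaddr = BUS_SPACE_MAXADDR_32BIT;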
1231 tx_ring_end = sc->sf_rdata.sf_tx_ring_paddr + SF_TX_DLIST_SIZE;
1232 tx_cring_end = sc->sf_rdata.sf_tx_cring_paddr + SF_TX_CLIST_SIZE;
1233 rx_ring_end = sc->sf_rdata.sf_rx_ring_paddr + SF_RX_DLIST_SIZE;
1234 rx_cring_end = sc->sf_rdata.sf_rx_cring_paddr + SF_RX_CLIST_SIZE;
1235 if ((SF_ADDR_HI(sc->sf_rdata.sf_tx_ring_paddr) !=
1236 SF_ADDR_HI(tx_cring_end)) ||
1237 (SF_ADDR_HI(sc->sf_rdata.sf_tx_cring_paddr) !=
1238 SF_ADDR_HI(tx_ring_end)) ||
1239 (SF_ADDR_HI(sc->sf_rdata.sf_rx_ring_paddr) !=
1240 SF_ADDR_HI(rx_cring_end)) ||
1241 (SF_ADDR_HI(sc->sf_rdata.sf_rx_cring_paddr) !=
1242 SF_ADDR_HI(rx_ring_end))) {
1243 device_printf(sc->sf_dev,
1244 "switching to 32bit DMA mode\n");
1246 /* Limit DMA address space to 32bit and try again. */
1247 lowaddr = BUS_SPACE_MAXADDR_32BIT;
1251 /* Create DMA maps for Tx buffers. */
1252 for (i = 0; i < SF_TX_DLIST_CNT; i++) {
1253 txd = &sc->sf_cdata.sf_txdesc[i];
1256 txd->tx_dmamap = NULL;
1257 error = bus_dmamap_create(sc->sf_cdata.sf_tx_tag, 0,
1260 device_printf(sc->sf_dev,
1261 "failed to create Tx dmamap\n");
1265 /* Create DMA maps for Rx buffers. */
1266 if ((error = bus_dmamap_create(sc->sf_cdata.sf_rx_tag, 0,
1267 &sc->sf_cdata.sf_rx_sparemap)) != 0) {
1268 device_printf(sc->sf_dev,
1269 "failed to create spare Rx dmamap\n");
1272 for (i = 0; i < SF_RX_DLIST_CNT; i++) {
1273 rxd = &sc->sf_cdata.sf_rxdesc[i];
1275 rxd->rx_dmamap = NULL;
1276 error = bus_dmamap_create(sc->sf_cdata.sf_rx_tag, 0,
1279 device_printf(sc->sf_dev,
1280 "failed to create Rx dmamap\n");
1290 sf_dma_free(struct sf_softc *sc)
1292 struct sf_txdesc *txd;
1293 struct sf_rxdesc *rxd;
1297 if (sc->sf_cdata.sf_tx_ring_tag) {
1298 if (sc->sf_rdata.sf_tx_ring_paddr)
1299 bus_dmamap_unload(sc->sf_cdata.sf_tx_ring_tag,
1300 sc->sf_cdata.sf_tx_ring_map);
1301 if (sc->sf_rdata.sf_tx_ring)
1302 bus_dmamem_free(sc->sf_cdata.sf_tx_ring_tag,
1303 sc->sf_rdata.sf_tx_ring,
1304 sc->sf_cdata.sf_tx_ring_map);
1305 sc->sf_rdata.sf_tx_ring = NULL;
1306 sc->sf_rdata.sf_tx_ring_paddr = 0;
1307 bus_dma_tag_destroy(sc->sf_cdata.sf_tx_ring_tag);
1308 sc->sf_cdata.sf_tx_ring_tag = NULL;
1310 /* Tx completion ring. */
1311 if (sc->sf_cdata.sf_tx_cring_tag) {
1312 if (sc->sf_rdata.sf_tx_cring_paddr)
1313 bus_dmamap_unload(sc->sf_cdata.sf_tx_cring_tag,
1314 sc->sf_cdata.sf_tx_cring_map);
1315 if (sc->sf_rdata.sf_tx_cring)
1316 bus_dmamem_free(sc->sf_cdata.sf_tx_cring_tag,
1317 sc->sf_rdata.sf_tx_cring,
1318 sc->sf_cdata.sf_tx_cring_map);
1319 sc->sf_rdata.sf_tx_cring = NULL;
1320 sc->sf_rdata.sf_tx_cring_paddr = 0;
1321 bus_dma_tag_destroy(sc->sf_cdata.sf_tx_cring_tag);
1322 sc->sf_cdata.sf_tx_cring_tag = NULL;
1325 if (sc->sf_cdata.sf_rx_ring_tag) {
1326 if (sc->sf_rdata.sf_rx_ring_paddr)
1327 bus_dmamap_unload(sc->sf_cdata.sf_rx_ring_tag,
1328 sc->sf_cdata.sf_rx_ring_map);
1329 if (sc->sf_rdata.sf_rx_ring)
1330 bus_dmamem_free(sc->sf_cdata.sf_rx_ring_tag,
1331 sc->sf_rdata.sf_rx_ring,
1332 sc->sf_cdata.sf_rx_ring_map);
1333 sc->sf_rdata.sf_rx_ring = NULL;
1334 sc->sf_rdata.sf_rx_ring_paddr = 0;
1335 bus_dma_tag_destroy(sc->sf_cdata.sf_rx_ring_tag);
1336 sc->sf_cdata.sf_rx_ring_tag = NULL;
1338 /* Rx completion ring. */
1339 if (sc->sf_cdata.sf_rx_cring_tag) {
1340 if (sc->sf_rdata.sf_rx_cring_paddr)
1341 bus_dmamap_unload(sc->sf_cdata.sf_rx_cring_tag,
1342 sc->sf_cdata.sf_rx_cring_map);
1343 if (sc->sf_rdata.sf_rx_cring)
1344 bus_dmamem_free(sc->sf_cdata.sf_rx_cring_tag,
1345 sc->sf_rdata.sf_rx_cring,
1346 sc->sf_cdata.sf_rx_cring_map);
1347 sc->sf_rdata.sf_rx_cring = NULL;
1348 sc->sf_rdata.sf_rx_cring_paddr = 0;
1349 bus_dma_tag_destroy(sc->sf_cdata.sf_rx_cring_tag);
1350 sc->sf_cdata.sf_rx_cring_tag = NULL;
1353 if (sc->sf_cdata.sf_tx_tag) {
1354 for (i = 0; i < SF_TX_DLIST_CNT; i++) {
1355 txd = &sc->sf_cdata.sf_txdesc[i];
1356 if (txd->tx_dmamap) {
1357 bus_dmamap_destroy(sc->sf_cdata.sf_tx_tag,
1359 txd->tx_dmamap = NULL;
1362 bus_dma_tag_destroy(sc->sf_cdata.sf_tx_tag);
1363 sc->sf_cdata.sf_tx_tag = NULL;
1366 if (sc->sf_cdata.sf_rx_tag) {
1367 for (i = 0; i < SF_RX_DLIST_CNT; i++) {
1368 rxd = &sc->sf_cdata.sf_rxdesc[i];
1369 if (rxd->rx_dmamap) {
1370 bus_dmamap_destroy(sc->sf_cdata.sf_rx_tag,
1372 rxd->rx_dmamap = NULL;
1375 if (sc->sf_cdata.sf_rx_sparemap) {
1376 bus_dmamap_destroy(sc->sf_cdata.sf_rx_tag,
1377 sc->sf_cdata.sf_rx_sparemap);
1378 sc->sf_cdata.sf_rx_sparemap = 0;
1380 bus_dma_tag_destroy(sc->sf_cdata.sf_rx_tag);
1381 sc->sf_cdata.sf_rx_tag = NULL;
1384 if (sc->sf_cdata.sf_parent_tag) {
1385 bus_dma_tag_destroy(sc->sf_cdata.sf_parent_tag);
1386 sc->sf_cdata.sf_parent_tag = NULL;
1391 sf_init_rx_ring(struct sf_softc *sc)
1393 struct sf_ring_data *rd;
1396 sc->sf_cdata.sf_rxc_cons = 0;
1399 bzero(rd->sf_rx_ring, SF_RX_DLIST_SIZE);
1400 bzero(rd->sf_rx_cring, SF_RX_CLIST_SIZE);
1402 for (i = 0; i < SF_RX_DLIST_CNT; i++) {
1403 if (sf_newbuf(sc, i) != 0)
1407 bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag,
1408 sc->sf_cdata.sf_rx_cring_map,
1409 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1410 bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag,
1411 sc->sf_cdata.sf_rx_ring_map,
1412 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1418 sf_init_tx_ring(struct sf_softc *sc)
1420 struct sf_ring_data *rd;
1423 sc->sf_cdata.sf_tx_prod = 0;
1424 sc->sf_cdata.sf_tx_cnt = 0;
1425 sc->sf_cdata.sf_txc_cons = 0;
1428 bzero(rd->sf_tx_ring, SF_TX_DLIST_SIZE);
1429 bzero(rd->sf_tx_cring, SF_TX_CLIST_SIZE);
1430 for (i = 0; i < SF_TX_DLIST_CNT; i++) {
1431 rd->sf_tx_ring[i].sf_tx_ctrl = htole32(SF_TX_DESC_ID);
1432 sc->sf_cdata.sf_txdesc[i].tx_m = NULL;
1433 sc->sf_cdata.sf_txdesc[i].ndesc = 0;
1435 rd->sf_tx_ring[i].sf_tx_ctrl |= htole32(SF_TX_DESC_END);
1437 bus_dmamap_sync(sc->sf_cdata.sf_tx_ring_tag,
1438 sc->sf_cdata.sf_tx_ring_map,
1439 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1440 bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag,
1441 sc->sf_cdata.sf_tx_cring_map,
1442 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1446 * Initialize an RX descriptor and attach an MBUF cluster.
1449 sf_newbuf(struct sf_softc *sc, int idx)
1451 struct sf_rx_rdesc *desc;
1452 struct sf_rxdesc *rxd;
1454 bus_dma_segment_t segs[1];
1458 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1461 m->m_len = m->m_pkthdr.len = MCLBYTES;
1462 m_adj(m, sizeof(uint32_t));
1464 if (bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_rx_tag,
1465 sc->sf_cdata.sf_rx_sparemap, m, segs, &nsegs, 0) != 0) {
1469 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1471 rxd = &sc->sf_cdata.sf_rxdesc[idx];
1472 if (rxd->rx_m != NULL) {
1473 bus_dmamap_sync(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap,
1474 BUS_DMASYNC_POSTREAD);
1475 bus_dmamap_unload(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap);
1477 map = rxd->rx_dmamap;
1478 rxd->rx_dmamap = sc->sf_cdata.sf_rx_sparemap;
1479 sc->sf_cdata.sf_rx_sparemap = map;
1480 bus_dmamap_sync(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap,
1481 BUS_DMASYNC_PREREAD);
1483 desc = &sc->sf_rdata.sf_rx_ring[idx];
1484 desc->sf_addr = htole64(segs[0].ds_addr);
1489 #ifndef __NO_STRICT_ALIGNMENT
1490 static __inline void
1491 sf_fixup_rx(struct mbuf *m)
1494 uint16_t *src, *dst;
1496 src = mtod(m, uint16_t *);
1499 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
1502 m->m_data -= ETHER_ALIGN;
1507 * The starfire is programmed to use 'normal' mode for packet reception,
1508 * which means we use the consumer/producer model for both the buffer
1509 * descriptor queue and the completion descriptor queue. The only problem
1510 * with this is that it involves a lot of register accesses: we have to
1511 * read the RX completion consumer and producer indexes and the RX buffer
1512 * producer index, plus the RX completion consumer and RX buffer producer
1513 * indexes have to be updated. It would have been easier if Adaptec had
1514 * put each index in a separate register, especially given that the damn
1515 * NIC has a 512K register space.
1517 * In spite of all the lovely features that Adaptec crammed into the 6915,
1518 * it is marred by one truly stupid design flaw, which is that receive
1519 * buffer addresses must be aligned on a longword boundary. This forces
1520 * the packet payload to be unaligned, which is suboptimal on the x86 and
1521 * completely unusable on the Alpha. Our only recourse is to copy received
1522 * packets into properly aligned buffers before handing them off.
1525 sf_rxeof(struct sf_softc *sc)
1529 struct sf_rxdesc *rxd;
1530 struct sf_rx_rcdesc *cur_cmp;
1531 int cons, eidx, prog, rx_npkts;
1532 uint32_t status, status2;
1539 bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag,
1540 sc->sf_cdata.sf_rx_ring_map,
1541 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1542 bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag,
1543 sc->sf_cdata.sf_rx_cring_map,
1544 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1547 * To reduce register access, directly read the Receive completion queue entries.
1552 for (cons = sc->sf_cdata.sf_rxc_cons;
1553 (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
1554 SF_INC(cons, SF_RX_CLIST_CNT)) {
1555 cur_cmp = &sc->sf_rdata.sf_rx_cring[cons];
1556 status = le32toh(cur_cmp->sf_rx_status1);
1559 #ifdef DEVICE_POLLING
1560 if ((ifp->if_capenable & IFCAP_POLLING) != 0) {
1561 if (sc->rxcycles <= 0)
1567 eidx = (status & SF_RX_CMPDESC_EIDX) >> 16;
1568 rxd = &sc->sf_cdata.sf_rxdesc[eidx];
1572 * Note, IFCOUNTER_IPACKETS and IFCOUNTER_IERRORS
1573 * are handled in sf_stats_update().
1575 if ((status & SF_RXSTAT1_OK) == 0) {
1576 cur_cmp->sf_rx_status1 = 0;
1580 if (sf_newbuf(sc, eidx) != 0) {
1581 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
1582 cur_cmp->sf_rx_status1 = 0;
1586 /* AIC-6915 supports TCP/UDP checksum offload. */
1587 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
1588 status2 = le32toh(cur_cmp->sf_rx_status2);
1590 * Sometimes AIC-6915 generates an interrupt to
1591 * warn of an RxGFP stall with the bad checksum bit set
1592 * in the status word. I'm not sure what condition
1593 * triggers it, but the received packet's checksum
1594 * was correct even though AIC-6915 does not
1595 * agree on this. This may be an indication of
1596 * firmware bug. To fix the issue, do not rely
1597 * on bad checksum bit in status word and let
1598 * upper layer verify integrity of received
1600 * Another nice feature of AIC-6915 is hardware
1601 * assistance of checksum calculation by
1602 * providing partial checksum value for received
1603 * frame. The partial checksum value can be used
1604 * to accelerate checksum computation for
1605 * fragmented TCP/UDP packets. Upper network
1606 * stack already takes advantage of the partial
1607 * checksum value in IP reassembly stage. But
1608 * I'm not sure about the correctness of the partial
1609 * hardware checksum assistance as frequent
1610 * RxGFP stalls are seen on non-fragmented
1611 * frames. Due to the nature of the complexity
1612 * of checksum computation code in firmware it's
1613 * possible to see another bug in RxGFP so
1614 * ignore checksum assistance for fragmented
1615 * frames. This can be changed in future.
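 *
 * In practice a non-fragmented TCP/UDP frame that passes the check is
 * simply marked fully checksummed, e.g.:
 *
 *	m->m_pkthdr.csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
 *	m->m_pkthdr.csum_data = 0xffff;
 *
 * while fragmented frames are left to the stack unless
 * SF_PARTIAL_CSUM_SUPPORT is defined.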
1617 if ((status2 & SF_RXSTAT2_FRAG) == 0) {
1618 if ((status2 & (SF_RXSTAT2_TCP |
1619 SF_RXSTAT2_UDP)) != 0) {
1620 if ((status2 & SF_RXSTAT2_CSUM_OK)) {
1621 m->m_pkthdr.csum_flags =
1624 m->m_pkthdr.csum_data = 0xffff;
1628 #ifdef SF_PARTIAL_CSUM_SUPPORT
1629 else if ((status2 & SF_RXSTAT2_FRAG) != 0) {
1630 if ((status2 & (SF_RXSTAT2_TCP |
1631 SF_RXSTAT2_UDP)) != 0) {
1632 if ((status2 & SF_RXSTAT2_PCSUM_OK)) {
1633 m->m_pkthdr.csum_flags =
1635 m->m_pkthdr.csum_data =
1637 SF_RX_CMPDESC_CSUM2);
1644 m->m_pkthdr.len = m->m_len = status & SF_RX_CMPDESC_LEN;
1645 #ifndef __NO_STRICT_ALIGNMENT
1648 m->m_pkthdr.rcvif = ifp;
1651 (*ifp->if_input)(ifp, m);
1655 /* Clear completion status. */
1656 cur_cmp->sf_rx_status1 = 0;
1660 sc->sf_cdata.sf_rxc_cons = cons;
1661 bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag,
1662 sc->sf_cdata.sf_rx_ring_map,
1663 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1664 bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag,
1665 sc->sf_cdata.sf_rx_cring_map,
1666 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1668 /* Update Rx completion Q1 consumer index. */
1669 csr_write_4(sc, SF_CQ_CONSIDX,
1670 (csr_read_4(sc, SF_CQ_CONSIDX) & ~SF_CQ_CONSIDX_RXQ1) |
1671 (cons & SF_CQ_CONSIDX_RXQ1));
1672 /* Update Rx descriptor Q1 ptr. */
1673 csr_write_4(sc, SF_RXDQ_PTR_Q1,
1674 (csr_read_4(sc, SF_RXDQ_PTR_Q1) & ~SF_RXDQ_PRODIDX) |
1675 (eidx & SF_RXDQ_PRODIDX));
1681 * Read the transmit status from the completion queue and release
1682 * mbufs. Note that the buffer descriptor index in the completion
1683 * descriptor is an offset from the start of the transmit buffer
1684 * descriptor list in bytes. This is important because the manual
1685 * gives the impression that it should match the producer/consumer
1686 * index, which is the offset in 8 byte blocks.
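 *
 * Converting the completion descriptor's byte offset back to a buffer
 * descriptor index is therefore just a division, as done below:
 *
 *	idx = (status & SF_TX_CMPDESC_IDX) / sizeof(struct sf_tx_rdesc);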
1689 sf_txeof(struct sf_softc *sc)
1691 struct sf_txdesc *txd;
1692 struct sf_tx_rcdesc *cur_cmp;
1695 int cons, idx, prod;
1701 bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag,
1702 sc->sf_cdata.sf_tx_cring_map,
1703 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1705 cons = sc->sf_cdata.sf_txc_cons;
1706 prod = (csr_read_4(sc, SF_CQ_PRODIDX) & SF_TXDQ_PRODIDX_HIPRIO) >> 16;
1710 for (; cons != prod; SF_INC(cons, SF_TX_CLIST_CNT)) {
1711 cur_cmp = &sc->sf_rdata.sf_tx_cring[cons];
1712 status = le32toh(cur_cmp->sf_tx_status1);
1715 switch (status & SF_TX_CMPDESC_TYPE) {
1716 case SF_TXCMPTYPE_TX:
1717 /* Tx complete entry. */
1719 case SF_TXCMPTYPE_DMA:
1720 /* DMA complete entry. */
1721 idx = status & SF_TX_CMPDESC_IDX;
1722 idx = idx / sizeof(struct sf_tx_rdesc);
1724 * We don't need to check Tx status here.
1725 * SF_ISR_TX_LOFIFO intr would handle this.
1726 * Note, IFCOUNTER_OPACKETS, IFCOUNTER_COLLISIONS
1727 * and IFCOUNTER_OERROR are handled in
1728 * sf_stats_update().
1730 txd = &sc->sf_cdata.sf_txdesc[idx];
1731 if (txd->tx_m != NULL) {
1732 bus_dmamap_sync(sc->sf_cdata.sf_tx_tag,
1734 BUS_DMASYNC_POSTWRITE);
1735 bus_dmamap_unload(sc->sf_cdata.sf_tx_tag,
1740 sc->sf_cdata.sf_tx_cnt -= txd->ndesc;
1741 KASSERT(sc->sf_cdata.sf_tx_cnt >= 0,
1742 ("%s: Active Tx desc counter was garbled\n",
1745 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1748 /* It should not happen. */
1749 device_printf(sc->sf_dev,
1750 "unknown Tx completion type : 0x%08x : %d : %d\n",
1751 status, cons, prod);
1754 cur_cmp->sf_tx_status1 = 0;
1757 sc->sf_cdata.sf_txc_cons = cons;
1758 bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag,
1759 sc->sf_cdata.sf_tx_cring_map,
1760 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1762 if (sc->sf_cdata.sf_tx_cnt == 0)
1763 sc->sf_watchdog_timer = 0;
1765 /* Update Tx completion consumer index. */
1766 csr_write_4(sc, SF_CQ_CONSIDX,
1767 (csr_read_4(sc, SF_CQ_CONSIDX) & 0xffff) |
1768 ((cons << 16) & 0xffff0000));
1772 sf_txthresh_adjust(struct sf_softc *sc)
1776 device_printf(sc->sf_dev, "Tx underrun -- ");
1777 if (sc->sf_txthresh < SF_MAX_TX_THRESHOLD) {
1778 txfctl = csr_read_4(sc, SF_TX_FRAMCTL);
1779 /* Increase Tx threshold by 256 bytes. */
1780 sc->sf_txthresh += 16;
1781 if (sc->sf_txthresh > SF_MAX_TX_THRESHOLD)
1782 sc->sf_txthresh = SF_MAX_TX_THRESHOLD;
1783 txfctl &= ~SF_TXFRMCTL_TXTHRESH;
1784 txfctl |= sc->sf_txthresh;
1785 printf("increasing Tx threshold to %d bytes\n",
1786 sc->sf_txthresh * SF_TX_THRESHOLD_UNIT);
1787 csr_write_4(sc, SF_TX_FRAMCTL, txfctl);
1792 #ifdef DEVICE_POLLING
1794 sf_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1796 struct sf_softc *sc;
1804 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1809 sc->rxcycles = count;
1810 rx_npkts = sf_rxeof(sc);
1812 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1813 sf_start_locked(ifp);
1815 if (cmd == POLL_AND_CHECK_STATUS) {
1816 /* Reading the ISR register clears all interrupts. */
1817 status = csr_read_4(sc, SF_ISR);
1819 if ((status & SF_ISR_ABNORMALINTR) != 0) {
1820 if ((status & SF_ISR_STATSOFLOW) != 0)
1821 sf_stats_update(sc);
1822 else if ((status & SF_ISR_TX_LOFIFO) != 0)
1823 sf_txthresh_adjust(sc);
1824 else if ((status & SF_ISR_DMAERR) != 0) {
1825 device_printf(sc->sf_dev,
1826 "DMA error, resetting\n");
1827 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1831 } else if ((status & SF_ISR_NO_TX_CSUM) != 0) {
1832 sc->sf_statistics.sf_tx_gfp_stall++;
1834 device_printf(sc->sf_dev,
1835 "TxGFP is not responding!\n");
1837 } else if ((status & SF_ISR_RXGFP_NORESP) != 0) {
1838 sc->sf_statistics.sf_rx_gfp_stall++;
1840 device_printf(sc->sf_dev,
1841 "RxGFP is not responding!\n");
1850 #endif /* DEVICE_POLLING */
1855 struct sf_softc *sc;
1860 sc = (struct sf_softc *)arg;
1863 if (sc->sf_suspended != 0)
1866 /* Reading the ISR register clears all interrupts. */
1867 status = csr_read_4(sc, SF_ISR);
1868 if (status == 0 || status == 0xffffffff ||
1869 (status & SF_ISR_PCIINT_ASSERTED) == 0)
1873 #ifdef DEVICE_POLLING
1874 if ((ifp->if_capenable & IFCAP_POLLING) != 0)
1878 /* Disable interrupts. */
1879 csr_write_4(sc, SF_IMR, 0x00000000);
1881 for (cnt = 32; (status & SF_INTRS) != 0;) {
1882 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1884 if ((status & SF_ISR_RXDQ1_DMADONE) != 0)
1887 if ((status & (SF_ISR_TX_TXDONE | SF_ISR_TX_DMADONE |
1888 SF_ISR_TX_QUEUEDONE)) != 0)
1891 if ((status & SF_ISR_ABNORMALINTR) != 0) {
1892 if ((status & SF_ISR_STATSOFLOW) != 0)
1893 sf_stats_update(sc);
1894 else if ((status & SF_ISR_TX_LOFIFO) != 0)
1895 sf_txthresh_adjust(sc);
1896 else if ((status & SF_ISR_DMAERR) != 0) {
1897 device_printf(sc->sf_dev,
1898 "DMA error, resetting\n");
1899 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1903 } else if ((status & SF_ISR_NO_TX_CSUM) != 0) {
1904 sc->sf_statistics.sf_tx_gfp_stall++;
1906 device_printf(sc->sf_dev,
1907 "TxGFP is not responding!\n");
1910 else if ((status & SF_ISR_RXGFP_NORESP) != 0) {
1911 sc->sf_statistics.sf_rx_gfp_stall++;
1913 device_printf(sc->sf_dev,
1914 "RxGFP is not responding!\n");
1918 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1919 sf_start_locked(ifp);
1922 /* Reading the ISR register clears all interrupts. */
1923 status = csr_read_4(sc, SF_ISR);
1926 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1927 /* Re-enable interrupts. */
1928 csr_write_4(sc, SF_IMR, SF_INTRS);
1936 sf_download_fw(struct sf_softc *sc)
1943 * An FP instruction is composed of 48 bits, so we have to
1944 * write it in two parts.
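 *
 * Each 6-byte instruction is written as two consecutive GFP words
 * (bytes 2-5 first, then bytes 0-1), so the slot index advances by
 * two per instruction, roughly:
 *
 *	csr_write_4(sc, SF_TXGFP_MEM_BASE + ndx * 4,
 *	    p[2] << 24 | p[3] << 16 | p[4] << 8 | p[5]);
 *	csr_write_4(sc, SF_TXGFP_MEM_BASE + (ndx + 1) * 4, p[0] << 8 | p[1]);
 *	ndx += 2;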
1948 for (i = 0; i < sizeof(txfwdata) / SF_GFP_INST_BYTES; i++) {
1949 gfpinst = p[2] << 24 | p[3] << 16 | p[4] << 8 | p[5];
1950 csr_write_4(sc, SF_TXGFP_MEM_BASE + ndx * 4, gfpinst);
1951 gfpinst = p[0] << 8 | p[1];
1952 csr_write_4(sc, SF_TXGFP_MEM_BASE + (ndx + 1) * 4, gfpinst);
1953 p += SF_GFP_INST_BYTES;
1957 device_printf(sc->sf_dev, "%d Tx instructions downloaded\n", i);
1961 for (i = 0; i < sizeof(rxfwdata) / SF_GFP_INST_BYTES; i++) {
1962 gfpinst = p[2] << 24 | p[3] << 16 | p[4] << 8 | p[5];
1963 csr_write_4(sc, SF_RXGFP_MEM_BASE + (ndx * 4), gfpinst);
1964 gfpinst = p[0] << 8 | p[1];
1965 csr_write_4(sc, SF_RXGFP_MEM_BASE + (ndx + 1) * 4, gfpinst);
1966 p += SF_GFP_INST_BYTES;
1970 device_printf(sc->sf_dev, "%d Rx instructions downloaded\n", i);
1976 struct sf_softc *sc;
1978 sc = (struct sf_softc *)xsc;
1985 sf_init_locked(struct sf_softc *sc)
1988 uint8_t eaddr[ETHER_ADDR_LEN];
1994 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1998 /* Reset the hardware to a known state. */
2001 /* Init all the receive filter registers */
2002 for (i = SF_RXFILT_PERFECT_BASE;
2003 i < (SF_RXFILT_HASH_MAX + 1); i += sizeof(uint32_t))
2004 csr_write_4(sc, i, 0);
2006 /* Empty stats counter registers. */
2007 for (i = SF_STATS_BASE; i < (SF_STATS_END + 1); i += sizeof(uint32_t))
2008 csr_write_4(sc, i, 0);
2010 /* Init our MAC address. */
2011 bcopy(IF_LLADDR(sc->sf_ifp), eaddr, sizeof(eaddr));
2012 csr_write_4(sc, SF_PAR0,
2013 eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
2014 csr_write_4(sc, SF_PAR1, eaddr[0] << 8 | eaddr[1]);
2015 sf_setperf(sc, 0, eaddr);
2017 if (sf_init_rx_ring(sc) == ENOBUFS) {
2018 device_printf(sc->sf_dev,
2019 "initialization failed: no memory for rx buffers\n");
2024 sf_init_tx_ring(sc);
2027 * Use 16-entry perfect address filtering.
2028 * Hash only the multicast destination address and accept matching
2029 * frames regardless of VLAN ID.
2031 csr_write_4(sc, SF_RXFILT, SF_PERFMODE_NORMAL | SF_HASHMODE_ANYVLAN);
2038 /* Init the completion queue indexes. */
2039 csr_write_4(sc, SF_CQ_CONSIDX, 0);
2040 csr_write_4(sc, SF_CQ_PRODIDX, 0);
2042 /* Init the RX completion queue. */
2043 addr = sc->sf_rdata.sf_rx_cring_paddr;
2044 csr_write_4(sc, SF_CQ_ADDR_HI, SF_ADDR_HI(addr));
2045 csr_write_4(sc, SF_RXCQ_CTL_1, SF_ADDR_LO(addr) & SF_RXCQ_ADDR);
2046 if (SF_ADDR_HI(addr) != 0)
2047 SF_SETBIT(sc, SF_RXCQ_CTL_1, SF_RXCQ_USE_64BIT);
2048 /* Set RX completion queue type 2. */
2049 SF_SETBIT(sc, SF_RXCQ_CTL_1, SF_RXCQTYPE_2);
2050 csr_write_4(sc, SF_RXCQ_CTL_2, 0);
2053 * Init RX DMA control:
2054 * default RxHighPriority threshold,
2055 * default RxBurstSize (128 bytes).
2057 SF_SETBIT(sc, SF_RXDMA_CTL,
2058 SF_RXDMA_REPORTBADPKTS |
2059 (SF_RXDMA_HIGHPRIO_THRESH << 8) |
2062 /* Init the RX buffer descriptor queue. */
2063 addr = sc->sf_rdata.sf_rx_ring_paddr;
2064 csr_write_4(sc, SF_RXDQ_ADDR_HI, SF_ADDR_HI(addr));
2065 csr_write_4(sc, SF_RXDQ_ADDR_Q1, SF_ADDR_LO(addr));
2067 /* Set RX queue buffer length. */
2068 csr_write_4(sc, SF_RXDQ_CTL_1,
2069 ((MCLBYTES - sizeof(uint32_t)) << 16) |
2070 SF_RXDQCTL_64BITBADDR | SF_RXDQCTL_VARIABLE);
2072 if (SF_ADDR_HI(addr) != 0)
2073 SF_SETBIT(sc, SF_RXDQ_CTL_1, SF_RXDQCTL_64BITDADDR);
2074 csr_write_4(sc, SF_RXDQ_PTR_Q1, SF_RX_DLIST_CNT - 1);
2075 csr_write_4(sc, SF_RXDQ_CTL_2, 0);
2077 /* Init the TX completion queue */
2078 addr = sc->sf_rdata.sf_tx_cring_paddr;
2079 csr_write_4(sc, SF_TXCQ_CTL, SF_ADDR_LO(addr) & SF_TXCQ_ADDR);
2080 if (SF_ADDR_HI(addr) != 0)
2081 SF_SETBIT(sc, SF_TXCQ_CTL, SF_TXCQ_USE_64BIT);
2083 /* Init the TX buffer descriptor queue. */
2084 addr = sc->sf_rdata.sf_tx_ring_paddr;
2085 csr_write_4(sc, SF_TXDQ_ADDR_HI, SF_ADDR_HI(addr));
2086 csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO, 0);
2087 csr_write_4(sc, SF_TXDQ_ADDR_LOPRIO, SF_ADDR_LO(addr));
2088 csr_write_4(sc, SF_TX_FRAMCTL,
2089 SF_TXFRMCTL_CPLAFTERTX | sc->sf_txthresh);
2090 csr_write_4(sc, SF_TXDQ_CTL,
2091 SF_TXDMA_HIPRIO_THRESH << 24 |
2092 SF_TXSKIPLEN_0BYTES << 16 |
2093 SF_TXDDMA_BURST << 8 |
2094 SF_TXBUFDESC_TYPE2 | SF_TXMINSPACE_UNLIMIT);
2095 if (SF_ADDR_HI(addr) != 0)
2096 SF_SETBIT(sc, SF_TXDQ_CTL, SF_TXDQCTL_64BITADDR);
2098 /* Set VLAN Type register. */
2099 csr_write_4(sc, SF_VLANTYPE, ETHERTYPE_VLAN);
2101 /* Set TxPause Timer. */
2102 csr_write_4(sc, SF_TXPAUSETIMER, 0xffff);
2104 /* Enable autopadding of short TX frames. */
2105 SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_AUTOPAD);
2106 SF_SETBIT(sc, SF_MACCFG_2, SF_MACCFG2_AUTOVLANPAD);
2107 /* Make sure to reset MAC to take changes effect. */
2108 SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET);
2110 SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET);
2112 /* Enable PCI bus master. */
2113 SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_PCIMEN);
2115 /* Load StarFire firmware. */
2118 /* Initialize interrupt moderation. */
2119 csr_write_4(sc, SF_TIMER_CTL, SF_TIMER_IMASK_MODE | SF_TIMER_TIMES_TEN |
2120 (sc->sf_int_mod & SF_TIMER_IMASK_INTERVAL));
2122 #ifdef DEVICE_POLLING
2123 /* Disable interrupts if we are polling. */
2124 if ((ifp->if_capenable & IFCAP_POLLING) != 0)
2125 csr_write_4(sc, SF_IMR, 0x00000000);
2128 /* Enable interrupts. */
2129 csr_write_4(sc, SF_IMR, SF_INTRS);
2130 SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_INTR_ENB);
2132 /* Enable the RX and TX engines. */
2133 csr_write_4(sc, SF_GEN_ETH_CTL,
2134 SF_ETHCTL_RX_ENB | SF_ETHCTL_RXDMA_ENB |
2135 SF_ETHCTL_TX_ENB | SF_ETHCTL_TXDMA_ENB);
2137 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
2138 SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_TXGFP_ENB);
2140 SF_CLRBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_TXGFP_ENB);
2141 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2142 SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RXGFP_ENB);
2144 SF_CLRBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RXGFP_ENB);
2146 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2147 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2150 sf_ifmedia_upd_locked(ifp);
2152 callout_reset(&sc->sf_co, hz, sf_tick, sc);
2156 sf_encap(struct sf_softc *sc, struct mbuf **m_head)
2158 struct sf_txdesc *txd;
2159 struct sf_tx_rdesc *desc;
2162 bus_dma_segment_t txsegs[SF_MAXTXSEGS];
2163 int error, i, nsegs, prod, si;
2169 prod = sc->sf_cdata.sf_tx_prod;
2170 txd = &sc->sf_cdata.sf_txdesc[prod];
2171 map = txd->tx_dmamap;
2172 error = bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_tx_tag, map,
2173 *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
2174 if (error == EFBIG) {
2175 m = m_collapse(*m_head, M_NOWAIT, SF_MAXTXSEGS);
2182 error = bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_tx_tag,
2183 map, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
2189 } else if (error != 0)
2197 /* Check number of available descriptors. */
2198 avail = (SF_TX_DLIST_CNT - 1) - sc->sf_cdata.sf_tx_cnt;
2199 if (avail < nsegs) {
2200 bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, map);
2204 if (prod + nsegs >= SF_TX_DLIST_CNT) {
2205 nskip = SF_TX_DLIST_CNT - prod - 1;
2206 if (avail < nsegs + nskip) {
2207 bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, map);
2212 bus_dmamap_sync(sc->sf_cdata.sf_tx_tag, map, BUS_DMASYNC_PREWRITE);
2215 for (i = 0; i < nsegs; i++) {
2216 desc = &sc->sf_rdata.sf_tx_ring[prod];
2217 desc->sf_tx_ctrl = htole32(SF_TX_DESC_ID |
2218 (txsegs[i].ds_len & SF_TX_DESC_FRAGLEN));
2219 desc->sf_tx_reserved = 0;
2220 desc->sf_addr = htole64(txsegs[i].ds_addr);
2221 if (i == 0 && prod + nsegs >= SF_TX_DLIST_CNT) {
2223 desc->sf_tx_ctrl |= htole32(SF_TX_DESC_END);
2226 SF_INC(prod, SF_TX_DLIST_CNT);
2228 /* Update producer index. */
2229 sc->sf_cdata.sf_tx_prod = prod;
2230 sc->sf_cdata.sf_tx_cnt += nsegs + nskip;
2232 desc = &sc->sf_rdata.sf_tx_ring[si];
2233 /* Check TCP/UDP checksum offload request. */
2234 if ((m->m_pkthdr.csum_flags & SF_CSUM_FEATURES) != 0)
2235 desc->sf_tx_ctrl |= htole32(SF_TX_DESC_CALTCP);
2237 htole32(SF_TX_DESC_CRCEN | SF_TX_DESC_INTR | (nsegs << 16));
2239 txd->tx_dmamap = map;
2241 txd->ndesc = nsegs + nskip;
2247 sf_start(struct ifnet *ifp)
2249 struct sf_softc *sc;
2253 sf_start_locked(ifp);
2258 sf_start_locked(struct ifnet *ifp)
2260 struct sf_softc *sc;
2261 struct mbuf *m_head;
2267 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2268 IFF_DRV_RUNNING || sc->sf_link == 0)
2272 * Since we cannot know in advance where a descriptor ring wrap
2273 * will occur, keep at least SF_MAXTXSEGS descriptors free so that
2274 * a maximally fragmented frame always fits.
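/*
 * For example, assuming a ring of 256 descriptors (SF_TX_DLIST_CNT)
 * and at most 16 DMA segments per frame (SF_MAXTXSEGS), new frames
 * are queued only while fewer than 240 descriptors are in use.
 */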
2276 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2277 sc->sf_cdata.sf_tx_cnt < SF_TX_DLIST_CNT - SF_MAXTXSEGS; ) {
2278 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2282 * Pack the data into the transmit ring. If we
2283 * don't have room, set the OACTIVE flag and wait
2284 * for the NIC to drain the ring.
2286 if (sf_encap(sc, &m_head)) {
2289 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2290 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2296 * If there's a BPF listener, bounce a copy of this frame
2299 ETHER_BPF_MTAP(ifp, m_head);
2303 bus_dmamap_sync(sc->sf_cdata.sf_tx_ring_tag,
2304 sc->sf_cdata.sf_tx_ring_map,
2305 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2306 /* Kick transmit. */
2307 csr_write_4(sc, SF_TXDQ_PRODIDX,
2308 sc->sf_cdata.sf_tx_prod * (sizeof(struct sf_tx_rdesc) / 8));
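/*
 * The producer index register appears to be expressed in 8-byte units
 * rather than whole descriptors, hence the scaling by
 * sizeof(struct sf_tx_rdesc) / 8 in the write above.
 */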
2310 /* Set a timeout in case the chip goes out to lunch. */
2311 sc->sf_watchdog_timer = 5;
2316 sf_stop(struct sf_softc *sc)
2318 struct sf_txdesc *txd;
2319 struct sf_rxdesc *rxd;
2327 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2329 callout_stop(&sc->sf_co);
2330 sc->sf_watchdog_timer = 0;
2332 /* Reading the ISR register clears all interrupts. */
2333 csr_read_4(sc, SF_ISR);
2334 /* Disable further interrupts. */
2335 csr_write_4(sc, SF_IMR, 0);
2337 /* Disable the Tx/Rx engines. */
2338 csr_write_4(sc, SF_GEN_ETH_CTL, 0);
2340 /* Give the hardware a chance to drain active DMA cycles. */
2343 csr_write_4(sc, SF_CQ_CONSIDX, 0);
2344 csr_write_4(sc, SF_CQ_PRODIDX, 0);
2345 csr_write_4(sc, SF_RXDQ_ADDR_Q1, 0);
2346 csr_write_4(sc, SF_RXDQ_CTL_1, 0);
2347 csr_write_4(sc, SF_RXDQ_PTR_Q1, 0);
2348 csr_write_4(sc, SF_TXCQ_CTL, 0);
2349 csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO, 0);
2350 csr_write_4(sc, SF_TXDQ_CTL, 0);
2353 * Free RX and TX mbufs still in the queues.
2355 for (i = 0; i < SF_RX_DLIST_CNT; i++) {
2356 rxd = &sc->sf_cdata.sf_rxdesc[i];
2357 if (rxd->rx_m != NULL) {
2358 bus_dmamap_sync(sc->sf_cdata.sf_rx_tag,
2359 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
2360 bus_dmamap_unload(sc->sf_cdata.sf_rx_tag,
2366 for (i = 0; i < SF_TX_DLIST_CNT; i++) {
2367 txd = &sc->sf_cdata.sf_txdesc[i];
2368 if (txd->tx_m != NULL) {
2369 bus_dmamap_sync(sc->sf_cdata.sf_tx_tag,
2370 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2371 bus_dmamap_unload(sc->sf_cdata.sf_tx_tag,
2383 struct sf_softc *sc;
2384 struct mii_data *mii;
2388 mii = device_get_softc(sc->sf_miibus);
2390 sf_stats_update(sc);
2392 callout_reset(&sc->sf_co, hz, sf_tick, sc);
2396 * Note: it is important that this function not be interrupted. We
2397 * use a two-stage register access scheme: if we are interrupted in
2398 * between setting the indirect address register and reading from the
2399 * indirect data register, the contents of the address register could
2400 * be changed out from under us.
2403 sf_stats_update(struct sf_softc *sc)
2406 struct sf_stats now, *stats, *nstats;
2414 stats->sf_tx_frames =
2415 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_FRAMES);
2416 stats->sf_tx_single_colls =
2417 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_SINGLE_COL);
2418 stats->sf_tx_multi_colls =
2419 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_MULTI_COL);
2420 stats->sf_tx_crcerrs =
2421 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_CRC_ERRS);
2422 stats->sf_tx_bytes =
2423 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_BYTES);
2424 stats->sf_tx_deferred =
2425 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_DEFERRED);
2426 stats->sf_tx_late_colls =
2427 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_LATE_COL);
2428 stats->sf_tx_pause_frames =
2429 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_PAUSE);
2430 stats->sf_tx_control_frames =
2431 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_CTL_FRAME);
2432 stats->sf_tx_excess_colls =
2433 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_EXCESS_COL);
2434 stats->sf_tx_excess_defer =
2435 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_EXCESS_DEF);
2436 stats->sf_tx_mcast_frames =
2437 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_MULTI);
2438 stats->sf_tx_bcast_frames =
2439 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_BCAST);
2440 stats->sf_tx_frames_lost =
2441 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_FRAME_LOST);
2442 stats->sf_rx_frames =
2443 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAMES);
2444 stats->sf_rx_crcerrs =
2445 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_CRC_ERRS);
2446 stats->sf_rx_alignerrs =
2447 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_ALIGN_ERRS);
2448 stats->sf_rx_bytes =
2449 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_BYTES);
2450 stats->sf_rx_pause_frames =
2451 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_PAUSE);
2452 stats->sf_rx_control_frames =
2453 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_CTL_FRAME);
2454 stats->sf_rx_unsup_control_frames =
2455 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_UNSUP_FRAME);
2456 stats->sf_rx_giants =
2457 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_GIANTS);
2458 stats->sf_rx_runts =
2459 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_RUNTS);
2460 stats->sf_rx_jabbererrs =
2461 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_JABBER);
2462 stats->sf_rx_fragments =
2463 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAGMENTS);
2464 stats->sf_rx_pkts_64 =
2465 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_64);
2466 stats->sf_rx_pkts_65_127 =
2467 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_65_127);
2468 stats->sf_rx_pkts_128_255 =
2469 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_128_255);
2470 stats->sf_rx_pkts_256_511 =
2471 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_256_511);
2472 stats->sf_rx_pkts_512_1023 =
2473 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_512_1023);
2474 stats->sf_rx_pkts_1024_1518 =
2475 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_1024_1518);
2476 stats->sf_rx_frames_lost =
2477 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAME_LOST);
2478 /* Only the lower 16 bits are valid. */
2479 stats->sf_tx_underruns =
2480 (csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_UNDERRUN) & 0xffff);
2482 /* Clear the statistics counter registers. */
2483 for (i = SF_STATS_BASE; i < (SF_STATS_END + 1); i += sizeof(uint32_t))
2484 csr_write_4(sc, i, 0);
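/*
 * Since the hardware counters are zeroed after every read, each sample
 * collected above is a per-interval delta; the running totals are
 * accumulated into sc->sf_statistics further below.
 */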
2486 if_inc_counter(ifp, IFCOUNTER_OPACKETS, (u_long)stats->sf_tx_frames);
2488 if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
2489 (u_long)stats->sf_tx_single_colls +
2490 (u_long)stats->sf_tx_multi_colls);
2492 if_inc_counter(ifp, IFCOUNTER_OERRORS,
2493 (u_long)stats->sf_tx_excess_colls +
2494 (u_long)stats->sf_tx_excess_defer +
2495 (u_long)stats->sf_tx_frames_lost);
2497 if_inc_counter(ifp, IFCOUNTER_IPACKETS, (u_long)stats->sf_rx_frames);
2499 if_inc_counter(ifp, IFCOUNTER_IERRORS,
2500 (u_long)stats->sf_rx_crcerrs +
2501 (u_long)stats->sf_rx_alignerrs +
2502 (u_long)stats->sf_rx_giants +
2503 (u_long)stats->sf_rx_runts +
2504 (u_long)stats->sf_rx_jabbererrs +
2505 (u_long)stats->sf_rx_frames_lost);
2507 nstats = &sc->sf_statistics;
2509 nstats->sf_tx_frames += stats->sf_tx_frames;
2510 nstats->sf_tx_single_colls += stats->sf_tx_single_colls;
2511 nstats->sf_tx_multi_colls += stats->sf_tx_multi_colls;
2512 nstats->sf_tx_crcerrs += stats->sf_tx_crcerrs;
2513 nstats->sf_tx_bytes += stats->sf_tx_bytes;
2514 nstats->sf_tx_deferred += stats->sf_tx_deferred;
2515 nstats->sf_tx_late_colls += stats->sf_tx_late_colls;
2516 nstats->sf_tx_pause_frames += stats->sf_tx_pause_frames;
2517 nstats->sf_tx_control_frames += stats->sf_tx_control_frames;
2518 nstats->sf_tx_excess_colls += stats->sf_tx_excess_colls;
2519 nstats->sf_tx_excess_defer += stats->sf_tx_excess_defer;
2520 nstats->sf_tx_mcast_frames += stats->sf_tx_mcast_frames;
2521 nstats->sf_tx_bcast_frames += stats->sf_tx_bcast_frames;
2522 nstats->sf_tx_frames_lost += stats->sf_tx_frames_lost;
2523 nstats->sf_rx_frames += stats->sf_rx_frames;
2524 nstats->sf_rx_crcerrs += stats->sf_rx_crcerrs;
2525 nstats->sf_rx_alignerrs += stats->sf_rx_alignerrs;
2526 nstats->sf_rx_bytes += stats->sf_rx_bytes;
2527 nstats->sf_rx_pause_frames += stats->sf_rx_pause_frames;
2528 nstats->sf_rx_control_frames += stats->sf_rx_control_frames;
2529 nstats->sf_rx_unsup_control_frames += stats->sf_rx_unsup_control_frames;
2530 nstats->sf_rx_giants += stats->sf_rx_giants;
2531 nstats->sf_rx_runts += stats->sf_rx_runts;
2532 nstats->sf_rx_jabbererrs += stats->sf_rx_jabbererrs;
2533 nstats->sf_rx_fragments += stats->sf_rx_fragments;
2534 nstats->sf_rx_pkts_64 += stats->sf_rx_pkts_64;
2535 nstats->sf_rx_pkts_65_127 += stats->sf_rx_pkts_65_127;
2536 nstats->sf_rx_pkts_128_255 += stats->sf_rx_pkts_128_255;
2537 nstats->sf_rx_pkts_256_511 += stats->sf_rx_pkts_256_511;
2538 nstats->sf_rx_pkts_512_1023 += stats->sf_rx_pkts_512_1023;
2539 nstats->sf_rx_pkts_1024_1518 += stats->sf_rx_pkts_1024_1518;
2540 nstats->sf_rx_frames_lost += stats->sf_rx_frames_lost;
2541 nstats->sf_tx_underruns += stats->sf_tx_underruns;
2545 sf_watchdog(struct sf_softc *sc)
2551 if (sc->sf_watchdog_timer == 0 || --sc->sf_watchdog_timer)
2556 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2557 if (sc->sf_link == 0) {
2559 if_printf(sc->sf_ifp, "watchdog timeout "
2562 if_printf(ifp, "watchdog timeout, %d Tx descs are active\n",
2563 sc->sf_cdata.sf_tx_cnt);
2565 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2568 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2569 sf_start_locked(ifp);
2573 sf_shutdown(device_t dev)
2575 struct sf_softc *sc;
2577 sc = device_get_softc(dev);
2587 sf_suspend(device_t dev)
2589 struct sf_softc *sc;
2591 sc = device_get_softc(dev);
2595 sc->sf_suspended = 1;
2596 bus_generic_suspend(dev);
2603 sf_resume(device_t dev)
2605 struct sf_softc *sc;
2608 sc = device_get_softc(dev);
2611 bus_generic_resume(dev);
2613 if ((ifp->if_flags & IFF_UP) != 0)
2616 sc->sf_suspended = 0;
2623 sf_sysctl_stats(SYSCTL_HANDLER_ARGS)
2625 struct sf_softc *sc;
2626 struct sf_stats *stats;
2631 error = sysctl_handle_int(oidp, &result, 0, req);
2633 if (error != 0 || req->newptr == NULL)
2639 sc = (struct sf_softc *)arg1;
2640 stats = &sc->sf_statistics;
2642 printf("%s statistics:\n", device_get_nameunit(sc->sf_dev));
2643 printf("Transmit good frames : %ju\n",
2644 (uintmax_t)stats->sf_tx_frames);
2645 printf("Transmit good octets : %ju\n",
2646 (uintmax_t)stats->sf_tx_bytes);
2647 printf("Transmit single collisions : %u\n",
2648 stats->sf_tx_single_colls);
2649 printf("Transmit multiple collisions : %u\n",
2650 stats->sf_tx_multi_colls);
2651 printf("Transmit late collisions : %u\n",
2652 stats->sf_tx_late_colls);
2653 printf("Transmit abort due to excessive collisions : %u\n",
2654 stats->sf_tx_excess_colls);
2655 printf("Transmit CRC errors : %u\n",
2656 stats->sf_tx_crcerrs);
2657 printf("Transmit deferrals : %u\n",
2658 stats->sf_tx_deferred);
2659 printf("Transmit abort due to excessive deferrals : %u\n",
2660 stats->sf_tx_excess_defer);
2661 printf("Transmit pause control frames : %u\n",
2662 stats->sf_tx_pause_frames);
2663 printf("Transmit control frames : %u\n",
2664 stats->sf_tx_control_frames);
2665 printf("Transmit good multicast frames : %u\n",
2666 stats->sf_tx_mcast_frames);
2667 printf("Transmit good broadcast frames : %u\n",
2668 stats->sf_tx_bcast_frames);
2669 printf("Transmit frames lost due to internal transmit errors : %u\n",
2670 stats->sf_tx_frames_lost);
2671 printf("Transmit FIFO underflows : %u\n",
2672 stats->sf_tx_underruns);
2673 printf("Transmit GFP stalls : %u\n", stats->sf_tx_gfp_stall);
2674 printf("Receive good frames : %ju\n",
2675 (uintmax_t)stats->sf_rx_frames);
2676 printf("Receive good octets : %ju\n",
2677 (uintmax_t)stats->sf_rx_bytes);
2678 printf("Receive CRC errors : %u\n",
2679 stats->sf_rx_crcerrs);
2680 printf("Receive alignment errors : %u\n",
2681 stats->sf_rx_alignerrs);
2682 printf("Receive pause frames : %u\n",
2683 stats->sf_rx_pause_frames);
2684 printf("Receive control frames : %u\n",
2685 stats->sf_rx_control_frames);
2686 printf("Receive control frames with unsupported opcode : %u\n",
2687 stats->sf_rx_unsup_control_frames);
2688 printf("Receive frames too long : %u\n",
2689 stats->sf_rx_giants);
2690 printf("Receive frames too short : %u\n",
2691 stats->sf_rx_runts);
2692 printf("Receive frame jabber errors : %u\n",
2693 stats->sf_rx_jabbererrs);
2694 printf("Receive frame fragments : %u\n",
2695 stats->sf_rx_fragments);
2696 printf("Receive packets 64 bytes : %ju\n",
2697 (uintmax_t)stats->sf_rx_pkts_64);
2698 printf("Receive packets 65 to 127 bytes : %ju\n",
2699 (uintmax_t)stats->sf_rx_pkts_65_127);
2700 printf("Receive packets 128 to 255 bytes : %ju\n",
2701 (uintmax_t)stats->sf_rx_pkts_128_255);
2702 printf("Receive packets 256 to 511 bytes : %ju\n",
2703 (uintmax_t)stats->sf_rx_pkts_256_511);
2704 printf("Receive packets 512 to 1023 bytes : %ju\n",
2705 (uintmax_t)stats->sf_rx_pkts_512_1023);
2706 printf("Receive packets 1024 to 1518 bytes : %ju\n",
2707 (uintmax_t)stats->sf_rx_pkts_1024_1518);
2708 printf("Receive frames lost due to internal receive errors : %u\n",
2709 stats->sf_rx_frames_lost);
2710 printf("Receive GFP stalls : %u\n", stats->sf_rx_gfp_stall);
2716 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
2722 value = *(int *)arg1;
2723 error = sysctl_handle_int(oidp, &value, 0, req);
2724 if (error || !req->newptr)
2726 if (value < low || value > high)
2728 *(int *)arg1 = value;
2734 sysctl_hw_sf_int_mod(SYSCTL_HANDLER_ARGS)
2737 return (sysctl_int_range(oidp, arg1, arg2, req, SF_IM_MIN, SF_IM_MAX));