2 * Copyright (c) 1997, 1998, 1999
3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
37 * Adaptec AIC-6915 "Starfire" PCI fast ethernet driver for FreeBSD.
38 * Programming manual is available from:
39 * http://download.adaptec.com/pdfs/user_guides/aic6915_pg.pdf.
41 * Written by Bill Paul <wpaul@ctr.columbia.edu>
42 * Department of Electrical Engineering
43 * Columbia University, New York City
46 * The Adaptec AIC-6915 "Starfire" is a 64-bit 10/100 PCI ethernet
47 * controller designed with flexibility and reducing CPU load in mind.
48 * The Starfire offers high and low priority buffer queues, a
49 * producer/consumer index mechanism and several different buffer
50 * queue and completion queue descriptor types. Any one of a number
51 * of different driver designs can be used, depending on system and
52 * OS requirements. This driver makes use of type2 transmit frame
53 * descriptors to take full advantage of fragmented packet buffers
54 * and two RX buffer queues prioritized on size (one queue for small
55 * frames that will fit into a single mbuf, another with full size
56 * mbuf clusters for everything else). The producer/consumer indexes
57 * and completion queues are also used.
59 * One downside to the Starfire has to do with alignment: buffer
60 * queues must be aligned on 256-byte boundaries, and receive buffers
61 * must be aligned on longword boundaries. The receive buffer alignment
62 * causes problems on the strict alignment architecture, where the
63 * packet payload should be longword aligned. There is no simple way
66 * For receive filtering, the Starfire offers 16 perfect filter slots
67 * and a 512-bit hash table.
69 * The Starfire has no internal transceiver, relying instead on an
70 * external MII-based transceiver. Accessing registers on external
71 * PHYs is done through a special register map rather than with the
72 * usual bitbang MDIO method.
74 * Accessing the registers on the Starfire is a little tricky. The
75 * Starfire has a 512K internal register space. When programmed for
76 * PCI memory mapped mode, the entire register space can be accessed
77 * directly. However in I/O space mode, only 256 bytes are directly
78 * mapped into PCI I/O space. The other registers can be accessed
79 * indirectly using the SF_INDIRECTIO_ADDR and SF_INDIRECTIO_DATA
80 * registers inside the 256-byte I/O window.
83 #ifdef HAVE_KERNEL_OPTION_HEADERS
84 #include "opt_device_polling.h"
87 #include <sys/param.h>
88 #include <sys/systm.h>
90 #include <sys/endian.h>
91 #include <sys/kernel.h>
92 #include <sys/malloc.h>
95 #include <sys/module.h>
96 #include <sys/socket.h>
97 #include <sys/sockio.h>
98 #include <sys/sysctl.h>
102 #include <net/if_arp.h>
103 #include <net/ethernet.h>
104 #include <net/if_dl.h>
105 #include <net/if_media.h>
106 #include <net/if_types.h>
107 #include <net/if_vlan_var.h>
109 #include <dev/mii/mii.h>
110 #include <dev/mii/miivar.h>
112 #include <dev/pci/pcireg.h>
113 #include <dev/pci/pcivar.h>
115 #include <machine/bus.h>
117 #include <dev/sf/if_sfreg.h>
118 #include <dev/sf/starfire_rx.h>
119 #include <dev/sf/starfire_tx.h>
121 /* "device miibus" required. See GENERIC if you get errors here. */
122 #include "miibus_if.h"
124 MODULE_DEPEND(sf, pci, 1, 1, 1);
125 MODULE_DEPEND(sf, ether, 1, 1, 1);
126 MODULE_DEPEND(sf, miibus, 1, 1, 1);
129 #define SF_CSUM_FEATURES (CSUM_TCP | CSUM_UDP)
130 /* Define this to activate partial TCP/UDP checksum offload. */
131 #undef SF_PARTIAL_CSUM_SUPPORT
133 static struct sf_type sf_devs[] = {
134 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
135 AD_SUBSYSID_62011_REV0, "Adaptec ANA-62011 (rev 0) 10/100BaseTX" },
136 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
137 AD_SUBSYSID_62011_REV1, "Adaptec ANA-62011 (rev 1) 10/100BaseTX" },
138 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
139 AD_SUBSYSID_62022, "Adaptec ANA-62022 10/100BaseTX" },
140 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
141 AD_SUBSYSID_62044_REV0, "Adaptec ANA-62044 (rev 0) 10/100BaseTX" },
142 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
143 AD_SUBSYSID_62044_REV1, "Adaptec ANA-62044 (rev 1) 10/100BaseTX" },
144 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
145 AD_SUBSYSID_62020, "Adaptec ANA-62020 10/100BaseFX" },
146 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
147 AD_SUBSYSID_69011, "Adaptec ANA-69011 10/100BaseTX" },
150 static int sf_probe(device_t);
151 static int sf_attach(device_t);
152 static int sf_detach(device_t);
153 static int sf_shutdown(device_t);
154 static int sf_suspend(device_t);
155 static int sf_resume(device_t);
156 static void sf_intr(void *);
157 static void sf_tick(void *);
158 static void sf_stats_update(struct sf_softc *);
159 #ifndef __NO_STRICT_ALIGNMENT
160 static __inline void sf_fixup_rx(struct mbuf *);
162 static int sf_rxeof(struct sf_softc *);
163 static void sf_txeof(struct sf_softc *);
164 static int sf_encap(struct sf_softc *, struct mbuf **);
165 static void sf_start(struct ifnet *);
166 static void sf_start_locked(struct ifnet *);
167 static int sf_ioctl(struct ifnet *, u_long, caddr_t);
168 static void sf_download_fw(struct sf_softc *);
169 static void sf_init(void *);
170 static void sf_init_locked(struct sf_softc *);
171 static void sf_stop(struct sf_softc *);
172 static void sf_watchdog(struct sf_softc *);
173 static int sf_ifmedia_upd(struct ifnet *);
174 static int sf_ifmedia_upd_locked(struct ifnet *);
175 static void sf_ifmedia_sts(struct ifnet *, struct ifmediareq *);
176 static void sf_reset(struct sf_softc *);
177 static int sf_dma_alloc(struct sf_softc *);
178 static void sf_dma_free(struct sf_softc *);
179 static int sf_init_rx_ring(struct sf_softc *);
180 static void sf_init_tx_ring(struct sf_softc *);
181 static int sf_newbuf(struct sf_softc *, int);
182 static void sf_rxfilter(struct sf_softc *);
183 static int sf_setperf(struct sf_softc *, int, uint8_t *);
184 static int sf_sethash(struct sf_softc *, caddr_t, int);
186 static int sf_setvlan(struct sf_softc *, int, uint32_t);
189 static uint8_t sf_read_eeprom(struct sf_softc *, int);
191 static int sf_miibus_readreg(device_t, int, int);
192 static int sf_miibus_writereg(device_t, int, int, int);
193 static void sf_miibus_statchg(device_t);
194 #ifdef DEVICE_POLLING
195 static int sf_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
198 static uint32_t csr_read_4(struct sf_softc *, int);
199 static void csr_write_4(struct sf_softc *, int, uint32_t);
200 static void sf_txthresh_adjust(struct sf_softc *);
201 static int sf_sysctl_stats(SYSCTL_HANDLER_ARGS);
202 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
203 static int sysctl_hw_sf_int_mod(SYSCTL_HANDLER_ARGS);
205 static device_method_t sf_methods[] = {
206 /* Device interface */
207 DEVMETHOD(device_probe, sf_probe),
208 DEVMETHOD(device_attach, sf_attach),
209 DEVMETHOD(device_detach, sf_detach),
210 DEVMETHOD(device_shutdown, sf_shutdown),
211 DEVMETHOD(device_suspend, sf_suspend),
212 DEVMETHOD(device_resume, sf_resume),
215 DEVMETHOD(miibus_readreg, sf_miibus_readreg),
216 DEVMETHOD(miibus_writereg, sf_miibus_writereg),
217 DEVMETHOD(miibus_statchg, sf_miibus_statchg),
222 static driver_t sf_driver = {
225 sizeof(struct sf_softc),
228 static devclass_t sf_devclass;
230 DRIVER_MODULE(sf, pci, sf_driver, sf_devclass, 0, 0);
231 DRIVER_MODULE(miibus, sf, miibus_driver, miibus_devclass, 0, 0);
/* Set bit(s) 'x' in indirect register 'reg' (read-modify-write). */
#define	SF_SETBIT(sc, reg, x)	\
	csr_write_4(sc, reg, csr_read_4(sc, reg) | (x))

/* Clear bit(s) 'x' in indirect register 'reg' (read-modify-write). */
#define	SF_CLRBIT(sc, reg, x)	\
	csr_write_4(sc, reg, csr_read_4(sc, reg) & ~(x))
240 csr_read_4(struct sf_softc *sc, int reg)
244 if (sc->sf_restype == SYS_RES_MEMORY)
245 val = CSR_READ_4(sc, (reg + SF_RMAP_INTREG_BASE));
247 CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE);
248 val = CSR_READ_4(sc, SF_INDIRECTIO_DATA);
255 sf_read_eeprom(struct sf_softc *sc, int reg)
259 val = (csr_read_4(sc, SF_EEADDR_BASE +
260 (reg & 0xFFFFFFFC)) >> (8 * (reg & 3))) & 0xFF;
266 csr_write_4(struct sf_softc *sc, int reg, uint32_t val)
269 if (sc->sf_restype == SYS_RES_MEMORY)
270 CSR_WRITE_4(sc, (reg + SF_RMAP_INTREG_BASE), val);
272 CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE);
273 CSR_WRITE_4(sc, SF_INDIRECTIO_DATA, val);
278 * Copy the address 'mac' into the perfect RX filter entry at
279 * offset 'idx.' The perfect filter only has 16 entries so do
283 sf_setperf(struct sf_softc *sc, int idx, uint8_t *mac)
286 if (idx < 0 || idx > SF_RXFILT_PERFECT_CNT)
292 csr_write_4(sc, SF_RXFILT_PERFECT_BASE +
293 (idx * SF_RXFILT_PERFECT_SKIP) + 0, mac[5] | (mac[4] << 8));
294 csr_write_4(sc, SF_RXFILT_PERFECT_BASE +
295 (idx * SF_RXFILT_PERFECT_SKIP) + 4, mac[3] | (mac[2] << 8));
296 csr_write_4(sc, SF_RXFILT_PERFECT_BASE +
297 (idx * SF_RXFILT_PERFECT_SKIP) + 8, mac[1] | (mac[0] << 8));
303 * Set the bit in the 512-bit hash table that corresponds to the
304 * specified mac address 'mac.' If 'prio' is nonzero, update the
305 * priority hash table instead of the filter hash table.
308 sf_sethash(struct sf_softc *sc, caddr_t mac, int prio)
315 h = ether_crc32_be(mac, ETHER_ADDR_LEN) >> 23;
318 SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_PRIOOFF +
319 (SF_RXFILT_HASH_SKIP * (h >> 4)), (1 << (h & 0xF)));
321 SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_ADDROFF +
322 (SF_RXFILT_HASH_SKIP * (h >> 4)), (1 << (h & 0xF)));
330 * Set a VLAN tag in the receive filter.
333 sf_setvlan(struct sf_softc *sc, int idx, uint32_t vlan)
336 if (idx < 0 || idx >> SF_RXFILT_HASH_CNT)
339 csr_write_4(sc, SF_RXFILT_HASH_BASE +
340 (idx * SF_RXFILT_HASH_SKIP) + SF_RXFILT_HASH_VLANOFF, vlan);
347 sf_miibus_readreg(device_t dev, int phy, int reg)
353 sc = device_get_softc(dev);
355 for (i = 0; i < SF_TIMEOUT; i++) {
356 val = csr_read_4(sc, SF_PHY_REG(phy, reg));
357 if ((val & SF_MII_DATAVALID) != 0)
364 val &= SF_MII_DATAPORT;
372 sf_miibus_writereg(device_t dev, int phy, int reg, int val)
378 sc = device_get_softc(dev);
380 csr_write_4(sc, SF_PHY_REG(phy, reg), val);
382 for (i = 0; i < SF_TIMEOUT; i++) {
383 busy = csr_read_4(sc, SF_PHY_REG(phy, reg));
384 if ((busy & SF_MII_BUSY) == 0)
392 sf_miibus_statchg(device_t dev)
395 struct mii_data *mii;
399 sc = device_get_softc(dev);
400 mii = device_get_softc(sc->sf_miibus);
402 if (mii == NULL || ifp == NULL ||
403 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
406 if (mii->mii_media_status & IFM_ACTIVE) {
407 if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
412 val = csr_read_4(sc, SF_MACCFG_1);
413 val &= ~SF_MACCFG1_FULLDUPLEX;
414 val &= ~(SF_MACCFG1_RX_FLOWENB | SF_MACCFG1_TX_FLOWENB);
415 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
416 val |= SF_MACCFG1_FULLDUPLEX;
417 csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_FDX);
419 /* Configure flow-control bits. */
420 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
421 IFM_ETH_RXPAUSE) != 0)
422 val |= SF_MACCFG1_RX_FLOWENB;
423 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
424 IFM_ETH_TXPAUSE) != 0)
425 val |= SF_MACCFG1_TX_FLOWENB;
428 csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_HDX);
430 /* Make sure to reset MAC to take changes effect. */
431 csr_write_4(sc, SF_MACCFG_1, val | SF_MACCFG1_SOFTRESET);
433 csr_write_4(sc, SF_MACCFG_1, val);
435 val = csr_read_4(sc, SF_TIMER_CTL);
436 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
437 val |= SF_TIMER_TIMES_TEN;
439 val &= ~SF_TIMER_TIMES_TEN;
440 csr_write_4(sc, SF_TIMER_CTL, val);
/*
 * Program the chip's RX filter: wipe all perfect-filter slots and the
 * whole hash table, derive the broadcast/promiscuous/allmulti policy
 * bits from the interface flags, then install the multicast groups —
 * the first groups into the perfect filter, the overflow into the
 * 512-bit hash table via sf_sethash().
 *
 * NOTE(review): this copy of the function is missing several lines
 * (local declarations of 'ifp', 'i' and 'rxfilt', the multicast-list
 * lock acquisition, the sf_setperf()/sf_sethash() call heads and the
 * closing braces) and each line carries stray line-number residue —
 * restore from the pristine source before building.
 */
444 sf_rxfilter(struct sf_softc *sc)
448 struct ifmultiaddr *ifma;
449 uint8_t dummy[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
454 /* First zot all the existing filters. */
455 for (i = 1; i < SF_RXFILT_PERFECT_CNT; i++)
456 sf_setperf(sc, i, dummy);
457 for (i = SF_RXFILT_HASH_BASE; i < (SF_RXFILT_HASH_MAX + 1);
458 i += sizeof(uint32_t))
459 csr_write_4(sc, i, 0);
461 rxfilt = csr_read_4(sc, SF_RXFILT);
462 rxfilt &= ~(SF_RXFILT_PROMISC | SF_RXFILT_ALLMULTI | SF_RXFILT_BROAD);
463 if ((ifp->if_flags & IFF_BROADCAST) != 0)
464 rxfilt |= SF_RXFILT_BROAD;
465 if ((ifp->if_flags & IFF_ALLMULTI) != 0 ||
466 (ifp->if_flags & IFF_PROMISC) != 0) {
467 if ((ifp->if_flags & IFF_PROMISC) != 0)
468 rxfilt |= SF_RXFILT_PROMISC;
469 if ((ifp->if_flags & IFF_ALLMULTI) != 0)
470 rxfilt |= SF_RXFILT_ALLMULTI;
474 /* Now program new ones. */
477 TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead,
479 if (ifma->ifma_addr->sa_family != AF_LINK)
482 * Program the first 15 multicast groups
483 * into the perfect filter. For all others,
484 * use the hash table.
486 if (i < SF_RXFILT_PERFECT_CNT) {
488 LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
494 LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 0);
496 if_maddr_runlock(ifp);
499 csr_write_4(sc, SF_RXFILT, rxfilt);
506 sf_ifmedia_upd(struct ifnet *ifp)
513 error = sf_ifmedia_upd_locked(ifp);
519 sf_ifmedia_upd_locked(struct ifnet *ifp)
522 struct mii_data *mii;
523 struct mii_softc *miisc;
526 mii = device_get_softc(sc->sf_miibus);
527 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
528 mii_phy_reset(miisc);
529 return (mii_mediachg(mii));
533 * Report current media status.
536 sf_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
539 struct mii_data *mii;
543 if ((ifp->if_flags & IFF_UP) == 0) {
548 mii = device_get_softc(sc->sf_miibus);
550 ifmr->ifm_active = mii->mii_media_active;
551 ifmr->ifm_status = mii->mii_media_status;
556 sf_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
560 struct mii_data *mii;
564 ifr = (struct ifreq *)data;
570 if (ifp->if_flags & IFF_UP) {
571 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
572 if ((ifp->if_flags ^ sc->sf_if_flags) &
573 (IFF_PROMISC | IFF_ALLMULTI))
576 if (sc->sf_detach == 0)
580 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
583 sc->sf_if_flags = ifp->if_flags;
589 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
595 mii = device_get_softc(sc->sf_miibus);
596 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
599 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
600 #ifdef DEVICE_POLLING
601 if ((mask & IFCAP_POLLING) != 0) {
602 if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
603 error = ether_poll_register(sf_poll, ifp);
607 /* Disable interrupts. */
608 csr_write_4(sc, SF_IMR, 0);
609 ifp->if_capenable |= IFCAP_POLLING;
612 error = ether_poll_deregister(ifp);
613 /* Enable interrupts. */
615 csr_write_4(sc, SF_IMR, SF_INTRS);
616 ifp->if_capenable &= ~IFCAP_POLLING;
620 #endif /* DEVICE_POLLING */
621 if ((mask & IFCAP_TXCSUM) != 0) {
622 if ((IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
624 ifp->if_capenable ^= IFCAP_TXCSUM;
625 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0) {
626 ifp->if_hwassist |= SF_CSUM_FEATURES;
627 SF_SETBIT(sc, SF_GEN_ETH_CTL,
628 SF_ETHCTL_TXGFP_ENB);
630 ifp->if_hwassist &= ~SF_CSUM_FEATURES;
631 SF_CLRBIT(sc, SF_GEN_ETH_CTL,
632 SF_ETHCTL_TXGFP_ENB);
637 if ((mask & IFCAP_RXCSUM) != 0) {
638 if ((IFCAP_RXCSUM & ifp->if_capabilities) != 0) {
640 ifp->if_capenable ^= IFCAP_RXCSUM;
641 if ((IFCAP_RXCSUM & ifp->if_capenable) != 0)
642 SF_SETBIT(sc, SF_GEN_ETH_CTL,
643 SF_ETHCTL_RXGFP_ENB);
645 SF_CLRBIT(sc, SF_GEN_ETH_CTL,
646 SF_ETHCTL_RXGFP_ENB);
652 error = ether_ioctl(ifp, command, data);
660 sf_reset(struct sf_softc *sc)
664 csr_write_4(sc, SF_GEN_ETH_CTL, 0);
665 SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET);
667 SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET);
669 SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_RESET);
671 for (i = 0; i < SF_TIMEOUT; i++) {
673 if (!(csr_read_4(sc, SF_PCI_DEVCFG) & SF_PCIDEVCFG_RESET))
678 device_printf(sc->sf_dev, "reset never completed!\n");
680 /* Wait a little while for the chip to get its brains in order. */
685 * Probe for an Adaptec AIC-6915 chip. Check the PCI vendor and device
686 * IDs against our list and return a device name if we find a match.
687 * We also check the subsystem ID so that we can identify exactly which
688 * NIC has been found, if possible.
691 sf_probe(device_t dev)
699 vid = pci_get_vendor(dev);
700 did = pci_get_device(dev);
701 sdid = pci_get_subdevice(dev);
704 for (i = 0; i < sizeof(sf_devs) / sizeof(sf_devs[0]); i++, t++) {
705 if (vid == t->sf_vid && did == t->sf_did) {
706 if (sdid == t->sf_sdid) {
707 device_set_desc(dev, t->sf_sname);
708 return (BUS_PROBE_DEFAULT);
713 if (vid == AD_VENDORID && did == AD_DEVICEID_STARFIRE) {
714 /* unkown subdevice */
715 device_set_desc(dev, sf_devs[0].sf_name);
716 return (BUS_PROBE_DEFAULT);
723 * Attach the interface. Allocate softc structures, do ifmedia
724 * setup and ethernet/BPF attach.
727 sf_attach(device_t dev)
734 uint8_t eaddr[ETHER_ADDR_LEN];
736 sc = device_get_softc(dev);
739 mtx_init(&sc->sf_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
741 callout_init_mtx(&sc->sf_co, &sc->sf_mtx, 0);
744 * Map control/status registers.
746 pci_enable_busmaster(dev);
749 * Prefer memory space register mapping over I/O space as the
750 * hardware requires lots of register access to get various
751 * producer/consumer index during Tx/Rx operation. However this
752 * requires large memory space(512K) to map the entire register
755 sc->sf_rid = PCIR_BAR(0);
756 sc->sf_restype = SYS_RES_MEMORY;
757 sc->sf_res = bus_alloc_resource_any(dev, sc->sf_restype, &sc->sf_rid,
759 if (sc->sf_res == NULL) {
760 reg = pci_read_config(dev, PCIR_BAR(0), 4);
761 if ((reg & PCIM_BAR_MEM_64) == PCIM_BAR_MEM_64)
762 sc->sf_rid = PCIR_BAR(2);
764 sc->sf_rid = PCIR_BAR(1);
765 sc->sf_restype = SYS_RES_IOPORT;
766 sc->sf_res = bus_alloc_resource_any(dev, sc->sf_restype,
767 &sc->sf_rid, RF_ACTIVE);
768 if (sc->sf_res == NULL) {
769 device_printf(dev, "couldn't allocate resources\n");
770 mtx_destroy(&sc->sf_mtx);
775 device_printf(dev, "using %s space register mapping\n",
776 sc->sf_restype == SYS_RES_MEMORY ? "memory" : "I/O");
778 reg = pci_read_config(dev, PCIR_CACHELNSZ, 1);
781 * If cache line size is 0, MWI is not used at all, so set
782 * reasonable default. AIC-6915 supports 0, 4, 8, 16, 32
786 device_printf(dev, "setting PCI cache line size to %u\n", reg);
787 pci_write_config(dev, PCIR_CACHELNSZ, reg, 1);
790 device_printf(dev, "PCI cache line size : %u\n", reg);
793 reg = pci_read_config(dev, PCIR_COMMAND, 2);
794 reg |= PCIM_CMD_MWRICEN;
795 pci_write_config(dev, PCIR_COMMAND, reg, 2);
797 /* Allocate interrupt. */
799 sc->sf_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
800 RF_SHAREABLE | RF_ACTIVE);
802 if (sc->sf_irq == NULL) {
803 device_printf(dev, "couldn't map interrupt\n");
808 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
809 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
810 OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
811 sf_sysctl_stats, "I", "Statistics");
813 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
814 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
815 OID_AUTO, "int_mod", CTLTYPE_INT | CTLFLAG_RW,
816 &sc->sf_int_mod, 0, sysctl_hw_sf_int_mod, "I",
817 "sf interrupt moderation");
818 /* Pull in device tunables. */
819 sc->sf_int_mod = SF_IM_DEFAULT;
820 error = resource_int_value(device_get_name(dev), device_get_unit(dev),
821 "int_mod", &sc->sf_int_mod);
823 if (sc->sf_int_mod < SF_IM_MIN ||
824 sc->sf_int_mod > SF_IM_MAX) {
825 device_printf(dev, "int_mod value out of range; "
826 "using default: %d\n", SF_IM_DEFAULT);
827 sc->sf_int_mod = SF_IM_DEFAULT;
831 /* Reset the adapter. */
835 * Get station address from the EEPROM.
837 for (i = 0; i < ETHER_ADDR_LEN; i++)
839 sf_read_eeprom(sc, SF_EE_NODEADDR + ETHER_ADDR_LEN - i);
841 /* Allocate DMA resources. */
842 if (sf_dma_alloc(sc) != 0) {
847 sc->sf_txthresh = SF_MIN_TX_THRESHOLD;
849 ifp = sc->sf_ifp = if_alloc(IFT_ETHER);
851 device_printf(dev, "can not allocate ifnet structure\n");
857 error = mii_attach(dev, &sc->sf_miibus, ifp, sf_ifmedia_upd,
858 sf_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
860 device_printf(dev, "attaching PHYs failed\n");
865 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
866 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
867 ifp->if_ioctl = sf_ioctl;
868 ifp->if_start = sf_start;
869 ifp->if_init = sf_init;
870 IFQ_SET_MAXLEN(&ifp->if_snd, SF_TX_DLIST_CNT - 1);
871 ifp->if_snd.ifq_drv_maxlen = SF_TX_DLIST_CNT - 1;
872 IFQ_SET_READY(&ifp->if_snd);
874 * With the help of firmware, AIC-6915 supports
875 * Tx/Rx TCP/UDP checksum offload.
877 ifp->if_hwassist = SF_CSUM_FEATURES;
878 ifp->if_capabilities = IFCAP_HWCSUM;
881 * Call MI attach routine.
883 ether_ifattach(ifp, eaddr);
885 /* VLAN capability setup. */
886 ifp->if_capabilities |= IFCAP_VLAN_MTU;
887 ifp->if_capenable = ifp->if_capabilities;
888 #ifdef DEVICE_POLLING
889 ifp->if_capabilities |= IFCAP_POLLING;
892 * Tell the upper layer(s) we support long frames.
893 * Must appear after the call to ether_ifattach() because
894 * ether_ifattach() sets ifi_hdrlen to the default value.
896 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
898 /* Hook interrupt last to avoid having to lock softc */
899 error = bus_setup_intr(dev, sc->sf_irq, INTR_TYPE_NET | INTR_MPSAFE,
900 NULL, sf_intr, sc, &sc->sf_intrhand);
903 device_printf(dev, "couldn't set up irq\n");
916 * Shutdown hardware and free up resources. This can be called any
917 * time after the mutex has been initialized. It is called in both
918 * the error case in attach and the normal detach case so it needs
919 * to be careful about only freeing resources that have actually been
923 sf_detach(device_t dev)
928 sc = device_get_softc(dev);
931 #ifdef DEVICE_POLLING
932 if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
933 ether_poll_deregister(ifp);
936 /* These should only be active if attach succeeded */
937 if (device_is_attached(dev)) {
942 callout_drain(&sc->sf_co);
947 device_delete_child(dev, sc->sf_miibus);
948 sc->sf_miibus = NULL;
950 bus_generic_detach(dev);
952 if (sc->sf_intrhand != NULL)
953 bus_teardown_intr(dev, sc->sf_irq, sc->sf_intrhand);
954 if (sc->sf_irq != NULL)
955 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sf_irq);
956 if (sc->sf_res != NULL)
957 bus_release_resource(dev, sc->sf_restype, sc->sf_rid,
964 mtx_destroy(&sc->sf_mtx);
969 struct sf_dmamap_arg {
970 bus_addr_t sf_busaddr;
974 sf_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
976 struct sf_dmamap_arg *ctx;
981 ctx->sf_busaddr = segs[0].ds_addr;
985 sf_dma_alloc(struct sf_softc *sc)
987 struct sf_dmamap_arg ctx;
988 struct sf_txdesc *txd;
989 struct sf_rxdesc *rxd;
991 bus_addr_t rx_ring_end, rx_cring_end;
992 bus_addr_t tx_ring_end, tx_cring_end;
995 lowaddr = BUS_SPACE_MAXADDR;
998 /* Create parent DMA tag. */
999 error = bus_dma_tag_create(
1000 bus_get_dma_tag(sc->sf_dev), /* parent */
1001 1, 0, /* alignment, boundary */
1002 lowaddr, /* lowaddr */
1003 BUS_SPACE_MAXADDR, /* highaddr */
1004 NULL, NULL, /* filter, filterarg */
1005 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1007 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1009 NULL, NULL, /* lockfunc, lockarg */
1010 &sc->sf_cdata.sf_parent_tag);
1012 device_printf(sc->sf_dev, "failed to create parent DMA tag\n");
1015 /* Create tag for Tx ring. */
1016 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
1017 SF_RING_ALIGN, 0, /* alignment, boundary */
1018 BUS_SPACE_MAXADDR, /* lowaddr */
1019 BUS_SPACE_MAXADDR, /* highaddr */
1020 NULL, NULL, /* filter, filterarg */
1021 SF_TX_DLIST_SIZE, /* maxsize */
1023 SF_TX_DLIST_SIZE, /* maxsegsize */
1025 NULL, NULL, /* lockfunc, lockarg */
1026 &sc->sf_cdata.sf_tx_ring_tag);
1028 device_printf(sc->sf_dev, "failed to create Tx ring DMA tag\n");
1032 /* Create tag for Tx completion ring. */
1033 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
1034 SF_RING_ALIGN, 0, /* alignment, boundary */
1035 BUS_SPACE_MAXADDR, /* lowaddr */
1036 BUS_SPACE_MAXADDR, /* highaddr */
1037 NULL, NULL, /* filter, filterarg */
1038 SF_TX_CLIST_SIZE, /* maxsize */
1040 SF_TX_CLIST_SIZE, /* maxsegsize */
1042 NULL, NULL, /* lockfunc, lockarg */
1043 &sc->sf_cdata.sf_tx_cring_tag);
1045 device_printf(sc->sf_dev,
1046 "failed to create Tx completion ring DMA tag\n");
1050 /* Create tag for Rx ring. */
1051 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
1052 SF_RING_ALIGN, 0, /* alignment, boundary */
1053 BUS_SPACE_MAXADDR, /* lowaddr */
1054 BUS_SPACE_MAXADDR, /* highaddr */
1055 NULL, NULL, /* filter, filterarg */
1056 SF_RX_DLIST_SIZE, /* maxsize */
1058 SF_RX_DLIST_SIZE, /* maxsegsize */
1060 NULL, NULL, /* lockfunc, lockarg */
1061 &sc->sf_cdata.sf_rx_ring_tag);
1063 device_printf(sc->sf_dev,
1064 "failed to create Rx ring DMA tag\n");
1068 /* Create tag for Rx completion ring. */
1069 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
1070 SF_RING_ALIGN, 0, /* alignment, boundary */
1071 BUS_SPACE_MAXADDR, /* lowaddr */
1072 BUS_SPACE_MAXADDR, /* highaddr */
1073 NULL, NULL, /* filter, filterarg */
1074 SF_RX_CLIST_SIZE, /* maxsize */
1076 SF_RX_CLIST_SIZE, /* maxsegsize */
1078 NULL, NULL, /* lockfunc, lockarg */
1079 &sc->sf_cdata.sf_rx_cring_tag);
1081 device_printf(sc->sf_dev,
1082 "failed to create Rx completion ring DMA tag\n");
1086 /* Create tag for Tx buffers. */
1087 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
1088 1, 0, /* alignment, boundary */
1089 BUS_SPACE_MAXADDR, /* lowaddr */
1090 BUS_SPACE_MAXADDR, /* highaddr */
1091 NULL, NULL, /* filter, filterarg */
1092 MCLBYTES * SF_MAXTXSEGS, /* maxsize */
1093 SF_MAXTXSEGS, /* nsegments */
1094 MCLBYTES, /* maxsegsize */
1096 NULL, NULL, /* lockfunc, lockarg */
1097 &sc->sf_cdata.sf_tx_tag);
1099 device_printf(sc->sf_dev, "failed to create Tx DMA tag\n");
1103 /* Create tag for Rx buffers. */
1104 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
1105 SF_RX_ALIGN, 0, /* alignment, boundary */
1106 BUS_SPACE_MAXADDR, /* lowaddr */
1107 BUS_SPACE_MAXADDR, /* highaddr */
1108 NULL, NULL, /* filter, filterarg */
1109 MCLBYTES, /* maxsize */
1111 MCLBYTES, /* maxsegsize */
1113 NULL, NULL, /* lockfunc, lockarg */
1114 &sc->sf_cdata.sf_rx_tag);
1116 device_printf(sc->sf_dev, "failed to create Rx DMA tag\n");
1120 /* Allocate DMA'able memory and load the DMA map for Tx ring. */
1121 error = bus_dmamem_alloc(sc->sf_cdata.sf_tx_ring_tag,
1122 (void **)&sc->sf_rdata.sf_tx_ring, BUS_DMA_WAITOK |
1123 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_tx_ring_map);
1125 device_printf(sc->sf_dev,
1126 "failed to allocate DMA'able memory for Tx ring\n");
1131 error = bus_dmamap_load(sc->sf_cdata.sf_tx_ring_tag,
1132 sc->sf_cdata.sf_tx_ring_map, sc->sf_rdata.sf_tx_ring,
1133 SF_TX_DLIST_SIZE, sf_dmamap_cb, &ctx, 0);
1134 if (error != 0 || ctx.sf_busaddr == 0) {
1135 device_printf(sc->sf_dev,
1136 "failed to load DMA'able memory for Tx ring\n");
1139 sc->sf_rdata.sf_tx_ring_paddr = ctx.sf_busaddr;
1142 * Allocate DMA'able memory and load the DMA map for Tx completion ring.
1144 error = bus_dmamem_alloc(sc->sf_cdata.sf_tx_cring_tag,
1145 (void **)&sc->sf_rdata.sf_tx_cring, BUS_DMA_WAITOK |
1146 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_tx_cring_map);
1148 device_printf(sc->sf_dev,
1149 "failed to allocate DMA'able memory for "
1150 "Tx completion ring\n");
1155 error = bus_dmamap_load(sc->sf_cdata.sf_tx_cring_tag,
1156 sc->sf_cdata.sf_tx_cring_map, sc->sf_rdata.sf_tx_cring,
1157 SF_TX_CLIST_SIZE, sf_dmamap_cb, &ctx, 0);
1158 if (error != 0 || ctx.sf_busaddr == 0) {
1159 device_printf(sc->sf_dev,
1160 "failed to load DMA'able memory for Tx completion ring\n");
1163 sc->sf_rdata.sf_tx_cring_paddr = ctx.sf_busaddr;
1165 /* Allocate DMA'able memory and load the DMA map for Rx ring. */
1166 error = bus_dmamem_alloc(sc->sf_cdata.sf_rx_ring_tag,
1167 (void **)&sc->sf_rdata.sf_rx_ring, BUS_DMA_WAITOK |
1168 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_rx_ring_map);
1170 device_printf(sc->sf_dev,
1171 "failed to allocate DMA'able memory for Rx ring\n");
1176 error = bus_dmamap_load(sc->sf_cdata.sf_rx_ring_tag,
1177 sc->sf_cdata.sf_rx_ring_map, sc->sf_rdata.sf_rx_ring,
1178 SF_RX_DLIST_SIZE, sf_dmamap_cb, &ctx, 0);
1179 if (error != 0 || ctx.sf_busaddr == 0) {
1180 device_printf(sc->sf_dev,
1181 "failed to load DMA'able memory for Rx ring\n");
1184 sc->sf_rdata.sf_rx_ring_paddr = ctx.sf_busaddr;
1187 * Allocate DMA'able memory and load the DMA map for Rx completion ring.
1189 error = bus_dmamem_alloc(sc->sf_cdata.sf_rx_cring_tag,
1190 (void **)&sc->sf_rdata.sf_rx_cring, BUS_DMA_WAITOK |
1191 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_rx_cring_map);
1193 device_printf(sc->sf_dev,
1194 "failed to allocate DMA'able memory for "
1195 "Rx completion ring\n");
1200 error = bus_dmamap_load(sc->sf_cdata.sf_rx_cring_tag,
1201 sc->sf_cdata.sf_rx_cring_map, sc->sf_rdata.sf_rx_cring,
1202 SF_RX_CLIST_SIZE, sf_dmamap_cb, &ctx, 0);
1203 if (error != 0 || ctx.sf_busaddr == 0) {
1204 device_printf(sc->sf_dev,
1205 "failed to load DMA'able memory for Rx completion ring\n");
1208 sc->sf_rdata.sf_rx_cring_paddr = ctx.sf_busaddr;
1211 * Tx descriptor ring and Tx completion ring should be addressed in
1212 * the same 4GB space. The same rule applies to Rx ring and Rx
1213 * completion ring. Unfortunately there is no way to specify this
1214 * boundary restriction with bus_dma(9). So just try to allocate
1215 * without the restriction and check the restriction was satisfied.
1216 * If not, fall back to 32bit dma addressing mode which always
1217 * guarantees the restriction.
1219 tx_ring_end = sc->sf_rdata.sf_tx_ring_paddr + SF_TX_DLIST_SIZE;
1220 tx_cring_end = sc->sf_rdata.sf_tx_cring_paddr + SF_TX_CLIST_SIZE;
1221 rx_ring_end = sc->sf_rdata.sf_rx_ring_paddr + SF_RX_DLIST_SIZE;
1222 rx_cring_end = sc->sf_rdata.sf_rx_cring_paddr + SF_RX_CLIST_SIZE;
1223 if ((SF_ADDR_HI(sc->sf_rdata.sf_tx_ring_paddr) !=
1224 SF_ADDR_HI(tx_cring_end)) ||
1225 (SF_ADDR_HI(sc->sf_rdata.sf_tx_cring_paddr) !=
1226 SF_ADDR_HI(tx_ring_end)) ||
1227 (SF_ADDR_HI(sc->sf_rdata.sf_rx_ring_paddr) !=
1228 SF_ADDR_HI(rx_cring_end)) ||
1229 (SF_ADDR_HI(sc->sf_rdata.sf_rx_cring_paddr) !=
1230 SF_ADDR_HI(rx_ring_end))) {
1231 device_printf(sc->sf_dev,
1232 "switching to 32bit DMA mode\n");
1234 /* Limit DMA address space to 32bit and try again. */
1235 lowaddr = BUS_SPACE_MAXADDR_32BIT;
1239 /* Create DMA maps for Tx buffers. */
1240 for (i = 0; i < SF_TX_DLIST_CNT; i++) {
1241 txd = &sc->sf_cdata.sf_txdesc[i];
1244 txd->tx_dmamap = NULL;
1245 error = bus_dmamap_create(sc->sf_cdata.sf_tx_tag, 0,
1248 device_printf(sc->sf_dev,
1249 "failed to create Tx dmamap\n");
1253 /* Create DMA maps for Rx buffers. */
1254 if ((error = bus_dmamap_create(sc->sf_cdata.sf_rx_tag, 0,
1255 &sc->sf_cdata.sf_rx_sparemap)) != 0) {
1256 device_printf(sc->sf_dev,
1257 "failed to create spare Rx dmamap\n");
1260 for (i = 0; i < SF_RX_DLIST_CNT; i++) {
1261 rxd = &sc->sf_cdata.sf_rxdesc[i];
1263 rxd->rx_dmamap = NULL;
1264 error = bus_dmamap_create(sc->sf_cdata.sf_rx_tag, 0,
1267 device_printf(sc->sf_dev,
1268 "failed to create Rx dmamap\n");
1278 sf_dma_free(struct sf_softc *sc)
1280 struct sf_txdesc *txd;
1281 struct sf_rxdesc *rxd;
1285 if (sc->sf_cdata.sf_tx_ring_tag) {
1286 if (sc->sf_cdata.sf_tx_ring_map)
1287 bus_dmamap_unload(sc->sf_cdata.sf_tx_ring_tag,
1288 sc->sf_cdata.sf_tx_ring_map);
1289 if (sc->sf_cdata.sf_tx_ring_map &&
1290 sc->sf_rdata.sf_tx_ring)
1291 bus_dmamem_free(sc->sf_cdata.sf_tx_ring_tag,
1292 sc->sf_rdata.sf_tx_ring,
1293 sc->sf_cdata.sf_tx_ring_map);
1294 sc->sf_rdata.sf_tx_ring = NULL;
1295 sc->sf_cdata.sf_tx_ring_map = NULL;
1296 bus_dma_tag_destroy(sc->sf_cdata.sf_tx_ring_tag);
1297 sc->sf_cdata.sf_tx_ring_tag = NULL;
1299 /* Tx completion ring. */
1300 if (sc->sf_cdata.sf_tx_cring_tag) {
1301 if (sc->sf_cdata.sf_tx_cring_map)
1302 bus_dmamap_unload(sc->sf_cdata.sf_tx_cring_tag,
1303 sc->sf_cdata.sf_tx_cring_map);
1304 if (sc->sf_cdata.sf_tx_cring_map &&
1305 sc->sf_rdata.sf_tx_cring)
1306 bus_dmamem_free(sc->sf_cdata.sf_tx_cring_tag,
1307 sc->sf_rdata.sf_tx_cring,
1308 sc->sf_cdata.sf_tx_cring_map);
1309 sc->sf_rdata.sf_tx_cring = NULL;
1310 sc->sf_cdata.sf_tx_cring_map = NULL;
1311 bus_dma_tag_destroy(sc->sf_cdata.sf_tx_cring_tag);
1312 sc->sf_cdata.sf_tx_cring_tag = NULL;
1315 if (sc->sf_cdata.sf_rx_ring_tag) {
1316 if (sc->sf_cdata.sf_rx_ring_map)
1317 bus_dmamap_unload(sc->sf_cdata.sf_rx_ring_tag,
1318 sc->sf_cdata.sf_rx_ring_map);
1319 if (sc->sf_cdata.sf_rx_ring_map &&
1320 sc->sf_rdata.sf_rx_ring)
1321 bus_dmamem_free(sc->sf_cdata.sf_rx_ring_tag,
1322 sc->sf_rdata.sf_rx_ring,
1323 sc->sf_cdata.sf_rx_ring_map);
1324 sc->sf_rdata.sf_rx_ring = NULL;
1325 sc->sf_cdata.sf_rx_ring_map = NULL;
1326 bus_dma_tag_destroy(sc->sf_cdata.sf_rx_ring_tag);
1327 sc->sf_cdata.sf_rx_ring_tag = NULL;
1329 /* Rx completion ring. */
1330 if (sc->sf_cdata.sf_rx_cring_tag) {
1331 if (sc->sf_cdata.sf_rx_cring_map)
1332 bus_dmamap_unload(sc->sf_cdata.sf_rx_cring_tag,
1333 sc->sf_cdata.sf_rx_cring_map);
1334 if (sc->sf_cdata.sf_rx_cring_map &&
1335 sc->sf_rdata.sf_rx_cring)
1336 bus_dmamem_free(sc->sf_cdata.sf_rx_cring_tag,
1337 sc->sf_rdata.sf_rx_cring,
1338 sc->sf_cdata.sf_rx_cring_map);
1339 sc->sf_rdata.sf_rx_cring = NULL;
1340 sc->sf_cdata.sf_rx_cring_map = NULL;
1341 bus_dma_tag_destroy(sc->sf_cdata.sf_rx_cring_tag);
1342 sc->sf_cdata.sf_rx_cring_tag = NULL;
1345 if (sc->sf_cdata.sf_tx_tag) {
1346 for (i = 0; i < SF_TX_DLIST_CNT; i++) {
1347 txd = &sc->sf_cdata.sf_txdesc[i];
1348 if (txd->tx_dmamap) {
1349 bus_dmamap_destroy(sc->sf_cdata.sf_tx_tag,
1351 txd->tx_dmamap = NULL;
1354 bus_dma_tag_destroy(sc->sf_cdata.sf_tx_tag);
1355 sc->sf_cdata.sf_tx_tag = NULL;
1358 if (sc->sf_cdata.sf_rx_tag) {
1359 for (i = 0; i < SF_RX_DLIST_CNT; i++) {
1360 rxd = &sc->sf_cdata.sf_rxdesc[i];
1361 if (rxd->rx_dmamap) {
1362 bus_dmamap_destroy(sc->sf_cdata.sf_rx_tag,
1364 rxd->rx_dmamap = NULL;
1367 if (sc->sf_cdata.sf_rx_sparemap) {
1368 bus_dmamap_destroy(sc->sf_cdata.sf_rx_tag,
1369 sc->sf_cdata.sf_rx_sparemap);
1370 sc->sf_cdata.sf_rx_sparemap = 0;
1372 bus_dma_tag_destroy(sc->sf_cdata.sf_rx_tag);
1373 sc->sf_cdata.sf_rx_tag = NULL;
1376 if (sc->sf_cdata.sf_parent_tag) {
1377 bus_dma_tag_destroy(sc->sf_cdata.sf_parent_tag);
1378 sc->sf_cdata.sf_parent_tag = NULL;
1383 sf_init_rx_ring(struct sf_softc *sc)
1385 struct sf_ring_data *rd;
1388 sc->sf_cdata.sf_rxc_cons = 0;
1391 bzero(rd->sf_rx_ring, SF_RX_DLIST_SIZE);
1392 bzero(rd->sf_rx_cring, SF_RX_CLIST_SIZE);
1394 for (i = 0; i < SF_RX_DLIST_CNT; i++) {
1395 if (sf_newbuf(sc, i) != 0)
1399 bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag,
1400 sc->sf_cdata.sf_rx_cring_map,
1401 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1402 bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag,
1403 sc->sf_cdata.sf_rx_ring_map,
1404 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1410 sf_init_tx_ring(struct sf_softc *sc)
1412 struct sf_ring_data *rd;
1415 sc->sf_cdata.sf_tx_prod = 0;
1416 sc->sf_cdata.sf_tx_cnt = 0;
1417 sc->sf_cdata.sf_txc_cons = 0;
1420 bzero(rd->sf_tx_ring, SF_TX_DLIST_SIZE);
1421 bzero(rd->sf_tx_cring, SF_TX_CLIST_SIZE);
1422 for (i = 0; i < SF_TX_DLIST_CNT; i++) {
1423 rd->sf_tx_ring[i].sf_tx_ctrl = htole32(SF_TX_DESC_ID);
1424 sc->sf_cdata.sf_txdesc[i].tx_m = NULL;
1425 sc->sf_cdata.sf_txdesc[i].ndesc = 0;
1427 rd->sf_tx_ring[i].sf_tx_ctrl |= htole32(SF_TX_DESC_END);
1429 bus_dmamap_sync(sc->sf_cdata.sf_tx_ring_tag,
1430 sc->sf_cdata.sf_tx_ring_map,
1431 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1432 bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag,
1433 sc->sf_cdata.sf_tx_cring_map,
1434 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1438 * Initialize an RX descriptor and attach an MBUF cluster.
1441 sf_newbuf(struct sf_softc *sc, int idx)
1443 struct sf_rx_rdesc *desc;
1444 struct sf_rxdesc *rxd;
1446 bus_dma_segment_t segs[1];
1450 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1453 m->m_len = m->m_pkthdr.len = MCLBYTES;
1454 m_adj(m, sizeof(uint32_t));
1456 if (bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_rx_tag,
1457 sc->sf_cdata.sf_rx_sparemap, m, segs, &nsegs, 0) != 0) {
1461 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1463 rxd = &sc->sf_cdata.sf_rxdesc[idx];
1464 if (rxd->rx_m != NULL) {
1465 bus_dmamap_sync(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap,
1466 BUS_DMASYNC_POSTREAD);
1467 bus_dmamap_unload(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap);
1469 map = rxd->rx_dmamap;
1470 rxd->rx_dmamap = sc->sf_cdata.sf_rx_sparemap;
1471 sc->sf_cdata.sf_rx_sparemap = map;
1472 bus_dmamap_sync(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap,
1473 BUS_DMASYNC_PREREAD);
1475 desc = &sc->sf_rdata.sf_rx_ring[idx];
1476 desc->sf_addr = htole64(segs[0].ds_addr);
1481 #ifndef __NO_STRICT_ALIGNMENT
1482 static __inline void
1483 sf_fixup_rx(struct mbuf *m)
1486 uint16_t *src, *dst;
1488 src = mtod(m, uint16_t *);
1491 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
1494 m->m_data -= ETHER_ALIGN;
1499 * The starfire is programmed to use 'normal' mode for packet reception,
1500 * which means we use the consumer/producer model for both the buffer
1501 * descriptor queue and the completion descriptor queue. The only problem
1502 * with this is that it involves a lot of register accesses: we have to
1503 * read the RX completion consumer and producer indexes and the RX buffer
1504 * producer index, plus the RX completion consumer and RX buffer producer
1505 * indexes have to be updated. It would have been easier if Adaptec had
1506 * put each index in a separate register, especially given that the damn
1507 * NIC has a 512K register space.
1509 * In spite of all the lovely features that Adaptec crammed into the 6915,
1510 * it is marred by one truly stupid design flaw, which is that receive
1511 * buffer addresses must be aligned on a longword boundary. This forces
1512 * the packet payload to be unaligned, which is suboptimal on the x86 and
1513 * completely unuseable on the Alpha. Our only recourse is to copy received
1514 * packets into properly aligned buffers before handing them off.
1517 sf_rxeof(struct sf_softc *sc)
1521 struct sf_rxdesc *rxd;
1522 struct sf_rx_rcdesc *cur_cmp;
1523 int cons, eidx, prog, rx_npkts;
1524 uint32_t status, status2;
1531 bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag,
1532 sc->sf_cdata.sf_rx_ring_map,
1533 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1534 bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag,
1535 sc->sf_cdata.sf_rx_cring_map,
1536 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1539 * To reduce register access, directly read Receive completion
1544 for (cons = sc->sf_cdata.sf_rxc_cons;
1545 (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
1546 SF_INC(cons, SF_RX_CLIST_CNT)) {
1547 cur_cmp = &sc->sf_rdata.sf_rx_cring[cons];
1548 status = le32toh(cur_cmp->sf_rx_status1);
1551 #ifdef DEVICE_POLLING
1552 if ((ifp->if_capenable & IFCAP_POLLING) != 0) {
1553 if (sc->rxcycles <= 0)
1559 eidx = (status & SF_RX_CMPDESC_EIDX) >> 16;
1560 rxd = &sc->sf_cdata.sf_rxdesc[eidx];
1564 * Note, if_ipackets and if_ierrors counters
1565 * are handled in sf_stats_update().
1567 if ((status & SF_RXSTAT1_OK) == 0) {
1568 cur_cmp->sf_rx_status1 = 0;
1572 if (sf_newbuf(sc, eidx) != 0) {
1574 cur_cmp->sf_rx_status1 = 0;
1578 /* AIC-6915 supports TCP/UDP checksum offload. */
1579 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
1580 status2 = le32toh(cur_cmp->sf_rx_status2);
1582 * Sometimes AIC-6915 generates an interrupt to
1583 * warn RxGFP stall with bad checksum bit set
1584 * in status word. I'm not sure what conditioan
1585 * triggers it but recevied packet's checksum
1586 * was correct even though AIC-6915 does not
1587 * agree on this. This may be an indication of
1588 * firmware bug. To fix the issue, do not rely
1589 * on bad checksum bit in status word and let
1590 * upper layer verify integrity of received
1592 * Another nice feature of AIC-6915 is hardware
1593 * assistance of checksum calculation by
1594 * providing partial checksum value for received
1595 * frame. The partial checksum value can be used
1596 * to accelerate checksum computation for
1597 * fragmented TCP/UDP packets. Upper network
1598 * stack already takes advantage of the partial
1599 * checksum value in IP reassembly stage. But
1600 * I'm not sure the correctness of the partial
1601 * hardware checksum assistance as frequent
1602 * RxGFP stalls are seen on non-fragmented
1603 * frames. Due to the nature of the complexity
1604 * of checksum computation code in firmware it's
1605 * possible to see another bug in RxGFP so
1606 * ignore checksum assistance for fragmented
1607 * frames. This can be changed in future.
1609 if ((status2 & SF_RXSTAT2_FRAG) == 0) {
1610 if ((status2 & (SF_RXSTAT2_TCP |
1611 SF_RXSTAT2_UDP)) != 0) {
1612 if ((status2 & SF_RXSTAT2_CSUM_OK)) {
1613 m->m_pkthdr.csum_flags =
1616 m->m_pkthdr.csum_data = 0xffff;
1620 #ifdef SF_PARTIAL_CSUM_SUPPORT
1621 else if ((status2 & SF_RXSTAT2_FRAG) != 0) {
1622 if ((status2 & (SF_RXSTAT2_TCP |
1623 SF_RXSTAT2_UDP)) != 0) {
1624 if ((status2 & SF_RXSTAT2_PCSUM_OK)) {
1625 m->m_pkthdr.csum_flags =
1627 m->m_pkthdr.csum_data =
1629 SF_RX_CMPDESC_CSUM2);
1636 m->m_pkthdr.len = m->m_len = status & SF_RX_CMPDESC_LEN;
1637 #ifndef __NO_STRICT_ALIGNMENT
1640 m->m_pkthdr.rcvif = ifp;
1643 (*ifp->if_input)(ifp, m);
1647 /* Clear completion status. */
1648 cur_cmp->sf_rx_status1 = 0;
1652 sc->sf_cdata.sf_rxc_cons = cons;
1653 bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag,
1654 sc->sf_cdata.sf_rx_ring_map,
1655 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1656 bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag,
1657 sc->sf_cdata.sf_rx_cring_map,
1658 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1660 /* Update Rx completion Q1 consumer index. */
1661 csr_write_4(sc, SF_CQ_CONSIDX,
1662 (csr_read_4(sc, SF_CQ_CONSIDX) & ~SF_CQ_CONSIDX_RXQ1) |
1663 (cons & SF_CQ_CONSIDX_RXQ1));
1664 /* Update Rx descriptor Q1 ptr. */
1665 csr_write_4(sc, SF_RXDQ_PTR_Q1,
1666 (csr_read_4(sc, SF_RXDQ_PTR_Q1) & ~SF_RXDQ_PRODIDX) |
1667 (eidx & SF_RXDQ_PRODIDX));
1673 * Read the transmit status from the completion queue and release
1674 * mbufs. Note that the buffer descriptor index in the completion
1675 * descriptor is an offset from the start of the transmit buffer
1676 * descriptor list in bytes. This is important because the manual
1677 * gives the impression that it should match the producer/consumer
1678 * index, which is the offset in 8 byte blocks.
1681 sf_txeof(struct sf_softc *sc)
1683 struct sf_txdesc *txd;
1684 struct sf_tx_rcdesc *cur_cmp;
1687 int cons, idx, prod;
1693 bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag,
1694 sc->sf_cdata.sf_tx_cring_map,
1695 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1697 cons = sc->sf_cdata.sf_txc_cons;
1698 prod = (csr_read_4(sc, SF_CQ_PRODIDX) & SF_TXDQ_PRODIDX_HIPRIO) >> 16;
1702 for (; cons != prod; SF_INC(cons, SF_TX_CLIST_CNT)) {
1703 cur_cmp = &sc->sf_rdata.sf_tx_cring[cons];
1704 status = le32toh(cur_cmp->sf_tx_status1);
1707 switch (status & SF_TX_CMPDESC_TYPE) {
1708 case SF_TXCMPTYPE_TX:
1709 /* Tx complete entry. */
1711 case SF_TXCMPTYPE_DMA:
1712 /* DMA complete entry. */
1713 idx = status & SF_TX_CMPDESC_IDX;
1714 idx = idx / sizeof(struct sf_tx_rdesc);
1716 * We don't need to check Tx status here.
1717 * SF_ISR_TX_LOFIFO intr would handle this.
1718 * Note, if_opackets, if_collisions and if_oerrors
1719 * counters are handled in sf_stats_update().
1721 txd = &sc->sf_cdata.sf_txdesc[idx];
1722 if (txd->tx_m != NULL) {
1723 bus_dmamap_sync(sc->sf_cdata.sf_tx_tag,
1725 BUS_DMASYNC_POSTWRITE);
1726 bus_dmamap_unload(sc->sf_cdata.sf_tx_tag,
1731 sc->sf_cdata.sf_tx_cnt -= txd->ndesc;
1732 KASSERT(sc->sf_cdata.sf_tx_cnt >= 0,
1733 ("%s: Active Tx desc counter was garbled\n",
1736 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1739 /* It should not happen. */
1740 device_printf(sc->sf_dev,
1741 "unknown Tx completion type : 0x%08x : %d : %d\n",
1742 status, cons, prod);
1745 cur_cmp->sf_tx_status1 = 0;
1748 sc->sf_cdata.sf_txc_cons = cons;
1749 bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag,
1750 sc->sf_cdata.sf_tx_cring_map,
1751 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1753 if (sc->sf_cdata.sf_tx_cnt == 0)
1754 sc->sf_watchdog_timer = 0;
1756 /* Update Tx completion consumer index. */
1757 csr_write_4(sc, SF_CQ_CONSIDX,
1758 (csr_read_4(sc, SF_CQ_CONSIDX) & 0xffff) |
1759 ((cons << 16) & 0xffff0000));
1763 sf_txthresh_adjust(struct sf_softc *sc)
1767 device_printf(sc->sf_dev, "Tx underrun -- ");
1768 if (sc->sf_txthresh < SF_MAX_TX_THRESHOLD) {
1769 txfctl = csr_read_4(sc, SF_TX_FRAMCTL);
1770 /* Increase Tx threshold 256 bytes. */
1771 sc->sf_txthresh += 16;
1772 if (sc->sf_txthresh > SF_MAX_TX_THRESHOLD)
1773 sc->sf_txthresh = SF_MAX_TX_THRESHOLD;
1774 txfctl &= ~SF_TXFRMCTL_TXTHRESH;
1775 txfctl |= sc->sf_txthresh;
1776 printf("increasing Tx threshold to %d bytes\n",
1777 sc->sf_txthresh * SF_TX_THRESHOLD_UNIT);
1778 csr_write_4(sc, SF_TX_FRAMCTL, txfctl);
1783 #ifdef DEVICE_POLLING
1785 sf_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1787 struct sf_softc *sc;
1795 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1800 sc->rxcycles = count;
1801 rx_npkts = sf_rxeof(sc);
1803 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1804 sf_start_locked(ifp);
1806 if (cmd == POLL_AND_CHECK_STATUS) {
1807 /* Reading the ISR register clears all interrrupts. */
1808 status = csr_read_4(sc, SF_ISR);
1810 if ((status & SF_ISR_ABNORMALINTR) != 0) {
1811 if ((status & SF_ISR_STATSOFLOW) != 0)
1812 sf_stats_update(sc);
1813 else if ((status & SF_ISR_TX_LOFIFO) != 0)
1814 sf_txthresh_adjust(sc);
1815 else if ((status & SF_ISR_DMAERR) != 0) {
1816 device_printf(sc->sf_dev,
1817 "DMA error, resetting\n");
1818 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1822 } else if ((status & SF_ISR_NO_TX_CSUM) != 0) {
1823 sc->sf_statistics.sf_tx_gfp_stall++;
1825 device_printf(sc->sf_dev,
1826 "TxGFP is not responding!\n");
1828 } else if ((status & SF_ISR_RXGFP_NORESP) != 0) {
1829 sc->sf_statistics.sf_rx_gfp_stall++;
1831 device_printf(sc->sf_dev,
1832 "RxGFP is not responding!\n");
1841 #endif /* DEVICE_POLLING */
1846 struct sf_softc *sc;
1851 sc = (struct sf_softc *)arg;
1854 if (sc->sf_suspended != 0)
1857 /* Reading the ISR register clears all interrrupts. */
1858 status = csr_read_4(sc, SF_ISR);
1859 if (status == 0 || status == 0xffffffff ||
1860 (status & SF_ISR_PCIINT_ASSERTED) == 0)
1864 #ifdef DEVICE_POLLING
1865 if ((ifp->if_capenable & IFCAP_POLLING) != 0)
1869 /* Disable interrupts. */
1870 csr_write_4(sc, SF_IMR, 0x00000000);
1872 for (cnt = 32; (status & SF_INTRS) != 0;) {
1873 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1875 if ((status & SF_ISR_RXDQ1_DMADONE) != 0)
1878 if ((status & (SF_ISR_TX_TXDONE | SF_ISR_TX_DMADONE |
1879 SF_ISR_TX_QUEUEDONE)) != 0)
1882 if ((status & SF_ISR_ABNORMALINTR) != 0) {
1883 if ((status & SF_ISR_STATSOFLOW) != 0)
1884 sf_stats_update(sc);
1885 else if ((status & SF_ISR_TX_LOFIFO) != 0)
1886 sf_txthresh_adjust(sc);
1887 else if ((status & SF_ISR_DMAERR) != 0) {
1888 device_printf(sc->sf_dev,
1889 "DMA error, resetting\n");
1890 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1894 } else if ((status & SF_ISR_NO_TX_CSUM) != 0) {
1895 sc->sf_statistics.sf_tx_gfp_stall++;
1897 device_printf(sc->sf_dev,
1898 "TxGFP is not responding!\n");
1901 else if ((status & SF_ISR_RXGFP_NORESP) != 0) {
1902 sc->sf_statistics.sf_rx_gfp_stall++;
1904 device_printf(sc->sf_dev,
1905 "RxGFP is not responding!\n");
1909 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1910 sf_start_locked(ifp);
1913 /* Reading the ISR register clears all interrrupts. */
1914 status = csr_read_4(sc, SF_ISR);
1917 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1918 /* Re-enable interrupts. */
1919 csr_write_4(sc, SF_IMR, SF_INTRS);
1927 sf_download_fw(struct sf_softc *sc)
1934 * A FP instruction is composed of 48bits so we have to
1935 * write it with two parts.
1939 for (i = 0; i < sizeof(txfwdata) / SF_GFP_INST_BYTES; i++) {
1940 gfpinst = p[2] << 24 | p[3] << 16 | p[4] << 8 | p[5];
1941 csr_write_4(sc, SF_TXGFP_MEM_BASE + ndx * 4, gfpinst);
1942 gfpinst = p[0] << 8 | p[1];
1943 csr_write_4(sc, SF_TXGFP_MEM_BASE + (ndx + 1) * 4, gfpinst);
1944 p += SF_GFP_INST_BYTES;
1948 device_printf(sc->sf_dev, "%d Tx instructions downloaded\n", i);
1952 for (i = 0; i < sizeof(rxfwdata) / SF_GFP_INST_BYTES; i++) {
1953 gfpinst = p[2] << 24 | p[3] << 16 | p[4] << 8 | p[5];
1954 csr_write_4(sc, SF_RXGFP_MEM_BASE + (ndx * 4), gfpinst);
1955 gfpinst = p[0] << 8 | p[1];
1956 csr_write_4(sc, SF_RXGFP_MEM_BASE + (ndx + 1) * 4, gfpinst);
1957 p += SF_GFP_INST_BYTES;
1961 device_printf(sc->sf_dev, "%d Rx instructions downloaded\n", i);
1967 struct sf_softc *sc;
1969 sc = (struct sf_softc *)xsc;
1976 sf_init_locked(struct sf_softc *sc)
1979 struct mii_data *mii;
1980 uint8_t eaddr[ETHER_ADDR_LEN];
1986 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1988 mii = device_get_softc(sc->sf_miibus);
1991 /* Reset the hardware to a known state. */
1994 /* Init all the receive filter registers */
1995 for (i = SF_RXFILT_PERFECT_BASE;
1996 i < (SF_RXFILT_HASH_MAX + 1); i += sizeof(uint32_t))
1997 csr_write_4(sc, i, 0);
1999 /* Empty stats counter registers. */
2000 for (i = SF_STATS_BASE; i < (SF_STATS_END + 1); i += sizeof(uint32_t))
2001 csr_write_4(sc, i, 0);
2003 /* Init our MAC address. */
2004 bcopy(IF_LLADDR(sc->sf_ifp), eaddr, sizeof(eaddr));
2005 csr_write_4(sc, SF_PAR0,
2006 eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
2007 csr_write_4(sc, SF_PAR1, eaddr[0] << 8 | eaddr[1]);
2008 sf_setperf(sc, 0, eaddr);
2010 if (sf_init_rx_ring(sc) == ENOBUFS) {
2011 device_printf(sc->sf_dev,
2012 "initialization failed: no memory for rx buffers\n");
2017 sf_init_tx_ring(sc);
2020 * 16 perfect address filtering.
2021 * Hash only multicast destination address, Accept matching
2022 * frames regardless of VLAN ID.
2024 csr_write_4(sc, SF_RXFILT, SF_PERFMODE_NORMAL | SF_HASHMODE_ANYVLAN);
2031 /* Init the completion queue indexes. */
2032 csr_write_4(sc, SF_CQ_CONSIDX, 0);
2033 csr_write_4(sc, SF_CQ_PRODIDX, 0);
2035 /* Init the RX completion queue. */
2036 addr = sc->sf_rdata.sf_rx_cring_paddr;
2037 csr_write_4(sc, SF_CQ_ADDR_HI, SF_ADDR_HI(addr));
2038 csr_write_4(sc, SF_RXCQ_CTL_1, SF_ADDR_LO(addr) & SF_RXCQ_ADDR);
2039 if (SF_ADDR_HI(addr) != 0)
2040 SF_SETBIT(sc, SF_RXCQ_CTL_1, SF_RXCQ_USE_64BIT);
2041 /* Set RX completion queue type 2. */
2042 SF_SETBIT(sc, SF_RXCQ_CTL_1, SF_RXCQTYPE_2);
2043 csr_write_4(sc, SF_RXCQ_CTL_2, 0);
2046 * Init RX DMA control.
2047 * default RxHighPriority Threshold,
2048 * default RxBurstSize, 128bytes.
2050 SF_SETBIT(sc, SF_RXDMA_CTL,
2051 SF_RXDMA_REPORTBADPKTS |
2052 (SF_RXDMA_HIGHPRIO_THRESH << 8) |
2055 /* Init the RX buffer descriptor queue. */
2056 addr = sc->sf_rdata.sf_rx_ring_paddr;
2057 csr_write_4(sc, SF_RXDQ_ADDR_HI, SF_ADDR_HI(addr));
2058 csr_write_4(sc, SF_RXDQ_ADDR_Q1, SF_ADDR_LO(addr));
2060 /* Set RX queue buffer length. */
2061 csr_write_4(sc, SF_RXDQ_CTL_1,
2062 ((MCLBYTES - sizeof(uint32_t)) << 16) |
2063 SF_RXDQCTL_64BITBADDR | SF_RXDQCTL_VARIABLE);
2065 if (SF_ADDR_HI(addr) != 0)
2066 SF_SETBIT(sc, SF_RXDQ_CTL_1, SF_RXDQCTL_64BITDADDR);
2067 csr_write_4(sc, SF_RXDQ_PTR_Q1, SF_RX_DLIST_CNT - 1);
2068 csr_write_4(sc, SF_RXDQ_CTL_2, 0);
2070 /* Init the TX completion queue */
2071 addr = sc->sf_rdata.sf_tx_cring_paddr;
2072 csr_write_4(sc, SF_TXCQ_CTL, SF_ADDR_LO(addr) & SF_TXCQ_ADDR);
2073 if (SF_ADDR_HI(addr) != 0)
2074 SF_SETBIT(sc, SF_TXCQ_CTL, SF_TXCQ_USE_64BIT);
2076 /* Init the TX buffer descriptor queue. */
2077 addr = sc->sf_rdata.sf_tx_ring_paddr;
2078 csr_write_4(sc, SF_TXDQ_ADDR_HI, SF_ADDR_HI(addr));
2079 csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO, 0);
2080 csr_write_4(sc, SF_TXDQ_ADDR_LOPRIO, SF_ADDR_LO(addr));
2081 csr_write_4(sc, SF_TX_FRAMCTL,
2082 SF_TXFRMCTL_CPLAFTERTX | sc->sf_txthresh);
2083 csr_write_4(sc, SF_TXDQ_CTL,
2084 SF_TXDMA_HIPRIO_THRESH << 24 |
2085 SF_TXSKIPLEN_0BYTES << 16 |
2086 SF_TXDDMA_BURST << 8 |
2087 SF_TXBUFDESC_TYPE2 | SF_TXMINSPACE_UNLIMIT);
2088 if (SF_ADDR_HI(addr) != 0)
2089 SF_SETBIT(sc, SF_TXDQ_CTL, SF_TXDQCTL_64BITADDR);
2091 /* Set VLAN Type register. */
2092 csr_write_4(sc, SF_VLANTYPE, ETHERTYPE_VLAN);
2094 /* Set TxPause Timer. */
2095 csr_write_4(sc, SF_TXPAUSETIMER, 0xffff);
2097 /* Enable autopadding of short TX frames. */
2098 SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_AUTOPAD);
2099 SF_SETBIT(sc, SF_MACCFG_2, SF_MACCFG2_AUTOVLANPAD);
2100 /* Make sure to reset MAC to take changes effect. */
2101 SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET);
2103 SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET);
2105 /* Enable PCI bus master. */
2106 SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_PCIMEN);
2108 /* Load StarFire firmware. */
2111 /* Intialize interrupt moderation. */
2112 csr_write_4(sc, SF_TIMER_CTL, SF_TIMER_IMASK_MODE | SF_TIMER_TIMES_TEN |
2113 (sc->sf_int_mod & SF_TIMER_IMASK_INTERVAL));
2115 #ifdef DEVICE_POLLING
2116 /* Disable interrupts if we are polling. */
2117 if ((ifp->if_capenable & IFCAP_POLLING) != 0)
2118 csr_write_4(sc, SF_IMR, 0x00000000);
2121 /* Enable interrupts. */
2122 csr_write_4(sc, SF_IMR, SF_INTRS);
2123 SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_INTR_ENB);
2125 /* Enable the RX and TX engines. */
2126 csr_write_4(sc, SF_GEN_ETH_CTL,
2127 SF_ETHCTL_RX_ENB | SF_ETHCTL_RXDMA_ENB |
2128 SF_ETHCTL_TX_ENB | SF_ETHCTL_TXDMA_ENB);
2130 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
2131 SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_TXGFP_ENB);
2133 SF_CLRBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_TXGFP_ENB);
2134 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2135 SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RXGFP_ENB);
2137 SF_CLRBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RXGFP_ENB);
2139 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2140 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2143 sf_ifmedia_upd_locked(ifp);
2145 callout_reset(&sc->sf_co, hz, sf_tick, sc);
2149 sf_encap(struct sf_softc *sc, struct mbuf **m_head)
2151 struct sf_txdesc *txd;
2152 struct sf_tx_rdesc *desc;
2155 bus_dma_segment_t txsegs[SF_MAXTXSEGS];
2156 int error, i, nsegs, prod, si;
2162 prod = sc->sf_cdata.sf_tx_prod;
2163 txd = &sc->sf_cdata.sf_txdesc[prod];
2164 map = txd->tx_dmamap;
2165 error = bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_tx_tag, map,
2166 *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
2167 if (error == EFBIG) {
2168 m = m_collapse(*m_head, M_DONTWAIT, SF_MAXTXSEGS);
2175 error = bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_tx_tag,
2176 map, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
2182 } else if (error != 0)
2190 /* Check number of available descriptors. */
2191 avail = (SF_TX_DLIST_CNT - 1) - sc->sf_cdata.sf_tx_cnt;
2192 if (avail < nsegs) {
2193 bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, map);
2197 if (prod + nsegs >= SF_TX_DLIST_CNT) {
2198 nskip = SF_TX_DLIST_CNT - prod - 1;
2199 if (avail < nsegs + nskip) {
2200 bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, map);
2205 bus_dmamap_sync(sc->sf_cdata.sf_tx_tag, map, BUS_DMASYNC_PREWRITE);
2208 for (i = 0; i < nsegs; i++) {
2209 desc = &sc->sf_rdata.sf_tx_ring[prod];
2210 desc->sf_tx_ctrl = htole32(SF_TX_DESC_ID |
2211 (txsegs[i].ds_len & SF_TX_DESC_FRAGLEN));
2212 desc->sf_tx_reserved = 0;
2213 desc->sf_addr = htole64(txsegs[i].ds_addr);
2214 if (i == 0 && prod + nsegs >= SF_TX_DLIST_CNT) {
2216 desc->sf_tx_ctrl |= htole32(SF_TX_DESC_END);
2219 SF_INC(prod, SF_TX_DLIST_CNT);
2221 /* Update producer index. */
2222 sc->sf_cdata.sf_tx_prod = prod;
2223 sc->sf_cdata.sf_tx_cnt += nsegs + nskip;
2225 desc = &sc->sf_rdata.sf_tx_ring[si];
2226 /* Check TDP/UDP checksum offload request. */
2227 if ((m->m_pkthdr.csum_flags & SF_CSUM_FEATURES) != 0)
2228 desc->sf_tx_ctrl |= htole32(SF_TX_DESC_CALTCP);
2230 htole32(SF_TX_DESC_CRCEN | SF_TX_DESC_INTR | (nsegs << 16));
2232 txd->tx_dmamap = map;
2234 txd->ndesc = nsegs + nskip;
2240 sf_start(struct ifnet *ifp)
2242 struct sf_softc *sc;
2246 sf_start_locked(ifp);
2251 sf_start_locked(struct ifnet *ifp)
2253 struct sf_softc *sc;
2254 struct mbuf *m_head;
2260 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2261 IFF_DRV_RUNNING || sc->sf_link == 0)
2265 * Since we don't know when descriptor wrap occurrs in advance
2266 * limit available number of active Tx descriptor counter to be
2267 * higher than maximum number of DMA segments allowed in driver.
2269 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2270 sc->sf_cdata.sf_tx_cnt < SF_TX_DLIST_CNT - SF_MAXTXSEGS; ) {
2271 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2275 * Pack the data into the transmit ring. If we
2276 * don't have room, set the OACTIVE flag and wait
2277 * for the NIC to drain the ring.
2279 if (sf_encap(sc, &m_head)) {
2282 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2283 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2289 * If there's a BPF listener, bounce a copy of this frame
2292 ETHER_BPF_MTAP(ifp, m_head);
2296 bus_dmamap_sync(sc->sf_cdata.sf_tx_ring_tag,
2297 sc->sf_cdata.sf_tx_ring_map,
2298 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2299 /* Kick transmit. */
2300 csr_write_4(sc, SF_TXDQ_PRODIDX,
2301 sc->sf_cdata.sf_tx_prod * (sizeof(struct sf_tx_rdesc) / 8));
2303 /* Set a timeout in case the chip goes out to lunch. */
2304 sc->sf_watchdog_timer = 5;
2309 sf_stop(struct sf_softc *sc)
2311 struct sf_txdesc *txd;
2312 struct sf_rxdesc *rxd;
2320 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2322 callout_stop(&sc->sf_co);
2323 sc->sf_watchdog_timer = 0;
2325 /* Reading the ISR register clears all interrrupts. */
2326 csr_read_4(sc, SF_ISR);
2327 /* Disable further interrupts. */
2328 csr_write_4(sc, SF_IMR, 0);
2330 /* Disable Tx/Rx egine. */
2331 csr_write_4(sc, SF_GEN_ETH_CTL, 0);
2333 /* Give hardware chance to drain active DMA cycles. */
2336 csr_write_4(sc, SF_CQ_CONSIDX, 0);
2337 csr_write_4(sc, SF_CQ_PRODIDX, 0);
2338 csr_write_4(sc, SF_RXDQ_ADDR_Q1, 0);
2339 csr_write_4(sc, SF_RXDQ_CTL_1, 0);
2340 csr_write_4(sc, SF_RXDQ_PTR_Q1, 0);
2341 csr_write_4(sc, SF_TXCQ_CTL, 0);
2342 csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO, 0);
2343 csr_write_4(sc, SF_TXDQ_CTL, 0);
2346 * Free RX and TX mbufs still in the queues.
2348 for (i = 0; i < SF_RX_DLIST_CNT; i++) {
2349 rxd = &sc->sf_cdata.sf_rxdesc[i];
2350 if (rxd->rx_m != NULL) {
2351 bus_dmamap_sync(sc->sf_cdata.sf_rx_tag,
2352 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
2353 bus_dmamap_unload(sc->sf_cdata.sf_rx_tag,
2359 for (i = 0; i < SF_TX_DLIST_CNT; i++) {
2360 txd = &sc->sf_cdata.sf_txdesc[i];
2361 if (txd->tx_m != NULL) {
2362 bus_dmamap_sync(sc->sf_cdata.sf_tx_tag,
2363 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2364 bus_dmamap_unload(sc->sf_cdata.sf_tx_tag,
2376 struct sf_softc *sc;
2377 struct mii_data *mii;
2381 mii = device_get_softc(sc->sf_miibus);
2383 sf_stats_update(sc);
2385 callout_reset(&sc->sf_co, hz, sf_tick, sc);
2389 * Note: it is important that this function not be interrupted. We
2390 * use a two-stage register access scheme: if we are interrupted in
2391 * between setting the indirect address register and reading from the
2392 * indirect data register, the contents of the address register could
2393 * be changed out from under us.
2396 sf_stats_update(struct sf_softc *sc)
2399 struct sf_stats now, *stats, *nstats;
2407 stats->sf_tx_frames =
2408 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_FRAMES);
2409 stats->sf_tx_single_colls =
2410 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_SINGLE_COL);
2411 stats->sf_tx_multi_colls =
2412 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_MULTI_COL);
2413 stats->sf_tx_crcerrs =
2414 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_CRC_ERRS);
2415 stats->sf_tx_bytes =
2416 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_BYTES);
2417 stats->sf_tx_deferred =
2418 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_DEFERRED);
2419 stats->sf_tx_late_colls =
2420 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_LATE_COL);
2421 stats->sf_tx_pause_frames =
2422 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_PAUSE);
2423 stats->sf_tx_control_frames =
2424 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_CTL_FRAME);
2425 stats->sf_tx_excess_colls =
2426 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_EXCESS_COL);
2427 stats->sf_tx_excess_defer =
2428 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_EXCESS_DEF);
2429 stats->sf_tx_mcast_frames =
2430 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_MULTI);
2431 stats->sf_tx_bcast_frames =
2432 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_BCAST);
2433 stats->sf_tx_frames_lost =
2434 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_FRAME_LOST);
2435 stats->sf_rx_frames =
2436 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAMES);
2437 stats->sf_rx_crcerrs =
2438 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_CRC_ERRS);
2439 stats->sf_rx_alignerrs =
/*
 * Tail of the periodic statistics-update routine (the function header
 * lies above this extract, and some interior lines are elided).
 * The MAC exposes its counters as a block of 32-bit registers starting
 * at SF_STATS_BASE; each csr_read_4() below snapshots one counter into
 * the local `stats` structure.
 */
/* Harvest the remaining RX counters from the hardware block. */
2440 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_ALIGN_ERRS);
2441 stats->sf_rx_bytes =
2442 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_BYTES);
2443 stats->sf_rx_pause_frames =
2444 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_PAUSE);
2445 stats->sf_rx_control_frames =
2446 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_CTL_FRAME);
2447 stats->sf_rx_unsup_control_frames =
2448 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_UNSUP_FRAME);
2449 stats->sf_rx_giants =
2450 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_GIANTS);
2451 stats->sf_rx_runts =
2452 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_RUNTS);
2453 stats->sf_rx_jabbererrs =
2454 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_JABBER);
2455 stats->sf_rx_fragments =
2456 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAGMENTS);
/* Per-size-bucket RX frame histograms. */
2457 stats->sf_rx_pkts_64 =
2458 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_64);
2459 stats->sf_rx_pkts_65_127 =
2460 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_65_127);
2461 stats->sf_rx_pkts_128_255 =
2462 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_128_255);
2463 stats->sf_rx_pkts_256_511 =
2464 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_256_511);
2465 stats->sf_rx_pkts_512_1023 =
2466 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_512_1023);
2467 stats->sf_rx_pkts_1024_1518 =
2468 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_1024_1518);
2469 stats->sf_rx_frames_lost =
2470 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAME_LOST);
2471 /* Lower 16bits are valid. */
2472 stats->sf_tx_underruns =
2473 (csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_UNDERRUN) & 0xffff);
/*
 * The hardware counters are not clear-on-read, so zero the whole
 * statistics register window after snapshotting it; the next pass
 * then reads per-interval deltas.
 */
2475 /* Empty stats counter registers. */
2476 for (i = SF_STATS_BASE; i < (SF_STATS_END + 1); i += sizeof(uint32_t))
2477 csr_write_4(sc, i, 0);
/* Fold this interval's deltas into the generic ifnet counters. */
2479 ifp->if_opackets += (u_long)stats->sf_tx_frames;
2481 ifp->if_collisions += (u_long)stats->sf_tx_single_colls +
2482 (u_long)stats->sf_tx_multi_colls;
2484 ifp->if_oerrors += (u_long)stats->sf_tx_excess_colls +
2485 (u_long)stats->sf_tx_excess_defer +
2486 (u_long)stats->sf_tx_frames_lost;
2488 ifp->if_ipackets += (u_long)stats->sf_rx_frames;
2490 ifp->if_ierrors += (u_long)stats->sf_rx_crcerrs +
2491 (u_long)stats->sf_rx_alignerrs +
2492 (u_long)stats->sf_rx_giants +
2493 (u_long)stats->sf_rx_runts +
2494 (u_long)stats->sf_rx_jabbererrs +
2495 (u_long)stats->sf_rx_frames_lost;
/*
 * Accumulate the interval snapshot into the driver's running totals
 * (sc->sf_statistics), which sf_sysctl_stats() later reports.
 */
2497 nstats = &sc->sf_statistics;
2499 nstats->sf_tx_frames += stats->sf_tx_frames;
2500 nstats->sf_tx_single_colls += stats->sf_tx_single_colls;
2501 nstats->sf_tx_multi_colls += stats->sf_tx_multi_colls;
2502 nstats->sf_tx_crcerrs += stats->sf_tx_crcerrs;
2503 nstats->sf_tx_bytes += stats->sf_tx_bytes;
2504 nstats->sf_tx_deferred += stats->sf_tx_deferred;
2505 nstats->sf_tx_late_colls += stats->sf_tx_late_colls;
2506 nstats->sf_tx_pause_frames += stats->sf_tx_pause_frames;
2507 nstats->sf_tx_control_frames += stats->sf_tx_control_frames;
2508 nstats->sf_tx_excess_colls += stats->sf_tx_excess_colls;
2509 nstats->sf_tx_excess_defer += stats->sf_tx_excess_defer;
2510 nstats->sf_tx_mcast_frames += stats->sf_tx_mcast_frames;
2511 nstats->sf_tx_bcast_frames += stats->sf_tx_bcast_frames;
2512 nstats->sf_tx_frames_lost += stats->sf_tx_frames_lost;
2513 nstats->sf_rx_frames += stats->sf_rx_frames;
2514 nstats->sf_rx_crcerrs += stats->sf_rx_crcerrs;
2515 nstats->sf_rx_alignerrs += stats->sf_rx_alignerrs;
2516 nstats->sf_rx_bytes += stats->sf_rx_bytes;
2517 nstats->sf_rx_pause_frames += stats->sf_rx_pause_frames;
2518 nstats->sf_rx_control_frames += stats->sf_rx_control_frames;
2519 nstats->sf_rx_unsup_control_frames += stats->sf_rx_unsup_control_frames;
2520 nstats->sf_rx_giants += stats->sf_rx_giants;
2521 nstats->sf_rx_runts += stats->sf_rx_runts;
2522 nstats->sf_rx_jabbererrs += stats->sf_rx_jabbererrs;
2523 nstats->sf_rx_fragments += stats->sf_rx_fragments;
2524 nstats->sf_rx_pkts_64 += stats->sf_rx_pkts_64;
2525 nstats->sf_rx_pkts_65_127 += stats->sf_rx_pkts_65_127;
2526 nstats->sf_rx_pkts_128_255 += stats->sf_rx_pkts_128_255;
2527 nstats->sf_rx_pkts_256_511 += stats->sf_rx_pkts_256_511;
2528 nstats->sf_rx_pkts_512_1023 += stats->sf_rx_pkts_512_1023;
2529 nstats->sf_rx_pkts_1024_1518 += stats->sf_rx_pkts_1024_1518;
2530 nstats->sf_rx_frames_lost += stats->sf_rx_frames_lost;
2531 nstats->sf_tx_underruns += stats->sf_tx_underruns;
/*
 * sf_watchdog: transmit watchdog.  Decrements the per-softc timer and,
 * on expiry, reports the timeout, marks the interface down, and restarts
 * transmission if packets are still queued.
 * NOTE(review): the function prologue, locals, and several interior
 * lines (including the reinit path and closing brace) are elided from
 * this extract; presumably called periodically with the softc lock
 * held -- confirm against the full source.
 */
2535 sf_watchdog(struct sf_softc *sc)
/* Timer disarmed, or armed but not yet expired: nothing to do. */
2541 if (sc->sf_watchdog_timer == 0 || --sc->sf_watchdog_timer)
/* Distinguish a timeout with no link from one with stuck Tx descriptors. */
2547 if (sc->sf_link == 0) {
2549 if_printf(sc->sf_ifp, "watchdog timeout "
2552 if_printf(ifp, "watchdog timeout, %d Tx descs are active\n",
2553 sc->sf_cdata.sf_tx_cnt);
/* Force a reinitialization by clearing RUNNING. */
2555 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
/* Kick the transmitter again if the send queue is non-empty. */
2558 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2559 sf_start_locked(ifp);
/*
 * sf_shutdown: device shutdown method.  Only the softc lookup is
 * visible here; the stop/quiesce body is elided from this extract.
 */
2563 sf_shutdown(device_t dev)
2565 struct sf_softc *sc;
2567 sc = device_get_softc(dev);
/*
 * sf_suspend: device suspend method.  Marks the softc suspended and
 * chains to the generic bus suspend.  The lock acquisition/release and
 * any stop call are elided from this extract.
 */
2577 sf_suspend(device_t dev)
2579 struct sf_softc *sc;
2581 sc = device_get_softc(dev);
2585 sc->sf_suspended = 1;
2586 bus_generic_suspend(dev);
/*
 * sf_resume: device resume method.  Chains to the generic bus resume,
 * reinitializes the interface when it was administratively up (the
 * init call itself is elided here), and clears the suspended flag.
 */
2593 sf_resume(device_t dev)
2595 struct sf_softc *sc;
2598 sc = device_get_softc(dev);
2601 bus_generic_resume(dev);
/* Bring the interface back up only if it was up before suspend. */
2603 if ((ifp->if_flags & IFF_UP) != 0)
2606 sc->sf_suspended = 0;
/*
 * sf_sysctl_stats: sysctl handler that dumps the driver's accumulated
 * MAC statistics (sc->sf_statistics) to the console when the node is
 * written to.  A read, or a write that fails validation, returns
 * without printing.  Several interior lines (locals, the value check,
 * return statements, closing brace) are elided from this extract.
 *
 * NOTE(review): the TX prints cast to (uintmax_t) for %ju while the
 * RX prints cast to (uint64_t).  %ju formally requires uintmax_t;
 * this only works where uintmax_t is 64-bit.  Normalize the RX casts
 * to (uintmax_t) against the full source.
 */
2613 sf_sysctl_stats(SYSCTL_HANDLER_ARGS)
2615 struct sf_softc *sc;
2616 struct sf_stats *stats;
2621 error = sysctl_handle_int(oidp, &result, 0, req);
/* Nothing to do on error or on a plain read (no new value supplied). */
2623 if (error != 0 || req->newptr == NULL)
2629 sc = (struct sf_softc *)arg1;
2630 stats = &sc->sf_statistics;
/* Transmit-side counters. */
2632 printf("%s statistics:\n", device_get_nameunit(sc->sf_dev));
2633 printf("Transmit good frames : %ju\n",
2634 (uintmax_t)stats->sf_tx_frames);
2635 printf("Transmit good octets : %ju\n",
2636 (uintmax_t)stats->sf_tx_bytes);
2637 printf("Transmit single collisions : %u\n",
2638 stats->sf_tx_single_colls);
2639 printf("Transmit multiple collisions : %u\n",
2640 stats->sf_tx_multi_colls);
2641 printf("Transmit late collisions : %u\n",
2642 stats->sf_tx_late_colls);
2643 printf("Transmit abort due to excessive collisions : %u\n",
2644 stats->sf_tx_excess_colls);
2645 printf("Transmit CRC errors : %u\n",
2646 stats->sf_tx_crcerrs);
2647 printf("Transmit deferrals : %u\n",
2648 stats->sf_tx_deferred);
2649 printf("Transmit abort due to excessive deferrals : %u\n",
2650 stats->sf_tx_excess_defer);
2651 printf("Transmit pause control frames : %u\n",
2652 stats->sf_tx_pause_frames);
2653 printf("Transmit control frames : %u\n",
2654 stats->sf_tx_control_frames);
2655 printf("Transmit good multicast frames : %u\n",
2656 stats->sf_tx_mcast_frames);
2657 printf("Transmit good broadcast frames : %u\n",
2658 stats->sf_tx_bcast_frames);
2659 printf("Transmit frames lost due to internal transmit errors : %u\n",
2660 stats->sf_tx_frames_lost);
2661 printf("Transmit FIFO underflows : %u\n",
2662 stats->sf_tx_underruns);
2663 printf("Transmit GFP stalls : %u\n", stats->sf_tx_gfp_stall);
/* Receive-side counters (see NOTE above regarding the %ju casts). */
2664 printf("Receive good frames : %ju\n",
2665 (uint64_t)stats->sf_rx_frames);
2666 printf("Receive good octets : %ju\n",
2667 (uint64_t)stats->sf_rx_bytes);
2668 printf("Receive CRC errors : %u\n",
2669 stats->sf_rx_crcerrs);
2670 printf("Receive alignment errors : %u\n",
2671 stats->sf_rx_alignerrs);
2672 printf("Receive pause frames : %u\n",
2673 stats->sf_rx_pause_frames);
2674 printf("Receive control frames : %u\n",
2675 stats->sf_rx_control_frames);
2676 printf("Receive control frames with unsupported opcode : %u\n",
2677 stats->sf_rx_unsup_control_frames);
2678 printf("Receive frames too long : %u\n",
2679 stats->sf_rx_giants);
2680 printf("Receive frames too short : %u\n",
2681 stats->sf_rx_runts);
2682 printf("Receive frames jabber errors : %u\n",
2683 stats->sf_rx_jabbererrs);
2684 printf("Receive frames fragments : %u\n",
2685 stats->sf_rx_fragments);
2686 printf("Receive packets 64 bytes : %ju\n",
2687 (uint64_t)stats->sf_rx_pkts_64);
2688 printf("Receive packets 65 to 127 bytes : %ju\n",
2689 (uint64_t)stats->sf_rx_pkts_65_127);
2690 printf("Receive packets 128 to 255 bytes : %ju\n",
2691 (uint64_t)stats->sf_rx_pkts_128_255);
2692 printf("Receive packets 256 to 511 bytes : %ju\n",
2693 (uint64_t)stats->sf_rx_pkts_256_511);
2694 printf("Receive packets 512 to 1023 bytes : %ju\n",
2695 (uint64_t)stats->sf_rx_pkts_512_1023);
2696 printf("Receive packets 1024 to 1518 bytes : %ju\n",
2697 (uint64_t)stats->sf_rx_pkts_1024_1518);
2698 printf("Receive frames lost due to internal receive errors : %u\n",
2699 stats->sf_rx_frames_lost);
2700 printf("Receive GFP stalls : %u\n", stats->sf_rx_gfp_stall);
/*
 * sysctl_int_range: generic range-validated integer sysctl handler.
 * Copies the current value out to userland; on a write, accepts the
 * new value only when it lies within [low, high].  The declarations,
 * error returns, and closing brace are elided from this extract
 * (presumably `return (EINVAL)` on a value out of range -- confirm).
 */
2706 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
2712 value = *(int *)arg1;
2713 error = sysctl_handle_int(oidp, &value, 0, req);
/* Error, or read-only access: leave the stored value untouched. */
2714 if (error || !req->newptr)
/* Reject writes outside the permitted range. */
2716 if (value < low || value > high)
2718 *(int *)arg1 = value;
/*
 * sysctl_hw_sf_int_mod: interrupt-moderation tunable handler.  Thin
 * wrapper that constrains the value to [SF_IM_MIN, SF_IM_MAX] via
 * sysctl_int_range().  (Closing brace lies past this extract.)
 */
2724 sysctl_hw_sf_int_mod(SYSCTL_HANDLER_ARGS)
2727 return (sysctl_int_range(oidp, arg1, arg2, req, SF_IM_MIN, SF_IM_MAX));