2 * Copyright (c) 1997, 1998
3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
37 * VIA Rhine fast ethernet PCI NIC driver
39 * Supports various network adapters based on the VIA Rhine
40 * and Rhine II PCI controllers, including the D-Link DFE530TX.
41 * Datasheets are available at http://www.via.com.tw.
43 * Written by Bill Paul <wpaul@ctr.columbia.edu>
44 * Electrical Engineering Department
45 * Columbia University, New York City
 * The VIA Rhine controllers are similar in some respects to the
 * DEC tulip chips, but less complicated. The controller
51 * uses an MII bus and an external physical layer interface. The
52 * receiver has a one entry perfect filter and a 64-bit hash table
53 * multicast filter. Transmit and receive descriptors are similar
 * Some Rhine chips have a serious flaw in their transmit DMA mechanism:
 * transmit buffers must be longword aligned. Unfortunately,
 * FreeBSD doesn't guarantee that mbufs will be filled in starting
 * at longword boundaries, so we have to do a buffer copy before
 * transmission.
63 #ifdef HAVE_KERNEL_OPTION_HEADERS
64 #include "opt_device_polling.h"
67 #include <sys/param.h>
68 #include <sys/systm.h>
70 #include <sys/endian.h>
71 #include <sys/kernel.h>
72 #include <sys/malloc.h>
74 #include <sys/module.h>
76 #include <sys/socket.h>
77 #include <sys/sockio.h>
78 #include <sys/sysctl.h>
79 #include <sys/taskqueue.h>
83 #include <net/ethernet.h>
84 #include <net/if_dl.h>
85 #include <net/if_media.h>
86 #include <net/if_types.h>
87 #include <net/if_vlan_var.h>
89 #include <dev/mii/mii.h>
90 #include <dev/mii/miivar.h>
92 #include <dev/pci/pcireg.h>
93 #include <dev/pci/pcivar.h>
95 #include <machine/bus.h>
97 #include <dev/vr/if_vrreg.h>
99 /* "device miibus" required. See GENERIC if you get errors here. */
100 #include "miibus_if.h"
102 MODULE_DEPEND(vr, pci, 1, 1, 1);
103 MODULE_DEPEND(vr, ether, 1, 1, 1);
104 MODULE_DEPEND(vr, miibus, 1, 1, 1);
106 /* Define to show Rx/Tx error status. */
107 #undef VR_SHOW_ERRORS
108 #define VR_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
111 * Various supported device vendors/types, their names & quirks.
113 #define VR_Q_NEEDALIGN (1<<0)
114 #define VR_Q_CSUM (1<<1)
115 #define VR_Q_CAM (1<<2)
117 static struct vr_type {
123 { VIA_VENDORID, VIA_DEVICEID_RHINE,
125 "VIA VT3043 Rhine I 10/100BaseTX" },
126 { VIA_VENDORID, VIA_DEVICEID_RHINE_II,
128 "VIA VT86C100A Rhine II 10/100BaseTX" },
129 { VIA_VENDORID, VIA_DEVICEID_RHINE_II_2,
131 "VIA VT6102 Rhine II 10/100BaseTX" },
132 { VIA_VENDORID, VIA_DEVICEID_RHINE_III,
134 "VIA VT6105 Rhine III 10/100BaseTX" },
135 { VIA_VENDORID, VIA_DEVICEID_RHINE_III_M,
136 VR_Q_CSUM | VR_Q_CAM,
137 "VIA VT6105M Rhine III 10/100BaseTX" },
138 { DELTA_VENDORID, DELTA_DEVICEID_RHINE_II,
140 "Delta Electronics Rhine II 10/100BaseTX" },
141 { ADDTRON_VENDORID, ADDTRON_DEVICEID_RHINE_II,
143 "Addtron Technology Rhine II 10/100BaseTX" },
147 static int vr_probe(device_t);
148 static int vr_attach(device_t);
149 static int vr_detach(device_t);
150 static int vr_shutdown(device_t);
151 static int vr_suspend(device_t);
152 static int vr_resume(device_t);
154 static void vr_dmamap_cb(void *, bus_dma_segment_t *, int, int);
155 static int vr_dma_alloc(struct vr_softc *);
156 static void vr_dma_free(struct vr_softc *);
157 static __inline void vr_discard_rxbuf(struct vr_rxdesc *);
158 static int vr_newbuf(struct vr_softc *, int);
160 #ifndef __NO_STRICT_ALIGNMENT
161 static __inline void vr_fixup_rx(struct mbuf *);
163 static void vr_rxeof(struct vr_softc *);
164 static void vr_txeof(struct vr_softc *);
165 static void vr_tick(void *);
166 static int vr_error(struct vr_softc *, uint16_t);
167 static void vr_tx_underrun(struct vr_softc *);
168 static void vr_intr(void *);
169 static void vr_start(struct ifnet *);
170 static void vr_start_locked(struct ifnet *);
171 static int vr_encap(struct vr_softc *, struct mbuf **);
172 static int vr_ioctl(struct ifnet *, u_long, caddr_t);
173 static void vr_init(void *);
174 static void vr_init_locked(struct vr_softc *);
175 static void vr_tx_start(struct vr_softc *);
176 static void vr_rx_start(struct vr_softc *);
177 static int vr_tx_stop(struct vr_softc *);
178 static int vr_rx_stop(struct vr_softc *);
179 static void vr_stop(struct vr_softc *);
180 static void vr_watchdog(struct vr_softc *);
181 static int vr_ifmedia_upd(struct ifnet *);
182 static void vr_ifmedia_sts(struct ifnet *, struct ifmediareq *);
184 static int vr_miibus_readreg(device_t, int, int);
185 static int vr_miibus_writereg(device_t, int, int, int);
186 static void vr_miibus_statchg(device_t);
188 static void vr_link_task(void *, int);
189 static int vr_setperf(struct vr_softc *, int, uint8_t *);
190 static void vr_set_filter(struct vr_softc *);
191 static void vr_reset(const struct vr_softc *);
192 static int vr_tx_ring_init(struct vr_softc *);
193 static int vr_rx_ring_init(struct vr_softc *);
194 static void vr_setwol(struct vr_softc *);
195 static void vr_clrwol(struct vr_softc *);
196 static int vr_sysctl_stats(SYSCTL_HANDLER_ARGS);
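/*
 * Tx FIFO threshold table: pairs each threshold setting with its BCR1 and
 * TXCFG register encodings and the byte count it represents; the last entry
 * selects store-and-forward mode.
 */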
198 static struct vr_tx_threshold_table {
202 } vr_tx_threshold_tables[] = {
203 { VR_TXTHRESH_64BYTES, VR_BCR1_TXTHRESH64BYTES, 64 },
204 { VR_TXTHRESH_128BYTES, VR_BCR1_TXTHRESH128BYTES, 128 },
205 { VR_TXTHRESH_256BYTES, VR_BCR1_TXTHRESH256BYTES, 256 },
206 { VR_TXTHRESH_512BYTES, VR_BCR1_TXTHRESH512BYTES, 512 },
207 { VR_TXTHRESH_1024BYTES, VR_BCR1_TXTHRESH1024BYTES, 1024 },
208 { VR_TXTHRESH_STORENFWD, VR_BCR1_TXTHRESHSTORENFWD, 2048 }
211 static device_method_t vr_methods[] = {
212 /* Device interface */
213 DEVMETHOD(device_probe, vr_probe),
214 DEVMETHOD(device_attach, vr_attach),
215 DEVMETHOD(device_detach, vr_detach),
216 DEVMETHOD(device_shutdown, vr_shutdown),
217 DEVMETHOD(device_suspend, vr_suspend),
218 DEVMETHOD(device_resume, vr_resume),
221 DEVMETHOD(bus_print_child, bus_generic_print_child),
222 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
225 DEVMETHOD(miibus_readreg, vr_miibus_readreg),
226 DEVMETHOD(miibus_writereg, vr_miibus_writereg),
227 DEVMETHOD(miibus_statchg, vr_miibus_statchg),
228 DEVMETHOD(miibus_linkchg, vr_miibus_statchg),
233 static driver_t vr_driver = {
236 sizeof(struct vr_softc)
239 static devclass_t vr_devclass;
241 DRIVER_MODULE(vr, pci, vr_driver, vr_devclass, 0, 0);
242 DRIVER_MODULE(miibus, vr, miibus_driver, miibus_devclass, 0, 0);
245 vr_miibus_readreg(device_t dev, int phy, int reg)
250 sc = device_get_softc(dev);
251 if (sc->vr_phyaddr != phy)
254 /* Set the register address. */
255 CSR_WRITE_1(sc, VR_MIIADDR, reg);
256 VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB);
258 for (i = 0; i < VR_MII_TIMEOUT; i++) {
260 if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0)
263 if (i == VR_MII_TIMEOUT)
264 device_printf(sc->vr_dev, "phy read timeout %d:%d\n", phy, reg);
266 return (CSR_READ_2(sc, VR_MIIDATA));
270 vr_miibus_writereg(device_t dev, int phy, int reg, int data)
275 sc = device_get_softc(dev);
276 if (sc->vr_phyaddr != phy)
279 /* Set the register address and data to write. */
280 CSR_WRITE_1(sc, VR_MIIADDR, reg);
281 CSR_WRITE_2(sc, VR_MIIDATA, data);
282 VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB);
284 for (i = 0; i < VR_MII_TIMEOUT; i++) {
286 if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0)
289 if (i == VR_MII_TIMEOUT)
290 device_printf(sc->vr_dev, "phy write timeout %d:%d\n", phy,
297 vr_miibus_statchg(device_t dev)
301 sc = device_get_softc(dev);
302 taskqueue_enqueue(taskqueue_swi, &sc->vr_link_task);
306 * In order to fiddle with the
307 * 'full-duplex' and '100Mbps' bits in the netconfig register, we
308 * first have to put the transmit and/or receive logic in the idle state.
311 vr_link_task(void *arg, int pending)
314 struct mii_data *mii;
317 uint8_t cr0, cr1, fc;
319 sc = (struct vr_softc *)arg;
322 mii = device_get_softc(sc->vr_miibus);
324 if (mii == NULL || ifp == NULL ||
325 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
330 if (mii->mii_media_status & IFM_ACTIVE) {
331 if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
336 if (sc->vr_link != 0) {
337 cr0 = CSR_READ_1(sc, VR_CR0);
338 cr1 = CSR_READ_1(sc, VR_CR1);
339 mfdx = (cr1 & VR_CR1_FULLDUPLEX) != 0;
340 lfdx = (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0;
342 if ((cr0 & (VR_CR0_TX_ON | VR_CR0_RX_ON)) != 0) {
343 if (vr_tx_stop(sc) != 0 ||
344 vr_rx_stop(sc) != 0) {
345 device_printf(sc->vr_dev,
346 "%s: Tx/Rx shutdown error -- "
347 "resetting\n", __func__);
348 sc->vr_flags |= VR_F_RESTART;
354 cr1 |= VR_CR1_FULLDUPLEX;
356 cr1 &= ~VR_CR1_FULLDUPLEX;
357 CSR_WRITE_1(sc, VR_CR1, cr1);
361 /* Configure flow-control. */
362 if (sc->vr_revid >= REV_ID_VT6105_A0) {
363 fc = CSR_READ_1(sc, VR_FLOWCR1);
364 fc &= ~(VR_FLOWCR1_TXPAUSE | VR_FLOWCR1_RXPAUSE);
365 if ((IFM_OPTIONS(mii->mii_media_active) &
366 IFM_ETH_RXPAUSE) != 0)
367 fc |= VR_FLOWCR1_RXPAUSE;
368 if ((IFM_OPTIONS(mii->mii_media_active) &
369 IFM_ETH_TXPAUSE) != 0)
370 fc |= VR_FLOWCR1_TXPAUSE;
371 CSR_WRITE_1(sc, VR_FLOWCR1, fc);
372 } else if (sc->vr_revid >= REV_ID_VT6102_A) {
		/* No Tx pause capability available for Rhine II. */
374 fc = CSR_READ_1(sc, VR_MISC_CR0);
375 fc &= ~VR_MISCCR0_RXPAUSE;
376 if ((IFM_OPTIONS(mii->mii_media_active) &
377 IFM_ETH_RXPAUSE) != 0)
378 fc |= VR_MISCCR0_RXPAUSE;
379 CSR_WRITE_1(sc, VR_MISC_CR0, fc);
385 if (vr_tx_stop(sc) != 0 || vr_rx_stop(sc) != 0) {
386 device_printf(sc->vr_dev,
387 "%s: Tx/Rx shutdown error -- resetting\n",
389 sc->vr_flags |= VR_F_RESTART;
 * Copy the address 'mac' into the perfect RX filter entry at
 * offset 'idx'. The perfect filter only has 32 entries, so do
 * some sanity checks first.
403 vr_setperf(struct vr_softc *sc, int idx, uint8_t *mac)
407 if (idx < 0 || idx >= VR_CAM_MCAST_CNT || mac == NULL)
410 /* Set CAM entry address. */
411 CSR_WRITE_1(sc, VR_CAMADDR, idx);
412 /* Set CAM entry data. */
413 for (i = 0; i < ETHER_ADDR_LEN; i++)
414 CSR_WRITE_1(sc, VR_MAR0 + i, mac[i]);
415 /* Write CAM and wait for self-clear of VR_CAMCTL_WRITE bit. */
416 CSR_WRITE_1(sc, VR_CAMCTL,
417 VR_CAMCTL_ENA | VR_CAMCTL_MCAST | VR_CAMCTL_WRITE);
418 for (i = 0; i < VR_TIMEOUT; i++) {
420 if ((CSR_READ_1(sc, VR_CAMCTL) & VR_CAMCTL_WRITE) == 0)
425 device_printf(sc->vr_dev, "%s: setting CAM filter timeout!\n",
428 return (i == VR_TIMEOUT ? ETIMEDOUT : 0);
432 * Program the 64-bit multicast hash filter.
435 vr_set_filter(struct vr_softc *sc)
439 uint32_t hashes[2] = { 0, 0 };
440 struct ifmultiaddr *ifma;
448 rxfilt = CSR_READ_1(sc, VR_RXCFG);
	rxfilt &= ~(VR_RXCFG_RX_PROMISC | VR_RXCFG_RX_BROAD | VR_RXCFG_RX_MULTI);
450 if (ifp->if_flags & IFF_BROADCAST)
451 rxfilt |= VR_RXCFG_RX_BROAD;
452 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
453 rxfilt |= VR_RXCFG_RX_MULTI;
454 if (ifp->if_flags & IFF_PROMISC)
455 rxfilt |= VR_RXCFG_RX_PROMISC;
456 CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
457 CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
458 CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
462 /* Now program new ones. */
465 if ((sc->vr_quirks & VR_Q_CAM) != 0) {
		 * For hardware that has CAM capability, use the
		 * 32-entry multicast perfect filter.
472 CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_MCAST);
473 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
474 if (ifma->ifma_addr->sa_family != AF_LINK)
476 error = vr_setperf(sc, mcnt,
477 LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
482 cam_mask |= 1 << mcnt;
485 /* Enable multicast CAM entries depending on mask. */
486 CSR_WRITE_1(sc, VR_CAMMASK, cam_mask);
487 /* Accessing CAM done. */
488 CSR_WRITE_1(sc, VR_CAMCTL, 0);
492 if ((sc->vr_quirks & VR_Q_CAM) == 0 || error != 0) {
		 * If there are too many multicast addresses, or if setting
		 * the multicast CAM filter failed, fall back to hash-table
		 * based filtering.
498 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
499 if (ifma->ifma_addr->sa_family != AF_LINK)
501 h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
502 ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
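			/*
			 * The upper 6 bits of the CRC select one of 64 hash
			 * bits, split across the MAR0/MAR1 registers.
			 */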
504 hashes[0] |= (1 << h);
506 hashes[1] |= (1 << (h - 32));
513 rxfilt |= VR_RXCFG_RX_MULTI;
515 CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
516 CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
517 CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
521 vr_reset(const struct vr_softc *sc)
525 /*VR_LOCK_ASSERT(sc);*/ /* XXX: Called during attach w/o lock. */
527 CSR_WRITE_1(sc, VR_CR1, VR_CR1_RESET);
528 if (sc->vr_revid < REV_ID_VT6102_A) {
529 /* VT86C100A needs more delay after reset. */
532 for (i = 0; i < VR_TIMEOUT; i++) {
534 if (!(CSR_READ_1(sc, VR_CR1) & VR_CR1_RESET))
537 if (i == VR_TIMEOUT) {
538 if (sc->vr_revid < REV_ID_VT6102_A)
539 device_printf(sc->vr_dev, "reset never completed!\n");
541 /* Use newer force reset command. */
542 device_printf(sc->vr_dev,
543 "Using force reset command.\n");
544 VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST);
		 * Wait a little while for the chip to get its brains in gear.
556 * Probe for a VIA Rhine chip. Check the PCI vendor and device
557 * IDs against our list and return a match or NULL
559 static struct vr_type *
560 vr_match(device_t dev)
562 struct vr_type *t = vr_devs;
564 for (t = vr_devs; t->vr_name != NULL; t++)
565 if ((pci_get_vendor(dev) == t->vr_vid) &&
566 (pci_get_device(dev) == t->vr_did))
572 * Probe for a VIA Rhine chip. Check the PCI vendor and device
573 * IDs against our list and return a device name if we find a match.
576 vr_probe(device_t dev)
582 device_set_desc(dev, t->vr_name);
583 return (BUS_PROBE_DEFAULT);
589 * Attach the interface. Allocate softc structures, do ifmedia
590 * setup and ethernet/BPF attach.
593 vr_attach(device_t dev)
598 uint8_t eaddr[ETHER_ADDR_LEN];
602 sc = device_get_softc(dev);
605 KASSERT(t != NULL, ("Lost if_vr device match"));
606 sc->vr_quirks = t->vr_quirks;
607 device_printf(dev, "Quirks: 0x%x\n", sc->vr_quirks);
609 mtx_init(&sc->vr_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
611 callout_init_mtx(&sc->vr_stat_callout, &sc->vr_mtx, 0);
612 TASK_INIT(&sc->vr_link_task, 0, vr_link_task, sc);
613 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
614 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
615 OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
616 vr_sysctl_stats, "I", "Statistics");
621 * Map control/status registers.
623 pci_enable_busmaster(dev);
624 sc->vr_revid = pci_get_revid(dev);
625 device_printf(dev, "Revision: 0x%x\n", sc->vr_revid);
627 sc->vr_res_id = PCIR_BAR(0);
628 sc->vr_res_type = SYS_RES_IOPORT;
629 sc->vr_res = bus_alloc_resource_any(dev, sc->vr_res_type,
630 &sc->vr_res_id, RF_ACTIVE);
631 if (sc->vr_res == NULL) {
632 device_printf(dev, "couldn't map ports\n");
637 /* Allocate interrupt. */
639 sc->vr_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
640 RF_SHAREABLE | RF_ACTIVE);
642 if (sc->vr_irq == NULL) {
643 device_printf(dev, "couldn't map interrupt\n");
648 /* Allocate ifnet structure. */
649 ifp = sc->vr_ifp = if_alloc(IFT_ETHER);
651 device_printf(dev, "couldn't allocate ifnet structure\n");
656 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
657 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
658 ifp->if_ioctl = vr_ioctl;
659 ifp->if_start = vr_start;
660 ifp->if_init = vr_init;
661 IFQ_SET_MAXLEN(&ifp->if_snd, VR_TX_RING_CNT - 1);
662 ifp->if_snd.ifq_maxlen = VR_TX_RING_CNT - 1;
663 IFQ_SET_READY(&ifp->if_snd);
665 /* Configure Tx FIFO threshold. */
666 sc->vr_txthresh = VR_TXTHRESH_MIN;
667 if (sc->vr_revid < REV_ID_VT6105_A0) {
		 * Use store-and-forward mode for Rhine I/II.
		 * Otherwise they produce a lot of Tx underruns and
		 * it would take a while to arrive at a working FIFO
		 * threshold configuration.
674 sc->vr_txthresh = VR_TXTHRESH_MAX;
676 if ((sc->vr_quirks & VR_Q_CSUM) != 0) {
677 ifp->if_hwassist = VR_CSUM_FEATURES;
678 ifp->if_capabilities |= IFCAP_HWCSUM;
		 * To update the checksum field the hardware may need to
		 * store entire frames in the FIFO before transmitting.
683 sc->vr_txthresh = VR_TXTHRESH_MAX;
686 if (sc->vr_revid >= REV_ID_VT6102_A &&
687 pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
688 ifp->if_capabilities |= IFCAP_WOL_UCAST | IFCAP_WOL_MAGIC;
690 /* Rhine supports oversized VLAN frame. */
691 ifp->if_capabilities |= IFCAP_VLAN_MTU;
692 ifp->if_capenable = ifp->if_capabilities;
693 #ifdef DEVICE_POLLING
694 ifp->if_capabilities |= IFCAP_POLLING;
	 * Windows may put the chip in suspend mode when it
	 * shuts down. Be sure to kick it in the head to wake it
	 * up again.
702 if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
703 VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));
	 * Get station address. The way the Rhine chips work, you're not
	 * allowed to directly access the EEPROM once they've been programmed
	 * a special way. Consequently, we need to read the node address from
	 * the PAR0 and PAR1 registers.
	 * Reloading the EEPROM also overwrites VR_CFGA, VR_CFGB, VR_CFGC and
	 * VR_CFGD, such that the memory-mapped I/O configured by the driver
	 * is reset to its default state.
715 VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
716 for (i = VR_TIMEOUT; i > 0; i--) {
718 if ((CSR_READ_1(sc, VR_EECSR) & VR_EECSR_LOAD) == 0)
722 device_printf(dev, "Reloading EEPROM timeout!\n");
723 for (i = 0; i < ETHER_ADDR_LEN; i++)
724 eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);
726 /* Reset the adapter. */
728 /* Ack intr & disable further interrupts. */
729 CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
730 CSR_WRITE_2(sc, VR_IMR, 0);
731 if (sc->vr_revid >= REV_ID_VT6102_A)
732 CSR_WRITE_2(sc, VR_MII_IMR, 0);
734 if (sc->vr_revid < REV_ID_VT6102_A) {
735 pci_write_config(dev, VR_PCI_MODE2,
736 pci_read_config(dev, VR_PCI_MODE2, 1) |
737 VR_MODE2_MODE10T, 1);
739 /* Report error instead of retrying forever. */
740 pci_write_config(dev, VR_PCI_MODE2,
741 pci_read_config(dev, VR_PCI_MODE2, 1) |
742 VR_MODE2_PCEROPT, 1);
743 /* Detect MII coding error. */
744 pci_write_config(dev, VR_PCI_MODE3,
745 pci_read_config(dev, VR_PCI_MODE3, 1) |
747 if (sc->vr_revid >= REV_ID_VT6105_LOM &&
748 sc->vr_revid < REV_ID_VT6105M_A0)
749 pci_write_config(dev, VR_PCI_MODE2,
750 pci_read_config(dev, VR_PCI_MODE2, 1) |
751 VR_MODE2_MODE10T, 1);
752 /* Enable Memory-Read-Multiple. */
753 if (sc->vr_revid >= REV_ID_VT6107_A1 &&
754 sc->vr_revid < REV_ID_VT6105M_A0)
755 pci_write_config(dev, VR_PCI_MODE2,
756 pci_read_config(dev, VR_PCI_MODE2, 1) |
759 /* Disable MII AUTOPOLL. */
760 VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL);
762 if (vr_dma_alloc(sc) != 0) {
767 /* Save PHY address. */
768 if (sc->vr_revid >= REV_ID_VT6105_A0)
771 sc->vr_phyaddr = CSR_READ_1(sc, VR_PHYADDR) & VR_PHYADDR_MASK;
774 if (mii_phy_probe(dev, &sc->vr_miibus,
775 vr_ifmedia_upd, vr_ifmedia_sts)) {
776 device_printf(dev, "MII without any phy!\n");
781 /* Call MI attach routine. */
782 ether_ifattach(ifp, eaddr);
784 * Tell the upper layer(s) we support long frames.
785 * Must appear after the call to ether_ifattach() because
786 * ether_ifattach() sets ifi_hdrlen to the default value.
788 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
790 /* Hook interrupt last to avoid having to lock softc. */
791 error = bus_setup_intr(dev, sc->vr_irq, INTR_TYPE_NET | INTR_MPSAFE,
792 NULL, vr_intr, sc, &sc->vr_intrhand);
795 device_printf(dev, "couldn't set up irq\n");
 * Shut down the hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case, so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
815 vr_detach(device_t dev)
817 struct vr_softc *sc = device_get_softc(dev);
818 struct ifnet *ifp = sc->vr_ifp;
820 KASSERT(mtx_initialized(&sc->vr_mtx), ("vr mutex not initialized"));
822 #ifdef DEVICE_POLLING
823 if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
824 ether_poll_deregister(ifp);
827 /* These should only be active if attach succeeded. */
828 if (device_is_attached(dev)) {
833 callout_drain(&sc->vr_stat_callout);
834 taskqueue_drain(taskqueue_swi, &sc->vr_link_task);
838 device_delete_child(dev, sc->vr_miibus);
839 bus_generic_detach(dev);
842 bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
844 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
846 bus_release_resource(dev, sc->vr_res_type, sc->vr_res_id,
854 mtx_destroy(&sc->vr_mtx);
859 struct vr_dmamap_arg {
860 bus_addr_t vr_busaddr;
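/* bus_dmamap_load() callback: record the bus address of the lone segment. */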
864 vr_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
866 struct vr_dmamap_arg *ctx;
871 ctx->vr_busaddr = segs[0].ds_addr;
875 vr_dma_alloc(struct vr_softc *sc)
877 struct vr_dmamap_arg ctx;
878 struct vr_txdesc *txd;
879 struct vr_rxdesc *rxd;
880 bus_size_t tx_alignment;
883 /* Create parent DMA tag. */
884 error = bus_dma_tag_create(
885 bus_get_dma_tag(sc->vr_dev), /* parent */
886 1, 0, /* alignment, boundary */
887 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
888 BUS_SPACE_MAXADDR, /* highaddr */
889 NULL, NULL, /* filter, filterarg */
890 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
892 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
894 NULL, NULL, /* lockfunc, lockarg */
895 &sc->vr_cdata.vr_parent_tag);
897 device_printf(sc->vr_dev, "failed to create parent DMA tag\n");
900 /* Create tag for Tx ring. */
901 error = bus_dma_tag_create(
902 sc->vr_cdata.vr_parent_tag, /* parent */
903 VR_RING_ALIGN, 0, /* alignment, boundary */
904 BUS_SPACE_MAXADDR, /* lowaddr */
905 BUS_SPACE_MAXADDR, /* highaddr */
906 NULL, NULL, /* filter, filterarg */
907 VR_TX_RING_SIZE, /* maxsize */
909 VR_TX_RING_SIZE, /* maxsegsize */
911 NULL, NULL, /* lockfunc, lockarg */
912 &sc->vr_cdata.vr_tx_ring_tag);
914 device_printf(sc->vr_dev, "failed to create Tx ring DMA tag\n");
918 /* Create tag for Rx ring. */
919 error = bus_dma_tag_create(
920 sc->vr_cdata.vr_parent_tag, /* parent */
921 VR_RING_ALIGN, 0, /* alignment, boundary */
922 BUS_SPACE_MAXADDR, /* lowaddr */
923 BUS_SPACE_MAXADDR, /* highaddr */
924 NULL, NULL, /* filter, filterarg */
925 VR_RX_RING_SIZE, /* maxsize */
927 VR_RX_RING_SIZE, /* maxsegsize */
929 NULL, NULL, /* lockfunc, lockarg */
930 &sc->vr_cdata.vr_rx_ring_tag);
932 device_printf(sc->vr_dev, "failed to create Rx ring DMA tag\n");
936 if ((sc->vr_quirks & VR_Q_NEEDALIGN) != 0)
937 tx_alignment = sizeof(uint32_t);
940 /* Create tag for Tx buffers. */
941 error = bus_dma_tag_create(
942 sc->vr_cdata.vr_parent_tag, /* parent */
943 tx_alignment, 0, /* alignment, boundary */
944 BUS_SPACE_MAXADDR, /* lowaddr */
945 BUS_SPACE_MAXADDR, /* highaddr */
946 NULL, NULL, /* filter, filterarg */
947 MCLBYTES * VR_MAXFRAGS, /* maxsize */
948 VR_MAXFRAGS, /* nsegments */
949 MCLBYTES, /* maxsegsize */
951 NULL, NULL, /* lockfunc, lockarg */
952 &sc->vr_cdata.vr_tx_tag);
954 device_printf(sc->vr_dev, "failed to create Tx DMA tag\n");
958 /* Create tag for Rx buffers. */
959 error = bus_dma_tag_create(
960 sc->vr_cdata.vr_parent_tag, /* parent */
961 VR_RX_ALIGN, 0, /* alignment, boundary */
962 BUS_SPACE_MAXADDR, /* lowaddr */
963 BUS_SPACE_MAXADDR, /* highaddr */
964 NULL, NULL, /* filter, filterarg */
965 MCLBYTES, /* maxsize */
967 MCLBYTES, /* maxsegsize */
969 NULL, NULL, /* lockfunc, lockarg */
970 &sc->vr_cdata.vr_rx_tag);
972 device_printf(sc->vr_dev, "failed to create Rx DMA tag\n");
976 /* Allocate DMA'able memory and load the DMA map for Tx ring. */
977 error = bus_dmamem_alloc(sc->vr_cdata.vr_tx_ring_tag,
978 (void **)&sc->vr_rdata.vr_tx_ring, BUS_DMA_WAITOK |
979 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->vr_cdata.vr_tx_ring_map);
981 device_printf(sc->vr_dev,
982 "failed to allocate DMA'able memory for Tx ring\n");
987 error = bus_dmamap_load(sc->vr_cdata.vr_tx_ring_tag,
988 sc->vr_cdata.vr_tx_ring_map, sc->vr_rdata.vr_tx_ring,
989 VR_TX_RING_SIZE, vr_dmamap_cb, &ctx, 0);
990 if (error != 0 || ctx.vr_busaddr == 0) {
991 device_printf(sc->vr_dev,
992 "failed to load DMA'able memory for Tx ring\n");
995 sc->vr_rdata.vr_tx_ring_paddr = ctx.vr_busaddr;
997 /* Allocate DMA'able memory and load the DMA map for Rx ring. */
998 error = bus_dmamem_alloc(sc->vr_cdata.vr_rx_ring_tag,
999 (void **)&sc->vr_rdata.vr_rx_ring, BUS_DMA_WAITOK |
1000 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->vr_cdata.vr_rx_ring_map);
1002 device_printf(sc->vr_dev,
1003 "failed to allocate DMA'able memory for Rx ring\n");
1008 error = bus_dmamap_load(sc->vr_cdata.vr_rx_ring_tag,
1009 sc->vr_cdata.vr_rx_ring_map, sc->vr_rdata.vr_rx_ring,
1010 VR_RX_RING_SIZE, vr_dmamap_cb, &ctx, 0);
1011 if (error != 0 || ctx.vr_busaddr == 0) {
1012 device_printf(sc->vr_dev,
1013 "failed to load DMA'able memory for Rx ring\n");
1016 sc->vr_rdata.vr_rx_ring_paddr = ctx.vr_busaddr;
1018 /* Create DMA maps for Tx buffers. */
1019 for (i = 0; i < VR_TX_RING_CNT; i++) {
1020 txd = &sc->vr_cdata.vr_txdesc[i];
1022 txd->tx_dmamap = NULL;
1023 error = bus_dmamap_create(sc->vr_cdata.vr_tx_tag, 0,
1026 device_printf(sc->vr_dev,
1027 "failed to create Tx dmamap\n");
1031 /* Create DMA maps for Rx buffers. */
1032 if ((error = bus_dmamap_create(sc->vr_cdata.vr_rx_tag, 0,
1033 &sc->vr_cdata.vr_rx_sparemap)) != 0) {
1034 device_printf(sc->vr_dev,
1035 "failed to create spare Rx dmamap\n");
1038 for (i = 0; i < VR_RX_RING_CNT; i++) {
1039 rxd = &sc->vr_cdata.vr_rxdesc[i];
1041 rxd->rx_dmamap = NULL;
1042 error = bus_dmamap_create(sc->vr_cdata.vr_rx_tag, 0,
1045 device_printf(sc->vr_dev,
1046 "failed to create Rx dmamap\n");
1056 vr_dma_free(struct vr_softc *sc)
1058 struct vr_txdesc *txd;
1059 struct vr_rxdesc *rxd;
1063 if (sc->vr_cdata.vr_tx_ring_tag) {
1064 if (sc->vr_cdata.vr_tx_ring_map)
1065 bus_dmamap_unload(sc->vr_cdata.vr_tx_ring_tag,
1066 sc->vr_cdata.vr_tx_ring_map);
1067 if (sc->vr_cdata.vr_tx_ring_map &&
1068 sc->vr_rdata.vr_tx_ring)
1069 bus_dmamem_free(sc->vr_cdata.vr_tx_ring_tag,
1070 sc->vr_rdata.vr_tx_ring,
1071 sc->vr_cdata.vr_tx_ring_map);
1072 sc->vr_rdata.vr_tx_ring = NULL;
1073 sc->vr_cdata.vr_tx_ring_map = NULL;
1074 bus_dma_tag_destroy(sc->vr_cdata.vr_tx_ring_tag);
1075 sc->vr_cdata.vr_tx_ring_tag = NULL;
1078 if (sc->vr_cdata.vr_rx_ring_tag) {
1079 if (sc->vr_cdata.vr_rx_ring_map)
1080 bus_dmamap_unload(sc->vr_cdata.vr_rx_ring_tag,
1081 sc->vr_cdata.vr_rx_ring_map);
1082 if (sc->vr_cdata.vr_rx_ring_map &&
1083 sc->vr_rdata.vr_rx_ring)
1084 bus_dmamem_free(sc->vr_cdata.vr_rx_ring_tag,
1085 sc->vr_rdata.vr_rx_ring,
1086 sc->vr_cdata.vr_rx_ring_map);
1087 sc->vr_rdata.vr_rx_ring = NULL;
1088 sc->vr_cdata.vr_rx_ring_map = NULL;
1089 bus_dma_tag_destroy(sc->vr_cdata.vr_rx_ring_tag);
1090 sc->vr_cdata.vr_rx_ring_tag = NULL;
1093 if (sc->vr_cdata.vr_tx_tag) {
1094 for (i = 0; i < VR_TX_RING_CNT; i++) {
1095 txd = &sc->vr_cdata.vr_txdesc[i];
1096 if (txd->tx_dmamap) {
1097 bus_dmamap_destroy(sc->vr_cdata.vr_tx_tag,
1099 txd->tx_dmamap = NULL;
1102 bus_dma_tag_destroy(sc->vr_cdata.vr_tx_tag);
1103 sc->vr_cdata.vr_tx_tag = NULL;
1106 if (sc->vr_cdata.vr_rx_tag) {
1107 for (i = 0; i < VR_RX_RING_CNT; i++) {
1108 rxd = &sc->vr_cdata.vr_rxdesc[i];
1109 if (rxd->rx_dmamap) {
1110 bus_dmamap_destroy(sc->vr_cdata.vr_rx_tag,
1112 rxd->rx_dmamap = NULL;
1115 if (sc->vr_cdata.vr_rx_sparemap) {
1116 bus_dmamap_destroy(sc->vr_cdata.vr_rx_tag,
1117 sc->vr_cdata.vr_rx_sparemap);
1118 sc->vr_cdata.vr_rx_sparemap = 0;
1120 bus_dma_tag_destroy(sc->vr_cdata.vr_rx_tag);
1121 sc->vr_cdata.vr_rx_tag = NULL;
1124 if (sc->vr_cdata.vr_parent_tag) {
1125 bus_dma_tag_destroy(sc->vr_cdata.vr_parent_tag);
1126 sc->vr_cdata.vr_parent_tag = NULL;
1131 * Initialize the transmit descriptors.
1134 vr_tx_ring_init(struct vr_softc *sc)
1136 struct vr_ring_data *rd;
1137 struct vr_txdesc *txd;
1141 sc->vr_cdata.vr_tx_prod = 0;
1142 sc->vr_cdata.vr_tx_cons = 0;
1143 sc->vr_cdata.vr_tx_cnt = 0;
1144 sc->vr_cdata.vr_tx_pkts = 0;
1147 bzero(rd->vr_tx_ring, VR_TX_RING_SIZE);
1148 for (i = 0; i < VR_TX_RING_CNT; i++) {
1149 if (i == VR_TX_RING_CNT - 1)
1150 addr = VR_TX_RING_ADDR(sc, 0);
1152 addr = VR_TX_RING_ADDR(sc, i + 1);
1153 rd->vr_tx_ring[i].vr_nextphys = htole32(VR_ADDR_LO(addr));
1154 txd = &sc->vr_cdata.vr_txdesc[i];
1158 bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
1159 sc->vr_cdata.vr_tx_ring_map,
1160 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1166 * Initialize the RX descriptors and allocate mbufs for them. Note that
1167 * we arrange the descriptors in a closed ring, so that the last descriptor
1168 * points back to the first.
1171 vr_rx_ring_init(struct vr_softc *sc)
1173 struct vr_ring_data *rd;
1174 struct vr_rxdesc *rxd;
1178 sc->vr_cdata.vr_rx_cons = 0;
1181 bzero(rd->vr_rx_ring, VR_RX_RING_SIZE);
1182 for (i = 0; i < VR_RX_RING_CNT; i++) {
1183 rxd = &sc->vr_cdata.vr_rxdesc[i];
1185 rxd->desc = &rd->vr_rx_ring[i];
1186 if (i == VR_RX_RING_CNT - 1)
1187 addr = VR_RX_RING_ADDR(sc, 0);
1189 addr = VR_RX_RING_ADDR(sc, i + 1);
1190 rd->vr_rx_ring[i].vr_nextphys = htole32(VR_ADDR_LO(addr));
1191 if (vr_newbuf(sc, i) != 0)
1195 bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag,
1196 sc->vr_cdata.vr_rx_ring_map,
1197 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
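/*
 * Reuse the mbuf cluster already attached to an Rx descriptor: restore the
 * full buffer length in the control word and hand ownership back to the chip.
 */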
1202 static __inline void
1203 vr_discard_rxbuf(struct vr_rxdesc *rxd)
1205 struct vr_desc *desc;
1208 desc->vr_ctl = htole32(VR_RXCTL | (MCLBYTES - sizeof(uint64_t)));
1209 desc->vr_status = htole32(VR_RXSTAT_OWN);
1213 * Initialize an RX descriptor and attach an MBUF cluster.
1214 * Note: the length fields are only 11 bits wide, which means the
1215 * largest size we can specify is 2047. This is important because
1216 * MCLBYTES is 2048, so we have to subtract one otherwise we'll
1217 * overflow the field and make a mess.
1220 vr_newbuf(struct vr_softc *sc, int idx)
1222 struct vr_desc *desc;
1223 struct vr_rxdesc *rxd;
1225 bus_dma_segment_t segs[1];
1229 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1232 m->m_len = m->m_pkthdr.len = MCLBYTES;
1233 m_adj(m, sizeof(uint64_t));
1235 if (bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_rx_tag,
1236 sc->vr_cdata.vr_rx_sparemap, m, segs, &nsegs, 0) != 0) {
1240 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1242 rxd = &sc->vr_cdata.vr_rxdesc[idx];
1243 if (rxd->rx_m != NULL) {
1244 bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap,
1245 BUS_DMASYNC_POSTREAD);
1246 bus_dmamap_unload(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap);
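	/* Swap in the just-loaded spare map; the old map becomes the new spare. */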
1248 map = rxd->rx_dmamap;
1249 rxd->rx_dmamap = sc->vr_cdata.vr_rx_sparemap;
1250 sc->vr_cdata.vr_rx_sparemap = map;
1251 bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap,
1252 BUS_DMASYNC_PREREAD);
1255 desc->vr_data = htole32(VR_ADDR_LO(segs[0].ds_addr));
1256 desc->vr_ctl = htole32(VR_RXCTL | segs[0].ds_len);
1257 desc->vr_status = htole32(VR_RXSTAT_OWN);
1262 #ifndef __NO_STRICT_ALIGNMENT
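/*
 * Copy the received frame down by ETHER_ALIGN (2) bytes so that the IP
 * header following the 14-byte Ethernet header ends up 32-bit aligned on
 * strict-alignment platforms.
 */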
1263 static __inline void
1264 vr_fixup_rx(struct mbuf *m)
1266 uint16_t *src, *dst;
1269 src = mtod(m, uint16_t *);
1272 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
1275 m->m_data -= ETHER_ALIGN;
1280 * A frame has been uploaded: pass the resulting mbuf chain up to
1281 * the higher level protocols.
1284 vr_rxeof(struct vr_softc *sc)
1286 struct vr_rxdesc *rxd;
1289 struct vr_desc *cur_rx;
1290 int cons, prog, total_len;
1291 uint32_t rxstat, rxctl;
1295 cons = sc->vr_cdata.vr_rx_cons;
1297 bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag,
1298 sc->vr_cdata.vr_rx_ring_map,
1299 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1301 for (prog = 0; prog < VR_RX_RING_CNT; VR_INC(cons, VR_RX_RING_CNT)) {
1302 #ifdef DEVICE_POLLING
1303 if (ifp->if_capenable & IFCAP_POLLING) {
1304 if (sc->rxcycles <= 0)
1309 cur_rx = &sc->vr_rdata.vr_rx_ring[cons];
1310 rxstat = le32toh(cur_rx->vr_status);
1311 rxctl = le32toh(cur_rx->vr_ctl);
1312 if ((rxstat & VR_RXSTAT_OWN) == VR_RXSTAT_OWN)
1316 rxd = &sc->vr_cdata.vr_rxdesc[cons];
1320 * If an error occurs, update stats, clear the
1321 * status word and leave the mbuf cluster in place:
1322 * it should simply get re-used next time this descriptor
1323 * comes up in the ring.
		 * We don't support SG in the Rx path yet, so discard
		 * partial frames.
1327 if ((rxstat & (VR_RXSTAT_RXERR | VR_RXSTAT_FIRSTFRAG |
1328 VR_RXSTAT_LASTFRAG)) !=
1329 (VR_RXSTAT_FIRSTFRAG | VR_RXSTAT_LASTFRAG)) {
1331 sc->vr_stat.rx_errors++;
1332 if (rxstat & VR_RXSTAT_CRCERR)
1333 sc->vr_stat.rx_crc_errors++;
1334 if (rxstat & VR_RXSTAT_FRAMEALIGNERR)
1335 sc->vr_stat.rx_alignment++;
1336 if (rxstat & VR_RXSTAT_FIFOOFLOW)
1337 sc->vr_stat.rx_fifo_overflows++;
1338 if (rxstat & VR_RXSTAT_GIANT)
1339 sc->vr_stat.rx_giants++;
1340 if (rxstat & VR_RXSTAT_RUNT)
1341 sc->vr_stat.rx_runts++;
1342 if (rxstat & VR_RXSTAT_BUFFERR)
1343 sc->vr_stat.rx_no_buffers++;
1344 #ifdef VR_SHOW_ERRORS
1345 device_printf(sc->vr_dev, "%s: receive error = 0x%b\n",
1346 __func__, rxstat & 0xff, VR_RXSTAT_ERR_BITS);
1348 vr_discard_rxbuf(rxd);
1352 if (vr_newbuf(sc, cons) != 0) {
1354 sc->vr_stat.rx_errors++;
1355 sc->vr_stat.rx_no_mbufs++;
1356 vr_discard_rxbuf(rxd);
1361 * XXX The VIA Rhine chip includes the CRC with every
1362 * received frame, and there's no way to turn this
1363 * behavior off (at least, I can't find anything in
1364 * the manual that explains how to do it) so we have
1365 * to trim off the CRC manually.
1367 total_len = VR_RXBYTES(rxstat);
1368 total_len -= ETHER_CRC_LEN;
1369 m->m_pkthdr.len = m->m_len = total_len;
1370 #ifndef __NO_STRICT_ALIGNMENT
1372 * RX buffers must be 32-bit aligned.
1373 * Ignore the alignment problems on the non-strict alignment
1374 * platform. The performance hit incurred due to unaligned
1375 * accesses is much smaller than the hit produced by forcing
1376 * buffer copies all the time.
1380 m->m_pkthdr.rcvif = ifp;
1382 sc->vr_stat.rx_ok++;
1383 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
1384 (rxstat & VR_RXSTAT_FRAG) == 0 &&
1385 (rxctl & VR_RXCTL_IP) != 0) {
1386 /* Checksum is valid for non-fragmented IP packets. */
1387 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1388 if ((rxctl & VR_RXCTL_IPOK) == VR_RXCTL_IPOK) {
1389 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1390 if (rxctl & (VR_RXCTL_TCP | VR_RXCTL_UDP)) {
1391 m->m_pkthdr.csum_flags |=
1392 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1393 if ((rxctl & VR_RXCTL_TCPUDPOK) != 0)
1394 m->m_pkthdr.csum_data = 0xffff;
1399 (*ifp->if_input)(ifp, m);
1404 sc->vr_cdata.vr_rx_cons = cons;
1405 bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag,
1406 sc->vr_cdata.vr_rx_ring_map,
1407 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
1416 vr_txeof(struct vr_softc *sc)
1418 struct vr_txdesc *txd;
1419 struct vr_desc *cur_tx;
1421 uint32_t txctl, txstat;
1426 cons = sc->vr_cdata.vr_tx_cons;
1427 prod = sc->vr_cdata.vr_tx_prod;
1431 bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
1432 sc->vr_cdata.vr_tx_ring_map,
1433 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1437 * Go through our tx list and free mbufs for those
1438 * frames that have been transmitted.
1440 for (; cons != prod; VR_INC(cons, VR_TX_RING_CNT)) {
1441 cur_tx = &sc->vr_rdata.vr_tx_ring[cons];
1442 txctl = le32toh(cur_tx->vr_ctl);
1443 txstat = le32toh(cur_tx->vr_status);
1444 if ((txstat & VR_TXSTAT_OWN) == VR_TXSTAT_OWN)
1447 sc->vr_cdata.vr_tx_cnt--;
1448 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1449 /* Only the first descriptor in the chain is valid. */
1450 if ((txctl & VR_TXCTL_FIRSTFRAG) == 0)
1453 txd = &sc->vr_cdata.vr_txdesc[cons];
1454 KASSERT(txd->tx_m != NULL, ("%s: accessing NULL mbuf!\n",
1457 if ((txstat & VR_TXSTAT_ERRSUM) != 0) {
1459 sc->vr_stat.tx_errors++;
1460 if ((txstat & VR_TXSTAT_ABRT) != 0) {
1461 /* Give up and restart Tx. */
1462 sc->vr_stat.tx_abort++;
1463 bus_dmamap_sync(sc->vr_cdata.vr_tx_tag,
1464 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
1465 bus_dmamap_unload(sc->vr_cdata.vr_tx_tag,
1469 VR_INC(cons, VR_TX_RING_CNT);
1470 sc->vr_cdata.vr_tx_cons = cons;
1471 if (vr_tx_stop(sc) != 0) {
1472 device_printf(sc->vr_dev,
1473 "%s: Tx shutdown error -- "
1474 "resetting\n", __func__);
1475 sc->vr_flags |= VR_F_RESTART;
1481 if ((sc->vr_revid < REV_ID_VT3071_A &&
1482 (txstat & VR_TXSTAT_UNDERRUN)) ||
1483 (txstat & (VR_TXSTAT_UDF | VR_TXSTAT_TBUFF))) {
1484 sc->vr_stat.tx_underrun++;
1485 /* Retry and restart Tx. */
1486 sc->vr_cdata.vr_tx_cnt++;
1487 sc->vr_cdata.vr_tx_cons = cons;
1488 cur_tx->vr_status = htole32(VR_TXSTAT_OWN);
1489 bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
1490 sc->vr_cdata.vr_tx_ring_map,
1491 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1495 if ((txstat & VR_TXSTAT_DEFER) != 0) {
1496 ifp->if_collisions++;
1497 sc->vr_stat.tx_collisions++;
1499 if ((txstat & VR_TXSTAT_LATECOLL) != 0) {
1500 ifp->if_collisions++;
1501 sc->vr_stat.tx_late_collisions++;
1504 sc->vr_stat.tx_ok++;
1508 bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap,
1509 BUS_DMASYNC_POSTWRITE);
1510 bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap);
1511 if (sc->vr_revid < REV_ID_VT3071_A) {
1512 ifp->if_collisions +=
1513 (txstat & VR_TXSTAT_COLLCNT) >> 3;
1514 sc->vr_stat.tx_collisions +=
1515 (txstat & VR_TXSTAT_COLLCNT) >> 3;
1517 ifp->if_collisions += (txstat & 0x0f);
1518 sc->vr_stat.tx_collisions += (txstat & 0x0f);
1524 sc->vr_cdata.vr_tx_cons = cons;
1525 if (sc->vr_cdata.vr_tx_cnt == 0)
1526 sc->vr_watchdog_timer = 0;
1532 struct vr_softc *sc;
1533 struct mii_data *mii;
1535 sc = (struct vr_softc *)xsc;
1539 if ((sc->vr_flags & VR_F_RESTART) != 0) {
1540 device_printf(sc->vr_dev, "restarting\n");
1541 sc->vr_stat.num_restart++;
1545 sc->vr_flags &= ~VR_F_RESTART;
1548 mii = device_get_softc(sc->vr_miibus);
1551 callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc);
1554 #ifdef DEVICE_POLLING
1555 static poll_handler_t vr_poll;
1556 static poll_handler_t vr_poll_locked;
1559 vr_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1561 struct vr_softc *sc;
1566 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1567 vr_poll_locked(ifp, cmd, count);
1572 vr_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
1574 struct vr_softc *sc;
1580 sc->rxcycles = count;
1583 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1584 vr_start_locked(ifp);
1586 if (cmd == POLL_AND_CHECK_STATUS) {
1589 /* Also check status register. */
1590 status = CSR_READ_2(sc, VR_ISR);
1592 CSR_WRITE_2(sc, VR_ISR, status);
1594 if ((status & VR_INTRS) == 0)
1597 if ((status & (VR_ISR_BUSERR | VR_ISR_LINKSTAT2 |
1598 VR_ISR_STATSOFLOW)) != 0) {
1599 if (vr_error(sc, status) != 0)
1602 if ((status & (VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW)) != 0) {
1603 #ifdef VR_SHOW_ERRORS
1604 device_printf(sc->vr_dev, "%s: receive error : 0x%b\n",
1605 __func__, status, VR_ISR_ERR_BITS);
1611 #endif /* DEVICE_POLLING */
1613 /* Back off the transmit threshold. */
1615 vr_tx_underrun(struct vr_softc *sc)
1619 device_printf(sc->vr_dev, "Tx underrun -- ");
1620 if (sc->vr_txthresh < VR_TXTHRESH_MAX) {
1621 thresh = sc->vr_txthresh;
1623 if (sc->vr_txthresh >= VR_TXTHRESH_MAX) {
1624 sc->vr_txthresh = VR_TXTHRESH_MAX;
1625 printf("using store and forward mode\n");
1627 printf("increasing Tx threshold(%d -> %d)\n",
1628 vr_tx_threshold_tables[thresh].value,
1629 vr_tx_threshold_tables[thresh + 1].value);
1632 sc->vr_stat.tx_underrun++;
1633 if (vr_tx_stop(sc) != 0) {
1634 device_printf(sc->vr_dev, "%s: Tx shutdown error -- "
1635 "resetting\n", __func__);
1636 sc->vr_flags |= VR_F_RESTART;
1645 struct vr_softc *sc;
1649 sc = (struct vr_softc *)arg;
1653 if (sc->vr_suspended != 0)
1656 status = CSR_READ_2(sc, VR_ISR);
1657 if (status == 0 || status == 0xffff || (status & VR_INTRS) == 0)
1661 #ifdef DEVICE_POLLING
1662 if ((ifp->if_capenable & IFCAP_POLLING) != 0)
1666 /* Suppress unwanted interrupts. */
1667 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
1668 (sc->vr_flags & VR_F_RESTART) != 0) {
1669 CSR_WRITE_2(sc, VR_IMR, 0);
1670 CSR_WRITE_2(sc, VR_ISR, status);
1674 /* Disable interrupts. */
1675 CSR_WRITE_2(sc, VR_IMR, 0x0000);
1677 for (; (status & VR_INTRS) != 0;) {
1678 CSR_WRITE_2(sc, VR_ISR, status);
1679 if ((status & (VR_ISR_BUSERR | VR_ISR_LINKSTAT2 |
1680 VR_ISR_STATSOFLOW)) != 0) {
1681 if (vr_error(sc, status) != 0) {
1687 if ((status & (VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW)) != 0) {
1688 #ifdef VR_SHOW_ERRORS
1689 device_printf(sc->vr_dev, "%s: receive error = 0x%b\n",
1690 __func__, status, VR_ISR_ERR_BITS);
1692 /* Restart Rx if RxDMA SM was stopped. */
1696 status = CSR_READ_2(sc, VR_ISR);
1699 /* Re-enable interrupts. */
1700 CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
1702 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1703 vr_start_locked(ifp);
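/*
 * Handle error interrupts: PCI bus errors, link state changes and MIB
 * counter overflow.
 */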
1710 vr_error(struct vr_softc *sc, uint16_t status)
1714 status &= VR_ISR_BUSERR | VR_ISR_LINKSTAT2 | VR_ISR_STATSOFLOW;
1715 if ((status & VR_ISR_BUSERR) != 0) {
1716 status &= ~VR_ISR_BUSERR;
1717 sc->vr_stat.bus_errors++;
1718 /* Disable further interrupts. */
1719 CSR_WRITE_2(sc, VR_IMR, 0);
1720 pcis = pci_read_config(sc->vr_dev, PCIR_STATUS, 2);
1721 device_printf(sc->vr_dev, "PCI bus error(0x%04x) -- "
1722 "resetting\n", pcis);
1723 pci_write_config(sc->vr_dev, PCIR_STATUS, pcis, 2);
1724 sc->vr_flags |= VR_F_RESTART;
1727 if ((status & VR_ISR_LINKSTAT2) != 0) {
1728 /* Link state change, duplex changes etc. */
1729 status &= ~VR_ISR_LINKSTAT2;
1731 if ((status & VR_ISR_STATSOFLOW) != 0) {
1732 status &= ~VR_ISR_STATSOFLOW;
1733 if (sc->vr_revid >= REV_ID_VT6105M_A0) {
1734 /* Update MIB counters. */
1739 device_printf(sc->vr_dev,
1740 "unhandled interrupt, status = 0x%04x\n", status);
1745 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1746 * pointers to the fragment pointers.
1749 vr_encap(struct vr_softc *sc, struct mbuf **m_head)
1751 struct vr_txdesc *txd;
1752 struct vr_desc *desc;
1754 bus_dma_segment_t txsegs[VR_MAXFRAGS];
1755 uint32_t csum_flags, txctl;
1756 int error, i, nsegs, prod, si;
1761 M_ASSERTPKTHDR((*m_head));
	 * Some VIA Rhine chips want packet buffers to be longword
	 * aligned, but very often our mbufs aren't. Rather than
	 * waste time trying to decide when to copy and when not
	 * to copy, just do it all the time.
1769 if ((sc->vr_quirks & VR_Q_NEEDALIGN) != 0) {
1770 m = m_defrag(*m_head, M_DONTWAIT);
	 * The Rhine chip doesn't auto-pad, so we have to make
	 * sure to pad short frames out to the minimum frame length
	 * ourselves.
1784 if ((*m_head)->m_pkthdr.len < VR_MIN_FRAMELEN) {
1786 padlen = VR_MIN_FRAMELEN - m->m_pkthdr.len;
1787 if (M_WRITABLE(m) == 0) {
1788 /* Get a writable copy. */
1789 m = m_dup(*m_head, M_DONTWAIT);
1797 if (m->m_next != NULL || M_TRAILINGSPACE(m) < padlen) {
1798 m = m_defrag(m, M_DONTWAIT);
1806 * Manually pad short frames, and zero the pad space
1807 * to avoid leaking data.
1809 bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
1810 m->m_pkthdr.len += padlen;
1811 m->m_len = m->m_pkthdr.len;
1815 prod = sc->vr_cdata.vr_tx_prod;
1816 txd = &sc->vr_cdata.vr_txdesc[prod];
1817 error = bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap,
1818 *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
1819 if (error == EFBIG) {
1820 m = m_collapse(*m_head, M_DONTWAIT, VR_MAXFRAGS);
1827 error = bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_tx_tag,
1828 txd->tx_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
1834 } else if (error != 0)
1842 /* Check number of available descriptors. */
1843 if (sc->vr_cdata.vr_tx_cnt + nsegs >= (VR_TX_RING_CNT - 1)) {
1844 bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap);
1848 txd->tx_m = *m_head;
1849 bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap,
1850 BUS_DMASYNC_PREWRITE);
1852 /* Set checksum offload. */
1854 if (((*m_head)->m_pkthdr.csum_flags & VR_CSUM_FEATURES) != 0) {
1855 if ((*m_head)->m_pkthdr.csum_flags & CSUM_IP)
1856 csum_flags |= VR_TXCTL_IPCSUM;
1857 if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP)
1858 csum_flags |= VR_TXCTL_TCPCSUM;
1859 if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP)
1860 csum_flags |= VR_TXCTL_UDPCSUM;
	 * Quite contrary to the VIA Rhine datasheet, the VR_TXCTL_TLINK bit
	 * is required in all descriptors, regardless of whether a frame uses
	 * a single buffer or multiple buffers. Also, the VR_TXSTAT_OWN bit is
	 * valid only in the first descriptor of a multi-fragment frame.
	 * Without that, the VIA Rhine chip generates Tx underrun interrupts
	 * and can't send any frames.
1872 for (i = 0; i < nsegs; i++) {
1873 desc = &sc->vr_rdata.vr_tx_ring[prod];
1874 desc->vr_status = 0;
1875 txctl = txsegs[i].ds_len | VR_TXCTL_TLINK | csum_flags;
1877 txctl |= VR_TXCTL_FIRSTFRAG;
1878 desc->vr_ctl = htole32(txctl);
1879 desc->vr_data = htole32(VR_ADDR_LO(txsegs[i].ds_addr));
1880 sc->vr_cdata.vr_tx_cnt++;
1881 VR_INC(prod, VR_TX_RING_CNT);
1883 /* Update producer index. */
1884 sc->vr_cdata.vr_tx_prod = prod;
1886 prod = (prod + VR_TX_RING_CNT - 1) % VR_TX_RING_CNT;
1887 desc = &sc->vr_rdata.vr_tx_ring[prod];
	 * Set EOP on the last descriptor and request a Tx completion
	 * interrupt for every VR_TX_INTR_THRESH-th frame.
1893 VR_INC(sc->vr_cdata.vr_tx_pkts, VR_TX_INTR_THRESH);
1894 if (sc->vr_cdata.vr_tx_pkts == 0)
1895 desc->vr_ctl |= htole32(VR_TXCTL_LASTFRAG | VR_TXCTL_FINT);
1897 desc->vr_ctl |= htole32(VR_TXCTL_LASTFRAG);
	/* Finally, turn ownership of the first descriptor over to the hardware. */
1900 desc = &sc->vr_rdata.vr_tx_ring[si];
1901 desc->vr_status |= htole32(VR_TXSTAT_OWN);
1903 /* Sync descriptors. */
1904 bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
1905 sc->vr_cdata.vr_tx_ring_map,
1906 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1912 vr_start(struct ifnet *ifp)
1914 struct vr_softc *sc;
1918 vr_start_locked(ifp);
1923 vr_start_locked(struct ifnet *ifp)
1925 struct vr_softc *sc;
1926 struct mbuf *m_head;
1933 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1934 IFF_DRV_RUNNING || sc->vr_link == 0)
1937 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
1938 sc->vr_cdata.vr_tx_cnt < VR_TX_RING_CNT - 2; ) {
1939 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1943 * Pack the data into the transmit ring. If we
1944 * don't have room, set the OACTIVE flag and wait
1945 * for the NIC to drain the ring.
1947 if (vr_encap(sc, &m_head)) {
1950 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1951 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1957 * If there's a BPF listener, bounce a copy of this frame
1960 ETHER_BPF_MTAP(ifp, m_head);
1964 /* Tell the chip to start transmitting. */
1965 VR_SETBIT(sc, VR_CR0, VR_CR0_TX_GO);
1966 /* Set a timeout in case the chip goes out to lunch. */
1967 sc->vr_watchdog_timer = 5;
1974 struct vr_softc *sc;
1976 sc = (struct vr_softc *)xsc;
1983 vr_init_locked(struct vr_softc *sc)
1986 struct mii_data *mii;
1993 mii = device_get_softc(sc->vr_miibus);
1995 /* Cancel pending I/O and free all RX/TX buffers. */
1999 /* Set our station address. */
2000 for (i = 0; i < ETHER_ADDR_LEN; i++)
2001 CSR_WRITE_1(sc, VR_PAR0 + i, IF_LLADDR(sc->vr_ifp)[i]);
2004 VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH);
2005 VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD);
2008 * BCR0 and BCR1 can override the RXCFG and TXCFG registers,
2009 * so we must set both.
2011 VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH);
2012 VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES);
2014 VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH);
2015 VR_SETBIT(sc, VR_BCR1, vr_tx_threshold_tables[sc->vr_txthresh].bcr_cfg);
2017 VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
2018 VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES);
2020 VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
2021 VR_SETBIT(sc, VR_TXCFG, vr_tx_threshold_tables[sc->vr_txthresh].tx_cfg);
2023 /* Init circular RX list. */
2024 if (vr_rx_ring_init(sc) != 0) {
2025 device_printf(sc->vr_dev,
2026 "initialization failed: no memory for rx buffers\n");
2031 /* Init tx descriptors. */
2032 vr_tx_ring_init(sc);
2034 /* Disable all VLAN CAM entries. */
2035 if ((sc->vr_quirks & VR_Q_CAM) != 0) {
2036 CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_VLAN);
2037 CSR_WRITE_1(sc, VR_CAMMASK, 0);
2038 CSR_WRITE_1(sc, VR_CAMCTL, 0);
2042 * Set up receive filter.
2047 * Load the address of the RX ring.
2049 addr = VR_RX_RING_ADDR(sc, 0);
2050 CSR_WRITE_4(sc, VR_RXADDR, VR_ADDR_LO(addr));
2052 * Load the address of the TX ring.
2054 addr = VR_TX_RING_ADDR(sc, 0);
2055 CSR_WRITE_4(sc, VR_TXADDR, VR_ADDR_LO(addr));
	/* Default: full-duplex, no Tx poll. */
2057 CSR_WRITE_1(sc, VR_CR1, VR_CR1_FULLDUPLEX | VR_CR1_TX_NOPOLL);
2059 /* Set flow-control parameters for Rhine III. */
2060 if (sc->vr_revid >= REV_ID_VT6105_A0) {
2061 /* Rx buffer count available for incoming packet. */
2062 CSR_WRITE_1(sc, VR_FLOWCR0, VR_RX_RING_CNT);
2064 * Tx pause low threshold : 16 free receive buffers
2065 * Tx pause XON high threshold : 48 free receive buffers
2067 CSR_WRITE_1(sc, VR_FLOWCR1,
2068 VR_FLOWCR1_TXLO16 | VR_FLOWCR1_TXHI48 | VR_FLOWCR1_XONXOFF);
2069 /* Set Tx pause timer. */
2070 CSR_WRITE_2(sc, VR_PAUSETIMER, 0xffff);
2073 /* Enable receiver and transmitter. */
2074 CSR_WRITE_1(sc, VR_CR0,
2075 VR_CR0_START | VR_CR0_TX_ON | VR_CR0_RX_ON | VR_CR0_RX_GO);
2077 CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
2078 #ifdef DEVICE_POLLING
2080 * Disable interrupts if we are polling.
2082 if (ifp->if_capenable & IFCAP_POLLING)
2083 CSR_WRITE_2(sc, VR_IMR, 0);
2087 * Enable interrupts and disable MII intrs.
2089 CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
2090 if (sc->vr_revid > REV_ID_VT6102_A)
2091 CSR_WRITE_2(sc, VR_MII_IMR, 0);
2096 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2097 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2099 callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc);
2103 * Set media options.
2106 vr_ifmedia_upd(struct ifnet *ifp)
2108 struct vr_softc *sc;
2109 struct mii_data *mii;
2110 struct mii_softc *miisc;
2115 mii = device_get_softc(sc->vr_miibus);
2116 if (mii->mii_instance) {
2117 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
2118 mii_phy_reset(miisc);
2120 error = mii_mediachg(mii);
2127 * Report current media status.
2130 vr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2132 struct vr_softc *sc;
2133 struct mii_data *mii;
2136 mii = device_get_softc(sc->vr_miibus);
2140 ifmr->ifm_active = mii->mii_media_active;
2141 ifmr->ifm_status = mii->mii_media_status;
2145 vr_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2147 struct vr_softc *sc;
2149 struct mii_data *mii;
2153 ifr = (struct ifreq *)data;
2159 if (ifp->if_flags & IFF_UP) {
2160 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2161 if ((ifp->if_flags ^ sc->vr_if_flags) &
2162 (IFF_PROMISC | IFF_ALLMULTI))
2165 if (sc->vr_detach == 0)
2169 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2172 sc->vr_if_flags = ifp->if_flags;
2183 mii = device_get_softc(sc->vr_miibus);
2184 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
2187 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2188 #ifdef DEVICE_POLLING
2189 if (mask & IFCAP_POLLING) {
2190 if (ifr->ifr_reqcap & IFCAP_POLLING) {
2191 error = ether_poll_register(vr_poll, ifp);
2195 /* Disable interrupts. */
2196 CSR_WRITE_2(sc, VR_IMR, 0x0000);
2197 ifp->if_capenable |= IFCAP_POLLING;
2200 error = ether_poll_deregister(ifp);
2201 /* Enable interrupts. */
2203 CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
2204 ifp->if_capenable &= ~IFCAP_POLLING;
2208 #endif /* DEVICE_POLLING */
2209 if ((mask & IFCAP_TXCSUM) != 0 &&
2210 (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
2211 ifp->if_capenable ^= IFCAP_TXCSUM;
2212 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
2213 ifp->if_hwassist |= VR_CSUM_FEATURES;
2215 ifp->if_hwassist &= ~VR_CSUM_FEATURES;
2217 if ((mask & IFCAP_RXCSUM) != 0 &&
2218 (IFCAP_RXCSUM & ifp->if_capabilities) != 0)
2219 ifp->if_capenable ^= IFCAP_RXCSUM;
2220 if ((mask & IFCAP_WOL_UCAST) != 0 &&
2221 (ifp->if_capabilities & IFCAP_WOL_UCAST) != 0)
2222 ifp->if_capenable ^= IFCAP_WOL_UCAST;
2223 if ((mask & IFCAP_WOL_MAGIC) != 0 &&
2224 (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
2225 ifp->if_capenable ^= IFCAP_WOL_MAGIC;
2228 error = ether_ioctl(ifp, command, data);
2236 vr_watchdog(struct vr_softc *sc)
2242 if (sc->vr_watchdog_timer == 0 || --sc->vr_watchdog_timer)
	 * Reclaim first, as we don't request an interrupt for every packet.
2250 if (sc->vr_cdata.vr_tx_cnt == 0)
2253 if (sc->vr_link == 0) {
2255 if_printf(sc->vr_ifp, "watchdog timeout "
2263 if_printf(ifp, "watchdog timeout\n");
2269 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2270 vr_start_locked(ifp);
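/*
 * (Re)start the transmit DMA engine, reloading the descriptor ring address
 * if the transmitter was stopped.
 */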
2274 vr_tx_start(struct vr_softc *sc)
2279 cmd = CSR_READ_1(sc, VR_CR0);
2280 if ((cmd & VR_CR0_TX_ON) == 0) {
2281 addr = VR_TX_RING_ADDR(sc, sc->vr_cdata.vr_tx_cons);
2282 CSR_WRITE_4(sc, VR_TXADDR, VR_ADDR_LO(addr));
2283 cmd |= VR_CR0_TX_ON;
2284 CSR_WRITE_1(sc, VR_CR0, cmd);
2286 if (sc->vr_cdata.vr_tx_cnt != 0) {
2287 sc->vr_watchdog_timer = 5;
2288 VR_SETBIT(sc, VR_CR0, VR_CR0_TX_GO);
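/*
 * (Re)start the receive DMA engine from the current consumer descriptor.
 */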
2293 vr_rx_start(struct vr_softc *sc)
2298 cmd = CSR_READ_1(sc, VR_CR0);
2299 if ((cmd & VR_CR0_RX_ON) == 0) {
2300 addr = VR_RX_RING_ADDR(sc, sc->vr_cdata.vr_rx_cons);
2301 CSR_WRITE_4(sc, VR_RXADDR, VR_ADDR_LO(addr));
2302 cmd |= VR_CR0_RX_ON;
2303 CSR_WRITE_1(sc, VR_CR0, cmd);
2305 CSR_WRITE_1(sc, VR_CR0, cmd | VR_CR0_RX_GO);
2309 vr_tx_stop(struct vr_softc *sc)
2314 cmd = CSR_READ_1(sc, VR_CR0);
2315 if ((cmd & VR_CR0_TX_ON) != 0) {
2316 cmd &= ~VR_CR0_TX_ON;
2317 CSR_WRITE_1(sc, VR_CR0, cmd);
2318 for (i = VR_TIMEOUT; i > 0; i--) {
2320 cmd = CSR_READ_1(sc, VR_CR0);
2321 if ((cmd & VR_CR0_TX_ON) == 0)
2331 vr_rx_stop(struct vr_softc *sc)
2336 cmd = CSR_READ_1(sc, VR_CR0);
2337 if ((cmd & VR_CR0_RX_ON) != 0) {
2338 cmd &= ~VR_CR0_RX_ON;
2339 CSR_WRITE_1(sc, VR_CR0, cmd);
2340 for (i = VR_TIMEOUT; i > 0; i--) {
2342 cmd = CSR_READ_1(sc, VR_CR0);
2343 if ((cmd & VR_CR0_RX_ON) == 0)
2353 * Stop the adapter and free any mbufs allocated to the
2357 vr_stop(struct vr_softc *sc)
2359 struct vr_txdesc *txd;
2360 struct vr_rxdesc *rxd;
2367 sc->vr_watchdog_timer = 0;
2369 callout_stop(&sc->vr_stat_callout);
2370 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2372 CSR_WRITE_1(sc, VR_CR0, VR_CR0_STOP);
2373 if (vr_rx_stop(sc) != 0)
2374 device_printf(sc->vr_dev, "%s: Rx shutdown error\n", __func__);
2375 if (vr_tx_stop(sc) != 0)
2376 device_printf(sc->vr_dev, "%s: Tx shutdown error\n", __func__);
2377 /* Clear pending interrupts. */
2378 CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
2379 CSR_WRITE_2(sc, VR_IMR, 0x0000);
2380 CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
2381 CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);
2384 * Free RX and TX mbufs still in the queues.
2386 for (i = 0; i < VR_RX_RING_CNT; i++) {
2387 rxd = &sc->vr_cdata.vr_rxdesc[i];
2388 if (rxd->rx_m != NULL) {
2389 bus_dmamap_sync(sc->vr_cdata.vr_rx_tag,
2390 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
2391 bus_dmamap_unload(sc->vr_cdata.vr_rx_tag,
2397 for (i = 0; i < VR_TX_RING_CNT; i++) {
2398 txd = &sc->vr_cdata.vr_txdesc[i];
2399 if (txd->tx_m != NULL) {
2400 bus_dmamap_sync(sc->vr_cdata.vr_tx_tag,
2401 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2402 bus_dmamap_unload(sc->vr_cdata.vr_tx_tag,
2411 * Stop all chip I/O so that the kernel's probe routines don't
2412 * get confused by errant DMAs when rebooting.
2415 vr_shutdown(device_t dev)
2418 return (vr_suspend(dev));
2422 vr_suspend(device_t dev)
2424 struct vr_softc *sc;
2426 sc = device_get_softc(dev);
2431 sc->vr_suspended = 1;
2438 vr_resume(device_t dev)
2440 struct vr_softc *sc;
2443 sc = device_get_softc(dev);
2449 if (ifp->if_flags & IFF_UP)
2452 sc->vr_suspended = 0;
2459 vr_setwol(struct vr_softc *sc)
2468 if (sc->vr_revid < REV_ID_VT6102_A ||
2469 pci_find_extcap(sc->vr_dev, PCIY_PMG, &pmc) != 0)
2474 /* Clear WOL configuration. */
2475 CSR_WRITE_1(sc, VR_WOLCR_CLR, 0xFF);
2476 CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_SAB | VR_WOLCFG_SAM);
2477 CSR_WRITE_1(sc, VR_PWRCSR_CLR, 0xFF);
2478 CSR_WRITE_1(sc, VR_PWRCFG_CLR, VR_PWRCFG_WOLEN);
2479 if (sc->vr_revid > REV_ID_VT6105_B0) {
2480 /* Newer Rhine III supports two additional patterns. */
2481 CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_PATTERN_PAGE);
2482 CSR_WRITE_1(sc, VR_TESTREG_CLR, 3);
2483 CSR_WRITE_1(sc, VR_PWRCSR1_CLR, 3);
2485 if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0)
2486 CSR_WRITE_1(sc, VR_WOLCR_SET, VR_WOLCR_UCAST);
2487 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
2488 CSR_WRITE_1(sc, VR_WOLCR_SET, VR_WOLCR_MAGIC);
	 * It seems that multicast wakeup frames require programming the
	 * pattern registers with a valid CRC as well as a pattern mask for
	 * each pattern. While it's possible to set up such a pattern, it
	 * would complicate the WOL configuration, so ignore multicast wakeup
	 * frames.
2495 if ((ifp->if_capenable & IFCAP_WOL) != 0) {
2496 CSR_WRITE_1(sc, VR_WOLCFG_SET, VR_WOLCFG_SAB | VR_WOLCFG_SAM);
2497 v = CSR_READ_1(sc, VR_STICKHW);
2498 CSR_WRITE_1(sc, VR_STICKHW, v | VR_STICKHW_WOL_ENB);
2499 CSR_WRITE_1(sc, VR_PWRCFG_SET, VR_PWRCFG_WOLEN);
2502 /* Put hardware into sleep. */
2503 v = CSR_READ_1(sc, VR_STICKHW);
2504 v |= VR_STICKHW_DS0 | VR_STICKHW_DS1;
2505 CSR_WRITE_1(sc, VR_STICKHW, v);
2507 /* Request PME if WOL is requested. */
2508 pmstat = pci_read_config(sc->vr_dev, pmc + PCIR_POWER_STATUS, 2);
2509 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
2510 if ((ifp->if_capenable & IFCAP_WOL) != 0)
2511 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
2512 pci_write_config(sc->vr_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
2516 vr_clrwol(struct vr_softc *sc)
2522 if (sc->vr_revid < REV_ID_VT6102_A)
2525 /* Take hardware out of sleep. */
2526 v = CSR_READ_1(sc, VR_STICKHW);
2527 v &= ~(VR_STICKHW_DS0 | VR_STICKHW_DS1 | VR_STICKHW_WOL_ENB);
2528 CSR_WRITE_1(sc, VR_STICKHW, v);
	/* Clear WOL configuration, as WOL may interfere with normal operation. */
2531 CSR_WRITE_1(sc, VR_WOLCR_CLR, 0xFF);
2532 CSR_WRITE_1(sc, VR_WOLCFG_CLR,
2533 VR_WOLCFG_SAB | VR_WOLCFG_SAM | VR_WOLCFG_PMEOVR);
2534 CSR_WRITE_1(sc, VR_PWRCSR_CLR, 0xFF);
2535 CSR_WRITE_1(sc, VR_PWRCFG_CLR, VR_PWRCFG_WOLEN);
2536 if (sc->vr_revid > REV_ID_VT6105_B0) {
2537 /* Newer Rhine III supports two additional patterns. */
2538 CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_PATTERN_PAGE);
2539 CSR_WRITE_1(sc, VR_TESTREG_CLR, 3);
2540 CSR_WRITE_1(sc, VR_PWRCSR1_CLR, 3);
2545 vr_sysctl_stats(SYSCTL_HANDLER_ARGS)
2547 struct vr_softc *sc;
2548 struct vr_statistics *stat;
2553 error = sysctl_handle_int(oidp, &result, 0, req);
2555 if (error != 0 || req->newptr == NULL)
2559 sc = (struct vr_softc *)arg1;
2560 stat = &sc->vr_stat;
2562 printf("%s statistics:\n", device_get_nameunit(sc->vr_dev));
2563 printf("Outbound good frames : %ju\n",
2564 (uintmax_t)stat->tx_ok);
2565 printf("Inbound good frames : %ju\n",
2566 (uintmax_t)stat->rx_ok);
2567 printf("Outbound errors : %u\n", stat->tx_errors);
2568 printf("Inbound errors : %u\n", stat->rx_errors);
2569 printf("Inbound no buffers : %u\n", stat->rx_no_buffers);
2570 printf("Inbound no mbuf clusters: %d\n", stat->rx_no_mbufs);
2571 printf("Inbound FIFO overflows : %d\n",
2572 stat->rx_fifo_overflows);
2573 printf("Inbound CRC errors : %u\n", stat->rx_crc_errors);
2574 printf("Inbound frame alignment errors : %u\n",
2575 stat->rx_alignment);
2576 printf("Inbound giant frames : %u\n", stat->rx_giants);
2577 printf("Inbound runt frames : %u\n", stat->rx_runts);
2578 printf("Outbound aborted with excessive collisions : %u\n",
2580 printf("Outbound collisions : %u\n", stat->tx_collisions);
2581 printf("Outbound late collisions : %u\n",
2582 stat->tx_late_collisions);
2583 printf("Outbound underrun : %u\n", stat->tx_underrun);
2584 printf("PCI bus errors : %u\n", stat->bus_errors);
2585 printf("driver restarted due to Rx/Tx shutdown failure : %u\n",